| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2 to 1.05M | stringlengths 5 to 104 | stringlengths 4 to 251 | stringclasses 1 value | stringclasses 15 values | int32 2 to 1.05M |
#!/usr/bin/python
import multiprocessing
import os, re, errno
import subprocess
def purge(dir, pattern):
for f in os.listdir(dir):
if re.search(pattern, f):
os.remove(os.path.join(dir, f))
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
mkdir_p("results/plt")
purge("results/plt","fm_plt*")
total_processes = multiprocessing.cpu_count()
counter=1
samplesize=100
randomnumbers=1000000
procs = []
# Loops through the processes
# The wait() calls further below make the script wait for all child processes to finish before concatenating results.
# Each eve process partitions the events for a numbered process (counter).
# The output stream of events invokes getmodel, which calculates the CDFs for that subset of events.
# The CDF stream invokes gulcalc, which performs the ground up loss sampling. The key parameters are the number of samples -S and the output stream type -i.
# The ground up losses are streamed through fmcalc to apply policy terms and conditions and output insured losses.
# The insured losses are streamed through summarycalc to summarize the samples to portfolio level.
# The portfolio level insured losses are streamed through pltcalc to produce the period loss table, which is written to a csv file per process.
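# For illustration (a sketch derived from the format string below, not captured output): on a
# machine where multiprocessing.cpu_count() returns 2 and with samplesize=100, the first
# command generated by the loop would be
#   eve 1 2 | getmodel | gulcalc -r -S100 -i - | fmcalc | summarycalc -f -2 - | pltcalc > results/plt/fm_plt_summary2_p1.csv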
while (counter <= total_processes) :
cmd="eve %d %d | getmodel | gulcalc -r -S%d -i - | fmcalc | summarycalc -f -2 - | pltcalc > results/plt/fm_plt_summary2_p%d.csv " % (counter,total_processes,samplesize,counter)
p1 = subprocess.Popen(cmd,shell=True)
procs.append(p1)
print(cmd)
counter = counter + 1
for p in procs:
p.wait()
counter=1
# After all processes are finished, the final step is to concatenate the output files together.
filenames = []
for file in os.listdir("results/plt"):
if file.endswith(".csv"):
file = "results/plt/"+ file
filenames.append(file)
with open('results/plt/fm_plt_summary2.csv', 'w') as outfile:
outfile.write("Summary_id, period_num, event_id, mean, standard_deviation, exposure_value, occ_year, occ_month, occ_date\n")
for fname in filenames:
with open(fname) as infile:
lineno=1
for line in infile:
if lineno>1:
outfile.write(line)
lineno=lineno+1
print ("Finished. View outputs in results/plt")
| OasisLMF/ktools | examples/pltcalc_example.py | Python | bsd-3-clause | 2,488 |
"""
Tests that duplicate columns are handled appropriately when parsed by the
CSV engine. In general, the expected result is that they are either thoroughly
de-duplicated (if mangling requested) or ignored otherwise.
"""
from io import StringIO
import pytest
from pandas import DataFrame
import pandas._testing as tm
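# Illustrative sketch of the behaviour described in the module docstring above (not part of
# the test suite): with default mangling the parser renames duplicate headers by appending
# numeric suffixes, e.g.
#
#   import pandas as pd
#   from io import StringIO
#   list(pd.read_csv(StringIO("a,a,b,b,b\n1,2,3,4,5")).columns)
#   # -> ['a', 'a.1', 'b', 'b.1', 'b.2']
#
# which is exactly what test_basic below asserts.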
skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
@skip_pyarrow
@pytest.mark.parametrize("kwargs", [{}, {"mangle_dupe_cols": True}])
def test_basic(all_parsers, kwargs):
# TODO: add test for condition "mangle_dupe_cols=False"
# once it is actually supported (gh-12935)
parser = all_parsers
data = "a,a,b,b,b\n1,2,3,4,5"
result = parser.read_csv(StringIO(data), sep=",", **kwargs)
expected = DataFrame([[1, 2, 3, 4, 5]], columns=["a", "a.1", "b", "b.1", "b.2"])
tm.assert_frame_equal(result, expected)
@skip_pyarrow
def test_basic_names(all_parsers):
# See gh-7160
parser = all_parsers
data = "a,b,a\n0,1,2\n3,4,5"
expected = DataFrame([[0, 1, 2], [3, 4, 5]], columns=["a", "b", "a.1"])
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_basic_names_raise(all_parsers):
# See gh-7160
parser = all_parsers
data = "0,1,2\n3,4,5"
with pytest.raises(ValueError, match="Duplicate names"):
parser.read_csv(StringIO(data), names=["a", "b", "a"])
@skip_pyarrow
@pytest.mark.parametrize(
"data,expected",
[
("a,a,a.1\n1,2,3", DataFrame([[1, 2, 3]], columns=["a", "a.2", "a.1"])),
(
"a,a,a.1,a.1.1,a.1.1.1,a.1.1.1.1\n1,2,3,4,5,6",
DataFrame(
[[1, 2, 3, 4, 5, 6]],
columns=["a", "a.2", "a.1", "a.1.1", "a.1.1.1", "a.1.1.1.1"],
),
),
(
"a,a,a.3,a.1,a.2,a,a\n1,2,3,4,5,6,7",
DataFrame(
[[1, 2, 3, 4, 5, 6, 7]],
columns=["a", "a.4", "a.3", "a.1", "a.2", "a.5", "a.6"],
),
),
],
)
def test_thorough_mangle_columns(all_parsers, data, expected):
# see gh-17060
parser = all_parsers
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
@skip_pyarrow
@pytest.mark.parametrize(
"data,names,expected",
[
(
"a,b,b\n1,2,3",
["a.1", "a.1", "a.1.1"],
DataFrame(
[["a", "b", "b"], ["1", "2", "3"]], columns=["a.1", "a.1.1", "a.1.1.1"]
),
),
(
"a,b,c,d,e,f\n1,2,3,4,5,6",
["a", "a", "a.1", "a.1.1", "a.1.1.1", "a.1.1.1.1"],
DataFrame(
[["a", "b", "c", "d", "e", "f"], ["1", "2", "3", "4", "5", "6"]],
columns=["a", "a.1", "a.1.1", "a.1.1.1", "a.1.1.1.1", "a.1.1.1.1.1"],
),
),
(
"a,b,c,d,e,f,g\n1,2,3,4,5,6,7",
["a", "a", "a.3", "a.1", "a.2", "a", "a"],
DataFrame(
[
["a", "b", "c", "d", "e", "f", "g"],
["1", "2", "3", "4", "5", "6", "7"],
],
columns=["a", "a.1", "a.3", "a.1.1", "a.2", "a.2.1", "a.3.1"],
),
),
],
)
def test_thorough_mangle_names(all_parsers, data, names, expected):
# see gh-17095
parser = all_parsers
with pytest.raises(ValueError, match="Duplicate names"):
parser.read_csv(StringIO(data), names=names)
@skip_pyarrow
def test_mangled_unnamed_placeholders(all_parsers):
# xref gh-13017
orig_key = "0"
parser = all_parsers
orig_value = [1, 2, 3]
df = DataFrame({orig_key: orig_value})
# This test recursively updates `df`.
for i in range(3):
expected = DataFrame()
for j in range(i + 1):
col_name = "Unnamed: 0" + f".{1*j}" * min(j, 1)
expected.insert(loc=0, column=col_name, value=[0, 1, 2])
expected[orig_key] = orig_value
df = parser.read_csv(StringIO(df.to_csv()))
tm.assert_frame_equal(df, expected)
@skip_pyarrow
def test_mangle_dupe_cols_already_exists(all_parsers):
# GH#14704
parser = all_parsers
data = "a,a,a.1,a,a.3,a.1,a.1.1\n1,2,3,4,5,6,7"
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[[1, 2, 3, 4, 5, 6, 7]],
columns=["a", "a.2", "a.1", "a.4", "a.3", "a.1.2", "a.1.1"],
)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
def test_mangle_dupe_cols_already_exists_unnamed_col(all_parsers):
# GH#14704
parser = all_parsers
data = ",Unnamed: 0,,Unnamed: 2\n1,2,3,4"
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[[1, 2, 3, 4]],
columns=["Unnamed: 0.1", "Unnamed: 0", "Unnamed: 2.1", "Unnamed: 2"],
)
tm.assert_frame_equal(result, expected)
| pandas-dev/pandas | pandas/tests/io/parser/test_mangle_dupes.py | Python | bsd-3-clause | 4,824 |
from datetime import date, timedelta
from django.test.client import Client
from django.contrib.auth.models import User
from django.db.models import Q
from django.core.urlresolvers import reverse
from django.core.cache import cache
from model_mommy import mommy
from django.test import TestCase
from django.utils.safestring import mark_safe
from django.test.client import RequestFactory
from survey.models import *
from survey.context_processor import CachedValue
from survey.models.locations import *
from survey.templatetags.template_tags import *
from survey.models.questions import *
from survey.models.respondents import (RespondentGroupCondition, GroupTestArgument,
ParameterQuestion, SurveyParameterList, RespondentGroup,
ParameterTemplate,)
class TemplateTagsTest(TestCase):
fixtures = ['answeraccessdefinition.json', ]
def setUp(self):
self.survey = mommy.make(Survey)
self.batch = mommy.make(Batch, survey=self.survey)
self.qset = QuestionSet.get(pk=self.batch.id)
self.question = mommy.make(Question, qset=self.qset, answer_type=NumericalAnswer.choice_name())
self.ea = EnumerationArea.objects.create(name="BUBEMBE", code="11-BUBEMBE")
self.investigator = Interviewer.objects.create(name="InvestigatorViewdata",
ea=self.ea,
gender='1', level_of_education='Primary',
language='English', weights=0, date_of_birth='1987-01-01')
self.surveyAllocation_obj = SurveyAllocation.objects.create(
interviewer=self.investigator,
survey=self.survey,
allocation_ea=self.ea,
status=1
)
self.interview = Interview.objects.create(
interviewer=self.investigator,
ea=self.ea,
survey=self.survey,
question_set=self.qset,
)
self.listingsample = ListingSample.objects.create(survey=self.survey, interview=self.interview)
def test_get_value(self):
class A(object):
b = 5
a = A()
self.assertEquals(get_value(a, 'b'), 5)
a = {'c': 7}
self.assertEquals(get_value(a, 'c'), 7)
def test_show_flow_condition(self):
# flow without validation
flow = mommy.make(QuestionFlow, question=self.question)
self.assertEquals(show_condition(flow), '')
validation = mommy.make(ResponseValidation, validation_test=NumericalAnswer.equals.__name__)
text_argument = mommy.make(TextArgument, validation=validation, position=1, param=1)
flow.validation = validation
self.assertIn(flow.validation.validation_test, show_condition(flow))
def test_modulo_understands_number_is_modulo_of_another(self):
self.assertTrue(modulo(4, 2))
def test_modulo_understands_number_is_not_modulo_of_another(self):
self.assertFalse(modulo(4, 3))
def test_knows_mobile_number_not_in_field_string(self):
self.assertFalse(is_mobile_number(""))
def test_knows_mobile_number_in_field_string(self):
self.assertTrue(is_mobile_number("mobile number : 1234567"))
def test_gets_key_value_from_location_dict(self):
country_name = 'Uganda'
district_name = 'Kampala'
county_name = 'Bukoto'
location_dict = {'Country': country_name,
'District': district_name, 'County': county_name}
self.assertEqual(get_value(location_dict, 'Country'), country_name)
self.assertEqual(get_value(location_dict, 'District'), district_name)
self.assertEqual(get_value(location_dict, 'County'), county_name)
def test_returns_empty_string_if_key_does_not_exist_from_location_dict(self):
country_name = 'Uganda'
district_name = 'Kampala'
location_dict = {'Country': country_name, 'District': district_name}
self.assertEqual(get_value(location_dict, 'Country'), country_name)
self.assertEqual(get_value(location_dict, 'District'), district_name)
self.assertEqual(get_value(location_dict, 'County'), "")
def test_should_know_how_to_format_date(self):
date_entered = date(2008, 4, 5)
date_expected = "Apr 05, 2008"
self.assertEqual(format_date(date_entered), date_expected)
def test_should_return_months_given_month_number(self):
self.assertEqual('January', get_month(0))
self.assertEqual('March', get_month(2))
self.assertEqual('N/A', get_month(None))
self.assertEqual('N/A', get_month(''))
def test_should_return_url_given_url_name(self):
self.assertEqual('/surveys/', get_url_without_ids('survey_list_page'))
def test_should_return_url_given_url_name_and_ids(self):
self.assertEqual('/surveys/1/delete/',
get_url_with_ids(1, 'delete_survey'))
self.assertEqual('/surveys/1/batches/2/',
get_url_with_ids("1, 2", 'batch_show_page'))
def test_current(self):
l= [1,2]
self.assertEqual(1,current(l,0))
self.assertEqual(None,current(l,10))
def test_replace(self):
str = " world"
self.assertEqual("helloworld", replace_space(str, "hello"))
def test_should_return_concatenated_ints_in_a_single_string(self):
self.assertEqual('1, 2', add_string(1, 2))
self.assertEqual('1, 2', add_string('1', '2'))
def test_concat_strings(self):
arg = "abc"
self.assertEqual('abc', concat_strings(arg))
def test_condition_text(self):
self.assertEqual('EQUALS', condition_text('EQUALS'))
self.assertEqual('', condition_text('abv'))
def test_should_return_repeated_string(self):
self.assertEqual('000', repeat_string('0', 4))
def test_should_return_selected_for_selected_batch(self):
survey = Survey.objects.create(
name="open survey", description="open survey", has_sampling=True)
batch = Batch.objects.create(name="open survey", survey=survey)
self.assertEqual("selected='selected'",
is_survey_selected_given(survey, batch))
def test_should_return_selected_for_is_selected(self):
# survey = Survey.objects.create(
# name="open survey", description="open survey", has_sampling=True)
batch = Batch.objects.create(name="batchnames")
self.assertEqual("selected='selected'",
is_selected(batch,batch))
def test_should_return_none_for_selected_batch(self):
survey = Survey.objects.create(
name="open survey", description="open survey", has_sampling=True)
batch = Batch.objects.create(name="Batch not belonging to survey")
self.assertIsNone(is_survey_selected_given(survey, batch))
def test_should_return_none_if_selected_batch_has_no_survey(self):
survey = Survey.objects.create(
name="open survey", description="open survey", has_sampling=True)
batch = Batch.objects.create(name="Batch not belonging to survey")
self.assertIsNone(is_survey_selected_given(survey, batch))
def test_should_return_none_when_selected_batch_is_none(self):
survey = Survey.objects.create(
name="open survey", description="open survey", has_sampling=True)
self.assertIsNone(is_survey_selected_given(survey, None))
def test_knows_batch_is_activated_for_non_response_for_location(self):
country = LocationType.objects.create(name="Country", slug='country')
district = LocationType.objects.create(
name="District", parent=country, slug='district')
uganda = Location.objects.create(name="Uganda", type=country)
kampala = Location.objects.create(
name="Kampala", type=district, parent=uganda)
all_open_locations = Location.objects.all()
self.assertEqual("checked='checked'", non_response_is_activefor(
all_open_locations, kampala))
def test_knows_batch_is_not_activated_for_non_response_for_location(self):
country = LocationType.objects.create(name="Country", slug='country')
district = LocationType.objects.create(
name="District", parent=country, slug='district')
uganda = Location.objects.create(name="Uganda", type=country)
kampala = Location.objects.create(
name="Kampala", type=district, parent=uganda)
all_open_locations = Location.objects.filter(name="Mbarara")
self.assertEqual(None, non_response_is_activefor(
all_open_locations, kampala))
def test_knows_ea_is_selected_given_location_data(self):
country = LocationType.objects.create(name="Country", slug='country')
district = LocationType.objects.create(
name="District", parent=country, slug='district')
uganda = Location.objects.create(name="Uganda", type=country)
kisasi = Location.objects.create(
name='Kisaasi', type=district, parent=uganda)
ea1 = EnumerationArea.objects.create(name="EA Kisasi1")
ea2 = EnumerationArea.objects.create(name="EA Kisasi2")
ea1.locations.add(kisasi)
ea2.locations.add(kisasi)
def test_ea_is_location_selected(self):
country = LocationType.objects.create(name="Country1", slug='country')
district = LocationType.objects.create(
name="District1", parent=country, slug='district')
uganda = Location.objects.create(name="Uganda1", type=country)
kisasi = Location.objects.create(
name='Kisaasi1', type=district, parent=uganda)
ea1 = EnumerationArea.objects.create(name="EA Kisasi11")
ea2 = EnumerationArea.objects.create(name="EA Kisasi12")
ea1.locations.add(kisasi)
ea2.locations.add(kisasi)
def test_batch_is_selected(self):
batch = Batch.objects.create(order=1, name="Batch name")
self.assertFalse(batch.is_open())
country = LocationType.objects.create(name='Country', slug='country')
district = LocationType.objects.create(
name='District', parent=country, slug='district')
uganda = Location.objects.create(name="Uganda", type=country)
kampala = Location.objects.create(
name="Kampala", type=district, parent=uganda)
batch.open_for_location(kampala)
expected = "selected='selected'"
self.assertEqual(expected, is_selected(batch, batch))
def test_is_batch_open_for_location(self):
batch = Batch.objects.create(order=1, name="Batch name")
self.assertFalse(batch.is_open())
country = LocationType.objects.create(name='Country', slug='country')
district = LocationType.objects.create(
name='District', parent=country, slug='district')
uganda = Location.objects.create(name="Uganda", type=country)
kampala = Location.objects.create(
name="Kampala", type=district, parent=uganda)
batch.open_for_location(kampala)
open_locations = [uganda, kampala]
self.assertEqual("checked='checked'",
is_batch_open_for_location(open_locations, kampala))
def test_condition(self):
condition = RespondentGroupCondition.objects.create(validation_test="EQUALS",
respondent_group_id=1,test_question_id=1)
self.assertEqual("EQUALS", condition.validation_test)
def test_quest_validation_opts(self):
batch = Batch.objects.create(order=1, name="Batch name")
condition = RespondentGroupCondition.objects.create(validation_test="EQUALS",
respondent_group_id=1,
test_question_id=1)
def test_ancestors_reversed_reversed(self):
country = LocationType.objects.create(name='Country', slug='country')
region = LocationType.objects.create(name='Region', slug='region')
city = LocationType.objects.create(name='City', slug='city')
parish = LocationType.objects.create(name='Parish', slug='parish')
village = LocationType.objects.create(name='Village', slug='village')
subcounty = LocationType.objects.create(
name='Subcounty', slug='subcounty')
africa = Location.objects.create(name='Africa', type=country)
uganda = Location.objects.create(
name='Uganda', type=region, parent=africa)
abim = Location.objects.create(name='ABIM', parent=uganda, type=city)
abim_son = Location.objects.create(
name='LABWOR', parent=abim, type=parish)
abim_son_son = Location.objects.create(
name='KALAKALA', parent=abim_son, type=village)
abim_son_daughter = Location.objects.create(
name='OYARO', parent=abim_son, type=village)
abim_son_daughter_daughter = Location.objects.create(
name='WIAWER', parent=abim_son_daughter, type=subcounty)
abim_son_son_daughter = Location.objects.create(
name='ATUNGA', parent=abim_son_son, type=subcounty)
abim_son_son_son = Location.objects.create(
name='WICERE', parent=abim_son_son, type=subcounty)
self.assertEqual([], ancestors_reversed(africa))
self.assertEqual([africa], ancestors_reversed(uganda))
self.assertEqual([africa, uganda], ancestors_reversed(abim))
self.assertEqual([africa, uganda, abim], ancestors_reversed(abim_son))
self.assertEqual([africa, uganda, abim, abim_son],
ancestors_reversed(abim_son_son))
self.assertEqual([africa, uganda, abim, abim_son,
abim_son_son], ancestors_reversed(abim_son_son_son))
def test_trim(self):
str1 = "survey_test"
self.assertEquals(str1.strip(), trim(str1))
def test_current(self):
d = [3, 5]
self.assertEquals(d[1], current(d, 1))
self.assertEquals(None, current(d, 7))
def test_next(self):
d = [3, 5, 8]
self.assertEquals(d[2], next(d, 1))
self.assertEquals(None, next(d, 7))
def test_space_replace(self):
msg = 'hey|guys|how|are|you'
self.assertEquals('hey guys how are you', space_replace(msg, '|'))
def test_has_location_selected(self):
class Dummy(object):
has_loc = True
def has_location_selected(self, obj):
return self.has_loc
loc_data = Dummy()
# the following assertion is valid since is_location_selected only delegates to loc_data.has_location_selected
self.assertEquals("selected='selected'", is_location_selected(loc_data, {'me': 'you'}))
loc_data.has_loc = False
self.assertEquals(None, is_location_selected(loc_data, {'me': 'you'}))
def test_selected_ea(self):
class Dummy(object):
selected_ea = self.ea
def is_ea_selected(self, ea):
return self.selected_ea
loc_data = Dummy()
# the following assertion is valid since is_ea_selected only delegates to loc_data.is_ea_selected
self.assertEquals("selected='selected'", is_ea_selected(loc_data, self.ea))
ea2 = EnumerationArea.objects.create(name='testea2')
self.assertEquals(None, is_ea_selected(loc_data, ea2))
def test_is_radio(self):
field = 'radio-select'
self.assertEquals(is_radio(field), "")
field = 'radio'
self.assertEquals(is_radio(field), "radio_field")
def test_display_list(self):
l = [2, 4, 6]
self.assertEquals(', '.join([str(i) for i in l]), display_list(l))
def test_join_list(self):
l = ['me', 'you', 'us']
new_list = ['<span class="muted">%s</span>' % string.capwords(str(item)) for item in l]
self.assertEquals(mark_safe(' '.join(new_list)), join_list(l, ' '))
def test_get_callable_value(self):
def callable(x):
return str(x*x)
self.assertEquals(get_value(callable, 3), '9')
def test_cached_value(self):
cache.set('key', 8)
self.assertEquals(get_cached_result('key', 4), 8)
cache.delete('key')
self.assertEquals(get_cached_result('key', 4), 4)
def test_batches_enabled(self):
class Dummy(object):
enabled = True
def batches_enabled(self, ea):
return self.enabled
d = Dummy()
self.assertEquals('Enabled', batches_enabled(d, self.ea))
d.enabled = False
self.assertEquals('Not Enabled', batches_enabled(d, self.ea))
def test_get_age(self):
age = 10
d = date.today() - timedelta(days=365*age)
self.assertEquals(get_age(d), age-1) # since starts from 0
def test_url_with_ids_using_dict(self):
d = (1, )
url = reverse('deactivate_user', args=d)
self.assertEquals(get_url_with_ids(d, 'deactivate_user'), url)
def test_concat_str(self):
l = ['hey', 'boy']
self.assertEquals(concat_strings(*l), ''.join(l))
def test_get_channels(self):
answer_type = NumericalAnswer.choice_name()
channels = AnswerAccessDefinition.objects.filter(answer_type=answer_type
).values_list('channel', flat=True).order_by('channel')
self.assertEquals(','.join(channels), access_channels(answer_type))
def test_quest_validation_opts(self):
batch = mommy.make(Batch)
import json
validation_options = json.loads(quest_validation_opts(batch))
for cls in Answer.__subclasses__():
options = validation_options[cls.choice_name()]
for idx, validator in enumerate(cls.validators()):
self.assertEquals(options[idx]['display'], validator.__name__)
self.assertEquals(options[idx]['value'], validator.__name__.upper())
def test_validation_args(self):
batch = mommy.make(Batch)
import json
args_dict = json.loads(validation_args(batch))
self.assertEquals(args_dict['EQUALS'], 1)
self.assertEquals(args_dict['BETWEEN'], 2)
self.assertEquals(args_dict['GREATER_THAN'], 1)
self.assertEquals(args_dict['LESS_THAN'], 1)
def test_get_question_value(self):
batch = mommy.make(Batch)
question = mommy.make(Question, qset=batch)
answers_dict = {question.pk: 'hey'}
self.assertEquals(get_question_value(question, answers_dict), 'hey')
def test_can_start_survey(self):
interviewer = mommy.make(Interviewer, name='intesg')
survey = mommy.make(Survey, has_sampling=False, name='testks')
mommy.make(SurveyAllocation, survey=survey, interviewer=interviewer, allocation_ea=self.ea)
self.assertEquals(can_start_survey(interviewer), True)
def test_is_relevant_sample(self):
interviewer = mommy.make(Interviewer, name='intesg')
survey = mommy.make(Survey, has_sampling=False, name='testks')
mommy.make(SurveyAllocation, survey=survey, interviewer=interviewer, allocation_ea=self.ea)
mommy.make(SurveyAllocation, survey=survey, interviewer=interviewer,
allocation_ea=mommy.make(EnumerationArea))
assignments = interviewer.unfinished_assignments
ea_assignmts = assignments.filter(allocation_ea__id=self.ea.id)
odk_print = ' or '.join(["selected(/qset/surveyAllocation, '%s')" % a.allocation_ea.name for a in ea_assignmts])
self.assertEquals(is_relevant_sample(self.ea.id, assignments), odk_print)
def test_get_download_url(self):
rf = RequestFactory()
url_name = 'users_index'
url = reverse(url_name)
request = rf.get(url)
self.assertIn(url, get_download_url(request, url_name))
def test_get_absolute_url(self):
rf = RequestFactory()
url_name = 'deactivate_user'
url = reverse(url_name, args=(1, ))
request = rf.get(url)
self.assertIn(url, get_absolute_url(request, url_name, 1))
self.assertIn('http', get_absolute_url(request, url_name, 1))
self.assertIn(request.build_absolute_uri('/'), get_home_url(request))
# just as a bonus add test context processor
cache.set('key', 'me')
response = self.client.get('/')
self.assertIn('cached_value', response.context)
self.assertEquals(response.context['cached_value'].key, 'me')
| unicefuganda/uSurvey | survey/tests/template_tags/test_template_tags.py | Python | bsd-3-clause | 20,720 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Steven Cummings'
__email__ = '[email protected]'
__version__ = '0.1.0'
| estebistec/django-get-forms | django_get_forms/__init__.py | Python | bsd-3-clause | 134 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handles generating profiles and transferring them to/from mobile devices."""
import logging
import optparse
import os
import shutil
import sys
import tempfile
from telemetry.core import browser_options
from telemetry.core import discover
from telemetry.core import util
from telemetry.page import page_runner
from telemetry.page import profile_creator
from telemetry.page import test_expectations
def _DiscoverProfileCreatorClasses():
profile_creators_dir = os.path.abspath(os.path.join(util.GetBaseDir(),
os.pardir, 'perf', 'profile_creators'))
base_dir = os.path.abspath(os.path.join(profile_creators_dir, os.pardir))
profile_creators_unfiltered = discover.DiscoverClasses(
profile_creators_dir, base_dir, profile_creator.ProfileCreator)
# Remove '_creator' suffix from keys.
profile_creators = {}
for test_name, test_class in profile_creators_unfiltered.iteritems():
assert test_name.endswith('_creator')
test_name = test_name[:-len('_creator')]
profile_creators[test_name] = test_class
return profile_creators
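# For example, a discovered class keyed as 'small_profile_creator' (hypothetical name) would be
# exposed here as the profile type 'small_profile' for the --profile-type-to-generate flag below.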
def GenerateProfiles(profile_creator_class, profile_creator_name, options):
"""Generate a profile"""
expectations = test_expectations.TestExpectations()
test = profile_creator_class()
temp_output_directory = tempfile.mkdtemp()
options.output_profile_path = temp_output_directory
results = page_runner.Run(test, test.page_set, expectations, options)
if results.errors or results.failures:
logging.warning('Some pages failed.')
if results.errors or results.failures:
logging.warning('Failed pages:\n%s',
'\n'.join(zip(*results.errors + results.failures)[0]))
return 1
# Everything is a-ok, move results to final destination.
generated_profiles_dir = os.path.abspath(options.output_dir)
if not os.path.exists(generated_profiles_dir):
os.makedirs(generated_profiles_dir)
out_path = os.path.join(generated_profiles_dir, profile_creator_name)
if os.path.exists(out_path):
shutil.rmtree(out_path)
shutil.move(temp_output_directory, out_path)
sys.stderr.write("SUCCESS: Generated profile copied to: '%s'.\n" % out_path)
return 0
def Main():
profile_creators = _DiscoverProfileCreatorClasses()
legal_profile_creators = '|'.join(profile_creators.keys())
options = browser_options.BrowserFinderOptions()
parser = options.CreateParser(
"%%prog <--profile-type-to-generate=...> <--browser=...>"
" <--output-directory>")
page_runner.AddCommandLineOptions(parser)
group = optparse.OptionGroup(parser, 'Profile generation options')
group.add_option('--profile-type-to-generate',
dest='profile_type_to_generate',
default=None,
help='Type of profile to generate. '
'Supported values: %s' % legal_profile_creators)
group.add_option('--output-dir',
dest='output_dir',
help='Generated profile is placed in this directory.')
parser.add_option_group(group)
_, _ = parser.parse_args()
# Sanity check arguments.
if not options.profile_type_to_generate:
raise Exception("Must specify --profile-type-to-generate option.")
if options.profile_type_to_generate not in profile_creators.keys():
raise Exception("Invalid profile type, legal values are: %s." %
legal_profile_creators)
if not options.browser_type:
raise Exception("Must specify --browser option.")
if not options.output_dir:
raise Exception("Must specify --output-dir option.")
if options.browser_options.dont_override_profile:
raise Exception("Can't use existing profile when generating profile.")
# Generate profile.
profile_creator_class = profile_creators[options.profile_type_to_generate]
return GenerateProfiles(profile_creator_class,
options.profile_type_to_generate, options)
| mogoweb/chromium-crosswalk | tools/telemetry/telemetry/page/profile_generator.py | Python | bsd-3-clause | 3,960 |
def extractTeatimetranslateHomeBlog(item):
'''
Parser for 'teatimetranslate.home.blog'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Adore Trick of Beauty', 'Adore Trick of Beauty', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractTeatimetranslateHomeBlog.py | Python | bsd-3-clause | 656 |
from __future__ import absolute_import, division, print_function
from .core import istask, get
from .context import set_options
try:
from .imperative import do, value
except ImportError:
pass
__version__ = '0.6.1'
| jayhetee/dask | dask/__init__.py | Python | bsd-3-clause | 225 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "PubMedVis.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| whitews/PubMedVis | manage.py | Python | bsd-3-clause | 252 |
# Hierarchical Agglomerative Cluster Analysis
#
# Copyright (C) 2013 Folgert Karsdorp
# Author: Folgert Karsdorp <[email protected]>
# URL: <https://github.com/fbkarsdorp/HAC-python>
# For licence information, see LICENCE.TXT
class AbstractClusterer(object):
"""
Abstract interface covering basic clustering functionality.
"""
def __init__(self, data, linkage, num_clusters):
"""
@param data: A DistanceMatrix or list of feature value pairs from which
a DistanceMatrix can be constructed.
@type data: L{DistanceMatrix} or C{list}
@param linkage: a clustering or linkage method. The following methods
are implemented:
1. Single Linkage (L{single_link})
2. Complete Linkage (L{complete_link})
3. Average Linkage (L{average_link})
4. Median Linkage (L{median_link})
5. Centroid Linkage (L{centroid_link})
6. Ward Linkage or Minimum Variance Linkage (L{ward_link})
@type linkage: C{function}
"""
raise AssertionError('AbstractClusterer is an abstract interface')
def iterate_clusters(self):
"""
Iterate over all unique vector combinations in the matrix.
"""
raise AssertionError('AbstractClusterer is an abstract interface')
def smallest_distance(self, clusters):
"""
Return the smallest distance in the distance matrix.
The smallest distance depends on the possible connections in the
distance matrix.
"""
raise AssertionError('AbstractClusterer is an abstract interface')
def cluster(self, verbose=0, sum_ess=False):
"""
Cluster all clusters hierarchically until the level of
num_clusters is obtained.
"""
raise AssertionError('AbstractClusterer is an abstract interface')
def update_distmatrix(self, i, j, clusters):
"""
Update the distance matrix using the specified linkage method, so that
it represents the correct distances to the newly formed cluster.
"""
return self.linkage(clusters, i, j, self._dendrogram)
def dendrogram(self):
"""
Return the dendrogram object.
"""
return self._dendrogram
def num_clusters(self):
"""
Return the number of clusters.
"""
return self._num_clusters
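# Illustrative usage sketch (hypothetical names, not part of this module): a concrete subclass
# of AbstractClusterer is expected to be driven roughly like this:
#
#   clusterer = SomeConcreteClusterer(distance_matrix, linkage=ward_link, num_clusters=2)
#   clusterer.cluster(verbose=1)
#   tree = clusterer.dendrogram()
#
# where SomeConcreteClusterer implements iterate_clusters, smallest_distance and cluster, and
# ward_link is one of the linkage functions listed in the __init__ docstring.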
| mikekestemont/PyStyl | pystyl/clustering/api.py | Python | bsd-3-clause | 2,483 |
# Generated by Django 2.2.17 on 2020-12-18 21:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("treenav", "0002_auto_20151001_1646"),
]
operations = [
migrations.AlterField(
model_name="menuitem",
name="level",
field=models.PositiveIntegerField(editable=False),
),
migrations.AlterField(
model_name="menuitem",
name="lft",
field=models.PositiveIntegerField(editable=False),
),
migrations.AlterField(
model_name="menuitem",
name="rght",
field=models.PositiveIntegerField(editable=False),
),
]
| caktus/django-treenav | treenav/migrations/0003_mptt_drop_indexes.py | Python | bsd-3-clause | 729 |
#!/usr/bin/env python
"""
LICENCE
-------
Copyright 2013 by Kitware, Inc. All Rights Reserved. Please refer to
KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
"""
def main():
import flask
import mimetypes
import os.path as osp
import smqtk_config
mimetypes.add_type('image/png', '.png')
mimetypes.add_type('video/ogg', '.ogv')
mimetypes.add_type('video/webm', '.webm')
#print "[DEBUG] Setting static directory to:", smqtk_config.STATIC_DIR
app = flask.Flask(__name__,
static_url_path='/static',
static_folder=smqtk_config.STATIC_DIR)
@app.route('/static/data/clips/<path:filename>')
def send_static_clip(filename):
#print "[DEBUG] Request for filename:", filename
#print "[DEBUG] calling send_from_directory:", (smqtk_config.STATIC_DIR, filename)
return flask.send_from_directory(osp.join(smqtk_config.STATIC_DIR, 'data', 'clips'), filename)
#app.run(host='0.0.0.0', port=5001, debug=True, use_reloader=True)
#app.run(host='127.0.0.1', port=5001, debug=True, use_reloader=True)
app.run(host='127.0.0.1', port=5001, debug=True, use_reloader=False)
#app.run(host='127.0.0.1', port=5001, debug=False, use_reloader=False)
if __name__ == '__main__':
main()
| anguoyang/SMQTK | OLD_ROOT/run_static_server.py | Python | bsd-3-clause | 1,381 |
#coding=utf-8
import datetime
import time
from django.test import TestCase
from .models import *
from pgindex.models import Index
from pgindex.helpers import search
LOREM = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
LOREM_SV = u'Femton goa gubbar ifrån Göteborg, ingen hittade ut'
class SimpleTest(TestCase):
def setUp(self):
self.item = Item.objects.create(title='xyz', content=LOREM,
content_sv=LOREM_SV)
def test_common_words(self):
item = Item.objects.create(title='the a me you can')
result = search('the a me you can').filter(lang='')
self.assertEqual(1, result.count())
def test_create(self):
idx = Index.objects.get_for_object(self.item)
self.assertEqual(idx.data.pk, self.item.pk)
def test_search(self):
qs = search('Lorem')
item = Item.objects.create(title='Lorem', content=LOREM)
self.assertEqual(qs[0].data.pk, item.pk)
def test_lang(self):
qs = search('ut')
idx_sv = qs.filter(lang='sv')
self.assertEqual(qs.count(), 2)
self.assertEqual(idx_sv.count(), 1)
def test_delete(self):
before = Index.objects.count()
item = Item.objects.create(title='Ipsum', content=LOREM)
# Two new indexes are created, ItemIndex and ItemIndexSv
self.assertEqual(before + 2, Index.objects.count())
item.delete()
self.assertEqual(before, Index.objects.count())
def test_url(self):
self.assertEqual(
self.item.get_absolute_url(),
Index.objects.get_for_object(self.item).url,
)
def test_publish(self):
before = Index.objects.all().count()
item = ItemPubl.objects.create(title='invisible', content=LOREM)
after = Index.objects.all().count()
self.assertEqual(before, after)
def test_start_publish_creation(self):
past = datetime.datetime.now() - datetime.timedelta(days=1)
before = Index.objects.count()
item = ItemPublStart.objects.create(title='xyz', content=LOREM)
after = Index.objects.count()
self.assertEqual(before + 1, after)
def test_end_publish_creation_past(self):
past = datetime.datetime.now() - datetime.timedelta(days=1)
before = Index.objects.count()
item = ItemPublStop.objects.create(title='xyz', content=LOREM, stop_publish=past)
after = Index.objects.count()
self.assertEqual(before, after)
def test_end_publish_creation_future(self):
future = datetime.datetime.now() + datetime.timedelta(days=1)
before = Index.objects.count()
item = ItemPublStop.objects.create(title='xyz', content=LOREM, stop_publish=future)
after = Index.objects.count()
self.assertEqual(before + 1, after)
def test_start_publish(self):
future = datetime.datetime.now() + datetime.timedelta(days=1)
item = ItemPublStart.objects.create(title='r0x', content=LOREM, start_publish=future)
self.assertEqual(0, search('r0x').count())
item.start_publish = datetime.datetime.now() - datetime.timedelta(days=1)
item.save()
self.assertEqual(1, search('r0x').count())
def test_end_publish(self):
t = datetime.datetime.now() + datetime.timedelta(microseconds=1)
item = ItemPublStop.objects.create(title='woof', content=LOREM, stop_publish=t)
time.sleep(0.01)
self.assertEqual(0, search('woof').count())
item.stop_publish = datetime.datetime.now() + datetime.timedelta(days=1)
item.save()
self.assertEqual(1, search('woof').count())
| aino/django-pgindex | tests/pgindex_tests/tests.py | Python | bsd-3-clause | 3,716 |
import json
from requests import HTTPError
class DjrillError(Exception):
"""Base class for exceptions raised by Djrill
Overrides __str__ to provide additional information about
Mandrill API call and response.
"""
def __init__(self, *args, **kwargs):
"""
Optional kwargs:
email_message: the original EmailMessage being sent
payload: data arg (*not* json-stringified) for the Mandrill send call
response: requests.Response from the send call
"""
self.email_message = kwargs.pop('email_message', None)
self.payload = kwargs.pop('payload', None)
if isinstance(self, HTTPError):
# must leave response in kwargs for HTTPError
self.response = kwargs.get('response', None)
else:
self.response = kwargs.pop('response', None)
super(DjrillError, self).__init__(*args, **kwargs)
def __str__(self):
parts = [
" ".join([str(arg) for arg in self.args]),
self.describe_send(),
self.describe_response(),
]
return "\n".join(filter(None, parts))
def describe_send(self):
"""Return a string describing the Mandrill send in self.payload, or None"""
if self.payload is None:
return None
description = "Sending a message"
try:
to_emails = [to['email'] for to in self.payload['message']['to']]
description += " to %s" % ','.join(to_emails)
except KeyError:
pass
try:
description += " from %s" % self.payload['message']['from_email']
except KeyError:
pass
return description
def describe_response(self):
"""Return a formatted string of self.response, or None"""
if self.response is None:
return None
description = "Mandrill API response %d:" % self.response.status_code
try:
json_response = self.response.json()
description += "\n" + json.dumps(json_response, indent=2)
except (AttributeError, KeyError, ValueError): # not JSON = ValueError
try:
description += " " + self.response.text
except AttributeError:
pass
return description
class MandrillAPIError(DjrillError, HTTPError):
"""Exception for unsuccessful response from Mandrill API."""
def __init__(self, *args, **kwargs):
super(MandrillAPIError, self).__init__(*args, **kwargs)
if self.response is not None:
self.status_code = self.response.status_code
class MandrillRecipientsRefused(DjrillError):
"""Exception for send where all recipients are invalid or rejected."""
def __init__(self, message=None, *args, **kwargs):
if message is None:
message = "All message recipients were rejected or invalid"
super(MandrillRecipientsRefused, self).__init__(message, *args, **kwargs)
class NotSupportedByMandrillError(DjrillError, ValueError):
"""Exception for email features that Mandrill doesn't support.
This is typically raised when attempting to send a Django EmailMessage that
uses options or values you might expect to work, but that are silently
ignored by or can't be communicated to Mandrill's API. (E.g., non-HTML
alternative parts.)
It's generally *not* raised for Mandrill-specific features, like limitations
on Mandrill tag names or restrictions on from emails. (Djrill expects
Mandrill to return an API error for these where appropriate, and tries to
avoid duplicating Mandrill's validation logic locally.)
"""
class NotSerializableForMandrillError(DjrillError, TypeError):
"""Exception for data that Djrill doesn't know how to convert to JSON.
This typically results from including something like a date or Decimal
in your merge_vars (or other Mandrill-specific EmailMessage option).
"""
# inherits from TypeError for backwards compatibility with Djrill 1.x
def __init__(self, message=None, orig_err=None, *args, **kwargs):
if message is None:
message = "Don't know how to send this data to Mandrill. " \
"Try converting it to a string or number first."
if orig_err is not None:
message += "\n%s" % str(orig_err)
super(NotSerializableForMandrillError, self).__init__(message, *args, **kwargs)
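# Illustrative sketch (not part of Djrill itself) of the conversion suggested above, assuming
# "message" is a django.core.mail.EmailMessage sent through Djrill's backend and that
# merge_vars maps recipient email addresses to dicts of merge fields:
#
#   from datetime import date
#   from decimal import Decimal
#   message.merge_vars = {"[email protected]": {
#       "AMOUNT": str(Decimal("19.99")),            # Decimal -> str
#       "DUE_DATE": date(2015, 1, 31).isoformat(),  # date -> ISO string
#   }}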
| brack3t/Djrill | djrill/exceptions.py | Python | bsd-3-clause | 4,460 |
from setuptools import find_packages, setup
setup(
name='HX711',
packages=['.'],
version='1.0.0',
description='HX711 chip interface library',
author='gandalf15@github',
license='BSD 3-Clause',
)
| gandalf15/HX711 | HX711_Python3/setup.py | Python | bsd-3-clause | 220 |
c = get_config()
## Generic nbgrader options (technically, the exchange directory is not used
## by the formgrader, but it is left in here for consistency with the rest of
## the user guide):
c.NbGrader.course_id = "example_course"
c.TransferApp.exchange_directory = "/tmp/exchange"
## Options that are specific to the formgrader and integrating it with JuptyerHub:
c.FormgradeApp.ip = "127.0.0.1"
c.FormgradeApp.port = 9000
c.FormgradeApp.authenticator_class = "nbgrader.auth.hubauth.HubAuth"
# This is the actual URL or public IP address where JupyterHub is running (by
# default, the HubAuth will just use the same address as what the formgrader is
# running on -- so in this case, 127.0.0.1). If you have JupyterHub behind a
# domain name, you probably want to set that here.
c.HubAuth.hub_address = "127.0.0.1"
# Change this to be the path to the user guide folder in your clone of
# nbgrader, or just wherever you have your class files. This is relative
# to the root of the notebook server launched by JupyterHub, which is
# probably your home directory. This is used for accessing the *live*
# version of notebooks via JupyterHub. If you don't want to access the
# live notebooks and are fine with just the static interface provided by
# the formgrader, then you can ignore this option.
c.HubAuth.notebook_url_prefix = "path/to/class_files"
# Change this to be the list of unix usernames that are allowed to access
# the formgrader.
c.HubAuth.graders = ["instructor1", "instructor2"]
# This specifies that the formgrader should automatically generate an api
# token to authenticate itself with JupyterHub.
c.HubAuth.generate_hubapi_token = True
# Change this to be the jupyterhub.sqlite located in the directory where
# you actually run JupyterHub.
c.HubAuth.hub_db = "path/to/jupyterhub.sqlite"
| minrk/nbgrader | docs/source/user_guide/jupyterhub/nbgrader_config.py | Python | bsd-3-clause | 1,813 |
# -*- coding: utf-8 -*-
import os
# For different service
SITE_NAME = 'Dentimonial'
TRACKING_HASHTAG = '#dentimonial'
TWEET_ACTION_NAME = 'Send'
SERVICE_NAME = 'Identi.ca'
SERVICE_URI = 'http://identi.ca/'
FOLLOWERS_NAME = 'Subscribers'
FOLLOWED_NAME = 'Subscribed'
FOLLOW_NAME = 'Subscribe to'
TWEET_NAME = 'Notice'
# Twitter Account
TWITTER_ID = ''
TWITTER_PW = ''
# Switches
DEBUG = True
# UI
MAIN_CSS_REV = '0'
MAIN_JS_REV = '0'
# APIs
TWITTER_USERS_SHOW_URI = 'https://identi.ca/api/users/show.json?screen_name=%s'
TWITTER_SEARCH_BASE_URI = 'https://identi.ca/api/search.json'
TWITTER_SHOW_URI = 'https://identi.ca/api/friendships/show.json?source_screen_name=%s&target_screen_name=%s'
# Tasks
TASK_GET_TWIMONIAL_INTERVAL = 300
TASK_PROCESS_TQI_INTERVAL = 300
# Rate limit
RATE_AGREE_DURATION = 3600
RATE_AGREE_MASS = 5
RATE_AGREE_MASS_DURATION = 60
# Cache time
CACHE_TIME_HOMEPAGE = 300
CACHE_TIME_USERPAGE = 300
CACHE_TIME_USERLISTPAGE = 300
CACHE_TIME_LISTPAGE = 300
CACHE_TIME_USERFEED_TOP = 300
# Check Profile Image
CHECK_PROFILE_IMAGE_INTERVAL = 86400 * 7
# Under development server?
DEV = os.environ['SERVER_SOFTWARE'].startswith('Development')
# Base URI
if DEV:
BASE_URI = 'http://localhost:8080/'
BASE_SECURE_URI = BASE_URI
else:
BASE_URI = 'http://%s.appspot.com/' % os.environ['APPLICATION_ID']
BASE_SECURE_URI = 'https://%s.appspot.com/' % os.environ['APPLICATION_ID']
BEFORE_HEAD_END = ''
BEFORE_BODY_END = ''
| livibetter-backup/twimonial | dentsrc/config_base.py | Python | bsd-3-clause | 1,453 |
from Chip import OpCodeDefinitions
from Tests.OpCodeTests.OpCodeTestBase import OpCodeTestBase
class TestEorOpCode(OpCodeTestBase):
def test_eor_indirect_x_command_calls_eor_method(self):
self.assert_opcode_execution(OpCodeDefinitions.eor_indirect_x_command, self.target.get_eor_command_executed)
def test_eor_zero_page_command_calls_eor_method(self):
self.assert_opcode_execution(OpCodeDefinitions.eor_zero_page_command, self.target.get_eor_command_executed)
def test_eor_immediate_command_does_exclusive_or(self):
self.target.set_accumulator(0x13)
self.target.execute(OpCodeDefinitions.eor_immediate_command, 0x37)
expected_value = 0x13 ^ 0x37
actual_value = self.target.get_accumulator()
self.assertEqual(expected_value, actual_value)
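# Worked value for reference: expected_value above is 0x13 ^ 0x37 == 0x24
# (0b0001_0011 XOR 0b0011_0111 = 0b0010_0100).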
def test_eor_absolute_command_calls_eor_method(self):
self.assert_opcode_execution(OpCodeDefinitions.eor_absolute_command, self.target.get_eor_command_executed)
def test_eor_indirect_y_command_calls_eor_method(self):
self.assert_opcode_execution(OpCodeDefinitions.eor_indirect_y_command, self.target.get_eor_command_executed)
def test_eor_zero_page_x_command_calls_eor_method(self):
self.assert_opcode_execution(OpCodeDefinitions.eor_zero_page_x_command, self.target.get_eor_command_executed)
def test_eor_absolute_y_command_calls_eor_method(self):
self.assert_opcode_execution(OpCodeDefinitions.eor_absolute_y_command, self.target.get_eor_command_executed)
def test_eor_absolute_x_command_calls_eor_method(self):
self.assert_opcode_execution(OpCodeDefinitions.eor_absolute_x_command, self.target.get_eor_command_executed)
| jeroanan/Nes2 | Tests/OpCodeTests/TestEorOpCode.py | Python | bsd-3-clause | 1,700 |
# -*- coding: utf-8 -*-
import os
import urllib.parse as urlparse
import warnings
from unittest.mock import patch
from oauthlib import common, signals
from oauthlib.oauth2 import (
BackendApplicationClient, Client, LegacyApplicationClient,
MobileApplicationClient, WebApplicationClient,
)
from oauthlib.oauth2.rfc6749 import errors, utils
from oauthlib.oauth2.rfc6749.clients import AUTH_HEADER, BODY, URI_QUERY
from tests.unittest import TestCase
@patch('time.time', new=lambda: 1000)
class WebApplicationClientTest(TestCase):
client_id = "someclientid"
client_secret = 'someclientsecret'
uri = "https://example.com/path?query=world"
uri_id = uri + "&response_type=code&client_id=" + client_id
uri_redirect = uri_id + "&redirect_uri=http%3A%2F%2Fmy.page.com%2Fcallback"
redirect_uri = "http://my.page.com/callback"
scope = ["/profile"]
state = "xyz"
uri_scope = uri_id + "&scope=%2Fprofile"
uri_state = uri_id + "&state=" + state
kwargs = {
"some": "providers",
"require": "extra arguments"
}
uri_kwargs = uri_id + "&some=providers&require=extra+arguments"
uri_authorize_code = uri_redirect + "&scope=%2Fprofile&state=" + state
code = "zzzzaaaa"
body = "not=empty"
body_code = "not=empty&grant_type=authorization_code&code={}&client_id={}".format(code, client_id)
body_redirect = body_code + "&redirect_uri=http%3A%2F%2Fmy.page.com%2Fcallback"
body_kwargs = body_code + "&some=providers&require=extra+arguments"
response_uri = "https://client.example.com/cb?code=zzzzaaaa&state=xyz"
response = {"code": "zzzzaaaa", "state": "xyz"}
token_json = ('{ "access_token":"2YotnFZFEjr1zCsicMWpAA",'
' "token_type":"example",'
' "expires_in":3600,'
' "scope":"/profile",'
' "refresh_token":"tGzv3JOkF0XG5Qx2TlKWIA",'
' "example_parameter":"example_value"}')
token = {
"access_token": "2YotnFZFEjr1zCsicMWpAA",
"token_type": "example",
"expires_in": 3600,
"expires_at": 4600,
"scope": scope,
"refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",
"example_parameter": "example_value"
}
def test_auth_grant_uri(self):
client = WebApplicationClient(self.client_id)
# Basic, no extra arguments
uri = client.prepare_request_uri(self.uri)
self.assertURLEqual(uri, self.uri_id)
# With redirection uri
uri = client.prepare_request_uri(self.uri, redirect_uri=self.redirect_uri)
self.assertURLEqual(uri, self.uri_redirect)
# With scope
uri = client.prepare_request_uri(self.uri, scope=self.scope)
self.assertURLEqual(uri, self.uri_scope)
# With state
uri = client.prepare_request_uri(self.uri, state=self.state)
self.assertURLEqual(uri, self.uri_state)
# With extra parameters through kwargs
uri = client.prepare_request_uri(self.uri, **self.kwargs)
self.assertURLEqual(uri, self.uri_kwargs)
def test_request_body(self):
client = WebApplicationClient(self.client_id, code=self.code)
# Basic, no extra arguments
body = client.prepare_request_body(body=self.body)
self.assertFormBodyEqual(body, self.body_code)
rclient = WebApplicationClient(self.client_id)
body = rclient.prepare_request_body(code=self.code, body=self.body)
self.assertFormBodyEqual(body, self.body_code)
# With redirection uri
body = client.prepare_request_body(body=self.body, redirect_uri=self.redirect_uri)
self.assertFormBodyEqual(body, self.body_redirect)
# With extra parameters
body = client.prepare_request_body(body=self.body, **self.kwargs)
self.assertFormBodyEqual(body, self.body_kwargs)
def test_parse_grant_uri_response(self):
client = WebApplicationClient(self.client_id)
# Parse code and state
response = client.parse_request_uri_response(self.response_uri, state=self.state)
self.assertEqual(response, self.response)
self.assertEqual(client.code, self.code)
# Mismatching state
self.assertRaises(errors.MismatchingStateError,
client.parse_request_uri_response,
self.response_uri,
state="invalid")
def test_populate_attributes(self):
client = WebApplicationClient(self.client_id)
response_uri = (self.response_uri +
"&access_token=EVIL-TOKEN"
"&refresh_token=EVIL-TOKEN"
"&mac_key=EVIL-KEY")
client.parse_request_uri_response(response_uri, self.state)
self.assertEqual(client.code, self.code)
# We must not accidentally pick up any further security
# credentials at this point.
self.assertIsNone(client.access_token)
self.assertIsNone(client.refresh_token)
self.assertIsNone(client.mac_key)
def test_parse_token_response(self):
client = WebApplicationClient(self.client_id)
# Parse code and state
response = client.parse_request_body_response(self.token_json, scope=self.scope)
self.assertEqual(response, self.token)
self.assertEqual(client.access_token, response.get("access_token"))
self.assertEqual(client.refresh_token, response.get("refresh_token"))
self.assertEqual(client.token_type, response.get("token_type"))
# Mismatching state
self.assertRaises(Warning, client.parse_request_body_response, self.token_json, scope="invalid")
os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '1'
token = client.parse_request_body_response(self.token_json, scope="invalid")
self.assertTrue(token.scope_changed)
scope_changes_recorded = []
def record_scope_change(sender, message, old, new):
scope_changes_recorded.append((message, old, new))
signals.scope_changed.connect(record_scope_change)
try:
client.parse_request_body_response(self.token_json, scope="invalid")
self.assertEqual(len(scope_changes_recorded), 1)
message, old, new = scope_changes_recorded[0]
self.assertEqual(message, 'Scope has changed from "invalid" to "/profile".')
self.assertEqual(old, ['invalid'])
self.assertEqual(new, ['/profile'])
finally:
signals.scope_changed.disconnect(record_scope_change)
del os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE']
def test_prepare_authorization_request(self):
client = WebApplicationClient(self.client_id)
url, header, body = client.prepare_authorization_request(
self.uri, redirect_url=self.redirect_uri, state=self.state, scope=self.scope)
self.assertURLEqual(url, self.uri_authorize_code)
# verify default header and body only
self.assertEqual(header, {'Content-Type': 'application/x-www-form-urlencoded'})
self.assertEqual(body, '')
def test_prepare_request_body(self):
"""
see issue #585
https://github.com/oauthlib/oauthlib/issues/585
`prepare_request_body` should support the following scenarios:
1. Include client_id alone in the body (default)
2. Include client_id and client_secret in auth and not include them in the body (RFC preferred solution)
3. Include client_id and client_secret in the body (RFC alternative solution)
4. Include client_id in the body and an empty string for client_secret.
"""
client = WebApplicationClient(self.client_id)
# scenario 1, default behavior to include `client_id`
r1 = client.prepare_request_body()
self.assertEqual(r1, 'grant_type=authorization_code&client_id=%s' % self.client_id)
r1b = client.prepare_request_body(include_client_id=True)
self.assertEqual(r1b, 'grant_type=authorization_code&client_id=%s' % self.client_id)
# scenario 2, do not include `client_id` in the body, so it can be sent in auth.
r2 = client.prepare_request_body(include_client_id=False)
self.assertEqual(r2, 'grant_type=authorization_code')
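# Illustrative sketch (not part of the original test): scenario 2 is normally paired with
# HTTP Basic client authentication supplied by the transport layer rather than by oauthlib, e.g.
#
#   import base64
#   creds = base64.b64encode(b"someclientid:someclientsecret").decode()
#   headers = {"Authorization": "Basic " + creds,
#              "Content-Type": "application/x-www-form-urlencoded"}
#   # the body from scenario 2 ('grant_type=authorization_code') is then sent with these headers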
# scenario 3, Include client_id and client_secret in the body (RFC alternative solution)
# the order of kwargs being appended is not guaranteed. for brevity, check the 2 permutations instead of sorting
r3 = client.prepare_request_body(client_secret=self.client_secret)
r3_params = dict(urlparse.parse_qsl(r3, keep_blank_values=True))
self.assertEqual(len(r3_params.keys()), 3)
self.assertEqual(r3_params['grant_type'], 'authorization_code')
self.assertEqual(r3_params['client_id'], self.client_id)
self.assertEqual(r3_params['client_secret'], self.client_secret)
r3b = client.prepare_request_body(include_client_id=True, client_secret=self.client_secret)
r3b_params = dict(urlparse.parse_qsl(r3b, keep_blank_values=True))
self.assertEqual(len(r3b_params.keys()), 3)
self.assertEqual(r3b_params['grant_type'], 'authorization_code')
self.assertEqual(r3b_params['client_id'], self.client_id)
self.assertEqual(r3b_params['client_secret'], self.client_secret)
# scenario 4, `client_secret` is an empty string
r4 = client.prepare_request_body(include_client_id=True, client_secret='')
r4_params = dict(urlparse.parse_qsl(r4, keep_blank_values=True))
self.assertEqual(len(r4_params.keys()), 3)
self.assertEqual(r4_params['grant_type'], 'authorization_code')
self.assertEqual(r4_params['client_id'], self.client_id)
self.assertEqual(r4_params['client_secret'], '')
# scenario 4b, `client_secret` is `None`
r4b = client.prepare_request_body(include_client_id=True, client_secret=None)
r4b_params = dict(urlparse.parse_qsl(r4b, keep_blank_values=True))
self.assertEqual(len(r4b_params.keys()), 2)
self.assertEqual(r4b_params['grant_type'], 'authorization_code')
self.assertEqual(r4b_params['client_id'], self.client_id)
# scenario Warnings
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always") # catch all
# warning1 - raise a DeprecationWarning if a `client_id` is submitted
rWarnings1 = client.prepare_request_body(client_id=self.client_id)
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, DeprecationWarning)
# testing the exact warning message in Python2&Python3 is a pain
# scenario Exceptions
# exception1 - raise a ValueError if the a different `client_id` is submitted
with self.assertRaises(ValueError) as cm:
client.prepare_request_body(client_id='different_client_id')
# testing the exact exception message in Python2&Python3 is a pain
| idan/oauthlib | tests/oauth2/rfc6749/clients/test_web_application.py | Python | bsd-3-clause | 11,045 |
from . import numpy as np
from . import FILE_1, reset_image_set
import pytest
from pdsspect.roi import Rectangle, Polygon
from pdsspect.pan_view import PanViewController, PanView, PanViewWidget
from pdsspect.pdsspect_image_set import PDSSpectImageSet, SubPDSSpectImageSet
class TestPanViewController(object):
image_set = PDSSpectImageSet([FILE_1])
controller = PanViewController(image_set, None)
default_roi_data = image_set._roi_data.copy()
@pytest.fixture
def test_set(self):
yield self.image_set
self.image_set._roi_data = self.default_roi_data
self.image_set._alpha = 1
self.image_set._subsets = []
def test_get_parent_set(self, test_set):
subset = test_set.create_subset()
assert self.controller._get_parent_set() == test_set
controller2 = PanViewController(subset, None)
assert controller2._get_parent_set() == test_set
def test_add_ROI(self, test_set):
subset = test_set.create_subset()
assert test_set.current_color_index == 0
assert test_set.color == 'red'
coords = np.array([[42, 42]])
rows, cols = np.column_stack(coords)
assert np.array_equal(
test_set._roi_data[rows, cols],
np.array([[0.0, 0.0, 0.0, 0.0]])
)
assert np.array_equal(
subset._roi_data[rows, cols],
np.array([[0.0, 0.0, 0.0, 0.0]])
)
self.image_set.alpha = 1
self.controller.add_ROI(coords)
assert np.array_equal(
self.image_set._roi_data[rows, cols],
np.array([[255.0, 0.0, 0.0, 255.]])
)
assert np.array_equal(
subset._roi_data[rows, cols],
np.array([[0.0, 0.0, 0.0, 0.0]])
)
self.image_set.alpha = .75
self.image_set.current_color_index = 1
self.controller.add_ROI(coords)
assert np.array_equal(
self.image_set._roi_data[rows, cols],
np.array([[165.0, 42.0, 42.0, 191.25]])
)
assert np.array_equal(
subset._roi_data[rows, cols],
np.array([[0.0, 0.0, 0.0, 0.0]])
)
self.image_set.alpha = .25
self.image_set.current_color_index = 13
self.controller.add_ROI(coords)
assert np.array_equal(
self.image_set._roi_data[rows, cols],
np.array([[160.0, 32.0, 240.0, 63.75]])
)
assert np.array_equal(
subset._roi_data[rows, cols],
np.array([[0.0, 0.0, 0.0, 0.0]])
)
self.image_set.alpha = 1
self.image_set.current_color_index = 0
test_set.simultaneous_roi = True
self.controller.add_ROI(coords)
assert np.array_equal(
self.image_set._roi_data[rows, cols],
np.array([[255.0, 0.0, 0.0, 255.0]])
)
assert np.array_equal(
subset._roi_data[rows, cols],
np.array([[255.0, 0.0, 0.0, 255.0]])
)
self.image_set.current_color_index = 1
test_set.simultaneous_roi = False
self.controller.add_ROI(coords)
assert np.array_equal(
self.image_set._roi_data[rows, cols],
np.array([[165.0, 42.0, 42.0, 255.0]])
)
assert np.array_equal(
subset._roi_data[rows, cols],
np.array([[255.0, 0.0, 0.0, 255.0]])
)
def test_erase_ROI(self, test_set):
subset = test_set.create_subset()
coords = np.array([[42, 42]])
rows, cols = np.column_stack(coords)
test_set.add_coords_to_roi_data_with_color(coords, 'red')
subset.add_coords_to_roi_data_with_color(coords, 'red')
assert np.array_equal(
test_set._roi_data[rows, cols],
np.array([[255.0, 0.0, 0.0, 255.0]])
)
assert np.array_equal(
subset._roi_data[rows, cols],
np.array([[255.0, 0.0, 0.0, 255.0]])
)
self.controller.erase_ROI(coords)
assert np.array_equal(
test_set._roi_data[rows, cols],
np.array([[0.0, 0.0, 0.0, 0.0]])
)
assert np.array_equal(
subset._roi_data[rows, cols],
np.array([[255.0, 0.0, 0.0, 255.0]])
)
test_set.add_coords_to_roi_data_with_color(coords, 'brown')
assert np.array_equal(
test_set._roi_data[rows, cols],
np.array([[165.0, 42.0, 42.0, 255.0]])
)
self.controller.erase_ROI(coords)
assert np.array_equal(
test_set._roi_data[rows, cols],
np.array([[0.0, 0.0, 0.0, 0.0]])
)
test_set.simultaneous_roi = True
test_set.add_coords_to_roi_data_with_color(coords, 'red')
subset.add_coords_to_roi_data_with_color(coords, 'red')
self.controller.erase_ROI(coords)
assert np.array_equal(
test_set._roi_data[rows, cols],
np.array([[0.0, 0.0, 0.0, 0.0]])
)
assert np.array_equal(
subset._roi_data[rows, cols],
np.array([[0.0, 0.0, 0.0, 0.0]])
)
class TestPanView(object):
image_set = PDSSpectImageSet([FILE_1])
@pytest.fixture
def view(self, qtbot):
reset_image_set(self.image_set)
view = PanView(self.image_set)
view.show()
qtbot.add_widget(view)
return view
def test_is_erasing(self, view):
assert not view.is_erasing
self.image_set.current_color_index = 14
assert view.is_erasing
self.image_set.current_color_index = 0
assert not view.is_erasing
def test_set_data(self, view):
assert np.array_equal(
view.view_canvas.get_image().get_data(),
self.image_set.pan_data)
self.image_set._zoom = 2
assert not np.array_equal(
view.view_canvas.get_image().get_data(),
self.image_set.pan_data)
view.set_data()
assert np.array_equal(
view.view_canvas.get_image().get_data(),
self.image_set.pan_data)
self.image_set._zoom = 1
view.set_data()
assert np.array_equal(
view.view_canvas.get_image().get_data(),
self.image_set.pan_data)
def test_set_roi_data(self, view):
assert np.array_equal(
self.image_set._maskrgb.get_data(),
self.image_set.pan_roi_data)
self.image_set._zoom = 2
assert not np.array_equal(
self.image_set._maskrgb.get_data(),
self.image_set.pan_roi_data)
view.set_data()
assert np.array_equal(
self.image_set._maskrgb.get_data(),
self.image_set.pan_roi_data)
self.image_set._zoom = 1
view.set_data()
assert np.array_equal(
self.image_set._maskrgb.get_data(),
self.image_set.pan_roi_data)
@pytest.mark.parametrize(
'pre_x, pre_y, expected_x, expected_y',
[
(512, 512, 512, 512),
(-.5, -.5, -.5, -.5),
(1023, -.5, 1023, -.5),
(-.5, 1023, -.5, 1023),
(1023, 1023, 1023, 1023),
(-.6, -.6, -.5, -.5),
(1024, -.6, 1023, -.5),
(-.6, 1024, -.5, 1023),
(1024, 1024, 1023, 1023),
]
)
def test_make_x_y_in_pan(self, pre_x, pre_y, expected_x, expected_y, view):
post_x, post_y = view._make_x_y_in_pan(pre_x, pre_y)
assert post_x == expected_x
assert post_y == expected_y
def test_start_ROI(self, view):
assert not view._making_roi
assert view._current_roi is None
view.start_ROI(view.view_canvas, None, 512, 512)
assert view._making_roi
assert view._current_roi is not None
assert self.image_set.selection_type == 'filled rectangle'
assert isinstance(view._current_roi, Rectangle)
view._making_roi = False
view._current_roi = None
self.image_set._selection_index = 1
assert self.image_set.selection_type == 'filled polygon'
view.start_ROI(view.view_canvas, None, 512, 512)
assert view._making_roi
assert view._current_roi is not None
assert self.image_set.selection_type == 'filled polygon'
assert isinstance(view._current_roi, Polygon)
view._making_roi = False
view._current_roi = None
self.image_set._selection_index = 2
assert self.image_set.selection_type == 'pencil'
view.start_ROI(view.view_canvas, None, 512, 512)
# Pencil ROIs stop directly after starting
assert not view._making_roi
assert view._current_roi is None
assert self.image_set.selection_type == 'pencil'
self.image_set._selection_index = 0
assert self.image_set.selection_type == 'filled rectangle'
view.start_ROI(view.view_canvas, None, 512, 512)
assert view._making_roi
assert view._current_roi is not None
assert self.image_set.selection_type == 'filled rectangle'
assert isinstance(view._current_roi, Rectangle)
view._making_roi = False
view._current_roi = None
def test_continue_ROI(self, view):
assert not view._making_roi
assert view._current_roi is None
view.start_ROI(view.view_canvas, None, 512, 512)
assert view._making_roi
assert view._current_roi is not None
view.continue_ROI(None, None, 514, 514)
assert not view._making_roi
assert view._current_roi is None
self.image_set._selection_index = 1
assert self.image_set.selection_type == 'filled polygon'
view.start_ROI(view.view_canvas, None, 512, 512)
assert view._making_roi
assert view._current_roi is not None
# Make sure to continue ROI even when start_ROI is called
view.start_ROI(None, None, 514, 514)
assert view._making_roi
assert view._current_roi is not None
view._making_roi = False
view._current_roi = None
def test_extend_ROI(self, view):
assert not view._making_roi
assert view._current_roi is None
view.start_ROI(view.view_canvas, None, 512, 512)
assert view._making_roi
assert view._current_roi is not None
view.extend_ROI(None, None, 514, 514)
assert view._making_roi
assert view._current_roi is not None
def test_stop_ROI(self, view):
assert not view._making_roi
assert view._current_roi is None
view.start_ROI(view.view_canvas, None, 512, 512)
assert view._making_roi
assert view._current_roi is not None
view.stop_ROI(view.view_canvas, None, 512, 512)
assert not view._making_roi
assert view._current_roi is None
assert self.image_set.get_coordinates_of_color('red') == ([513], [513])
self.image_set.current_color_index = 14
assert self.image_set.color == 'eraser'
view.start_ROI(view.view_canvas, None, 512, 512)
assert view._making_roi
assert view._current_roi is not None
view.stop_ROI(view.view_canvas, None, 512, 512)
assert not view._making_roi
assert view._current_roi is None
assert np.array_equal(
self.image_set.get_coordinates_of_color('red'),
(np.array([]), np.array([])))
class TestPanViewWidget(object):
image_set = PDSSpectImageSet([FILE_1])
pan = PanView(image_set)
@pytest.fixture
def pan_widget(self):
reset_image_set(self.image_set)
self.pan = PanView(self.image_set)
return PanViewWidget(self.pan, None)
def test_init(self, pan_widget):
assert len(pan_widget.pans) == 1
assert pan_widget.pans[0] == self.pan
assert pan_widget.main_layout.itemAt(0).widget() == self.pan
def test_add_pan(self, pan_widget):
subset = SubPDSSpectImageSet(self.image_set)
pan2 = PanView(subset)
pan_widget.add_pan(pan2)
assert pan_widget.pans[1] == pan2
assert pan_widget.main_layout.itemAt(1).widget() == pan2
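# Hedged note on the RGBA assertions above: the expected pixel rows appear to
# be the ROI colour's 8-bit RGB triple with the alpha channel scaled by the
# image set's alpha, e.g. 'brown' (165, 42, 42) at alpha 0.75 gives
# [165.0, 42.0, 42.0, 255 * 0.75] == [165.0, 42.0, 42.0, 191.25].
def _expected_roi_pixel(rgb, alpha):
    # Illustrative helper (not used by the tests above).
    red, green, blue = rgb
    return [float(red), float(green), float(blue), 255.0 * alpha]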
| planetarypy/pdsspect | tests/test_pan_view.py | Python | bsd-3-clause | 12,114 |
from django.core.management import BaseCommand
from corehq.apps.sms.tests.data_generator import create_fake_sms
class Command(BaseCommand):
help = """
Generates a few fake SMS message models for a domain, for testing.
"""
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('num_messages', type=int, help="The number of SMS messages to create")
def handle(self, domain, num_messages, **kwargs):
for i in range(num_messages):
create_fake_sms(domain, randomize=True)
print(f'successfully created {num_messages} messages in {domain}')
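# Hedged usage sketch: assuming this module is installed as a Django management
# command, it would be invoked from a shell roughly as follows (the domain name
# and message count are placeholders):
#
#     python manage.py generate_fake_sms_data my-test-domain 25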
| dimagi/commcare-hq | corehq/apps/sms/management/commands/generate_fake_sms_data.py | Python | bsd-3-clause | 636 |
from PropertiesBlueZInterface import PropertiesBlueZInterface
from ServiceInterface import ServiceInterface
from errors import raise_dbus_error
import dbus
import xml.dom.minidom
class Device(PropertiesBlueZInterface):
@raise_dbus_error
def __init__(self, obj_path=None):
if self.__class__.get_interface_version()[0] < 5:
interface = 'org.bluez.Device'
else:
interface = 'org.bluez.Device1'
super(Device, self).__init__(interface, obj_path)
@raise_dbus_error
def list_services(self):
interfaces = []
dbus_object = dbus.SystemBus().get_object('org.bluez', self.get_object_path())
dbus_introspect = dbus.Interface(dbus_object, 'org.freedesktop.DBus.Introspectable')
introspect_xml = dbus_introspect.Introspect()
root_node = xml.dom.minidom.parseString(introspect_xml)
for interface in root_node.getElementsByTagName('interface'):
interface_name = interface.getAttribute('name')
if interface_name != self.get_interface_name():
methods = []
for method in interface.getElementsByTagName('method'):
methods.append(method.getAttribute('name'))
interfaces.append(ServiceInterface(interface_name, self.get_object_path(), methods))
return interfaces
@raise_dbus_error
def pair(self, reply_handler=None, error_handler=None):
# BlueZ 5 only!
def ok():
if callable(reply_handler):
reply_handler()
def err(err):
if callable(error_handler):
error_handler(err)
self.get_interface().Pair(reply_handler=ok, error_handler=err)
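# Hedged usage sketch (requires a running BlueZ daemon over D-Bus; the object
# path below is a placeholder):
#
#     device = Device('/org/bluez/hci0/dev_00_11_22_33_44_55')
#     services = device.list_services()  # list of ServiceInterface objects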
| nmercier/linux-cross-gcc | linux/lib/python2.7/dist-packages/blueman/bluez/Device.py | Python | bsd-3-clause | 1,722 |
import os
import codecs
from datetime import datetime
from stat import ST_MTIME, ST_CTIME
from re import search
from django.conf import settings
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.exceptions import ImproperlyConfigured
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loaders.app_directories import app_template_dirs
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from templatesadmin.forms import TemplateForm, RichTemplateForm
from templatesadmin.models import FTemplate
from templatesadmin import TemplatesAdminException
from django.contrib import messages
# Default settings that may be overriden by global settings (settings.py)
TEMPLATESADMIN_VALID_FILE_EXTENSIONS = getattr(
settings,
'TEMPLATESADMIN_VALID_FILE_EXTENSIONS',
('html', 'htm', 'txt', 'js', 'css', 'backup',)
)
TEMPLATESADMIN_GROUP = getattr(
settings,
'TEMPLATESADMIN_GROUP',
'TemplateAdmins'
)
TEMPLATESADMIN_EDITHOOKS = getattr(
settings,
'TEMPLATESADMIN_EDITHOOKS',
('templatesadmin.edithooks.dotbackupfiles.DotBackupFilesHook', )
)
TEMPLATESADMIN_HIDE_READONLY = getattr(
settings,
'TEMPLATESADMIN_HIDE_READONLY',
False
)
TEMPLATESADMIN_USE_RICHEDITOR = getattr(
settings,
'TEMPLATESADMIN_USE_RICHEDITOR',
True
)
if str == type(TEMPLATESADMIN_EDITHOOKS):
TEMPLATESADMIN_EDITHOOKS = (TEMPLATESADMIN_EDITHOOKS,)
_hooks = []
for path in TEMPLATESADMIN_EDITHOOKS:
# inspired by django.template.context.get_standard_processors
i = path.rfind('.')
module, attr = path[:i], path[i+1:]
try:
mod = __import__(module, {}, {}, [attr])
except ImportError, e:
raise ImproperlyConfigured('Error importing edithook module %s: "%s"' % (module, e))
try:
func = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" callable request processor' % (module, attr))
_hooks.append(func)
TEMPLATESADMIN_EDITHOOKS = tuple(_hooks)
_fixpath = lambda path: os.path.abspath(os.path.normpath(path))
# Load all templates (recursively)
TEMPLATESADMIN_TEMPLATE_DIRS = getattr(
settings,
'TEMPLATESADMIN_TEMPLATE_DIRS', [
d for d in list(settings.TEMPLATE_DIRS) + \
list(app_template_dirs) if os.path.isdir(d)
]
)
TEMPLATESADMIN_TEMPLATE_DIRS = [_fixpath(dir) for dir in TEMPLATESADMIN_TEMPLATE_DIRS]
def user_in_templatesadmin_group(user):
try:
user.is_superuser or user.groups.get(name=TEMPLATESADMIN_GROUP)
return True
except ObjectDoesNotExist:
return False
@never_cache
def listing(request,
template_name='templatesadmin/overview.html',
available_template_dirs=TEMPLATESADMIN_TEMPLATE_DIRS):
template_dict = []
for templatedir in available_template_dirs:
for root, dirs, files in os.walk(templatedir):
for f in sorted([f for f in files if f.rsplit('.')[-1] \
in TEMPLATESADMIN_VALID_FILE_EXTENSIONS]):
full_path = os.path.join(root, f)
l = {
'templatedir': templatedir,
'rootpath': root,
'abspath': full_path,
'modified': datetime.fromtimestamp(os.stat(full_path)[ST_MTIME]),
'created': datetime.fromtimestamp(os.stat(full_path)[ST_CTIME]),
'writeable': os.access(full_path, os.W_OK)
}
# Do not fetch non-writeable templates if settings set.
if (TEMPLATESADMIN_HIDE_READONLY == True and \
l['writeable'] == True) or \
TEMPLATESADMIN_HIDE_READONLY == False:
try:
template_dict += (l,)
except KeyError:
template_dict = (l,)
template_context = {
'messages': messages.get_messages(request),
'template_dict': template_dict,
'opts': FTemplate._meta,
}
return render_to_response(template_name, template_context,
RequestContext(request))
@never_cache
def modify(request,
path,
template_name='templatesadmin/edit.html',
base_form=TemplateForm,
available_template_dirs=TEMPLATESADMIN_TEMPLATE_DIRS):
template_path = _fixpath(path)
base_form = (TEMPLATESADMIN_USE_RICHEDITOR and RichTemplateForm or TemplateForm)
# Check if file is within template-dirs
if not any([template_path.startswith(templatedir) for templatedir in available_template_dirs]):
messages.error(request, message=_('Sorry, that file is not available for editing.'))
return HttpResponseRedirect(reverse('admin:templatesadmin_ftemplate_changelist'))
if request.method == 'POST':
formclass = base_form
for hook in TEMPLATESADMIN_EDITHOOKS:
formclass.base_fields.update(hook.contribute_to_form(template_path))
form = formclass(
data=request.POST,
widget_syntax = os.path.splitext(path)[1][1:]
)
if form.is_valid():
content = form.cleaned_data['content']
try:
for hook in TEMPLATESADMIN_EDITHOOKS:
pre_save_notice = hook.pre_save(request, form, template_path)
if pre_save_notice:
messages.warning(request, message=pre_save_notice)
except TemplatesAdminException, e:
messages.error(request, message=e.message)
return HttpResponseRedirect(request.build_absolute_uri())
# Save the template
try:
f = open(template_path, 'r')
file_content = f.read()
f.close()
                # browsers tend to strip newlines from <textarea/>s before
                # HTTP-POSTing: re-insert them if necessary
                # content is in dos-style line endings; converted in next step
                if (file_content[-1] == '\n' or file_content[-2:] == '\r\n') \
                        and content[-2:] != '\r\n':
content = u"%s\r\n" % content
# Template is saved in unix-style, save in unix style.
if None == search("\r\n", file_content):
content = content.replace("\r\n", "\n")
f = codecs.open(template_path, 'w', 'utf-8')
f.write(content)
f.close()
except IOError, e:
messages.error(request,
message=_('Template "%(path)s" has not been saved! Reason: %(errormsg)s') % {
'path': path,
'errormsg': e
}
)
return HttpResponseRedirect(request.build_absolute_uri())
try:
for hook in TEMPLATESADMIN_EDITHOOKS:
post_save_notice = hook.post_save(request, form, template_path)
if post_save_notice:
messages.info(request, message=post_save_notice)
except TemplatesAdminException, e:
messages.error(request, message=e.message)
return HttpResponseRedirect(request.build_absolute_uri())
messages.success(request,
message=_('Template "%s" was saved successfully.' % path)
)
return HttpResponseRedirect(reverse('admin:templatesadmin_ftemplate_changelist'))
else:
template_file = codecs.open(template_path, 'r', 'utf-8').read()
formclass = base_form
for hook in TEMPLATESADMIN_EDITHOOKS:
formclass.base_fields.update(hook.contribute_to_form(template_path))
form = formclass(
initial={'content': template_file},
widget_syntax = os.path.splitext(path)[1][1:]
)
template_context = {
'messages': messages.get_messages(request),
'form': form,
'short_path': path,
'template_path': path,
'opts': FTemplate._meta,
'template_writeable': os.access(template_path, os.W_OK),
}
return render_to_response(template_name, template_context,
RequestContext(request))
# For backwards compatibility and secure out-of-the-box views
overview = login_required(user_passes_test(lambda u: user_in_templatesadmin_group(u))(listing))
edit = login_required(user_passes_test(lambda u: user_in_templatesadmin_group(u))(modify))
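# Hedged configuration sketch: the module-level settings above are normally
# overridden from a project's settings.py, e.g. (values are illustrative):
#
#     TEMPLATESADMIN_GROUP = 'TemplateAdmins'
#     TEMPLATESADMIN_VALID_FILE_EXTENSIONS = ('html', 'txt', 'css')
#     TEMPLATESADMIN_EDITHOOKS = (
#         'templatesadmin.edithooks.dotbackupfiles.DotBackupFilesHook',
#     )
#
# Each dotted path in TEMPLATESADMIN_EDITHOOKS is split into module and
# attribute names and imported when this module is first loaded.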
| buriy/django-templatesadmin | templatesadmin/views.py | Python | bsd-3-clause | 8,863 |
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import fnmatch
import IECore
import Gaffer
import GafferUI
Gaffer.Metadata.registerNode(
Gaffer.Expression,
"description",
"""
Utility node for computing values via
scripted expressions.
""",
plugs = {
"engine" : (
"description",
"""
The expression language to use.
""",
"layout:section", "",
"nodule:type", "",
),
"expression" : (
## \todo We need better help here, specific to the
# different engines themselves.
"description",
"""
			The expression to evaluate.
""",
"layout:section", "",
"nodule:type", "",
),
}
)
# PlugValueWidget popup menu for creating expressions
##########################################################################
def __createExpression( plug ) :
node = plug.node()
parentNode = node.ancestor( Gaffer.Node )
with Gaffer.UndoContext( node.scriptNode() ) :
expressionNode = Gaffer.Expression()
parentNode.addChild( expressionNode )
expression = "parent['"
expression += plug.relativeName( parentNode ).replace( ".", "']['" )
expression += "'] = "
expression += repr( plug.getValue() )
expressionNode["expression"].setValue( expression )
__editExpression( plug )
def __editExpression( plug ) :
expressionNode = plug.getInput().node()
GafferUI.NodeEditor.acquire( expressionNode )
def __popupMenu( menuDefinition, plugValueWidget ) :
plug = plugValueWidget.getPlug()
if not isinstance( plug, (
Gaffer.FloatPlug, Gaffer.IntPlug,
Gaffer.StringPlug, Gaffer.BoolPlug,
Gaffer.V3fPlug, Gaffer.V3iPlug,
Gaffer.V2fPlug, Gaffer.V2iPlug,
Gaffer.Color3fPlug, Gaffer.Color4fPlug,
Gaffer.Box2fPlug, Gaffer.Box2iPlug,
Gaffer.Box3fPlug, Gaffer.Box3iPlug,
) ) :
return
node = plug.node()
if node is None or node.parent() is None :
return
input = plug.getInput()
if input is None and plugValueWidget._editable() :
menuDefinition.prepend( "/ExpressionDivider", { "divider" : True } )
menuDefinition.prepend( "/Create Expression...", { "command" : IECore.curry( __createExpression, plug ) } )
__popupMenuConnection = GafferUI.PlugValueWidget.popupMenuSignal().connect( __popupMenu )
# _ExpressionPlugValueWidget
##########################################################################
class _ExpressionPlugValueWidget( GafferUI.MultiLineStringPlugValueWidget ) :
def __init__( self, plug, **kw ) :
GafferUI.MultiLineStringPlugValueWidget.__init__( self, plug, **kw )
self.__dropTextConnection = self.textWidget().dropTextSignal().connect( Gaffer.WeakMethod( self.__dropText ) )
def hasLabel( self ) :
# strictly speaking we don't have a label, but i think it's pretty obvious
# what we are - what else is a giant text input box in an expression ui
# going to be?
return True
def __dropText( self, widget, dragData ) :
if isinstance( dragData, IECore.StringVectorData ) :
return repr( list( dragData ) )
elif isinstance( dragData, Gaffer.GraphComponent ) :
name = dragData.relativeName( self.getPlug().node().parent() )
if not name :
return None
return "parent" + "".join( [ "['" + n + "']" for n in name.split( "." ) ] )
elif isinstance( dragData, Gaffer.Set ) :
if len( dragData ) == 1 :
return self.__dropText( widget, dragData[0] )
else :
return None
return None
# PlugValueWidget registrations
##########################################################################
GafferUI.PlugValueWidget.registerCreator(
Gaffer.Expression,
"engine",
GafferUI.EnumPlugValueWidget,
labelsAndValues = [
( IECore.CamelCase.toSpaced( e ), e ) for e in Gaffer.Expression.Engine.registeredEngines()
]
)
GafferUI.PlugValueWidget.registerCreator(
Gaffer.Expression,
"expression",
_ExpressionPlugValueWidget,
)
GafferUI.PlugValueWidget.registerCreator(
Gaffer.Expression,
"in",
None
)
GafferUI.PlugValueWidget.registerCreator(
Gaffer.Expression,
"out",
None
)
GafferUI.PlugValueWidget.registerCreator(
Gaffer.Expression,
"user",
None
)
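# Hedged illustration: for a hypothetical float plug "offset" on a node
# "Sphere" parented to the script root, __createExpression() above would build
# an expression string of the form
#
#     parent['Sphere']['offset'] = 1.0
#
# i.e. the plug's relative name with '.' replaced by "']['", followed by the
# repr() of its current value.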
| goddardl/gaffer | python/GafferUI/ExpressionUI.py | Python | bsd-3-clause | 5,817 |
from behave import *
import nose.tools
import json
import numpy
import os
import subprocess
import sys
import tempfile
import toyplot
try:
import toyplot.mp4
except:
pass
try:
import toyplot.webm
except:
pass
@given(u'an animated canvas')
def step_impl(context):
context.canvas = toyplot.Canvas(
style={"background-color": "white"}, width=600, height=600)
axes = context.canvas.axes()
scatterplot = axes.scatterplot(numpy.arange(10))
def callback(frame):
if frame.index() == 0:
for i in numpy.arange(10):
frame.set_datum_style(scatterplot, 0, i, {"opacity": 0})
else:
frame.set_datum_style(
scatterplot, 0, frame.index() - 1, {"opacity": 1})
context.canvas.animate(11, callback)
@when(u'rendering a {type} video')
def step_impl(context, type):
nose.tools.assert_in(type, ["mp4", "webm"])
# Return quietly if the video backend isn't available
if not hasattr(toyplot, type):
return
backend = getattr(toyplot, type)
def progress(frame):
pass
context.path = os.path.join(tempfile.mkdtemp(), "test.%s" % type)
backend.render(context.canvas, context.path, progress=progress)
sys.stderr.write("**** %s ****\n" % context.path)
sys.stderr.flush()
@then(u'the backend should return a {type} video')
def step_impl(context, type):
nose.tools.assert_in(type, ["mp4", "webm"])
# Return quietly if the video backend isn't available
if not hasattr(toyplot, type):
return
command = ["ffprobe", "-print_format", "json", "-show_format",
"-show_streams", "-count_frames", context.path]
ffprobe = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = ffprobe.communicate()
video_metadata = json.loads(stdout)
video_format = video_metadata["format"]
nose.tools.assert_equal(video_format["nb_streams"], 1)
nose.tools.assert_in(type, video_format["format_name"])
video_stream = video_metadata["streams"][0]
nose.tools.assert_equal(
video_stream["codec_name"], "h264" if type == "mp4" else "vp8")
nose.tools.assert_equal(video_stream["codec_type"], "video")
nose.tools.assert_equal(video_stream["width"], 600)
nose.tools.assert_equal(video_stream["height"], 600)
nose.tools.assert_equal(video_stream["nb_read_frames"], "11")
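# Hedged usage sketch outside of behave (requires the optional toyplot.mp4
# backend and ffmpeg; the output path is a placeholder):
#
#     canvas = toyplot.Canvas(width=600, height=600)
#     axes = canvas.axes()
#     axes.scatterplot(numpy.arange(10))
#     canvas.animate(11, lambda frame: None)
#     toyplot.mp4.render(canvas, "/tmp/test.mp4")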
| cmorgan/toyplot | features/steps/animation.py | Python | bsd-3-clause | 2,424 |
"""
This module implements the "new" binary OpenEphys format.
In this format channels are interleaved in one file.
See
https://open-ephys.github.io/gui-docs/User-Manual/Recording-data/Binary-format.html
Author: Julia Sprenger and Samuel Garcia
"""
import os
import re
import json
from pathlib import Path
import numpy as np
from .baserawio import (BaseRawIO, _signal_channel_dtype, _signal_stream_dtype,
_spike_channel_dtype, _event_channel_dtype)
class OpenEphysBinaryRawIO(BaseRawIO):
"""
Handle several Blocks and several Segments.
    # Correspondences
Neo OpenEphys
block[n-1] experiment[n] New device start/stop
segment[s-1] recording[s] New recording start/stop
This IO handles several signal streams.
Special event (npy) data are represented as array_annotations.
The current implementation does not handle spiking data, this will be added upon user request
"""
extensions = []
rawmode = 'one-dir'
def __init__(self, dirname=''):
BaseRawIO.__init__(self)
self.dirname = dirname
def _source_name(self):
return self.dirname
def _parse_header(self):
all_streams, nb_block, nb_segment_per_block = explore_folder(self.dirname)
sig_stream_names = sorted(list(all_streams[0][0]['continuous'].keys()))
event_stream_names = sorted(list(all_streams[0][0]['events'].keys()))
        # first loop to reassign stream by "stream_index" instead of "stream_name"
self._sig_streams = {}
self._evt_streams = {}
for block_index in range(nb_block):
self._sig_streams[block_index] = {}
self._evt_streams[block_index] = {}
for seg_index in range(nb_segment_per_block[block_index]):
self._sig_streams[block_index][seg_index] = {}
self._evt_streams[block_index][seg_index] = {}
for stream_index, stream_name in enumerate(sig_stream_names):
d = all_streams[block_index][seg_index]['continuous'][stream_name]
d['stream_name'] = stream_name
self._sig_streams[block_index][seg_index][stream_index] = d
for i, stream_name in enumerate(event_stream_names):
d = all_streams[block_index][seg_index]['events'][stream_name]
d['stream_name'] = stream_name
self._evt_streams[block_index][seg_index][i] = d
# signals zone
# create signals channel map: several channel per stream
signal_channels = []
for stream_index, stream_name in enumerate(sig_stream_names):
            # stream_index is the index in the vector of stream names
stream_id = str(stream_index)
d = self._sig_streams[0][0][stream_index]
new_channels = []
for chan_info in d['channels']:
chan_id = chan_info['channel_name']
new_channels.append((chan_info['channel_name'],
chan_id, float(d['sample_rate']), d['dtype'], chan_info['units'],
chan_info['bit_volts'], 0., stream_id))
signal_channels.extend(new_channels)
signal_channels = np.array(signal_channels, dtype=_signal_channel_dtype)
signal_streams = []
for stream_index, stream_name in enumerate(sig_stream_names):
stream_id = str(stream_index)
signal_streams.append((stream_name, stream_id))
signal_streams = np.array(signal_streams, dtype=_signal_stream_dtype)
# create memmap for signals
for block_index in range(nb_block):
for seg_index in range(nb_segment_per_block[block_index]):
for stream_index, d in self._sig_streams[block_index][seg_index].items():
num_channels = len(d['channels'])
print(d['raw_filename'])
memmap_sigs = np.memmap(d['raw_filename'], d['dtype'],
order='C', mode='r').reshape(-1, num_channels)
d['memmap'] = memmap_sigs
# events zone
# channel map: one channel one stream
event_channels = []
for stream_ind, stream_name in enumerate(event_stream_names):
d = self._evt_streams[0][0][stream_ind]
event_channels.append((d['channel_name'], stream_ind, 'event'))
event_channels = np.array(event_channels, dtype=_event_channel_dtype)
# create memmap
for stream_ind, stream_name in enumerate(event_stream_names):
# inject memmap loaded into main dict structure
d = self._evt_streams[0][0][stream_ind]
for name in _possible_event_stream_names:
if name + '_npy' in d:
data = np.load(d[name + '_npy'], mmap_mode='r')
d[name] = data
# check that events have timestamps
assert 'timestamps' in d
# for event the neo "label" will change depending the nature
# of event (ttl, text, binary)
# and this is transform into unicode
# all theses data are put in event array annotations
if 'text' in d:
# text case
d['labels'] = d['text'].astype('U')
elif 'metadata' in d:
# binary case
d['labels'] = d['channels'].astype('U')
elif 'channels' in d:
# ttl case use channels
d['labels'] = d['channels'].astype('U')
else:
raise ValueError(f'There is no possible labels for this event: {stream_name}')
# no spike read yet
# can be implemented on user demand
spike_channels = np.array([], dtype=_spike_channel_dtype)
# loop for t_start/t_stop on segment browse all object
self._t_start_segments = {}
self._t_stop_segments = {}
for block_index in range(nb_block):
self._t_start_segments[block_index] = {}
self._t_stop_segments[block_index] = {}
for seg_index in range(nb_segment_per_block[block_index]):
global_t_start = None
global_t_stop = None
# loop over signals
for stream_index, d in self._sig_streams[block_index][seg_index].items():
t_start = d['t_start']
dur = d['memmap'].shape[0] / float(d['sample_rate'])
t_stop = t_start + dur
if global_t_start is None or global_t_start > t_start:
global_t_start = t_start
if global_t_stop is None or global_t_stop < t_stop:
global_t_stop = t_stop
# loop over events
for stream_index, stream_name in enumerate(event_stream_names):
d = self._evt_streams[0][0][stream_index]
if d['timestamps'].size == 0:
continue
t_start = d['timestamps'][0] / d['sample_rate']
t_stop = d['timestamps'][-1] / d['sample_rate']
if global_t_start is None or global_t_start > t_start:
global_t_start = t_start
if global_t_stop is None or global_t_stop < t_stop:
global_t_stop = t_stop
self._t_start_segments[block_index][seg_index] = global_t_start
self._t_stop_segments[block_index][seg_index] = global_t_stop
# main header
self.header = {}
self.header['nb_block'] = nb_block
self.header['nb_segment'] = nb_segment_per_block
self.header['signal_streams'] = signal_streams
self.header['signal_channels'] = signal_channels
self.header['spike_channels'] = spike_channels
self.header['event_channels'] = event_channels
# Annotate some objects from continuous files
self._generate_minimal_annotations()
for block_index in range(nb_block):
bl_ann = self.raw_annotations['blocks'][block_index]
for seg_index in range(nb_segment_per_block[block_index]):
seg_ann = bl_ann['segments'][seg_index]
# array annotations for signal channels
for stream_index, stream_name in enumerate(sig_stream_names):
sig_ann = seg_ann['signals'][stream_index]
d = self._sig_streams[0][0][stream_index]
for k in ('identifier', 'history', 'source_processor_index',
'recorded_processor_index'):
if k in d['channels'][0]:
values = np.array([chan_info[k] for chan_info in d['channels']])
sig_ann['__array_annotations__'][k] = values
# array annotations for event channels
# use other possible data in _possible_event_stream_names
for stream_index, stream_name in enumerate(event_stream_names):
ev_ann = seg_ann['events'][stream_index]
d = self._evt_streams[0][0][stream_index]
for k in _possible_event_stream_names:
if k in ('timestamps', ):
continue
if k in d:
# split custom dtypes into separate annotations
if d[k].dtype.names:
for name in d[k].dtype.names:
ev_ann['__array_annotations__'][name] = d[k][name].flatten()
else:
ev_ann['__array_annotations__'][k] = d[k]
def _segment_t_start(self, block_index, seg_index):
return self._t_start_segments[block_index][seg_index]
def _segment_t_stop(self, block_index, seg_index):
return self._t_stop_segments[block_index][seg_index]
def _channels_to_group_id(self, channel_indexes):
if channel_indexes is None:
channel_indexes = slice(None)
channels = self.header['signal_channels']
group_ids = channels[channel_indexes]['group_id']
assert np.unique(group_ids).size == 1
group_id = group_ids[0]
return group_id
def _get_signal_size(self, block_index, seg_index, stream_index):
sigs = self._sig_streams[block_index][seg_index][stream_index]['memmap']
return sigs.shape[0]
def _get_signal_t_start(self, block_index, seg_index, stream_index):
t_start = self._sig_streams[block_index][seg_index][stream_index]['t_start']
return t_start
def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop,
stream_index, channel_indexes):
sigs = self._sig_streams[block_index][seg_index][stream_index]['memmap']
sigs = sigs[i_start:i_stop, :]
if channel_indexes is not None:
sigs = sigs[:, channel_indexes]
return sigs
def _spike_count(self, block_index, seg_index, unit_index):
pass
def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
pass
def _rescale_spike_timestamp(self, spike_timestamps, dtype):
pass
def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index, t_start, t_stop):
pass
def _event_count(self, block_index, seg_index, event_channel_index):
d = self._evt_streams[0][0][event_channel_index]
return d['timestamps'].size
def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
d = self._evt_streams[0][0][event_channel_index]
timestamps = d['timestamps']
durations = None
labels = d['labels']
# slice it if needed
if t_start is not None:
ind_start = int(t_start * d['sample_rate'])
mask = timestamps >= ind_start
timestamps = timestamps[mask]
labels = labels[mask]
if t_stop is not None:
ind_stop = int(t_stop * d['sample_rate'])
mask = timestamps < ind_stop
timestamps = timestamps[mask]
labels = labels[mask]
return timestamps, durations, labels
def _rescale_event_timestamp(self, event_timestamps, dtype, event_channel_index):
d = self._evt_streams[0][0][event_channel_index]
event_times = event_timestamps.astype(dtype) / float(d['sample_rate'])
return event_times
def _rescale_epoch_duration(self, raw_duration, dtype):
pass
_possible_event_stream_names = ('timestamps', 'channels', 'text',
'full_word', 'channel_states', 'data_array', 'metadata')
def explore_folder(dirname):
"""
Exploring the OpenEphys folder structure and structure.oebin
Returns nested dictionary structure:
[block_index][seg_index][stream_type][stream_information]
where
- node_name is the open ephys node id
- block_index is the neo Block index
- segment_index is the neo Segment index
- stream_type can be 'continuous'/'events'/'spikes'
        - stream_information is a dictionary containing e.g. the sampling rate
    Parameters
    ----------
dirname (str): Root folder of the dataset
Returns
-------
nested dictionaries containing structure and stream information
"""
nb_block = 0
nb_segment_per_block = []
# nested dictionary: block_index > seg_index > data_type > stream_name
all_streams = {}
for root, dirs, files in os.walk(dirname):
for file in files:
if not file == 'structure.oebin':
continue
root = Path(root)
node_name = root.parents[1].stem
if not node_name.startswith('Record'):
                # before version 5.x.x there was no multi Record Node recording,
                # so there is no node_name
node_name = ''
block_index = int(root.parents[0].stem.replace('experiment', '')) - 1
if block_index not in all_streams:
all_streams[block_index] = {}
if block_index >= nb_block:
nb_block = block_index + 1
nb_segment_per_block.append(0)
seg_index = int(root.stem.replace('recording', '')) - 1
if seg_index not in all_streams[block_index]:
all_streams[block_index][seg_index] = {
'continuous': {},
'events': {},
'spikes': {},
}
if seg_index >= nb_segment_per_block[block_index]:
nb_segment_per_block[block_index] = seg_index + 1
# metadata
with open(root / 'structure.oebin', encoding='utf8', mode='r') as f:
structure = json.load(f)
if (root / 'continuous').exists() and len(structure['continuous']) > 0:
for d in structure['continuous']:
                    # when there are multiple Record Nodes the stream name also
                    # contains the node name to make it unique
stream_name = node_name + '#' + d['folder_name']
raw_filename = root / 'continuous' / d['folder_name'] / 'continuous.dat'
timestamp_file = root / 'continuous' / d['folder_name'] / 'timestamps.npy'
timestamps = np.load(str(timestamp_file), mmap_mode='r')
timestamp0 = timestamps[0]
t_start = timestamp0 / d['sample_rate']
# TODO for later : gap checking
signal_stream = d.copy()
signal_stream['raw_filename'] = str(raw_filename)
signal_stream['dtype'] = 'int16'
signal_stream['timestamp0'] = timestamp0
signal_stream['t_start'] = t_start
all_streams[block_index][seg_index]['continuous'][stream_name] = signal_stream
if (root / 'events').exists() and len(structure['events']) > 0:
for d in structure['events']:
stream_name = node_name + '#' + d['folder_name']
event_stream = d.copy()
for name in _possible_event_stream_names:
npz_filename = root / 'events' / d['folder_name'] / f'{name}.npy'
if npz_filename.is_file():
event_stream[f'{name}_npy'] = str(npz_filename)
all_streams[block_index][seg_index]['events'][stream_name] = event_stream
# TODO for later: check stream / channel consistency across segment
return all_streams, nb_block, nb_segment_per_block
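# Hedged usage sketch (the folder path is a placeholder; parse_header() and
# get_analogsignal_chunk() are the public BaseRawIO wrappers around the
# _-prefixed methods implemented above):
#
#     reader = OpenEphysBinaryRawIO(dirname='/data/open_ephys_session')
#     reader.parse_header()
#     raw_chunk = reader.get_analogsignal_chunk(
#         block_index=0, seg_index=0, i_start=0, i_stop=1024,
#         stream_index=0, channel_indexes=None)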
| apdavison/python-neo | neo/rawio/openephysbinaryrawio.py | Python | bsd-3-clause | 16,829 |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import pkg_resources
import shutil
import biom
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from q2_types.feature_data import DNAIterator
import q2templates
import skbio
import qiime2
import json
from ._vega_spec import vega_spec
_blast_url_template = ("http://www.ncbi.nlm.nih.gov/BLAST/Blast.cgi?"
"ALIGNMENT_VIEW=Pairwise&PROGRAM=blastn&DATABASE"
"=nt&CMD=Put&QUERY=%s")
TEMPLATES = pkg_resources.resource_filename('q2_feature_table', '_summarize')
def tabulate_seqs(output_dir: str, data: DNAIterator) -> None:
sequences = []
seq_lengths = []
with open(os.path.join(output_dir, 'sequences.fasta'), 'w') as fh:
for sequence in data:
skbio.io.write(sequence, format='fasta', into=fh)
str_seq = str(sequence)
seq_len = len(str_seq)
sequences.append({'id': sequence.metadata['id'],
'len': seq_len,
'url': _blast_url_template % str_seq,
'seq': str_seq})
seq_lengths.append(seq_len)
seq_len_stats = _compute_descriptive_stats(seq_lengths)
_write_tsvs_of_descriptive_stats(seq_len_stats, output_dir)
index = os.path.join(TEMPLATES, 'tabulate_seqs_assets', 'index.html')
q2templates.render(index, output_dir, context={'data': sequences,
'stats': seq_len_stats})
js = os.path.join(
TEMPLATES, 'tabulate_seqs_assets', 'js', 'tsorter.min.js')
os.mkdir(os.path.join(output_dir, 'js'))
shutil.copy(js, os.path.join(output_dir, 'js', 'tsorter.min.js'))
def summarize(output_dir: str, table: biom.Table,
sample_metadata: qiime2.Metadata = None) -> None:
# this value is to limit the amount of memory used by seaborn.histplot, for
# more information see: https://github.com/mwaskom/seaborn/issues/2325
MAX_BINS = 50
number_of_features, number_of_samples = table.shape
sample_summary, sample_frequencies = _frequency_summary(
table, axis='sample')
if number_of_samples > 1:
# Calculate the bin count, with a minimum of 5 bins
IQR = sample_summary['3rd quartile'] - sample_summary['1st quartile']
if IQR == 0.0:
bins = 5
else:
# Freedman–Diaconis rule
bin_width = (2 * IQR) / (number_of_samples ** (1/3))
bins = max((sample_summary['Maximum frequency'] -
sample_summary['Minimum frequency']) / bin_width, 5)
bins = min(bins, MAX_BINS)
sample_frequencies_ax = sns.histplot(sample_frequencies, kde=False,
bins=int(round(bins)))
sample_frequencies_ax.get_xaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
sample_frequencies_ax.set_xlabel('Frequency per sample')
sample_frequencies_ax.set_ylabel('Number of samples')
sample_frequencies_ax.get_figure().savefig(
os.path.join(output_dir, 'sample-frequencies.pdf'))
sample_frequencies_ax.get_figure().savefig(
os.path.join(output_dir, 'sample-frequencies.png'))
plt.gcf().clear()
feature_summary, feature_frequencies = _frequency_summary(
table, axis='observation')
if number_of_features > 1:
IQR = feature_summary['3rd quartile'] - feature_summary['1st quartile']
if IQR == 0.0:
bins = 5
else:
# Freedman–Diaconis rule
bin_width = (2 * IQR) / (number_of_features ** (1/3))
bins = max((feature_summary['Maximum frequency'] -
feature_summary['Minimum frequency']) / bin_width, 5)
bins = min(bins, MAX_BINS)
feature_frequencies_ax = sns.histplot(feature_frequencies, kde=False,
bins=int(round(bins)))
feature_frequencies_ax.set_xlabel('Frequency per feature')
feature_frequencies_ax.set_ylabel('Number of features')
feature_frequencies_ax.set_xscale('log')
feature_frequencies_ax.set_yscale('log')
feature_frequencies_ax.get_figure().savefig(
os.path.join(output_dir, 'feature-frequencies.pdf'))
feature_frequencies_ax.get_figure().savefig(
os.path.join(output_dir, 'feature-frequencies.png'))
sample_summary_table = q2templates.df_to_html(
sample_summary.apply('{:,}'.format).to_frame('Frequency'))
feature_summary_table = q2templates.df_to_html(
feature_summary.apply('{:,}'.format).to_frame('Frequency'))
index = os.path.join(TEMPLATES, 'summarize_assets', 'index.html')
context = {
'number_of_samples': number_of_samples,
'number_of_features': number_of_features,
'total_frequencies': int(np.sum(sample_frequencies)),
'sample_summary_table': sample_summary_table,
'feature_summary_table': feature_summary_table,
}
feature_qualitative_data = _compute_qualitative_summary(table)
sample_frequencies.sort_values(inplace=True, ascending=False)
feature_frequencies.sort_values(inplace=True, ascending=False)
sample_frequencies.to_csv(
os.path.join(output_dir, 'sample-frequency-detail.csv'))
feature_frequencies.to_csv(
os.path.join(output_dir, 'feature-frequency-detail.csv'))
feature_frequencies = feature_frequencies.astype(int) \
.apply('{:,}'.format).to_frame('Frequency')
feature_frequencies['# of Samples Observed In'] = \
pd.Series(feature_qualitative_data).astype(int).apply('{:,}'.format)
feature_frequencies_table = q2templates.df_to_html(feature_frequencies)
sample_frequency_template = os.path.join(
TEMPLATES, 'summarize_assets', 'sample-frequency-detail.html')
feature_frequency_template = os.path.join(
TEMPLATES, 'summarize_assets', 'feature-frequency-detail.html')
context.update({'max_count': sample_frequencies.max(),
'feature_frequencies_table': feature_frequencies_table,
'feature_qualitative_data': feature_qualitative_data,
'tabs': [{'url': 'index.html',
'title': 'Overview'},
{'url': 'sample-frequency-detail.html',
'title': 'Interactive Sample Detail'},
{'url': 'feature-frequency-detail.html',
'title': 'Feature Detail'}]})
# Create a JSON object containing the Sample Frequencies to build the
# table in sample-frequency-detail.html
sample_frequencies_json = sample_frequencies.to_json()
templates = [index, sample_frequency_template, feature_frequency_template]
context.update({'frequencies_list':
json.dumps(sorted(sample_frequencies.values.tolist()))})
if sample_metadata is not None:
context.update({'vega_spec':
json.dumps(vega_spec(sample_metadata,
sample_frequencies
))
})
context.update({'sample_frequencies_json': sample_frequencies_json})
q2templates.util.copy_assets(os.path.join(TEMPLATES,
'summarize_assets',
'vega'),
output_dir)
q2templates.render(templates, output_dir, context=context)
plt.close('all')
def _compute_descriptive_stats(lst: list):
"""Basic descriptive statistics and a (parametric) seven-number summary.
Calculates descriptive statistics for a list of numerical values, including
count, min, max, mean, and a parametric seven-number-summary. This summary
includes values for the lower quartile, median, upper quartile, and
percentiles 2, 9, 91, and 98. If the data is normally distributed, these
seven percentiles will be equally spaced when plotted.
Parameters
----------
lst : list of int or float values
Returns
-------
dict
a dictionary containing the following descriptive statistics:
count
int: the number of items in `lst`
min
int or float: the smallest number in `lst`
max
int or float: the largest number in `lst`
mean
float: the mean of `lst`
range
int or float: the range of values in `lst`
std
float: the standard deviation of values in `lst`
seven_num_summ_percentiles
list of floats: the parameter percentiles used to calculate this
seven-number summary: [2, 9, 25, 50, 75, 91, 98]
seven_num_summ_values
list of floats: the calculated percentile values of the summary
"""
# NOTE: With .describe(), NaN values in passed lst are excluded by default
if len(lst) == 0:
raise ValueError('No values provided.')
seq_lengths = pd.Series(lst)
seven_num_summ_percentiles = [0.02, 0.09, 0.25, 0.5, 0.75, 0.91, 0.98]
descriptive_stats = seq_lengths.describe(
percentiles=seven_num_summ_percentiles)
return {'count': int(descriptive_stats.loc['count']),
'min': descriptive_stats.loc['min'],
'max': descriptive_stats.loc['max'],
'range': descriptive_stats.loc['max'] -
descriptive_stats.loc['min'],
'mean': descriptive_stats.loc['mean'],
'std': descriptive_stats.loc['std'],
'seven_num_summ_percentiles': seven_num_summ_percentiles,
'seven_num_summ_values': descriptive_stats.loc['2%':'98%'].tolist()
}
def _write_tsvs_of_descriptive_stats(dictionary: dict, output_dir: str):
descriptive_stats = ['count', 'min', 'max', 'mean', 'range', 'std']
stat_list = []
for key in descriptive_stats:
stat_list.append(dictionary[key])
descriptive_stats = pd.DataFrame(
{'Statistic': descriptive_stats, 'Value': stat_list})
descriptive_stats.to_csv(
os.path.join(output_dir, 'descriptive_stats.tsv'),
sep='\t', index=False, float_format='%g')
seven_number_summary = pd.DataFrame(
{'Quantile': dictionary['seven_num_summ_percentiles'],
'Value': dictionary['seven_num_summ_values']})
seven_number_summary.to_csv(
os.path.join(output_dir, 'seven_number_summary.tsv'),
sep='\t', index=False, float_format='%g')
def _compute_qualitative_summary(table):
table = table.transpose()
sample_count = {}
for count_vector, feature_id, _ in table.iter():
sample_count[feature_id] = (count_vector != 0).sum()
return sample_count
def _frequencies(table, axis):
return pd.Series(data=table.sum(axis=axis), index=table.ids(axis=axis))
def _frequency_summary(table, axis='sample'):
frequencies = _frequencies(table, axis=axis)
summary = pd.Series([frequencies.min(), frequencies.quantile(0.25),
frequencies.median(), frequencies.quantile(0.75),
frequencies.max(), frequencies.mean()],
index=['Minimum frequency', '1st quartile',
'Median frequency', '3rd quartile',
'Maximum frequency', 'Mean frequency'])
return summary, frequencies
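# Hedged worked example of the Freedman-Diaconis rule used in summarize()
# above: with an interquartile range of 1,000 reads across 125 samples,
# bin_width = 2 * 1000 / 125 ** (1 / 3) = 400, and the bin count is
# (maximum frequency - minimum frequency) / bin_width, clamped to lie
# between 5 and MAX_BINS.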
| qiime2/q2-feature-table | q2_feature_table/_summarize/_visualizer.py | Python | bsd-3-clause | 11,912 |
from __future__ import annotations
from scitbx.array_family import flex
from dials.algorithms.image.filter import convolve
from dials.algorithms.statistics import BinnedStatistics
# Module-level definition imported by the image viewer
phil_str = """
n_iqr = 6
.type = int
.help = "IQR multiplier for determining the threshold value"
blur = narrow wide
.type = choice
.help = "Optional preprocessing of the image by a convolution with"
"a simple Gaussian kernel of size either 3×3 (narrow) or"
"5×5 (wide). This may help to reduce noise peaks and to"
"combine split spots."
n_bins = 100
.type = int
.help = "Number of 2θ bins in which to calculate background"
"""
class RadialProfileSpotFinderThresholdExt:
"""
Extension to calculate a radial profile threshold. This method calculates
    the background median and IQR in 2θ shells, then sets a threshold n_iqr
    interquartile ranges above that median. As such, it is important to have the
beam centre correct and to mask out any significant shadows. The method may
be particularly useful for electron diffraction images, where there can be
considerable inelastic scatter around low resolution spots. In addition, the
algorithm is relatively insensitive to noise properties of the detector.
This helps for the case of integrating detectors with poorly known gain
and response statistics.
A similar algorithm is available in other programs. The description of
'peakfinder 8' in https://doi.org/10.1107/S1600576714007626 was helpful
in the development of this method.
"""
name = "radial_profile"
@staticmethod
def phil():
from libtbx.phil import parse
phil = parse(phil_str)
return phil
def __init__(self, params):
"""
Initialise the algorithm.
:param params: The input parameters
"""
self.params = params
# Set approximate Gaussian kernel for blurring
if self.params.spotfinder.threshold.radial_profile.blur == "narrow":
# fmt: off
self.kernel = flex.double(
(0.0625, 0.125, 0.0625,
0.125, 0.25, 0.125,
0.0625, 0.125, 0.0625)
)
# fmt: on
self.kernel.reshape(flex.grid((3, 3)))
elif self.params.spotfinder.threshold.radial_profile.blur == "wide":
# fmt: off
self.kernel = (
flex.double(
(
1, 4, 7, 4, 1,
4, 16, 26, 16, 4,
7, 26, 41, 26, 7,
4, 16, 26, 16, 4,
1, 4, 7, 4, 1,
)
) / 273
)
# fmt: on
self.kernel.reshape(flex.grid((5, 5)))
else:
self.kernel = None
def compute_threshold(
self, image, mask, *, imageset, i_panel, region_of_interest=None, **kwargs
):
r"""
Compute the threshold.
:param image: The image to process
:param mask: The pixel mask on the image
:\*\*kwargs: Arbitrary keyword arguments
:returns: A boolean mask showing foreground/background pixels
"""
if self.kernel:
image = convolve(image, self.kernel)
panel = imageset.get_detector()[i_panel]
beam = imageset.get_beam()
# Get 2θ array for the panel or ROI
two_theta_array = panel.get_two_theta_array(beam.get_s0())
if region_of_interest:
x0, x1, y0, y1 = region_of_interest
two_theta_array = two_theta_array[y0:y1, x0:x1]
# Convert to 2θ bin selections
lookup = two_theta_array - flex.min(two_theta_array)
n_bins = self.params.spotfinder.threshold.radial_profile.n_bins
multiplier = n_bins / flex.max(lookup + 1e-10)
lookup *= multiplier # values now in range [0,n_bins+1)
lookup = (
flex.floor(lookup).iround().as_size_t()
) # values now in range [0,n_bins-1]
# Calculate median intensity and IQR within each bin of masked values
masked_lookup = lookup.select(mask.as_1d())
masked_image = image.select(mask.as_1d())
binned_statistics = BinnedStatistics(masked_image, masked_lookup, n_bins)
med_I = binned_statistics.get_medians()
iqr = binned_statistics.get_iqrs()
# Determine the threshold value for each bin. This should be at least
# 1 quantum greater value than the median to avoid selecting everything
# in low background cases
n_iqr = self.params.spotfinder.threshold.radial_profile.n_iqr
add_level = n_iqr * iqr
adu = 1 / panel.get_gain()
add_level.set_selected(add_level <= adu, 2.0 * adu)
threshold = med_I + add_level
# Now construct a threshold image
thresh_im = threshold.select(lookup.as_1d())
# Peaks are unmasked pixels greater than the threshold
peaks = image > thresh_im
return peaks & mask
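def _radial_threshold_sketch():
    # Hedged stand-alone illustration (not used by DIALS) of the per-bin rule
    # implemented above: threshold = median + max(n_iqr * IQR, 2 ADU), shown
    # here with NumPy on one fake bin of masked intensities (n_iqr=6, gain=1).
    import numpy as np
    pixels = np.array([3.0, 4.0, 5.0, 4.0, 50.0])
    q1, med, q3 = np.percentile(pixels, [25, 50, 75])
    add_level = max(6.0 * (q3 - q1), 2.0 * 1.0)
    return pixels > med + add_level  # only the 50.0 pixel exceeds the threshold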
| dials/dials | extensions/radial_profile_spotfinder_threshold_ext.py | Python | bsd-3-clause | 5,140 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
from flask.ext.script import Manager, Shell, Server
from flask.ext.migrate import MigrateCommand
from alexistheman.app import create_app
from alexistheman.user.models import User
from alexistheman.settings import DevConfig, ProdConfig
from alexistheman.database import db
if os.environ.get("ALEXISTHEMAN_ENV") == 'prod':
app = create_app(ProdConfig)
else:
app = create_app(DevConfig)
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_PATH = os.path.join(HERE, 'tests')
manager = Manager(app)
def _make_context():
"""Return context dict for a shell session so you can access
app, db, and the User model by default.
"""
return {'app': app, 'db': db, 'User': User}
@manager.command
def test():
"""Run the tests."""
import pytest
exit_code = pytest.main([TEST_PATH, '--verbose'])
return exit_code
manager.add_command('server', Server())
manager.add_command('shell', Shell(make_context=_make_context))
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
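# Hedged usage sketch: with the package installed, typical invocations are
#
#     python manage.py server       # run the development server
#     python manage.py shell        # shell with app, db and User preloaded
#     python manage.py db migrate   # Flask-Migrate database commands
#     python manage.py test         # run the pytest suite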
| gurkslask/alexistheman | manage.py | Python | bsd-3-clause | 1,122 |
from django.test import TestCase
class TemplateTagTestCases(TestCase):
pass
| designcc/django-ccpages | ccpages/tests/test_templatetags.py | Python | bsd-3-clause | 81 |
# Authors: Alexandre Gramfort <[email protected]>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from nose.tools import assert_true, assert_raises
from numpy.testing import assert_allclose
from mne.viz.utils import (compare_fiff, _fake_click, _compute_scalings,
_validate_if_list_of_axes)
from mne.viz import ClickableImage, add_background_image, mne_analyze_colormap
from mne.utils import run_tests_if_main
from mne.io import read_raw_fif
from mne.event import read_events
from mne.epochs import Epochs
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
ev_fname = op.join(base_dir, 'test_raw-eve.fif')
def test_mne_analyze_colormap():
"""Test mne_analyze_colormap
"""
assert_raises(ValueError, mne_analyze_colormap, [0])
assert_raises(ValueError, mne_analyze_colormap, [-1, 1, 2])
assert_raises(ValueError, mne_analyze_colormap, [0, 2, 1])
def test_compare_fiff():
import matplotlib.pyplot as plt
compare_fiff(raw_fname, cov_fname, read_limit=0, show=False)
plt.close('all')
def test_clickable_image():
"""Test the ClickableImage class."""
# Gen data and create clickable image
import matplotlib.pyplot as plt
im = np.random.RandomState(0).randn(100, 100)
clk = ClickableImage(im)
clicks = [(12, 8), (46, 48), (10, 24)]
# Generate clicks
for click in clicks:
_fake_click(clk.fig, clk.ax, click, xform='data')
assert_allclose(np.array(clicks), np.array(clk.coords))
assert_true(len(clicks) == len(clk.coords))
# Exporting to layout
lt = clk.to_layout()
assert_true(lt.pos.shape[0] == len(clicks))
assert_allclose(lt.pos[1, 0] / lt.pos[2, 0],
clicks[1][0] / float(clicks[2][0]))
clk.plot_clicks()
plt.close('all')
def test_add_background_image():
"""Test adding background image to a figure."""
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
f, axs = plt.subplots(1, 2)
x, y = rng.randn(2, 10)
im = rng.randn(10, 10)
axs[0].scatter(x, y)
axs[1].scatter(y, x)
for ax in axs:
ax.set_aspect(1)
# Background without changing aspect
ax_im = add_background_image(f, im)
assert_true(ax_im.get_aspect() == 'auto')
for ax in axs:
assert_true(ax.get_aspect() == 1)
# Background with changing aspect
ax_im_asp = add_background_image(f, im, set_ratios='auto')
assert_true(ax_im_asp.get_aspect() == 'auto')
for ax in axs:
assert_true(ax.get_aspect() == 'auto')
# Make sure passing None as image returns None
assert_true(add_background_image(f, None) is None)
def test_auto_scale():
"""Test auto-scaling of channels for quick plotting."""
raw = read_raw_fif(raw_fname, preload=False)
ev = read_events(ev_fname)
epochs = Epochs(raw, ev)
rand_data = np.random.randn(10, 100)
for inst in [raw, epochs]:
scale_grad = 1e10
scalings_def = dict([('eeg', 'auto'), ('grad', scale_grad),
('stim', 'auto')])
# Test for wrong inputs
assert_raises(ValueError, inst.plot, scalings='foo')
assert_raises(ValueError, _compute_scalings, 'foo', inst)
# Make sure compute_scalings doesn't change anything not auto
scalings_new = _compute_scalings(scalings_def, inst)
assert_true(scale_grad == scalings_new['grad'])
assert_true(scalings_new['eeg'] != 'auto')
assert_raises(ValueError, _compute_scalings, scalings_def, rand_data)
epochs = epochs[0].load_data()
epochs.pick_types(eeg=True, meg=False)
assert_raises(ValueError, _compute_scalings,
dict(grad='auto'), epochs)
def test_validate_if_list_of_axes():
import matplotlib.pyplot as plt
fig, ax = plt.subplots(2, 2)
assert_raises(ValueError, _validate_if_list_of_axes, ax)
ax_flat = ax.ravel()
ax = ax.ravel().tolist()
_validate_if_list_of_axes(ax_flat)
_validate_if_list_of_axes(ax_flat, 4)
assert_raises(ValueError, _validate_if_list_of_axes, ax_flat, 5)
assert_raises(ValueError, _validate_if_list_of_axes, ax, 3)
assert_raises(ValueError, _validate_if_list_of_axes, 'error')
assert_raises(ValueError, _validate_if_list_of_axes, ['error'] * 2)
assert_raises(ValueError, _validate_if_list_of_axes, ax[0])
assert_raises(ValueError, _validate_if_list_of_axes, ax, 3)
ax_flat[2] = 23
assert_raises(ValueError, _validate_if_list_of_axes, ax_flat)
_validate_if_list_of_axes(ax, 4)
run_tests_if_main()
| alexandrebarachant/mne-python | mne/viz/tests/test_utils.py | Python | bsd-3-clause | 4,889 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.value import summarizable
class SummarizableTest(unittest.TestCase):
def testAsDictWithoutImprovementDirection(self):
value = summarizable.SummarizableValue(
None, 'foo', 'bars', important=False, description='desc',
tir_label=None, improvement_direction=None)
self.assertNotIn('improvement_direction', value.AsDict())
def testAsDictWithoutBaseClassEntries(self):
value = summarizable.SummarizableValue(
None, 'foo', 'bars', important=False, description='desc',
tir_label=None, improvement_direction=None)
self.assertFalse(value.AsDictWithoutBaseClassEntries())
def testAsDictWithInvalidImprovementDirection(self):
# TODO(eakuefner): Remove this test when we check I.D. in constructor
value = summarizable.SummarizableValue(
None, 'foo', 'bars', important=False, description='desc',
tir_label=None, improvement_direction='baz')
self.assertNotIn('improvement_direction', value.AsDict())
| SummerLW/Perf-Insight-Report | telemetry/telemetry/value/summarizable_unittest.py | Python | bsd-3-clause | 1,168 |
import os
import os.path as op
import re
import warnings
import numpy as np
from scipy import io as spio
from mne import Epochs, write_evokeds
from mne.defaults import DEFAULTS
from mne.epochs import combine_event_ids
from mne.externals.h5io import write_hdf5
from mne.io import read_raw_fif, concatenate_raws
from mne.viz import plot_drop_log
from mne.utils import use_log_level
from ._paths import get_raw_fnames, get_epochs_evokeds_fnames
from ._scoring import _read_events
from ._sss import _read_raw_prebad
from ._utils import (_fix_raw_eog_cals, _get_baseline, get_args, _handle_dict,
_restrict_reject_flat, _get_epo_kwargs, _handle_decim)
def save_epochs(p, subjects, in_names, in_numbers, analyses, out_names,
out_numbers, must_match, decim, run_indices):
"""Generate epochs from raw data based on events.
Can only complete after preprocessing is complete.
Parameters
----------
p : instance of Parameters
Analysis parameters.
subjects : list of str
Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
in_names : list of str
Names of input events.
in_numbers : list of list of int
Event numbers (in scored event files) associated with each name.
analyses : list of str
Lists of analyses of interest.
out_names : list of list of str
Event types to make out of old ones.
out_numbers : list of list of int
Event numbers to convert to (e.g., [[1, 1, 2, 3, 3], ...] would create
three event types, where the first two and last two event types from
the original list get collapsed over).
must_match : list of int
Indices from the original in_names that must match in event counts
before collapsing. Should eventually be expanded to allow for
ratio-based collapsing.
decim : int | list of int
Amount to decimate.
run_indices : array-like | None
Run indices to include.
"""
in_names = np.asanyarray(in_names)
old_dict = dict()
for n, e in zip(in_names, in_numbers):
old_dict[n] = e
# let's do some sanity checks
if len(in_names) != len(in_numbers):
raise RuntimeError('in_names (%d) must have same length as '
'in_numbers (%d)'
% (len(in_names), len(in_numbers)))
if np.any(np.array(in_numbers) <= 0):
raise ValueError('in_numbers must all be > 0')
if len(out_names) != len(out_numbers):
raise RuntimeError('out_names must have same length as out_numbers')
for name, num in zip(out_names, out_numbers):
num = np.array(num)
if len(name) != len(np.unique(num[num > 0])):
raise RuntimeError('each entry in out_names must have length '
'equal to the number of unique elements in the '
'corresponding entry in out_numbers:\n%s\n%s'
% (name, np.unique(num[num > 0])))
if len(num) != len(in_names):
raise RuntimeError('each entry in out_numbers must have the same '
'length as in_names')
if (np.array(num) == 0).any():
raise ValueError('no element of out_numbers can be zero')
ch_namess = list()
drop_logs = list()
sfreqs = set()
for si, subj in enumerate(subjects):
if p.disp_files:
print(' Loading raw files for subject %s.' % subj)
epochs_dir = op.join(p.work_dir, subj, p.epochs_dir)
if not op.isdir(epochs_dir):
os.mkdir(epochs_dir)
evoked_dir = op.join(p.work_dir, subj, p.inverse_dir)
if not op.isdir(evoked_dir):
os.mkdir(evoked_dir)
# read in raw files
raw_names = get_raw_fnames(p, subj, 'pca', False, False,
run_indices[si])
raw, ratios = _concat_resamp_raws(p, subj, raw_names)
assert ratios.shape == (len(raw_names),)
# optionally calculate autoreject thresholds
this_decim = _handle_decim(decim[si], raw.info['sfreq'])
new_sfreq = raw.info['sfreq'] / this_decim
if p.disp_files:
print(' Epoching data (decim=%s -> sfreq=%0.1f Hz).'
% (this_decim, new_sfreq))
if new_sfreq not in sfreqs:
if len(sfreqs) > 0:
warnings.warn('resulting new sampling frequency %s not equal '
'to previous values %s' % (new_sfreq, sfreqs))
sfreqs.add(new_sfreq)
epochs_fnames, evoked_fnames = get_epochs_evokeds_fnames(p, subj,
analyses)
mat_file, fif_file = epochs_fnames
if isinstance(p.reject_epochs_by_annot, str):
reject_epochs_by_annot = True
reg = re.compile(p.reject_epochs_by_annot)
n_orig = sum(desc.lower().startswith('bad_')
for desc in raw.annotations.description)
mask = np.array([reg.match(desc) is not None
for desc in raw.annotations.description], bool)
            print(f'    Rejecting epochs using {mask.sum()} '
                  'annotation(s) selected via regex matching '
                  f'({n_orig} originally were BAD_ type)')
# remove the unwanted ones
raw.annotations.delete(np.where(~mask)[0])
for ii in range(len(raw.annotations)):
raw.annotations.description[ii] = 'BAD_REGEX'
else:
assert isinstance(p.reject_epochs_by_annot, bool)
reject_epochs_by_annot = p.reject_epochs_by_annot
if p.autoreject_thresholds:
assert len(p.autoreject_types) > 0
assert all(a in ('mag', 'grad', 'eeg', 'ecg', 'eog')
for a in p.autoreject_types)
from autoreject import get_rejection_threshold
picker = p.pick_events_autoreject
if type(picker) is str:
assert picker == 'restrict', \
'Only "restrict" is valid str for p.pick_events_autoreject'
events = _read_events(
p, subj, run_indices[si], raw, ratios, picker=picker)
print(' Computing autoreject thresholds', end='')
rtmin = p.reject_tmin if p.reject_tmin is not None else p.tmin
rtmax = p.reject_tmax if p.reject_tmax is not None else p.tmax
temp_epochs = Epochs(
raw, events, event_id=None, tmin=rtmin, tmax=rtmax,
baseline=_get_baseline(p), proj=True, reject=None,
flat=None, preload=True, decim=this_decim,
reject_by_annotation=reject_epochs_by_annot)
kwargs = dict()
if 'verbose' in get_args(get_rejection_threshold):
kwargs['verbose'] = False
new_dict = get_rejection_threshold(temp_epochs, **kwargs)
use_reject = dict()
msgs = list()
for k in p.autoreject_types:
msgs.append('%s=%d %s'
% (k, DEFAULTS['scalings'][k] * new_dict[k],
DEFAULTS['units'][k]))
use_reject[k] = new_dict[k]
print(': ' + ', '.join(msgs))
hdf5_file = fif_file.replace('-epo.fif', '-reject.h5')
assert hdf5_file.endswith('.h5')
write_hdf5(hdf5_file, use_reject, overwrite=True)
else:
use_reject = _handle_dict(p.reject, subj)
# read in events and create epochs
events = _read_events(p, subj, run_indices[si], raw, ratios,
picker='restrict')
if len(events) == 0:
raise ValueError('No valid events found')
flat = _handle_dict(p.flat, subj)
use_reject, use_flat = _restrict_reject_flat(use_reject, flat, raw)
epochs = Epochs(raw, events, event_id=old_dict, tmin=p.tmin,
tmax=p.tmax, baseline=_get_baseline(p),
reject=use_reject, flat=use_flat, proj=p.epochs_proj,
preload=True, decim=this_decim,
on_missing=p.on_missing,
reject_tmin=p.reject_tmin, reject_tmax=p.reject_tmax,
reject_by_annotation=reject_epochs_by_annot)
if epochs.events.shape[0] < 1:
_raise_bad_epochs(raw, epochs, events)
del raw
drop_logs.append(epochs.drop_log)
ch_namess.append(epochs.ch_names)
# only kept trials that were not dropped
sfreq = epochs.info['sfreq']
# now deal with conditions to save evoked
if p.disp_files:
print(' Matching trial counts and saving data to disk.')
for var, name in ((out_names, 'out_names'),
(out_numbers, 'out_numbers'),
(must_match, 'must_match'),
(evoked_fnames, 'evoked_fnames')):
if len(var) != len(analyses):
raise ValueError('len(%s) (%s) != len(analyses) (%s)'
% (name, len(var), len(analyses)))
for analysis, names, numbers, match, fn in zip(analyses, out_names,
out_numbers, must_match,
evoked_fnames):
# do matching
numbers = np.asanyarray(numbers)
nn = numbers[numbers >= 0]
new_numbers = []
for num in numbers:
if num > 0 and num not in new_numbers:
# Eventually we could relax this requirement, but not
# having it in place is likely to cause people pain...
if any(num < n for n in new_numbers):
                    raise RuntimeError('each list of new_numbers must be '
                                       'monotonically increasing')
new_numbers.append(num)
new_numbers = np.array(new_numbers)
in_names_match = list(in_names[match])
# use some variables to allow safe name re-use
offset = max(epochs.events[:, 2].max(), new_numbers.max()) + 1
safety_str = '__mnefun_copy__'
assert len(new_numbers) == len(names) # checked above
if p.match_fun is None:
e = None
else: # use custom matching
args = [epochs.copy(), analysis, nn, in_names_match, names]
if len(get_args(p.match_fun)) > 5:
args = args + [numbers]
e = p.match_fun(*args)
if e is None:
# first, equalize trial counts (this will make a copy)
e = epochs[list(in_names[numbers > 0])]
if len(in_names_match) > 1:
print(f' Equalizing: {in_names_match}')
e.equalize_event_counts(in_names_match)
# second, collapse relevant types
for num, name in zip(new_numbers, names):
collapse = [x for x in in_names[num == numbers]
if x in e.event_id]
combine_event_ids(e, collapse,
{name + safety_str: num + offset},
copy=False)
for num, name in zip(new_numbers, names):
e.events[e.events[:, 2] == num + offset, 2] -= offset
e.event_id[name] = num
del e.event_id[name + safety_str]
# now make evoked for each out type
evokeds = list()
n_standard = 0
kinds = ['standard']
if p.every_other:
kinds += ['even', 'odd']
for kind in kinds:
for name in names:
this_e = e[name]
if kind == 'even':
this_e = this_e[::2]
elif kind == 'odd':
this_e = this_e[1::2]
else:
assert kind == 'standard'
with use_log_level('error'):
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
ave = this_e.average(picks='all')
ave.comment = name
stde = this_e.standard_error(picks='all')
stde.comment = name
if kind != 'standard':
ave.comment += ' %s' % (kind,)
stde.comment += ' %s' % (kind,)
evokeds.append(ave)
evokeds.append(stde)
if kind == 'standard':
n_standard += 2
write_evokeds(fn, evokeds)
naves = [str(n) for n in sorted(set([
evoked.nave for evoked in evokeds[:n_standard]]))]
bad = [evoked.comment for evoked in evokeds[:n_standard:2]
if evoked.nave == 0]
if bad:
print(f' Got 0 epochs for: {bad}')
naves = ', '.join(naves)
if p.disp_files:
print(' Analysis "%s": %s epochs / condition'
% (analysis, naves))
if p.disp_files:
print(' Saving epochs to disk.')
if 'mat' in p.epochs_type:
spio.savemat(mat_file, dict(epochs=epochs.get_data(),
events=epochs.events, sfreq=sfreq,
drop_log=epochs.drop_log),
do_compression=True, oned_as='column')
if 'fif' in p.epochs_type:
epochs.save(fif_file, **_get_epo_kwargs())
if p.plot_drop_logs:
for subj, drop_log in zip(subjects, drop_logs):
plot_drop_log(drop_log, threshold=p.drop_thresh, subject=subj)
def _concat_resamp_raws(p, subj, fnames, fix='EOG', prebad=False,
preload=None):
raws = []
first_samps = []
last_samps = []
for raw_fname in fnames:
if prebad:
raw = _read_raw_prebad(p, subj, raw_fname, False)
else:
raw = read_raw_fif(
raw_fname, preload=False, allow_maxshield='yes')
raws.append(raw)
first_samps.append(raw._first_samps[0])
last_samps.append(raw._last_samps[-1])
del raw
assert len(raws) > 0
rates = np.array([r.info['sfreq'] for r in raws], float)
ratios = rates[0] / rates
assert rates.shape == (len(fnames),)
if not (ratios == 1).all():
if not p.allow_resample:
raise RuntimeError(
'Raw sample rates do not match, consider using '
f'params.allow_resample=True:\n{rates}')
for ri, (raw, ratio) in enumerate(zip(raws[1:], ratios[1:])):
if ratio != 1:
fr, to = raws[0].info['sfreq'], raw.info['sfreq']
print(f' Resampling raw {ri + 1}/{len(raws)} ({fr}→{to})')
raw.load_data().resample(raws[0].info['sfreq'])
_fix_raw_eog_cals(raws, fix) # safe b/c cov only needs MEEG
assert len(ratios) == len(fnames)
bads = raws[0].info['bads']
if prebad:
bads = sorted(set(sum((r.info['bads'] for r in raws), [])))
for r in raws:
r.info['bads'] = bads
raw = concatenate_raws(raws, preload=preload)
assert raw.info['bads'] == bads
return raw, ratios
def _raise_bad_epochs(raw, epochs, events, kind=None):
extra = '' if kind is None else f' of type {kind} '
plot_drop_log(epochs.drop_log)
raw.plot(events=events)
raise RuntimeError(
f'Only {len(epochs)}/{len(events)} good epochs found{extra}')
| ktavabi/mnefun | mnefun/_epoching.py | Python | bsd-3-clause | 15,977 |
import os
import sys
import re
import glob
import copy
import subprocess
"""
args:
- parallel: max number of parallel sessions mobatch will use. default=10.
- bin_path: path, if moshell/mobatch binaries are installed in a
non-standard location.
"""
class Amos:
def __init__(self, **kwargs):
self.bin_path = None
self.moshellbin = None
self.mobatchbin = None
self.parallel = 10
allowed = ('parallel', 'bin_path')
for k, v in kwargs.items():
            if k not in allowed:
raise KeyError("Invalid option-key: %s" % k)
setattr(self, k, v)
if not self.moshellbin:
try:
self.moshellbin = self.__amos_location(self.bin_path)
except:
raise RuntimeError('amos or moshell program not found')
if not self.mobatchbin:
try:
self.mobatchbin = self.__amosbatch_location(self.bin_path)
except:
raise RuntimeError('amosbatch or mobatch program not found')
"""
moshell()
send amos command to node, and get results
params:
node name (or ip address)
command string
optional keyword-args (valid amos optional variables only)
returns:
tuple (return-code[0 ok|1 fail], stdout text, stderr text)
"""
def moshell(self, node, cmd, **kwargs):
opts = self.__parse_kwargs(kwargs)
return self.__amos_runner(node, cmd, opts)
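    # A minimal usage sketch (hedged: the node name 'RBS01' and the command
    # string below are purely illustrative, not taken from this library's docs):
    #
    #   amos = Amos(parallel=5)
    #   rval, out, err = amos.moshell('RBS01', 'lt all; get security')
    #   if rval == 0:
    #       print(out)
    #   else:
    #       print(err)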
"""
mobatch()
send amosbatch(mobatch) commands to nodes, and get result logs.
WARNING! mobatch commands can take a very, very long time to complete,
    depending on the number of nodes and commands to be run. Commands run
    against thousands of nodes may take 6-10 hours (or more) to complete!
Also, using over 30 parallel sessions is not recommended.
params:
node list (or path to existing sitefile)
command string (or path to existing mos command file)
optional keyword-args (valid amos optional variables only)
returns:
a list-of-tuples. Each result tuple contains the following:
(node-name, exit-code, path-to-logfile)
"""
def mobatch(self, nodes, cmd, **kwargs):
opts = self.__parse_kwargs(kwargs)
sitefile = None
cmdfile = None
rmv_sitefile = False
rmv_cmdfile = False
if len(nodes) == 1:
# only one node? seems odd. possibly it is a sitefile?
if os.path.isfile(nodes[0]):
sitefile = nodes[0]
# write the sitefile if required
if not sitefile:
rmv_sitefile = True
sitefile = '/tmp/pymobatch.' + str(os.getpid()) + '.sitefile'
fh = open(sitefile, 'w')
for n in nodes:
fh.write(n + "\n")
fh.close()
# write amos commands to a file
if len(cmd) == 1 and os.path.isfile(cmd):
cmdfile = cmd
else:
rmv_cmdfile = True
cmdfile = '/tmp/pymobatch.' + str(os.getpid()) + '.mos'
fh = open(cmdfile, 'w')
atoms = cmd.split(';')
for a in atoms:
fh.write(a.strip() + "\n")
fh.close()
results = self.__amosbatch_runner(sitefile, cmdfile, opts)
if rmv_sitefile:
os.unlink(sitefile)
if rmv_cmdfile:
os.unlink(cmdfile)
return results
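    # A hedged sketch of batch usage (node names, commands and the log
    # directory are illustrative; 'logdir' is the custom option accepted by
    # __parse_kwargs below):
    #
    #   results = amos.mobatch(['RBS01', 'RBS02'], 'lt all; st cell',
    #                          logdir='/tmp/mobatch_logs')
    #   for node, rval, logfile in results:
    #       print(node, 'FAILED' if rval else logfile)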
"""
__amos_location()
PRIVATE
get full path to either the amos or moshell binary
params:
path to search(optional)
returns:
full path to binary | None
"""
def __amos_location(self, path):
loc = self.__find_possibles(('amos','moshell'), path)
if not loc:
raise
else:
return loc
"""
__amosbatch_location()
PRIVATE
get full path to either the amosbatch or mobatch binary
params:
path to search(optional)
returns:
full path to binary | None
"""
def __amosbatch_location(self, path):
loc = self.__find_possibles(('amosbatch','mobatch'), path)
if not loc:
raise
else:
return loc
"""
__find_possibles()
PRIVATE
return the first binary found from a list of possibles
params:
a list of binary names
a search path (optional)
returns:
full path to binary | None
"""
def __find_possibles(self, possibles, path):
if not possibles or len(possibles) < 1:
return None
if not path:
for p in possibles:
target = self.__which(p)
if target:
return target
else:
for p in possibles:
target = path + "/" + p
                if os.path.isfile(target) and os.access(target, os.X_OK):
return target
return None
"""
__which()
PRIVATE
duplicates function of unix 'which' command to find a program in the path
params:
a program name
returns:
full path to program | None
"""
def __which(self, program):
fpath, fname = os.path.split(program)
if fpath:
if os.path.isfile(program) and os.access(program, os.X_OK):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
return exe_file
return None
"""
__parse_kwargs()
PRIVATE
parse any amos options that were passed in, and filter out invalid options.
See Ericsson Advanced Moshell Scripting user guide for variable information.
params:
a dict
returns:
a dict
"""
def __parse_kwargs(self, kwargs):
if not kwargs:
return None
opts = copy.copy(kwargs)
valid = (
'amos_debug',
'ask_for_attribute_type',
'bldebset_confirmation',
'credential',
'commandlog_path',
'corba_class',
'csnotiflist',
'default_mom',
'del_confirmation',
'dontfollowlist',
'editor',
'fast_lh_threshold',
'fast_cab_threshold',
'ftp_port',
'followlist',
'ftp_timeout',
'http_port',
'inactivity_timeout',
'include_nonpm',
'ip_connection_timeout',
'ip_database',
'ip_inactivity_timeout',
'java_settings_high',
'java_settings_low',
'java_settings_medium',
'keepLmList',
'lt_confirmation',
'loginfo_print',
'logdir', # custom option, not E/// supported. see documentation
'muteFactor',
'nm_credential',
'node_login',
'print_lmid',
'PrintProxyLDN',
'PrintProxySilent',
'prompt_highlight',
'pm_wait',
'pm_logdir',
'sa_credential',
'sa_password',
'secure_ftp',
'secure_port',
'secure_shell',
'set_window_title',
'show_timestamp',
'telnet_port',
'transaction_timeout',
'username',
'xmlmomlist', )
for k, v in opts.items():
if k not in valid:
raise KeyError("Invalid option-key: %s" % k)
return opts
"""
__amos_runner()
PRIVATE
run a moshell/amos command subprocess against a specific node
params:
1. a node name or ipaddress
2. a command string
3. an option dict (optional)
returns:
        A tuple with three elements:
(return-code(0=ok, 1=fail), stdout, stderr)
"""
def __amos_runner(self, node, cmd, opts=None):
        v = None
script = [self.moshellbin]
logdir = None
if opts:
atoms = []
for k, v in opts.items():
if k == 'logdir':
logdir = v
continue
else:
atoms.append("=".join((k, str(v))))
v = "-v"
v += ",".join(atoms)
script.append(v)
if logdir:
script.append('-o')
script.append(logdir)
script.append(node)
script.append(cmd)
child = subprocess.Popen(script,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE )
output, errors = child.communicate()
return (child.returncode, output, errors)
"""
__amosbatch_runner()
PRIVATE
    run a moshell/amos command against several nodes in parallel.
    the result for a node is the path to the logfile containing the
    amos results for that node.
params:
1. a path to a sitefile
2. a command string
3. an option dict (optional)
returns:
A list of tuples:
[(node, rval, results-file), (node, rval, results-file)... ]
On error, returns an empty list
"""
def __amosbatch_runner(self, sitefile, cmdfile, opts=None):
        v = None
logdir = None
script = [self.mobatchbin]
script.append('-p')
script.append(str(self.parallel))
if opts:
atoms = []
for k, v in opts.items():
if k == 'logdir':
logdir = v
continue
else:
atoms.append("=".join((k, str(v))))
v = "-v"
v += ",".join(atoms)
script.append(v)
if logdir:
script.append('-o')
script.append(logdir)
script.append(sitefile)
script.append(cmdfile)
child = subprocess.Popen(script,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE )
output, errors = child.communicate()
if child.returncode:
sys.stderr.write(errors)
return []
# find results of all the logfiles
for line in output.splitlines():
match = re.match(r'Logfiles stored in\s+(.+)', line)
if match:
return self.__amosbatch_result_parser(match.group(1))
raise RuntimeError('could not find amosbatch result path from results')
"""
__amosbatch_result_parser()
PRIVATE
Parse the directory contents of an amosbatch results dir
params:
a path to the amosbatch log dir
returns:
A list of tuples:
[(node, rval, results), (node, rval, results)... ]
"""
def __amosbatch_result_parser(self, path):
results = []
# find results text log, and pull out any nodes that failed to connect
rlog = glob.glob(path + '/*result.txt')[0]
if not rlog:
raise RuntimeError('amosbatch results file not found in ' + path)
nocontact = self.__amosbatch_nocontact_nodes(rlog)
for n in nocontact:
results.append((n, 1, 'no contact'))
# store the path for each node output log
logs = glob.glob(path + '/*log')
for log in logs:
nodename = None
match = re.match(r'^.+/(\S+)\.log', log)
if match:
node = match.group(1)
if node in nocontact:
continue
results.append((node, 0, log))
return results
"""
__amosbatch_nocontact_nodes()
PRIVATE
Parse amosbatch results.txt log for any nodes that could not be reached
params:
a path to the amosbatch results text file
returns:
A list of node names.
An empty list is returned if nothing is found
"""
def __amosbatch_nocontact_nodes(self, fname):
results = []
"""
Look for lines like this:
OK 0m13s PSLEeNB04
OK 0m13s PSLEeNB02
no contact 0m15s PSLEeNB01
"""
fh = open(fname, 'r+')
for line in fh.readlines():
match = re.match(r'^\s*no contact\s+\S+\s+(\S+)\s*$', line)
if match:
results.append(match.group(1))
fh.close()
return results
| jeffleary00/amos | amoshell/amoshell.py | Python | bsd-3-clause | 12,968 |
from minishift.font import font
class Interface(object):
"""An ABC for Minishift interfaces."""
def send(self, data):
"""Sends data to the minishift."""
raise NotImplementedError
class MCP2210Interface(Interface):
"""An interface implementation that communicates over the MCP2210 USB-SPI interface."""
def __init__(self, vid=0x04d8, pid=0xf517):
"""Constructor.
Arguments:
vid: Integer. Vendor ID.
pid: Integer. Product ID.
"""
import mcp2210
self.device = mcp2210.MCP2210(vid, pid)
def send(self, data):
self.device.transfer(data)
class Canvas(object):
"""Represents a canvas onto which images and text can be drawn.
Canvases are assumed to be 8 pixels high.
"""
def __init__(self, size=None):
"""Constructor.
Arguments:
size: integer. The width of the canvas. If not supplied, an 'infinite' canvas is
created, which expands in size to accommodate whatever is written to it.
"""
self.size = size
self._wrap = False
if size:
self._data = [0] * size
else:
self._data = []
@property
def wrap(self):
"""Whether writes should wrap from the end of the display back to the beginning.
Only valid for fixed-size canvases.
"""
return self._wrap
@wrap.setter
def wrap(self, value):
self._wrap = bool(value)
def _getxy(self, idx):
if isinstance(idx, tuple):
x, y = idx
else:
x, y = idx, None
if x >= len(self._data):
if not self.size:
self._data.extend([0] * (x - len(self._data) + 1))
elif self._wrap:
x %= self.size
else:
raise IndexError()
elif x < 0:
raise IndexError()
if y is not None and (y < 0 or y >= 8):
raise IndexError()
return x, y
def __getitem__(self, idx):
"""Gets the value of a column or a single pixel from the canvas.
>>> canvas[x] # Returns a byte representing the specified column
>>> canvas[x, y] # Returns an integer representing the specified pixel
"""
x, y = self._getxy(idx)
if y is None:
return self._data[x]
else:
return (self._data[x] >> y) & 1
def __setitem__(self, idx, value):
"""Sets the value of a column or a single pixel on the canvas.
>>> canvas[x] = value # Sets a column
>>> canvas[x, y] = 1 # Sets a pixel
"""
x, y = self._getxy(idx)
if y is None:
self._data[x] = value
elif value:
self._data[x] |= 1 << y
else:
self._data[x] &= ~(1 << y)
def write_text(self, x, text):
"""Writes a string of text to the canvas.
Arguments:
x: Start column
text: Text to write
Returns:
The index of the first column after the text.
"""
for char in text:
x = self.write_char(x, char)
self[x] = 0
x += 1
return x
def write_char(self, x, char):
"""Writes a single character to the canvas.
Arguments:
x: Start column
text: Character to write
Returns:
The index of the first column after the character.
"""
for col in font[ord(char)]:
if char != ' ' and col == 0:
continue
self[x] = col
x += 1
return x
def scroll(self):
"""Returns an iterator that facilitates producing a scrolling display.
Example:
>>> for col in canvas.scroll():
... minishift.update(col)
... time.sleep(0.05)
"""
for x in range(len(self._data)):
canvas = Canvas(1)
canvas[0] = self[x]
yield canvas
def to_bytes(self):
"""Returns a text representation of the canvas, suitable for sending to the minishift."""
return ''.join(chr(x) for x in self._data)
class Minishift(object):
"""Interface for working with a chain of minishifts."""
def __init__(self, interface, width):
"""Constructor.
Arguments:
interface: An instance of Interface for Minishift communication.
width: The width of the Minishift array.
"""
self.interface = interface
self.width = width
self._canvas = Canvas(width)
@property
def canvas(self):
"""The built-in canvas."""
return self._canvas
@canvas.setter
def canvas(self, canvas):
self._canvas = canvas
def update(self, canvas=None):
"""Updates the minishift with a canvas image.
Arguments:
canvas: Optional. If supplied, draw the specified canvas to the minishift.
Otherwise, draw the built-in canvas.
"""
if not canvas:
canvas = self.canvas
self.interface.send(canvas.to_bytes())
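# A hedged end-to-end sketch (assumes an MCP2210-attached chain that is 8
# columns wide; adjust the width and timing for real hardware):
#
#   import time
#   ms = Minishift(MCP2210Interface(), width=8)
#   message = Canvas()             # size-less canvas grows with the text
#   message.write_text(0, 'Hello')
#   for frame in message.scroll():
#       ms.update(frame)
#       time.sleep(0.05)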
| arachnidlabs/minishift-python | minishift/draw.py | Python | bsd-3-clause | 5,183 |
"""Performs a number of path mutation and monkey patching operations which are
required for Olympia to start up correctly.
This is imported into manage.py and wsgi.py.
This is executed when celery starts up by way of `bin/celery --app=olympia`
where `olympia` is this module.
This docstring will probably be wrong by the time you read it.
"""
import logging
import os
import warnings
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
log = logging.getLogger('z.startup')
def filter_warnings():
"""Ignore Python warnings unless we're running in debug mode."""
# Do not import this from the top-level. It depends on set-up from the
# functions above.
from django.conf import settings
if not settings.DEBUG:
warnings.simplefilter('ignore')
def init_session_csrf():
"""Load the `session_csrf` module and enable its monkey patches to
Django's CSRF middleware."""
import session_csrf
session_csrf.monkeypatch()
def init_jinja2():
"""Monkeypatch jinja2's Markup class to handle errors from bad `%` format
operations, due to broken strings from localizers."""
from jinja2 import Markup
mod = Markup.__mod__
trans_log = logging.getLogger('z.trans')
def new(self, arg):
try:
return mod(self, arg)
except Exception:
trans_log.error(unicode(self))
return ''
Markup.__mod__ = new
def init_jingo():
"""Load Jingo and trigger its Django monkey patches, so it supports the
`__html__` protocol used by Jinja2 and MarkupSafe."""
import jingo.monkey
jingo.monkey.patch()
def init_amo():
"""Load the `amo` module.
Waffle and amo form an import cycle because amo patches waffle and waffle
loads the user model, so we have to make sure amo gets imported before
anything else imports waffle."""
global amo
amo = __import__('olympia.amo')
def configure_logging():
"""Configure the `logging` module to route logging based on settings
in our various settings modules and defaults in `lib.log_settings_base`."""
from olympia.lib.log_settings_base import log_configure
log_configure()
def load_product_details():
"""Fetch product details, if we don't already have them."""
from product_details import product_details
if not product_details.last_update:
from django.core.management import call_command
log.info('Product details missing, downloading...')
call_command('update_product_details')
product_details.__init__() # reload the product details
filter_warnings()
init_session_csrf()
init_jinja2()
init_amo()
configure_logging()
init_jingo()
load_product_details()
| jpetto/olympia | src/olympia/startup.py | Python | bsd-3-clause | 2,705 |
#!/usr/bin/env python
# Author Michele Mattioni
# Fri Apr 9 11:35:29 BST 2010
"""Main script to start Neuronvisio"""
from neuronvisio.controls import Controls
import sys
if __name__ == '__main__':
controls = Controls()
if len(sys.argv) == 2:
controls.load(sys.argv[1])
| mattions/TimeScales | nrnvisio.py | Python | bsd-3-clause | 309 |
import unittest
import asyncio
import pulsar
class Context:
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self._result = value
return True
class TestApi(unittest.TestCase):
@asyncio.coroutine
def test_with_statement(self):
with Context() as c:
yield None
yield None
raise ValueError
self.assertIsInstance(c._result, ValueError)
def test_get_proxy(self):
self.assertRaises(ValueError, pulsar.get_proxy, 'shcbjsbcjcdcd')
self.assertEqual(pulsar.get_proxy('shcbjsbcjcdcd', safe=True), None)
def test_bad_concurrency(self):
        # 'bla' concurrency does not exist
return self.async.assertRaises(ValueError, pulsar.spawn, kind='bla')
def test_actor_coverage(self):
'''test case for coverage'''
return self.async.assertRaises(pulsar.CommandNotFound,
pulsar.send, 'arbiter',
'sjdcbhjscbhjdbjsj', 'bla')
| dejlek/pulsar | tests/async/api.py | Python | bsd-3-clause | 1,062 |
#!/usr/bin/env python
import jip.utils as utils
import pytest
@pytest.mark.parametrize('data', [60, '1h', '60m', '3600s', "01:00:00"])
def test_parse_time_minutes(data):
assert utils.parse_time(data) == 60
@pytest.mark.parametrize('data', [90, '1h30m', '90m', '30m3600s', '1:30'])
def test_parse_time_timestamp(data):
assert utils.parse_time(data) == 90
@pytest.mark.parametrize('data', [1024, '1g', '1G', '1048576k', '1048576K',
"1024m", "1024M"])
def test_parse_mem(data):
assert utils.parse_mem(data) == 1024
| thasso/pyjip | test/test_utils.py | Python | bsd-3-clause | 564 |
from pulsar.utils.structures import inverse_mapping
ASYNC_IO = 1 # run in the worker event loop
THREAD_IO = 3 # run in the event loop executor
CPUBOUND = 4 # run in a subprocess
concurrency = {'asyncio': ASYNC_IO,
'thread': THREAD_IO,
'process': CPUBOUND}
concurrency_name = dict(inverse_mapping(concurrency))
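# Round-trip between the two mappings (assuming inverse_mapping simply swaps
# keys and values, as its name suggests):
#
#   concurrency['thread']        # -> THREAD_IO (3)
#   concurrency_name[THREAD_IO]  # -> 'thread'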
| quantmind/pulsar-queue | pq/utils/concurrency.py | Python | bsd-3-clause | 365 |
# -*- coding: utf-8 -*-
"""Defines fixtures available to all tests."""
import pytest
from webtest import TestApp
from chamberlain.app import create_app
from chamberlain.database import db as _db
from chamberlain.settings import TestConfig
from .factories import UserFactory
@pytest.yield_fixture(scope='function')
def app():
"""An application for the tests."""
_app = create_app(TestConfig)
ctx = _app.test_request_context()
ctx.push()
yield _app
ctx.pop()
@pytest.fixture(scope='function')
def testapp(app):
"""A Webtest app."""
return TestApp(app)
@pytest.yield_fixture(scope='function')
def db(app):
"""A database for the tests."""
_db.app = app
with app.app_context():
_db.create_all()
yield _db
_db.drop_all()
@pytest.fixture
def user(db):
"""A user for the tests."""
user = UserFactory(password='myprecious')
db.session.commit()
return user
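# A hedged sketch of a test module consuming these fixtures (the route, form
# fields and user attributes below are hypothetical, not taken from the app):
#
#   def test_log_in(testapp, user):
#       res = testapp.get('/login/')
#       form = res.forms['loginForm']
#       form['username'] = user.username
#       form['password'] = 'myprecious'
#       assert form.submit().status_code == 302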
| sean-abbott/chamberlain | tests/conftest.py | Python | bsd-3-clause | 938 |
import collections
import six
from ..compat \
import \
OrderedDict
from ..bundled.traitlets \
import \
HasTraits, Dict, Instance
from ..errors \
import \
DepSolverError
from ..pool \
import \
Pool
Decision = collections.namedtuple("Decision", ["literal", "reason"])
class DecisionsSet(HasTraits):
"""A DecisionsSet instance keeps track of decided literals (and the
rational for each decision), and can infer new literals depending on
their type."""
pool = Instance(Pool)
# Package id -> decision level mapping
_decision_map = Instance(OrderedDict)
# Queue of Decision instances
_decision_queue = Instance(collections.deque)
@property
def last_literal(self):
return self._decision_queue[-1].literal
@property
def last_reason(self):
return self._decision_queue[-1].reason
def __init__(self, pool, **kw):
super(DecisionsSet, self).__init__(self, pool=pool, **kw)
self._decision_map = OrderedDict()
self._decision_queue = collections.deque()
def decide(self, literal, level, why):
"""
Add the given literal to the decision set at the given level.
Parameters
----------
literal: int
Package id
level: int
Level
why: str
            Rationale for the decision
"""
self._add_decision(literal, level)
self._decision_queue.append(Decision(literal, why))
def satisfy(self, literal):
"""
        Return True if the given literal is satisfied.
"""
package_id = abs(literal)
positive_case = literal > 0 and package_id in self._decision_map \
and self._decision_map[package_id] > 0
negative_case = literal < 0 and package_id in self._decision_map \
and self._decision_map[package_id] < 0
return positive_case or negative_case
def conflict(self, literal):
"""
Return True if the given literal conflicts with the decision set.
"""
package_id = abs(literal)
positive_case = literal > 0 and package_id in self._decision_map \
and self._decision_map[package_id] < 0
negative_case = literal < 0 and package_id in self._decision_map \
and self._decision_map[package_id] > 0
return positive_case or negative_case
def is_decided(self, literal):
"""
Return True if the given literal has been decided at any level.
"""
return self._decision_map.get(abs(literal), 0) != 0
def is_undecided(self, literal):
"""
Return True if the given literal has not been decided at any level.
"""
return self._decision_map.get(abs(literal), 0) == 0
def is_decided_install(self, literal):
package_id = abs(literal)
return self._decision_map.get(package_id, 0) > 0
def decision_level(self, literal):
"""
Returns the decision level of the given literal.
If the literal is not decided yet, returns 0.
"""
package_id = abs(literal)
if package_id in self._decision_map:
return abs(self._decision_map[package_id])
else:
return 0
def at_offset(self, offset):
return self._decision_queue[offset]
def is_offset_valid(self, offset):
return offset >= 0 and offset < len(self._decision_queue)
def revert_last(self):
self._decision_map[abs(self.last_literal)] = 0
self._decision_queue.pop()
#------------
# Private API
#------------
def _add_decision(self, literal, level):
package_id = abs(literal)
if package_id in self._decision_map:
previous_decision = self._decision_map[package_id]
literal_string = self.pool.id_to_string(package_id)
package = self.pool.package_by_id(package_id)
raise DepSolverError("Trying to decide %s on level %d, even though "
"%s was previously decided as %d" % (literal_string, level,
package, previous_decision))
else:
if literal > 0:
self._decision_map[package_id] = level
else:
self._decision_map[package_id] = -level
#------------------
# Sequence protocol
#------------------
def __contains__(self, literal):
return literal in self._decision_map
def __len__(self):
return len(self._decision_queue)
#------------------
# Iterator protocol
#------------------
# We return a reversed iterator to follow composer behavior, not sure about
# the rationale
def __iter__(self):
return reversed(self._decision_queue)
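# A hedged usage sketch (package ids are hypothetical; positive literals mean
# "install", negative literals mean "do not install"):
#
#   decisions = DecisionsSet(pool)          # pool: a configured depsolver Pool
#   decisions.decide(1, level=1, why='root requirement')
#   decisions.decide(-2, level=2, why='conflicts with package 1')
#   assert decisions.satisfy(1) and decisions.conflict(2)
#   assert decisions.decision_level(2) == 2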
| enthought/depsolver | depsolver/solver/decisions.py | Python | bsd-3-clause | 4,814 |
from mozdns.api.tests.basic import *
| rtucker-mozilla/mozilla_inventory | mozdns/api/tests/all.py | Python | bsd-3-clause | 37 |
#!/usr/bin/env python3
#
# Tests the easy optimisation methods fmin and curve_fit.
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import pints
import unittest
import numpy as np
class TestEasyOptimisation(unittest.TestCase):
"""
Tests the easy optimisation methods fmin and curve_fit.
"""
def test_fmin(self):
# Tests :meth:`pints.fmin()`.
# Note: This just wraps around `OptimisationController`, so testing
# done here is for wrapper code, not main functionality!
# Basic test
np.random.seed(1)
xopt, fopt = pints.fmin(f, [1, 1], method=pints.XNES)
self.assertAlmostEqual(xopt[0], 3)
self.assertAlmostEqual(xopt[1], -5)
# Function must be callable
self.assertRaisesRegex(ValueError, 'callable', pints.fmin, 3, [1])
# Test with boundaries
xopt, fopt = pints.fmin(
f, [1, 1], boundaries=([-10, -10], [10, 10]), method=pints.SNES)
self.assertAlmostEqual(xopt[0], 3)
self.assertAlmostEqual(xopt[1], -5)
# Test with extra arguments
def g(x, y, z):
return (x[0] - 3) ** 2 + (x[1] + 5) ** 2 + y / z
xopt, fopt = pints.fmin(g, [1, 1], args=[1, 2], method=pints.XNES)
self.assertAlmostEqual(xopt[0], 3)
self.assertAlmostEqual(xopt[1], -5)
# Test with parallelisation
pints.fmin(f, [1, 1], parallel=True, method=pints.XNES)
def test_curve_fit(self):
# Tests :meth:`pints.curve_fit()`.
# Note: This just wraps around `OptimisationController`, so testing
# done here is for wrapper code, not main functionality!
np.random.seed(1)
# Basic test
x = np.linspace(-5, 5, 100)
e = np.random.normal(loc=0, scale=0.1, size=x.shape)
y = g(x, 9, 3, 1) + e
p0 = [0, 0, 0]
np.random.seed(1)
popt, fopt = pints.curve_fit(g, x, y, p0, method=pints.XNES)
self.assertAlmostEqual(popt[0], 9, places=1)
self.assertAlmostEqual(popt[1], 3, places=1)
self.assertAlmostEqual(popt[2], 1, places=1)
# Function must be callable
self.assertRaisesRegex(
ValueError, 'callable', pints.curve_fit, 3, x, y, p0)
# Test with boundaries
popt, fopt = pints.curve_fit(
g, x, y, p0,
boundaries=([-10, -10, -10], [10, 10, 10]), method=pints.XNES)
self.assertAlmostEqual(popt[0], 9, places=1)
self.assertAlmostEqual(popt[1], 3, places=1)
self.assertAlmostEqual(popt[2], 1, places=1)
# Test with parallelisation
pints.curve_fit(g, x, y, p0, parallel=True, method=pints.XNES)
# Test with invalid sizes of `x` and `y`
x = np.linspace(-5, 5, 99)
self.assertRaisesRegex(
ValueError, 'dimension', pints.curve_fit, g, x, y, p0)
def f(x):
""" Pickleable test function. """
return (x[0] - 3) ** 2 + (x[1] + 5) ** 2
def g(x, a, b, c):
""" Pickleable test function. """
return a + b * x + c * x ** 2
if __name__ == '__main__':
unittest.main()
| martinjrobins/hobo | pints/tests/test_opt_easy_optimisation.py | Python | bsd-3-clause | 3,247 |
import json
import logging
import os
import re
import requests
import traceback
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import Permission
from django.http import HttpResponse
from django.utils import timezone
from django.utils.translation import trans_real
from translate.filters import checks
from translate.storage import base as storage_base
from translate.storage.placeables import base, general, parse
from translate.storage.placeables.interfaces import BasePlaceable
from translate.lang import data as lang_data
log = logging.getLogger('pontoon')
def add_can_localize(user):
email = user.email
log.debug(email)
# Grant permission to Mozilla localizers
url = "https://mozillians.org/api/v1/users/"
payload = {
"app_name": "pontoon",
"app_key": settings.MOZILLIANS_API_KEY,
"email": email,
"is_vouched": True,
"groups": "localization",
}
try:
response = requests.get(url, params=payload)
mozillians = response.json()["objects"]
if len(mozillians) > 0:
can_localize = Permission.objects.get(codename="can_localize")
user.user_permissions.add(can_localize)
log.debug("Permission can_localize set.")
# Fallback if profile does not allow accessing data
user.first_name = mozillians[0].get("full_name", email)
user.save()
except Exception as e:
log.debug(e)
log.debug("Is your MOZILLIANS_API_KEY set?")
user.save()
def get_project_locale_from_request(request, locales):
"""Get Pontoon locale from Accept-language request header."""
header = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
accept = trans_real.parse_accept_lang_header(header)
for a in accept:
try:
return locales.get(code__iexact=a[0]).code
except:
continue
class NewlineEscapePlaceable(base.Ph):
"""Placeable handling newline escapes."""
istranslatable = False
regex = re.compile(r'\\n')
parse = classmethod(general.regex_parse)
class TabEscapePlaceable(base.Ph):
"""Placeable handling tab escapes."""
istranslatable = False
regex = re.compile(r'\t')
parse = classmethod(general.regex_parse)
class EscapePlaceable(base.Ph):
"""Placeable handling escapes."""
istranslatable = False
regex = re.compile(r'\\')
parse = classmethod(general.regex_parse)
class SpacesPlaceable(base.Ph):
"""Placeable handling spaces."""
istranslatable = False
regex = re.compile('^ +| +$|[\r\n\t] +| {2,}')
parse = classmethod(general.regex_parse)
def mark_placeables(text):
"""Wrap placeables to easily distinguish and manipulate them.
Source: http://bit.ly/1yQOC9B
"""
PARSERS = [
NewlineEscapePlaceable.parse,
TabEscapePlaceable.parse,
EscapePlaceable.parse,
# The spaces placeable can match '\n ' and mask the newline,
# so it has to come later.
SpacesPlaceable.parse,
general.XMLTagPlaceable.parse,
general.AltAttrPlaceable.parse,
general.XMLEntityPlaceable.parse,
general.PythonFormattingPlaceable.parse,
general.JavaMessageFormatPlaceable.parse,
general.FormattingPlaceable.parse,
# The Qt variables can consume the %1 in %1$s which will mask a printf
# placeable, so it has to come later.
general.QtFormattingPlaceable.parse,
general.UrlPlaceable.parse,
general.FilePlaceable.parse,
general.EmailPlaceable.parse,
general.CapsPlaceable.parse,
general.CamelCasePlaceable.parse,
general.OptionPlaceable.parse,
general.PunctuationPlaceable.parse,
general.NumberPlaceable.parse,
]
TITLES = {
'NewlineEscapePlaceable': "Escaped newline",
'TabEscapePlaceable': "Escaped tab",
'EscapePlaceable': "Escaped sequence",
'SpacesPlaceable': "Unusual space in string",
'AltAttrPlaceable': "'alt' attribute inside XML tag",
'NewlinePlaceable': "New-line",
'NumberPlaceable': "Number",
'QtFormattingPlaceable': "Qt string formatting variable",
'PythonFormattingPlaceable': "Python string formatting variable",
'JavaMessageFormatPlaceable': "Java Message formatting variable",
'FormattingPlaceable': "String formatting variable",
'UrlPlaceable': "URI",
'FilePlaceable': "File location",
'EmailPlaceable': "Email",
'PunctuationPlaceable': "Punctuation",
'XMLEntityPlaceable': "XML entity",
'CapsPlaceable': "Long all-caps string",
'CamelCasePlaceable': "Camel case string",
'XMLTagPlaceable': "XML tag",
'OptionPlaceable': "Command line option",
}
output = u""
# Get a flat list of placeables and StringElem instances
flat_items = parse(text, PARSERS).flatten()
for item in flat_items:
# Placeable: mark
if isinstance(item, BasePlaceable):
class_name = item.__class__.__name__
placeable = unicode(item)
# CSS class used to mark the placeable
css = {
'TabEscapePlaceable': "escape ",
'EscapePlaceable': "escape ",
'SpacesPlaceable': "space ",
'NewlinePlaceable': "escape ",
}.get(class_name, "")
title = TITLES.get(class_name, "Unknown placeable")
spaces = ' ' * len(placeable)
if not placeable.startswith(' '):
spaces = placeable[0] + ' ' * (len(placeable) - 1)
# Correctly render placeables in translation editor
content = {
'TabEscapePlaceable': u'\\t',
'EscapePlaceable': u'\\',
'SpacesPlaceable': spaces,
'NewlinePlaceable': {
u'\r\n': u'\\r\\n<br/>\n',
u'\r': u'\\r<br/>\n',
u'\n': u'\\n<br/>\n',
}.get(placeable),
                'XMLEntityPlaceable': placeable.replace('&', '&amp;'),
                'XMLTagPlaceable':
                    placeable.replace('<', '&lt;').replace('>', '&gt;'),
}.get(class_name, placeable)
output += ('<mark class="%splaceable" title="%s">%s</mark>') \
% (css, title, content)
# Not a placeable: skip
else:
            output += unicode(item).replace('<', '&lt;').replace('>', '&gt;')
return output
def quality_check(original, string, locale, ignore):
"""Check for obvious errors like blanks and missing interpunction."""
if not ignore:
original = lang_data.normalized_unicode(original)
string = lang_data.normalized_unicode(string)
unit = storage_base.TranslationUnit(original)
unit.target = string
checker = checks.StandardChecker(
checkerconfig=checks.CheckerConfig(targetlanguage=locale.code))
warnings = checker.run_filters(unit)
if warnings:
# https://github.com/translate/pootle/
check_names = {
'accelerators': 'Accelerators',
'acronyms': 'Acronyms',
'blank': 'Blank',
'brackets': 'Brackets',
'compendiumconflicts': 'Compendium conflict',
'credits': 'Translator credits',
'doublequoting': 'Double quotes',
'doublespacing': 'Double spaces',
'doublewords': 'Repeated word',
'emails': 'E-mail',
'endpunc': 'Ending punctuation',
'endwhitespace': 'Ending whitespace',
'escapes': 'Escapes',
'filepaths': 'File paths',
'functions': 'Functions',
'gconf': 'GConf values',
'kdecomments': 'Old KDE comment',
'long': 'Long',
'musttranslatewords': 'Must translate words',
'newlines': 'Newlines',
'nplurals': 'Number of plurals',
'notranslatewords': 'Don\'t translate words',
'numbers': 'Numbers',
'options': 'Options',
'printf': 'printf()',
'puncspacing': 'Punctuation spacing',
'purepunc': 'Pure punctuation',
'sentencecount': 'Number of sentences',
'short': 'Short',
'simplecaps': 'Simple capitalization',
'simpleplurals': 'Simple plural(s)',
'singlequoting': 'Single quotes',
'startcaps': 'Starting capitalization',
'startpunc': 'Starting punctuation',
'startwhitespace': 'Starting whitespace',
'tabs': 'Tabs',
'unchanged': 'Unchanged',
'untranslated': 'Untranslated',
'urls': 'URLs',
'validchars': 'Valid characters',
'variables': 'Placeholders',
'xmltags': 'XML tags',
}
warnings_array = []
for key in warnings.keys():
warning = check_names.get(key, key)
warnings_array.append(warning)
return HttpResponse(json.dumps({
'warnings': warnings_array,
}), content_type='application/json')
def req(method, project, resource, locale,
username, password, payload=False):
"""
Make request to Transifex server.
Args:
method: Request method
project: Transifex project name
resource: Transifex resource name
locale: Locale code
username: Transifex username
password: Transifex password
payload: Data to be sent to the server
Returns:
A server response or error message.
"""
url = os.path.join(
'https://www.transifex.com/api/2/project/', project,
'resource', resource, 'translation', locale, 'strings')
try:
if method == 'get':
r = requests.get(
url + '?details', auth=(username, password), timeout=10)
elif method == 'put':
r = requests.put(url, auth=(username, password), timeout=10,
data=json.dumps(payload),
headers={'content-type': 'application/json'})
log.debug(r.status_code)
if r.status_code == 401:
return "authenticate"
elif r.status_code != 200:
log.debug("Response not 200")
return "error"
return r
# Network problem (DNS failure, refused connection, etc.)
except requests.exceptions.ConnectionError as e:
log.debug('ConnectionError: ' + str(e))
return "error"
# Invalid HTTP response
except requests.exceptions.HTTPError as e:
log.debug('HTTPError: ' + str(e))
return "error"
# A valid URL is required
    except requests.exceptions.URLRequired as e:
log.debug('URLRequired: ' + str(e))
return "error"
# Request times out
except requests.exceptions.Timeout as e:
log.debug('Timeout: ' + str(e))
return "error"
# Request exceeds the number of maximum redirections
except requests.exceptions.TooManyRedirects as e:
log.debug('TooManyRedirects: ' + str(e))
return "error"
    # Ambiguous exception occurs
except requests.exceptions.RequestException as e:
log.debug('RequestException: ' + str(e))
return "error"
except Exception:
log.debug('Generic exception: ' + traceback.format_exc())
return "error"
def first(collection, test, default=None):
"""
Return the first item that, when passed to the given test function,
returns True. If no item passes the test, return the default value.
"""
return next((c for c in collection if test(c)), default)
def match_attr(collection, **attributes):
"""
Return the first item that has matching values for the given
attributes, or None if no item is found to match.
"""
return first(
collection,
lambda i: all(getattr(i, attrib) == value
for attrib, value in attributes.items()),
default=None
)
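# A small illustration of the two helpers above, using a throwaway namedtuple
# rather than a real Pontoon model:
#
#   from collections import namedtuple
#   Locale = namedtuple('Locale', ['code', 'name'])
#   locales = [Locale('de', 'German'), Locale('sl', 'Slovenian')]
#   first(locales, lambda l: l.code == 'sl')       # Locale('sl', 'Slovenian')
#   match_attr(locales, code='de', name='German')  # Locale('de', 'German')
#   match_attr(locales, code='fr')                 # None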
def aware_datetime(*args, **kwargs):
"""Return an aware datetime using Django's configured timezone."""
return timezone.make_aware(datetime(*args, **kwargs))
def extension_in(filename, extensions):
"""
Check if the extension for the given filename is in the list of
allowed extensions. Uses os.path.splitext rules for getting the
extension.
"""
filename, extension = os.path.splitext(filename)
if extension and extension[1:] in extensions:
return True
else:
return False
| yfdyh000/pontoon | pontoon/base/utils.py | Python | bsd-3-clause | 12,890 |
"""Code for utility tools."""
# Author: Johann Faouzi <[email protected]>
# License: BSD-3-Clause
import numpy as np
from numpy.lib.stride_tricks import as_strided
from numba import njit
from sklearn.utils import check_array
def segmentation(ts_size, window_size, overlapping=False, n_segments=None):
"""Compute the indices for Piecewise Agrgegate Approximation.
Parameters
----------
ts_size : int
The size of the time series.
window_size : int
The size of the window.
overlapping : bool (default = False)
If True, overlapping windows may be used. If False, non-overlapping
are used.
n_segments : int or None (default = None)
The number of windows. If None, the number is automatically
computed using ``window_size``.
Returns
-------
start : array
The lower bound for each window.
end : array
The upper bound for each window.
size : int
The size of ``start``.
Examples
--------
>>> from pyts.utils import segmentation
>>> start, end, size = segmentation(ts_size=12, window_size=3)
>>> print(start)
[0 3 6 9]
>>> print(end)
[ 3 6 9 12]
>>> size
4
"""
if not isinstance(ts_size, (int, np.integer)):
raise TypeError("'ts_size' must be an integer.")
if not ts_size >= 2:
raise ValueError("'ts_size' must be an integer greater than or equal "
"to 2 (got {0}).".format(ts_size))
if not isinstance(window_size, (int, np.integer)):
raise TypeError("'window_size' must be an integer.")
if not window_size >= 1:
raise ValueError("'window_size' must be an integer greater than or "
"equal to 1 (got {0}).".format(window_size))
if not window_size <= ts_size:
raise ValueError("'window_size' must be lower than or equal to "
"'ts_size' ({0} > {1}).".format(window_size, ts_size))
if not (n_segments is None or isinstance(n_segments, (int, np.integer))):
raise TypeError("'n_segments' must be None or an integer.")
if isinstance(n_segments, (int, np.integer)):
if not n_segments >= 2:
raise ValueError(
"If 'n_segments' is an integer, it must be greater than or "
"equal to 2 (got {0}).".format(n_segments)
)
if not n_segments <= ts_size:
raise ValueError(
"If 'n_segments' is an integer, it must be lower than or "
"equal to 'ts_size' ({0} > {1}).".format(n_segments, ts_size)
)
if n_segments is None:
quotient, remainder = divmod(ts_size, window_size)
n_segments = quotient if remainder == 0 else quotient + 1
if not overlapping:
bounds = np.linspace(0, ts_size, n_segments + 1).astype('int64')
start = bounds[:-1]
end = bounds[1:]
size = start.size
return start, end, size
else:
n_overlapping = (n_segments * window_size) - ts_size
n_overlaps = n_segments - 1
overlaps = np.linspace(0, n_overlapping,
n_overlaps + 1).astype('int64')
bounds = np.arange(0, (n_segments + 1) * window_size, window_size)
start = bounds[:-1] - overlaps
end = bounds[1:] - overlaps
size = start.size
return start, end, size
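# A hedged doctest-style example of the overlapping branch above (the expected
# values are derived by tracing the implementation, so treat them as a sketch):
#
#   >>> start, end, size = segmentation(ts_size=10, window_size=3,
#   ...                                 overlapping=True, n_segments=4)
#   >>> print(start)
#   [0 3 5 7]
#   >>> print(end)
#   [ 3  6  8 10]
#   >>> size
#   4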
@njit()
def _windowed_view(X, n_samples, n_timestamps, window_size, window_step):
overlap = window_size - window_step
shape_new = (n_samples,
(n_timestamps - overlap) // window_step,
window_size // 1)
s0, s1 = X.strides
strides_new = (s0, window_step * s1, s1)
return as_strided(X, shape=shape_new, strides=strides_new)
def windowed_view(X, window_size, window_step=1):
"""Return a windowed view of a 2D array.
Parameters
----------
X : array-like, shape = (n_samples, n_timestamps)
Input data.
window_size : int
The size of the window. It must be between 1 and ``n_timestamps``.
window_step : int (default = 1)
        The step of the sliding window.
Returns
-------
X_new : array, shape = (n_samples, n_windows, window_size)
Windowed view of the input data. ``n_windows`` is computed as
``(n_timestamps - window_size + window_step) // window_step``.
Examples
--------
>>> import numpy as np
>>> from pyts.utils import windowed_view
>>> windowed_view(np.arange(6).reshape(1, -1), window_size=2)
array([[[0, 1],
[1, 2],
[2, 3],
[3, 4],
[4, 5]]])
"""
X = check_array(X, dtype=None)
n_samples, n_timestamps = X.shape
if not isinstance(window_size, (int, np.integer)):
raise TypeError("'window_size' must be an integer.")
if not 1 <= window_size <= n_timestamps:
raise ValueError("'window_size' must be an integer between 1 and "
"n_timestamps.")
if not isinstance(window_step, (int, np.integer)):
raise TypeError("'window_step' must be an integer.")
if not 1 <= window_step <= n_timestamps:
raise ValueError("'window_step' must be an integer between 1 and "
"n_timestamps.")
return _windowed_view(X, n_samples, n_timestamps, window_size, window_step)
| johannfaouzi/pyts | pyts/utils/utils.py | Python | bsd-3-clause | 5,388 |
# -*- coding: utf-8 -*-
"""Product models."""
from flask import url_for, current_app
from fulfil_client.model import (MoneyType, IntType, ModelType, One2ManyType,
StringType)
from shop.fulfilio import Model, ShopQuery
from shop.globals import current_channel
from shop.utils import get_random_product, imgixify, json_ld_dict
from fulfil_client.client import loads, dumps
from cached_property import cached_property
class ProductTemplate(Model):
__model_name__ = 'product.template'
name = StringType()
description = StringType()
long_description = StringType()
media = One2ManyType("product.media", cache=True)
products = One2ManyType("product.product", cache=True)
variation_attributes = One2ManyType(
"product.variation_attributes", cache=True
)
@property
def lowest_price(self):
return min([
listing.unit_price for listing in self.listings
])
@property
def highest_price(self):
return max([
listing.unit_price for listing in self.listings
])
@property
def listings(self):
return self._get_listings()
@property
def image(self):
if self.media:
image = self.media[0].url
return imgixify([image])[0]
else:
# Iterate through listings to find an image
for listing in self.listings:
image = listing.product.image
if image:
return image
def _get_listings(self):
"""
Return the products (that are listed in the current channel) and
active.
"""
key = "%s:%s:listing_ids" % (self.__model_name__, self.id)
if self.cache_backend.exists(key):
listing_ids = loads(self.cache_backend.get(key))
return ChannelListing.from_cache(listing_ids)
else:
listings = ChannelListing.query.filter_by_domain(
[
('channel', '=', current_channel.id),
('state', '=', 'active'),
('product.template', '=', self.id),
('product.active', '=', True),
],
).all()
map(lambda l: l.store_in_cache(), listings)
listings = sorted(
listings,
key=lambda l: self.products.index(l.product)
)
self.cache_backend.set(
key, dumps([l.id for l in listings]),
ex=current_app.config['REDIS_EX']
)
return listings
def get_product_variation_data(self):
"""
"""
key = '%s:get_product_variation_data:%s' % (
self.__model_name__, self.id
)
if self.cache_backend.exists(key):
return loads(self.cache_backend.get(key))
self.refresh()
variation_attributes = map(
lambda variation: variation.serialize(),
self.variation_attributes
)
variants = []
for listing in self.listings:
product = listing.product
product.refresh() # Fetch record again
data = product.serialize(purpose='variant_selection')
data['inventory_status'] = listing.get_availability()['value']
data['attributes'] = {}
for variation in self.variation_attributes:
attribute = variation.attribute # actual attribute
value = product.get_attribute_value(attribute)
data['attributes'][attribute.id] = value
variants.append(data)
rv = {
'variants': variants,
'variation_attributes': variation_attributes,
}
self.cache_backend.set(
key, dumps(rv),
ex=current_app.config['REDIS_EX'],
)
return rv
class Product(Model):
__model_name__ = 'product.product'
_eager_fields = set([
'template', 'variant_name', 'default_image.url'
])
code = StringType()
list_price = MoneyType('currency_code')
description = StringType()
long_description = StringType()
uri = StringType()
attributes = One2ManyType("product.product.attribute", cache=True)
cross_sells = One2ManyType('product.product', cache=True)
@property
def currency_code(self):
return current_channel.currency_code
@property
def image(self):
image = self._values.get('default_image.url')
if image:
return imgixify([image])[0]
return image
@property
def nodes(self):
# TODO: Return a list of nodes that this product belongs to
return []
@property
def images(self):
key = '%s:images:%s' % (self.__model_name__, self.id)
if self.cache_backend.exists(key):
return loads(self.cache_backend.get(key))
else:
rv = self.rpc.get_images_urls(self.id)
if not rv and self.available_image:
rv = [self.available_image]
rv = imgixify(rv)
self.cache_backend.set(
key, dumps(rv),
ex=current_app.config['REDIS_EX'],
)
return rv
@cached_property
def available_image(self):
if self.image:
return self.image
if self.template.image:
return self.template.image
@cached_property
def available_description(self):
return self.description or self.template.description
@cached_property
def available_long_description(self):
return self.long_description or self.template.long_description
@property
def name(self):
return self._values['variant_name'] or self.template.name
@property
def template(self):
return ProductTemplate.from_cache(self._values['template'])
@property
def brand(self):
# To be implemented
return None
@cached_property
def listing(self):
key = '%s:listing:%s' % (self.__model_name__, self.id)
if self.cache_backend.exists(key):
return ChannelListing.from_cache(
int(self.cache_backend.get(key))
)
else:
listing = ChannelListing.query.filter_by_domain(
[
('product', '=', self.id),
('channel', '=', current_channel.id)
]
).first()
if listing:
self.cache_backend.set(
key, listing.id,
ex=current_app.config['REDIS_EX'],
)
return listing
def get_absolute_url(self, **kwargs):
return url_for(
'products.product', handle=self.uri, **kwargs
)
def get_related_products(self):
"""
        Return related products of this product.
"""
return [
get_random_product() for c in range(5)
]
def get_attribute_value(self, attribute, silent=True):
for product_attr in self.attributes:
if product_attr.attribute == attribute:
value = getattr(
product_attr,
'value_%s' % attribute.type_
)
if value and attribute.type_ == 'selection':
value = value.id
return value
else:
if silent:
return
raise AttributeError(attribute.name)
def serialize(self, purpose=None):
return {
'id': self.id,
'rec_name': self.name,
'name': self.name,
'code': self.code,
'description': self.available_description or "",
'long_description': self.available_long_description or "",
'price': "%s" % self.list_price.format(),
'image_urls': self.images,
}
class ChannelListing(Model):
__model_name__ = 'product.product.channel_listing'
_eager_fields = set(['channel', 'product', 'product.template'])
product_identifier = StringType()
state = StringType()
@classmethod
def from_slug(cls, slug):
key = '%s:from_slug:%s:%s' % (
cls.__model_name__, slug, current_channel.id
)
if cls.cache_backend.exists(key):
return cls.from_cache(loads(cls.cache_backend.get(key)))
else:
listing = cls.query.filter_by_domain(
[
('channel', '=', current_channel.id),
('product_identifier', '=', slug),
]
).first()
if listing:
cls.cache_backend.set(
key, listing.id,
ex=current_app.config['REDIS_EX']
)
listing.store_in_cache()
return listing
@property
def channel(self):
return self._values['channel']
@property
def template(self):
return ProductTemplate.from_cache(self._values['product.template'])
@property
def product(self):
return Product.from_cache(self._values['product'])
@property
def unit_price(self):
# TODO: Price should come from the listing and customer
return self.product.list_price
@classmethod
def get_shop_query(cls):
return ShopQuery(cls.rpc, cls)
def get_availability(self):
"""
        It is recommended to use the availability property rather than calling
        this method directly, which always results in a web services call.
"""
return self.rpc.get_availability(self.id)
@cached_property
def availability(self):
return self.get_availability()
def get_absolute_url(self, node=None, **kwargs):
kwargs['handle'] = self.product_identifier
if node is not None:
kwargs['node'] = node
return url_for('products.product', **kwargs)
@property
def json_ld(self):
return json_ld_dict({
'@context': 'http://schema.org',
'@type': 'Product',
'description': self.product.description,
'name': self.product.name,
'sku': self.product.code,
'image': self.product.image,
'offers': {
'@type': 'Offer',
'availability': 'http://schema.org/InStock',
'url': self.get_absolute_url(_external=True),
'price': '%0.2f' % self.unit_price,
'priceCurrency': self.unit_price.currency,
},
})
def get_tree_crumbs_json_ld(self, node):
"""
Return a JSON+LD for tree node and crumbs
"""
node_tree = node.tree_crumbs_json_ld
node_tree['itemListElement'].append({
'@type': 'ListItem',
'position': len(node_tree['itemListElement']) + 1,
'item': {
'@id': self.get_absolute_url(_external=True),
'name': self.product.name,
'image': self.product.image,
}
})
return node_tree
class ProductVariationAttributes(Model):
__model_name__ = 'product.variation_attributes'
attribute = ModelType('product.attribute', cache=True)
sequence = IntType()
widget = StringType()
template = ModelType('product.template', cache=True)
def serialize(self):
"""
Returns serialized version of the attribute::
{
'sequence': 1, # Integer id to determine order
'name': 'shirt color', # Internal name of the attribute
'display_name': 'Color', # (opt) display name of attr
'rec_name': 'Color', # The name that should be shown
'widget': 'swatch', # clue on how to render widget
'options': [
# id, value of the options available to choose from
(12, 'Blue'),
(13, 'Yellow'),
...
]
}
"""
if self.attribute.type_ == 'selection':
# The attribute type needs options to choose from.
# Send only the options that the products displayed on webshop
# can have, instead of the exhaustive list of attribute options
# the attribute may have.
#
# For example, the color attribute values could be
# ['red', 'yellow', 'orange', 'green', 'black', 'blue']
# but the shirt itself might only be available in
# ['red', 'yellow']
#
# This can be avoided by returning options based on the product
            # rather than on the attribute's list of values
options = set()
for listing in self.template.listings:
product = listing.product
product.refresh()
value = product.get_attribute_value(self.attribute)
if value:
option = ProductAttributeSelectionOption.from_cache(value)
options.add((option.id, option.name))
else:
options = []
return {
'sequence': self.sequence,
'name': self.attribute.name,
'display_name': self.attribute.display_name,
'widget': self.widget,
'options': list(options),
'attribute_id': self.attribute.id,
}
class ProductAttribute(Model):
__model_name__ = 'product.attribute'
type_ = StringType()
name = StringType()
display_name = StringType()
class ProductProductAttribute(Model):
__model_name__ = 'product.product.attribute'
attribute = ModelType('product.attribute', cache=True)
value_selection = ModelType(
'product.attribute.selection_option', cache=True
)
class ProductAttributeSelectionOption(Model):
__model_name__ = 'product.attribute.selection_option'
name = StringType()
class ProductUOM(Model):
__model_name__ = 'product.uom'
symbol = StringType()
class ProductMedia(Model):
__model_name__ = 'product.media'
url = StringType()
| joeirimpan/shop | shop/product/models.py | Python | bsd-3-clause | 14,218 |
import datetime
import uuid
from functools import lru_cache
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.backends.utils import strip_quotes, truncate_name
from django.db.models.expressions import Exists, ExpressionWrapper
from django.db.models.query_utils import Q
from django.db.utils import DatabaseError
from django.utils import timezone
from django.utils.encoding import force_bytes, force_str
from django.utils.functional import cached_property
from django.utils.regex_helper import _lazy_re_compile
from .base import Database
from .utils import BulkInsertMapper, InsertVar, Oracle_datetime
class DatabaseOperations(BaseDatabaseOperations):
# Oracle uses NUMBER(5), NUMBER(11), and NUMBER(19) for integer fields.
# SmallIntegerField uses NUMBER(11) instead of NUMBER(5), which is used by
# SmallAutoField, to preserve backward compatibility.
integer_field_ranges = {
'SmallIntegerField': (-99999999999, 99999999999),
'IntegerField': (-99999999999, 99999999999),
'BigIntegerField': (-9999999999999999999, 9999999999999999999),
'PositiveSmallIntegerField': (0, 99999999999),
'PositiveIntegerField': (0, 99999999999),
'SmallAutoField': (-99999, 99999),
'AutoField': (-99999999999, 99999999999),
'BigAutoField': (-9999999999999999999, 9999999999999999999),
}
set_operators = {**BaseDatabaseOperations.set_operators, 'difference': 'MINUS'}
# TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
_sequence_reset_sql = """
DECLARE
table_value integer;
seq_value integer;
seq_name user_tab_identity_cols.sequence_name%%TYPE;
BEGIN
BEGIN
SELECT sequence_name INTO seq_name FROM user_tab_identity_cols
WHERE table_name = '%(table_name)s' AND
column_name = '%(column_name)s';
EXCEPTION WHEN NO_DATA_FOUND THEN
seq_name := '%(no_autofield_sequence_name)s';
END;
SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s;
SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences
WHERE sequence_name = seq_name;
WHILE table_value > seq_value LOOP
EXECUTE IMMEDIATE 'SELECT "'||seq_name||'".nextval FROM DUAL'
INTO seq_value;
END LOOP;
END;
/"""
# Oracle doesn't support string without precision; use the max string size.
cast_char_field_without_max_length = 'NVARCHAR2(2000)'
cast_data_types = {
'AutoField': 'NUMBER(11)',
'BigAutoField': 'NUMBER(19)',
'SmallAutoField': 'NUMBER(5)',
'TextField': cast_char_field_without_max_length,
}
def cache_key_culling_sql(self):
return 'SELECT cache_key FROM %s ORDER BY cache_key OFFSET %%s ROWS FETCH FIRST 1 ROWS ONLY'
def date_extract_sql(self, lookup_type, field_name):
if lookup_type == 'week_day':
# TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
return "TO_CHAR(%s, 'D')" % field_name
elif lookup_type == 'iso_week_day':
return "TO_CHAR(%s - 1, 'D')" % field_name
elif lookup_type == 'week':
# IW = ISO week number
return "TO_CHAR(%s, 'IW')" % field_name
elif lookup_type == 'quarter':
return "TO_CHAR(%s, 'Q')" % field_name
elif lookup_type == 'iso_year':
return "TO_CHAR(%s, 'IYYY')" % field_name
else:
# https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/EXTRACT-datetime.html
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
# https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/ROUND-and-TRUNC-Date-Functions.html
if lookup_type in ('year', 'month'):
return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
elif lookup_type == 'quarter':
return "TRUNC(%s, 'Q')" % field_name
elif lookup_type == 'week':
return "TRUNC(%s, 'IW')" % field_name
else:
return "TRUNC(%s)" % field_name
# Oracle crashes with "ORA-03113: end-of-file on communication channel"
# if the time zone name is passed in parameter. Use interpolation instead.
# https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ
# This regexp matches all time zone names from the zoneinfo database.
_tzname_re = _lazy_re_compile(r'^[\w/:+-]+$')
def _prepare_tzname_delta(self, tzname):
if '+' in tzname:
return tzname[tzname.find('+'):]
elif '-' in tzname:
return tzname[tzname.find('-'):]
return tzname
def _convert_field_to_tz(self, field_name, tzname):
if not settings.USE_TZ:
return field_name
if not self._tzname_re.match(tzname):
raise ValueError("Invalid time zone name: %s" % tzname)
# Convert from connection timezone to the local time, returning
# TIMESTAMP WITH TIME ZONE and cast it back to TIMESTAMP to strip the
# TIME ZONE details.
if self.connection.timezone_name != tzname:
return "CAST((FROM_TZ(%s, '%s') AT TIME ZONE '%s') AS TIMESTAMP)" % (
field_name,
self.connection.timezone_name,
self._prepare_tzname_delta(tzname),
)
return field_name
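    # Illustrative result of the conversion above, assuming a connection timezone
    # of 'UTC' and tzname 'Europe/Paris' (no '+'/'-' offset, so the name is used
    # verbatim):
    #   CAST((FROM_TZ("MYTABLE"."MYCOL", 'UTC') AT TIME ZONE 'Europe/Paris') AS TIMESTAMP)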
def datetime_cast_date_sql(self, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return 'TRUNC(%s)' % field_name
def datetime_cast_time_sql(self, field_name, tzname):
# Since `TimeField` values are stored as TIMESTAMP where only the date
# part is ignored, convert the field to the specified timezone.
return self._convert_field_to_tz(field_name, tzname)
def datetime_extract_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return self.date_extract_sql(lookup_type, field_name)
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
# https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/ROUND-and-TRUNC-Date-Functions.html
if lookup_type in ('year', 'month'):
sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
elif lookup_type == 'quarter':
sql = "TRUNC(%s, 'Q')" % field_name
elif lookup_type == 'week':
sql = "TRUNC(%s, 'IW')" % field_name
elif lookup_type == 'day':
sql = "TRUNC(%s)" % field_name
elif lookup_type == 'hour':
sql = "TRUNC(%s, 'HH24')" % field_name
elif lookup_type == 'minute':
sql = "TRUNC(%s, 'MI')" % field_name
else:
sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision.
return sql
def time_trunc_sql(self, lookup_type, field_name):
# The implementation is similar to `datetime_trunc_sql` as both
# `DateTimeField` and `TimeField` are stored as TIMESTAMP where
        # the date part of the latter is ignored.
if lookup_type == 'hour':
sql = "TRUNC(%s, 'HH24')" % field_name
elif lookup_type == 'minute':
sql = "TRUNC(%s, 'MI')" % field_name
elif lookup_type == 'second':
sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision.
return sql
def get_db_converters(self, expression):
converters = super().get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
if internal_type == 'TextField':
converters.append(self.convert_textfield_value)
elif internal_type == 'BinaryField':
converters.append(self.convert_binaryfield_value)
elif internal_type in ['BooleanField', 'NullBooleanField']:
converters.append(self.convert_booleanfield_value)
elif internal_type == 'DateTimeField':
if settings.USE_TZ:
converters.append(self.convert_datetimefield_value)
elif internal_type == 'DateField':
converters.append(self.convert_datefield_value)
elif internal_type == 'TimeField':
converters.append(self.convert_timefield_value)
elif internal_type == 'UUIDField':
converters.append(self.convert_uuidfield_value)
# Oracle stores empty strings as null. If the field accepts the empty
# string, undo this to adhere to the Django convention of using
# the empty string instead of null.
if expression.field.empty_strings_allowed:
converters.append(
self.convert_empty_bytes
if internal_type == 'BinaryField' else
self.convert_empty_string
)
return converters
def convert_textfield_value(self, value, expression, connection):
if isinstance(value, Database.LOB):
value = value.read()
return value
def convert_binaryfield_value(self, value, expression, connection):
if isinstance(value, Database.LOB):
value = force_bytes(value.read())
return value
def convert_booleanfield_value(self, value, expression, connection):
if value in (0, 1):
value = bool(value)
return value
# cx_Oracle always returns datetime.datetime objects for
# DATE and TIMESTAMP columns, but Django wants to see a
# python datetime.date, .time, or .datetime.
def convert_datetimefield_value(self, value, expression, connection):
if value is not None:
value = timezone.make_aware(value, self.connection.timezone)
return value
def convert_datefield_value(self, value, expression, connection):
if isinstance(value, Database.Timestamp):
value = value.date()
return value
def convert_timefield_value(self, value, expression, connection):
if isinstance(value, Database.Timestamp):
value = value.time()
return value
def convert_uuidfield_value(self, value, expression, connection):
if value is not None:
value = uuid.UUID(value)
return value
@staticmethod
def convert_empty_string(value, expression, connection):
return '' if value is None else value
@staticmethod
def convert_empty_bytes(value, expression, connection):
return b'' if value is None else value
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def fetch_returned_insert_columns(self, cursor, returning_params):
for param in returning_params:
value = param.get_value()
if value is None or value == []:
# cx_Oracle < 6.3 returns None, >= 6.3 returns empty list.
raise DatabaseError(
'The database did not return a new row id. Probably '
'"ORA-1403: no data found" was raised internally but was '
'hidden by the Oracle OCI library (see '
'https://code.djangoproject.com/ticket/28859).'
)
# cx_Oracle < 7 returns value, >= 7 returns list with single value.
yield value[0] if isinstance(value, list) else value
def field_cast_sql(self, db_type, internal_type):
if db_type and db_type.endswith('LOB'):
return "DBMS_LOB.SUBSTR(%s)"
else:
return "%s"
def no_limit_value(self):
return None
def limit_offset_sql(self, low_mark, high_mark):
fetch, offset = self._get_limit_offset_params(low_mark, high_mark)
return ' '.join(sql for sql in (
('OFFSET %d ROWS' % offset) if offset else None,
('FETCH FIRST %d ROWS ONLY' % fetch) if fetch else None,
) if sql)
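    # Illustrative output, assuming the base class returns (high_mark - low_mark,
    # low_mark) as (fetch, offset): limit_offset_sql(5, 15) would produce
    # 'OFFSET 5 ROWS FETCH FIRST 10 ROWS ONLY'.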
def last_executed_query(self, cursor, sql, params):
# https://cx-oracle.readthedocs.io/en/latest/cursor.html#Cursor.statement
# The DB API definition does not define this attribute.
statement = cursor.statement
        # Unlike Psycopg's `query` and MySQLdb's `_executed`, cx_Oracle's
# `statement` doesn't contain the query parameters. Substitute
# parameters manually.
if isinstance(params, (tuple, list)):
for i, param in enumerate(params):
statement = statement.replace(':arg%d' % i, force_str(param, errors='replace'))
elif isinstance(params, dict):
for key, param in params.items():
statement = statement.replace(':%s' % key, force_str(param, errors='replace'))
return statement
def last_insert_id(self, cursor, table_name, pk_name):
sq_name = self._get_sequence_name(cursor, strip_quotes(table_name), pk_name)
cursor.execute('"%s".currval' % sq_name)
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type, internal_type=None):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_in_list_size(self):
return 1000
def max_name_length(self):
return 30
def pk_default_value(self):
return "NULL"
def prep_for_iexact_query(self, x):
return x
def process_clob(self, value):
if value is None:
return ''
return value.read()
def quote_name(self, name):
# SQL92 requires delimited (quoted) names to be case-sensitive. When
# not quoted, Oracle has case-insensitive behavior for identifiers, but
# always defaults to uppercase.
# We simplify things by making Oracle identifiers always uppercase.
if not name.startswith('"') and not name.endswith('"'):
name = '"%s"' % truncate_name(name.upper(), self.max_name_length())
# Oracle puts the query text into a (query % args) construct, so % signs
# in names need to be escaped. The '%%' will be collapsed back to '%' at
# that stage so we aren't really making the name longer here.
name = name.replace('%', '%%')
return name.upper()
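    # Illustrative behaviour of the quoting above:
    #   quote_name('django_content_type') -> '"DJANGO_CONTENT_TYPE"'
    #   quote_name('"already_quoted"')    -> '"ALREADY_QUOTED"'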
def random_function_sql(self):
return "DBMS_RANDOM.RANDOM"
def regex_lookup(self, lookup_type):
if lookup_type == 'regex':
match_option = "'c'"
else:
match_option = "'i'"
return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option
def return_insert_columns(self, fields):
if not fields:
return '', ()
field_names = []
params = []
for field in fields:
field_names.append('%s.%s' % (
self.quote_name(field.model._meta.db_table),
self.quote_name(field.column),
))
params.append(InsertVar(field))
return 'RETURNING %s INTO %s' % (
', '.join(field_names),
', '.join(['%s'] * len(params)),
), tuple(params)
def __foreign_key_constraints(self, table_name, recursive):
with self.connection.cursor() as cursor:
if recursive:
cursor.execute("""
SELECT
user_tables.table_name, rcons.constraint_name
FROM
user_tables
JOIN
user_constraints cons
ON (user_tables.table_name = cons.table_name AND cons.constraint_type = ANY('P', 'U'))
LEFT JOIN
user_constraints rcons
ON (user_tables.table_name = rcons.table_name AND rcons.constraint_type = 'R')
START WITH user_tables.table_name = UPPER(%s)
CONNECT BY NOCYCLE PRIOR cons.constraint_name = rcons.r_constraint_name
GROUP BY
user_tables.table_name, rcons.constraint_name
HAVING user_tables.table_name != UPPER(%s)
ORDER BY MAX(level) DESC
""", (table_name, table_name))
else:
cursor.execute("""
SELECT
cons.table_name, cons.constraint_name
FROM
user_constraints cons
WHERE
cons.constraint_type = 'R'
AND cons.table_name = UPPER(%s)
""", (table_name,))
return cursor.fetchall()
@cached_property
def _foreign_key_constraints(self):
# 512 is large enough to fit the ~330 tables (as of this writing) in
# Django's test suite.
return lru_cache(maxsize=512)(self.__foreign_key_constraints)
def sql_flush(self, style, tables, sequences, allow_cascade=False):
if tables:
truncated_tables = {table.upper() for table in tables}
constraints = set()
# Oracle's TRUNCATE CASCADE only works with ON DELETE CASCADE
# foreign keys which Django doesn't define. Emulate the
# PostgreSQL behavior which truncates all dependent tables by
# manually retrieving all foreign key constraints and resolving
# dependencies.
for table in tables:
for foreign_table, constraint in self._foreign_key_constraints(table, recursive=allow_cascade):
if allow_cascade:
truncated_tables.add(foreign_table)
constraints.add((foreign_table, constraint))
sql = [
"%s %s %s %s %s %s %s %s;" % (
style.SQL_KEYWORD('ALTER'),
style.SQL_KEYWORD('TABLE'),
style.SQL_FIELD(self.quote_name(table)),
style.SQL_KEYWORD('DISABLE'),
style.SQL_KEYWORD('CONSTRAINT'),
style.SQL_FIELD(self.quote_name(constraint)),
style.SQL_KEYWORD('KEEP'),
style.SQL_KEYWORD('INDEX'),
) for table, constraint in constraints
] + [
"%s %s %s;" % (
style.SQL_KEYWORD('TRUNCATE'),
style.SQL_KEYWORD('TABLE'),
style.SQL_FIELD(self.quote_name(table)),
) for table in truncated_tables
] + [
"%s %s %s %s %s %s;" % (
style.SQL_KEYWORD('ALTER'),
style.SQL_KEYWORD('TABLE'),
style.SQL_FIELD(self.quote_name(table)),
style.SQL_KEYWORD('ENABLE'),
style.SQL_KEYWORD('CONSTRAINT'),
style.SQL_FIELD(self.quote_name(constraint)),
) for table, constraint in constraints
]
# Since we've just deleted all the rows, running our sequence
# ALTER code will reset the sequence to 0.
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def sequence_reset_by_name_sql(self, style, sequences):
sql = []
for sequence_info in sequences:
no_autofield_sequence_name = self._get_no_autofield_sequence_name(sequence_info['table'])
table = self.quote_name(sequence_info['table'])
column = self.quote_name(sequence_info['column'] or 'id')
query = self._sequence_reset_sql % {
'no_autofield_sequence_name': no_autofield_sequence_name,
'table': table,
'column': column,
'table_name': strip_quotes(table),
'column_name': strip_quotes(column),
}
sql.append(query)
return sql
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
query = self._sequence_reset_sql
for model in model_list:
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
no_autofield_sequence_name = self._get_no_autofield_sequence_name(model._meta.db_table)
table = self.quote_name(model._meta.db_table)
column = self.quote_name(f.column)
output.append(query % {
'no_autofield_sequence_name': no_autofield_sequence_name,
'table': table,
'column': column,
'table_name': strip_quotes(table),
'column_name': strip_quotes(column),
})
# Only one AutoField is allowed per model, so don't
# continue to loop
break
for f in model._meta.many_to_many:
if not f.remote_field.through:
no_autofield_sequence_name = self._get_no_autofield_sequence_name(f.m2m_db_table())
table = self.quote_name(f.m2m_db_table())
column = self.quote_name('id')
output.append(query % {
'no_autofield_sequence_name': no_autofield_sequence_name,
'table': table,
'column': column,
'table_name': strip_quotes(table),
'column_name': 'ID',
})
return output
def start_transaction_sql(self):
return ''
def tablespace_sql(self, tablespace, inline=False):
if inline:
return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
else:
return "TABLESPACE %s" % self.quote_name(tablespace)
def adapt_datefield_value(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
The default implementation transforms the date to text, but that is not
necessary for Oracle.
"""
return value
def adapt_datetimefield_value(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
        If a naive datetime is passed, it is assumed to be in UTC. Normally
        Django's models.DateTimeField ensures that, if USE_TZ is True, the
        passed datetime is timezone-aware.
"""
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
# cx_Oracle doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = timezone.make_naive(value, self.connection.timezone)
else:
raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.")
return Oracle_datetime.from_datetime(value)
def adapt_timefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
if isinstance(value, str):
return datetime.datetime.strptime(value, '%H:%M:%S')
# Oracle doesn't support tz-aware times
if timezone.is_aware(value):
raise ValueError("Oracle backend does not support timezone-aware times.")
return Oracle_datetime(1900, 1, 1, value.hour, value.minute,
value.second, value.microsecond)
def combine_expression(self, connector, sub_expressions):
lhs, rhs = sub_expressions
if connector == '%%':
return 'MOD(%s)' % ','.join(sub_expressions)
elif connector == '&':
return 'BITAND(%s)' % ','.join(sub_expressions)
elif connector == '|':
return 'BITAND(-%(lhs)s-1,%(rhs)s)+%(lhs)s' % {'lhs': lhs, 'rhs': rhs}
elif connector == '<<':
return '(%(lhs)s * POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs}
elif connector == '>>':
return 'FLOOR(%(lhs)s / POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs}
elif connector == '^':
return 'POWER(%s)' % ','.join(sub_expressions)
return super().combine_expression(connector, sub_expressions)
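    # Example of the bitwise-OR emulation above (illustrative): BITAND(-lhs-1, rhs)
    # computes ~lhs & rhs, whose set bits are disjoint from lhs, so adding lhs back
    # yields lhs | rhs. For instance:
    #   combine_expression('|', ['"A"', '"B"']) -> 'BITAND(-"A"-1,"B")+"A"'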
def _get_no_autofield_sequence_name(self, table):
"""
Manually created sequence name to keep backward compatibility for
AutoFields that aren't Oracle identity columns.
"""
name_length = self.max_name_length() - 3
return '%s_SQ' % truncate_name(strip_quotes(table), name_length).upper()
def _get_sequence_name(self, cursor, table, pk_name):
cursor.execute("""
SELECT sequence_name
FROM user_tab_identity_cols
WHERE table_name = UPPER(%s)
AND column_name = UPPER(%s)""", [table, pk_name])
row = cursor.fetchone()
return self._get_no_autofield_sequence_name(table) if row is None else row[0]
def bulk_insert_sql(self, fields, placeholder_rows):
query = []
for row in placeholder_rows:
select = []
for i, placeholder in enumerate(row):
# A model without any fields has fields=[None].
if fields[i]:
internal_type = getattr(fields[i], 'target_field', fields[i]).get_internal_type()
placeholder = BulkInsertMapper.types.get(internal_type, '%s') % placeholder
# Add columns aliases to the first select to avoid "ORA-00918:
# column ambiguously defined" when two or more columns in the
# first select have the same value.
if not query:
placeholder = '%s col_%s' % (placeholder, i)
select.append(placeholder)
query.append('SELECT %s FROM DUAL' % ', '.join(select))
# Bulk insert to tables with Oracle identity columns causes Oracle to
# add sequence.nextval to it. Sequence.nextval cannot be used with the
# UNION operator. To prevent incorrect SQL, move UNION to a subquery.
return 'SELECT * FROM (%s)' % ' UNION ALL '.join(query)
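    # Illustrative output for two rows of plain '%s' placeholders (field types not
    # listed in BulkInsertMapper.types pass through unchanged):
    #   SELECT * FROM (SELECT %s col_0, %s col_1 FROM DUAL UNION ALL SELECT %s, %s FROM DUAL)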
def subtract_temporals(self, internal_type, lhs, rhs):
if internal_type == 'DateField':
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
return "NUMTODSINTERVAL(TO_NUMBER(%s - %s), 'DAY')" % (lhs_sql, rhs_sql), lhs_params + rhs_params
return super().subtract_temporals(internal_type, lhs, rhs)
def bulk_batch_size(self, fields, objs):
"""Oracle restricts the number of parameters in a query."""
if fields:
return self.connection.features.max_query_params // len(fields)
return len(objs)
def conditional_expression_supported_in_where_clause(self, expression):
"""
Oracle supports only EXISTS(...) or filters in the WHERE clause, others
must be compared with True.
"""
if isinstance(expression, Exists):
return True
if isinstance(expression, ExpressionWrapper) and isinstance(expression.expression, Q):
return True
return False
| georgemarshall/django | django/db/backends/oracle/operations.py | Python | bsd-3-clause | 27,551 |
""" Module that uses CMAC 2.0 to remove and correct second trip returns,
correct velocity and more. A new radar object is then created with all CMAC
2.0 products. """
import copy
import json
import sys
import netCDF4
import numpy as np
import pyart
from .cmac_processing import (
do_my_fuzz, get_melt, get_texture, fix_phase_fields, gen_clutter_field_from_refl, beam_block)
from .config import get_cmac_values, get_field_names, get_metadata
def cmac(radar, sonde, config, geotiff=None, flip_velocity=False,
meta_append=None, verbose=True):
"""
Corrected Moments in Antenna Coordinates
Parameters
----------
radar : Radar
Radar object to use in the CMAC calculation.
sonde : Object
Object containing all the sonde data.
config : str
A string pointing to dictionaries containing values for CMAC 2.0
specific to a radar.
Other Parameters
----------------
geotiff : str
        Filepath for a geotiff. If provided, a beam blockage gate id will be
        generated.
meta_append : dict, json and None
        Key-value pairs to append to the global attributes. If None,
        default metadata will be created. The metadata can also
be created by providing a dictionary or a json file.
verbose : bool
If True, this will display more statistics.
Returns
-------
radar : Radar
Radar object with new CMAC added fields.
"""
# Retrieve values from the configuration file.
cmac_config = get_cmac_values(config)
field_config = get_field_names(config)
meta_config = get_metadata(config)
    # Overwrite site altitude
if 'site_alt' in cmac_config.keys():
radar.altitude['data'][0] = cmac_config['site_alt']
# Obtaining variables needed for fuzzy logic.
radar_start_date = netCDF4.num2date(
radar.time['data'][0], radar.time['units'],
only_use_cftime_datetimes=False, only_use_python_datetimes=True)
print('##', str(radar_start_date))
temp_field = field_config['temperature']
alt_field = field_config['altitude']
vel_field = field_config['velocity']
if 'gen_clutter_from_refl' not in cmac_config.keys():
cmac_config['gen_clutter_from_refl'] = False
if cmac_config['gen_clutter_from_refl']:
new_clutter_field = gen_clutter_field_from_refl(radar, field_config['input_clutter_corrected_reflectivity'],
field_config['reflectivity'],
diff_dbz=cmac_config['gen_clutter_from_refl_diff'],
max_h=cmac_config['gen_clutter_from_refl_alt'])
radar.add_field(field_config['clutter'], new_clutter_field, replace_existing=True)
radar.fields[field_config['clutter']]['units'] = '1'
# ZDR offsets
if 'zdr_offset' in cmac_config.keys():
if 'offset_zdrs' in cmac_config.keys():
for fld in cmac_config['offset_zdrs']:
radar.fields[fld]['data'] += cmac_config['zdr_offset']
else:
radar.fields[field_config['input_zdr']]['data'] += cmac_config['zdr_offset']
# flipping phidp
if 'flip_phidp' not in cmac_config.keys():
cmac_config['flip_phidp'] = False
if cmac_config['flip_phidp']:
if 'phidp_flipped' in cmac_config.keys(): # user specifies fields to flip
for fld in cmac_config['phidp_flipped']:
radar.fields[fld]['data'] = radar.fields[fld]['data'] * -1.0
else: # just flip defined phidp field
radar.fields[field_config['input_phidp_field']]['data'] = radar.fields[field_config['input_phidp_field']]['data']*-1.0
if flip_velocity:
radar.fields[vel_field]['data'] = radar.fields[
vel_field]['data'] * -1.0
z_dict, temp_dict = pyart.retrieve.map_profile_to_gates(
sonde.variables[temp_field][:], sonde.variables[alt_field][:], radar)
if 'clutter_mask_z_for_texture' not in cmac_config.keys():
cmac_config['clutter_mask_z_for_texture'] = False
if cmac_config['clutter_mask_z_for_texture']:
masked_vr = copy.deepcopy(radar.fields[vel_field])
if 'ground_clutter' in radar.fields.keys():
masked_vr['data'] = np.ma.masked_where(radar.fields['ground_clutter']['data'] == 1, masked_vr['data'])
masked_vr['data'][radar.fields['ground_clutter']['data'] == 1] = np.nan
radar.add_field('clutter_masked_velocity', masked_vr, replace_existing=True)
texture = get_texture(radar, 'clutter_masked_velocity')
texture['data'][np.isnan(texture['data'])] = 0.0
else:
texture = get_texture(radar, vel_field)
snr = pyart.retrieve.calculate_snr_from_reflectivity(radar)
if not verbose:
print('## Adding radar fields...')
if verbose:
print('##')
print('## These radar fields are being added:')
temp_dict['units'] = 'deg_C'
z_dict['units'] = 'm'
radar.add_field('sounding_temperature', temp_dict, replace_existing=True)
radar.add_field('height', z_dict, replace_existing=True)
radar.add_field('signal_to_noise_ratio', snr, replace_existing=True)
radar.add_field('velocity_texture', texture, replace_existing=True)
if verbose:
print('## sounding_temperature')
print('## height')
print('## signal_to_noise_ratio')
print('## velocity_texture')
# Performing fuzzy logic to obtain the gate ids.
rhv_field = field_config['cross_correlation_ratio']
ncp_field = field_config['normalized_coherent_power']
if 'mbfs' not in cmac_config:
cmac_config['mbfs'] = None
if 'hard_const' not in cmac_config:
cmac_config['hard_const'] = None
# Specifically for dealing with the ingested C-SAPR2 data
my_fuzz, _ = do_my_fuzz(radar, rhv_field, ncp_field, verbose=verbose,
custom_mbfs=cmac_config['mbfs'],
custom_hard_constraints=cmac_config['hard_const'])
radar.add_field('gate_id', my_fuzz,
replace_existing=True)
if 'ground_clutter' in radar.fields.keys():
# Adding fifth gate id, clutter.
clutter_data = radar.fields['ground_clutter']['data']
gate_data = radar.fields['gate_id']['data'].copy()
radar.fields['gate_id']['data'][clutter_data == 1] = 5
notes = radar.fields['gate_id']['notes']
radar.fields['gate_id']['notes'] = notes + ',5:clutter'
radar.fields['gate_id']['valid_max'] = 5
if 'classification_mask' in radar.fields.keys():
clutter_data = radar.fields['classification_mask']['data']
gate_data = radar.fields['gate_id']['data'].copy()
radar.fields['gate_id']['data'][clutter_data == 8] = 5
radar.fields['gate_id']['data'][clutter_data == 16] = 5
radar.fields['gate_id']['data'][clutter_data == 4] = 5
radar.fields['gate_id']['data'][clutter_data == 1] = 0
radar.fields['gate_id']['data'][clutter_data == 2] = 0
radar.fields['gate_id']['data'][gate_data == 0] = 0
notes = radar.fields['gate_id']['notes']
radar.fields['gate_id']['notes'] = notes + ',5:clutter'
radar.fields['gate_id']['valid_max'] = 5
if geotiff is not None:
pbb_all, cbb_all = beam_block(
radar, geotiff, cmac_config['radar_height_offset'],
cmac_config['beam_width'])
radar.fields['gate_id']['data'][cbb_all > 0.30] = 6
notes = radar.fields['gate_id']['notes']
radar.fields['gate_id']['notes'] = notes + ',6:terrain_blockage'
radar.fields['gate_id']['valid_max'] = 6
pbb_dict = pbb_to_dict(pbb_all)
cbb_dict = cbb_to_dict(cbb_all)
radar.add_field('partial_beam_blockage', pbb_dict)
radar.add_field('cumulative_beam_blockage', cbb_dict)
cat_dict = {}
for pair_str in radar.fields['gate_id']['notes'].split(','):
cat_dict.update(
{pair_str.split(':')[1]:int(pair_str.split(':')[0])})
if verbose:
print('## gate_id')
# Corrected velocity using pyart's region dealiaser.
cmac_gates = pyart.correct.GateFilter(radar)
cmac_gates.exclude_all()
cmac_gates.include_equal('gate_id', cat_dict['rain'])
cmac_gates.include_equal('gate_id', cat_dict['melting'])
cmac_gates.include_equal('gate_id', cat_dict['snow'])
# Create a simulated velocity field from the sonde object.
u_field = field_config['u_wind']
v_field = field_config['v_wind']
u_wind = sonde.variables[u_field][:]
v_wind = sonde.variables[v_field][:]
alt_field = field_config['altitude']
sonde_alt = sonde.variables[alt_field][:]
profile = pyart.core.HorizontalWindProfile.from_u_and_v(
sonde_alt, u_wind, v_wind)
sim_vel = pyart.util.simulated_vel_from_profile(radar, profile)
radar.add_field('simulated_velocity', sim_vel, replace_existing=True)
# Create the corrected velocity field from the region dealias algorithm.
corr_vel = pyart.correct.dealias_region_based(
radar, vel_field=vel_field, ref_vel_field='simulated_velocity',
keep_original=False, gatefilter=cmac_gates, centered=True)
radar.add_field('corrected_velocity', corr_vel, replace_existing=True)
if verbose:
print('## corrected_velocity')
print('## simulated_velocity')
fzl = get_melt(radar)
# Is the freezing level realistic? If not, assume
ref_offset = cmac_config['ref_offset']
self_const = cmac_config['self_const']
# Calculating differential phase fields.
radar.fields['differential_phase']['data'][
radar.fields['differential_phase']['data']<0] += 360.0
phidp, kdp = pyart.correct.phase_proc_lp_gf(
radar, gatefilter=cmac_gates, offset=ref_offset, debug=True,
nowrap=50, fzl=fzl, self_const=self_const)
phidp_filt, kdp_filt = fix_phase_fields(
copy.deepcopy(kdp), copy.deepcopy(phidp), radar.range['data'],
cmac_gates)
radar.add_field('corrected_differential_phase', phidp,
replace_existing=True)
radar.add_field('filtered_corrected_differential_phase', phidp_filt,
replace_existing=True)
radar.add_field('corrected_specific_diff_phase', kdp,
replace_existing=True)
radar.add_field('filtered_corrected_specific_diff_phase', kdp_filt,
replace_existing=True)
radar.fields['filtered_corrected_specific_diff_phase']['long_name'] = 'Filtered Corrected Specific differential phase (KDP)'
radar.fields['filtered_corrected_differential_phase']['long_name'] = 'Filtered Corrected Differential Phase'
if 'clutter_masked_velocity' in radar.fields.keys():
radar.fields['clutter_masked_velocity']['long_name'] = 'Radial mean Doppler velocity, positive for motion away from the instrument, clutter removed'
if verbose:
print('## corrected_specific_diff_phase')
print('## filtered_corrected_specific_diff_phase')
print('## corrected_differential_phase')
print('## filtered_corrected_differential_phase')
# Calculating attenuation by using pyart.
refl_field = field_config['reflectivity']
attenuation_a_coef = cmac_config['attenuation_a_coef']
c_coef = cmac_config['c_coef']
d_coef = cmac_config['d_coef']
beta_coef = cmac_config['beta_coef']
rr_a = cmac_config['rain_rate_a_coef']
rr_b = cmac_config['rain_rate_b_coef']
zdr_field = field_config['differential_reflectivity']
radar.fields['corrected_differential_reflectivity'] = copy.deepcopy(
radar.fields[zdr_field])
radar.fields['corrected_reflectivity'] = copy.deepcopy(
radar.fields[refl_field])
radar.fields['corrected_reflectivity']['data'] = np.ma.masked_where(
cmac_gates.gate_excluded,
radar.fields['corrected_reflectivity']['data'])
# Get specific differential attenuation.
    # Need height over the 0C isotherm.
iso0 = np.ma.mean(radar.fields['height']['data'][
np.where(np.abs(radar.fields['sounding_temperature']['data']) < 0.1)])
radar.fields['height_over_iso0'] = copy.deepcopy(radar.fields['height'])
radar.fields['height_over_iso0']['data'] -= iso0
phidp_field = field_config['phidp_field']
(spec_at, pia_dict, cor_z, spec_diff_at,
pida_dict, cor_zdr) = pyart.correct.calculate_attenuation_zphi(
radar, temp_field='sounding_temperature',
iso0_field='height_over_iso0',
zdr_field=field_config['zdr_field'],
pia_field=field_config['pia_field'],
phidp_field=field_config['phidp_field'],
refl_field=field_config['refl_field'], c=c_coef, d=d_coef,
a_coef=attenuation_a_coef, beta=beta_coef,
gatefilter=cmac_gates)
# cor_zdr['data'] += cmac_config['zdr_offset'] Now taken care of at start
radar.add_field('specific_attenuation', spec_at, replace_existing=True)
radar.add_field('path_integrated_attenuation', pia_dict,
replace_existing=True)
radar.add_field('corrected_reflectivity', cor_z, replace_existing=True)
radar.add_field('specific_differential_attenuation', spec_diff_at,
replace_existing=True)
radar.add_field('path_integrated_differential_attenuation', pida_dict,
replace_existing=True)
radar.add_field('corrected_differential_reflectivity', cor_zdr,
replace_existing=True)
radar.fields['corrected_velocity']['units'] = 'm/s'
radar.fields['corrected_velocity']['valid_min'] = np.round(
radar.fields['corrected_velocity']['valid_min'], 4)
radar.fields['corrected_velocity']['valid_max'] = np.round(
radar.fields['corrected_velocity']['valid_max'], 4)
radar.fields['simulated_velocity']['units'] = 'm/s'
radar.fields['velocity_texture']['units'] = 'm/s'
cat_dict = {}
for pair_str in radar.fields['gate_id']['notes'].split(','):
if verbose:
print(pair_str)
cat_dict.update({pair_str.split(':')[1]: int(pair_str.split(':')[0])})
rain_gates = pyart.correct.GateFilter(radar)
rain_gates.exclude_all()
rain_gates.include_equal('gate_id', cat_dict['rain'])
# Calculating rain rate.
R = rr_a * (radar.fields['specific_attenuation']['data']) ** rr_b
rainrate = copy.deepcopy(radar.fields['specific_attenuation'])
rainrate['data'] = R
rainrate['valid_min'] = 0.0
rainrate['valid_max'] = 400.0
rainrate['standard_name'] = 'rainfall_rate'
rainrate['long_name'] = 'rainfall_rate'
rainrate['least_significant_digit'] = 1
rainrate['units'] = 'mm/hr'
radar.fields.update({'rain_rate_A': rainrate})
# This needs to be updated to a gatefilter.
mask = radar.fields['reflectivity']['data'].mask
radar.fields['rain_rate_A'].update({
'comment': 'Rain rate calculated from specific_attenuation,'
+ ' R=51.3*specific_attenuation**0.81, note R=0.0 where'
+ ' norm coherent power < 0.4 or rhohv < 0.8'})
if verbose:
print('## Rainfall rate as a function of A ##')
print('##')
print('## All CMAC fields have been added to the radar object.')
print('##')
# Adding the metadata to the cmac radar object.
print('## Appending metadata')
command_line = ''
for item in sys.argv:
command_line = command_line + ' ' + item
if meta_append is None:
meta = {
'site_id': None,
'data_level': 'sgp',
'comment': 'This is highly experimental and initial data. '
+ 'There are many known and unknown issues. Please do '
+ 'not use before contacting the Translator responsible '
+ '[email protected]',
'attributions': 'This data is collected by the ARM Climate Research '
+ 'facility. Radar system is operated by the radar '
+ 'engineering team [email protected] and the data is '
+ 'processed by the precipitation radar products '
+ 'team. LP code courtesy of Scott Giangrande, BNL.',
'version': '2.0 lite',
'vap_name': 'cmac',
'known_issues': 'False phidp jumps in insect regions. Still uses '
+ 'old Giangrande code.',
'developers': 'Robert Jackson, ANL. Zachary Sherman, ANL.',
'translator': 'Scott Collis, ANL.',
'mentors': 'Bradley Isom, PNNL., Iosif Lindenmaier, PNNL.',
'Conventions': 'CF/Radial instrument_parameters ARM-1.3'}
else:
if meta_append.lower().endswith('.json'):
with open(meta_append, 'r') as infile:
meta = json.load(infile)
elif meta_append == 'config':
meta = meta_config
else:
raise RuntimeError('Must provide the file name of the json file',
'or say config to use the meta data from',
'config.py')
radar.metadata.clear()
radar.metadata.update(meta)
radar.metadata['command_line'] = command_line
return radar
def area_coverage(radar, precip_threshold=10.0, convection_threshold=40.0):
""" Returns percent coverage of precipitation and convection. """
temp_radar = radar.extract_sweeps([0])
ref = temp_radar.fields['corrected_reflectivity']['data']
total_len = len(ref.flatten())
ref_10_len = len(np.argwhere(ref >= precip_threshold))
ref_40_len = len(np.argwhere(ref >= convection_threshold))
ref_10_per = (ref_10_len/total_len)*100
ref_40_per = (ref_40_len/total_len)*100
del temp_radar
return ref_10_per, ref_40_per
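# For example (illustrative), on a CMAC-processed radar object:
#   precip_pct, convective_pct = area_coverage(cmac_radar)
# returns the percentages of first-sweep gates whose corrected_reflectivity is at
# least 10 dBZ and 40 dBZ respectively.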
def pbb_to_dict(pbb_all):
""" Function that takes the pbb_all array and turns
it into a dictionary to be used and added to the
pyart radar object. """
pbb_dict = {}
pbb_dict['coordinates'] = 'elevation azimuth range'
pbb_dict['units'] = '1'
pbb_dict['data'] = pbb_all
pbb_dict['standard_name'] = 'partial_beam_block'
pbb_dict['long_name'] = 'Partial Beam Block Fraction'
pbb_dict['comment'] = 'Partial beam block fraction due to terrain.'
return pbb_dict
def cbb_to_dict(cbb_all):
""" Function that takes the cbb_all array and turns
it into a dictionary to be used and added to the
pyart radar object. """
cbb_dict = {}
cbb_dict['coordinates'] = 'elevation azimuth range'
cbb_dict['units'] = '1'
cbb_dict['data'] = cbb_all
cbb_dict['standard_name'] = 'cumulative_beam_block'
cbb_dict['long_name'] = 'Cumulative Beam Block Fraction'
cbb_dict['comment'] = 'Cumulative beam block fraction due to terrain.'
return cbb_dict
| EVS-ATMOS/cmac2.0 | cmac/cmac_radar.py | Python | bsd-3-clause | 18,889 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import codecs
try:
from setuptools import setup, Command
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, Command # noqa
from distutils.command.install import INSTALL_SCHEMES
# -*- Distribution Meta -*-
NAME = "django-celery"
import re
re_meta = re.compile(r'__(\w+?)__\s*=\s*(.*)')
re_vers = re.compile(r'VERSION\s*=\s*\((.*?)\)')
re_doc = re.compile(r'^"""(.+?)"""')
rq = lambda s: s.strip("\"'")
def add_default(m):
attr_name, attr_value = m.groups()
return ((attr_name, rq(attr_value)), )
def add_version(m):
v = list(map(rq, m.groups()[0].split(", ")))
return (("VERSION", ".".join(v[0:3]) + "".join(v[3:])), )
def add_doc(m):
return (("doc", m.groups()[0]), )
pats = {re_meta: add_default,
re_vers: add_version,
re_doc: add_doc}
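# For example (illustrative): a line 'VERSION = (3, 0, 11, "rc1")' in the meta
# header is matched by re_vers and add_version stores {"VERSION": "3.0.11rc1"},
# while '__author__ = "Ask Solem"' is matched by re_meta and add_default stores
# {"author": "Ask Solem"}.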
here = os.path.abspath(os.path.dirname(__file__))
meta_fh = open(os.path.join(here, "djcelery/__init__.py"))
try:
meta = {}
for line in meta_fh:
if line.strip() == '# -eof meta-':
break
for pattern, handler in pats.items():
m = pattern.match(line.strip())
if m:
meta.update(handler(m))
finally:
meta_fh.close()
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
src_dir = "djcelery"
def fullsplit(path, result=None):
if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
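# For example (illustrative): fullsplit("djcelery/backends/database.py") returns
# ['djcelery', 'backends', 'database.py'].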
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
SKIP_EXTENSIONS = [".pyc", ".pyo", ".swp", ".swo"]
def is_unwanted_file(filename):
for skip_ext in SKIP_EXTENSIONS:
if filename.endswith(skip_ext):
return True
return False
for dirpath, dirnames, filenames in os.walk(src_dir):
# Ignore dirnames that start with '.'
for i, dirname in enumerate(dirnames):
if dirname.startswith("."):
del dirnames[i]
for filename in filenames:
if filename.endswith(".py"):
packages.append('.'.join(fullsplit(dirpath)))
elif is_unwanted_file(filename):
pass
else:
data_files.append([dirpath, [os.path.join(dirpath, f) for f in
filenames]])
class RunTests(Command):
description = "Run the django test suite from the tests dir."
user_options = []
extra_env = {}
extra_args = []
def run(self):
for env_name, env_value in self.extra_env.items():
os.environ[env_name] = str(env_value)
this_dir = os.getcwd()
testproj_dir = os.path.join(this_dir, "tests")
os.chdir(testproj_dir)
sys.path.append(testproj_dir)
from django.core.management import execute_manager
os.environ["DJANGO_SETTINGS_MODULE"] = os.environ.get(
"DJANGO_SETTINGS_MODULE", "settings")
settings_file = os.environ["DJANGO_SETTINGS_MODULE"]
settings_mod = __import__(settings_file, {}, {}, [''])
prev_argv = list(sys.argv)
try:
sys.argv = [__file__, "test"] + self.extra_args
execute_manager(settings_mod, argv=sys.argv)
finally:
sys.argv = prev_argv
def initialize_options(self):
pass
def finalize_options(self):
pass
class QuickRunTests(RunTests):
extra_env = dict(SKIP_RLIMITS=1, QUICKTEST=1)
class CIRunTests(RunTests):
@property
def extra_args(self):
toxinidir = os.environ.get("TOXINIDIR", "")
return [
"--with-coverage3",
"--cover3-xml",
"--cover3-xml-file=%s" % (
os.path.join(toxinidir, "coverage.xml"), ),
"--with-xunit",
"--xunit-file=%s" % (
os.path.join(toxinidir, "nosetests.xml"), ),
"--cover3-html",
"--cover3-html-dir=%s" % (
os.path.join(toxinidir, "cover"), ),
]
if os.path.exists("README.rst"):
long_description = codecs.open("README.rst", "r", "utf-8").read()
else:
long_description = "See http://github.com/celery/django-celery"
setup(
name=NAME,
version=meta["VERSION"],
description=meta["doc"],
author=meta["author"],
author_email=meta["contact"],
url=meta["homepage"],
platforms=["any"],
license="BSD",
packages=packages,
data_files=data_files,
scripts=["bin/djcelerymon"],
zip_safe=False,
install_requires=[
"pytz",
"celery>=3.0.11",
],
cmdclass={"test": RunTests,
"quicktest": QuickRunTests,
"citest": CIRunTests},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Django",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX",
"Topic :: Communications",
"Topic :: System :: Distributed Computing",
"Topic :: Software Development :: Libraries :: Python Modules",
],
entry_points={
"console_scripts": ["djcelerymon = djcelery.mon:main"],
},
long_description=long_description,
)
| planorama/django-celery | setup.py | Python | bsd-3-clause | 5,472 |
# This is essentially a slimmed down mapreduce. There are some differences with the sharding logic
# and the whole thing leverages defer and there's no reducing, just mapping.
# If you're wondering why we're not using MR here...
# 1. We don't want a hard dependency on it and migrations are core (unlike stuff in contrib)
# 2. MR is massive overkill for what we're doing here
import copy
import cPickle
import logging
from datetime import datetime
from django.conf import settings
from google.appengine.api import datastore, datastore_errors
from google.appengine.api.taskqueue.taskqueue import _DEFAULT_QUEUE
from google.appengine.ext import deferred
from google.appengine.runtime import DeadlineExceededError
class Redefer(Exception):
""" Custom exception class to allow triggering of the re-deferring of a processing task. """
pass
def _mid_string(string1, string2):
""" Given 2 unicode strings, return the string that is alphabetically half way between them. """
# Put the strings in order, so the lowest one is lhs
lhs = min(string1, string2)
rhs = max(string1, string2)
# Pad out the shorter string so that they're both the same length
longest_length = max(len(lhs), len(rhs))
lhs = lhs.ljust(longest_length, "\0")
# For each position in the strings, find the mid character
mid = []
for l, r in zip(lhs, rhs):
l = ord(l)
r = ord(r)
mid.append(l + (r - l) / 2)
# Note that some of the numbers might be invalid unicode values, but for the purposes of
# filtering Datastore keys it doesn't matter
result = u"".join([unichr(x) for x in mid])
# Strings starting with a double underscore are not valid Datastore keys
if result.startswith(u"__"):
result = u"_`" + result[2:]
return result
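# A couple of illustrative values for the helper above (Python 2 integer division
# floors towards the lower string when the gap is a single code point):
#   _mid_string(u"a", u"c") == u"b"
#   _mid_string(u"a", u"b") == u"a"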
def _next_string(string):
""" Given a string (or unicode), return the alphabetically next string. """
# Note that in python 2 at least, unicode is 16 bit, and therefore some characters (e.g. emoji)
# are encoded as 2 characters, so when we slice the last "character" off the string we're
# actually getting half a character, and then when we increment it we're possibly creating an
# invalid character, but for the purpose of ordering Datastore keys it shouldn't matter
try:
# Try to increment the last character by 1
return string[:-1] + unichr(ord(string[-1]) + 1)
except ValueError:
# If the last character was already the highest possible unicode value, then instead add
# another character to the string
return string + unichr(1)
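# For example (illustrative): _next_string(u"abc") == u"abd"; if the last character
# is already the highest possible code point, unichr(1) is appended instead.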
def _next_key(key):
"""
Given a key, this returns key + 1. In the case of key names
we simply calculate the next alphabetical key
"""
val = key.id_or_name()
if isinstance(val, basestring):
return datastore.Key.from_path(
key.kind(),
_next_string(val),
namespace=key.namespace()
)
else:
return datastore.Key.from_path(
key.kind(),
val + 1,
namespace=key.namespace()
)
def _mid_key(key1, key2):
"""
    Given two keys, return the key mid-way between them. Integer IDs are split
    arithmetically; string names are handled via _mid_string.
"""
key1_val = key1.id_or_name()
key2_val = key2.id_or_name()
if type(key1_val) != type(key2_val):
raise NotImplementedError(
"Sharding of entities with mixed integer and string types is not yet supported."
)
if isinstance(key1_val, basestring):
mid_id_or_name = _mid_string(key1_val, key2_val)
else:
mid_id_or_name = key1_val + ((key2_val - key1_val) // 2)
return datastore.Key.from_path(
key1.kind(),
mid_id_or_name,
namespace=key1.namespace()
)
def _get_range(key1, key2):
""" Given 2 Datastore keys, return the range that their IDs span.
E.g. if the IDs are 7 and 100, then the range is 93.
Works for string-based keys as well, but returns a string representation of the difference.
"""
val1 = key1.id_or_name()
val2 = key2.id_or_name()
if type(val1) != type(val2):
raise Exception("Cannot calculate range between keys of different types.")
if isinstance(val1, (int, long)):
return val2 - val1
# Otherwise, the values are strings...
# Put the strings in order, so the lowest one is lhs
lhs = min(val1, val2)
rhs = max(val1, val2)
# Pad out the shorter string so that they're both the same length
longest_length = max(len(lhs), len(rhs))
lhs = lhs.ljust(longest_length, "\0")
# For each position in the strings, find the difference
diffs = []
for l, r in zip(lhs, rhs):
diffs.append(ord(r) - ord(l))
# We return this "difference" as a string
return u"".join([unichr(x) for x in diffs])
def _generate_shards(keys, shard_count):
"""
Given a set of keys with:
- The first key being the lowest in the range
- The last key being the highest in the range
- The other keys being evenly distributed (e.g. __scatter__)
    This function returns a list of [start_key, end_key] shards to cover the range.
    It may not return shard_count shards if there aren't enough split points.
"""
keys = sorted(keys) # Ensure the keys are sorted
# Special case single key
if shard_count == 1:
return [[keys[0], keys[-1]]]
elif shard_count < len(keys):
        # If there are more split point keys than we need, then we have to calculate a
        # stride to skip some of the split point keys so we return shard_count shards
index_stride = len(keys) / float(shard_count)
keys = [keys[int(round(index_stride * i))] for i in range(1, shard_count)]
shards = []
for i in xrange(len(keys) - 1):
shards.append([keys[i], keys[i + 1]])
return shards
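# A small illustrative case: with sorted keys [k0, k1, k2] and shard_count=4 there
# are fewer split points than requested shards, so this returns [[k0, k1], [k1, k2]]
# and shard_query() below subdivides the largest shard until it has enough.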
def _find_largest_shard(shards):
"""
Given a list of shards, where each shard is a pair of (lowest_key, highest_key),
return the shard with the largest ID range
"""
largest_shard = None
range_of_largest_shard = None
for shard in shards:
if largest_shard is None:
largest_shard = shard
range_of_largest_shard = _get_range(shard[0], shard[1])
else:
this_range = _get_range(shard[0], shard[1])
if this_range > range_of_largest_shard:
largest_shard = shard
range_of_largest_shard = _get_range(shard[0], shard[1])
return largest_shard
def shard_query(query, shard_count):
""" Given a datastore.Query object and a number of shards, return a list of shards where each
shard is a pair of (low_key, high_key).
May return fewer than `shard_count` shards in cases where there aren't many entities.
"""
OVERSAMPLING_MULTIPLIER = 32 # This value is used in Mapreduce
try:
query.Order("__key__")
min_id = query.Run().next().key()
query.Order(("__key__", query.DESCENDING))
max_id = query.Run().next().key()
except StopIteration:
# No objects, so no shards
return []
query.Order("__scatter__") # Order by the scatter property
# Get random keys to shard on
keys = [x.key() for x in query.Get(shard_count * OVERSAMPLING_MULTIPLIER)]
keys.sort()
if not keys: # If no keys...
        # Shard on the upper and lower PKs in the query; this is *not* efficient
keys = [min_id, max_id]
else:
if keys[0] != min_id:
keys.insert(0, min_id)
if keys[-1] != max_id or min_id == max_id:
keys.append(max_id)
# We generate as many shards as we can, but if it's not enough then we
# iterate, splitting the largest shard into two shards until either:
# - we hit the desired shard count
# - we can't subdivide anymore
shards = _generate_shards(keys, shard_count)
while True:
if len(shards) >= shard_count:
break
# If we don't have enough shards, divide the largest key range until we have enough
largest_shard = _find_largest_shard(shards)
# OK we can't shard anymore, just bail
if largest_shard[0] == largest_shard[1]:
break
left_shard = [
largest_shard[0],
_mid_key(largest_shard[0], largest_shard[1])
]
right_shard = [
_next_key(_mid_key(largest_shard[0], largest_shard[1])),
largest_shard[1]
]
# We already have these shards, just give up now
if left_shard in shards and right_shard in shards:
break
shards.remove(largest_shard)
if left_shard not in shards:
shards.append(left_shard)
if right_shard not in shards:
shards.append(right_shard)
shards.sort()
assert len(shards) <= shard_count
    # We shift the end key of each shard up by one, so we can
    # do a >= and < query
for shard in shards:
shard[1] = _next_key(shard[1])
return shards
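# Minimal usage sketch (not part of the original module); the kind name below is
# hypothetical and `datastore.Query` is the App Engine query class used throughout
# this module:
#
#   query = datastore.Query("MyModel")
#   for low_key, high_key in shard_query(query, shard_count=8):
#       # each shard covers low_key <= __key__ < high_key and can be processed
#       # in its own deferred task
#       ...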
class ShardedTaskMarker(datastore.Entity):
""" Manages the running of an operation over the entities of a query using multiple processing
tasks. Stores details of the current state on itself as an Entity in the Datastore.
"""
KIND = "_djangae_migration_task"
QUEUED_KEY = "shards_queued"
RUNNING_KEY = "shards_running"
FINISHED_KEY = "shards_finished"
def __init__(self, identifier, query, *args, **kwargs):
kwargs["kind"] = self.KIND
kwargs["name"] = identifier
super(ShardedTaskMarker, self).__init__(*args, **kwargs)
self[ShardedTaskMarker.QUEUED_KEY] = []
self[ShardedTaskMarker.RUNNING_KEY] = []
self[ShardedTaskMarker.FINISHED_KEY] = []
self["time_started"] = None
self["time_finished"] = None
self["query"] = cPickle.dumps(query)
self["is_finished"] = False
@classmethod
def get_key(cls, identifier, namespace):
return datastore.Key.from_path(
cls.KIND,
identifier,
namespace=namespace
)
def put(self, *args, **kwargs):
if not self["is_finished"]:
# If we aren't finished, see if we are now
            # This if-statement is important because a task with no shards is marked
            # 'finished' manually straight away, and we don't want to incorrectly
            # flip that back to False here when we save
self["is_finished"] = bool(
not self[ShardedTaskMarker.QUEUED_KEY] and
not self[ShardedTaskMarker.RUNNING_KEY] and
self[ShardedTaskMarker.FINISHED_KEY]
)
if self["is_finished"]:
self["time_finished"] = datetime.utcnow()
datastore.Put(self)
def run_shard(
self, original_query, shard, operation, operation_method=None, offset=0,
entities_per_task=None, queue=_DEFAULT_QUEUE
):
""" Given a datastore.Query which does not have any high/low bounds on it, apply the bounds
of the given shard (which is a pair of keys), and run either the given `operation`
(if it's a function) or the given method of the given operation (if it's an object) on
each entity that the query returns, starting at entity `offset`, and redeferring every
`entities_per_task` entities to avoid hitting DeadlineExceededError.
Tries (but does not guarantee) to avoid processing the same entity more than once.
"""
entities_per_task = entities_per_task or getattr(
settings, "DJANGAE_MIGRATION_DEFAULT_ENTITIES_PER_TASK", 100
)
if operation_method:
function = getattr(operation, operation_method)
else:
function = operation
marker = datastore.Get(self.key())
if cPickle.dumps(shard) not in marker[ShardedTaskMarker.RUNNING_KEY]:
return
# Copy the query so that we can re-defer the original, unadulterated version, because once
# we've applied limits and ordering to the query it causes pickle errors with defer.
query = copy.deepcopy(original_query)
query.Order("__key__")
query["__key__ >="] = shard[0]
query["__key__ <"] = shard[1]
num_entities_processed = 0
try:
results = query.Run(offset=offset, limit=entities_per_task)
for entity in results:
function(entity)
num_entities_processed += 1
if num_entities_processed >= entities_per_task:
raise Redefer()
except (DeadlineExceededError, Redefer):
# By keeping track of how many entities we've processed, we can (hopefully) avoid
# re-processing entities if we hit DeadlineExceededError by redeferring with the
# incremented offset. But note that if we get crushed by the HARD DeadlineExceededError
# before we can redefer, then the whole task will retry and so entities will get
# processed twice.
deferred.defer(
self.run_shard,
original_query,
shard,
operation,
operation_method,
offset=offset+num_entities_processed,
entities_per_task=entities_per_task,
# Defer this task onto the correct queue (with `_queue`), passing the `queue`
# parameter back to the function again so that it can do the same next time
queue=queue,
_queue=queue,
)
return # This is important!
# Once we've run the operation on all the entities, mark the shard as done
def txn():
pickled_shard = cPickle.dumps(shard)
marker = datastore.Get(self.key())
marker.__class__ = ShardedTaskMarker
marker[ShardedTaskMarker.RUNNING_KEY].remove(pickled_shard)
marker[ShardedTaskMarker.FINISHED_KEY].append(pickled_shard)
marker.put()
datastore.RunInTransaction(txn)
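    # Illustrative note (not part of the original class): with entities_per_task=100,
    # a task that processes 100 entities raises Redefer and re-queues itself with
    # offset=offset+100, so the next task resumes at the 101st entity of this shard;
    # a soft DeadlineExceededError part-way through redefers with whatever count had
    # been processed at that point.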
def begin_processing(self, operation, operation_method, entities_per_task, queue):
BATCH_SIZE = 3
# Unpickle the source query
query = cPickle.loads(str(self["query"]))
def txn():
try:
marker = datastore.Get(self.key())
marker.__class__ = ShardedTaskMarker
queued_shards = marker[ShardedTaskMarker.QUEUED_KEY]
processing_shards = marker[ShardedTaskMarker.RUNNING_KEY]
queued_count = len(queued_shards)
for j in xrange(min(BATCH_SIZE, queued_count)):
pickled_shard = queued_shards.pop()
processing_shards.append(pickled_shard)
shard = cPickle.loads(str(pickled_shard))
deferred.defer(
self.run_shard,
query,
shard,
operation,
operation_method,
entities_per_task=entities_per_task,
# Defer this task onto the correct queue with `_queue`, passing the `queue`
# parameter back to the function again so that it can do the same next time
queue=queue,
_queue=queue,
_transactional=True,
)
marker.put()
except datastore_errors.EntityNotFoundError:
logging.error(
"Unable to start task %s as marker is missing",
self.key().id_or_name()
)
return
# Reload the marker (non-transactionally) and defer the shards in batches
# transactionally. If this task fails somewhere, it will resume where it left off
marker = datastore.Get(self.key())
for i in xrange(0, len(marker[ShardedTaskMarker.QUEUED_KEY]), BATCH_SIZE):
datastore.RunInTransaction(txn)
def start_mapping(
identifier, query, operation, operation_method=None, shard_count=None,
entities_per_task=None, queue=None
):
""" This must *transactionally* defer a task which will call `operation._wrapped_map_entity` on
all entities of the given `kind` in the given `namespace` and will then transactionally
        update the entity of the given task marker key with `is_finished=True` after all
entities have been mapped.
"""
shard_count = shard_count or getattr(settings, "DJANGAE_MIGRATION_DEFAULT_SHARD_COUNT", 32)
shards_to_run = shard_query(query, shard_count)
queue = queue or getattr(settings, "DJANGAE_MIGRATION_DEFAULT_QUEUE", _DEFAULT_QUEUE)
def txn(shards):
marker_key = ShardedTaskMarker.get_key(identifier, query._Query__namespace)
try:
datastore.Get(marker_key)
# If the marker already exists, don't do anything - just return
return
except datastore_errors.EntityNotFoundError:
pass
marker = ShardedTaskMarker(identifier, query, namespace=query._Query__namespace)
if shards:
for shard in shards:
marker["shards_queued"].append(cPickle.dumps(shard))
else:
# No shards, then there is nothing to do!
marker["is_finished"] = True
marker["time_started"] = datetime.utcnow()
marker.put()
if not marker["is_finished"]:
deferred.defer(
marker.begin_processing, operation, operation_method, entities_per_task, queue,
_transactional=True, _queue=queue
)
return marker_key
return datastore.RunInTransaction(txn, shards_to_run)
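# Minimal usage sketch (not part of the original module); the identifier, kind and
# operation object below are hypothetical:
#
#   query = datastore.Query("MyModel")
#   start_mapping("backfill-mymodel", query, my_operation,
#                 operation_method="_wrapped_map_entity")
#   # ...later, poll progress with:
#   is_mapper_finished("backfill-mymodel", namespace=None)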
def mapper_exists(identifier, namespace):
"""
Returns True if the mapper exists, False otherwise
"""
try:
datastore.Get(ShardedTaskMarker.get_key(identifier, namespace))
return True
except datastore_errors.EntityNotFoundError:
return False
def is_mapper_finished(identifier, namespace):
"""
Returns True if the mapper exists, and it's not running.
"""
return mapper_exists(identifier, namespace) and not is_mapper_running(identifier, namespace)
def is_mapper_running(identifier, namespace):
"""
Returns True if the mapper exists, but it's not finished
"""
try:
marker = datastore.Get(ShardedTaskMarker.get_key(identifier, namespace))
return not marker["is_finished"]
except datastore_errors.EntityNotFoundError:
return False
| grzes/djangae | djangae/db/migrations/mapper_library.py | Python | bsd-3-clause | 18,863 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import AnonymousUser
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.utils import translation
from pages.cache import cache
from pages.models import PageSlugContent, PageMetaContent
from pages.models import PageTextContent
from pages.views import PageDetailsView
from pagesext.tests.base import PagesExtCase
from pagesext.models import PageTagsContent, PageImageContent, PageVideoContent, PageFileContent
class TestExtPages(PagesExtCase):
def test_page_tags_model(self):
PageTagsContent.objects.create(page=self.page_foo)
obj = PageTagsContent.objects.filter(page=self.page_foo, language='en')[0]
sid = obj.sid
self.assertEqual(sid, 'en:Test:tags:1')
obj.language = 'de'
obj.save()
self.assertEqual(obj.sid, 'de:Test:tags:1')
def test_page_tags_api(self):
PageTagsContent.objects.create(page=self.page_foo)
obj = PageTagsContent.objects.filter(page=self.page_foo, language='en')[0]
sid = obj.sid
self.assertEqual(sid, 'en:Test:tags:1')
obj.add('tag1', 'tag2')
names = sorted(list(obj.tags.names()))
self.assertEqual(names[0], 'tag1')
self.assertEqual(names[1], 'tag2')
obj.set('tag3', 'tag4')
names = sorted(list(obj.tags.names()))
self.assertEqual(names[0], 'tag3')
self.assertEqual(names[1], 'tag4')
slugs = sorted(list(obj.tags.slugs()))
self.assertEqual(slugs[0], 'tag3')
self.assertEqual(slugs[1], 'tag4')
PageTagsContent.objects.create(page=self.page_foo2)
obj2 = PageTagsContent.objects.filter(page=self.page_foo2, language='en')[0]
obj2.add('tag4', 'tag5')
objects = obj.similar_objects()
self.assertEqual(objects[0], obj2)
obj.remove('tag3')
names = obj.tags.names()
self.assertEqual(len(names), 1)
obj.clear()
names = obj.tags.names()
self.assertEqual(len(names), 0)
def test_page_image_model(self):
PageImageContent.objects.create(page=self.page_foo)
obj = PageImageContent.objects.filter(page=self.page_foo, language='en')[0]
sid = obj.sid
self.assertEqual(sid, 'en:Test:image:1')
obj.language = 'de'
obj.save()
self.assertEqual(obj.sid, 'de:Test:image:1')
def test_page_video_model(self):
PageVideoContent.objects.create(page=self.page_foo)
obj = PageVideoContent.objects.filter(page=self.page_foo, language='en')[0]
sid = obj.sid
self.assertEqual(sid, 'en:Test:video:1')
obj.language = 'de'
obj.save()
self.assertEqual(obj.sid, 'de:Test:video:1')
def test_page_file_model(self):
PageFileContent.objects.create(page=self.page_foo)
obj = PageFileContent.objects.filter(page=self.page_foo, language='en')[0]
sid = obj.sid
self.assertEqual(sid, 'en:Test:file:1')
obj.language = 'de'
obj.save()
self.assertEqual(obj.sid, 'de:Test:file:1')
def test_page_tags_view(self):
PageSlugContent.objects.create(page=self.page_foo, slug='test')
PageMetaContent.objects.create(page=self.page_foo, title='test', description='test', keywords='test')
PageTextContent.objects.create(page=self.page_foo, text='test')
PageTagsContent.objects.create(page=self.page_foo)
obj = PageTagsContent.objects.filter(page=self.page_foo, language='en')[0]
obj.add('tag1', 'tag2')
self.page_foo.template = 'pages/page_tags.html'
self.page_foo.save()
page_url = reverse('pages:show', kwargs={'slug': 'test'})
request = self.factory.get(page_url)
request.user = AnonymousUser()
context = RequestContext(request)
view = PageDetailsView.as_view()
translation.activate('en')
response = view(request=request, context=context, slug='test')
translation.deactivate()
self.assertEqual(response.status_code, 200)
self.page_foo.delete()
cache.clear()
def test_page_image_view(self):
PageSlugContent.objects.create(page=self.page_foo, slug='test')
PageMetaContent.objects.create(page=self.page_foo, title='test', description='test', keywords='test')
PageTextContent.objects.create(page=self.page_foo, text='test')
PageImageContent.objects.create(page=self.page_foo, image='img/test.jpg', title='test')
img1 = PageImageContent.objects.create(page=self.page_foo, image='img/test.jpg', title='test')
img1.tags.add('test', 'image-1')
img2 = PageImageContent.objects.create(page=self.page_foo, image='img/test.jpg', title='test')
img2.tags.add('test', 'image-2')
self.page_foo.template = 'pages/page_image.html'
self.page_foo.save()
page_url = reverse('pages:show', kwargs={'slug': 'test'})
request = self.factory.get(page_url)
request.user = AnonymousUser()
context = RequestContext(request)
view = PageDetailsView.as_view()
translation.activate('en')
response = view(request=request, context=context, slug='test')
translation.deactivate()
self.assertEqual(response.status_code, 200)
self.page_foo.delete()
cache.clear()
def test_page_video_view(self):
PageSlugContent.objects.create(page=self.page_foo, slug='test')
PageMetaContent.objects.create(page=self.page_foo, title='test', description='test', keywords='test')
PageTextContent.objects.create(page=self.page_foo, text='test')
PageVideoContent.objects.create(
page=self.page_foo, video='https://www.youtube.com/watch?v=C0DPdy98e4c', title='test'
)
video1 = PageVideoContent.objects.create(
page=self.page_foo, video='https://www.youtube.com/watch?v=C0DPdy98e4c', title='test'
)
video1.tags.add('test', 'video-1')
video2 = PageVideoContent.objects.create(
page=self.page_foo, video='https://www.youtube.com/watch?v=C0DPdy98e4c', title='test'
)
video2.tags.add('test', 'video-2')
self.page_foo.template = 'pages/page_video.html'
self.page_foo.save()
page_url = reverse('pages:show', kwargs={'slug': 'test'})
request = self.factory.get(page_url)
request.user = AnonymousUser()
context = RequestContext(request)
view = PageDetailsView.as_view()
translation.activate('en')
response = view(request=request, context=context, slug='test')
translation.deactivate()
self.assertEqual(response.status_code, 200)
self.page_foo.delete()
cache.clear()
def test_page_file_view(self):
PageSlugContent.objects.create(page=self.page_foo, slug='test')
PageMetaContent.objects.create(page=self.page_foo, title='test', description='test', keywords='test')
PageTextContent.objects.create(page=self.page_foo, text='test')
PageFileContent.objects.create(page=self.page_foo, file='files/test.txt', title='test')
file1 = PageFileContent.objects.create(page=self.page_foo, file='files/test.txt', title='test')
file1.tags.add('test', 'file-1')
file2 = PageFileContent.objects.create(page=self.page_foo, file='files/test.txt', title='test')
file2.tags.add('test', 'file-2')
self.page_foo.template = 'pages/page_file.html'
self.page_foo.save()
page_url = reverse('pages:show', kwargs={'slug': 'test'})
request = self.factory.get(page_url)
request.user = AnonymousUser()
context = RequestContext(request)
view = PageDetailsView.as_view()
translation.activate('en')
response = view(request=request, context=context, slug='test')
translation.deactivate()
self.assertEqual(response.status_code, 200)
self.page_foo.delete()
cache.clear()
| dlancer/django-pages-cms-extensions | pagesext/tests/test_pages.py | Python | bsd-3-clause | 8,065 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# django-kaio documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 13 12:57:40 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.githubpages',
'sphinx.ext.todo'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'django-kaio'
copyright = '2017, APSL'
author = 'APSL'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.14.0'
# The full version, including alpha/beta/rc tags.
release = '0.14.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
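# Illustrative example (not part of the original config): options accepted by the
# sphinx_rtd_theme selected above typically include, e.g.
#
# html_theme_options = {
#     'collapse_navigation': False,
#     'navigation_depth': 3,
# }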
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-kaiodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'django-kaio.tex', 'django-kaio Documentation',
'APSL', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'django-kaio', 'django-kaio Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'django-kaio', 'django-kaio Documentation',
author, 'django-kaio', 'One line description of project.',
'Miscellaneous'),
]
| APSL/django-kaio | docs/conf.py | Python | bsd-3-clause | 4,821 |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test message sending before handshake completion.
A node should never send anything other than VERSION/VERACK/REJECT until it's
received a VERACK.
This test connects to a node and sends it a few messages, trying to entice it
into sending us something it shouldn't.
Also test that nodes that send unsupported service bits to bitcoind are disconnected
and don't receive a VERACK. Unsupported service bits are currently 1 << 5 and
1 << 7 (until August 1st 2018)."""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
banscore = 10
class CLazyNode(P2PInterface):
def __init__(self):
super().__init__()
self.unexpected_msg = False
self.ever_connected = False
def bad_message(self, message):
self.unexpected_msg = True
self.log.info("should not have received message: %s" % message.command)
def on_open(self):
self.ever_connected = True
def on_version(self, message): self.bad_message(message)
def on_verack(self, message): self.bad_message(message)
def on_reject(self, message): self.bad_message(message)
def on_inv(self, message): self.bad_message(message)
def on_addr(self, message): self.bad_message(message)
def on_getdata(self, message): self.bad_message(message)
def on_getblocks(self, message): self.bad_message(message)
def on_tx(self, message): self.bad_message(message)
def on_block(self, message): self.bad_message(message)
def on_getaddr(self, message): self.bad_message(message)
def on_headers(self, message): self.bad_message(message)
def on_getheaders(self, message): self.bad_message(message)
def on_ping(self, message): self.bad_message(message)
def on_mempool(self, message): self.bad_message(message)
def on_pong(self, message): self.bad_message(message)
def on_feefilter(self, message): self.bad_message(message)
def on_sendheaders(self, message): self.bad_message(message)
def on_sendcmpct(self, message): self.bad_message(message)
def on_cmpctblock(self, message): self.bad_message(message)
def on_getblocktxn(self, message): self.bad_message(message)
def on_blocktxn(self, message): self.bad_message(message)
# Node that never sends a version. We'll use this to send a bunch of messages
# anyway, and eventually get disconnected.
class CNodeNoVersionBan(CLazyNode):
    # Send a bunch of veracks without first sending a version message. This should get us disconnected.
# NOTE: implementation-specific check here. Remove if bitcoind ban behavior changes
def on_open(self):
super().on_open()
for i in range(banscore):
self.send_message(msg_verack())
def on_reject(self, message): pass
# Node that never sends a version. This one just sits idle and hopes to receive
# any message (it shouldn't!)
class CNodeNoVersionIdle(CLazyNode):
def __init__(self):
super().__init__()
# Node that sends a version but not a verack.
class CNodeNoVerackIdle(CLazyNode):
def __init__(self):
self.version_received = False
super().__init__()
def on_reject(self, message): pass
def on_verack(self, message): pass
# When version is received, don't reply with a verack. Instead, see if the
# node will give us a message that it shouldn't. This is not an exhaustive
# list!
def on_version(self, message):
self.version_received = True
self.send_message(msg_ping())
self.send_message(msg_getaddr())
class P2PLeakTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-banscore='+str(banscore)]]
def run_test(self):
self.nodes[0].setmocktime(1501545600) # August 1st 2017
no_version_bannode = self.nodes[0].add_p2p_connection(CNodeNoVersionBan(), send_version=False)
no_version_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVersionIdle(), send_version=False)
no_verack_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVerackIdle())
unsupported_service_bit5_node = self.nodes[0].add_p2p_connection(CLazyNode(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_5)
unsupported_service_bit7_node = self.nodes[0].add_p2p_connection(CLazyNode(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_7)
wait_until(lambda: no_version_bannode.ever_connected, timeout=10, lock=mininode_lock)
wait_until(lambda: no_version_idlenode.ever_connected, timeout=10, lock=mininode_lock)
wait_until(lambda: no_verack_idlenode.version_received, timeout=10, lock=mininode_lock)
wait_until(lambda: unsupported_service_bit5_node.ever_connected, timeout=10, lock=mininode_lock)
wait_until(lambda: unsupported_service_bit7_node.ever_connected, timeout=10, lock=mininode_lock)
# Mine a block and make sure that it's not sent to the connected nodes
self.nodes[0].generate(1)
#Give the node enough time to possibly leak out a message
time.sleep(5)
#This node should have been banned
assert not no_version_bannode.is_connected
# These nodes should have been disconnected
assert not unsupported_service_bit5_node.is_connected
assert not unsupported_service_bit7_node.is_connected
self.nodes[0].disconnect_p2ps()
# Wait until all connections are closed
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 0)
# Make sure no unexpected messages came in
assert(no_version_bannode.unexpected_msg == False)
assert(no_version_idlenode.unexpected_msg == False)
assert(no_verack_idlenode.unexpected_msg == False)
assert not unsupported_service_bit5_node.unexpected_msg
assert not unsupported_service_bit7_node.unexpected_msg
self.log.info("Service bits 5 and 7 are allowed after August 1st 2018")
self.nodes[0].setmocktime(1533168000) # August 2nd 2018
allowed_service_bit5_node = self.nodes[0].add_p2p_connection(P2PInterface(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_5)
allowed_service_bit7_node = self.nodes[0].add_p2p_connection(P2PInterface(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_7)
wait_until(lambda: allowed_service_bit5_node.message_count["verack"], lock=mininode_lock)
wait_until(lambda: allowed_service_bit7_node.message_count["verack"], lock=mininode_lock)
if __name__ == '__main__':
P2PLeakTest().main()
| jmcorgan/bitcoin | test/functional/p2p_leak.py | Python | mit | 6,730 |
from __future__ import print_function, division
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
"""
Setup:
* in_to_cell init weights are now Normal(1.0)
* output all appliances
* fix bug in RealApplianceSource
* use cross-entropy
* smaller network
* power targets
* trying without first two sigmoid layers.
* updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0
which fixes LSTM bug.
https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0
* Subsampling *bidirectional* LSTM
* Output every sequence in the batch
* Change W_in_to_cell from Normal(1.0) to Uniform(5)
* put back the two sigmoid layers
* use Conv1D to create a hierarchical subsampling LSTM
* Using LSTM (not BLSTM) to speed up training while testing
* Use dimshuffle not reshape
* 2 dense layers back
* back to default init
* conv between LSTMs.
* More data
* BLSTM
* Try just using a 1D convnet on input
* add second Convnet layer (not sure this is correct thing to do?)
* third conv layer
* large inits
* back to 2 conv layers
e70
* Based on e65
* Using sigmoid instead of rectify in Conv1D layers
e71
* Larger layers
* More data
e72
* Add a third conv layer
e73
* Add a dense layer after 3 conv layers
e74
* Removed dense layer after 3 conv layers (because it failed to learn anything)
* Trying standard inits for weights and biases throughout network.
e75
* Putting back large init for first layer
e76
* Removed 3rd conv layer
e77
* Try init Uniform(1)
e78
* Back to large inits for first layers
* Trying 3rd conv layer, also with large init
e79
* Trying to merge 1D conv on bottom layer with hierarchical subsampling
from e59a.
* Replace first LSTM with BLSTM
* Add second BLSTM layer
* Add conv1d between BLSTM layers.
e80
* Remove third 1d conv layer
e81
* Change num_filters in conv layer between BLSTMs from 20 to 80
e83
* Same net as e81
* Using different appliances, longer seq, and validation on house 5
  (unseen during training!). Might be unfair because, for example,
  house 1 doesn't ever use its washer dryer in drying mode but
  house 5 does.
* Using a seq_length of 4000 resulted in NaNs very quickly.
  Dropping to 2000 resulted in NaNs after 100 epochs;
  1000 resulted in NaNs after 4500 epochs.
e83b
* Back to seq_length of 2000, modified net.py so it called IPDB
if train error is NaN or > 1
e83c
* Changed inits to standard values to try to stop NaN train costs
  Results: I let it run for a little over 100 epochs. No NaNs, but it
  wasn't learning anything very sane.
e83d
* Uniform(1)
e83e
* Try adagrad
e84
* Trying to find minimial example which gets NaNs
RESULT: Blows up after 19 epochs! Yay!
e85
* Try adagrad
RESULTS at different learning rates:
* 1 goes to NaN within 2 epochs ;(
* 0.01 went to NaN in 13 epochs
* 0.0001 doesn't go to NaN after 1000 epochs and may even be starting to learn something!
* 0.001 (e85b) doesn't go to NaN for at least ~140 epochs
e86
* Trying larger network again (with adagrad with learning rate 0.001)
* Doesn't go to NaN (after >770 epochs) and learns something very vaguely useful
but not great. At all. Doesn't discriminate between appliances.
e87
* Network like e82. Just LSTM -> Conv -> LSTM -> Dense.
* More data
Results
"""
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'kettle',
'dish washer',
['washer dryer', 'washing machine'],
'microwave'
],
max_appliance_powers=[300, 3000, 2500, 2400, 2000],
on_power_thresholds=[80, 200, 20, 600, 100],
min_on_durations=[60, 10, 300, 300, 10],
window=("2013-05-22", "2015-01-01"),
seq_length=2000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
subsample_target=5,
train_buildings=[1,2,3,4],
validation_buildings=[5]
)
net = Net(
experiment_name="e87",
source=source,
save_plot_interval=50,
loss_function=crossentropy,
updates=partial(adagrad, learning_rate=0.001),
layers_config=[
{
'type': LSTMLayer, # TODO change to BLSTM
'num_units': 60,
'W_in_to_cell': Uniform(5)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 80,
'filter_length': 5,
'stride': 5,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': LSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5)
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
net.print_net()
net.compile()
net.fit()
| mmottahedi/neuralnilm_prototype | scripts/e87.py | Python | mit | 5,219 |
import os
import pytest
import yaml
from mock import patch, Mock
from teuthology.task import install
class TestInstall(object):
def _get_default_package_list(self, project='ceph', debug=False):
path = os.path.join(
os.path.dirname(__file__),
'..', '..', 'task', 'install', 'packages.yaml',
)
pkgs = yaml.safe_load(open(path))[project]
if not debug:
pkgs['deb'] = [p for p in pkgs['deb']
if not p.endswith('-dbg')]
pkgs['rpm'] = [p for p in pkgs['rpm']
if not p.endswith('-debuginfo')]
return pkgs
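    # Illustrative sketch (not part of the original tests): packages.yaml is read as
    # {project: {'deb': [...], 'rpm': [...]}}, e.g. roughly
    #   ceph:
    #     deb: [ceph, ceph-dbg, ...]
    #     rpm: [ceph, ceph-debuginfo, ...]
    # so filtering out the -dbg/-debuginfo entries above yields the non-debug list.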
def test_get_package_list_debug(self):
default_pkgs = self._get_default_package_list(debug=True)
default_pkgs['rpm'].sort()
default_pkgs['deb'].sort()
config = dict(debuginfo=True)
result = install.get_package_list(ctx=None, config=config)
result['rpm'].sort()
result['deb'].sort()
assert result == default_pkgs
def test_get_package_list_no_debug(self):
default_pkgs = self._get_default_package_list(debug=False)
default_pkgs['rpm'].sort()
default_pkgs['deb'].sort()
config = dict(debuginfo=False)
result = install.get_package_list(ctx=None, config=config)
result['rpm'].sort()
result['deb'].sort()
assert result == default_pkgs
def test_get_package_list_custom_rpm(self):
default_pkgs = self._get_default_package_list(debug=False)
default_pkgs['rpm'].sort()
default_pkgs['deb'].sort()
rpms = ['rpm1', 'rpm2', 'rpm2-debuginfo']
config = dict(packages=dict(rpm=rpms))
result = install.get_package_list(ctx=None, config=config)
result['rpm'].sort()
result['deb'].sort()
assert result['rpm'] == ['rpm1', 'rpm2']
assert result['deb'] == default_pkgs['deb']
@patch("teuthology.task.install._get_builder_project")
@patch("teuthology.task.install.packaging.get_package_version")
def test_get_upgrade_version(self, m_get_package_version,
m_gitbuilder_project):
gb = Mock()
gb.version = "11.0.0"
gb.project = "ceph"
m_gitbuilder_project.return_value = gb
m_get_package_version.return_value = "11.0.0"
install.get_upgrade_version(Mock(), Mock(), Mock())
@patch("teuthology.task.install._get_builder_project")
@patch("teuthology.task.install.packaging.get_package_version")
def test_verify_ceph_version_success(self, m_get_package_version,
m_gitbuilder_project):
gb = Mock()
gb.version = "0.89.0"
gb.project = "ceph"
m_gitbuilder_project.return_value = gb
m_get_package_version.return_value = "0.89.0"
config = dict()
install.verify_package_version(Mock(), config, Mock())
@patch("teuthology.task.install._get_builder_project")
@patch("teuthology.task.install.packaging.get_package_version")
def test_verify_ceph_version_failed(self, m_get_package_version,
m_gitbuilder_project):
gb = Mock()
gb.version = "0.89.0"
gb.project = "ceph"
m_gitbuilder_project.return_value = gb
m_get_package_version.return_value = "0.89.1"
config = dict()
with pytest.raises(RuntimeError):
install.verify_package_version(Mock(), config, Mock())
@patch("teuthology.task.install._get_builder_project")
@patch("teuthology.task.install.packaging.get_package_version")
def test_skip_when_using_ceph_deploy(self, m_get_package_version,
m_gitbuilder_project):
gb = Mock()
gb.version = "0.89.0"
gb.project = "ceph"
m_gitbuilder_project.return_value = gb
# ceph isn't installed because ceph-deploy would install it
m_get_package_version.return_value = None
config = dict()
config['extras'] = True
install.verify_package_version(Mock(), config, Mock())
def test_get_flavor_default(self):
config = dict()
assert install.get_flavor(config) == 'basic'
def test_get_flavor_simple(self):
config = dict(
flavor='notcmalloc'
)
assert install.get_flavor(config) == 'notcmalloc'
def test_get_flavor_valgrind(self):
config = dict(
valgrind=True
)
assert install.get_flavor(config) == 'notcmalloc'
def test_upgrade_is_downgrade(self):
assert_ok_vals = [
('9.0.0', '10.0.0'),
('10.2.2-63-g8542898-1trusty', '10.2.2-64-gabcdef1-1trusty'),
('11.0.0-918.g13c13c7', '11.0.0-2165.gabcdef1')
]
for t in assert_ok_vals:
assert install._upgrade_is_downgrade(t[0], t[1]) == False
@patch("teuthology.packaging.get_package_version")
@patch("teuthology.misc.get_system_type")
@patch("teuthology.task.install.verify_package_version")
@patch("teuthology.task.install.get_upgrade_version")
def test_upgrade_common(self,
m_get_upgrade_version,
m_verify_package_version,
m_get_system_type,
m_get_package_version):
expected_system_type = 'deb'
def make_remote():
remote = Mock()
remote.arch = 'x86_64'
remote.os = Mock()
remote.os.name = 'ubuntu'
remote.os.version = '14.04'
remote.os.codename = 'trusty'
remote.system_type = expected_system_type
return remote
ctx = Mock()
class cluster:
remote1 = make_remote()
remote2 = make_remote()
remotes = {
remote1: ['client.0'],
remote2: ['mon.a','osd.0'],
}
def only(self, role):
result = Mock()
if role in ('client.0',):
result.remotes = { cluster.remote1: None }
if role in ('osd.0', 'mon.a'):
result.remotes = { cluster.remote2: None }
return result
ctx.cluster = cluster()
config = {
'client.0': {
'sha1': 'expectedsha1',
},
}
ctx.config = {
'roles': [ ['client.0'], ['mon.a','osd.0'] ],
'tasks': [
{
'install.upgrade': config,
},
],
}
m_get_upgrade_version.return_value = "11.0.0"
m_get_package_version.return_value = "10.2.4"
m_get_system_type.return_value = "deb"
def upgrade(ctx, node, remote, pkgs, system_type):
assert system_type == expected_system_type
assert install.upgrade_common(ctx, config, upgrade) == 1
expected_config = {
'project': 'ceph',
'sha1': 'expectedsha1',
}
m_verify_package_version.assert_called_with(ctx,
expected_config,
cluster.remote1)
def test_upgrade_remote_to_config(self):
expected_system_type = 'deb'
def make_remote():
remote = Mock()
remote.arch = 'x86_64'
remote.os = Mock()
remote.os.name = 'ubuntu'
remote.os.version = '14.04'
remote.os.codename = 'trusty'
remote.system_type = expected_system_type
return remote
ctx = Mock()
class cluster:
remote1 = make_remote()
remote2 = make_remote()
remotes = {
remote1: ['client.0'],
remote2: ['mon.a','osd.0'],
}
def only(self, role):
result = Mock()
if role in ('client.0',):
result.remotes = { cluster.remote1: None }
elif role in ('osd.0', 'mon.a'):
result.remotes = { cluster.remote2: None }
else:
result.remotes = None
return result
ctx.cluster = cluster()
ctx.config = {
'roles': [ ['client.0'], ['mon.a','osd.0'] ],
}
# nothing -> nothing
assert install.upgrade_remote_to_config(ctx, {}) == {}
# select the remote for the osd.0 role
# the 'ignored' role does not exist and is ignored
# the remote for mon.a is the same as for osd.0 and
# is silently ignored (actually it could be the other
# way around, depending on how the keys are hashed)
config = {
'osd.0': {
'sha1': 'expectedsha1',
},
'ignored': None,
'mon.a': {
'sha1': 'expectedsha1',
},
}
expected_config = {
cluster.remote2: {
'project': 'ceph',
'sha1': 'expectedsha1',
},
}
assert install.upgrade_remote_to_config(ctx, config) == expected_config
# select all nodes, regardless
config = {
'all': {
'sha1': 'expectedsha1',
},
}
expected_config = {
cluster.remote1: {
'project': 'ceph',
'sha1': 'expectedsha1',
},
cluster.remote2: {
'project': 'ceph',
'sha1': 'expectedsha1',
},
}
assert install.upgrade_remote_to_config(ctx, config) == expected_config
# verify that install overrides are used as default
# values for the upgrade task, not as override
ctx.config['overrides'] = {
'install': {
'ceph': {
'sha1': 'overridesha1',
'tag': 'overridetag',
'branch': 'overridebranch',
},
},
}
config = {
'client.0': {
'sha1': 'expectedsha1',
},
'osd.0': {
},
}
expected_config = {
cluster.remote1: {
'project': 'ceph',
'sha1': 'expectedsha1',
},
cluster.remote2: {
'project': 'ceph',
'sha1': 'overridesha1',
'tag': 'overridetag',
'branch': 'overridebranch',
},
}
assert install.upgrade_remote_to_config(ctx, config) == expected_config
@patch("teuthology.task.install.packaging.get_package_version")
@patch("teuthology.task.install.redhat.set_deb_repo")
def test_rh_install_deb_pkgs(self, m_set_rh_deb_repo, m_get_pkg_version):
ctx = Mock()
remote = Mock()
version = '1.3.2'
rh_ds_yaml = dict()
rh_ds_yaml = {
'versions': {'deb': {'mapped': {'1.3.2': '0.94.5'}}},
'pkgs': {'deb': ['pkg1', 'pkg2']},
'extra_system_packages': {'deb': ['es_pkg1', 'es_pkg2']},
'extra_packages': {'deb': ['e_pkg1', 'e_pkg2']},
}
m_get_pkg_version.return_value = "0.94.5"
install.redhat.install_deb_pkgs(ctx, remote, version, rh_ds_yaml)
@patch("teuthology.task.install.packaging.get_package_version")
def test_rh_install_pkgs(self, m_get_pkg_version):
ctx = Mock()
remote = Mock()
version = '1.3.2'
rh_ds_yaml = dict()
rh_ds_yaml = {
'versions': {'rpm': {'mapped': {'1.3.2': '0.94.5',
'1.3.1': '0.94.3'}}},
'pkgs': {'rpm': ['pkg1', 'pkg2']},
'extra_system_packages': {'rpm': ['es_pkg1', 'es_pkg2']},
'extra_packages': {'rpm': ['e_pkg1', 'e_pkg2']},
}
m_get_pkg_version.return_value = "0.94.5"
install.redhat.install_pkgs(ctx, remote, version, rh_ds_yaml)
version = '1.3.1'
with pytest.raises(RuntimeError) as e:
install.redhat.install_pkgs(ctx, remote, version, rh_ds_yaml)
assert "Version check failed" in str(e)
| dmick/teuthology | teuthology/test/task/test_install.py | Python | mit | 12,343 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-24 13:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_profile', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='profile',
name='user_mention_notification',
field=models.CharField(choices=[('', 'No notification'), ('sms', 'Text Message (SMS)'), ('email', 'Email')], default='', max_length=20, verbose_name='Notification when mentioned'),
),
]
| mcallistersean/b2-issue-tracker | toucan/user_profile/migrations/0002_profile_user_mention_notification.py | Python | mit | 604 |
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: proto
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class SubscriberFeatures(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = SubscriberFeatures()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsSubscriberFeatures(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# SubscriberFeatures
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# SubscriberFeatures
def PublisherIdentification(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# SubscriberFeatures
def PatternBasedSubscription(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# SubscriberFeatures
def PublicationTrustlevels(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# SubscriberFeatures
def SubscriptionRevocation(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# SubscriberFeatures
def EventHistory(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# SubscriberFeatures
def AcknowledgeSubscriberReceived(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# SubscriberFeatures
def PayloadTransparency(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# SubscriberFeatures
def PayloadEncryptionCryptobox(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
def Start(builder): builder.StartObject(8)
def SubscriberFeaturesStart(builder):
"""This method is deprecated. Please switch to Start."""
return Start(builder)
def AddPublisherIdentification(builder, publisherIdentification): builder.PrependBoolSlot(0, publisherIdentification, 0)
def SubscriberFeaturesAddPublisherIdentification(builder, publisherIdentification):
"""This method is deprecated. Please switch to AddPublisherIdentification."""
return AddPublisherIdentification(builder, publisherIdentification)
def AddPatternBasedSubscription(builder, patternBasedSubscription): builder.PrependBoolSlot(1, patternBasedSubscription, 0)
def SubscriberFeaturesAddPatternBasedSubscription(builder, patternBasedSubscription):
"""This method is deprecated. Please switch to AddPatternBasedSubscription."""
return AddPatternBasedSubscription(builder, patternBasedSubscription)
def AddPublicationTrustlevels(builder, publicationTrustlevels): builder.PrependBoolSlot(2, publicationTrustlevels, 0)
def SubscriberFeaturesAddPublicationTrustlevels(builder, publicationTrustlevels):
"""This method is deprecated. Please switch to AddPublicationTrustlevels."""
return AddPublicationTrustlevels(builder, publicationTrustlevels)
def AddSubscriptionRevocation(builder, subscriptionRevocation): builder.PrependBoolSlot(3, subscriptionRevocation, 0)
def SubscriberFeaturesAddSubscriptionRevocation(builder, subscriptionRevocation):
"""This method is deprecated. Please switch to AddSubscriptionRevocation."""
return AddSubscriptionRevocation(builder, subscriptionRevocation)
def AddEventHistory(builder, eventHistory): builder.PrependBoolSlot(4, eventHistory, 0)
def SubscriberFeaturesAddEventHistory(builder, eventHistory):
"""This method is deprecated. Please switch to AddEventHistory."""
return AddEventHistory(builder, eventHistory)
def AddAcknowledgeSubscriberReceived(builder, acknowledgeSubscriberReceived): builder.PrependBoolSlot(5, acknowledgeSubscriberReceived, 0)
def SubscriberFeaturesAddAcknowledgeSubscriberReceived(builder, acknowledgeSubscriberReceived):
"""This method is deprecated. Please switch to AddAcknowledgeSubscriberReceived."""
return AddAcknowledgeSubscriberReceived(builder, acknowledgeSubscriberReceived)
def AddPayloadTransparency(builder, payloadTransparency): builder.PrependBoolSlot(6, payloadTransparency, 0)
def SubscriberFeaturesAddPayloadTransparency(builder, payloadTransparency):
"""This method is deprecated. Please switch to AddPayloadTransparency."""
return AddPayloadTransparency(builder, payloadTransparency)
def AddPayloadEncryptionCryptobox(builder, payloadEncryptionCryptobox): builder.PrependBoolSlot(7, payloadEncryptionCryptobox, 0)
def SubscriberFeaturesAddPayloadEncryptionCryptobox(builder, payloadEncryptionCryptobox):
"""This method is deprecated. Please switch to AddPayloadEncryptionCryptobox."""
return AddPayloadEncryptionCryptobox(builder, payloadEncryptionCryptobox)
def End(builder): return builder.EndObject()
def SubscriberFeaturesEnd(builder):
"""This method is deprecated. Please switch to End."""
    return End(builder)
| crossbario/autobahn-python | autobahn/wamp/gen/wamp/proto/SubscriberFeatures.py | Python | mit | 6,121 |
# mako/codegen.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides functionality for rendering a parsetree constructing into module source code."""
import time
import re
from mako.pygen import PythonPrinter
from mako import util, ast, parsetree, filters, exceptions
MAGIC_NUMBER = 7
def compile(node,
uri,
filename=None,
default_filters=None,
buffer_filters=None,
imports=None,
source_encoding=None,
generate_magic_comment=True,
disable_unicode=False,
strict_undefined=False):
"""Generate module source code given a parsetree node,
uri, and optional source filename"""
# if on Py2K, push the "source_encoding" string to be
# a bytestring itself, as we will be embedding it into
# the generated source and we don't want to coerce the
# result into a unicode object, in "disable_unicode" mode
if not util.py3k and isinstance(source_encoding, unicode):
source_encoding = source_encoding.encode(source_encoding)
buf = util.FastEncodingBuffer()
printer = PythonPrinter(buf)
_GenerateRenderMethod(printer,
_CompileContext(uri,
filename,
default_filters,
buffer_filters,
imports,
source_encoding,
generate_magic_comment,
disable_unicode,
strict_undefined),
node)
return buf.getvalue()
class _CompileContext(object):
def __init__(self,
uri,
filename,
default_filters,
buffer_filters,
imports,
source_encoding,
generate_magic_comment,
disable_unicode,
strict_undefined):
self.uri = uri
self.filename = filename
self.default_filters = default_filters
self.buffer_filters = buffer_filters
self.imports = imports
self.source_encoding = source_encoding
self.generate_magic_comment = generate_magic_comment
self.disable_unicode = disable_unicode
self.strict_undefined = strict_undefined
class _GenerateRenderMethod(object):
"""A template visitor object which generates the
full module source for a template.
"""
def __init__(self, printer, compiler, node):
self.printer = printer
self.last_source_line = -1
self.compiler = compiler
self.node = node
self.identifier_stack = [None]
self.in_def = isinstance(node, (parsetree.DefTag, parsetree.BlockTag))
if self.in_def:
name = "render_%s" % node.funcname
args = node.get_argument_expressions()
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
defs = None
pagetag = None
if node.is_block and not node.is_anonymous:
args += ['**pageargs']
else:
defs = self.write_toplevel()
pagetag = self.compiler.pagetag
name = "render_body"
if pagetag is not None:
args = pagetag.body_decl.get_argument_expressions()
if not pagetag.body_decl.kwargs:
args += ['**pageargs']
cached = eval(pagetag.attributes.get('cached', 'False'))
else:
args = ['**pageargs']
cached = False
buffered = filtered = False
if args is None:
args = ['context']
else:
args = [a for a in ['context'] + args]
self.write_render_callable(
pagetag or node,
name, args,
buffered, filtered, cached)
if defs is not None:
for node in defs:
_GenerateRenderMethod(printer, compiler, node)
@property
def identifiers(self):
return self.identifier_stack[-1]
def write_toplevel(self):
"""Traverse a template structure for module-level directives and
generate the start of module-level code.
"""
inherit = []
namespaces = {}
module_code = []
        encoding = [None]
self.compiler.pagetag = None
class FindTopLevel(object):
def visitInheritTag(s, node):
inherit.append(node)
def visitNamespaceTag(s, node):
namespaces[node.name] = node
def visitPageTag(s, node):
self.compiler.pagetag = node
def visitCode(s, node):
if node.ismodule:
module_code.append(node)
f = FindTopLevel()
for n in self.node.nodes:
n.accept_visitor(f)
self.compiler.namespaces = namespaces
module_ident = set()
for n in module_code:
module_ident = module_ident.union(n.declared_identifiers())
module_identifiers = _Identifiers()
module_identifiers.declared = module_ident
# module-level names, python code
if self.compiler.generate_magic_comment and \
self.compiler.source_encoding:
self.printer.writeline("# -*- encoding:%s -*-" %
self.compiler.source_encoding)
self.printer.writeline("from mako import runtime, filters, cache")
self.printer.writeline("UNDEFINED = runtime.UNDEFINED")
self.printer.writeline("__M_dict_builtin = dict")
self.printer.writeline("__M_locals_builtin = locals")
self.printer.writeline("_magic_number = %r" % MAGIC_NUMBER)
self.printer.writeline("_modified_time = %r" % time.time())
self.printer.writeline(
"_template_filename = %r" % self.compiler.filename)
self.printer.writeline("_template_uri = %r" % self.compiler.uri)
self.printer.writeline(
"_source_encoding = %r" % self.compiler.source_encoding)
if self.compiler.imports:
buf = ''
for imp in self.compiler.imports:
buf += imp + "\n"
self.printer.writeline(imp)
impcode = ast.PythonCode(
buf,
source='', lineno=0,
pos=0,
filename='template defined imports')
else:
impcode = None
main_identifiers = module_identifiers.branch(self.node)
module_identifiers.topleveldefs = \
module_identifiers.topleveldefs.\
union(main_identifiers.topleveldefs)
module_identifiers.declared.add("UNDEFINED")
if impcode:
module_identifiers.declared.update(impcode.declared_identifiers)
self.compiler.identifiers = module_identifiers
self.printer.writeline("_exports = %r" %
[n.name for n in
main_identifiers.topleveldefs.values()]
)
self.printer.write("\n\n")
if len(module_code):
self.write_module_code(module_code)
if len(inherit):
self.write_namespaces(namespaces)
self.write_inherit(inherit[-1])
elif len(namespaces):
self.write_namespaces(namespaces)
return main_identifiers.topleveldefs.values()
def write_render_callable(self, node, name, args, buffered, filtered, cached):
"""write a top-level render callable.
this could be the main render() method or that of a top-level def."""
if self.in_def:
decorator = node.decorator
if decorator:
self.printer.writeline("@runtime._decorate_toplevel(%s)" % decorator)
self.printer.writelines(
"def %s(%s):" % (name, ','.join(args)),
"context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writeline("context._push_buffer()")
self.identifier_stack.append(self.compiler.identifiers.branch(self.node))
if (not self.in_def or self.node.is_block) and '**pageargs' in args:
self.identifier_stack[-1].argument_declared.add('pageargs')
if not self.in_def and (
len(self.identifiers.locally_assigned) > 0 or
len(self.identifiers.argument_declared) > 0
):
self.printer.writeline("__M_locals = __M_dict_builtin(%s)" %
','.join([
"%s=%s" % (x, x) for x in
self.identifiers.argument_declared
]))
self.write_variable_declares(self.identifiers, toplevel=True)
for n in self.node.nodes:
n.accept_visitor(self)
self.write_def_finish(self.node, buffered, filtered, cached)
self.printer.writeline(None)
self.printer.write("\n\n")
if cached:
self.write_cache_decorator(
node, name,
args, buffered,
self.identifiers, toplevel=True)
def write_module_code(self, module_code):
"""write module-level template code, i.e. that which
is enclosed in <%! %> tags in the template."""
for n in module_code:
self.write_source_comment(n)
self.printer.write_indented_block(n.text)
def write_inherit(self, node):
"""write the module-level inheritance-determination callable."""
self.printer.writelines(
"def _mako_inherit(template, context):",
"_mako_generate_namespaces(context)",
"return runtime._inherit_from(context, %s, _template_uri)" %
(node.parsed_attributes['file']),
None
)
def write_namespaces(self, namespaces):
"""write the module-level namespace-generating callable."""
self.printer.writelines(
"def _mako_get_namespace(context, name):",
"try:",
"return context.namespaces[(__name__, name)]",
"except KeyError:",
"_mako_generate_namespaces(context)",
"return context.namespaces[(__name__, name)]",
None,None
)
self.printer.writeline("def _mako_generate_namespaces(context):")
for node in namespaces.values():
if node.attributes.has_key('import'):
self.compiler.has_ns_imports = True
self.write_source_comment(node)
if len(node.nodes):
self.printer.writeline("def make_namespace():")
export = []
identifiers = self.compiler.identifiers.branch(node)
self.in_def = True
class NSDefVisitor(object):
def visitDefTag(s, node):
s.visitDefOrBase(node)
def visitBlockTag(s, node):
s.visitDefOrBase(node)
def visitDefOrBase(s, node):
if node.is_anonymous:
raise exceptions.CompileException(
"Can't put anonymous blocks inside <%namespace>",
**node.exception_kwargs
)
self.write_inline_def(node, identifiers, nested=False)
export.append(node.funcname)
vis = NSDefVisitor()
for n in node.nodes:
n.accept_visitor(vis)
self.printer.writeline("return [%s]" % (','.join(export)))
self.printer.writeline(None)
self.in_def = False
callable_name = "make_namespace()"
else:
callable_name = "None"
if 'file' in node.parsed_attributes:
self.printer.writeline(
"ns = runtime.TemplateNamespace(%r, context._clean_inheritance_tokens(),"
" templateuri=%s, callables=%s, calling_uri=_template_uri)" %
(
node.name,
node.parsed_attributes.get('file', 'None'),
callable_name,
)
)
elif 'module' in node.parsed_attributes:
self.printer.writeline(
"ns = runtime.ModuleNamespace(%r, context._clean_inheritance_tokens(),"
" callables=%s, calling_uri=_template_uri, module=%s)" %
(
node.name,
callable_name,
node.parsed_attributes.get('module', 'None')
)
)
else:
self.printer.writeline(
"ns = runtime.Namespace(%r, context._clean_inheritance_tokens(),"
" callables=%s, calling_uri=_template_uri)" %
(
node.name,
callable_name,
)
)
if eval(node.attributes.get('inheritable', "False")):
self.printer.writeline("context['self'].%s = ns" % (node.name))
self.printer.writeline("context.namespaces[(__name__, %s)] = ns" % repr(node.name))
self.printer.write("\n")
if not len(namespaces):
self.printer.writeline("pass")
self.printer.writeline(None)
def write_variable_declares(self, identifiers, toplevel=False, limit=None):
"""write variable declarations at the top of a function.
the variable declarations are in the form of callable
definitions for defs and/or name lookup within the
function's context argument. the names declared are based
on the names that are referenced in the function body,
which don't otherwise have any explicit assignment
operation. names that are assigned within the body are
assumed to be locally-scoped variables and are not
separately declared.
for def callable definitions, if the def is a top-level
callable then a 'stub' callable is generated which wraps
the current Context into a closure. if the def is not
top-level, it is fully rendered as a local closure.
"""
# collection of all defs available to us in this scope
comp_idents = dict([(c.funcname, c) for c in identifiers.defs])
to_write = set()
# write "context.get()" for all variables we are going to
        # need that aren't in the namespace yet
to_write = to_write.union(identifiers.undeclared)
# write closure functions for closures that we define
# right here
to_write = to_write.union([c.funcname for c in identifiers.closuredefs.values()])
# remove identifiers that are declared in the argument
# signature of the callable
to_write = to_write.difference(identifiers.argument_declared)
# remove identifiers that we are going to assign to.
# in this way we mimic Python's behavior,
# i.e. assignment to a variable within a block
# means that variable is now a "locally declared" var,
# which cannot be referenced beforehand.
to_write = to_write.difference(identifiers.locally_declared)
# if a limiting set was sent, constraint to those items in that list
# (this is used for the caching decorator)
if limit is not None:
to_write = to_write.intersection(limit)
if toplevel and getattr(self.compiler, 'has_ns_imports', False):
self.printer.writeline("_import_ns = {}")
self.compiler.has_imports = True
for ident, ns in self.compiler.namespaces.iteritems():
if ns.attributes.has_key('import'):
self.printer.writeline(
"_mako_get_namespace(context, %r)._populate(_import_ns, %r)" %
(
ident,
re.split(r'\s*,\s*', ns.attributes['import'])
))
for ident in to_write:
if ident in comp_idents:
comp = comp_idents[ident]
if comp.is_block:
if not comp.is_anonymous:
self.write_def_decl(comp, identifiers)
else:
self.write_inline_def(comp, identifiers, nested=True)
else:
if comp.is_root():
self.write_def_decl(comp, identifiers)
else:
self.write_inline_def(comp, identifiers, nested=True)
elif ident in self.compiler.namespaces:
self.printer.writeline(
"%s = _mako_get_namespace(context, %r)" %
(ident, ident)
)
else:
if getattr(self.compiler, 'has_ns_imports', False):
if self.compiler.strict_undefined:
self.printer.writelines(
"%s = _import_ns.get(%r, UNDEFINED)" %
(ident, ident),
"if %s is UNDEFINED:" % ident,
"try:",
"%s = context[%r]" % (ident, ident),
"except KeyError:",
"raise NameError(\"'%s' is not defined\")" %
ident,
None, None
)
else:
self.printer.writeline(
"%s = _import_ns.get(%r, context.get(%r, UNDEFINED))" %
(ident, ident, ident))
else:
if self.compiler.strict_undefined:
self.printer.writelines(
"try:",
"%s = context[%r]" % (ident, ident),
"except KeyError:",
"raise NameError(\"'%s' is not defined\")" %
ident,
None
)
else:
self.printer.writeline(
"%s = context.get(%r, UNDEFINED)" % (ident, ident)
)
self.printer.writeline("__M_writer = context.writer()")
def write_source_comment(self, node):
"""write a source comment containing the line number of the corresponding template line."""
if self.last_source_line != node.lineno:
self.printer.writeline("# SOURCE LINE %d" % node.lineno)
self.last_source_line = node.lineno
def write_def_decl(self, node, identifiers):
"""write a locally-available callable referencing a top-level def"""
funcname = node.funcname
namedecls = node.get_argument_expressions()
nameargs = node.get_argument_expressions(include_defaults=False)
if not self.in_def and (
len(self.identifiers.locally_assigned) > 0 or
len(self.identifiers.argument_declared) > 0):
nameargs.insert(0, 'context.locals_(__M_locals)')
else:
nameargs.insert(0, 'context')
self.printer.writeline("def %s(%s):" % (funcname, ",".join(namedecls)))
self.printer.writeline("return render_%s(%s)" % (funcname, ",".join(nameargs)))
self.printer.writeline(None)
def write_inline_def(self, node, identifiers, nested):
"""write a locally-available def callable inside an enclosing def."""
namedecls = node.get_argument_expressions()
decorator = node.decorator
if decorator:
self.printer.writeline("@runtime._decorate_inline(context, %s)" % decorator)
self.printer.writeline("def %s(%s):" % (node.funcname, ",".join(namedecls)))
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
self.printer.writelines(
"context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writelines(
"context._push_buffer()",
)
identifiers = identifiers.branch(node, nested=nested)
self.write_variable_declares(identifiers)
self.identifier_stack.append(identifiers)
for n in node.nodes:
n.accept_visitor(self)
self.identifier_stack.pop()
self.write_def_finish(node, buffered, filtered, cached)
self.printer.writeline(None)
if cached:
self.write_cache_decorator(node, node.funcname,
namedecls, False, identifiers,
inline=True, toplevel=False)
def write_def_finish(self, node, buffered, filtered, cached, callstack=True):
"""write the end section of a rendering function, either outermost or inline.
this takes into account if the rendering function was filtered, buffered, etc.
and closes the corresponding try: block if any, and writes code to retrieve
captured content, apply filters, send proper return value."""
if not buffered and not cached and not filtered:
self.printer.writeline("return ''")
if callstack:
self.printer.writelines(
"finally:",
"context.caller_stack._pop_frame()",
None
)
if buffered or filtered or cached:
if buffered or cached:
# in a caching scenario, don't try to get a writer
# from the context after popping; assume the caching
                # implementation might be using a context with no
# extra buffers
self.printer.writelines(
"finally:",
"__M_buf = context._pop_buffer()"
)
else:
self.printer.writelines(
"finally:",
"__M_buf, __M_writer = context._pop_buffer_and_writer()"
)
if callstack:
self.printer.writeline("context.caller_stack._pop_frame()")
s = "__M_buf.getvalue()"
if filtered:
s = self.create_filter_callable(node.filter_args.args, s, False)
self.printer.writeline(None)
if buffered and not cached:
s = self.create_filter_callable(self.compiler.buffer_filters, s, False)
if buffered or cached:
self.printer.writeline("return %s" % s)
else:
self.printer.writelines(
"__M_writer(%s)" % s,
"return ''"
)
def write_cache_decorator(self, node_or_pagetag, name,
args, buffered, identifiers,
inline=False, toplevel=False):
"""write a post-function decorator to replace a rendering
callable with a cached version of itself."""
self.printer.writeline("__M_%s = %s" % (name, name))
cachekey = node_or_pagetag.parsed_attributes.get('cache_key', repr(name))
cache_args = {}
if self.compiler.pagetag is not None:
cache_args.update(
(
pa[6:],
self.compiler.pagetag.parsed_attributes[pa]
)
for pa in self.compiler.pagetag.parsed_attributes
if pa.startswith('cache_') and pa != 'cache_key'
)
cache_args.update(
(
pa[6:],
node_or_pagetag.parsed_attributes[pa]
) for pa in node_or_pagetag.parsed_attributes
if pa.startswith('cache_') and pa != 'cache_key'
)
if 'timeout' in cache_args:
cache_args['timeout'] = int(eval(cache_args['timeout']))
self.printer.writeline("def %s(%s):" % (name, ','.join(args)))
# form "arg1, arg2, arg3=arg3, arg4=arg4", etc.
pass_args = [
'=' in a and "%s=%s" % ((a.split('=')[0],)*2) or a
for a in args
]
self.write_variable_declares(
identifiers,
toplevel=toplevel,
limit=node_or_pagetag.undeclared_identifiers()
)
if buffered:
s = "context.get('local')."\
"cache.get_and_replace(%s, lambda:__M_%s(%s), %s__M_defname=%r)" % \
(cachekey, name, ','.join(pass_args),
''.join(["%s=%s, " % (k,v) for k, v in cache_args.items()]),
name
)
# apply buffer_filters
s = self.create_filter_callable(self.compiler.buffer_filters, s, False)
self.printer.writelines("return " + s,None)
else:
self.printer.writelines(
"__M_writer(context.get('local')."
"cache.get_and_replace(%s, lambda:__M_%s(%s), %s__M_defname=%r))" %
(cachekey, name, ','.join(pass_args),
''.join(["%s=%s, " % (k,v) for k, v in cache_args.items()]),
name,
),
"return ''",
None
)
def create_filter_callable(self, args, target, is_expression):
"""write a filter-applying expression based on the filters
present in the given filter names, adjusting for the global
'default' filter aliases as needed."""
def locate_encode(name):
if re.match(r'decode\..+', name):
return "filters." + name
elif self.compiler.disable_unicode:
return filters.NON_UNICODE_ESCAPES.get(name, name)
else:
return filters.DEFAULT_ESCAPES.get(name, name)
if 'n' not in args:
if is_expression:
if self.compiler.pagetag:
args = self.compiler.pagetag.filter_args.args + args
if self.compiler.default_filters:
args = self.compiler.default_filters + args
for e in args:
# if filter given as a function, get just the identifier portion
if e == 'n':
continue
m = re.match(r'(.+?)(\(.*\))', e)
if m:
(ident, fargs) = m.group(1,2)
f = locate_encode(ident)
e = f + fargs
else:
x = e
e = locate_encode(e)
assert e is not None
target = "%s(%s)" % (e, target)
return target
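    # Illustrative note (not part of the original source): assuming
    # self.compiler.default_filters is empty, a call such as
    #
    #     self.create_filter_callable(['u', 'h'], "x", False)
    #
    # resolves each alias through filters.DEFAULT_ESCAPES and wraps it around the
    # target in order, returning roughly "filters.html_escape(filters.url_escape(x))".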
def visitExpression(self, node):
self.write_source_comment(node)
if len(node.escapes) or \
(
self.compiler.pagetag is not None and
len(self.compiler.pagetag.filter_args.args)
) or \
len(self.compiler.default_filters):
s = self.create_filter_callable(node.escapes_code.args, "%s" % node.text, True)
self.printer.writeline("__M_writer(%s)" % s)
else:
self.printer.writeline("__M_writer(%s)" % node.text)
def visitControlLine(self, node):
if node.isend:
if not node.get_children():
self.printer.writeline("pass")
self.printer.writeline(None)
else:
self.write_source_comment(node)
self.printer.writeline(node.text)
def visitText(self, node):
self.write_source_comment(node)
self.printer.writeline("__M_writer(%s)" % repr(node.content))
def visitTextTag(self, node):
filtered = len(node.filter_args.args) > 0
if filtered:
self.printer.writelines(
"__M_writer = context._push_writer()",
"try:",
)
for n in node.nodes:
n.accept_visitor(self)
if filtered:
self.printer.writelines(
"finally:",
"__M_buf, __M_writer = context._pop_buffer_and_writer()",
"__M_writer(%s)" %
self.create_filter_callable(
node.filter_args.args,
"__M_buf.getvalue()",
False),
None
)
def visitCode(self, node):
if not node.ismodule:
self.write_source_comment(node)
self.printer.write_indented_block(node.text)
if not self.in_def and len(self.identifiers.locally_assigned) > 0:
# if we are the "template" def, fudge locally
# declared/modified variables into the "__M_locals" dictionary,
# which is used for def calls within the same template,
# to simulate "enclosing scope"
self.printer.writeline('__M_locals_builtin_stored = __M_locals_builtin()')
self.printer.writeline(
'__M_locals.update(__M_dict_builtin([(__M_key,'
' __M_locals_builtin_stored[__M_key]) for '
'__M_key in [%s] if __M_key in __M_locals_builtin_stored]))' %
','.join([repr(x) for x in node.declared_identifiers()]))
def visitIncludeTag(self, node):
self.write_source_comment(node)
args = node.attributes.get('args')
if args:
self.printer.writeline(
"runtime._include_file(context, %s, _template_uri, %s)" %
(node.parsed_attributes['file'], args))
else:
self.printer.writeline(
"runtime._include_file(context, %s, _template_uri)" %
(node.parsed_attributes['file']))
def visitNamespaceTag(self, node):
pass
def visitDefTag(self, node):
pass
def visitBlockTag(self, node):
if node.is_anonymous:
self.printer.writeline("%s()" % node.funcname)
else:
nameargs = node.get_argument_expressions(include_defaults=False)
nameargs += ['**pageargs']
self.printer.writeline("if 'parent' not in context._data or "
"not hasattr(context._data['parent'], '%s'):"
% node.funcname)
self.printer.writeline("context['self'].%s(%s)" % (node.funcname, ",".join(nameargs)))
self.printer.writeline("\n")
def visitCallNamespaceTag(self, node):
# TODO: we can put namespace-specific checks here, such
# as ensure the given namespace will be imported,
# pre-import the namespace, etc.
self.visitCallTag(node)
def visitCallTag(self, node):
self.printer.writeline("def ccall(caller):")
export = ['body']
callable_identifiers = self.identifiers.branch(node, nested=True)
body_identifiers = callable_identifiers.branch(node, nested=False)
# we want the 'caller' passed to ccall to be used
# for the body() function, but for other non-body()
# <%def>s within <%call> we want the current caller
# off the call stack (if any)
body_identifiers.add_declared('caller')
self.identifier_stack.append(body_identifiers)
class DefVisitor(object):
def visitDefTag(s, node):
s.visitDefOrBase(node)
def visitBlockTag(s, node):
s.visitDefOrBase(node)
def visitDefOrBase(s, node):
self.write_inline_def(node, callable_identifiers, nested=False)
if not node.is_anonymous:
export.append(node.funcname)
# remove defs that are within the <%call> from the "closuredefs" defined
                # in the body, so they don't render twice
if node.funcname in body_identifiers.closuredefs:
del body_identifiers.closuredefs[node.funcname]
vis = DefVisitor()
for n in node.nodes:
n.accept_visitor(vis)
self.identifier_stack.pop()
bodyargs = node.body_decl.get_argument_expressions()
self.printer.writeline("def body(%s):" % ','.join(bodyargs))
# TODO: figure out best way to specify
# buffering/nonbuffering (at call time would be better)
buffered = False
if buffered:
self.printer.writelines(
"context._push_buffer()",
"try:"
)
self.write_variable_declares(body_identifiers)
self.identifier_stack.append(body_identifiers)
for n in node.nodes:
n.accept_visitor(self)
self.identifier_stack.pop()
self.write_def_finish(node, buffered, False, False, callstack=False)
self.printer.writelines(
None,
"return [%s]" % (','.join(export)),
None
)
self.printer.writelines(
# get local reference to current caller, if any
"__M_caller = context.caller_stack._get_caller()",
# push on caller for nested call
"context.caller_stack.nextcaller = "
"runtime.Namespace('caller', context, callables=ccall(__M_caller))",
"try:")
self.write_source_comment(node)
self.printer.writelines(
"__M_writer(%s)" % self.create_filter_callable([], node.expression, True),
"finally:",
"context.caller_stack.nextcaller = None",
None
)
class _Identifiers(object):
"""tracks the status of identifier names as template code is rendered."""
def __init__(self, node=None, parent=None, nested=False):
if parent is not None:
# if we are the branch created in write_namespaces(),
# we don't share any context from the main body().
if isinstance(node, parsetree.NamespaceTag):
self.declared = set()
self.topleveldefs = util.SetLikeDict()
else:
# things that have already been declared
# in an enclosing namespace (i.e. names we can just use)
self.declared = set(parent.declared).\
union([c.name for c in parent.closuredefs.values()]).\
union(parent.locally_declared).\
union(parent.argument_declared)
# if these identifiers correspond to a "nested"
# scope, it means whatever the parent identifiers
# had as undeclared will have been declared by that parent,
# and therefore we have them in our scope.
if nested:
self.declared = self.declared.union(parent.undeclared)
# top level defs that are available
self.topleveldefs = util.SetLikeDict(**parent.topleveldefs)
else:
self.declared = set()
self.topleveldefs = util.SetLikeDict()
# things within this level that are referenced before they
# are declared (e.g. assigned to)
self.undeclared = set()
# things that are declared locally. some of these things
# could be in the "undeclared" list as well if they are
# referenced before declared
self.locally_declared = set()
# assignments made in explicit python blocks.
# these will be propagated to
# the context of local def calls.
self.locally_assigned = set()
# things that are declared in the argument
# signature of the def callable
self.argument_declared = set()
# closure defs that are defined in this level
self.closuredefs = util.SetLikeDict()
self.node = node
if node is not None:
node.accept_visitor(self)
def branch(self, node, **kwargs):
"""create a new Identifiers for a new Node, with
this Identifiers as the parent."""
return _Identifiers(node, self, **kwargs)
@property
def defs(self):
return set(self.topleveldefs.union(self.closuredefs).values())
def __repr__(self):
return "Identifiers(declared=%r, locally_declared=%r, "\
"undeclared=%r, topleveldefs=%r, closuredefs=%r, argumentdeclared=%r)" %\
(
list(self.declared),
list(self.locally_declared),
list(self.undeclared),
[c.name for c in self.topleveldefs.values()],
[c.name for c in self.closuredefs.values()],
self.argument_declared)
def check_declared(self, node):
"""update the state of this Identifiers with the undeclared
and declared identifiers of the given node."""
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
for ident in node.declared_identifiers():
self.locally_declared.add(ident)
def add_declared(self, ident):
self.declared.add(ident)
if ident in self.undeclared:
self.undeclared.remove(ident)
def visitExpression(self, node):
self.check_declared(node)
def visitControlLine(self, node):
self.check_declared(node)
def visitCode(self, node):
if not node.ismodule:
self.check_declared(node)
self.locally_assigned = self.locally_assigned.union(node.declared_identifiers())
def visitNamespaceTag(self, node):
# only traverse into the sub-elements of a
# <%namespace> tag if we are the branch created in
# write_namespaces()
if self.node is node:
for n in node.nodes:
n.accept_visitor(self)
def _check_name_exists(self, collection, node):
existing = collection.get(node.funcname)
collection[node.funcname] = node
if existing is not None and \
existing is not node and \
(node.is_block or existing.is_block):
raise exceptions.CompileException(
"%%def or %%block named '%s' already "
"exists in this template." %
node.funcname, **node.exception_kwargs)
def visitDefTag(self, node):
if node.is_root() and not node.is_anonymous:
self._check_name_exists(self.topleveldefs, node)
elif node is not self.node:
self._check_name_exists(self.closuredefs, node)
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
# visit defs only one level deep
if node is self.node:
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
for n in node.nodes:
n.accept_visitor(self)
def visitBlockTag(self, node):
if node is not self.node and \
not node.is_anonymous:
if isinstance(self.node, parsetree.DefTag):
raise exceptions.CompileException(
"Named block '%s' not allowed inside of def '%s'"
% (node.name, self.node.name), **node.exception_kwargs)
elif isinstance(self.node, (parsetree.CallTag, parsetree.CallNamespaceTag)):
raise exceptions.CompileException(
"Named block '%s' not allowed inside of <%%call> tag"
% (node.name, ), **node.exception_kwargs)
if not node.is_anonymous:
self._check_name_exists(self.topleveldefs, node)
self.undeclared.add(node.funcname)
elif node is not self.node:
self._check_name_exists(self.closuredefs, node)
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
for n in node.nodes:
n.accept_visitor(self)
def visitIncludeTag(self, node):
self.check_declared(node)
def visitPageTag(self, node):
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
self.check_declared(node)
def visitCallNamespaceTag(self, node):
self.visitCallTag(node)
def visitCallTag(self, node):
if node is self.node:
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
for n in node.nodes:
n.accept_visitor(self)
else:
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
| youngrok/mako | mako/codegen.py | Python | mit | 43,222 |
from django.http import Http404
from wagtail.core.models import Page, Site
from wagtailmenus.models.menuitems import MenuItem
def get_site_from_request(request, fallback_to_default=True):
site = getattr(request, 'site', None)
if isinstance(site, Site):
return request.site
site = Site.find_for_request(request)
if site:
return site
if fallback_to_default:
return Site.objects.filter(is_default_site=True).first()
return None
def derive_page(request, site, accept_best_match=True, max_subsequent_route_failures=3):
"""
Attempts to find a ``Page`` from the provided ``site`` matching the path
of the supplied ``request``. Returns a tuple, where the first item is
the matching page (or ``None`` if no match was found), and the second item
is a boolean indicating whether the page matched the full URL.
If ``accept_best_match`` is ``True``, the method will attempt to find a
'best match', matching as many path components as possible. This process
will continue until all path components have been exhausted, or routing
    fails more than ``max_subsequent_route_failures`` times in a row.
"""
routing_point = site.root_page.specific
path_components = [pc for pc in request.path.split('/') if pc]
if not accept_best_match:
try:
return routing_point.route(request, path_components)[0], True
except Http404:
return None, False
best_match = None
full_url_match = False
lookup_components = []
subsequent_route_failures = 0
for i, component in enumerate(path_components, 1):
lookup_components.append(component)
try:
best_match = routing_point.route(request, lookup_components)[0]
except Http404:
            # route() was unsuccessful. keep trying with more components until
# they are exhausted, or the maximum number of subsequent failures
# has been reached
subsequent_route_failures += 1
if subsequent_route_failures >= max_subsequent_route_failures:
break # give up
else:
# route() was successful. have all components been used yet?
full_url_match = bool(i == len(path_components))
# reset failure count
subsequent_route_failures = 0
if best_match != routing_point:
# A new page was reached. Next, try route() from this new
# page, using fresh lookup components
routing_point = best_match
lookup_components = []
else:
# `routing_point` has multiple routes. Next, try route() from
# the same page with more components, as a new page could still
# be reached
continue
return best_match, full_url_match
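# Illustrative sketch (not part of the original module): how the helpers above and
# below might be combined in view code. ``request`` is assumed to be an ordinary
# Django HttpRequest; error handling is omitted.
#
#   site = get_site_from_request(request)
#   if site is not None:
#       page, exact_match = derive_page(request, site, accept_best_match=True)
#       if page is not None:
#           section_root = derive_section_root(page)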
def derive_section_root(page):
"""
Returns the 'section root' for the provided ``page``, or ``None``
    if no such page can be identified. Results are dependent on the
value of the ``WAGTAILMENUS_SECTION_ROOT_DEPTH`` setting.
"""
from wagtailmenus.conf import settings
desired_depth = settings.SECTION_ROOT_DEPTH
if page.depth == desired_depth:
return page.specific
if page.depth > desired_depth:
return page.get_ancestors().get(depth=desired_depth).specific
def validate_supplied_values(tag, max_levels=None, parent_page=None,
menuitem_or_page=None):
if max_levels is not None:
if max_levels not in (1, 2, 3, 4, 5):
raise ValueError(
"The `%s` tag expects `max_levels` to be an integer value "
"between 1 and 5. Please review your template." % tag
)
if parent_page is not None:
if not isinstance(parent_page, Page):
raise ValueError(
"The `%s` tag expects `parent_page` to be a `Page` instance. "
"A value of type `%s` was supplied." %
(tag, parent_page.__class__)
)
if menuitem_or_page is not None:
if not isinstance(menuitem_or_page, (Page, MenuItem)):
raise ValueError(
"The `%s` tag expects `menuitem_or_page` to be a `Page` or "
"`MenuItem` instance. A value of type `%s` was supplied." %
(tag, menuitem_or_page.__class__)
)
| rkhleics/wagtailmenus | wagtailmenus/utils/misc.py | Python | mit | 4,425 |
"""
===========================
Formaldehyde mm-line fitter
===========================
This is a formaldehyde 3_03-2_02 / 3_22-221 and 3_03-2_02/3_21-2_20 fitter.
It is based entirely on RADEX models.
"""
import numpy as np
import hyperfine
from . import fitter,model#,modelgrid
try: # for model grid reading
import astropy.io.fits as pyfits
except ImportError:
import pyfits
try:
import scipy.interpolate
import scipy.ndimage
scipyOK = True
except ImportError:
scipyOK=False
line_names = ['threeohthree','threetwotwo','threetwoone']
# http://adsabs.harvard.edu/abs/1971ApJ...169..429T has the most accurate freqs
# http://adsabs.harvard.edu/abs/1972ApJ...174..463T [twotwo]
central_freq_dict = {
'threeohthree': 218.222192e9,
'threetwotwo': 218.475632e9,
'threetwoone': 218.760066e9,
}
line_strength_dict={
'threeohthree': 1.,
'threetwotwo': 1.,
'threetwoone': 1.,
}
relative_strength_total_degeneracy={
'threeohthree': 1.,
'threetwotwo': 1.,
'threetwoone': 1.,
}
freq_dict = central_freq_dict
aval_dict = {
'threeohthree': 2.818e-4,
'threetwotwo': 1.571e-4,
'threetwoone': 1.577e-4,
}
voff_lines_dict = {
'threeohthree': 0.,
'threetwotwo': 0.,
'threetwoone': 0.,
}
formaldehyde_mm_vtau = hyperfine.hyperfinemodel(line_names, voff_lines_dict,
freq_dict, line_strength_dict, relative_strength_total_degeneracy)
formaldehyde_mm_vtau_fitter = formaldehyde_mm_vtau.fitter
formaldehyde_mm_vtau_vheight_fitter = formaldehyde_mm_vtau.vheight_fitter
def formaldehyde_mm_radex(xarr,
temperature=25,
column=13,
density=4,
xoff_v=0.0,
width=1.0,
grid_vwidth=1.0,
texgrid=None,
taugrid=None,
hdr=None,
path_to_texgrid='',
path_to_taugrid='',
debug=False,
verbose=False,
**kwargs):
"""
Use a grid of RADEX-computed models to make a model line spectrum
The RADEX models have to be available somewhere.
OR they can be passed as arrays. If as arrays, the form should be:
texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))
xarr must be a SpectroscopicAxis instance
xoff_v, width are both in km/s
Parameters
----------
grid_vwidth : float
the velocity assumed when computing the grid in km/s
this is important because tau = modeltau / width (see, e.g.,
Draine 2011 textbook pgs 219-230)
density : float
Density!
"""
if texgrid is None and taugrid is None:
if path_to_texgrid == '' or path_to_taugrid=='':
raise IOError("Must specify model grids to use.")
else:
taugrid = [pyfits.getdata(path_to_taugrid)]
texgrid = [pyfits.getdata(path_to_texgrid)]
hdr = pyfits.getheader(path_to_taugrid)
zinds,yinds,xinds = np.indices(taugrid[0].shape)
if 'CD1_1' in hdr:
cd11 = 'CD1_1'
cd22 = 'CD2_2'
else:
cd11 = 'CDELT1'
cd22 = 'CDELT2'
densityarr = (xinds+hdr['CRPIX1']-1)*hdr[cd11]+hdr['CRVAL1'] # log density
columnarr = (yinds+hdr['CRPIX2']-1)*hdr[cd22]+hdr['CRVAL2'] # log column
temparr = (zinds+hdr['CRPIX3']-1)*hdr['CDELT3']+hdr['CRVAL3'] # lin temperature
minfreq = (218.,)
maxfreq = (219.,)
elif len(taugrid)==len(texgrid) and hdr is not None:
minfreq,maxfreq,texgrid = zip(*texgrid)
minfreq,maxfreq,taugrid = zip(*taugrid)
zinds,yinds,xinds = np.indices(taugrid[0].shape)
if 'CD1_1' in hdr:
cd11 = 'CD1_1'
cd22 = 'CD2_2'
else:
cd11 = 'CDELT1'
cd22 = 'CDELT2'
densityarr = (xinds+hdr['CRPIX1']-1)*hdr[cd11]+hdr['CRVAL1'] # log density
columnarr = (yinds+hdr['CRPIX2']-1)*hdr[cd22]+hdr['CRVAL2'] # log column
temparr = (zinds+hdr['CRPIX3']-1)*hdr['CDELT3']+hdr['CRVAL3'] # lin temperature
else:
raise Exception
# Convert X-units to frequency in GHz
xarr = xarr.as_unit('Hz', quiet=True)
#tau_nu_cumul = np.zeros(len(xarr))
gridval1 = np.interp(density, densityarr[0,0,:], xinds[0,0,:])
gridval2 = np.interp(column, columnarr[0,:,0], yinds[0,:,0])
gridval3 = np.interp(temperature, temparr[:,0,0], zinds[:,0,0])
if np.isnan(gridval1) or np.isnan(gridval2) or np.isnan(gridval3):
raise ValueError("Invalid column/density")
if scipyOK:
# this is mostly a trick for speed: slice so you only have two thin layers to interpolate
# between
#slices = [density_gridnumber] + [slice(np.floor(gv),np.floor(gv)+2) for gv in (gridval2,gridval1)]
slices = [slice(np.floor(gridval3),np.floor(gridval3)+2),
slice(np.floor(gridval2),np.floor(gridval2)+2),
slice(np.floor(gridval1),np.floor(gridval1)+2)
]
tau = [scipy.ndimage.map_coordinates(tg[slices],
np.array([[gridval3%1],[gridval2%1],[gridval1%1]]),order=1) for tg in taugrid]
tex = [scipy.ndimage.map_coordinates(tg[slices],
np.array([[gridval3%1],[gridval2%1],[gridval1%1]]),order=1) for tg in texgrid]
else:
raise ImportError("Couldn't import scipy, therefore cannot interpolate")
#tau = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,taugrid[temperature_gridnumber,:,:])
#tex = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,texgrid[temperature_gridnumber,:,:])
if verbose:
for ta,tk in zip(tau,tex):
print "density %20.12g temperature %20.12g column %20.12g: tau %20.12g tex %20.12g" % (density, temperature, column, ta, tk)
if debug:
import pdb; pdb.set_trace()
spec = np.sum([
(formaldehyde_mm_vtau(xarr, Tex=float(tex[ii]), tau=float(tau[ii]),
xoff_v=xoff_v, width=width, **kwargs)
* (xarr.as_unit('GHz')>minfreq[ii]) * (xarr.as_unit('GHz')<maxfreq[ii])) for ii in xrange(len(tex))],
axis=0)
return spec
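# Illustrative sketch (not part of the original source): the RADEX grid model above
# can be given FITS grids either as file paths or as pre-loaded arrays. The file
# names below are placeholders, not files shipped with the package.
#
#   spec = formaldehyde_mm_radex(xarr, temperature=25, column=13.5, density=4.5,
#                                xoff_v=0.0, width=1.0,
#                                path_to_texgrid='h2co_mm_tex.fits',
#                                path_to_taugrid='h2co_mm_tau.fits')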
def formaldehyde_mm(xarr, amp=1.0, xoff_v=0.0, width=1.0,
return_components=False ):
"""
Generate a model Formaldehyde spectrum based on simple gaussian parameters
the "amplitude" is an essentially arbitrary parameter; we therefore define it to be Tex given tau=0.01 when
passing to the fitter
The final spectrum is then rescaled to that value
The components are independent, but with offsets set by frequency... in principle.
"""
    mdl = formaldehyde_mm_vtau(xarr, Tex=amp*0.01, tau=0.01, xoff_v=xoff_v,
width=width,
return_components=return_components)
if return_components:
mdlpeak = np.abs(mdl).squeeze().sum(axis=0).max()
else:
mdlpeak = np.abs(mdl).max()
if mdlpeak > 0:
mdl *= amp/mdlpeak
return mdl
class formaldehyde_mm_model(model.SpectralModel):
pass
formaldehyde_mm_fitter = formaldehyde_mm_model(formaldehyde_mm, 3,
parnames=['amp','center','width'],
parlimited=[(False,False),(False,False), (True,False)],
parlimits=[(0,0), (0,0), (0,0)],
shortvarnames=("A","v","\\sigma"), # specify the parameter names (TeX is OK)
fitunits='Hz' )
formaldehyde_mm_vheight_fitter = formaldehyde_mm_model(fitter.vheightmodel(formaldehyde_mm), 4,
parnames=['height','amp','center','width'],
parlimited=[(False,False),(False,False),(False,False), (True,False)],
parlimits=[(0,0), (0,0), (0,0), (0,0)],
shortvarnames=("H","A","v","\\sigma"), # specify the parameter names (TeX is OK)
fitunits='Hz' )
try:
import pymodelfit
class pmfFormaldehydeModel(pymodelfit.FunctionModel1DAuto):
def f(self, x, amp0=1.0, xoff_v0=0.0,width0=1.0):
return formaldehyde(x,
amp=amp0,
xoff_v=xoff_v0,width=width0)
class pmfFormaldehydeModelVtau(pymodelfit.FunctionModel1DAuto):
def f(self, x, Tex0=1.0, tau0=0.01, xoff_v0=0.0, width0=1.0):
return formaldehyde_vtau(x,
Tex=Tex0, tau=tau0,
xoff_v=xoff_v0,width=width0)
except ImportError:
pass
| bsipocz/pyspeckit | pyspeckit/spectrum/models/formaldehyde_mm.py | Python | mit | 8,388 |
import IPython.core.display
import matplotlib as mpl
def clean():
# set mpl defaults for nice display
mpl.rcParams['font.size'] = 12
mpl.rcParams['figure.figsize'] = (18, 6)
mpl.rcParams['lines.linewidth'] = 1
return IPython.core.display.HTML("""
<style type="text/css">
div.input {
width: 105ex; /* about 80 chars + buffer */
}
div.text_cell {
width: 105ex /* instead of 100%, */
}
div.text_cell_render {
font-family: Cambria, Candara, serif;
        font-size: 14pt;
line-height: 145%; /* added for some line spacing of text. */
width: 105ex; /* instead of 'inherit' for shorter lines */
}
/* Set the size of the headers */
div.text_cell_render h1 {
font-size: 20pt;
}
div.text_cell_render h2 {
font-size: 18pt;
}
.CodeMirror {
font-family: Consolas, monospace;
width: 105ex;
}
.rendered_html ol {list-style:decimal; margin: 1em 2em;}
</style>""")
| boffi/boffi.github.io | dati_2015/01/style.py | Python | mit | 998 |
'''
This script can be used to generate large numbers of warnings with little effort.
It generates a set of warnings for each line in each compilation unit, where
the number per line is controlled by the variable WARNINGS_PER_LINE below.
It was supplied by Grammatech to Praqma for testing the Code Sonar plugin.
There are three approaches to using it: 1) the plugin approach for all analysis runs, 2) the plugin approach for a single project, 3) interactively running commands from the command line.
1) To use it as a Code Sonar plugin for all analysis runs, add the following line to the standard template configuration file, usually found in <codesonar installation path>/template.conf
PLUGINS += wgen.py
2) To use it for a single project, add the plugin line to a project configuration file. Copy the default template from <codesonar installation path>/template.conf to the project workspace as wgen-codesonar-template.conf, and run the command:
codesonar analyze <PROJNAME> -foreground -conf-file wgen-codesonar-template.conf <HUB IP:PORT> <project compile, e.g. make -j2>
3) To use it interactively (useful for debugging), do something like this:
codesonar analyze -foreground -preset python_debug_console Wgen riddle:9450 gcc -c wgen.c
At the prompt do:
execfile("wgen.py")
go()
Quit the interpreter to let the analysis finish and so that the warnings show up in the hub.
'''
import cs
@cs.compunit_visitor
def do_cu(cu):
if not cu.is_user():
return
WARNINGS_PER_LINE = 2
wcs = []
for i in range(WARNINGS_PER_LINE):
wcs.append(cs.analysis.create_warningclass("wgen.py plugin generated warning %d" % i))
sfi = cu.get_sfileinst()
for i in range(1,sfi.line_count()):
for wc in wcs:
wc.report(sfi, i, "This is line %d from wgen.py plugin" % i)
def go():
for cu in cs.project.current().compunits():
do_cu(cu)
| Praqma/codesonar-plugin | test/wgen.py | Python | mit | 1,845 |
# -*- coding: utf-8 -*-
from django.conf.urls import url, patterns, include
from django.views.generic import View
from django.http import HttpResponse
from djangular.views.mixins import JSONResponseMixin, allow_remote_invocation
class RemoteMethodsView(JSONResponseMixin, View):
@allow_remote_invocation
def foo(self, in_data):
return {'foo': 'abc'}
@allow_remote_invocation
def bar(self, in_data):
return {'bar': 'abc'}
def get(self, request):
return HttpResponse('OK')
subsub_patterns = patterns('',
url(r'^app/$', RemoteMethodsView.as_view(), name='app'),
)
sub_patterns = patterns('',
url(r'^sub/', include(subsub_patterns, namespace='sub')),
)
urlpatterns = patterns('',
url(r'^sub_methods/', include(sub_patterns, namespace='submethods')),
url(r'^straight_methods/$', RemoteMethodsView.as_view(), name='straightmethods'),
)
| jinankjain/django-angular | examples/server/tests/urls.py | Python | mit | 897 |
from pyramid.config import Configurator
from .models.node import root_factory
def main(global_config, **settings):
config = Configurator(
settings=settings,
root_factory=root_factory
)
config.include('pyramid_jinja2')
config.include('.models')
config.scan('.views')
config.add_static_view('static', 'mysite:static')
return config.make_wsgi_app()
| pauleveritt/pyramid_sqltraversal | docs/traversal_crud/mysite/__init__.py | Python | mit | 392 |
"""
Based on a gist from https://gist.github.com/kesor/1229681
"""
from django.core.exceptions import MiddlewareNotUsed
from django.conf import settings
import cProfile
import pstats
import marshal
from cStringIO import StringIO
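# Illustrative usage note (not part of the original file): with DEBUG = True, the
# middleware is enabled by adding its dotted path to Django's middleware setting
# (MIDDLEWARE_CLASSES in the Django versions this was written for). Appending
# ?profile=1 to a URL then returns plain-text pstats output, while ?profilebin=1
# downloads a marshalled dump that should be loadable with pstats.Stats(path).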
class ProfileMiddleware(object):
def __init__(self):
if not settings.DEBUG:
raise MiddlewareNotUsed()
self.profiler = None
def process_view(self, request, callback, callback_args, callback_kwargs):
if settings.DEBUG and ('profile' in request.GET
or 'profilebin' in request.GET):
self.profiler = cProfile.Profile()
args = (request,) + callback_args
return self.profiler.runcall(callback, *args, **callback_kwargs)
def process_response(self, request, response):
if settings.DEBUG:
if 'profile' in request.GET:
self.profiler.create_stats()
out = StringIO()
stats = pstats.Stats(self.profiler, stream=out)
# Values for stats.sort_stats():
# - calls call count
# - cumulative cumulative time
# - file file name
# - module file name
# - pcalls primitive call count
# - line line number
# - name function name
# - nfl name/file/line
# - stdname standard name
# - time internal time
stats.sort_stats('cumulative').print_stats(.2)
response.content = out.getvalue()
response['Content-type'] = 'text/plain'
return response
if 'profilebin' in request.GET:
self.profiler.create_stats()
response.content = marshal.dumps(self.profiler.stats)
filename = request.path.strip('/').replace('/','_') + '.pstat'
response['Content-Disposition'] = \
'attachment; filename=%s' % (filename,)
response['Content-type'] = 'application/octet-stream'
return response
return response | MadeInHaus/django-social | example/SocialExample/project/apps/utils/profile_middleware.py | Python | mit | 2,245 |
"""
Support for Nest devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/nest/
"""
import logging
import socket
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (CONF_PASSWORD, CONF_USERNAME, CONF_STRUCTURE)
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['python-nest==2.10.0']
DOMAIN = 'nest'
NEST = None
STRUCTURES_TO_INCLUDE = None
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_STRUCTURE): vol.All(cv.ensure_list, cv.string)
})
}, extra=vol.ALLOW_EXTRA)
def devices():
"""Generator returning list of devices and their location."""
try:
for structure in NEST.structures:
if structure.name in STRUCTURES_TO_INCLUDE:
for device in structure.devices:
yield (structure, device)
else:
_LOGGER.debug("Ignoring structure %s, not in %s",
structure.name, STRUCTURES_TO_INCLUDE)
except socket.error:
_LOGGER.error("Connection error logging into the nest web service.")
def protect_devices():
"""Generator returning list of protect devices."""
try:
for structure in NEST.structures:
if structure.name in STRUCTURES_TO_INCLUDE:
for device in structure.protectdevices:
yield(structure, device)
else:
_LOGGER.info("Ignoring structure %s, not in %s",
structure.name, STRUCTURES_TO_INCLUDE)
except socket.error:
_LOGGER.error("Connection error logging into the nest web service.")
# pylint: disable=unused-argument
def setup(hass, config):
"""Setup the Nest thermostat component."""
global NEST
global STRUCTURES_TO_INCLUDE
conf = config[DOMAIN]
username = conf[CONF_USERNAME]
password = conf[CONF_PASSWORD]
import nest
NEST = nest.Nest(username, password)
if CONF_STRUCTURE not in conf:
STRUCTURES_TO_INCLUDE = [s.name for s in NEST.structures]
else:
STRUCTURES_TO_INCLUDE = conf[CONF_STRUCTURE]
_LOGGER.debug("Structures to include: %s", STRUCTURES_TO_INCLUDE)
return True
| hexxter/home-assistant | homeassistant/components/nest.py | Python | mit | 2,384 |
#! /usr/bin/python3
# -*- coding:utf-8 -*-
__author__ = 'TonyZhu'
import mysql.connector
import discount
def saveToMySQL(discount_str):
conn = mysql.connector.connect(user = 'root',password='password',database = 'Test')
cursor = conn.cursor()
cursor.execute('insert into discount values(%s)',[discount_str])
count = cursor.rowcount
conn.commit()
cursor.close()
return count
if __name__ == '__main__':
discount_arr = discount.produce(3)
for _discount in discount_arr:
        flag = saveToMySQL(_discount) == 1
print(flag) | Show-Me-the-Code/python | bbos1994/0002/saveDiscountToDB.py | Python | mit | 551 |
# -*- coding: utf-8 -*-
#
from django.db import models
from django.utils.translation import ugettext_lazy as _
from ..exceptions import StopInterpretationException, BreakLoopException
class StopInterpretation(models.Model):
class Meta:
verbose_name = _('Stop instruction')
verbose_name_plural = _('Stop instructions')
def interpret(self, ctx):
raise StopInterpretationException()
class BreakLoop(models.Model):
class Meta:
verbose_name = _('Break instruction')
verbose_name_plural = _('Break instructions')
def interpret(self, ctx):
raise BreakLoopException()
| vlfedotov/django-business-logic | business_logic/models/stop.py | Python | mit | 634 |
#!/usr/bin/env python3
from pag import GameWorld
from pag import CommandLineInterface
from pag import classes
# This gives the Game the list of all locations that is updated every time a
# new location is created. Since setting a variable to another variable with a
# list points to the one list's memory address, the list in the game class also
# updates.
gameworld = GameWorld(locations=classes.location_list)
cli = CommandLineInterface(gameworld)
class ToiletPaper(classes.Item):
def __init__(self):
super().__init__(name='toilet paper',
description='The toilet paper is labeled "X-t'
                                     'raSoft."',
loc_description='A roll of toilet paper is in '
'the room.',
weight=1)
home = classes.Location('Home', start=True, show_name_when_exit=True)
home.description = 'You\'re at home.'
bathroom = classes.Location('Bathroom', items=[ToiletPaper()], show_name_when_exit=True)
bathroom.description = 'You\'re in the bathroom.'
home.exits = {'south': bathroom}
bathroom.exits = {'north': home}
cli.play()
| allanburleson/python-adventure-game | example.py | Python | mit | 1,136 |
from __future__ import absolute_import
from struct import pack
from vertica_python.vertica.messages.message import FrontendMessage
class CopyStream(FrontendMessage):
def __init__(self, stream, buffer_size=131072):
self.stream = stream
self.bufsize = buffer_size
def read_bytes(self):
data = self.stream.read(self.bufsize)
if len(data) == 0:
return data
return self.message_string(data)
CopyStream._message_id('d')
| brokendata/vertica-python | vertica_python/vertica/messages/frontend_messages/copy_stream.py | Python | mit | 483 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import importlib
import traceback
import logging
import json
import re
from six import iteritems
from mattermost_bot.utils import WorkerPool
from mattermost_bot import settings
logger = logging.getLogger(__name__)
MESSAGE_MATCHER = re.compile(r'^(@.*?\:?)\s(.*)', re.MULTILINE | re.DOTALL)
BOT_ICON = settings.BOT_ICON if hasattr(settings, 'BOT_ICON') else None
BOT_EMOJI = settings.BOT_EMOJI if hasattr(settings, 'BOT_EMOJI') else None
class MessageDispatcher(object):
def __init__(self, client, plugins):
self._client = client
self._pool = WorkerPool(self.dispatch_msg, settings.WORKERS_NUM)
self._plugins = plugins
self._channel_info = {}
self.event = None
def start(self):
self._pool.start()
@staticmethod
def get_message(msg):
return msg.get('data', {}).get('post', {}).get('message', '').strip()
def ignore(self, _msg):
msg = self.get_message(_msg)
for prefix in settings.IGNORE_NOTIFIES:
if msg.startswith(prefix):
return True
def is_mentioned(self, msg):
mentions = msg.get('data', {}).get('mentions', [])
return self._client.user['id'] in mentions
def is_personal(self, msg):
channel_id = msg['data']['post']['channel_id']
if channel_id in self._channel_info:
channel_type = self._channel_info[channel_id]
else:
channel = self._client.api.channel(channel_id)
channel_type = channel['channel']['type']
self._channel_info[channel_id] = channel_type
return channel_type == 'D'
def dispatch_msg(self, msg):
category = msg[0]
msg = msg[1]
text = self.get_message(msg)
responded = False
msg['message_type'] = '?'
if self.is_personal(msg):
msg['message_type'] = 'D'
for func, args in self._plugins.get_plugins(category, text):
if func:
responded = True
try:
func(Message(self._client, msg, self._pool), *args)
except Exception as err:
logger.exception(err)
                    reply = '[%s] I have a problem when handling "%s"\n' % (
func.__name__, text)
reply += '```\n%s\n```' % traceback.format_exc()
self._client.channel_msg(
msg['data']['post']['channel_id'], reply)
if not responded and category == 'respond_to':
if settings.DEFAULT_REPLY_MODULE is not None:
mod = importlib.import_module(settings.DEFAULT_REPLY_MODULE)
if hasattr(mod, 'default_reply'):
return getattr(mod, 'default_reply')(self, msg)
self._default_reply(msg)
def _on_new_message(self, msg):
if self.ignore(msg) is True:
return
msg = self.filter_text(msg)
if self.is_mentioned(msg) or self.is_personal(msg):
self._pool.add_task(('respond_to', msg))
else:
self._pool.add_task(('listen_to', msg))
def filter_text(self, msg):
text = self.get_message(msg)
if self.is_mentioned(msg):
m = MESSAGE_MATCHER.match(text)
if m:
msg['data']['post']['message'] = m.group(2).strip()
return msg
def load_json(self):
if self.event.get('data', {}).get('post'):
self.event['data']['post'] = json.loads(
self.event['data']['post'])
if self.event.get('data', {}).get('mentions'):
self.event['data']['mentions'] = json.loads(
self.event['data']['mentions'])
def loop(self):
for self.event in self._client.messages(True, 'posted'):
self.load_json()
self._on_new_message(self.event)
def _default_reply(self, msg):
if settings.DEFAULT_REPLY:
return self._client.channel_msg(
msg['data']['post']['channel_id'], settings.DEFAULT_REPLY)
default_reply = [
            u'Bad command "%s". You can ask me one of the '
u'following questions:\n' % self.get_message(msg),
]
docs_fmt = u'{1}' if settings.PLUGINS_ONLY_DOC_STRING else u'`{0}` {1}'
default_reply += [
docs_fmt.format(p.pattern, v.__doc__ or "")
for p, v in iteritems(self._plugins.commands['respond_to'])]
self._client.channel_msg(
msg['data']['post']['channel_id'], '\n'.join(default_reply))
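# Illustrative sketch (not part of the original source): settings.DEFAULT_REPLY_MODULE
# names a module whose default_reply(dispatcher, msg) is called for unmatched
# commands with the MessageDispatcher instance and the raw event dict, e.g.
#
#   # my_replies.py  (hypothetical module)
#   def default_reply(dispatcher, msg):
#       dispatcher._client.channel_msg(
#           msg['data']['post']['channel_id'], 'Sorry, I do not understand.')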
class Message(object):
users = {}
channels = {}
def __init__(self, client, body, pool):
from mattermost_bot.bot import PluginsManager
self._plugins = PluginsManager()
self._client = client
self._body = body
self._pool = pool
def get_user_info(self, key, user_id=None):
if key == 'username':
sender_name = self._get_sender_name()
if sender_name:
return sender_name
user_id = user_id or self._body['data']['post']['user_id']
if not Message.users or user_id not in Message.users:
Message.users = self._client.get_users()
return Message.users[user_id].get(key)
def get_username(self, user_id=None):
return self.get_user_info('username', user_id)
def get_user_mail(self, user_id=None):
return self.get_user_info('email', user_id)
def get_user_id(self, user_id=None):
return self.get_user_info('id', user_id)
def get_channel_name(self, channel_id=None):
channel_id = channel_id or self._body['data']['post']['channel_id']
if channel_id in self.channels:
channel_name = self.channels[channel_id]
else:
channel = self._client.api.channel(channel_id)
channel_name = channel['channel']['name']
self.channels[channel_id] = channel_name
return channel_name
def get_team_id(self):
return self._client.api.team_id
def get_message(self):
return self._body['data']['post']['message'].strip()
def is_direct_message(self):
return self._body['message_type'] == 'D'
def get_busy_workers(self):
return self._pool.get_busy_workers()
def get_mentions(self):
return self._body['data'].get('mentions')
def _gen_at_message(self, text):
return '@{}: {}'.format(self.get_username(), text)
def _gen_reply(self, text):
if self._body['message_type'] == '?':
return self._gen_at_message(text)
return text
def _get_sender_name(self):
return self._body['data'].get('sender_name', '').strip()
def _get_first_webhook(self):
hooks = self._client.api.hooks_list()
if not hooks:
for channel in self._client.api.get_channels():
if channel.get('name') == 'town-square':
return self._client.api.hooks_create(
channel_id=channel.get('id')).get('id')
return hooks[0].get('id')
@staticmethod
def _get_webhook_url_by_id(hook_id):
base = '/'.join(settings.BOT_URL.split('/')[:3])
return '%s/hooks/%s' % (base, hook_id)
def reply_webapi(self, text, *args, **kwargs):
self.send_webapi(self._gen_reply(text), *args, **kwargs)
def send_webapi(self, text, attachments=None, channel_id=None, **kwargs):
url = self._get_webhook_url_by_id(self._get_first_webhook())
kwargs['username'] = kwargs.get(
'username', self.get_username(self._client.user['id']))
kwargs['icon_url'] = kwargs.get('icon_url', BOT_ICON)
kwargs['icon_emoji'] = kwargs.get('icon_emoji', BOT_EMOJI)
self._client.api.in_webhook(
url, self.get_channel_name(channel_id), text,
attachments=attachments, ssl_verify=self._client.api.ssl_verify,
**kwargs)
def reply(self, text):
self.send(self._gen_reply(text))
def send(self, text, channel_id=None):
return self._client.channel_msg(
channel_id or self._body['data']['post']['channel_id'], text)
def update(self, text, message_id, channel_id=None):
return self._client.update_msg(
message_id, channel_id or self._body['data']['post']['channel_id'],
text
)
def react(self, emoji_name):
self._client.channel_msg(
self._body['data']['post']['channel_id'], emoji_name,
pid=self._body['data']['post']['id'])
def comment(self, message):
self.react(message)
def docs_reply(self, docs_format=' • `{0}` {1}'):
reply = [docs_format.format(v.__name__, v.__doc__ or "")
for p, v in iteritems(self._plugins.commands['respond_to'])]
return '\n'.join(reply)
@property
def channel(self):
return self._body['data']['post']['channel_id']
@property
def body(self):
return self._body
| LPgenerator/mattermost_bot | mattermost_bot/dispatcher.py | Python | mit | 9,090 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import unittest
import flask
import browsepy
import browsepy.file as browsepy_file
import browsepy.widget as browsepy_widget
import browsepy.manager as browsepy_manager
from browsepy.tests.deprecated.plugin import player as player
class ManagerMock(object):
def __init__(self):
self.blueprints = []
self.mimetype_functions = []
self.actions = []
self.widgets = []
@staticmethod
def style_class(endpoint, **kwargs):
return ('style', endpoint, kwargs)
@staticmethod
def button_class(*args, **kwargs):
return ('button', args, kwargs)
@staticmethod
def javascript_class(endpoint, **kwargs):
return ('javascript', endpoint, kwargs)
@staticmethod
def link_class(*args, **kwargs):
return ('link', args, kwargs)
def register_blueprint(self, blueprint):
self.blueprints.append(blueprint)
def register_mimetype_function(self, fnc):
self.mimetype_functions.append(fnc)
def register_widget(self, widget):
self.widgets.append(widget)
def register_action(self, blueprint, widget, mimetypes=(), **kwargs):
self.actions.append((blueprint, widget, mimetypes, kwargs))
class FileMock(object):
@property
def type(self):
return self.mimetype.split(';')[0]
@property
def category(self):
return self.mimetype.split('/')[0]
is_directory = False
name = 'unnamed'
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class TestPlugins(unittest.TestCase):
app_module = browsepy
manager_module = browsepy_manager
def setUp(self):
self.app = self.app_module.app
self.manager = self.manager_module.PluginManager(self.app)
self.original_namespaces = self.app.config['plugin_namespaces']
self.plugin_namespace, self.plugin_name = __name__.rsplit('.', 1)
self.app.config['plugin_namespaces'] = (self.plugin_namespace,)
def tearDown(self):
self.app.config['plugin_namespaces'] = self.original_namespaces
self.manager.clear()
def test_manager(self):
self.manager.load_plugin(self.plugin_name)
self.assertTrue(self.manager._plugin_loaded)
endpoints = sorted(
action.endpoint
for action in self.manager.get_actions(FileMock(mimetype='a/a'))
)
self.assertEqual(
endpoints,
sorted(('test_x_x', 'test_a_x', 'test_x_a', 'test_a_a')))
self.assertEqual(
self.app.view_functions['old_test_plugin.root'](),
'old_test_plugin')
self.assertIn('old_test_plugin', self.app.blueprints)
self.assertRaises(
self.manager_module.PluginNotFoundError,
self.manager.load_plugin,
'non_existent_plugin_module'
)
def register_plugin(manager):
widget_class = browsepy_widget.WidgetBase
manager._plugin_loaded = True
manager.register_action('test_x_x', widget_class('test_x_x'), ('*/*',))
manager.register_action('test_a_x', widget_class('test_a_x'), ('a/*',))
manager.register_action('test_x_a', widget_class('test_x_a'), ('*/a',))
manager.register_action('test_a_a', widget_class('test_a_a'), ('a/a',))
manager.register_action('test_b_x', widget_class('test_b_x'), ('b/*',))
test_plugin_blueprint = flask.Blueprint(
'old_test_plugin', __name__, url_prefix='/old_test_plugin_blueprint')
test_plugin_blueprint.add_url_rule(
'/', endpoint='root', view_func=lambda: 'old_test_plugin')
manager.register_blueprint(test_plugin_blueprint)
class TestPlayerBase(unittest.TestCase):
module = player
scheme = 'test'
hostname = 'testing'
urlprefix = '%s://%s' % (scheme, hostname)
def assertUrlEqual(self, a, b):
self.assertIn(a, (b, '%s%s' % (self.urlprefix, b)))
def setUp(self):
self.app = flask.Flask(self.__class__.__name__)
self.app.config['directory_remove'] = None
self.app.config['SERVER_NAME'] = self.hostname
self.app.config['PREFERRED_URL_SCHEME'] = self.scheme
self.manager = ManagerMock()
class TestPlayer(TestPlayerBase):
def test_register_plugin(self):
self.module.register_plugin(self.manager)
self.assertIn(self.module.player, self.manager.blueprints)
self.assertIn(
self.module.detect_playable_mimetype,
self.manager.mimetype_functions)
widgets = [action[1] for action in self.manager.widgets]
self.assertIn('deprecated_player.static', widgets)
widgets = [action[2] for action in self.manager.widgets]
self.assertIn({'filename': 'css/browse.css'}, widgets)
actions = [action[0] for action in self.manager.actions]
self.assertIn('deprecated_player.audio', actions)
class TestIntegrationBase(TestPlayerBase):
player_module = player
browsepy_module = browsepy
manager_module = browsepy_manager
widget_module = browsepy_widget
file_module = browsepy_file
class TestIntegration(TestIntegrationBase):
def test_register_plugin(self):
self.app.config.update(self.browsepy_module.app.config)
self.app.config.update(
SERVER_NAME=self.hostname,
PREFERRED_URL_SCHEME=self.scheme,
plugin_namespaces=('browsepy.tests.deprecated.plugin',)
)
manager = self.manager_module.PluginManager(self.app)
manager.load_plugin('player')
self.assertIn(self.player_module.player, self.app.blueprints.values())
def test_register_action(self):
manager = self.manager_module.MimetypeActionPluginManager(self.app)
widget = self.widget_module.WidgetBase() # empty
manager.register_action('browse', widget, mimetypes=('*/*',))
actions = manager.get_actions(FileMock(mimetype='text/plain'))
self.assertEqual(len(actions), 1)
self.assertEqual(actions[0].widget, widget)
manager.register_action('browse', widget, mimetypes=('text/*',))
actions = manager.get_actions(FileMock(mimetype='text/plain'))
self.assertEqual(len(actions), 2)
self.assertEqual(actions[1].widget, widget)
manager.register_action('browse', widget, mimetypes=('text/plain',))
actions = manager.get_actions(FileMock(mimetype='text/plain'))
self.assertEqual(len(actions), 3)
self.assertEqual(actions[2].widget, widget)
widget = self.widget_module.ButtonWidget()
manager.register_action('browse', widget, mimetypes=('text/plain',))
actions = manager.get_actions(FileMock(mimetype='text/plain'))
self.assertEqual(len(actions), 4)
self.assertEqual(actions[3].widget, widget)
widget = self.widget_module.LinkWidget()
manager.register_action('browse', widget, mimetypes=('*/plain',))
actions = manager.get_actions(FileMock(mimetype='text/plain'))
self.assertEqual(len(actions), 5)
self.assertNotEqual(actions[4].widget, widget)
widget = self.widget_module.LinkWidget(icon='file', text='something')
manager.register_action('browse', widget, mimetypes=('*/plain',))
actions = manager.get_actions(FileMock(mimetype='text/plain'))
self.assertEqual(len(actions), 6)
self.assertEqual(actions[5].widget, widget)
def test_register_widget(self):
file = self.file_module.Node()
manager = self.manager_module.MimetypeActionPluginManager(self.app)
widget = self.widget_module.StyleWidget('static', filename='a.css')
manager.register_widget(widget)
widgets = manager.get_widgets('style')
self.assertEqual(len(widgets), 1)
self.assertIsInstance(widgets[0], self.widget_module.StyleWidget)
self.assertEqual(widgets[0], widget)
widgets = manager.get_widgets(place='style')
self.assertEqual(len(widgets), 1)
self.assertIsInstance(widgets[0], self.widget_module.StyleWidget)
self.assertEqual(widgets[0], widget)
widgets = manager.get_widgets(file=file, place='styles')
self.assertEqual(len(widgets), 1)
self.assertIsInstance(widgets[0], manager.widget_types['stylesheet'])
self.assertUrlEqual(widgets[0].href, '/static/a.css')
widget = self.widget_module.JavascriptWidget('static', filename='a.js')
manager.register_widget(widget)
widgets = manager.get_widgets('javascript')
self.assertEqual(len(widgets), 1)
self.assertIsInstance(widgets[0], self.widget_module.JavascriptWidget)
self.assertEqual(widgets[0], widget)
widgets = manager.get_widgets(place='javascript')
self.assertEqual(len(widgets), 1)
self.assertIsInstance(widgets[0], self.widget_module.JavascriptWidget)
self.assertEqual(widgets[0], widget)
widgets = manager.get_widgets(file=file, place='scripts')
self.assertEqual(len(widgets), 1)
self.assertIsInstance(widgets[0], manager.widget_types['script'])
self.assertUrlEqual(widgets[0].src, '/static/a.js')
def test_for_file(self):
manager = self.manager_module.MimetypeActionPluginManager(self.app)
widget = self.widget_module.LinkWidget(icon='asdf', text='something')
manager.register_action('browse', widget, mimetypes=('*/plain',))
file = self.file_module.File('asdf.txt', plugin_manager=manager,
app=self.app)
self.assertEqual(file.link.icon, 'asdf')
self.assertEqual(file.link.text, 'something')
widget = self.widget_module.LinkWidget()
manager.register_action('browse', widget, mimetypes=('*/plain',))
file = self.file_module.File('asdf.txt', plugin_manager=manager,
app=self.app)
self.assertEqual(file.link.text, 'asdf.txt')
def test_from_file(self):
file = self.file_module.File('asdf.txt')
widget = self.widget_module.LinkWidget.from_file(file)
self.assertEqual(widget.text, 'asdf.txt')
class TestPlayable(TestIntegrationBase):
module = player
def setUp(self):
super(TestPlayable, self).setUp()
self.manager = self.manager_module.MimetypeActionPluginManager(
self.app)
self.manager.register_mimetype_function(
self.player_module.detect_playable_mimetype)
def test_playablefile(self):
exts = {
'mp3': 'mp3',
'wav': 'wav',
'ogg': 'ogg'
}
for ext, media_format in exts.items():
pf = self.module.PlayableFile(path='asdf.%s' % ext, app=self.app)
self.assertEqual(pf.media_format, media_format)
if __name__ == '__main__':
unittest.main()
| ergoithz/browsepy | browsepy/tests/deprecated/test_plugins.py | Python | mit | 10,826 |
"""Store zygosity instead of alleles in Observation model
Revision ID: 1b7581fb6db9
Revises: 200de898ae9f
Create Date: 2013-04-04 11:35:32.568953
"""
# revision identifiers, used by Alembic.
revision = '1b7581fb6db9'
down_revision = '200de898ae9f'
from alembic import op
from sqlalchemy import sql
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
# https://bitbucket.org/zzzeek/alembic/issue/89/opadd_column-and-opdrop_column-should
context = op.get_context()
if context.bind.dialect.name == 'postgresql':
has_zygosity_type = context.bind.execute(
"select exists (select 1 from pg_type "
"where typname='zygosity')").scalar()
if not has_zygosity_type:
op.execute("CREATE TYPE zygosity AS ENUM ('heterozygous', 'homozygous')")
op.add_column('observation', sa.Column('zygosity', sa.Enum('heterozygous', 'homozygous', name='zygosity'), nullable=True))
# https://alembic.readthedocs.org/en/latest/ops.html#alembic.operations.Operations.execute
observation = sql.table('observation',
sql.column('alleles', sa.INTEGER()),
sql.column('zygosity', sa.Enum('heterozygous', 'homozygous', name='zygosity')))
op.execute(observation.update().where(observation.c.alleles == op.inline_literal(1)).values({'zygosity': 'heterozygous'}))
op.execute(observation.update().where(observation.c.alleles == op.inline_literal(2)).values({'zygosity': 'homozygous'}))
op.drop_column('observation', u'alleles')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('observation', sa.Column(u'alleles', sa.INTEGER(), nullable=True))
# https://alembic.readthedocs.org/en/latest/ops.html#alembic.operations.Operations.execute
observation = sql.table('observation',
sql.column('alleles', sa.INTEGER()),
sql.column('zygosity', sa.Enum('heterozygous', 'homozygous', name='zygosity')))
op.execute(observation.update().where(observation.c.zygosity == op.inline_literal('heterozygous')).values({'alleles': 1}))
op.execute(observation.update().where(observation.c.zygosity == op.inline_literal('homozygous')).values({'alleles': 2}))
op.drop_column('observation', 'zygosity')
### end Alembic commands ###
| sndrtj/varda | alembic/versions/1b7581fb6db9_store_zygosity_inste.py | Python | mit | 2,435 |
#!/usr/bin/python3
# python_name.py
# A simple Python class that stores a name; the script then prints it.
#
# Written by Billy Wilson Arante <[email protected]>
# Last updated on 2017/08/21 PHT
class Python():
def __init__(self, name):
self.name = name
bubba = Python("Bubba")
print(bubba.name)
| arantebillywilson/python-snippets | codewars/python_name.py | Python | mit | 291 |
"""
Flask-CacheOBJ provides some caching decorators
"""
from setuptools import setup
setup(
name='Flask-CacheOBJ',
version='0.2.2',
url='https://github.com/liwushuo/Flask-CacheOBJ',
license='MIT',
author='Ju Lin',
author_email='[email protected]',
description='Flask-CacheOBJ provides some caching decorators',
long_description=__doc__,
packages=['flask_cacheobj'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask',
'python-dateutil',
'pytz',
'msgpack-python',
'redis',
'decorator',
],
classifiers=[
'Framework :: Flask',
'Natural Language :: English',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| liwushuo/Flask-CacheOBJ | setup.py | Python | mit | 1,078 |
import os
import boto.swf
import json
import datetime
import time
import zipfile
import shutil
import re
from ftplib import FTP
import ftplib
import activity
import boto.s3
from boto.s3.connection import S3Connection
import provider.s3lib as s3lib
import provider.simpleDB as dblib
from elifetools import parseJATS as parser
from elifetools import xmlio
"""
PMCDeposit activity
"""
class activity_PMCDeposit(activity.activity):
def __init__(self, settings, logger, conn=None, token=None, activity_task=None):
activity.activity.__init__(self, settings, logger, conn, token, activity_task)
self.name = "PMCDeposit"
self.version = "1"
self.default_task_heartbeat_timeout = 30
self.default_task_schedule_to_close_timeout = 60 * 30
self.default_task_schedule_to_start_timeout = 30
self.default_task_start_to_close_timeout = 60 * 15
        self.description = ("Download a single zip file for an article, repackage it, " +
                            "send to PMC and notify them.")
# Local directory settings
self.TMP_DIR = self.get_tmp_dir() + os.sep + "tmp_dir"
self.INPUT_DIR = self.get_tmp_dir() + os.sep + "input_dir"
self.JUNK_DIR = self.get_tmp_dir() + os.sep + "junk_dir"
self.ZIP_DIR = self.get_tmp_dir() + os.sep + "zip_dir"
self.EPS_DIR = self.get_tmp_dir() + os.sep + "eps_dir"
self.TIF_DIR = self.get_tmp_dir() + os.sep + "tif_dir"
self.OUTPUT_DIR = self.get_tmp_dir() + os.sep + "output_dir"
# Data provider where email body is saved
self.db = dblib.SimpleDB(settings)
# Bucket settings
self.input_bucket = None
self.input_bucket_default = (settings.publishing_buckets_prefix +
settings.archive_bucket)
self.publish_bucket = settings.poa_packaging_bucket
self.published_folder = "pmc/published"
self.published_zip_folder = "pmc/zip"
# journal
self.journal = 'elife'
# Outgoing FTP settings are set later
self.FTP_URI = None
self.FTP_USERNAME = None
self.FTP_PASSWORD = None
self.FTP_CWD = None
self.FTP_SUBDIR = []
def do_activity(self, data=None):
"""
Activity, do the work
"""
if self.logger:
self.logger.info('data: %s' % json.dumps(data, sort_keys=True, indent=4))
# Data passed to this activity
self.document = data["data"]["document"]
# Custom bucket, if specified
if "bucket" in data["data"]:
self.input_bucket = data["data"]["bucket"]
else:
self.input_bucket = self.input_bucket_default
# Create output directories
self.create_activity_directories()
# Download the S3 objects
self.download_files_from_s3(self.document)
verified = None
# Check for an empty folder and respond true
# if we do not do this it will continue to attempt this activity
if len(self.file_list(self.INPUT_DIR)) <= 0:
if self.logger:
self.logger.info('folder was empty in PMCDeposit: ' + self.INPUT_DIR)
verified = True
folder = self.INPUT_DIR
if self.logger:
self.logger.info('processing files in folder ' + folder)
self.unzip_article_files(self.file_list(folder))
(fid, status, version, volume) = self.profile_article(self.document)
# Rename the files
file_name_map = self.rename_files_remove_version_number()
(verified, renamed_list, not_renamed_list) = self.verify_rename_files(file_name_map)
if self.logger:
self.logger.info("verified " + folder + ": " + str(verified))
self.logger.info(file_name_map)
if len(not_renamed_list) > 0:
if self.logger:
self.logger.info("not renamed " + str(not_renamed_list))
# Convert the XML
self.convert_xml(xml_file=self.article_xml_file(),
file_name_map=file_name_map)
# Get the new zip file name
# TODO - may need to take into account the r1 r2 revision numbers when replacing an article
revision = self.zip_revision_number(fid)
self.zip_file_name = self.new_zip_filename(self.journal, volume, fid, revision)
print self.zip_file_name
self.create_new_zip(self.zip_file_name)
# Set FTP settings
self.set_ftp_settings(fid)
ftp_status = None
if verified and self.zip_file_name:
ftp_status = self.ftp_to_endpoint(self.file_list(self.ZIP_DIR), self.FTP_SUBDIR, passive=True)
if ftp_status is True:
self.upload_article_zip_to_s3()
# Send email
file_size = self.file_size(os.path.join(self.ZIP_DIR, self.zip_file_name))
self.add_email_to_queue(self.journal, volume, fid, revision, self.zip_file_name, file_size)
# Return the activity result, True or False
if verified is True and ftp_status is True:
result = True
else:
result = False
# Clean up disk
self.clean_tmp_dir()
return result
def set_ftp_settings(self, doi_id):
"""
Set the outgoing FTP server settings based on the
workflow type specified
"""
self.FTP_URI = self.settings.PMC_FTP_URI
self.FTP_USERNAME = self.settings.PMC_FTP_USERNAME
self.FTP_PASSWORD = self.settings.PMC_FTP_PASSWORD
self.FTP_CWD = self.settings.PMC_FTP_CWD
def ftp_upload(self, ftp, file):
ext = os.path.splitext(file)[1]
#print file
uploadname = file.split(os.sep)[-1]
if ext in (".txt", ".htm", ".html"):
ftp.storlines("STOR " + file, open(file))
else:
#print "uploading " + uploadname
ftp.storbinary("STOR " + uploadname, open(file, "rb"), 1024)
#print "uploaded " + uploadname
def ftp_cwd_mkd(self, ftp, sub_dir):
"""
Given an FTP connection and a sub_dir name
try to cwd to the directory. If the directory
does not exist, create it, then cwd again
"""
cwd_success = None
try:
ftp.cwd(sub_dir)
cwd_success = True
except ftplib.error_perm:
# Directory probably does not exist, create it
ftp.mkd(sub_dir)
cwd_success = False
if cwd_success is not True:
ftp.cwd(sub_dir)
return cwd_success
def ftp_to_endpoint(self, uploadfiles, sub_dir_list=None, passive=True):
try:
for uploadfile in uploadfiles:
ftp = FTP()
if passive is False:
ftp.set_pasv(False)
ftp.connect(self.FTP_URI)
ftp.login(self.FTP_USERNAME, self.FTP_PASSWORD)
self.ftp_cwd_mkd(ftp, "/")
if self.FTP_CWD != "":
self.ftp_cwd_mkd(ftp, self.FTP_CWD)
if sub_dir_list is not None:
for sub_dir in sub_dir_list:
self.ftp_cwd_mkd(ftp, sub_dir)
self.ftp_upload(ftp, uploadfile)
ftp.quit()
return True
except:
return False
def download_files_from_s3(self, document):
if self.logger:
self.logger.info('downloading VoR file ' + document)
subfolder_name = ""
# Connect to S3 and bucket
s3_conn = S3Connection(self.settings.aws_access_key_id, self.settings.aws_secret_access_key)
bucket = s3_conn.lookup(self.input_bucket)
s3_key_name = document
s3_key_names = [s3_key_name]
self.download_s3_key_names_to_subfolder(bucket, s3_key_names, subfolder_name)
def download_s3_key_names_to_subfolder(self, bucket, s3_key_names, subfolder_name):
for s3_key_name in s3_key_names:
# Download objects from S3 and save to disk
s3_key = bucket.get_key(s3_key_name)
filename = s3_key_name.split("/")[-1]
# Make the subfolder if it does not exist yet
try:
os.mkdir(self.INPUT_DIR + os.sep + subfolder_name)
except:
pass
filename_plus_path = (self.INPUT_DIR
+ os.sep + subfolder_name
+ os.sep + filename)
mode = "wb"
f = open(filename_plus_path, mode)
s3_key.get_contents_to_file(f)
f.close()
def upload_article_zip_to_s3(self):
"""
Upload PMC zip file to S3
"""
bucket_name = self.publish_bucket
# Connect to S3 and bucket
s3_conn = S3Connection(self.settings.aws_access_key_id, self.settings.aws_secret_access_key)
bucket = s3_conn.lookup(bucket_name)
for file_name in self.file_list(self.ZIP_DIR):
s3_key_name = self.published_zip_folder + '/' + self.file_name_from_name(file_name)
s3_key = boto.s3.key.Key(bucket)
s3_key.key = s3_key_name
s3_key.set_contents_from_filename(file_name, replace=True)
def list_dir(self, dir_name):
dir_list = os.listdir(dir_name)
dir_list = map(lambda item: dir_name + os.sep + item, dir_list)
return dir_list
def folder_list(self, dir_name):
dir_list = self.list_dir(dir_name)
return filter(lambda item: os.path.isdir(item), dir_list)
def file_list(self, dir_name):
dir_list = self.list_dir(dir_name)
return filter(lambda item: os.path.isfile(item), dir_list)
def folder_name_from_name(self, input_dir, file_name):
folder_name = file_name.split(input_dir)[1]
folder_name = folder_name.split(os.sep)[1]
return folder_name
def file_name_from_name(self, file_name):
name = file_name.split(os.sep)[-1]
return name
def file_extension(self, file_name):
name = self.file_name_from_name(file_name)
if name:
if len(name.split('.')) > 1:
return name.split('.')[-1]
else:
return None
return None
def file_size(self, file_name):
return os.path.getsize(file_name)
def unzip_or_move_file(self, file_name, to_dir, do_unzip=True):
"""
        If the file extension is zip and do_unzip is True, unzip its contents into to_dir.
        Otherwise, if the file has an extension, copy it into to_dir unchanged.
"""
if self.file_extension(file_name) == 'zip' and do_unzip is True:
# Unzip
if self.logger:
self.logger.info("going to unzip " + file_name + " to " + to_dir)
myzip = zipfile.ZipFile(file_name, 'r')
myzip.extractall(to_dir)
elif self.file_extension(file_name):
# Copy
if self.logger:
self.logger.info("going to move and not unzip " + file_name + " to " + to_dir)
shutil.copyfile(file_name, to_dir + os.sep + self.file_name_from_name(file_name))
def approve_file(self, file_name):
return True
def unzip_article_files(self, file_list):
for file_name in file_list:
if self.approve_file(file_name):
if self.logger:
self.logger.info("unzipping or moving file " + file_name)
self.unzip_or_move_file(file_name, self.TMP_DIR)
def rename_files_remove_version_number(self):
"""
Rename files to not include the version number, if present
        Pre-PPP files will not have a version number, since they were produced before PPP launched
"""
file_name_map = {}
# Get a list of all files
dirfiles = self.file_list(self.TMP_DIR)
for df in dirfiles:
filename = df.split(os.sep)[-1]
# Get the new file name
file_name_map[filename] = None
# TODO strip the -v1 from it
file_extension = filename.split('.')[-1]
if '-v' in filename:
# Use part before the -v number
part_without_version = filename.split('-v')[0]
else:
# No -v found, use the file name minus the extension
part_without_version = ''.join(filename.split('.')[0:-1])
renamed_filename = part_without_version + '.' + file_extension
if renamed_filename:
file_name_map[filename] = renamed_filename
else:
if self.logger:
self.logger.info('there is no renamed file for ' + filename)
for old_name, new_name in file_name_map.iteritems():
if new_name is not None:
shutil.move(self.TMP_DIR + os.sep + old_name, self.OUTPUT_DIR + os.sep + new_name)
return file_name_map
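    # Example (comment only, illustrative file names): 'elife-00013-fig1-v1.tif'
    # is renamed to 'elife-00013-fig1.tif', while a name with no '-v' part such
    # as 'elife-00013.xml' maps onto itself unchanged.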
def verify_rename_files(self, file_name_map):
"""
Each file name as key should have a non None value as its value
otherwise the file did not get renamed to something new and the
rename file process was not complete
"""
verified = True
renamed_list = []
not_renamed_list = []
for k, v in file_name_map.items():
if v is None:
verified = False
not_renamed_list.append(k)
else:
renamed_list.append(k)
return (verified, renamed_list, not_renamed_list)
def convert_xml(self, xml_file, file_name_map):
# Register namespaces
xmlio.register_xmlns()
root, doctype_dict = xmlio.parse(xml_file, return_doctype_dict=True)
# Convert xlink href values
total = xmlio.convert_xlink_href(root, file_name_map)
# TODO - compare whether all file names were converted
# Start the file output
reparsed_string = xmlio.output(root, type=None, doctype_dict=doctype_dict)
f = open(xml_file, 'wb')
f.write(reparsed_string)
f.close()
def zip_revision_number(self, fid):
"""
Look at previously supplied files and determine the
next revision number
"""
revision = None
bucket_name = self.publish_bucket
prefix = self.published_zip_folder + '/'
# Connect to S3 and bucket
s3_conn = S3Connection(self.settings.aws_access_key_id, self.settings.aws_secret_access_key)
bucket = s3_conn.lookup(bucket_name)
s3_key_names = s3lib.get_s3_key_names_from_bucket(
bucket=bucket,
prefix=prefix)
s3_key_name = s3lib.latest_pmc_zip_revision(fid, s3_key_names)
if s3_key_name:
# Found an existing PMC zip file, look for a revision number
revision_match = re.match(ur'.*r(.*)\.zip$', s3_key_name)
if revision_match is None:
# There is a zip but no revision number, use 1
revision = 1
else:
# Use the latest revision plus 1
revision = int(revision_match.group(1)) + 1
return revision
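    # Example (comment only, illustrative key names): if the bucket already
    # holds 'pmc/zip/elife-07-00013.r2.zip' for this article, the next deposit
    # is revision 3; a zip with no '.r' suffix yields revision 1; no existing
    # zip at all leaves the revision as None.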
def new_zip_filename(self, journal, volume, fid, revision=None):
filename = journal
filename = filename + '-' + str(volume).zfill(2)
filename = filename + '-' + str(fid).zfill(5)
if revision:
filename = filename + '.r' + str(revision)
filename += '.zip'
return filename
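    # Example (comment only): new_zip_filename('elife', 7, 13, revision=2)
    # returns 'elife-07-00013.r2.zip'; with revision None the '.r' part is
    # omitted, giving 'elife-07-00013.zip'.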
def create_new_zip(self, zip_file_name):
if self.logger:
self.logger.info("creating new PMC zip file named " + zip_file_name)
new_zipfile = zipfile.ZipFile(self.ZIP_DIR + os.sep + zip_file_name,
'w', zipfile.ZIP_DEFLATED, allowZip64=True)
dirfiles = self.file_list(self.OUTPUT_DIR)
for df in dirfiles:
filename = df.split(os.sep)[-1]
new_zipfile.write(df, filename)
new_zipfile.close()
def profile_article(self, document):
"""
Temporary, profile the article by folder names in test data set
In real code we still want this to return the same values
"""
# Temporary setting of version values from directory names
soup = self.article_soup(self.article_xml_file())
# elife id / doi id / manuscript id
fid = parser.doi(soup).split('.')[-1]
# article status
if parser.is_poa(soup) is True:
status = 'poa'
else:
status = 'vor'
# version
version = self.version_number(document)
# volume
volume = parser.volume(soup)
return (fid, status, version, volume)
def version_number(self, document):
version = None
m = re.search(ur'-v([0-9]*?)[\.|-]', document)
if m is not None:
version = m.group(1)
return version
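    # Example (comment only, hypothetical document name):
    # version_number('elife-00013-v1.zip') returns '1'; if no '-v<digits>'
    # followed by '.' or '-' is present, None is returned.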
def article_xml_file(self):
"""
        The XML file may be in one of two directories depending on the step
"""
file_name = None
for file_name in self.file_list(self.TMP_DIR):
if file_name.endswith('.xml'):
return file_name
if not file_name:
for file_name in self.file_list(self.OUTPUT_DIR):
if file_name.endswith('.xml'):
return file_name
return file_name
def article_soup(self, xml_filename):
return parser.parse_document(xml_filename)
def add_email_to_queue(self, journal, volume, fid, revision, file_name, file_size):
"""
After do_activity is finished, send emails to recipients
on the status
"""
# Connect to DB
db_conn = self.db.connect()
current_time = time.gmtime()
body = self.get_email_body(current_time, journal, volume, fid, revision,
file_name, file_size)
if revision:
subject = self.get_revision_email_subject(fid)
else:
subject = self.get_email_subject(current_time, journal, volume, fid, revision,
file_name, file_size)
sender_email = self.settings.ses_pmc_sender_email
recipient_email_list = self.email_recipients(revision)
for email in recipient_email_list:
# Add the email to the email queue
self.db.elife_add_email_to_email_queue(
recipient_email=email,
sender_email=sender_email,
email_type="PMCDeposit",
format="text",
subject=subject,
body=body)
return True
def email_recipients(self, revision):
"""
Get a list of email recipients depending on the revision number
because for PMC we will redirect a revision email to different recipients
"""
recipient_email_list = []
if revision:
settings_email_recipient = self.settings.ses_pmc_revision_recipient_email
else:
settings_email_recipient = self.settings.ses_pmc_recipient_email
# Handle multiple recipients, if specified
if type(settings_email_recipient) == list:
for email in settings_email_recipient:
recipient_email_list.append(email)
else:
recipient_email_list.append(settings_email_recipient)
return recipient_email_list
def get_revision_email_subject(self, fid):
"""
Email subject line for notifying production about a PMC revision
"""
subject = "You need to email PMC: article " + str(fid).zfill(5) + "!!!"
return subject
def get_email_subject(self, current_time, journal, volume, fid, revision,
file_name, file_size):
date_format = '%Y-%m-%d %H:%M'
datetime_string = time.strftime(date_format, current_time)
subject = (journal + " PMC deposit " + datetime_string + ", article " + str(fid).zfill(5))
if revision:
subject += ", revision " + str(revision)
return subject
def email_body_revision_header(self, revision):
header = None
if revision:
header = "Production please forward this to PMC with details of what changed"
return header
def get_email_body(self, current_time, journal, volume, fid, revision,
file_name, file_size):
body = ""
date_format = '%Y-%m-%dT%H:%M'
datetime_string = time.strftime(date_format, current_time)
# Header
if self.email_body_revision_header(revision):
body += self.email_body_revision_header(revision)
body += "\n\n"
# Include the subject line to be used
revision_email_subject = self.get_email_subject(current_time, journal, volume, fid,
revision, file_name, file_size)
body += str(revision_email_subject)
body += "\n\n"
# Bulk of body
body += "PMCDeposit activity" + "\n"
body += "\n"
body += journal + " deposit date: " + datetime_string + "\n"
body += "\n"
body += "Journal title: " + journal + "\n"
body += "Volume: " + str(volume).zfill(2) + "\n"
body += "Article: " + str(fid).zfill(2) + "\n"
if revision:
revision_text = str(revision)
else:
revision_text = "n/a"
body += "Revision: " + revision_text + "\n"
body += "\n"
body += "Zip filename: " + file_name + "\n"
body += "File size (bytes): " + str(file_size) + "\n"
body += "\n"
body += "\n\nSincerely\n\neLife bot"
return body
def create_activity_directories(self):
"""
Create the directories in the activity tmp_dir
"""
try:
os.mkdir(self.TMP_DIR)
os.mkdir(self.INPUT_DIR)
os.mkdir(self.JUNK_DIR)
os.mkdir(self.ZIP_DIR)
os.mkdir(self.EPS_DIR)
os.mkdir(self.TIF_DIR)
os.mkdir(self.OUTPUT_DIR)
except OSError:
pass
| gnott/elife-bot | activity/activity_PMCDeposit.py | Python | mit | 22,301 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
certificate_help = """For a detailed explanation of CA certificates in Azure IoT Hub,
see https://docs.microsoft.com/azure/iot-hub/iot-hub-x509ca-overview """
SYSTEM_ASSIGNED_IDENTITY = '[system]'
| yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/iot/_constants.py | Python | mit | 565 |
# -*- coding: utf-8 -*-
"""
Dice I
"""
def solve(dice, s):
for x in s:
if x == "E":
temp = dice[1][3]
dice[1][3] = dice[1][2]
dice[1][2] = dice[1][1]
dice[1][1] = dice[1][0]
dice[1][0] = temp
#print(dice)
elif x == "W":
temp = dice[1][0]
dice[1][0] = dice[1][1]
dice[1][1] = dice[1][2]
dice[1][2] = dice[1][3]
dice[1][3] = temp
#print(dice)
elif x == "S":
temp = dice[0][1]
dice[0][1] = dice[1][3]
dice[1][3] = dice[2][1]
dice[2][1] = dice[1][1]
dice[1][1] = temp
#print(dice)
elif x == "N":
temp = dice[0][1]
dice[0][1] = dice[1][1]
dice[1][1] = dice[2][1]
dice[2][1] = dice[1][3]
dice[1][3] = temp
#print(dice)
return dice
def setup(n):
dice = [list("0000"),
list("0000"),
list("0000")]
dice[0][1] = n[0]
dice[1][1] = n[1]
dice[1][2] = n[2]
dice[1][0] = n[3]
dice[1][3] = n[4]
dice[2][1] = n[5]
return solve(dice, 'S')
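# Worked example (comment only): for the input
#   1 2 4 8 16 32
#   SE
# the die is rolled south and then east, which leaves 8 on the top face, so
# the program prints 8.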
if __name__ == '__main__':
n = map(int, raw_input().split())
s = raw_input()
d = setup(n)
d = solve(d, s)
print(d[1][1])
| miyazaki-tm/aoj | Lesson/ITP1/ITP1_11_A.py | Python | mit | 1,348 |
# Copyright (c) 2010-2011, Found IT A/S and Piped Project Contributors.
# See LICENSE for details.
from StringIO import StringIO
import yaml
from twisted.internet import defer
from twisted.python import reflect
from twisted.trial import unittest
from piped_status_testing import statustest
# Heh. :-)
class StatusTestTest(unittest.TestCase):
def setUp(self):
self.namespace = dict()
self.loader = statustest.StatusTestLoader(self.namespace)
self.package_name = __name__.rsplit('.', 1)[0]
self.default_test_namespace = reflect.namedAny(self.package_name+'.data')
self.status_test_class_name = self.package_name + '.data.statustest_helper.HelperStatusTest'
def get_globals_and_locals_from_frame_in_result(self, result, frame=-1):
_, failure = result
# try to find the locals and globals in the failure instance
last_frame = failure.frames[frame]
locals = dict(last_frame[-2])
globals = dict(last_frame[-1])
if not locals and not globals and failure.tb:
# but they might not have been captured, so we have to check the traceback
tracebacks = [failure.tb]
while tracebacks[-1] and tracebacks[-1].tb_next:
tracebacks.append(tracebacks[-1].tb_next)
tb = tracebacks[frame]
locals = tb.tb_frame.f_locals
globals = tb.tb_frame.f_globals
return globals, locals
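    # Note (comment only): twisted Failure frames are tuples of
    # (funcname, filename, lineno, locals_items, globals_items), but the
    # locals/globals entries may be empty if variables were not captured,
    # which is why the raw traceback frame is used as a fallback above.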
def get_reporter_with_suite_run(self):
suite = self.loader.loadAnything(self.default_test_namespace, recurse=True)
return suite.run(statustest.StatusReporter(stream=StringIO()))
def get_result_by_test_name(self, list_of_tests, test_name):
for test in list_of_tests:
if test_name in str(test):
return test
def assertLocalsEqual(self, result, expected_locals, frame=-1):
_, locals = self.get_globals_and_locals_from_frame_in_result(result, frame=frame)
for key, value in expected_locals.items():
self.assertEquals(locals[key], value)
@defer.inlineCallbacks
def test_simple_run(self):
""" Test that at least one statustest gets executed. """
reporter = yield self.get_reporter_with_suite_run()
self.assertTrue(reporter.testsRun > 0)
@defer.inlineCallbacks
def test_correct_failure_and_errors(self):
""" Test that the correct failures/exceptions gets propagated to the reporter. """
reporter = yield self.get_reporter_with_suite_run()
fail_assert_result = self.get_result_by_test_name(reporter.failures, 'statustest_fail_assert')
self.assertTrue('failure', fail_assert_result[0])
self.assertEquals(fail_assert_result[1].type, unittest.FailTest)
raise_exception_result = self.get_result_by_test_name(reporter.errors, 'statustest_raise_exception')
self.assertTrue('error', raise_exception_result[0])
self.assertEquals(raise_exception_result[1].type, Exception)
self.assertEquals(raise_exception_result[1].value.args[0], 'raising an exception inside a statustest')
@defer.inlineCallbacks
def test_correct_traceback(self):
""" Test that the traceback (locals/globals and their contents) are as expected. """
reporter = yield self.get_reporter_with_suite_run()
expected_locals_for_test_failures = dict(
statustest_fail_assert_namespace=dict(some_value=1, other_value=2),
statustest_fail_assert_namespace_in=dict(some_value=1, other_value=[2,3]),
statustest_nested_functions=dict(foo='foostring', bar='barstring')
)
for test_name, expected_locals in expected_locals_for_test_failures.items():
result = self.get_result_by_test_name(reporter.failures, test_name)
self.assertLocalsEqual(result, expected_locals, frame=-2)
expected_locals_for_test_errors = dict(
statustest_raise_exception_namespace=dict(foo=1),
)
for test_name, expected_locals in expected_locals_for_test_errors.items():
result = self.get_result_by_test_name(reporter.errors, test_name)
self.assertLocalsEqual(result, expected_locals)
@defer.inlineCallbacks
def test_correct_namespaces(self):
""" Test that the namespace behaves as expected. """
reporter = yield self.get_reporter_with_suite_run()
result = self.get_result_by_test_name(reporter.errors, 'statustest_nested_raise')
globals, locals = self.get_globals_and_locals_from_frame_in_result(result)
self.assertNotIn('foo', locals)
self.assertNotIn('foo', globals)
self.assertEquals(locals['bar'], 'barstring')
result = self.get_result_by_test_name(reporter.errors, 'statustest_nested_raise_interesting_scoping')
self.assertLocalsEqual(result, dict(foo='foostring', bar='barstring'))
@defer.inlineCallbacks
def test_inserted_namespace(self):
""" Test that the inserted namespace is working. """
obj = object()
self.namespace['my_namespace_key'] = 414
self.namespace['secret_object'] = obj
reporter = yield self.get_reporter_with_suite_run()
result = self.get_result_by_test_name(reporter.errors, 'statustest_raise_exception')
globs, locs = self.get_globals_and_locals_from_frame_in_result(result)
self.assertEquals(locs['self'].namespace, dict(my_namespace_key=414, secret_object=obj))
@defer.inlineCallbacks
def test_inlinecallbacks(self):
""" Test that our inlineCallbacks works as expected. """
reporter = yield self.get_reporter_with_suite_run()
self.assertLocalsEqual(self.get_result_by_test_name(reporter.errors, 'statustest_inlinecallbacks_util'), dict(_=None))
# but locals are lost when using the default @defer.inlineCallbacks:
self.assertLocalsEqual(self.get_result_by_test_name(reporter.errors, 'statustest_inlinecallbacks'), dict())
@defer.inlineCallbacks
def test_should_skip(self):
""" Test that a test is skipped. """
reporter = yield self.get_reporter_with_suite_run()
result = self.get_result_by_test_name(reporter.skips, 'statustest_should_skip')
self.assertNotEquals(result, None)
@defer.inlineCallbacks
def test_todos(self):
""" Test that a tests marked todo are run. """
reporter = yield self.get_reporter_with_suite_run()
result = self.get_result_by_test_name(reporter.expectedFailures, 'statustest_todo')
self.assertNotEquals(result, None)
result = self.get_result_by_test_name(reporter.unexpectedSuccesses, 'statustest_unexpected_success')
self.assertNotEquals(result, None)
| foundit/Piped | contrib/status_testing/piped_status_testing/test/test_statustest.py | Python | mit | 6,759 |
"""
Contains wrapper class for datasets.
"""
import json
import os
import math
import random
import logging
import tempfile
import time
import shutil
import multiprocessing
from multiprocessing.dummy import Pool
from ast import literal_eval as make_tuple
from typing import Any, Dict, Iterable, Iterator, List, Optional, Sequence, Tuple, Union
import numpy as np
from numpy.typing import ArrayLike
import pandas as pd
import deepchem as dc
from deepchem.utils.typing import OneOrMany, Shape
from deepchem.utils.data_utils import save_to_disk, load_from_disk, load_image_files
Batch = Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
logger = logging.getLogger(__name__)
def sparsify_features(X: np.ndarray) -> np.ndarray:
"""Extracts a sparse feature representation from dense feature array.
Parameters
----------
X: np.ndarray
A numpy array of shape `(n_samples, ...)`.
Returns
-------
X_sparse: np.ndarray
A numpy array with `dtype=object` where `X_sparse[i]` is a
    tuple of `(nonzero_inds, nonzero_vals)` with nonzero indices and
values in the i-th sample of `X`.
"""
n_samples = len(X)
X_sparse = []
for i in range(n_samples):
nonzero_inds = np.nonzero(X[i])[0]
nonzero_vals = X[i][nonzero_inds]
X_sparse.append((nonzero_inds, nonzero_vals))
return np.array(X_sparse, dtype=object)
def densify_features(X_sparse: np.ndarray, num_features: int) -> np.ndarray:
"""Expands sparse feature representation to dense feature array.
Assumes that the sparse representation was constructed from an array
which had original shape `(n_samples, num_features)` so doesn't
support reconstructing multidimensional dense arrays.
Parameters
----------
X_sparse: np.ndarray
Must have `dtype=object`. `X_sparse[i]` must be a tuple of nonzero
indices and values.
num_features: int
Number of features in dense array.
Returns
-------
X: np.ndarray
A numpy array of shape `(n_samples, num_features)`.
"""
n_samples = len(X_sparse)
X = np.zeros((n_samples, num_features))
for i in range(n_samples):
nonzero_inds, nonzero_vals = X_sparse[i]
X[i][nonzero_inds.astype(int)] = nonzero_vals
return X
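# Illustrative sketch (not part of the original DeepChem module):
# sparsify_features and densify_features invert each other for 2-D input, so
# a dense feature matrix survives a round trip. The helper name below is
# hypothetical and the function is never called at import time.
def _sparse_feature_roundtrip_example():
  """Hypothetical helper demonstrating the sparsify/densify round trip."""
  X = np.array([[0.0, 1.5, 0.0], [2.0, 0.0, 3.0]])
  X_sparse = sparsify_features(X)  # per-row (nonzero_inds, nonzero_vals)
  X_dense = densify_features(X_sparse, num_features=3)
  assert np.allclose(X, X_dense)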
def pad_features(batch_size: int, X_b: np.ndarray) -> np.ndarray:
"""Pads a batch of features to have precisely batch_size elements.
Given an array of features with length less than or equal to
batch-size, pads it to `batch_size` length. It does this by
repeating the original features in tiled fashion. For illustration,
suppose that `len(X_b) == 3` and `batch_size == 10`.
>>> X_b = np.arange(3)
>>> X_b
array([0, 1, 2])
>>> batch_size = 10
>>> X_manual = np.array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0])
>>> X_out = pad_features(batch_size, X_b)
>>> assert (X_manual == X_out).all()
This function is similar to `pad_batch` but doesn't handle labels
`y` or weights `w` and is intended to be used for inference-time
query processing.
Parameters
----------
batch_size: int
The number of datapoints in a batch
X_b: np.ndarray
Must be such that `len(X_b) <= batch_size`
Returns
-------
X_out: np.ndarray
A numpy array with `len(X_out) == batch_size`.
"""
num_samples = len(X_b)
if num_samples > batch_size:
raise ValueError("Cannot pad an array longer than `batch_size`")
elif num_samples == batch_size:
return X_b
else:
# By invariant of when this is called, can assume num_samples > 0
# and num_samples < batch_size
if len(X_b.shape) > 1:
feature_shape = X_b.shape[1:]
X_out = np.zeros((batch_size,) + feature_shape, dtype=X_b.dtype)
else:
X_out = np.zeros((batch_size,), dtype=X_b.dtype)
# Fill in batch arrays
start = 0
while start < batch_size:
num_left = batch_size - start
if num_left < num_samples:
increment = num_left
else:
increment = num_samples
X_out[start:start + increment] = X_b[:increment]
start += increment
return X_out
def pad_batch(batch_size: int, X_b: np.ndarray, y_b: np.ndarray,
w_b: np.ndarray, ids_b: np.ndarray) -> Batch:
"""Pads batch to have size precisely batch_size elements.
Given arrays of features `X_b`, labels `y_b`, weights `w_b`, and
identifiers `ids_b` all with length less than or equal to
batch-size, pads them to `batch_size` length. It does this by
repeating the original entries in tiled fashion. Note that `X_b,
y_b, w_b, ids_b` must all have the same length.
Parameters
----------
batch_size: int
The number of datapoints in a batch
X_b: np.ndarray
Must be such that `len(X_b) <= batch_size`
y_b: np.ndarray
Must be such that `len(y_b) <= batch_size`
w_b: np.ndarray
Must be such that `len(w_b) <= batch_size`
ids_b: np.ndarray
Must be such that `len(ids_b) <= batch_size`
Returns
-------
Batch
The batch is a tuple of `(X_out, y_out, w_out, ids_out)`,
all numpy arrays with length `batch_size`.
"""
num_samples = len(X_b)
if num_samples == batch_size:
return (X_b, y_b, w_b, ids_b)
# By invariant of when this is called, can assume num_samples > 0
# and num_samples < batch_size
if len(X_b.shape) > 1:
feature_shape = X_b.shape[1:]
X_out = np.zeros((batch_size,) + feature_shape, dtype=X_b.dtype)
else:
X_out = np.zeros((batch_size,), dtype=X_b.dtype)
if y_b is None:
y_out = None
elif len(y_b.shape) < 2:
y_out = np.zeros(batch_size, dtype=y_b.dtype)
else:
y_out = np.zeros((batch_size,) + y_b.shape[1:], dtype=y_b.dtype)
if w_b is None:
w_out = None
elif len(w_b.shape) < 2:
w_out = np.zeros(batch_size, dtype=w_b.dtype)
else:
w_out = np.zeros((batch_size,) + w_b.shape[1:], dtype=w_b.dtype)
ids_out = np.zeros((batch_size,), dtype=ids_b.dtype)
# Fill in batch arrays
start = 0
  # Only the first copy of the samples is counted in the training loss
if w_out is not None:
w_out[start:start + num_samples] = w_b[:]
while start < batch_size:
num_left = batch_size - start
if num_left < num_samples:
increment = num_left
else:
increment = num_samples
X_out[start:start + increment] = X_b[:increment]
if y_out is not None:
y_out[start:start + increment] = y_b[:increment]
ids_out[start:start + increment] = ids_b[:increment]
start += increment
return (X_out, y_out, w_out, ids_out)
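# Illustrative sketch (not part of the original DeepChem module): padding a
# 3-sample batch up to batch_size=5 tiles the samples, and only the first copy
# keeps its weights, so the padded rows do not contribute to the training
# loss. The helper name below is hypothetical and is never called at import.
def _pad_batch_example():
  """Hypothetical helper demonstrating pad_batch tiling and zeroed weights."""
  X_b = np.arange(6, dtype=np.float32).reshape(3, 2)
  y_b = np.ones((3, 1), dtype=np.float32)
  w_b = np.ones((3, 1), dtype=np.float32)
  ids_b = np.array(['a', 'b', 'c'])
  X_out, y_out, w_out, ids_out = pad_batch(5, X_b, y_b, w_b, ids_b)
  assert X_out.shape == (5, 2)          # tiled up to batch_size
  assert float(w_out[3:].sum()) == 0.0  # repeated rows carry zero weight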
class Dataset(object):
"""Abstract base class for datasets defined by X, y, w elements.
`Dataset` objects are used to store representations of a dataset as
used in a machine learning task. Datasets contain features `X`,
labels `y`, weights `w` and identifiers `ids`. Different subclasses
of `Dataset` may choose to hold `X, y, w, ids` in memory or on disk.
The `Dataset` class attempts to provide for strong interoperability
with other machine learning representations for datasets.
Interconversion methods allow for `Dataset` objects to be converted
to and from numpy arrays, pandas dataframes, tensorflow datasets,
and pytorch datasets (only to and not from for pytorch at present).
Note that you can never instantiate a `Dataset` object directly.
Instead you will need to instantiate one of the concrete subclasses.
"""
def __init__(self) -> None:
raise NotImplementedError()
def __len__(self) -> int:
"""Get the number of elements in the dataset.
Returns
-------
int
The number of elements in the dataset.
"""
raise NotImplementedError()
def get_shape(self) -> Tuple[Shape, Shape, Shape, Shape]:
"""Get the shape of the dataset.
Returns four tuples, giving the shape of the X, y, w, and ids
arrays.
Returns
-------
Tuple
The tuple contains four elements, which are the shapes of
the X, y, w, and ids arrays.
"""
raise NotImplementedError()
def get_task_names(self) -> np.ndarray:
"""Get the names of the tasks associated with this dataset."""
raise NotImplementedError()
@property
def X(self) -> np.ndarray:
"""Get the X vector for this dataset as a single numpy array.
Returns
-------
np.ndarray
A numpy array of identifiers `X`.
Note
----
If data is stored on disk, accessing this field may involve loading
data from disk and could potentially be slow. Using
`iterbatches()` or `itersamples()` may be more efficient for
larger datasets.
"""
raise NotImplementedError()
@property
def y(self) -> np.ndarray:
"""Get the y vector for this dataset as a single numpy array.
Returns
-------
np.ndarray
A numpy array of identifiers `y`.
Note
----
If data is stored on disk, accessing this field may involve loading
data from disk and could potentially be slow. Using
`iterbatches()` or `itersamples()` may be more efficient for
larger datasets.
"""
raise NotImplementedError()
@property
def ids(self) -> np.ndarray:
"""Get the ids vector for this dataset as a single numpy array.
Returns
-------
np.ndarray
A numpy array of identifiers `ids`.
Note
----
If data is stored on disk, accessing this field may involve loading
data from disk and could potentially be slow. Using
`iterbatches()` or `itersamples()` may be more efficient for
larger datasets.
"""
raise NotImplementedError()
@property
def w(self) -> np.ndarray:
"""Get the weight vector for this dataset as a single numpy array.
Returns
-------
np.ndarray
A numpy array of weights `w`.
Note
----
If data is stored on disk, accessing this field may involve loading
data from disk and could potentially be slow. Using
`iterbatches()` or `itersamples()` may be more efficient for
larger datasets.
"""
raise NotImplementedError()
def __repr__(self) -> str:
"""Convert self to REPL print representation."""
threshold = dc.utils.get_print_threshold()
task_str = np.array2string(
np.array(self.get_task_names()), threshold=threshold)
X_shape, y_shape, w_shape, _ = self.get_shape()
if self.__len__() < dc.utils.get_max_print_size():
id_str = np.array2string(self.ids, threshold=threshold)
return "<%s X.shape: %s, y.shape: %s, w.shape: %s, ids: %s, task_names: %s>" % (
self.__class__.__name__, str(X_shape), str(y_shape), str(w_shape),
id_str, task_str)
else:
return "<%s X.shape: %s, y.shape: %s, w.shape: %s, task_names: %s>" % (
self.__class__.__name__, str(X_shape), str(y_shape), str(w_shape),
task_str)
def __str__(self) -> str:
"""Convert self to str representation."""
return self.__repr__()
def iterbatches(self,
batch_size: Optional[int] = None,
epochs: int = 1,
deterministic: bool = False,
pad_batches: bool = False) -> Iterator[Batch]:
"""Get an object that iterates over minibatches from the dataset.
Each minibatch is returned as a tuple of four numpy arrays:
`(X, y, w, ids)`.
Parameters
----------
batch_size: int, optional (default None)
Number of elements in each batch.
epochs: int, optional (default 1)
Number of epochs to walk over dataset.
deterministic: bool, optional (default False)
If True, follow deterministic order.
pad_batches: bool, optional (default False)
If True, pad each batch to `batch_size`.
Returns
-------
Iterator[Batch]
Generator which yields tuples of four numpy arrays `(X, y, w, ids)`.
"""
raise NotImplementedError()
def itersamples(self) -> Iterator[Batch]:
"""Get an object that iterates over the samples in the dataset.
Examples
--------
>>> dataset = NumpyDataset(np.ones((2,2)))
>>> for x, y, w, id in dataset.itersamples():
... print(x.tolist(), y.tolist(), w.tolist(), id)
[1.0, 1.0] [0.0] [0.0] 0
[1.0, 1.0] [0.0] [0.0] 1
"""
raise NotImplementedError()
def transform(self, transformer: "dc.trans.Transformer", **args) -> "Dataset":
"""Construct a new dataset by applying a transformation to every sample in this dataset.
The argument is a function that can be called as follows:
>> newx, newy, neww = fn(x, y, w)
It might be called only once with the whole dataset, or multiple
times with different subsets of the data. Each time it is called,
it should transform the samples and return the transformed data.
Parameters
----------
transformer: dc.trans.Transformer
The transformation to apply to each sample in the dataset.
Returns
-------
Dataset
A newly constructed Dataset object.
"""
raise NotImplementedError()
def select(self,
indices: Union[Sequence[int], np.ndarray],
select_dir: Optional[str] = None) -> "Dataset":
"""Creates a new dataset from a selection of indices from self.
Parameters
----------
indices: Sequence
List of indices to select.
select_dir: str, optional (default None)
Path to new directory that the selected indices will be copied to.
"""
raise NotImplementedError()
def get_statistics(self, X_stats: bool = True,
y_stats: bool = True) -> Tuple[np.ndarray, ...]:
"""Compute and return statistics of this dataset.
Uses `self.itersamples()` to compute means and standard deviations
of the dataset. Can compute on large datasets that don't fit in
memory.
Parameters
----------
X_stats: bool, optional (default True)
If True, compute feature-level mean and standard deviations.
y_stats: bool, optional (default True)
If True, compute label-level mean and standard deviations.
Returns
-------
Tuple
- If `X_stats == True`, returns `(X_means, X_stds)`.
- If `y_stats == True`, returns `(y_means, y_stds)`.
- If both are true, returns `(X_means, X_stds, y_means, y_stds)`.
"""
x_shape, y_shape, w_shape, ids_shape = self.get_shape()
X_means = np.zeros(x_shape[1:])
X_m2 = np.zeros(x_shape[1:])
y_means = np.zeros(y_shape[1:])
y_m2 = np.zeros(y_shape[1:])
n = 0
for X, y, _, _ in self.itersamples():
n += 1
if X_stats:
dx = X - X_means
X_means += dx / n
X_m2 += dx * (X - X_means)
if y_stats:
dy = y - y_means
y_means += dy / n
y_m2 += dy * (y - y_means)
if n < 2:
X_stds = np.zeros(x_shape[1:])
y_stds = np.zeros(y_shape[1:])
else:
X_stds = np.sqrt(X_m2 / n)
y_stds = np.sqrt(y_m2 / n)
if X_stats and not y_stats:
return X_means, X_stds
elif y_stats and not X_stats:
return y_means, y_stds
elif X_stats and y_stats:
return X_means, X_stds, y_means, y_stds
else:
return tuple()
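  # Note (comment only, not in the original source): the running update above
  # is Welford's online mean/variance algorithm, so for an in-memory dataset
  # the results agree with a direct numpy computation, e.g.:
  #   ds = NumpyDataset(np.random.rand(10, 3), np.random.rand(10, 1))
  #   X_means, X_stds, y_means, y_stds = ds.get_statistics()
  #   assert np.allclose(X_means, ds.X.mean(axis=0))
  #   assert np.allclose(X_stds, ds.X.std(axis=0))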
def make_tf_dataset(self,
batch_size: int = 100,
epochs: int = 1,
deterministic: bool = False,
pad_batches: bool = False):
"""Create a tf.data.Dataset that iterates over the data in this Dataset.
Each value returned by the Dataset's iterator is a tuple of (X, y,
w) for one batch.
Parameters
----------
batch_size: int, default 100
The number of samples to include in each batch.
epochs: int, default 1
The number of times to iterate over the Dataset.
deterministic: bool, default False
If True, the data is produced in order. If False, a different
random permutation of the data is used for each epoch.
pad_batches: bool, default False
If True, batches are padded as necessary to make the size of
each batch exactly equal batch_size.
Returns
-------
tf.data.Dataset
TensorFlow Dataset that iterates over the same data.
Note
----
This class requires TensorFlow to be installed.
"""
try:
import tensorflow as tf
except:
raise ImportError("This method requires TensorFlow to be installed.")
# Retrieve the first sample so we can determine the dtypes.
X, y, w, ids = next(self.itersamples())
dtypes = (tf.as_dtype(X.dtype), tf.as_dtype(y.dtype), tf.as_dtype(w.dtype))
shapes = (
tf.TensorShape([None] + list(X.shape)), # type: ignore
tf.TensorShape([None] + list(y.shape)), # type: ignore
tf.TensorShape([None] + list(w.shape))) # type: ignore
# Create a Tensorflow Dataset.
def gen_data():
for X, y, w, ids in self.iterbatches(batch_size, epochs, deterministic,
pad_batches):
yield (X, y, w)
return tf.data.Dataset.from_generator(gen_data, dtypes, shapes)
def make_pytorch_dataset(self,
epochs: int = 1,
deterministic: bool = False,
batch_size: Optional[int] = None):
"""Create a torch.utils.data.IterableDataset that iterates over the data in this Dataset.
Each value returned by the Dataset's iterator is a tuple of (X, y, w, id)
containing the data for one batch, or for a single sample if batch_size is None.
Parameters
----------
epochs: int, default 1
The number of times to iterate over the Dataset.
deterministic: bool, default False
If True, the data is produced in order. If False, a different
random permutation of the data is used for each epoch.
batch_size: int, optional (default None)
The number of samples to return in each batch. If None, each returned
value is a single sample.
Returns
-------
torch.utils.data.IterableDataset
`torch.utils.data.IterableDataset` that iterates over the data in
this dataset.
Note
----
This class requires PyTorch to be installed.
"""
raise NotImplementedError()
def to_dataframe(self) -> pd.DataFrame:
"""Construct a pandas DataFrame containing the data from this Dataset.
Returns
-------
pd.DataFrame
Pandas dataframe. If there is only a single feature per datapoint,
will have column "X" else will have columns "X1,X2,..." for
features. If there is only a single label per datapoint, will
have column "y" else will have columns "y1,y2,..." for labels. If
there is only a single weight per datapoint will have column "w"
else will have columns "w1,w2,...". Will have column "ids" for
identifiers.
"""
X = self.X
y = self.y
w = self.w
ids = self.ids
if len(X.shape) == 1 or X.shape[1] == 1:
columns = ['X']
else:
columns = [f'X{i+1}' for i in range(X.shape[1])]
X_df = pd.DataFrame(X, columns=columns)
if len(y.shape) == 1 or y.shape[1] == 1:
columns = ['y']
else:
columns = [f'y{i+1}' for i in range(y.shape[1])]
y_df = pd.DataFrame(y, columns=columns)
if len(w.shape) == 1 or w.shape[1] == 1:
columns = ['w']
else:
columns = [f'w{i+1}' for i in range(w.shape[1])]
w_df = pd.DataFrame(w, columns=columns)
ids_df = pd.DataFrame(ids, columns=['ids'])
return pd.concat([X_df, y_df, w_df, ids_df], axis=1, sort=False)
@staticmethod
def from_dataframe(df: pd.DataFrame,
X: Optional[OneOrMany[str]] = None,
y: Optional[OneOrMany[str]] = None,
w: Optional[OneOrMany[str]] = None,
ids: Optional[str] = None):
"""Construct a Dataset from the contents of a pandas DataFrame.
Parameters
----------
df: pd.DataFrame
The pandas DataFrame
X: str or List[str], optional (default None)
The name of the column or columns containing the X array. If
this is None, it will look for default column names that match
those produced by to_dataframe().
y: str or List[str], optional (default None)
The name of the column or columns containing the y array. If
this is None, it will look for default column names that match
those produced by to_dataframe().
w: str or List[str], optional (default None)
The name of the column or columns containing the w array. If
this is None, it will look for default column names that match
those produced by to_dataframe().
ids: str, optional (default None)
The name of the column containing the ids. If this is None, it
will look for default column names that match those produced by
to_dataframe().
"""
# Find the X values.
if X is not None:
X_val = df[X]
elif 'X' in df.columns:
X_val = df['X']
else:
columns = []
i = 1
while f'X{i}' in df.columns:
columns.append(f'X{i}')
i += 1
X_val = df[columns]
if len(X_val.shape) == 1:
X_val = np.expand_dims(X_val, 1)
# Find the y values.
if y is not None:
y_val = df[y]
elif 'y' in df.columns:
y_val = df['y']
else:
columns = []
i = 1
while f'y{i}' in df.columns:
columns.append(f'y{i}')
i += 1
y_val = df[columns]
if len(y_val.shape) == 1:
y_val = np.expand_dims(y_val, 1)
# Find the w values.
if w is not None:
w_val = df[w]
elif 'w' in df.columns:
w_val = df['w']
else:
columns = []
i = 1
while f'w{i}' in df.columns:
columns.append(f'w{i}')
i += 1
w_val = df[columns]
if len(w_val.shape) == 1:
w_val = np.expand_dims(w_val, 1)
# Find the ids.
if ids is not None:
ids_val = df[ids]
elif 'ids' in df.columns:
ids_val = df['ids']
else:
ids_val = None
return NumpyDataset(X_val, y_val, w_val, ids_val)
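# Illustrative sketch (not part of the original DeepChem module): to_dataframe
# and from_dataframe round-trip a simple in-memory dataset using the default
# column names ('X1', 'X2', ..., 'y', 'w', 'ids'). The helper name below is
# hypothetical and is never called at import time.
def _dataframe_roundtrip_example():
  """Hypothetical helper demonstrating the DataFrame round trip."""
  ds = NumpyDataset(np.random.rand(4, 2), np.random.rand(4, 1))
  df = ds.to_dataframe()                 # columns: X1, X2, y, w, ids
  restored = Dataset.from_dataframe(df)  # rebuilt as a NumpyDataset
  assert np.allclose(ds.X, restored.X) and np.allclose(ds.y, restored.y)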
class NumpyDataset(Dataset):
"""A Dataset defined by in-memory numpy arrays.
This subclass of `Dataset` stores arrays `X,y,w,ids` in memory as
numpy arrays. This makes it very easy to construct `NumpyDataset`
objects.
Examples
--------
>>> import numpy as np
>>> dataset = NumpyDataset(X=np.random.rand(5, 3), y=np.random.rand(5,), ids=np.arange(5))
"""
def __init__(self,
X: ArrayLike,
y: Optional[ArrayLike] = None,
w: Optional[ArrayLike] = None,
ids: Optional[ArrayLike] = None,
n_tasks: int = 1) -> None:
"""Initialize this object.
Parameters
----------
X: np.ndarray
Input features. A numpy array of shape `(n_samples,...)`.
y: np.ndarray, optional (default None)
Labels. A numpy array of shape `(n_samples, ...)`. Note that each label can
have an arbitrary shape.
w: np.ndarray, optional (default None)
Weights. Should either be 1D array of shape `(n_samples,)` or if
there's more than one task, of shape `(n_samples, n_tasks)`.
ids: np.ndarray, optional (default None)
Identifiers. A numpy array of shape `(n_samples,)`
n_tasks: int, default 1
Number of learning tasks.
"""
n_samples = np.shape(X)[0]
if n_samples > 0:
if y is None:
# Set labels to be zero, with zero weights
y = np.zeros((n_samples, n_tasks), np.float32)
w = np.zeros((n_samples, 1), np.float32)
if ids is None:
ids = np.arange(n_samples)
if not isinstance(X, np.ndarray):
X = np.array(X)
if not isinstance(y, np.ndarray):
y = np.array(y)
if w is None:
if len(y.shape) == 1:
w = np.ones(y.shape[0], np.float32)
else:
w = np.ones((y.shape[0], 1), np.float32)
if not isinstance(w, np.ndarray):
w = np.array(w)
self._X = X
self._y = y
self._w = w
self._ids = np.array(ids, dtype=object)
def __len__(self) -> int:
"""Get the number of elements in the dataset."""
return len(self._y)
def get_shape(self) -> Tuple[Shape, Shape, Shape, Shape]:
"""Get the shape of the dataset.
Returns four tuples, giving the shape of the X, y, w, and ids arrays.
"""
return self._X.shape, self._y.shape, self._w.shape, self._ids.shape
def get_task_names(self) -> np.ndarray:
"""Get the names of the tasks associated with this dataset."""
if len(self._y.shape) < 2:
return np.array([0])
return np.arange(self._y.shape[1])
@property
def X(self) -> np.ndarray:
"""Get the X vector for this dataset as a single numpy array."""
return self._X
@property
def y(self) -> np.ndarray:
"""Get the y vector for this dataset as a single numpy array."""
return self._y
@property
def ids(self) -> np.ndarray:
"""Get the ids vector for this dataset as a single numpy array."""
return self._ids
@property
def w(self) -> np.ndarray:
"""Get the weight vector for this dataset as a single numpy array."""
return self._w
def iterbatches(self,
batch_size: Optional[int] = None,
epochs: int = 1,
deterministic: bool = False,
pad_batches: bool = False) -> Iterator[Batch]:
"""Get an object that iterates over minibatches from the dataset.
Each minibatch is returned as a tuple of four numpy arrays:
`(X, y, w, ids)`.
Parameters
----------
batch_size: int, optional (default None)
Number of elements in each batch.
epochs: int, default 1
Number of epochs to walk over dataset.
deterministic: bool, optional (default False)
If True, follow deterministic order.
pad_batches: bool, optional (default False)
If True, pad each batch to `batch_size`.
Returns
-------
Iterator[Batch]
Generator which yields tuples of four numpy arrays `(X, y, w, ids)`.
"""
def iterate(dataset: NumpyDataset, batch_size: Optional[int], epochs: int,
deterministic: bool, pad_batches: bool):
n_samples = dataset._X.shape[0]
if deterministic:
sample_perm = np.arange(n_samples)
if batch_size is None:
batch_size = n_samples
for epoch in range(epochs):
if not deterministic:
sample_perm = np.random.permutation(n_samples)
batch_idx = 0
num_batches = math.ceil(n_samples / batch_size)
while batch_idx < num_batches:
start = batch_idx * batch_size
end = min(n_samples, (batch_idx + 1) * batch_size)
indices = range(start, end)
perm_indices = sample_perm[indices]
X_batch = dataset._X[perm_indices]
y_batch = dataset._y[perm_indices]
w_batch = dataset._w[perm_indices]
ids_batch = dataset._ids[perm_indices]
if pad_batches:
(X_batch, y_batch, w_batch, ids_batch) = pad_batch(
batch_size, X_batch, y_batch, w_batch, ids_batch)
batch_idx += 1
yield (X_batch, y_batch, w_batch, ids_batch)
return iterate(self, batch_size, epochs, deterministic, pad_batches)
def itersamples(self) -> Iterator[Batch]:
"""Get an object that iterates over the samples in the dataset.
Returns
-------
Iterator[Batch]
Iterator which yields tuples of four numpy arrays `(X, y, w, ids)`.
Examples
--------
>>> dataset = NumpyDataset(np.ones((2,2)))
>>> for x, y, w, id in dataset.itersamples():
... print(x.tolist(), y.tolist(), w.tolist(), id)
[1.0, 1.0] [0.0] [0.0] 0
[1.0, 1.0] [0.0] [0.0] 1
"""
n_samples = self._X.shape[0]
return ((self._X[i], self._y[i], self._w[i], self._ids[i])
for i in range(n_samples))
def transform(self, transformer: "dc.trans.Transformer",
**args) -> "NumpyDataset":
"""Construct a new dataset by applying a transformation to every sample in this dataset.
The argument is a function that can be called as follows:
>> newx, newy, neww = fn(x, y, w)
It might be called only once with the whole dataset, or multiple
times with different subsets of the data. Each time it is called,
it should transform the samples and return the transformed data.
Parameters
----------
transformer: dc.trans.Transformer
The transformation to apply to each sample in the dataset
Returns
-------
NumpyDataset
A newly constructed NumpyDataset object
"""
newx, newy, neww, newids = transformer.transform_array(
self._X, self._y, self._w, self._ids)
return NumpyDataset(newx, newy, neww, newids)
def select(self,
indices: Union[Sequence[int], np.ndarray],
select_dir: Optional[str] = None) -> "NumpyDataset":
"""Creates a new dataset from a selection of indices from self.
Parameters
----------
indices: List[int]
List of indices to select.
select_dir: str, optional (default None)
Used to provide same API as `DiskDataset`. Ignored since
`NumpyDataset` is purely in-memory.
Returns
-------
NumpyDataset
A selected NumpyDataset object
"""
X = self.X[indices]
y = self.y[indices]
w = self.w[indices]
ids = self.ids[indices]
return NumpyDataset(X, y, w, ids)
def make_pytorch_dataset(self,
epochs: int = 1,
deterministic: bool = False,
batch_size: Optional[int] = None):
"""Create a torch.utils.data.IterableDataset that iterates over the data in this Dataset.
Each value returned by the Dataset's iterator is a tuple of (X, y, w, id)
containing the data for one batch, or for a single sample if batch_size is None.
Parameters
----------
epochs: int, default 1
The number of times to iterate over the Dataset
deterministic: bool, default False
If True, the data is produced in order. If False, a different
random permutation of the data is used for each epoch.
batch_size: int, optional (default None)
The number of samples to return in each batch. If None, each returned
value is a single sample.
Returns
-------
torch.utils.data.IterableDataset
`torch.utils.data.IterableDataset` that iterates over the data in
this dataset.
Note
----
This method requires PyTorch to be installed.
"""
try:
from deepchem.data.pytorch_datasets import _TorchNumpyDataset
except:
raise ImportError("This method requires PyTorch to be installed.")
pytorch_ds = _TorchNumpyDataset(
numpy_dataset=self,
epochs=epochs,
deterministic=deterministic,
batch_size=batch_size)
return pytorch_ds
@staticmethod
def from_DiskDataset(ds: "DiskDataset") -> "NumpyDataset":
"""Convert DiskDataset to NumpyDataset.
Parameters
----------
ds: DiskDataset
DiskDataset to transform to NumpyDataset.
Returns
-------
NumpyDataset
A new NumpyDataset created from DiskDataset.
"""
return NumpyDataset(ds.X, ds.y, ds.w, ds.ids)
def to_json(self, fname: str) -> None:
"""Dump NumpyDataset to the json file .
Parameters
----------
fname: str
The name of the json file.
"""
d = {
'X': self.X.tolist(),
'y': self.y.tolist(),
'w': self.w.tolist(),
'ids': self.ids.tolist()
}
with open(fname, 'w') as fout:
json.dump(d, fout)
@staticmethod
def from_json(fname: str) -> "NumpyDataset":
"""Create NumpyDataset from the json file.
Parameters
----------
fname: str
The name of the json file.
Returns
-------
NumpyDataset
A new NumpyDataset created from the json file.
"""
with open(fname) as fin:
d = json.load(fin)
return NumpyDataset(d['X'], d['y'], d['w'], d['ids'])
@staticmethod
def merge(datasets: Sequence[Dataset]) -> "NumpyDataset":
"""Merge multiple NumpyDatasets.
Parameters
----------
datasets: List[Dataset]
List of datasets to merge.
Returns
-------
NumpyDataset
A single NumpyDataset containing all the samples from all datasets.
Examples
--------
>>> X1, y1 = np.random.rand(5, 3), np.random.randn(5, 1)
>>> first_dataset = dc.data.NumpyDataset(X1, y1)
>>> X2, y2 = np.random.rand(5, 3), np.random.randn(5, 1)
>>> second_dataset = dc.data.NumpyDataset(X2, y2)
>>> merged_dataset = dc.data.NumpyDataset.merge([first_dataset, second_dataset])
>>> print(len(merged_dataset) == len(first_dataset) + len(second_dataset))
True
"""
X, y, w, ids = datasets[0].X, datasets[0].y, datasets[0].w, datasets[0].ids
for dataset in datasets[1:]:
X = np.concatenate([X, dataset.X], axis=0)
y = np.concatenate([y, dataset.y], axis=0)
w = np.concatenate([w, dataset.w], axis=0)
ids = np.concatenate(
[ids, dataset.ids],
axis=0,
)
return NumpyDataset(X, y, w, ids, n_tasks=y.shape[1])
class _Shard(object):
def __init__(self, X, y, w, ids):
self.X = X
self.y = y
self.w = w
self.ids = ids
class DiskDataset(Dataset):
"""
A Dataset that is stored as a set of files on disk.
The DiskDataset is the workhorse class of DeepChem that facilitates analyses
on large datasets. Use this class whenever you're working with a large
dataset that can't be easily manipulated in RAM.
On disk, a `DiskDataset` has a simple structure. All files for a given
`DiskDataset` are stored in a `data_dir`. The contents of `data_dir` should
be laid out as follows:
| data_dir/
| |
| ---> metadata.csv.gzip
| |
| ---> tasks.json
| |
| ---> shard-0-X.npy
| |
| ---> shard-0-y.npy
| |
| ---> shard-0-w.npy
| |
| ---> shard-0-ids.npy
| |
| ---> shard-1-X.npy
| .
| .
| .
The metadata is constructed by static method
`DiskDataset._construct_metadata` and saved to disk by
`DiskDataset._save_metadata`. The metadata itself consists of a csv file
which has columns `('ids', 'X', 'y', 'w', 'ids_shape', 'X_shape', 'y_shape',
'w_shape')`. `tasks.json` consists of a list of task names for this dataset.
The actual data is stored in `.npy` files (numpy array files) of the form
'shard-0-X.npy', 'shard-0-y.npy', etc.
The basic structure of `DiskDataset` is quite robust and will likely serve
you well for datasets up to about 100 GB. However, note that
`DiskDataset` has not been tested for very large datasets at the terabyte
range and beyond. You may be better served by implementing a custom
`Dataset` class for those use cases.
Examples
--------
Let's walk through a simple example of constructing a new `DiskDataset`.
>>> import deepchem as dc
>>> import numpy as np
>>> X = np.random.rand(10, 10)
>>> dataset = dc.data.DiskDataset.from_numpy(X)
If you have already saved a `DiskDataset` to `data_dir`, you can reinitialize it with
>> data_dir = "/path/to/my/data"
>> dataset = dc.data.DiskDataset(data_dir)
Once you have a dataset you can access its attributes as follows
>>> X = np.random.rand(10, 10)
>>> y = np.random.rand(10,)
>>> w = np.ones_like(y)
>>> dataset = dc.data.DiskDataset.from_numpy(X)
>>> X, y, w = dataset.X, dataset.y, dataset.w
One thing to beware of is that `dataset.X`, `dataset.y`, `dataset.w` are
loading data from disk! If you have a large dataset, these operations can be
extremely slow. Instead, try iterating through the dataset.
>>> for (xi, yi, wi, idi) in dataset.itersamples():
... pass
Attributes
----------
data_dir: str
Location of directory where this `DiskDataset` is stored to disk
metadata_df: pd.DataFrame
Pandas Dataframe holding metadata for this `DiskDataset`
legacy_metadata: bool
Whether this `DiskDataset` uses legacy format.
Note
----
`DiskDataset` originally had a simpler metadata format without shape
information. Older `DiskDataset` objects had metadata files with columns
`('ids', 'X', 'y', 'w')` and no additional shape columns. `DiskDataset`
maintains backwards compatibility with this older metadata format, but we
recommend for performance reasons not using legacy metadata for new
projects.
"""
def __init__(self, data_dir: str) -> None:
"""Load a constructed DiskDataset from disk
Note that this method cannot construct a new disk dataset. Instead use
static methods `DiskDataset.create_dataset` or `DiskDataset.from_numpy`
for that purpose. Use this constructor instead to load a `DiskDataset`
that has already been created on disk.
Parameters
----------
data_dir: str
Location on disk of an existing `DiskDataset`.
"""
self.data_dir = data_dir
logger.info("Loading dataset from disk.")
tasks, self.metadata_df = self.load_metadata()
self.tasks = np.array(tasks)
if len(self.metadata_df.columns) == 4 and list(
self.metadata_df.columns) == ['ids', 'X', 'y', 'w']:
logger.info(
"Detected legacy metatadata on disk. You can upgrade from legacy metadata "
"to the more efficient current metadata by resharding this dataset "
"by calling the reshard() method of this object.")
self.legacy_metadata = True
elif len(self.metadata_df.columns) == 8 and list(
self.metadata_df.columns) == [
'ids', 'X', 'y', 'w', 'ids_shape', 'X_shape', 'y_shape', 'w_shape'
]: # noqa
self.legacy_metadata = False
else:
raise ValueError(
"Malformed metadata on disk. Metadata must have columns 'ids', 'X', 'y', 'w', "
"'ids_shape', 'X_shape', 'y_shape', 'w_shape' (or if in legacy metadata format,"
"columns 'ids', 'X', 'y', 'w')")
self._cached_shards: Optional[List] = None
self._memory_cache_size = 20 * (1 << 20) # 20 MB
self._cache_used = 0
@staticmethod
def create_dataset(shard_generator: Iterable[Batch],
data_dir: Optional[str] = None,
tasks: Optional[ArrayLike] = []) -> "DiskDataset":
"""Creates a new DiskDataset
Parameters
----------
shard_generator: Iterable[Batch]
An iterable (either a list or generator) that provides tuples of data
(X, y, w, ids). Each tuple will be written to a separate shard on disk.
data_dir: str, optional (default None)
Filename for data directory. Creates a temp directory if none specified.
tasks: Sequence, optional (default [])
List of tasks for this dataset.
Returns
-------
DiskDataset
A new `DiskDataset` constructed from the given data
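Examples
--------
A minimal sketch with two in-memory shards (assumes `np` is numpy;
shapes, values and the task name are arbitrary):
>> shards = [(np.random.rand(10, 5), np.random.rand(10, 1), np.ones((10, 1)), np.arange(10)) for _ in range(2)]
>> dataset = DiskDataset.create_dataset(shards, tasks=['task0'])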
"""
if data_dir is None:
data_dir = tempfile.mkdtemp()
elif not os.path.exists(data_dir):
os.makedirs(data_dir)
metadata_rows = []
time1 = time.time()
for shard_num, (X, y, w, ids) in enumerate(shard_generator):
basename = "shard-%d" % shard_num
metadata_rows.append(
DiskDataset.write_data_to_disk(data_dir, basename, X, y, w, ids))
metadata_df = DiskDataset._construct_metadata(metadata_rows)
DiskDataset._save_metadata(metadata_df, data_dir, tasks)
time2 = time.time()
logger.info("TIMING: dataset construction took %0.3f s" % (time2 - time1))
return DiskDataset(data_dir)
def load_metadata(self) -> Tuple[List[str], pd.DataFrame]:
"""Helper method that loads metadata from disk."""
try:
tasks_filename, metadata_filename = self._get_metadata_filename()
with open(tasks_filename) as fin:
tasks = json.load(fin)
metadata_df = pd.read_csv(
metadata_filename, compression='gzip', dtype=object)
metadata_df = metadata_df.where((pd.notnull(metadata_df)), None)
return tasks, metadata_df
except Exception:
pass
# Load obsolete format -> save in new format
metadata_filename = os.path.join(self.data_dir, "metadata.joblib")
if os.path.exists(metadata_filename):
tasks, metadata_df = load_from_disk(metadata_filename)
del metadata_df['task_names']
del metadata_df['basename']
DiskDataset._save_metadata(metadata_df, self.data_dir, tasks)
return tasks, metadata_df
raise ValueError("No Metadata Found On Disk")
@staticmethod
def _save_metadata(metadata_df: pd.DataFrame, data_dir: str,
tasks: Optional[ArrayLike]) -> None:
"""Saves the metadata for a DiskDataset
Parameters
----------
metadata_df: pd.DataFrame
The dataframe which will be written to disk.
data_dir: str
Directory to store metadata.
tasks: Sequence, optional
Tasks of DiskDataset. If `None`, an empty list of tasks is written to
disk.
"""
if tasks is None:
tasks = []
elif isinstance(tasks, np.ndarray):
tasks = tasks.tolist()
metadata_filename = os.path.join(data_dir, "metadata.csv.gzip")
tasks_filename = os.path.join(data_dir, "tasks.json")
with open(tasks_filename, 'w') as fout:
json.dump(tasks, fout)
metadata_df.to_csv(metadata_filename, index=False, compression='gzip')
@staticmethod
def _construct_metadata(metadata_entries: List) -> pd.DataFrame:
"""Construct a dataframe containing metadata.
Parameters
----------
metadata_entries: List
`metadata_entries` should have elements returned by write_data_to_disk
above.
Returns
-------
pd.DataFrame
A Pandas Dataframe object contains metadata.
"""
columns = ('ids', 'X', 'y', 'w', 'ids_shape', 'X_shape', 'y_shape',
'w_shape')
metadata_df = pd.DataFrame(metadata_entries, columns=columns)
return metadata_df
@staticmethod
def write_data_to_disk(data_dir: str,
basename: str,
X: Optional[np.ndarray] = None,
y: Optional[np.ndarray] = None,
w: Optional[np.ndarray] = None,
ids: Optional[np.ndarray] = None) -> List[Any]:
"""Static helper method to write data to disk.
This helper method is used to write a shard of data to disk.
Parameters
----------
data_dir: str
Data directory to write shard to.
basename: str
Basename for the shard in question.
X: np.ndarray, optional (default None)
The features array.
y: np.ndarray, optional (default None)
The labels array.
w: np.ndarray, optional (default None)
The weights array.
ids: np.ndarray, optional (default None)
The identifiers array.
Returns
-------
List[Optional[str]]
List with values `[out_ids, out_X, out_y, out_w, out_ids_shape,
out_X_shape, out_y_shape, out_w_shape]` with filenames of locations to
disk which these respective arrays were written.
"""
if X is not None:
out_X: Optional[str] = "%s-X.npy" % basename
save_to_disk(X, os.path.join(data_dir, out_X)) # type: ignore
out_X_shape: Optional[Tuple[int, ...]] = X.shape
else:
out_X = None
out_X_shape = None
if y is not None:
out_y: Optional[str] = "%s-y.npy" % basename
save_to_disk(y, os.path.join(data_dir, out_y)) # type: ignore
out_y_shape: Optional[Tuple[int, ...]] = y.shape
else:
out_y = None
out_y_shape = None
if w is not None:
out_w: Optional[str] = "%s-w.npy" % basename
save_to_disk(w, os.path.join(data_dir, out_w)) # type: ignore
out_w_shape: Optional[Tuple[int, ...]] = w.shape
else:
out_w = None
out_w_shape = None
if ids is not None:
out_ids: Optional[str] = "%s-ids.npy" % basename
save_to_disk(ids, os.path.join(data_dir, out_ids)) # type: ignore
out_ids_shape: Optional[Tuple[int, ...]] = ids.shape
else:
out_ids = None
out_ids_shape = None
# note that this corresponds to the _construct_metadata column order
return [
out_ids, out_X, out_y, out_w, out_ids_shape, out_X_shape, out_y_shape,
out_w_shape
]
def save_to_disk(self) -> None:
"""Save dataset to disk."""
DiskDataset._save_metadata(self.metadata_df, self.data_dir, self.tasks)
self._cached_shards = None
def move(self, new_data_dir: str,
delete_if_exists: Optional[bool] = True) -> None:
"""Moves dataset to new directory.
Parameters
----------
new_data_dir: str
The new directory name to move this dataset to.
delete_if_exists: bool, optional (default True)
If this option is set, delete the destination directory if it exists
before moving. This is set to True by default to be backwards compatible
with behavior in earlier versions of DeepChem.
Note
----
This is a stateful operation! `self.data_dir` will be moved into
`new_data_dir`. If `delete_if_exists` is set to `True` (by default this is
set `True`), then `new_data_dir` is deleted if it's a pre-existing
directory.
"""
if delete_if_exists and os.path.isdir(new_data_dir):
shutil.rmtree(new_data_dir)
shutil.move(self.data_dir, new_data_dir)
if delete_if_exists:
self.data_dir = new_data_dir
else:
self.data_dir = os.path.join(new_data_dir,
os.path.basename(self.data_dir))
def copy(self, new_data_dir: str) -> "DiskDataset":
"""Copies dataset to new directory.
Parameters
----------
new_data_dir: str
The new directory name to copy this dataset to.
Returns
-------
DiskDataset
A copied DiskDataset object.
Note
----
This is a stateful operation! Any data at `new_data_dir` will be deleted
and `self.data_dir` will be deep copied into `new_data_dir`.
"""
if os.path.isdir(new_data_dir):
shutil.rmtree(new_data_dir)
shutil.copytree(self.data_dir, new_data_dir)
return DiskDataset(new_data_dir)
def get_task_names(self) -> np.ndarray:
"""Gets learning tasks associated with this dataset."""
return self.tasks
def reshard(self, shard_size: int) -> None:
"""Reshards data to have specified shard size.
Parameters
----------
shard_size: int
The size of shard.
Examples
--------
>>> import deepchem as dc
>>> import numpy as np
>>> X = np.random.rand(100, 10)
>>> d = dc.data.DiskDataset.from_numpy(X)
>>> d.reshard(shard_size=10)
>>> d.get_number_shards()
10
Note
----
If this `DiskDataset` is in `legacy_metadata` format, reshard will
convert this dataset to have non-legacy metadata.
"""
# Create temp directory to store resharded version
reshard_dir = tempfile.mkdtemp()
n_shards = self.get_number_shards()
# Get correct shapes for y/w
tasks = self.get_task_names()
_, y_shape, w_shape, _ = self.get_shape()
if len(y_shape) == 1:
y_shape = (len(y_shape), len(tasks))
if len(w_shape) == 1:
w_shape = (len(w_shape), len(tasks))
# Write data in new shards
def generator():
X_next = np.zeros((0,) + self.get_data_shape())
y_next = np.zeros((0,) + y_shape[1:])
w_next = np.zeros((0,) + w_shape[1:])
ids_next = np.zeros((0,), dtype=object)
for shard_num, (X, y, w, ids) in enumerate(self.itershards()):
logger.info("Resharding shard %d/%d" % (shard_num + 1, n_shards))
# Handle shapes
X = np.reshape(X, (len(X),) + self.get_data_shape())
# Note that this means that DiskDataset resharding currently doesn't
# work for datasets that aren't regression/classification.
if y is None: # datasets without label
y = y_next
w = w_next
else:
y = np.reshape(y, (len(y),) + y_shape[1:])
w = np.reshape(w, (len(w),) + w_shape[1:])
X_next = np.concatenate([X_next, X], axis=0)
y_next = np.concatenate([y_next, y], axis=0)
w_next = np.concatenate([w_next, w], axis=0)
ids_next = np.concatenate([ids_next, ids])
while len(X_next) > shard_size:
X_batch, X_next = X_next[:shard_size], X_next[shard_size:]
y_batch, y_next = y_next[:shard_size], y_next[shard_size:]
w_batch, w_next = w_next[:shard_size], w_next[shard_size:]
ids_batch, ids_next = ids_next[:shard_size], ids_next[shard_size:]
yield (X_batch, y_batch, w_batch, ids_batch)
# Handle spillover from last shard
yield (X_next, y_next, w_next, ids_next)
resharded_dataset = DiskDataset.create_dataset(
generator(), data_dir=reshard_dir, tasks=self.tasks)
shutil.rmtree(self.data_dir)
shutil.move(reshard_dir, self.data_dir)
# Should have updated to non-legacy metadata
self.legacy_metadata = False
self.metadata_df = resharded_dataset.metadata_df
# Note that this resets the cache internally
self.save_to_disk()
def get_data_shape(self) -> Shape:
"""Gets array shape of datapoints in this dataset."""
if not len(self.metadata_df):
raise ValueError("No data in dataset.")
if self.legacy_metadata:
sample_X = load_from_disk(
os.path.join(self.data_dir,
next(self.metadata_df.iterrows())[1]['X']))
return np.shape(sample_X)[1:]
else:
X_shape, _, _, _ = self.get_shape()
return X_shape[1:]
def get_shard_size(self) -> int:
"""Gets size of shards on disk."""
if not len(self.metadata_df):
raise ValueError("No data in dataset.")
sample_ids = load_from_disk(
os.path.join(self.data_dir,
next(self.metadata_df.iterrows())[1]['ids']))
return len(sample_ids)
def _get_metadata_filename(self) -> Tuple[str, str]:
"""Get standard location for metadata file."""
metadata_filename = os.path.join(self.data_dir, "metadata.csv.gzip")
tasks_filename = os.path.join(self.data_dir, "tasks.json")
return tasks_filename, metadata_filename
def get_number_shards(self) -> int:
"""Returns the number of shards for this dataset."""
return self.metadata_df.shape[0]
def itershards(self) -> Iterator[Batch]:
"""Return an object that iterates over all shards in dataset.
Datasets are stored in sharded fashion on disk. Each call to next() for the
generator defined by this function returns the data from a particular shard.
The order of shards returned is guaranteed to remain fixed.
Returns
-------
Iterator[Batch]
Generator which yields tuples of four numpy arrays `(X, y, w, ids)`.
"""
return (self.get_shard(i) for i in range(self.get_number_shards()))
def iterbatches(self,
batch_size: Optional[int] = None,
epochs: int = 1,
deterministic: bool = False,
pad_batches: bool = False) -> Iterator[Batch]:
""" Get an object that iterates over minibatches from the dataset.
It is guaranteed that the number of batches returned is
`math.ceil(len(dataset)/batch_size)`. Each minibatch is returned as
a tuple of four numpy arrays: `(X, y, w, ids)`.
Parameters
----------
batch_size: int, optional (default None)
Number of elements in a batch. If None, then it yields batches
with size equal to the size of each individual shard.
epochs: int, default 1
Number of epochs to walk over dataset.
deterministic: bool, default False
If False, the shard order and the samples within each shard are
shuffled before batches are generated; if True, batches are produced
in a fixed order. Note that shuffling is only local in the sense that
samples are never mixed between different shards.
pad_batches: bool, default False
Whether or not we should pad the last batch, globally, such that
it has exactly batch_size elements.
Returns
-------
Iterator[Batch]
Generator which yields tuples of four numpy arrays `(X, y, w, ids)`.
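Examples
--------
A minimal sketch (assumes `np` is numpy; per the guarantee above,
10 samples with batch_size=4 yield math.ceil(10 / 4) == 3 batches):
>> dataset = DiskDataset.from_numpy(np.random.rand(10, 3), np.random.rand(10, 1))
>> batches = list(dataset.iterbatches(batch_size=4, deterministic=True))
>> len(batches)
3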
"""
shard_indices = list(range(self.get_number_shards()))
return self._iterbatches_from_shards(shard_indices, batch_size, epochs,
deterministic, pad_batches)
def _iterbatches_from_shards(self,
shard_indices: Sequence[int],
batch_size: Optional[int] = None,
epochs: int = 1,
deterministic: bool = False,
pad_batches: bool = False) -> Iterator[Batch]:
"""Get an object that iterates over batches from a restricted set of shards."""
def iterate(dataset: DiskDataset, batch_size: Optional[int], epochs: int):
num_shards = len(shard_indices)
if deterministic:
shard_perm = np.arange(num_shards)
# (ytz): Depending on the application, thread-based pools may be faster
# than process based pools, since process based pools need to pickle/serialize
# objects as an extra overhead. Also, as hideously as un-thread safe this looks,
# we're actually protected by the GIL.
# mp.dummy aliases ThreadPool to Pool
pool = Pool(1)
if batch_size is None:
num_global_batches = num_shards
else:
num_global_batches = math.ceil(dataset.get_shape()[0][0] / batch_size)
for epoch in range(epochs):
if not deterministic:
shard_perm = np.random.permutation(num_shards)
next_shard = pool.apply_async(dataset.get_shard,
(shard_indices[shard_perm[0]],))
cur_global_batch = 0
cur_shard = 0
carry = None
while cur_global_batch < num_global_batches:
X, y, w, ids = next_shard.get()
if cur_shard < num_shards - 1:
next_shard = pool.apply_async(
dataset.get_shard, (shard_indices[shard_perm[cur_shard + 1]],))
elif epoch == epochs - 1:
pool.close()
if carry is not None:
X = np.concatenate([carry[0], X], axis=0)
if y is not None:
y = np.concatenate([carry[1], y], axis=0)
if w is not None:
w = np.concatenate([carry[2], w], axis=0)
ids = np.concatenate([carry[3], ids], axis=0)
carry = None
n_shard_samples = X.shape[0]
cur_local_batch = 0
if batch_size is None:
shard_batch_size = n_shard_samples
else:
shard_batch_size = batch_size
if n_shard_samples == 0:
cur_shard += 1
if batch_size is None:
cur_global_batch += 1
continue
num_local_batches = math.ceil(n_shard_samples / shard_batch_size)
if not deterministic:
sample_perm = np.random.permutation(n_shard_samples)
else:
sample_perm = np.arange(n_shard_samples)
while cur_local_batch < num_local_batches:
start = cur_local_batch * shard_batch_size
end = min(n_shard_samples, (cur_local_batch + 1) * shard_batch_size)
indices = range(start, end)
perm_indices = sample_perm[indices]
X_b = X[perm_indices]
if y is not None:
y_b = y[perm_indices]
else:
y_b = None
if w is not None:
w_b = w[perm_indices]
else:
w_b = None
ids_b = ids[perm_indices]
assert len(X_b) <= shard_batch_size
if len(X_b) < shard_batch_size and cur_shard != num_shards - 1:
assert carry is None
carry = [X_b, y_b, w_b, ids_b]
else:
# (ytz): this skips everything except possibly the last shard
if pad_batches:
(X_b, y_b, w_b, ids_b) = pad_batch(shard_batch_size, X_b, y_b,
w_b, ids_b)
yield X_b, y_b, w_b, ids_b
cur_global_batch += 1
cur_local_batch += 1
cur_shard += 1
return iterate(self, batch_size, epochs)
def itersamples(self) -> Iterator[Batch]:
"""Get an object that iterates over the samples in the dataset.
Returns
-------
Iterator[Batch]
Generator which yields tuples of four numpy arrays `(X, y, w, ids)`.
Examples
--------
>>> dataset = DiskDataset.from_numpy(np.ones((2,2)), np.ones((2,1)))
>>> for x, y, w, id in dataset.itersamples():
... print(x.tolist(), y.tolist(), w.tolist(), id)
[1.0, 1.0] [1.0] [1.0] 0
[1.0, 1.0] [1.0] [1.0] 1
"""
def iterate(dataset):
for (X_shard, y_shard, w_shard, ids_shard) in dataset.itershards():
n_samples = X_shard.shape[0]
for i in range(n_samples):
def sanitize(elem):
if elem is None:
return None
else:
return elem[i]
yield map(sanitize, [X_shard, y_shard, w_shard, ids_shard])
return iterate(self)
def transform(self,
transformer: "dc.trans.Transformer",
parallel: bool = False,
out_dir: Optional[str] = None,
**args) -> "DiskDataset":
"""Construct a new dataset by applying a transformation to every sample in this dataset.
The transformer is applied through its `transform_array` method, which is called as follows:
>> newx, newy, neww, newids = transformer.transform_array(x, y, w, ids)
It might be called only once with the whole dataset, or multiple times
with different subsets of the data. Each time it is called, it should
transform the samples and return the transformed data.
Parameters
----------
transformer: dc.trans.Transformer
The transformation to apply to each sample in the dataset.
parallel: bool, default False
If True, use multiple processes to transform the dataset in parallel.
out_dir: str, optional (default None)
The directory to save the new dataset in. If this is omitted, a
temporary directory is created automatically.
Returns
-------
DiskDataset
A newly constructed Dataset object
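Examples
--------
A minimal illustrative sketch (assumes `np` is numpy and `dc` is
deepchem; the transformer shown is just an example choice):
>> dataset = DiskDataset.from_numpy(np.random.rand(10, 3), np.random.rand(10, 1))
>> transformer = dc.trans.NormalizationTransformer(transform_X=True, dataset=dataset)
>> transformed = dataset.transform(transformer)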
"""
if out_dir is None:
out_dir = tempfile.mkdtemp()
tasks = self.get_task_names()
n_shards = self.get_number_shards()
time1 = time.time()
if parallel:
results = []
pool = multiprocessing.Pool()
for i in range(self.get_number_shards()):
row = self.metadata_df.iloc[i]
X_file = os.path.join(self.data_dir, row['X'])
if row['y'] is not None:
y_file: Optional[str] = os.path.join(self.data_dir, row['y'])
else:
y_file = None
if row['w'] is not None:
w_file: Optional[str] = os.path.join(self.data_dir, row['w'])
else:
w_file = None
ids_file = os.path.join(self.data_dir, row['ids'])
results.append(
pool.apply_async(DiskDataset._transform_shard,
(transformer, i, X_file, y_file, w_file, ids_file,
out_dir, tasks)))
pool.close()
metadata_rows = [r.get() for r in results]
metadata_df = DiskDataset._construct_metadata(metadata_rows)
DiskDataset._save_metadata(metadata_df, out_dir, tasks)
dataset = DiskDataset(out_dir)
else:
def generator():
for shard_num, row in self.metadata_df.iterrows():
logger.info("Transforming shard %d/%d" % (shard_num, n_shards))
X, y, w, ids = self.get_shard(shard_num)
newx, newy, neww, newids = transformer.transform_array(X, y, w, ids)
yield (newx, newy, neww, newids)
dataset = DiskDataset.create_dataset(
generator(), data_dir=out_dir, tasks=tasks)
time2 = time.time()
logger.info("TIMING: transforming took %0.3f s" % (time2 - time1))
return dataset
@staticmethod
def _transform_shard(transformer: "dc.trans.Transformer", shard_num: int,
X_file: Optional[str], y_file: Optional[str], w_file: Optional[str], ids_file: str,
out_dir: str, tasks: np.ndarray) -> List[Optional[str]]:
"""This is called by transform() to transform a single shard."""
X = None if X_file is None else np.array(load_from_disk(X_file))
y = None if y_file is None else np.array(load_from_disk(y_file))
w = None if w_file is None else np.array(load_from_disk(w_file))
ids = np.array(load_from_disk(ids_file))
X, y, w, ids = transformer.transform_array(X, y, w, ids)
basename = "shard-%d" % shard_num
return DiskDataset.write_data_to_disk(out_dir, basename, X, y, w, ids)
def make_pytorch_dataset(self,
epochs: int = 1,
deterministic: bool = False,
batch_size: Optional[int] = None):
"""Create a torch.utils.data.IterableDataset that iterates over the data in this Dataset.
Each value returned by the Dataset's iterator is a tuple of (X, y, w, id)
containing the data for one batch, or for a single sample if batch_size is None.
Parameters
----------
epochs: int, default 1
The number of times to iterate over the Dataset
deterministic: bool, default False
If True, the data is produced in order. If False, a different
random permutation of the data is used for each epoch.
batch_size: int, optional (default None)
The number of samples to return in each batch. If None, each returned
value is a single sample.
Returns
-------
torch.utils.data.IterableDataset
`torch.utils.data.IterableDataset` that iterates over the data in
this dataset.
Note
----
This method requires PyTorch to be installed.
"""
try:
from deepchem.data.pytorch_datasets import _TorchDiskDataset
except ImportError:
raise ImportError("This method requires PyTorch to be installed.")
pytorch_ds = _TorchDiskDataset(
disk_dataset=self,
epochs=epochs,
deterministic=deterministic,
batch_size=batch_size)
return pytorch_ds
@staticmethod
def from_numpy(X: ArrayLike,
y: Optional[ArrayLike] = None,
w: Optional[ArrayLike] = None,
ids: Optional[ArrayLike] = None,
tasks: Optional[ArrayLike] = None,
data_dir: Optional[str] = None) -> "DiskDataset":
"""Creates a DiskDataset object from specified Numpy arrays.
Parameters
----------
X: np.ndarray
Feature array.
y: np.ndarray, optional (default None)
Labels array.
w: np.ndarray, optional (default None)
Weights array.
ids: np.ndarray, optional (default None)
Identifiers array.
tasks: Sequence, optional (default None)
Tasks in this dataset
data_dir: str, optional (default None)
The directory to write this dataset to. If none is specified, will use
a temporary directory instead.
Returns
-------
DiskDataset
A new `DiskDataset` constructed from the provided information.
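Examples
--------
A minimal sketch (assumes `np` is numpy; values are arbitrary):
>> X = np.random.rand(10, 10)
>> y = np.random.rand(10,)
>> dataset = DiskDataset.from_numpy(X, y)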
"""
# To unify shape handling so from_numpy behaves like NumpyDataset, we just
# make a NumpyDataset under the hood
dataset = NumpyDataset(X, y, w, ids)
if tasks is None:
tasks = dataset.get_task_names()
# raw_data = (X, y, w, ids)
return DiskDataset.create_dataset(
[(dataset.X, dataset.y, dataset.w, dataset.ids)],
data_dir=data_dir,
tasks=tasks)
@staticmethod
def merge(datasets: Iterable["Dataset"],
merge_dir: Optional[str] = None) -> "DiskDataset":
"""Merges provided datasets into a merged dataset.
Parameters
----------
datasets: Iterable[Dataset]
List of datasets to merge.
merge_dir: str, optional (default None)
The new directory path to store the merged DiskDataset.
Returns
-------
DiskDataset
A merged DiskDataset.
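Examples
--------
A minimal sketch (assumes `np` is numpy):
>> first = DiskDataset.from_numpy(np.random.rand(5, 3))
>> second = DiskDataset.from_numpy(np.random.rand(5, 3))
>> merged = DiskDataset.merge([first, second])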
"""
if merge_dir is not None:
if not os.path.exists(merge_dir):
os.makedirs(merge_dir)
else:
merge_dir = tempfile.mkdtemp()
# Protect against generator exhaustion
datasets = list(datasets)
# This ensures tasks are consistent for all datasets
tasks = []
for dataset in datasets:
try:
tasks.append(dataset.tasks) # type: ignore
except AttributeError:
pass
if tasks:
task_tuples = [tuple(task_list) for task_list in tasks]
if len(tasks) < len(datasets) or len(set(task_tuples)) > 1:
raise ValueError(
'Cannot merge datasets with different task specifications')
merge_tasks = tasks[0]
else:
merge_tasks = []
# determine the shard sizes of the datasets to merge
shard_sizes = []
for dataset in datasets:
if hasattr(dataset, 'get_shard_size'):
shard_sizes.append(dataset.get_shard_size()) # type: ignore
# otherwise the entire dataset is the "shard size"
else:
shard_sizes.append(len(dataset))
def generator():
for ind, dataset in enumerate(datasets):
logger.info("Merging in dataset %d/%d" % (ind, len(datasets)))
if hasattr(dataset, 'itershards'):
for (X, y, w, ids) in dataset.itershards():
yield (X, y, w, ids)
else:
yield (dataset.X, dataset.y, dataset.w, dataset.ids)
merged_dataset = DiskDataset.create_dataset(
generator(), data_dir=merge_dir, tasks=merge_tasks)
# we must reshard the dataset to have a uniform size
# choose the smallest shard size
if len(set(shard_sizes)) > 1:
merged_dataset.reshard(min(shard_sizes))
return merged_dataset
def subset(self, shard_nums: Sequence[int],
subset_dir: Optional[str] = None) -> "DiskDataset":
"""Creates a subset of the original dataset on disk.
Parameters
----------
shard_nums: Sequence[int]
The indices of shard to extract from the original DiskDataset.
subset_dir: str, optional (default None)
The new directory path to store the subset DiskDataset.
Returns
-------
DiskDataset
A subset DiskDataset.
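Examples
--------
A minimal sketch: keep only shards 0 and 2 of an existing sharded
DiskDataset (the `dataset` variable is assumed to already exist):
>> sub = dataset.subset([0, 2])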
"""
if subset_dir is not None:
if not os.path.exists(subset_dir):
os.makedirs(subset_dir)
else:
subset_dir = tempfile.mkdtemp()
tasks = self.get_task_names()
def generator():
for shard_num, row in self.metadata_df.iterrows():
if shard_num not in shard_nums:
continue
X, y, w, ids = self.get_shard(shard_num)
yield (X, y, w, ids)
return DiskDataset.create_dataset(
generator(), data_dir=subset_dir, tasks=tasks)
def sparse_shuffle(self) -> None:
"""Shuffling that exploits data sparsity to shuffle large datasets.
If feature vectors are sparse, say circular fingerprints or any other
representation that contains few nonzero values, it can be possible to
exploit the sparsity of the vector to simplify shuffles. This method
implements a sparse shuffle by compressing sparse feature vectors down
into a compressed representation, then shuffles this compressed dataset in
memory and writes the results to disk.
Note
----
This method only works for 1-dimensional feature vectors (does not work
for tensorial featurizations). Note that this shuffle is performed in
place.
"""
time1 = time.time()
shard_size = self.get_shard_size()
num_shards = self.get_number_shards()
X_sparse_list: List[np.ndarray] = []
y_list: List[np.ndarray] = []
w_list: List[np.ndarray] = []
ids_list: List[np.ndarray] = []
num_features = -1
for i in range(num_shards):
logger.info("Sparsifying shard %d/%d" % (i, num_shards))
(X_s, y_s, w_s, ids_s) = self.get_shard(i)
if num_features == -1:
num_features = X_s.shape[1]
X_sparse = sparsify_features(X_s)
X_sparse_list, y_list, w_list, ids_list = (
X_sparse_list + [X_sparse], y_list + [y_s], w_list + [w_s],
ids_list + [np.atleast_1d(np.squeeze(ids_s))])
# Get full dataset in memory
(X_sparse, y, w, ids) = (np.vstack(X_sparse_list), np.vstack(y_list),
np.vstack(w_list), np.concatenate(ids_list))
# Shuffle in memory
num_samples = len(X_sparse)
permutation = np.random.permutation(num_samples)
X_sparse, y, w, ids = (X_sparse[permutation], y[permutation],
w[permutation], ids[permutation])
# Write shuffled shards out to disk
for i in range(num_shards):
logger.info("Sparse shuffling shard %d/%d" % (i, num_shards))
start, stop = i * shard_size, (i + 1) * shard_size
(X_sparse_s, y_s, w_s, ids_s) = (X_sparse[start:stop], y[start:stop],
w[start:stop], ids[start:stop])
X_s = densify_features(X_sparse_s, num_features)
self.set_shard(i, X_s, y_s, w_s, ids_s)
time2 = time.time()
logger.info("TIMING: sparse_shuffle took %0.3f s" % (time2 - time1))
def complete_shuffle(self, data_dir: Optional[str] = None) -> Dataset:
"""Completely shuffle across all data, across all shards.
Note
----
The algorithm used for this complete shuffle is O(N^2) where N is the
number of shards. It simply constructs each shard of the output dataset
one at a time. Since the complete shuffle can take a long time, it's
useful to watch the logging output. Each shuffled shard is constructed
using select() which logs as it selects from each original shard. This
will result in O(N^2) logging statements, one for each extraction of
shuffled shard i's contributions from original shard j.
Parameters
----------
data_dir: Optional[str], (default None)
Directory to write the shuffled dataset to. If none is specified a
temporary directory will be used.
Returns
-------
DiskDataset
A DiskDataset whose data is a randomly shuffled version of this dataset.
"""
N = len(self)
perm = np.random.permutation(N).tolist()
shard_size = self.get_shard_size()
return self.select(perm, data_dir, shard_size)
def shuffle_each_shard(self,
shard_basenames: Optional[List[str]] = None) -> None:
"""Shuffles elements within each shard of the dataset.
Parameters
----------
shard_basenames: List[str], optional (default None)
The basenames for each shard. If this isn't specified, will assume the
basenames of form "shard-i" used by `create_dataset` and `reshard`.
"""
# Shuffle the arrays corresponding to each row in metadata_df
n_rows = len(self.metadata_df.index)
if shard_basenames is not None:
if len(shard_basenames) != n_rows:
raise ValueError(
"shard_basenames must provide a basename for each shard in this DiskDataset."
)
else:
shard_basenames = ["shard-%d" % shard_num for shard_num in range(n_rows)]
for i, basename in zip(range(n_rows), shard_basenames):
logger.info("Shuffling shard %d/%d" % (i, n_rows))
X, y, w, ids = self.get_shard(i)
n = X.shape[0]
permutation = np.random.permutation(n)
X, y, w, ids = (X[permutation], y[permutation], w[permutation],
ids[permutation])
DiskDataset.write_data_to_disk(self.data_dir, basename, X, y, w, ids)
# Reset cache
self._cached_shards = None
def shuffle_shards(self) -> None:
"""Shuffles the order of the shards for this dataset."""
metadata_rows = self.metadata_df.values.tolist()
random.shuffle(metadata_rows)
self.metadata_df = DiskDataset._construct_metadata(metadata_rows)
self.save_to_disk()
def get_shard(self, i: int) -> Batch:
"""Retrieves data for the i-th shard from disk.
Parameters
----------
i: int
Shard index for shard to retrieve batch from.
Returns
-------
Batch
A batch data for i-th shard.
"""
# See if we have a cached copy of this shard.
if self._cached_shards is None:
self._cached_shards = [None] * self.get_number_shards()
self._cache_used = 0
if self._cached_shards[i] is not None:
shard = self._cached_shards[i]
return (shard.X, shard.y, shard.w, shard.ids)
# We don't, so load it from disk.
row = self.metadata_df.iloc[i]
X = np.array(load_from_disk(os.path.join(self.data_dir, row['X'])))
if row['y'] is not None:
y: Optional[np.ndarray] = np.array(
load_from_disk(os.path.join(self.data_dir, row['y'])))
else:
y = None
if row['w'] is not None:
# TODO (ytz): Under what condition does this exist but the file itself doesn't?
w_filename = os.path.join(self.data_dir, row['w'])
if os.path.exists(w_filename):
w: Optional[np.ndarray] = np.array(load_from_disk(w_filename))
elif y is not None:
if len(y.shape) == 1:
w = np.ones(y.shape[0], np.float32)
else:
w = np.ones((y.shape[0], 1), np.float32)
else:
w = None
else:
w = None
ids = np.array(
load_from_disk(os.path.join(self.data_dir, row['ids'])), dtype=object)
# Try to cache this shard for later use. Since the normal usage pattern is
# a series of passes through the whole dataset, there's no point doing
# anything fancy. It never makes sense to evict another shard from the
# cache to make room for this one, because we'll probably want that other
# shard again before the next time we want this one. So just cache as many
# as we can and then stop.
shard = _Shard(X, y, w, ids)
shard_size = X.nbytes + ids.nbytes
if y is not None:
shard_size += y.nbytes
if w is not None:
shard_size += w.nbytes
if self._cache_used + shard_size < self._memory_cache_size:
self._cached_shards[i] = shard
self._cache_used += shard_size
return (shard.X, shard.y, shard.w, shard.ids)
def get_shard_ids(self, i: int) -> np.ndarray:
"""Retrieves the list of IDs for the i-th shard from disk.
Parameters
----------
i: int
Shard index for shard to retrieve weights from.
Returns
-------
np.ndarray
A numpy array of ids for i-th shard.
"""
if self._cached_shards is not None and self._cached_shards[i] is not None:
return self._cached_shards[i].ids
row = self.metadata_df.iloc[i]
return np.array(
load_from_disk(os.path.join(self.data_dir, row['ids'])), dtype=object)
def get_shard_y(self, i: int) -> np.ndarray:
"""Retrieves the labels for the i-th shard from disk.
Parameters
----------
i: int
Shard index for shard to retrieve labels from.
Returns
-------
np.ndarray
A numpy array of labels for i-th shard.
"""
if self._cached_shards is not None and self._cached_shards[i] is not None:
return self._cached_shards[i].y
row = self.metadata_df.iloc[i]
return np.array(load_from_disk(os.path.join(self.data_dir, row['y'])))
def get_shard_w(self, i: int) -> np.ndarray:
"""Retrieves the weights for the i-th shard from disk.
Parameters
----------
i: int
Shard index for shard to retrieve weights from.
Returns
-------
np.ndarray
A numpy array of weights for i-th shard.
"""
if self._cached_shards is not None and self._cached_shards[i] is not None:
return self._cached_shards[i].w
row = self.metadata_df.iloc[i]
return np.array(load_from_disk(os.path.join(self.data_dir, row['w'])))
def add_shard(self,
X: np.ndarray,
y: Optional[np.ndarray] = None,
w: Optional[np.ndarray] = None,
ids: Optional[np.ndarray] = None) -> None:
"""Adds a data shard.
Parameters
----------
X: np.ndarray
Feature array.
y: np.ndarray, optional (default None)
Labels array.
w: np.ndarray, optional (default None)
Weights array.
ids: np.ndarray, optional (default None)
Identifiers array.
"""
metadata_rows = self.metadata_df.values.tolist()
shard_num = len(metadata_rows)
basename = "shard-%d" % shard_num
metadata_rows.append(
DiskDataset.write_data_to_disk(self.data_dir, basename, X, y, w, ids))
self.metadata_df = DiskDataset._construct_metadata(metadata_rows)
self.save_to_disk()
def set_shard(self,
shard_num: int,
X: np.ndarray,
y: Optional[np.ndarray] = None,
w: Optional[np.ndarray] = None,
ids: Optional[np.ndarray] = None) -> None:
"""Writes data shard to disk.
Parameters
----------
shard_num: int
Shard index for shard to set new data.
X: np.ndarray
Feature array.
y: np.ndarray, optional (default None)
Labels array.
w: np.ndarray, optional (default None)
Weights array.
ids: np.ndarray, optional (default None)
Identifiers array.
"""
basename = "shard-%d" % shard_num
DiskDataset.write_data_to_disk(self.data_dir, basename, X, y, w, ids)
self._cached_shards = None
self.legacy_metadata = True
def select(self,
indices: Union[Sequence[int], np.ndarray],
select_dir: Optional[str] = None,
select_shard_size: Optional[int] = None,
output_numpy_dataset: Optional[bool] = False) -> Dataset:
"""Creates a new dataset from a selection of indices from self.
Examples
--------
>>> import numpy as np
>>> X = np.random.rand(10, 10)
>>> dataset = dc.data.DiskDataset.from_numpy(X)
>>> selected = dataset.select([1, 3, 4])
>>> len(selected)
3
Parameters
----------
indices: Sequence
List of indices to select.
select_dir: str, optional (default None)
Path to new directory that the selected indices will be copied to.
select_shard_size: Optional[int], (default None)
If specified, the shard-size to use for output selected `DiskDataset`.
If not output_numpy_dataset, then this is set to this current dataset's
shard size if not manually specified.
output_numpy_dataset: Optional[bool], (default False)
If True, output an in-memory `NumpyDataset` instead of a `DiskDataset`.
Note that `select_dir` and `select_shard_size` must be `None` if this
is `True`
Returns
-------
Dataset
A dataset containing the selected samples. The default dataset is `DiskDataset`.
If `output_numpy_dataset` is True, the dataset is `NumpyDataset`.
"""
if output_numpy_dataset and (select_dir is not None or
select_shard_size is not None):
raise ValueError(
"If output_numpy_dataset is set, then select_dir and select_shard_size must both be None"
)
if output_numpy_dataset:
# When outputting a NumpyDataset, we have 1 in-memory shard
select_shard_size = len(indices)
else:
if select_dir is not None:
if not os.path.exists(select_dir):
os.makedirs(select_dir)
else:
select_dir = tempfile.mkdtemp()
if select_shard_size is None:
select_shard_size = self.get_shard_size()
# Handle edge case with empty indices
if not len(indices):
if not output_numpy_dataset:
return DiskDataset.create_dataset([], data_dir=select_dir)
else:
return NumpyDataset(
np.array([]), np.array([]), np.array([]), np.array([]))
N = len(indices)
tasks = self.get_task_names()
n_shards = self.get_number_shards()
# We use two loops here. The outer while loop walks over selection shards
# (the chunks of the indices to select that should go into separate
# output shards), while the inner for loop walks over the shards in the
# source datasets to select out the shard indices from that source shard
def generator():
start = 0
select_shard_num = 0
while start < N:
logger.info(
"Constructing selection output shard %d" % (select_shard_num + 1))
end = min(start + select_shard_size, N)
select_shard_indices = indices[start:end]
sorted_indices = np.array(sorted(select_shard_indices)).astype(int)
Xs, ys, ws, ids_s = [], [], [], []
count, indices_count = 0, 0
for shard_num in range(self.get_number_shards()):
logger.info(
"Selecting from input shard %d/%d for selection output shard %d" %
(shard_num + 1, n_shards, select_shard_num + 1))
if self.legacy_metadata:
ids = self.get_shard_ids(shard_num)
shard_len = len(ids)
else:
shard_X_shape, _, _, _ = self._get_shard_shape(shard_num)
if len(shard_X_shape) > 0:
shard_len = shard_X_shape[0]
else:
shard_len = 0
# Find indices which rest in this shard
num_shard_elts = 0
while sorted_indices[indices_count +
num_shard_elts] < count + shard_len:
num_shard_elts += 1
if (indices_count + num_shard_elts) >= len(sorted_indices):
break
if num_shard_elts == 0:
count += shard_len
continue
else:
X, y, w, ids = self.get_shard(shard_num)
# Need to offset indices to fit within shard_size
shard_inds = sorted_indices[indices_count:indices_count +
num_shard_elts] - count
# Handle empty case where no data from this shard needed
X_sel = X[shard_inds]
# Handle the case of datasets with y/w missing
if y is not None:
y_sel = y[shard_inds]
else:
y_sel = np.array([])
if w is not None:
w_sel = w[shard_inds]
else:
w_sel = np.array([])
ids_sel = ids[shard_inds]
Xs.append(X_sel)
ys.append(y_sel)
ws.append(w_sel)
ids_s.append(ids_sel)
indices_count += num_shard_elts
count += shard_len
# Break if all indices have been used up already
if indices_count >= len(sorted_indices):
break
# Note these will be in the sorted order
X = np.concatenate(Xs, axis=0)
y = np.concatenate(ys, axis=0)
w = np.concatenate(ws, axis=0)
ids = np.concatenate(ids_s, axis=0)
# We need to recover the original ordering. We can do this by using
# np.where to find the locations of the original indices in the sorted
# indices.
reverted_indices = np.array(
# We know there's only one match for np.where since this is a
# permutation, so the [0][0] pulls out the exact match location.
[
np.where(sorted_indices == orig_index)[0][0]
for orig_index in select_shard_indices
])
if y.size == 0:
tup_y = y
else:
tup_y = y[reverted_indices]
if w.size == 0:
tup_w = w
else:
tup_w = w[reverted_indices]
X, ids = X[reverted_indices], ids[reverted_indices]
yield (X, tup_y, tup_w, ids)
start = end
select_shard_num += 1
if not output_numpy_dataset:
return DiskDataset.create_dataset(
generator(), data_dir=select_dir, tasks=tasks)
else:
X, y, w, ids = next(generator())
return NumpyDataset(X, y, w, ids)
@property
def ids(self) -> np.ndarray:
"""Get the ids vector for this dataset as a single numpy array."""
if len(self) == 0:
return np.array([])
ids = []
for i in range(self.get_number_shards()):
ids.append(np.atleast_1d(np.squeeze(self.get_shard_ids(i))))
return np.concatenate(ids)
@property
def X(self) -> np.ndarray:
"""Get the X vector for this dataset as a single numpy array."""
Xs = []
one_dimensional = False
for (X_b, _, _, _) in self.itershards():
Xs.append(X_b)
if len(X_b.shape) == 1:
one_dimensional = True
if not one_dimensional:
return np.vstack(Xs)
else:
return np.concatenate(Xs)
@property
def y(self) -> np.ndarray:
"""Get the y vector for this dataset as a single numpy array."""
if len(self) == 0:
return np.array([])
ys = []
one_dimensional = False
for i in range(self.get_number_shards()):
y_b = self.get_shard_y(i)
ys.append(y_b)
if len(y_b.shape) == 1:
one_dimensional = True
if not one_dimensional:
return np.vstack(ys)
else:
return np.concatenate(ys)
@property
def w(self) -> np.ndarray:
"""Get the weight vector for this dataset as a single numpy array."""
ws = []
one_dimensional = False
for i in range(self.get_number_shards()):
w_b = self.get_shard_w(i)
ws.append(w_b)
if len(w_b.shape) == 1:
one_dimensional = True
if not one_dimensional:
return np.vstack(ws)
else:
return np.concatenate(ws)
@property
def memory_cache_size(self) -> int:
"""Get the size of the memory cache for this dataset, measured in bytes."""
return self._memory_cache_size
@memory_cache_size.setter
def memory_cache_size(self, size: int) -> None:
"""Get the size of the memory cache for this dataset, measured in bytes."""
self._memory_cache_size = size
if self._cache_used > size:
self._cached_shards = None
def __len__(self) -> int:
"""Finds number of elements in dataset."""
total = 0
for _, row in self.metadata_df.iterrows():
ids = load_from_disk(os.path.join(self.data_dir, row['ids']))
total += len(ids)
return total
def _get_shard_shape(self,
shard_num: int) -> Tuple[Shape, Shape, Shape, Shape]:
"""Finds the shape of the specified shard."""
if self.legacy_metadata:
raise ValueError(
"This function requires the new metadata format to be called. Please reshard this dataset by calling the reshard() method."
)
n_tasks = len(self.get_task_names())
row = self.metadata_df.iloc[shard_num]
if row['X_shape'] is not None:
shard_X_shape = make_tuple(str(row['X_shape']))
else:
shard_X_shape = tuple()
if n_tasks > 0:
if row['y_shape'] is not None:
shard_y_shape = make_tuple(str(row['y_shape']))
else:
shard_y_shape = tuple()
if row['w_shape'] is not None:
shard_w_shape = make_tuple(str(row['w_shape']))
else:
shard_w_shape = tuple()
else:
shard_y_shape = tuple()
shard_w_shape = tuple()
if row['ids_shape'] is not None:
shard_ids_shape = make_tuple(str(row['ids_shape']))
else:
shard_ids_shape = tuple()
X_shape, y_shape, w_shape, ids_shape = tuple(
np.array(shard_X_shape)), tuple(np.array(shard_y_shape)), tuple(
np.array(shard_w_shape)), tuple(np.array(shard_ids_shape))
return X_shape, y_shape, w_shape, ids_shape
def get_shape(self) -> Tuple[Shape, Shape, Shape, Shape]:
"""Finds shape of dataset.
Returns four tuples, giving the shape of the X, y, w, and ids arrays.
"""
n_tasks = len(self.get_task_names())
n_rows = len(self.metadata_df.index)
# If shape metadata is available use it to directly compute shape from
# metadata
if not self.legacy_metadata:
for shard_num in range(n_rows):
shard_X_shape, shard_y_shape, shard_w_shape, shard_ids_shape = self._get_shard_shape(
shard_num)
if shard_num == 0:
X_shape, y_shape, w_shape, ids_shape = np.array(
shard_X_shape), np.array(shard_y_shape), np.array(
shard_w_shape), np.array(shard_ids_shape)
else:
X_shape[0] += shard_X_shape[0]
if n_tasks > 0:
y_shape[0] += shard_y_shape[0]
w_shape[0] += shard_w_shape[0]
ids_shape[0] += shard_ids_shape[0]
return tuple(X_shape), tuple(y_shape), tuple(w_shape), tuple(ids_shape)
# In the absence of shape metadata, fall back to loading data from disk to
# find shape.
else:
for shard_num, (X, y, w, ids) in enumerate(self.itershards()):
if shard_num == 0:
X_shape = np.array(X.shape)
if n_tasks > 0:
y_shape = np.array(y.shape)
w_shape = np.array(w.shape)
else:
y_shape = np.array([])
w_shape = np.array([])
ids_shape = np.array(ids.shape)
else:
X_shape[0] += np.array(X.shape)[0]
if n_tasks > 0:
y_shape[0] += np.array(y.shape)[0]
w_shape[0] += np.array(w.shape)[0]
ids_shape[0] += np.array(ids.shape)[0]
return tuple(X_shape), tuple(y_shape), tuple(w_shape), tuple(ids_shape)
def get_label_means(self) -> pd.DataFrame:
"""Return pandas series of label means."""
return self.metadata_df["y_means"]
def get_label_stds(self) -> pd.DataFrame:
"""Return pandas series of label stds."""
return self.metadata_df["y_stds"]
class ImageDataset(Dataset):
"""A Dataset that loads data from image files on disk."""
def __init__(self,
X: Union[np.ndarray, List[str]],
y: Optional[Union[np.ndarray, List[str]]],
w: Optional[ArrayLike] = None,
ids: Optional[ArrayLike] = None) -> None:
"""Create a dataset whose X and/or y array is defined by image files on disk.
Parameters
----------
X: np.ndarray or List[str]
The dataset's input data. This may be either a single NumPy
array directly containing the data, or a list containing the
paths to the image files
y: np.ndarray or List[str]
The dataset's labels. This may be either a single NumPy array
directly containing the data, or a list containing the paths to
the image files
w: np.ndarray, optional (default None)
a 1D or 2D array containing the weights for each sample or
sample/task pair
ids: np.ndarray, optional (default None)
the sample IDs
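Examples
--------
A minimal sketch using in-memory arrays in place of image files
(assumes `np` is numpy; shapes are arbitrary):
>> X = np.random.rand(4, 32, 32, 3)
>> y = np.random.rand(4,)
>> dataset = ImageDataset(X, y)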
"""
n_samples = len(X)
if y is None:
y = np.zeros((n_samples,))
self._X_shape = self._find_array_shape(X)
self._y_shape = self._find_array_shape(y)
if w is None:
if len(self._y_shape) == 0:
# Case n_samples should be 1
if n_samples != 1:
raise ValueError("y can only be a scalar if n_samples == 1")
w = np.ones_like(y)
elif len(self._y_shape) == 1:
w = np.ones(self._y_shape[0], np.float32)
else:
w = np.ones((self._y_shape[0], 1), np.float32)
if ids is None:
if not isinstance(X, np.ndarray):
ids = X
elif not isinstance(y, np.ndarray):
ids = y
else:
ids = np.arange(n_samples)
self._X = X
self._y = y
self._w = np.asarray(w)
self._ids = np.array(ids, dtype=object)
def _find_array_shape(self, array: Union[np.ndarray, List[str]]) -> Shape:
if isinstance(array, np.ndarray):
return array.shape
image_shape = load_image_files([array[0]]).shape[1:]
return tuple(np.concatenate([[len(array)], image_shape]))
def __len__(self) -> int:
"""Get the number of elements in the dataset."""
return self._X_shape[0]
def get_shape(self) -> Tuple[Shape, Shape, Shape, Shape]:
"""Get the shape of the dataset.
Returns four tuples, giving the shape of the X, y, w, and ids arrays.
"""
return self._X_shape, self._y_shape, self._w.shape, self._ids.shape
def get_task_names(self) -> np.ndarray:
"""Get the names of the tasks associated with this dataset."""
if len(self._y_shape) < 2:
return np.array([0])
return np.arange(self._y_shape[1])
@property
def X(self) -> np.ndarray:
"""Get the X vector for this dataset as a single numpy array."""
if isinstance(self._X, np.ndarray):
return self._X
return load_image_files(self._X)
@property
def y(self) -> np.ndarray:
"""Get the y vector for this dataset as a single numpy array."""
if isinstance(self._y, np.ndarray):
return self._y
return load_image_files(self._y)
@property
def ids(self) -> np.ndarray:
"""Get the ids vector for this dataset as a single numpy array."""
return self._ids
@property
def w(self) -> np.ndarray:
"""Get the weight vector for this dataset as a single numpy array."""
return self._w
def iterbatches(self,
batch_size: Optional[int] = None,
epochs: int = 1,
deterministic: bool = False,
pad_batches: bool = False) -> Iterator[Batch]:
"""Get an object that iterates over minibatches from the dataset.
Each minibatch is returned as a tuple of four numpy arrays:
`(X, y, w, ids)`.
Parameters
----------
batch_size: int, optional (default None)
Number of elements in each batch.
epochs: int, default 1
Number of epochs to walk over dataset.
deterministic: bool, default False
If True, follow deterministic order.
pad_batches: bool, default False
If True, pad each batch to `batch_size`.
Returns
-------
Iterator[Batch]
Generator which yields tuples of four numpy arrays `(X, y, w, ids)`.
"""
def iterate(dataset, batch_size, epochs, deterministic, pad_batches):
n_samples = dataset._X_shape[0]
if deterministic:
sample_perm = np.arange(n_samples)
if batch_size is None:
batch_size = n_samples
for epoch in range(epochs):
if not deterministic:
sample_perm = np.random.permutation(n_samples)
batch_idx = 0
num_batches = math.ceil(n_samples / batch_size)
while batch_idx < num_batches:
start = batch_idx * batch_size
end = min(n_samples, (batch_idx + 1) * batch_size)
indices = range(start, end)
perm_indices = sample_perm[indices]
if isinstance(dataset._X, np.ndarray):
X_batch = dataset._X[perm_indices]
else:
X_batch = load_image_files([dataset._X[i] for i in perm_indices])
if isinstance(dataset._y, np.ndarray):
y_batch = dataset._y[perm_indices]
else:
y_batch = load_image_files([dataset._y[i] for i in perm_indices])
w_batch = dataset._w[perm_indices]
ids_batch = dataset._ids[perm_indices]
if pad_batches:
(X_batch, y_batch, w_batch, ids_batch) = pad_batch(
batch_size, X_batch, y_batch, w_batch, ids_batch)
batch_idx += 1
yield (X_batch, y_batch, w_batch, ids_batch)
return iterate(self, batch_size, epochs, deterministic, pad_batches)
def _get_image(self, array: Union[np.ndarray, List[str]],
indices: Union[int, np.ndarray]) -> np.ndarray:
"""Method for loading an image
Parameters
----------
array: Union[np.ndarray, List[str]]
A numpy array which contains images or List of image filenames
indices: Union[int, np.ndarray]
Index you want to get the images
Returns
-------
np.ndarray
Loaded images
"""
if isinstance(array, np.ndarray):
return array[indices]
if isinstance(indices, np.ndarray):
return load_image_files([array[i] for i in indices])
return load_image_files([array[indices]])[0]
def itersamples(self) -> Iterator[Batch]:
"""Get an object that iterates over the samples in the dataset.
Returns
-------
Iterator[Batch]
Iterator which yields tuples of four numpy arrays `(X, y, w, ids)`.
"""
n_samples = self._X_shape[0]
return ((self._get_image(self._X, i), self._get_image(self._y, i),
self._w[i], self._ids[i]) for i in range(n_samples))
def transform(
self,
transformer: "dc.trans.Transformer",
**args,
) -> "NumpyDataset":
"""Construct a new dataset by applying a transformation to every sample in this dataset.
The transformer is applied through its `transform_array` method, which is called as follows:
>> newx, newy, neww, newids = transformer.transform_array(x, y, w, ids)
It might be called only once with the whole dataset, or multiple times with
different subsets of the data. Each time it is called, it should transform
the samples and return the transformed data.
Parameters
----------
transformer: dc.trans.Transformer
The transformation to apply to each sample in the dataset
Returns
-------
NumpyDataset
A newly constructed NumpyDataset object
"""
newx, newy, neww, newids = transformer.transform_array(
self.X, self.y, self.w, self.ids)
return NumpyDataset(newx, newy, neww, newids)
def select(self,
indices: Union[Sequence[int], np.ndarray],
select_dir: Optional[str] = None) -> "ImageDataset":
"""Creates a new dataset from a selection of indices from self.
Parameters
----------
indices: Sequence
List of indices to select.
select_dir: str, optional (default None)
Used to provide same API as `DiskDataset`. Ignored since
`ImageDataset` is purely in-memory.
Returns
-------
ImageDataset
A selected ImageDataset object
"""
if isinstance(self._X, np.ndarray):
X = self._X[indices]
else:
X = [self._X[i] for i in indices]
if isinstance(self._y, np.ndarray):
y = self._y[indices]
else:
y = [self._y[i] for i in indices]
w = self._w[indices]
ids = self._ids[indices]
return ImageDataset(X, y, w, ids)
def make_pytorch_dataset(self,
epochs: int = 1,
deterministic: bool = False,
batch_size: Optional[int] = None):
"""Create a torch.utils.data.IterableDataset that iterates over the data in this Dataset.
Each value returned by the Dataset's iterator is a tuple of (X, y, w, id)
containing the data for one batch, or for a single sample if batch_size is None.
Parameters
----------
epochs: int, default 1
The number of times to iterate over the Dataset.
deterministic: bool, default False
If True, the data is produced in order. If False, a different
random permutation of the data is used for each epoch.
batch_size: int, optional (default None)
The number of samples to return in each batch. If None, each returned
value is a single sample.
Returns
-------
torch.utils.data.IterableDataset
`torch.utils.data.IterableDataset` that iterates over the data in
this dataset.
Note
----
This method requires PyTorch to be installed.
"""
try:
from deepchem.data.pytorch_datasets import _TorchImageDataset
except ImportError:
raise ImportError("This method requires PyTorch to be installed.")
pytorch_ds = _TorchImageDataset(
image_dataset=self,
epochs=epochs,
deterministic=deterministic,
batch_size=batch_size)
return pytorch_ds
class Databag(object):
"""A utility class to iterate through multiple datasets together.
A `Databag` is useful when you have multiple datasets that you want
to iterate in lockstep. This might be easiest to grasp with a
simple code example.
>>> ones_dataset = NumpyDataset(X=np.ones((5, 3)))
>>> zeros_dataset = NumpyDataset(X=np.zeros((5, 3)))
>>> databag = Databag({"ones": ones_dataset, "zeros": zeros_dataset})
>>> for sample_dict in databag.iterbatches(batch_size=1):
... print(sample_dict)
{'ones': array([[1., 1., 1.]]), 'zeros': array([[0., 0., 0.]])}
{'ones': array([[1., 1., 1.]]), 'zeros': array([[0., 0., 0.]])}
{'ones': array([[1., 1., 1.]]), 'zeros': array([[0., 0., 0.]])}
{'ones': array([[1., 1., 1.]]), 'zeros': array([[0., 0., 0.]])}
{'ones': array([[1., 1., 1.]]), 'zeros': array([[0., 0., 0.]])}
Note how we get a batch at a time from each of the datasets in the
`Databag`. This can be useful for training models that combine data
from multiple `Dataset` objects at a time.
"""
def __init__(self, datasets: Optional[Dict[Any, Dataset]] = None) -> None:
"""Initialize this `Databag`.
Parameters
----------
datasets: dict, optional (default None)
A dictionary mapping keys to `Dataset` objects.
"""
if datasets is None:
self.datasets = dict()
else:
self.datasets = datasets
def add_dataset(self, key: Any, dataset: Dataset) -> None:
"""Adds a dataset to this databag.
Parameters
----------
key: Any, hashable value
Key to be added
dataset: Dataset
The dataset that `key` should point to.
"""
self.datasets[key] = dataset
def iterbatches(self, **kwargs) -> Iterator[Dict[str, np.ndarray]]:
"""Loop through all internal datasets in the same order.
Parameters
----------
batch_size: int
Number of samples from each dataset to return
epochs: int
Number of times to loop through the datasets
    pad_batches: bool
      If True, pad each dataset's final batch so that every yielded batch
      contains exactly `batch_size` samples.
Returns
-------
Iterator[Dict[str, np.ndarray]]
Generator which yields a dictionary {key: dataset.X[batch]}
"""
key_order = [x for x in self.datasets.keys()]
if "epochs" in kwargs:
epochs = kwargs['epochs']
del kwargs['epochs']
else:
epochs = 1
kwargs['deterministic'] = True
for epoch in range(epochs):
iterators = [self.datasets[x].iterbatches(**kwargs) for x in key_order]
for tup in zip(*iterators):
m_d = {key_order[i]: tup[i][0] for i in range(len(key_order))}
yield m_d
| deepchem/deepchem | deepchem/data/datasets.py | Python | mit | 101,597 |
from .RamlParseable import RamlParseable
from .RamlDocumentation import RamlDocumentation
from .RamlResource import RamlResource
from .RamlURIParameter import RamlURIParameter
from .decorators import (raml_required, raml_optional,
raml_simple_parse, raml_tabbed)
class RamlRoot(RamlParseable):
def __init__(self, yaml):
super(RamlRoot, self).__init__(yaml)
@raml_required
@raml_simple_parse
def parse_title(self):
pass
@raml_optional
@raml_simple_parse
def parse_version(self):
pass
@raml_required
@raml_simple_parse
def parse_baseUri(self):
pass
@raml_optional
def parse_baseUriParameters(self):
self.baseUriParameters = {
x[0]: RamlURIParameter(x[1])
for x in self.yaml['baseUriParameters'].items()
}
@raml_optional
@raml_simple_parse
def parse_protocols(self):
pass
@raml_optional
@raml_simple_parse
def parse_mediaType(self):
pass
@raml_optional
@raml_simple_parse
def parse_schemas(self):
pass
@raml_optional
def parse_uriParameters(self):
self.uriParameters = {
x[0]: RamlURIParameter(x[1])
for x in self.yaml['uriParameters'].items()
}
@raml_optional
def parse_resources(self):
self.resources = {
x[0]: RamlResource(x[0], x[1])
for x in filter(lambda x: x[0].startswith('/'), self.yaml.items())
}
@raml_optional
def parse_documentation(self):
self.documentation = RamlDocumentation(self.yaml['documentation'])
@raml_tabbed
def __str__(self):
return '''\
[RamlRoot:
title: {0.title}
version: {0.version}
base URI: {0.baseUri}
base URI parameters: {0.baseUriParameters}
protocols: {0.protocols}
media type: {0.mediaType}
URI parameters: {0.uriParameters}
documentation:
{0.documentation}
schemas: {0.schemas}
resources: {0.resources}
]'''.format(self)
| freddrake/ramlizer | ramlizer/RamlRoot.py | Python | mit | 2,052 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Python motu client
#
# Motu, a high efficient, robust and Standard compliant Web Server for Geographic
# Data Dissemination.
#
# http://cls-motu.sourceforge.net/
#
# (C) Copyright 2009-2010, by CLS (Collecte Localisation Satellites) -
# http://www.cls.fr - and Contributors
#
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from threading import Thread, local
import time
import threading
# global stats
tsl = local()
class StopWatch(object):
TIME = "time"
GLOBAL = "global"
def __init__(self):
# contains the computed times
self.times = {}
# contains the current timers
self.timers = {}
def clear(self):
self.timers = {}
self.times = {}
def start(self,label = GLOBAL):
"""Starts a new counter
        Returns the time at which the counter was started.
"""
self.timers[label] = self.__time()
return self.timers[label]
def stop(self,label=GLOBAL):
"""Stops the clock for the given counter.
        Returns the total elapsed time recorded for that counter.
"""
self.times[label] = self.elapsed(label)
del self.timers[label]
return self.times[label]
def isRunning(self, label=GLOBAL):
return label in self.timers
def elapsed(self,label=GLOBAL):
"""The number of seconds since the current time that the StopWatch
object was created. If stop() was called, it is the number
of seconds from the instance creation until stop() was called.
"""
t0 = self.times[label] if label in self.times else 0.
t1 = self.timers[label] if label in self.timers else 0.
t2 = self.__time() if label in self.timers else 0.
return t0 + (t2 - t1)
def getTimes(self):
return self.times
def __time(self):
"""Wrapper for time.time() to allow unit testing.
"""
return time.time()
def __str__(self):
"""Nicely format the elapsed time
"""
        keys = list(self.times.keys()) + [x for x in self.timers.keys() if x not in self.times]
txt = ""
for key in keys:
txt = txt + key + " : " + str(self.elapsed(key)) + " s " + ("(running)" if self.isRunning(key) else "(stopped)")+"\n"
return txt
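# Illustrative usage sketch of the StopWatch API (the "request" label is an
# arbitrary example name):
#
#   sw = StopWatch()
#   sw.start("request")
#   ...                    # timed work goes here
#   sw.stop("request")
#   sw.getTimes()          # {'request': <elapsed seconds>}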
def localThreadStopWatch():
if not hasattr(tsl,'timer'):
lock = threading.Lock()
lock.acquire()
try:
if not hasattr(tsl,'timer'):
tsl.timer = StopWatch()
finally:
lock.release()
return tsl.timer | dipapaspyros/bdo_platform | aggregator/connectors/motu/util/stop_watch.py | Python | mit | 3,413 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements a FloatWithUnit, which is a subclass of float. It
also defines supported units for some commonly used units for energy, length,
temperature, time and charge. FloatWithUnit also support conversion to one
another, and additions and subtractions perform automatic conversion if
units are detected. An ArrayWithUnit is also implemented, which is a subclass
of numpy's ndarray with similar unit features.
"""
import collections
import numbers
from functools import partial
import numpy as np
import scipy.constants as const
__author__ = "Shyue Ping Ong, Matteo Giantomassi"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong, Matteo Giantomassi"
__status__ = "Production"
__date__ = "Aug 30, 2013"
"""
Some conversion factors
"""
Ha_to_eV = 1 / const.physical_constants["electron volt-hartree relationship"][0]
eV_to_Ha = 1 / Ha_to_eV
Ry_to_eV = Ha_to_eV / 2
amu_to_kg = const.physical_constants["atomic mass unit-kilogram relationship"][0]
mile_to_meters = const.mile
bohr_to_angstrom = const.physical_constants["Bohr radius"][0] * 1e10
bohr_to_ang = bohr_to_angstrom
ang_to_bohr = 1 / bohr_to_ang
kCal_to_kJ = const.calorie
kb = const.physical_constants["Boltzmann constant in eV/K"][0]
"""
Definitions of supported units. Values below are essentially scaling and
conversion factors. What matters is the relative values, not the absolute.
The SI units must have factor 1.
"""
BASE_UNITS = {
"length": {
"m": 1,
"km": 1000,
"mile": mile_to_meters,
"ang": 1e-10,
"cm": 1e-2,
"pm": 1e-12,
"bohr": bohr_to_angstrom * 1e-10,
},
"mass": {
"kg": 1,
"g": 1e-3,
"amu": amu_to_kg,
},
"time": {
"s": 1,
"min": 60,
"h": 3600,
"d": 3600 * 24,
},
"current": {"A": 1},
"temperature": {
"K": 1,
},
"amount": {"mol": 1, "atom": 1 / const.N_A},
"intensity": {"cd": 1},
"memory": {
"byte": 1,
"Kb": 1024,
"Mb": 1024 ** 2,
"Gb": 1024 ** 3,
"Tb": 1024 ** 4,
},
}
# Accept kb, mb, gb ... as well.
BASE_UNITS["memory"].update({k.lower(): v for k, v in BASE_UNITS["memory"].items()})
# These are the supported derived units, defined in terms of powers of
# SI base units and constants.
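# For example, the entry "eV": {"kg": 1, "m": 2, "s": -2, const.e: 1} reads as
# 1 eV = const.e * kg m^2 s^-2 (~1.602e-19 J): string keys are powers of SI base
# units, while numeric keys are pure scaling constants.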
DERIVED_UNITS = {
"energy": {
"eV": {"kg": 1, "m": 2, "s": -2, const.e: 1},
"meV": {"kg": 1, "m": 2, "s": -2, const.e * 1e-3: 1},
"Ha": {"kg": 1, "m": 2, "s": -2, const.e * Ha_to_eV: 1},
"Ry": {"kg": 1, "m": 2, "s": -2, const.e * Ry_to_eV: 1},
"J": {"kg": 1, "m": 2, "s": -2},
"kJ": {"kg": 1, "m": 2, "s": -2, 1000: 1},
"kCal": {"kg": 1, "m": 2, "s": -2, 1000: 1, kCal_to_kJ: 1},
},
"charge": {
"C": {"A": 1, "s": 1},
"e": {"A": 1, "s": 1, const.e: 1},
},
"force": {
"N": {"kg": 1, "m": 1, "s": -2},
"KN": {"kg": 1, "m": 1, "s": -2, 1000: 1},
"MN": {"kg": 1, "m": 1, "s": -2, 1e6: 1},
"GN": {"kg": 1, "m": 1, "s": -2, 1e9: 1},
},
"frequency": {
"Hz": {"s": -1},
"KHz": {"s": -1, 1000: 1},
"MHz": {"s": -1, 1e6: 1},
"GHz": {"s": -1, 1e9: 1},
"THz": {"s": -1, 1e12: 1},
},
"pressure": {
"Pa": {"kg": 1, "m": -1, "s": -2},
"KPa": {"kg": 1, "m": -1, "s": -2, 1000: 1},
"MPa": {"kg": 1, "m": -1, "s": -2, 1e6: 1},
"GPa": {"kg": 1, "m": -1, "s": -2, 1e9: 1},
},
"power": {
"W": {"m": 2, "kg": 1, "s": -3},
"KW": {"m": 2, "kg": 1, "s": -3, 1000: 1},
"MW": {"m": 2, "kg": 1, "s": -3, 1e6: 1},
"GW": {"m": 2, "kg": 1, "s": -3, 1e9: 1},
},
"emf": {"V": {"m": 2, "kg": 1, "s": -3, "A": -1}},
"capacitance": {"F": {"m": -2, "kg": -1, "s": 4, "A": 2}},
"resistance": {"ohm": {"m": 2, "kg": 1, "s": -3, "A": -2}},
"conductance": {"S": {"m": -2, "kg": -1, "s": 3, "A": 2}},
"magnetic_flux": {"Wb": {"m": 2, "kg": 1, "s": -2, "A": -1}},
"cross_section": {"barn": {"m": 2, 1e-28: 1}, "mbarn": {"m": 2, 1e-31: 1}},
}
ALL_UNITS = dict(list(BASE_UNITS.items()) + list(DERIVED_UNITS.items())) # type: ignore
SUPPORTED_UNIT_NAMES = tuple(i for d in ALL_UNITS.values() for i in d.keys())
# Mapping unit name --> unit type (unit names must be unique).
_UNAME2UTYPE = {} # type: ignore
for utype, d in ALL_UNITS.items():
assert not set(d.keys()).intersection(_UNAME2UTYPE.keys())
_UNAME2UTYPE.update({uname: utype for uname in d})
del utype, d
def _get_si_unit(unit):
unit_type = _UNAME2UTYPE[unit]
si_unit = filter(lambda k: BASE_UNITS[unit_type][k] == 1, BASE_UNITS[unit_type].keys())
return list(si_unit)[0], BASE_UNITS[unit_type][unit]
class UnitError(BaseException):
"""
Exception class for unit errors.
"""
def _check_mappings(u):
for v in DERIVED_UNITS.values():
for k2, v2 in v.items():
if all(v2.get(ku, 0) == vu for ku, vu in u.items()) and all(
u.get(kv2, 0) == vv2 for kv2, vv2 in v2.items()
):
return {k2: 1}
return u
class Unit(collections.abc.Mapping):
"""
Represents a unit, e.g., "m" for meters, etc. Supports compound units.
Only integer powers are supported for units.
"""
Error = UnitError
def __init__(self, unit_def):
"""
Constructs a unit.
Args:
unit_def: A definition for the unit. Either a mapping of unit to
powers, e.g., {"m": 2, "s": -1} represents "m^2 s^-1",
or simply as a string "kg m^2 s^-1". Note that the supported
format uses "^" as the power operator and all units must be
space-separated.
"""
if isinstance(unit_def, str):
unit = collections.defaultdict(int)
import re
for m in re.finditer(r"([A-Za-z]+)\s*\^*\s*([\-0-9]*)", unit_def):
p = m.group(2)
p = 1 if not p else int(p)
k = m.group(1)
unit[k] += p
else:
unit = {k: v for k, v in dict(unit_def).items() if v != 0}
self._unit = _check_mappings(unit)
def __mul__(self, other):
new_units = collections.defaultdict(int)
for k, v in self.items():
new_units[k] += v
for k, v in other.items():
new_units[k] += v
return Unit(new_units)
def __rmul__(self, other):
return self.__mul__(other)
def __div__(self, other):
new_units = collections.defaultdict(int)
for k, v in self.items():
new_units[k] += v
for k, v in other.items():
new_units[k] -= v
return Unit(new_units)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, i):
return Unit({k: v * i for k, v in self.items()})
def __iter__(self):
return self._unit.__iter__()
def __getitem__(self, i):
return self._unit[i]
def __len__(self):
return len(self._unit)
def __repr__(self):
sorted_keys = sorted(self._unit.keys(), key=lambda k: (-self._unit[k], k))
return " ".join(
["{}^{}".format(k, self._unit[k]) if self._unit[k] != 1 else k for k in sorted_keys if self._unit[k] != 0]
)
def __str__(self):
return self.__repr__()
@property
def as_base_units(self):
"""
Converts all units to base SI units, including derived units.
Returns:
(base_units_dict, scaling factor). base_units_dict will not
contain any constants, which are gathered in the scaling factor.
"""
b = collections.defaultdict(int)
factor = 1
for k, v in self.items():
derived = False
for d in DERIVED_UNITS.values():
if k in d:
for k2, v2 in d[k].items():
if isinstance(k2, numbers.Number):
factor *= k2 ** (v2 * v)
else:
b[k2] += v2 * v
derived = True
break
if not derived:
si, f = _get_si_unit(k)
b[si] += v
factor *= f ** v
return {k: v for k, v in b.items() if v != 0}, factor
def get_conversion_factor(self, new_unit):
"""
Returns a conversion factor between this unit and a new unit.
Compound units are supported, but must have the same powers in each
unit type.
Args:
new_unit: The new unit.
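        Example:
            Illustrative values (approximate):
            Unit("m").get_conversion_factor("ang") -> 1e10
            Unit("eV").get_conversion_factor("J") -> ~1.602e-19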
"""
uo_base, ofactor = self.as_base_units
un_base, nfactor = Unit(new_unit).as_base_units
units_new = sorted(un_base.items(), key=lambda d: _UNAME2UTYPE[d[0]])
units_old = sorted(uo_base.items(), key=lambda d: _UNAME2UTYPE[d[0]])
factor = ofactor / nfactor
for uo, un in zip(units_old, units_new):
if uo[1] != un[1]:
raise UnitError("Units %s and %s are not compatible!" % (uo, un))
c = ALL_UNITS[_UNAME2UTYPE[uo[0]]]
factor *= (c[uo[0]] / c[un[0]]) ** uo[1]
return factor
class FloatWithUnit(float):
"""
Subclasses float to attach a unit type. Typically, you should use the
pre-defined unit type subclasses such as Energy, Length, etc. instead of
using FloatWithUnit directly.
Supports conversion, addition and subtraction of the same unit type. E.g.,
1 m + 20 cm will be automatically converted to 1.2 m (units follow the
leftmost quantity). Note that FloatWithUnit does not override the eq
method for float, i.e., units are not checked when testing for equality.
The reason is to allow this class to be used transparently wherever floats
are expected.
>>> e = Energy(1.1, "Ha")
>>> a = Energy(1.1, "Ha")
>>> b = Energy(3, "eV")
>>> c = a + b
>>> print(c)
1.2102479761938871 Ha
>>> c.to("eV")
32.932522246000005 eV
"""
Error = UnitError
@classmethod
def from_string(cls, s):
"""
Initialize a FloatWithUnit from a string. Example Memory.from_string("1. Mb")
"""
# Extract num and unit string.
s = s.strip()
for i, char in enumerate(s):
if char.isalpha() or char.isspace():
break
else:
raise Exception("Unit is missing in string %s" % s)
num, unit = float(s[:i]), s[i:]
# Find unit type (set it to None if it cannot be detected)
for unit_type, d in BASE_UNITS.items():
if unit in d:
break
else:
unit_type = None
return cls(num, unit, unit_type=unit_type)
def __new__(cls, val, unit, unit_type=None):
"""Overrides __new__ since we are subclassing a Python primitive/"""
new = float.__new__(cls, val)
new._unit = Unit(unit)
new._unit_type = unit_type
return new
def __init__(self, val, unit, unit_type=None):
"""
Initializes a float with unit.
Args:
val (float): Value
unit (Unit): A unit. E.g., "C".
unit_type (str): A type of unit. E.g., "charge"
"""
if unit_type is not None and str(unit) not in ALL_UNITS[unit_type]:
raise UnitError("{} is not a supported unit for {}".format(unit, unit_type))
self._unit = Unit(unit)
self._unit_type = unit_type
def __repr__(self):
return super().__repr__()
def __str__(self):
s = super().__str__()
return "{} {}".format(s, self._unit)
def __add__(self, other):
if not hasattr(other, "unit_type"):
return super().__add__(other)
if other.unit_type != self._unit_type:
raise UnitError("Adding different types of units is not allowed")
val = other
if other.unit != self._unit:
val = other.to(self._unit)
return FloatWithUnit(float(self) + val, unit_type=self._unit_type, unit=self._unit)
def __sub__(self, other):
if not hasattr(other, "unit_type"):
return super().__sub__(other)
if other.unit_type != self._unit_type:
raise UnitError("Subtracting different units is not allowed")
val = other
if other.unit != self._unit:
val = other.to(self._unit)
return FloatWithUnit(float(self) - val, unit_type=self._unit_type, unit=self._unit)
def __mul__(self, other):
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(float(self) * other, unit_type=self._unit_type, unit=self._unit)
return FloatWithUnit(float(self) * other, unit_type=None, unit=self._unit * other._unit)
def __rmul__(self, other):
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(float(self) * other, unit_type=self._unit_type, unit=self._unit)
return FloatWithUnit(float(self) * other, unit_type=None, unit=self._unit * other._unit)
def __pow__(self, i):
return FloatWithUnit(float(self) ** i, unit_type=None, unit=self._unit ** i)
def __truediv__(self, other):
val = super().__truediv__(other)
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(val, unit_type=self._unit_type, unit=self._unit)
return FloatWithUnit(val, unit_type=None, unit=self._unit / other._unit)
def __neg__(self):
return FloatWithUnit(super().__neg__(), unit_type=self._unit_type, unit=self._unit)
def __getnewargs__(self):
"""Function used by pickle to recreate object."""
# print(self.__dict__)
# FIXME
# There's a problem with _unit_type if we try to unpickle objects from file.
# since self._unit_type might not be defined. I think this is due to
# the use of decorators (property and unitized). In particular I have problems with "amu"
# likely due to weight in core.composition
if hasattr(self, "_unit_type"):
args = float(self), self._unit, self._unit_type
else:
args = float(self), self._unit, None
return args
def __getstate__(self):
state = self.__dict__.copy()
state["val"] = float(self)
# print("in getstate %s" % state)
return state
def __setstate__(self, state):
# print("in setstate %s" % state)
self._unit = state["_unit"]
@property
def unit_type(self) -> str:
"""
:return: The type of unit. Energy, Charge, etc.
"""
return self._unit_type
@property
def unit(self) -> str:
"""
:return: The unit, e.g., "eV".
"""
return self._unit
def to(self, new_unit):
"""
Conversion to a new_unit. Right now, only supports 1 to 1 mapping of
units of each type.
Args:
new_unit: New unit type.
Returns:
A FloatWithUnit object in the new units.
Example usage:
>>> e = Energy(1.1, "eV")
>>> e = Energy(1.1, "Ha")
>>> e.to("eV")
29.932522246 eV
"""
return FloatWithUnit(
self * self.unit.get_conversion_factor(new_unit),
unit_type=self._unit_type,
unit=new_unit,
)
@property
def as_base_units(self):
"""
Returns this FloatWithUnit in base SI units, including derived units.
Returns:
A FloatWithUnit object in base SI units
"""
return self.to(self.unit.as_base_units[0])
@property
def supported_units(self):
"""
Supported units for specific unit type.
"""
return tuple(ALL_UNITS[self._unit_type].keys())
class ArrayWithUnit(np.ndarray):
"""
Subclasses `numpy.ndarray` to attach a unit type. Typically, you should
use the pre-defined unit type subclasses such as EnergyArray,
    LengthArray, etc. instead of using ArrayWithUnit directly.
Supports conversion, addition and subtraction of the same unit type. E.g.,
1 m + 20 cm will be automatically converted to 1.2 m (units follow the
leftmost quantity).
>>> a = EnergyArray([1, 2], "Ha")
>>> b = EnergyArray([1, 2], "eV")
>>> c = a + b
>>> print(c)
[ 1.03674933 2.07349865] Ha
>>> c.to("eV")
array([ 28.21138386, 56.42276772]) eV
"""
Error = UnitError
def __new__(cls, input_array, unit, unit_type=None):
"""
Override __new__.
"""
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = np.asarray(input_array).view(cls)
# add the new attributes to the created instance
obj._unit = Unit(unit)
obj._unit_type = unit_type
return obj
def __array_finalize__(self, obj):
"""
See http://docs.scipy.org/doc/numpy/user/basics.subclassing.html for
comments.
"""
if obj is None:
return
self._unit = getattr(obj, "_unit", None)
self._unit_type = getattr(obj, "_unit_type", None)
@property
def unit_type(self) -> str:
"""
:return: The type of unit. Energy, Charge, etc.
"""
return self._unit_type
@property
def unit(self) -> str:
"""
:return: The unit, e.g., "eV".
"""
return self._unit
def __reduce__(self):
# print("in reduce")
reduce = list(super().__reduce__())
# print("unit",self._unit)
# print(reduce[2])
reduce[2] = {"np_state": reduce[2], "_unit": self._unit}
return tuple(reduce)
def __setstate__(self, state):
# pylint: disable=E1101
super().__setstate__(state["np_state"])
self._unit = state["_unit"]
def __repr__(self):
return "{} {}".format(np.array(self).__repr__(), self.unit)
def __str__(self):
return "{} {}".format(np.array(self).__str__(), self.unit)
def __add__(self, other):
if hasattr(other, "unit_type"):
if other.unit_type != self.unit_type:
raise UnitError("Adding different types of units is" " not allowed")
if other.unit != self.unit:
other = other.to(self.unit)
return self.__class__(np.array(self) + np.array(other), unit_type=self.unit_type, unit=self.unit)
def __sub__(self, other):
if hasattr(other, "unit_type"):
if other.unit_type != self.unit_type:
raise UnitError("Subtracting different units is not allowed")
if other.unit != self.unit:
other = other.to(self.unit)
return self.__class__(np.array(self) - np.array(other), unit_type=self.unit_type, unit=self.unit)
def __mul__(self, other):
# FIXME
# Here we have the most important difference between FloatWithUnit and
        # ArrayWithUnit:
# If other does not have units, I return an object with the same units
# as self.
# if other *has* units, I return an object *without* units since
# taking into account all the possible derived quantities would be
# too difficult.
# Moreover Energy(1.0) * Time(1.0, "s") returns 1.0 Ha that is a
# bit misleading.
# Same protocol for __div__
if not hasattr(other, "unit_type"):
return self.__class__(
np.array(self).__mul__(np.array(other)),
unit_type=self._unit_type,
unit=self._unit,
)
# Cannot use super since it returns an instance of self.__class__
# while here we want a bare numpy array.
return self.__class__(np.array(self).__mul__(np.array(other)), unit=self.unit * other.unit)
def __rmul__(self, other):
# pylint: disable=E1101
if not hasattr(other, "unit_type"):
return self.__class__(
np.array(self).__rmul__(np.array(other)),
unit_type=self._unit_type,
unit=self._unit,
)
return self.__class__(np.array(self).__rmul__(np.array(other)), unit=self.unit * other.unit)
def __div__(self, other):
# pylint: disable=E1101
if not hasattr(other, "unit_type"):
return self.__class__(
np.array(self).__div__(np.array(other)),
unit_type=self._unit_type,
unit=self._unit,
)
return self.__class__(np.array(self).__div__(np.array(other)), unit=self.unit / other.unit)
def __truediv__(self, other):
# pylint: disable=E1101
if not hasattr(other, "unit_type"):
return self.__class__(
np.array(self).__truediv__(np.array(other)),
unit_type=self._unit_type,
unit=self._unit,
)
return self.__class__(np.array(self).__truediv__(np.array(other)), unit=self.unit / other.unit)
def __neg__(self):
return self.__class__(np.array(self).__neg__(), unit_type=self.unit_type, unit=self.unit)
def to(self, new_unit):
"""
Conversion to a new_unit.
Args:
new_unit:
New unit type.
Returns:
            An ArrayWithUnit object in the new units.
Example usage:
>>> e = EnergyArray([1, 1.1], "Ha")
>>> e.to("eV")
array([ 27.21138386, 29.93252225]) eV
"""
return self.__class__(
np.array(self) * self.unit.get_conversion_factor(new_unit),
unit_type=self.unit_type,
unit=new_unit,
)
@property
def as_base_units(self):
"""
Returns this ArrayWithUnit in base SI units, including derived units.
Returns:
An ArrayWithUnit object in base SI units
"""
return self.to(self.unit.as_base_units[0])
# TODO abstract base class property?
@property
def supported_units(self):
"""
Supported units for specific unit type.
"""
return ALL_UNITS[self.unit_type]
# TODO abstract base class method?
def conversions(self):
"""
Returns a string showing the available conversions.
Useful tool in interactive mode.
"""
return "\n".join(str(self.to(unit)) for unit in self.supported_units)
def _my_partial(func, *args, **kwargs):
"""
Partial returns a partial object and therefore we cannot inherit class
methods defined in FloatWithUnit. This function calls partial and patches
the new class before returning.
"""
newobj = partial(func, *args, **kwargs)
# monkey patch
newobj.from_string = FloatWithUnit.from_string
return newobj
Energy = partial(FloatWithUnit, unit_type="energy")
"""
A float with an energy unit.
Args:
val (float): Value
unit (Unit): E.g., eV, kJ, etc. Must be valid unit or UnitError is raised.
"""
EnergyArray = partial(ArrayWithUnit, unit_type="energy")
Length = partial(FloatWithUnit, unit_type="length")
"""
A float with a length unit.
Args:
val (float): Value
unit (Unit): E.g., m, ang, bohr, etc. Must be valid unit or UnitError is
raised.
"""
LengthArray = partial(ArrayWithUnit, unit_type="length")
Mass = partial(FloatWithUnit, unit_type="mass")
"""
A float with a mass unit.
Args:
val (float): Value
unit (Unit): E.g., amu, kg, etc. Must be valid unit or UnitError is
raised.
"""
MassArray = partial(ArrayWithUnit, unit_type="mass")
Temp = partial(FloatWithUnit, unit_type="temperature")
"""
A float with a temperature unit.
Args:
val (float): Value
unit (Unit): E.g., K. Only K (kelvin) is supported.
"""
TempArray = partial(ArrayWithUnit, unit_type="temperature")
Time = partial(FloatWithUnit, unit_type="time")
"""
A float with a time unit.
Args:
val (float): Value
unit (Unit): E.g., s, min, h. Must be valid unit or UnitError is
raised.
"""
TimeArray = partial(ArrayWithUnit, unit_type="time")
Charge = partial(FloatWithUnit, unit_type="charge")
"""
A float with a charge unit.
Args:
val (float): Value
unit (Unit): E.g., C, e (electron charge). Must be valid unit or UnitError
is raised.
"""
ChargeArray = partial(ArrayWithUnit, unit_type="charge")
Memory = _my_partial(FloatWithUnit, unit_type="memory")
"""
A float with a memory unit.
Args:
val (float): Value
unit (Unit): E.g., Kb, Mb, Gb, Tb. Must be valid unit or UnitError
is raised.
"""
def obj_with_unit(obj, unit):
"""
Returns a `FloatWithUnit` instance if obj is scalar, a dictionary of
objects with units if obj is a dict, else an instance of
    `ArrayWithUnit`.
Args:
        obj: A scalar number, a dict of values, or an array-like object.
        unit: Specific units (eV, Ha, m, ang, etc.).
"""
unit_type = _UNAME2UTYPE[unit]
if isinstance(obj, numbers.Number):
return FloatWithUnit(obj, unit=unit, unit_type=unit_type)
    if isinstance(obj, collections.abc.Mapping):
return {k: obj_with_unit(v, unit) for k, v in obj.items()}
return ArrayWithUnit(obj, unit=unit, unit_type=unit_type)
def unitized(unit):
"""
Useful decorator to assign units to the output of a function. You can also
use it to standardize the output units of a function that already returns
a FloatWithUnit or ArrayWithUnit. For sequences, all values in the sequences
are assigned the same unit. It works with Python sequences only. The creation
of numpy arrays loses all unit information. For mapping types, the values
are assigned units.
Args:
unit: Specific unit (eV, Ha, m, ang, etc.).
Example usage::
@unitized(unit="kg")
def get_mass():
return 123.45
"""
def wrap(f):
def wrapped_f(*args, **kwargs):
val = f(*args, **kwargs)
unit_type = _UNAME2UTYPE[unit]
if isinstance(val, (FloatWithUnit, ArrayWithUnit)):
return val.to(unit)
if isinstance(val, collections.abc.Sequence):
# TODO: why don't we return a ArrayWithUnit?
# This complicated way is to ensure the sequence type is
# preserved (list or tuple).
return val.__class__([FloatWithUnit(i, unit_type=unit_type, unit=unit) for i in val])
if isinstance(val, collections.abc.Mapping):
for k, v in val.items():
val[k] = FloatWithUnit(v, unit_type=unit_type, unit=unit)
elif isinstance(val, numbers.Number):
return FloatWithUnit(val, unit_type=unit_type, unit=unit)
elif val is None:
pass
else:
raise TypeError("Don't know how to assign units to %s" % str(val))
return val
return wrapped_f
return wrap
if __name__ == "__main__":
import doctest
doctest.testmod()
| gmatteo/pymatgen | pymatgen/core/units.py | Python | mit | 27,212 |
# -*- coding: utf-8 -*-
#
# This class was auto-generated from the API references found at
# https://epayments-api.developer-ingenico.com/s2sapi/v1/
#
from ingenico.connect.sdk.domain.payment.definitions.create_payment_result import CreatePaymentResult
class CreatePaymentResponse(CreatePaymentResult):
def to_dictionary(self):
dictionary = super(CreatePaymentResponse, self).to_dictionary()
return dictionary
def from_dictionary(self, dictionary):
super(CreatePaymentResponse, self).from_dictionary(dictionary)
return self
| Ingenico-ePayments/connect-sdk-python2 | ingenico/connect/sdk/domain/payment/create_payment_response.py | Python | mit | 568 |
# Copyright (c) 2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods to manage guests migration
"""
from collections import deque
from lxml import etree
from oslo_log import log as logging
from nova.compute import power_state
import nova.conf
from nova.i18n import _LI
from nova.i18n import _LW
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
# TODO(berrange): hack to avoid a "import libvirt" in this file.
# Remove this and similar hacks in guest.py, driver.py, host.py
# etc in Ocata.
libvirt = None
def graphics_listen_addrs(migrate_data):
"""Returns listen addresses of vnc/spice from a LibvirtLiveMigrateData"""
listen_addrs = None
if (migrate_data.obj_attr_is_set('graphics_listen_addr_vnc')
or migrate_data.obj_attr_is_set('graphics_listen_addr_spice')):
listen_addrs = {'vnc': None, 'spice': None}
if migrate_data.obj_attr_is_set('graphics_listen_addr_vnc'):
listen_addrs['vnc'] = str(migrate_data.graphics_listen_addr_vnc)
if migrate_data.obj_attr_is_set('graphics_listen_addr_spice'):
listen_addrs['spice'] = str(
migrate_data.graphics_listen_addr_spice)
return listen_addrs
def serial_listen_addr(migrate_data):
"""Returns listen address serial from a LibvirtLiveMigrateData"""
listen_addr = None
# NOTE (markus_z/dansmith): Our own from_legacy_dict() code can return
# an object with nothing set here. That can happen based on the
# compute RPC version pin. Until we can bump that major (which we
# can do just before Ocata releases), we may still get a legacy
# dict over the wire, converted to an object, and thus is may be unset
# here.
if migrate_data.obj_attr_is_set('serial_listen_addr'):
# NOTE (markus_z): The value of 'serial_listen_addr' is either
# an IP address (as string type) or None. There's no need of a
# conversion, in fact, doing a string conversion of None leads to
# 'None', which is an invalid (string type) value here.
listen_addr = migrate_data.serial_listen_addr
return listen_addr
# TODO(sahid): remove me for Q*
def serial_listen_ports(migrate_data):
"""Returns ports serial from a LibvirtLiveMigrateData"""
ports = []
if migrate_data.obj_attr_is_set('serial_listen_ports'):
ports = migrate_data.serial_listen_ports
return ports
def get_updated_guest_xml(guest, migrate_data, get_volume_config):
xml_doc = etree.fromstring(guest.get_xml_desc(dump_migratable=True))
xml_doc = _update_graphics_xml(xml_doc, migrate_data)
xml_doc = _update_serial_xml(xml_doc, migrate_data)
xml_doc = _update_volume_xml(xml_doc, migrate_data, get_volume_config)
xml_doc = _update_perf_events_xml(xml_doc, migrate_data)
return etree.tostring(xml_doc)
def _update_graphics_xml(xml_doc, migrate_data):
listen_addrs = graphics_listen_addrs(migrate_data)
# change over listen addresses
for dev in xml_doc.findall('./devices/graphics'):
gr_type = dev.get('type')
listen_tag = dev.find('listen')
if gr_type in ('vnc', 'spice'):
if listen_tag is not None:
listen_tag.set('address', listen_addrs[gr_type])
if dev.get('listen') is not None:
dev.set('listen', listen_addrs[gr_type])
return xml_doc
def _update_serial_xml(xml_doc, migrate_data):
listen_addr = serial_listen_addr(migrate_data)
listen_ports = serial_listen_ports(migrate_data)
def set_listen_addr_and_port(source, listen_addr, serial_listen_ports):
# The XML nodes can be empty, which would make checks like
# "if source.get('host'):" different to an explicit check for
# None. That's why we have to check for None in this method.
if source.get('host') is not None:
source.set('host', listen_addr)
device = source.getparent()
target = device.find("target")
if target is not None and source.get('service') is not None:
port_index = int(target.get('port'))
# NOTE (markus_z): Previous releases might not give us the
# ports yet, that's why we have this check here.
if len(serial_listen_ports) > port_index:
source.set('service', str(serial_listen_ports[port_index]))
# This updates all "LibvirtConfigGuestSerial" devices
for source in xml_doc.findall("./devices/serial[@type='tcp']/source"):
set_listen_addr_and_port(source, listen_addr, listen_ports)
# This updates all "LibvirtConfigGuestConsole" devices
for source in xml_doc.findall("./devices/console[@type='tcp']/source"):
set_listen_addr_and_port(source, listen_addr, listen_ports)
return xml_doc
def _update_volume_xml(xml_doc, migrate_data, get_volume_config):
"""Update XML using device information of destination host."""
migrate_bdm_info = migrate_data.bdms
# Update volume xml
parser = etree.XMLParser(remove_blank_text=True)
disk_nodes = xml_doc.findall('./devices/disk')
bdm_info_by_serial = {x.serial: x for x in migrate_bdm_info}
for pos, disk_dev in enumerate(disk_nodes):
serial_source = disk_dev.findtext('serial')
bdm_info = bdm_info_by_serial.get(serial_source)
if (serial_source is None or
not bdm_info or not bdm_info.connection_info or
serial_source not in bdm_info_by_serial):
continue
conf = get_volume_config(
bdm_info.connection_info, bdm_info.as_disk_info())
xml_doc2 = etree.XML(conf.to_xml(), parser)
serial_dest = xml_doc2.findtext('serial')
# Compare source serial and destination serial number.
# If these serial numbers match, continue the process.
if (serial_dest and (serial_source == serial_dest)):
LOG.debug("Find same serial number: pos=%(pos)s, "
"serial=%(num)s",
{'pos': pos, 'num': serial_source})
for cnt, item_src in enumerate(disk_dev):
# If source and destination have same item, update
# the item using destination value.
for item_dst in xml_doc2.findall(item_src.tag):
disk_dev.remove(item_src)
item_dst.tail = None
disk_dev.insert(cnt, item_dst)
            # If destination has additional items, these items should be
# added here.
for item_dst in list(xml_doc2):
item_dst.tail = None
disk_dev.insert(cnt, item_dst)
return xml_doc
def _update_perf_events_xml(xml_doc, migrate_data):
"""Update XML by the supported events of destination host."""
supported_perf_events = []
old_xml_has_perf = True
if 'supported_perf_events' in migrate_data:
supported_perf_events = migrate_data.supported_perf_events
perf_events = xml_doc.findall('./perf')
# remove perf events from xml
if not perf_events:
perf_events = etree.Element("perf")
old_xml_has_perf = False
else:
perf_events = perf_events[0]
        for event in list(perf_events):
            perf_events.remove(event)
if not supported_perf_events:
return xml_doc
# add supported perf events
for e in supported_perf_events:
new_event = etree.Element("event", enabled="yes", name=e)
perf_events.append(new_event)
if not old_xml_has_perf:
xml_doc.append(perf_events)
return xml_doc
def find_job_type(guest, instance):
"""Determine the (likely) current migration job type
:param guest: a nova.virt.libvirt.guest.Guest
:param instance: a nova.objects.Instance
Annoyingly when job type == NONE and migration is
no longer running, we don't know whether we stopped
because of failure or completion. We can distinguish
these cases by seeing if the VM still exists & is
running on the current host
:returns: a libvirt job type constant
"""
try:
if guest.is_active():
LOG.debug("VM running on src, migration failed",
instance=instance)
return libvirt.VIR_DOMAIN_JOB_FAILED
else:
LOG.debug("VM is shutoff, migration finished",
instance=instance)
return libvirt.VIR_DOMAIN_JOB_COMPLETED
except libvirt.libvirtError as ex:
LOG.debug("Error checking domain status %(ex)s",
{"ex": ex}, instance=instance)
if ex.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
LOG.debug("VM is missing, migration finished",
instance=instance)
return libvirt.VIR_DOMAIN_JOB_COMPLETED
else:
LOG.info(_LI("Error %(ex)s, migration failed"),
{"ex": ex}, instance=instance)
return libvirt.VIR_DOMAIN_JOB_FAILED
def should_abort(instance, now,
progress_time, progress_timeout,
elapsed, completion_timeout,
migration_status):
"""Determine if the migration should be aborted
:param instance: a nova.objects.Instance
:param now: current time in secs since epoch
:param progress_time: when progress was last made in secs since epoch
:param progress_timeout: time in secs to allow for progress
:param elapsed: total elapsed time of migration in secs
:param completion_timeout: time in secs to allow for completion
:param migration_status: current status of the migration
Check the progress and completion timeouts to determine if either
of them have been hit, and should thus cause migration to be aborted
Avoid migration to be aborted if it is running in post-copy mode
:returns: True if migration should be aborted, False otherwise
"""
if migration_status == 'running (post-copy)':
return False
if (progress_timeout != 0 and
(now - progress_time) > progress_timeout):
LOG.warning(_LW("Live migration stuck for %d sec"),
(now - progress_time), instance=instance)
return True
if (completion_timeout != 0 and
elapsed > completion_timeout):
LOG.warning(
_LW("Live migration not completed after %d sec"),
completion_timeout, instance=instance)
return True
return False
def should_switch_to_postcopy(memory_iteration, current_data_remaining,
previous_data_remaining, migration_status):
"""Determine if the migration should be switched to postcopy mode
:param memory_iteration: Number of memory iterations during the migration
:param current_data_remaining: amount of memory to be transferred
:param previous_data_remaining: previous memory to be transferred
:param migration_status: current status of the migration
Check the progress after the first memory iteration to determine if the
migration should be switched to post-copy mode
Avoid post-copy switch if already running in post-copy mode
:returns: True if migration should be switched to postcopy mode,
False otherwise
"""
if (migration_status == 'running (post-copy)' or
previous_data_remaining <= 0):
return False
if memory_iteration > 1:
progress_percentage = round((previous_data_remaining -
current_data_remaining) *
100 / previous_data_remaining)
# If migration progress is less than 10% per iteration after the
# first memory page copying pass, the migration is switched to
# postcopy mode
if progress_percentage < 10:
return True
return False
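# Illustrative check with hypothetical numbers: on memory iteration 2, going from
# previous_data_remaining=1000 (MiB) to current_data_remaining=950 is 5% progress,
# which is below the 10% threshold, so the caller would switch to post-copy.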
def update_downtime(guest, instance,
olddowntime,
downtime_steps, elapsed):
"""Update max downtime if needed
:param guest: a nova.virt.libvirt.guest.Guest to set downtime for
:param instance: a nova.objects.Instance
:param olddowntime: current set downtime, or None
:param downtime_steps: list of downtime steps
:param elapsed: total time of migration in secs
Determine if the maximum downtime needs to be increased
based on the downtime steps. Each element in the downtime
steps list should be a 2 element tuple. The first element
contains a time marker and the second element contains
the downtime value to set when the marker is hit.
The guest object will be used to change the current
downtime value on the instance.
Any errors hit when updating downtime will be ignored
:returns: the new downtime value
"""
LOG.debug("Current %(dt)s elapsed %(elapsed)d steps %(steps)s",
{"dt": olddowntime, "elapsed": elapsed,
"steps": downtime_steps}, instance=instance)
thisstep = None
for step in downtime_steps:
if elapsed > step[0]:
thisstep = step
if thisstep is None:
LOG.debug("No current step", instance=instance)
return olddowntime
if thisstep[1] == olddowntime:
LOG.debug("Downtime does not need to change",
instance=instance)
return olddowntime
LOG.info(_LI("Increasing downtime to %(downtime)d ms "
"after %(waittime)d sec elapsed time"),
{"downtime": thisstep[1],
"waittime": thisstep[0]},
instance=instance)
try:
guest.migrate_configure_max_downtime(thisstep[1])
except libvirt.libvirtError as e:
LOG.warning(_LW("Unable to increase max downtime to %(time)d"
"ms: %(e)s"),
{"time": thisstep[1], "e": e}, instance=instance)
return thisstep[1]
def save_stats(instance, migration, info, remaining):
"""Save migration stats to the database
:param instance: a nova.objects.Instance
:param migration: a nova.objects.Migration
:param info: a nova.virt.libvirt.guest.JobInfo
:param remaining: percentage data remaining to transfer
Update the migration and instance objects with
the latest available migration stats
"""
# The fully detailed stats
migration.memory_total = info.memory_total
migration.memory_processed = info.memory_processed
migration.memory_remaining = info.memory_remaining
migration.disk_total = info.disk_total
migration.disk_processed = info.disk_processed
migration.disk_remaining = info.disk_remaining
migration.save()
# The coarse % completion stats
instance.progress = 100 - remaining
instance.save()
def trigger_postcopy_switch(guest, instance, migration):
try:
guest.migrate_start_postcopy()
except libvirt.libvirtError as e:
LOG.warning(_LW("Failed to switch to post-copy live "
"migration: %s"),
e, instance=instance)
else:
# NOTE(ltomas): Change the migration status to indicate that
# it is in post-copy active mode, i.e., the VM at
# destination is the active one
LOG.info(_LI("Switching to post-copy migration mode"),
instance=instance)
migration.status = 'running (post-copy)'
migration.save()
def run_tasks(guest, instance, active_migrations, on_migration_failure,
migration, is_post_copy_enabled):
"""Run any pending migration tasks
:param guest: a nova.virt.libvirt.guest.Guest
:param instance: a nova.objects.Instance
:param active_migrations: dict of active migrations
:param on_migration_failure: queue of recovery tasks
:param migration: a nova.objects.Migration
:param is_post_copy_enabled: True if post-copy can be used
Run any pending migration tasks queued against the
provided instance object. The active migrations dict
should use instance UUIDs for keys and a queue of
tasks as the values.
Currently the valid tasks that can be requested
are "pause" and "force-complete". Other tasks will
be ignored.
"""
tasks = active_migrations.get(instance.uuid, deque())
while tasks:
task = tasks.popleft()
if task == 'force-complete':
if migration.status == 'running (post-copy)':
LOG.warning(_LW("Live-migration %s already switched "
"to post-copy mode."),
instance=instance)
elif is_post_copy_enabled:
trigger_postcopy_switch(guest, instance, migration)
else:
try:
guest.pause()
on_migration_failure.append("unpause")
except Exception as e:
LOG.warning(_LW("Failed to pause instance during "
"live-migration %s"),
e, instance=instance)
else:
LOG.warning(_LW("Unknown migration task '%(task)s'"),
{"task": task}, instance=instance)
def run_recover_tasks(host, guest, instance, on_migration_failure):
"""Run any pending migration recovery tasks
:param host: a nova.virt.libvirt.host.Host
:param guest: a nova.virt.libvirt.guest.Guest
:param instance: a nova.objects.Instance
:param on_migration_failure: queue of recovery tasks
Run any recovery tasks provided in the on_migration_failure
queue.
Currently the only valid task that can be requested
is "unpause". Other tasks will be ignored
"""
while on_migration_failure:
task = on_migration_failure.popleft()
# NOTE(tdurakov): there is still possibility to leave
# instance paused in case of live-migration failure.
# This check guarantee that instance will be resumed
# in this case
if task == 'unpause':
try:
state = guest.get_power_state(host)
if state == power_state.PAUSED:
guest.resume()
except Exception as e:
LOG.warning(_LW("Failed to resume paused instance "
"before live-migration rollback %s"),
e, instance=instance)
else:
LOG.warning(_LW("Unknown migration task '%(task)s'"),
{"task": task}, instance=instance)
def downtime_steps(data_gb):
'''Calculate downtime value steps and time between increases.
:param data_gb: total GB of RAM and disk to transfer
This looks at the total downtime steps and upper bound
downtime value and uses a linear function.
For example, with 10 steps, 30 second step delay, 3 GB
of RAM and 400ms target maximum downtime, the downtime will
be increased every 90 seconds in the following progression:
- 0 seconds -> set downtime to 40ms
- 90 seconds -> set downtime to 76ms
- 180 seconds -> set downtime to 112ms
- 270 seconds -> set downtime to 148ms
- 360 seconds -> set downtime to 184ms
- 450 seconds -> set downtime to 220ms
- 540 seconds -> set downtime to 256ms
- 630 seconds -> set downtime to 292ms
- 720 seconds -> set downtime to 328ms
- 810 seconds -> set downtime to 364ms
- 900 seconds -> set downtime to 400ms
This allows the guest a good chance to complete migration
with a small downtime value.
'''
downtime = CONF.libvirt.live_migration_downtime
steps = CONF.libvirt.live_migration_downtime_steps
delay = CONF.libvirt.live_migration_downtime_delay
downtime_min = nova.conf.libvirt.LIVE_MIGRATION_DOWNTIME_MIN
steps_min = nova.conf.libvirt.LIVE_MIGRATION_DOWNTIME_STEPS_MIN
delay_min = nova.conf.libvirt.LIVE_MIGRATION_DOWNTIME_DELAY_MIN
# TODO(hieulq): Need to move min/max value into the config option,
# currently oslo_config will raise ValueError instead of setting
# option value to its min/max.
if downtime < downtime_min:
LOG.warning(_LW("Config option live_migration_downtime's value "
"is less than minimum value %dms, rounded up to "
"the minimum value and will raise ValueError in "
"the future release."), downtime_min)
downtime = downtime_min
if steps < steps_min:
LOG.warning(_LW("Config option live_migration_downtime_steps's "
"value is less than minimum value %dms, rounded "
"up to the minimum value and will raise "
"ValueError in the future release."), steps_min)
steps = steps_min
if delay < delay_min:
LOG.warning(_LW("Config option live_migration_downtime_delay's "
"value is less than minimum value %dms, rounded "
"up to the minimum value and will raise "
"ValueError in the future release."), delay_min)
delay = delay_min
delay = int(delay * data_gb)
base = downtime / steps
offset = (downtime - base) / steps
for i in range(steps + 1):
yield (int(delay * i), int(base + offset * i))
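# Illustrative consumption of downtime_steps() (hypothetical config: downtime=400,
# steps=10, delay=30 and data_gb=3 give the (0, 40) ... (900, 400) progression
# described in the docstring above):
#
#   for deadline_secs, downtime_ms in downtime_steps(data_gb=3):
#       ...  # apply downtime_ms via update_downtime() once deadline_secs elapses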
| OpenSciViz/cloudstack | openstack/src/python/nova-libvirt/backup/migration.py | Python | mit | 21,731 |
import os
import subprocess
import pytest
from julia import install
from .utils import only_in_ci
@only_in_ci
def test_noop(juliainfo):
install(julia=juliainfo.julia)
@only_in_ci
def test_rebuild_broken_pycall(juliainfo):
if juliainfo.version_info < (0, 7):
pytest.skip("Julia >= 0.7 required")
subprocess.check_call(
[
juliainfo.julia,
"--startup-file=no",
"-e",
"""using Pkg; Pkg.develop("PyCall")""",
]
)
# Remove ~/.julia/dev/PyCall/deps/deps.jl
depsjl = os.path.join(
os.path.expanduser("~"), ".julia", "dev", "PyCall", "deps", "deps.jl"
)
if os.path.exists(depsjl):
print("Removing", depsjl)
os.remove(depsjl)
# julia.install() should fix it:
install(julia=juliainfo.julia)
assert os.path.exists(depsjl)
@only_in_ci
def test_add_pycall(juliainfo):
if juliainfo.version_info < (0, 7):
pytest.skip("Julia >= 0.7 required")
# Try to remove PyCall
subprocess.call(
[juliainfo.julia, "--startup-file=no", "-e", """using Pkg; Pkg.rm("PyCall")"""]
)
# julia.install() should add PyCall:
install(julia=juliainfo.julia)
| JuliaLang/pyjulia | src/julia/tests/test_install.py | Python | mit | 1,212 |
"""
Custom specifications for the different types of stack options.
The idea is that these understand the conditions around representation of the
options.
"""
from bespin.option_spec.stack_objs import (
StaticVariable, DynamicVariable, EnvironmentVariable, Skipper, S3Address
, UltraDNSSite, UltraDNSProvider
)
from bespin.option_spec.artifact_objs import ArtifactCommand
from bespin.option_spec.artifact_objs import ArtifactPath
from bespin.formatter import MergedOptionStringFormatter
from bespin.errors import BadSpecValue, BadConfiguration
from bespin.helpers import memoized_property
from input_algorithms.many_item_spec import many_item_formatted_spec
from input_algorithms.spec_base import NotSpecified, Spec
from input_algorithms import spec_base as sb
from input_algorithms import validators
from six.moves.urllib.parse import urlparse
import logging
log = logging.getLogger("bespin.option_spec.stack_specs")
class var_spec(many_item_formatted_spec):
value_name = "Variable"
specs = [sb.or_spec(sb.string_or_int_as_string_spec(), sb.listof(sb.string_or_int_as_string_spec()))]
optional_specs = [sb.string_or_int_as_string_spec()]
formatter = MergedOptionStringFormatter
seperators = "|"
def create_result(self, variable, variable_value, meta, val, dividers):
if variable_value is NotSpecified:
return StaticVariable(variable)
else:
stack = variable
return DynamicVariable(stack, variable_value, meta.everything["bespin"])
class artifact_path_spec(many_item_formatted_spec):
value_name = "Artifact Path"
specs = [sb.string_spec(), sb.string_spec()]
creates = ArtifactPath
formatter = MergedOptionStringFormatter
def create_result(self, host_path, artifact_path, meta, val, dividers):
return ArtifactPath(host_path, artifact_path)
class env_spec(many_item_formatted_spec):
value_name = "Environment Variable"
seperators = [':', '=']
specs = [sb.string_spec()]
creates = EnvironmentVariable
optional_specs = [sb.string_or_int_as_string_spec()]
formatter = MergedOptionStringFormatter
def create_result(self, env_name, other_val, meta, val, dividers):
"""Set default_val and set_val depending on the seperator"""
args = [env_name]
if other_val is NotSpecified:
other_val = None
if not dividers:
args.extend([None, None])
elif dividers[0] == ':':
args.extend([other_val, None])
elif dividers[0] == '=':
args.extend([None, other_val])
return EnvironmentVariable(*args)
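    # Illustrative mapping (hypothetical values): "PORT" gives
    # EnvironmentVariable("PORT", None, None); "PORT:8080" fills default_val
    # with "8080"; "PORT=8080" fills set_val with "8080".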
class skipper_spec(many_item_formatted_spec):
value_name = "Skip specification"
spec = lambda: sb.delayed(var_spec())
creates = Skipper
specs = [spec(), spec()]
def create_result(self, var1, var2, meta, val, dividers):
return Skipper(var1, var2)
class s3_address(many_item_formatted_spec):
value_name = "s3 address"
specs = [sb.string_spec()]
optional_specs = [sb.integer_spec()]
creates = S3Address
seperators = None
formatter = MergedOptionStringFormatter
def create_result(self, address, timeout, meta, val, dividers):
if timeout is NotSpecified:
timeout = 600
options = urlparse(address)
if options.scheme != "s3":
raise BadSpecValue("Not a valid s3 address", meta=meta, got=val)
if not options.netloc:
path = ''
domain = options.path
else:
path = options.path
domain = options.netloc
if not path.startswith('/'):
path = '/'
return S3Address(domain, path, timeout)
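    # Illustrative result (hypothetical address): "s3://my-bucket/builds/app.tar.gz"
    # normalises to S3Address("my-bucket", "/builds/app.tar.gz", 600).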
class dns_spec(Spec):
def setup(self, spec):
self.spec = spec
def normalise_filled(self, meta, val):
meta.everything = meta.everything.wrapped()
meta.everything["__dns_vars__"] = val["vars"].as_dict()
return self.spec.normalise(meta, val)
class dns_site_spec(Spec):
def normalise_filled(self, meta, val):
log.info("Normalising dns site %s", meta.path)
val = sb.dictionary_spec().normalise(meta, val)
provider = val["provider"]
available = meta.everything["stacks"][meta.everything["__stack_name__"]]["dns"]["providers"]
if provider not in available.keys():
raise BadConfiguration("Specified provider isn't defined in {dns.providers}", available=list(available.keys()), wanted=provider, meta=meta)
val["provider"] = lambda: meta.everything["stacks"][meta.everything["__stack_name__"]]["dns"]["providers"][provider]
if available[provider]["provider_type"] == "ultradns":
return self.ultradns_site_spec(val).normalise(meta, val)
else:
raise BadConfiguration("Unknown dns provider type", available=["ultradns"], wanted=val["provider"].provider_type, meta=meta)
def ultradns_site_spec(self, this):
formatted_string = sb.formatted(sb.string_spec(), formatter=MergedOptionStringFormatter)
return sb.create_spec(UltraDNSSite
, name = sb.formatted(sb.overridden("{_key_name_1}"), formatter=MergedOptionStringFormatter)
, ttl = sb.optional_spec(sb.integer_spec())
, provider = sb.any_spec()
, record_type = sb.required(formatted_string)
, zone = sb.required(formatted_string)
, domain = sb.required(formatted_string)
, environments = sb.required(self.dns_environment_spec(this))
)
def dns_environment_spec(self, this):
formatted_string = sb.formatted(sb.string_spec(), formatter=MergedOptionStringFormatter)
class spec(Spec):
def normalise_filled(s, meta, val):
meta.everything = meta.everything.wrapped()
meta.everything["__site_environments__"] = this["environments"].as_dict()
spec = sb.dictof(sb.string_spec(), sb.listof(formatted_string))
return spec.normalise(meta, val.as_dict())
return spec()
class dns_provider_spec(Spec):
def normalise_filled(self, meta, val):
val = sb.dictionary_spec().normalise(meta, val)
provider_type = val["provider_type"]
available = ["ultradns"]
if provider_type not in available:
raise BadConfiguration("Specified provider type isn't supported", supported=available, wanted=provider_type, meta=meta)
if provider_type == "ultradns":
return self.ultradns_provider_spec.normalise(meta, val)
@memoized_property
def ultradns_provider_spec(self):
return sb.create_spec(UltraDNSProvider
, name = sb.formatted(sb.overridden("{_key_name_1}"), formatter=MergedOptionStringFormatter)
, provider_type = sb.required(sb.string_spec())
, username = sb.required(formatted_string)
, password = sb.required(formatted_string)
)
formatted_string = sb.formatted(sb.string_spec(), formatter=MergedOptionStringFormatter)
artifact_command_spec = lambda : sb.create_spec(ArtifactCommand
, copy = sb.listof(artifact_path_spec())
, modify = sb.dictof(sb.string_spec(), sb.set_options(append=sb.listof(formatted_string)))
, command = sb.listof(formatted_string)
, timeout = sb.defaulted(sb.integer_spec(), 600)
, temp_dir = sb.defaulted(formatted_string, None)
, add_into_tar = sb.listof(artifact_path_spec())
)
params_json_spec = lambda: sb.listof(sb.set_options(
ParameterKey = sb.required(sb.any_spec())
, ParameterValue = sb.required(sb.any_spec())
))
params_yaml_spec = lambda: sb.dictof(sb.string_spec(), sb.formatted(sb.string_or_int_as_string_spec(), formatter=MergedOptionStringFormatter))
stack_json_spec = lambda: sb.set_options(
Resources = sb.required(sb.dictof(sb.string_spec(), sb.set_options(Type=sb.required(sb.string_spec()), Properties=sb.optional_spec(sb.dictionary_spec()))))
, Parameters = sb.optional_spec(sb.dictof(sb.string_spec(), sb.dictionary_spec()))
, Outputs = sb.optional_spec(sb.dictof(sb.string_spec(), sb.dictionary_spec()))
)
class policy_set_options(sb.set_options):
"""
Strip ``NotSpecified`` values from normalised dictionary
and ensure only one of the 'Not' options are specified
"""
def normalise_filled(self, meta, val):
result = super(policy_set_options, self).normalise_filled(meta, val)
val = dict((k, v) for k, v in result.items() if v is not NotSpecified)
return sb.apply_validators(meta, val, [
validators.has_only_one_of(["Action", "NotAction"]),
validators.has_only_one_of(["Resource", "NotResource"])
])
# http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#stack-policy-reference
policy_json_spec = lambda: sb.set_options(
Statement = sb.listof(policy_set_options(
Effect = sb.string_choice_spec(choices=["Deny", "Allow"])
, Action = sb.optional_spec(sb.listof(sb.string_choice_spec(choices=["Update:Modify", "Update:Replace", "Update:Delete", "Update:*"])))
, NotAction = sb.optional_spec(sb.listof(sb.string_choice_spec(choices=["Update:Modify", "Update:Replace", "Update:Delete", "Update:*"])))
, Principal = sb.valid_string_spec(validators.regexed("^\*$"))
, Resource = sb.optional_spec(sb.listof(sb.valid_string_spec(validators.regexed(r"^(LogicalResourceId/.*|\*)$"))))
, NotResource = sb.optional_spec(sb.listof(sb.valid_string_spec(validators.regexed(r"^(LogicalResourceId/.*|\*)$"))))
, Condition = sb.optional_spec(sb.dictionary_spec())
))
)
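# Illustrative sketch (not part of the original module): a stack policy
# document of the shape that ``policy_json_spec`` above is written to accept,
# mirroring the AWS stack policy reference linked in the comment. The concrete
# resource name is an assumption for demonstration only:
#
#   example_policy = {
#       "Statement": [
#           { "Effect": "Allow"
#           , "Action": ["Update:Modify"]
#           , "Principal": "*"
#           , "Resource": ["LogicalResourceId/AppServer"]
#           }
#       ]
#   }
#
# The ``has_only_one_of`` validators in ``policy_set_options`` stop a single
# statement from carrying both Action and NotAction, or both Resource and
# NotResource.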
| realestate-com-au/bespin | bespin/option_spec/stack_specs.py | Python | mit | 9,692 |
#!/usr/bin/env python
import os, StringIO, tempfile
from rtfng.utils import RTFTestCase
from rtfng.Elements import Document
from rtfng.document.section import Section
class DocumentTestCase(RTFTestCase):
def test_documentWrite(self):
doc, section, styles = RTFTestCase.initializeDoc()
fd, filename = tempfile.mkstemp(prefix='test-pyrtf', suffix='.rtf')
os.close(fd)
doc.write(filename)
result = StringIO.StringIO()
doc.write(result)
assert open(filename, 'r').read() == result.getvalue()
os.remove(filename)
| oubiwann-unsupported/pyrtf | test/document/test_document.py | Python | mit | 611 |
from rtfng.Styles import TextStyle
from rtfng.PropertySets import TextPropertySet, ShadingPropertySet
class Text(object):
def __init__(self, *params):
self.Data = None
self.Style = None
self.Properties = None
self.Shading = None
for param in params:
if isinstance(param, TextStyle):
self.Style = param
elif isinstance(param, TextPropertySet):
self.Properties = param
elif isinstance(param, ShadingPropertySet):
self.Shading = param
else:
# otherwise let the rendering custom handler sort it out itself
self.SetData(param)
def SetData(self, value):
self.Data = value
class Inline(list):
''' A Text object but with a list of data. Perhaps unify Text and Inline classes? '''
def __init__(self, *params): # Method copied from above
super(Inline, self).__init__()
self.Style = None
self.Properties = None
self.Shading = None
for param in params:
if isinstance(param, TextStyle):
self.Style = param
elif isinstance(param, TextPropertySet):
self.Properties = param
elif isinstance(param, ShadingPropertySet):
self.Shading = param
else:
# otherwise let the rendering custom handler sort it out itself
self.append(param)
def append(self, *params):
# filter out any that are explicitly None
values = [x for x in params if x is not None]
self.extend(values)
def TEXT(*params, **kwargs):
textProps = TextPropertySet()
textProps.font = kwargs.get('font', None)
textProps.size = kwargs.get('size', None)
textProps.bold = kwargs.get('bold', False)
textProps.italic = kwargs.get('italic', False)
textProps.underline = kwargs.get('underline', False)
textProps.colour = kwargs.get('colour', None)
if len(params) == 1:
return Text(params[0], textProps)
result = Inline(textProps)
    result.append(*params)
return result
def B(*params):
textProps = TextPropertySet(bold=True)
if len(params) == 1:
return Text(params[0], textProps)
result = Inline(textProps)
    result.append(*params)
return result
def I(*params):
textProps = TextPropertySet(italic=True)
if len(params) == 1:
return Text(params[0], textProps)
result = Inline(textProps)
    result.append(*params)
return result
def U(*params):
textProps = TextPropertySet(underline=True)
if len(params) == 1:
return Text(params[0], textProps)
result = Inline(textProps)
    result.append(*params)
return result
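# Illustrative usage sketch (not part of the original module): the helpers
# above simply bundle their arguments with a TextPropertySet. Assuming a
# paragraph-like container that accepts Text/Inline objects (an assumption
# here, not an API guarantee), usage looks roughly like:
#
#   para.append(TEXT('Warning:', bold=True), ' plain text ',
#               B('bold'), I('italic'), U('underlined'))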
| oubiwann-unsupported/pyrtf | rtfng/document/character.py | Python | mit | 2,798 |
import numpy as np
from pytest import approx
from fastats import single_pass
from fastats.maths import relu, softplus
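# For the reader's convenience, the reference formulas that the expected
# values below encode (the actual implementations live in fastats.maths):
#   relu(x)     = max(x, 0)
#   softplus(x) = ln(1 + exp(x)), e.g. softplus(0) = ln 2 ~= 0.693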
def test_relu_basic_sanity():
data = np.arange(-2, 3, dtype='float32')
result = single_pass(data, value=relu)
assert result[0] == 0.
assert result[1] == 0.
assert result[2] == 0.
assert result[3] == 1.
assert result[4] == 2.
def test_relu_with_nan_and_inf():
data = np.array([np.nan, -np.inf, np.inf], dtype='float32')
result = single_pass(data, value=relu)
assert np.isnan(result[0])
assert result[1] == 0.
assert result[2] == np.inf
def test_softplus_basic_sanity():
data = np.array([-2, -1, 0, 1, 2], dtype='float32')
result = single_pass(data, value=softplus)
assert result[0] == approx(0.12692805)
assert result[1] == approx(0.31326166)
assert result[2] == approx(0.69314718)
assert result[3] == approx(1.31326163)
assert result[4] == approx(2.12692809)
def test_softplus_with_nan_and_inf():
data = np.array([np.nan, -np.inf, np.inf], dtype='float32')
result = single_pass(data, value=softplus)
assert np.isnan(result[0])
assert result[1] == 0.
assert result[2] == np.inf
if __name__ == '__main__':
import pytest
pytest.main([__file__])
| dwillmer/fastats | tests/maths/test_activations.py | Python | mit | 1,285 |
import unittest
import urllib
from createsend import *
class PeopleTestCase(object):
def test_get(self):
email = "[email protected]"
self.person.stub_request("clients/%s/people.json?email=%s" % (self.client_id, urllib.quote(email)), "person_details.json")
person = self.person.get(self.client_id, email)
self.assertEquals(person.EmailAddress, email)
self.assertEquals(person.Name, "Person One")
self.assertEquals(person.AccessLevel, 1023)
self.assertEquals(person.Status, "Active")
def test_get_without_args(self):
email = "[email protected]"
self.person.stub_request("clients/%s/people.json?email=%s" % (self.client_id, urllib.quote(email)), "person_details.json")
person = self.person.get()
self.assertEquals(person.EmailAddress, email)
self.assertEquals(person.Name, "Person One")
self.assertEquals(person.AccessLevel, 1023)
self.assertEquals(person.Status, "Active")
def test_add(self):
self.person.stub_request("clients/%s/people.json" % self.client_id, "add_person.json")
result = self.person.add(self.client_id, "[email protected]", "Person Name", 1023, "Password")
self.assertEquals(result.EmailAddress, "[email protected]")
def test_update(self):
new_email = "[email protected]"
self.person.stub_request("clients/%s/people.json?email=%s" % (self.client_id, urllib.quote(self.person.email_address)), None)
self.person.update(new_email, "Person New Name", 31, 'blah')
self.assertEquals(self.person.email_address, new_email)
def test_delete(self):
self.person.stub_request("clients/%s/people.json?email=%s" % (self.client_id, urllib.quote(self.person.email_address)), None)
email_address = self.person.delete()
class OAuthPeopleTestCase(unittest.TestCase, PeopleTestCase):
"""Test when using OAuth to authenticate"""
def setUp(self):
self.client_id = "d98h2938d9283d982u3d98u88"
self.person = Person(
{"access_token": "ASP95S4aR+9KsgfHB0dapTYxNA==", "refresh_token": "5S4aASP9R+9KsgfHB0dapTYxNA=="},
self.client_id, "[email protected]")
class ApiKeyPeopleTestCase(unittest.TestCase, PeopleTestCase):
"""Test when using an API key to authenticate"""
def setUp(self):
self.client_id = "d98h2938d9283d982u3d98u88"
self.person = Person(
{'api_key': '123123123123123123123'},
self.client_id, "[email protected]")
| guilhermetavares/createsend-python | test/test_people.py | Python | mit | 2,391 |
#!/usr/bin/python
# calculates supervised weighted knn baseline from sparse format
# This code does not handle tie cases
import sys
import gzip
import pprint
import collections as col;
def map_keyfile(fname, withtags=False):
    # returns (dd, maps, keys) read from a gzipped, PTB-formatted key file:
    #   dd   : list mapping index -> word
    #   maps : word -> {index: running occurrence count, "_A_": total count}
    #   keys : index -> gold key/tag for that word
    # empty lines are skipped while indexing; </s> entries are skipped later
dd = []
keys = {}
maps = col.defaultdict(lambda: col.defaultdict(int))
for line in gzip.open(fname):
l = line.strip().split()
if len(l) == 0:
continue
dd.append(l[0])
maps[l[0]]["_A_"] += 1
maps[l[0]][len(dd) - 1] = maps[l[0]]["_A_"]
keys[len(dd) - 1] = l[1]
return (dd,maps,keys)
def knn_dist_sparse_gz(words, keys, k, debug = False):
if debug: print >> sys.stderr, "calculating"
cr, wr = 0, 0
for (r,line) in enumerate(sys.stdin, start = 0):
ll = line.strip().split()
ll.pop(0)
colum = len(ll)/2
ans = col.defaultdict(lambda: 0)
maxv,maxi = 0,-1
if keys[r] == "</s>":
continue
for j in range(k):
ref = int(ll[2*j])
if ref == r:
j -= 1
continue
if float(ll[2*j+1]) == 0:
ans[keys[ref]] += 1.0/10e-15;
else:
ans[keys[ref]] += 1.0/float(ll[2*j+1])
if ans[keys[ref]] > maxv:
maxv = ans[keys[ref]]
maxi = keys[ref]
if maxi == keys[r]:
cr += 1
else:
wr += 1
if debug and r % 100000 == 0:
print >> sys.stderr, r,
print >> sys.stderr, cr,"\t",wr, "\t", 1.0*cr /(wr+cr)
print cr,"\t",wr, "\t", 1.0*cr /(wr+cr)
keys = map_keyfile(sys.argv[1])
knn_dist_sparse_gz(keys[0],keys[2], int(sys.argv[2]), debug=False)
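# Illustrative note on usage (an assumption, not taken from the original
# script's documentation): the sparse distance rows are read from stdin, one
# row per instance, as a leading field followed by alternating
# "<neighbour index> <distance>" pairs, so an invocation looks roughly like:
#
#   zcat distances.gz | python knnsparse.py keys.gz 5
#
# where keys.gz is the gzipped key file (word and gold key per line) and 5 is
# the number of neighbours k.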
| ai-ku/upos_2014 | src/scripts/knnsparse.py | Python | mit | 1,926 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# nonphysical.py
# Waqas Bhatti and Luke Bouma - Feb 2017
# ([email protected] and [email protected])
'''Light curve fitting routines for 'non-physical' models:
- :py:func:`astrobase.lcfit.nonphysical.spline_fit_magseries`: fit a univariate
cubic spline to a magnitude/flux time series with a specified spline knot
fraction.
- :py:func:`astrobase.lcfit.nonphysical.savgol_fit_magseries`: apply a
Savitzky-Golay smoothing filter to a magnitude/flux time series, returning the
resulting smoothed function as a "fit".
- :py:func:`astrobase.lcfit.nonphysical.legendre_fit_magseries`: fit a Legendre
function of the specified order to the magnitude/flux time series.
'''
#############
## LOGGING ##
#############
import logging
from astrobase import log_sub, log_fmt, log_date_fmt
DEBUG = False
if DEBUG:
level = logging.DEBUG
else:
level = logging.INFO
LOGGER = logging.getLogger(__name__)
logging.basicConfig(
level=level,
style=log_sub,
format=log_fmt,
datefmt=log_date_fmt,
)
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
from numpy import (
sum as npsum, array as nparray, max as npmax, min as npmin,
floor as npfloor, where as npwhere, linspace as nplinspace,
full_like as npfull_like, nonzero as npnonzero, diff as npdiff,
concatenate as npconcatenate
)
from scipy.interpolate import LSQUnivariateSpline
from scipy.signal import savgol_filter
from numpy.polynomial.legendre import Legendre
from ..lcmath import sigclip_magseries
from .utils import get_phased_quantities, make_fit_plot
#################################################################
## SPLINE FITTING TO PHASED AND UNPHASED MAGNITUDE TIME SERIES ##
#################################################################
def spline_fit_magseries(times, mags, errs, period,
knotfraction=0.01,
maxknots=30,
sigclip=30.0,
plotfit=False,
ignoreinitfail=False,
magsarefluxes=False,
verbose=True):
'''This fits a univariate cubic spline to the phased light curve.
This fit may be better than the Fourier fit for sharply variable objects,
like EBs, so can be used to distinguish them from other types of variables.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to fit a spline to.
period : float
The period to use for the spline fit.
knotfraction : float
The knot fraction is the number of internal knots to use for the
spline. A value of 0.01 (or 1%) of the total number of non-nan
observations appears to work quite well, without over-fitting. maxknots
controls the maximum number of knots that will be allowed.
maxknots : int
The maximum number of knots that will be used even if `knotfraction`
gives a value to use larger than `maxknots`. This helps dealing with
over-fitting to short time-scale variations.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If True, will treat the input values of `mags` as fluxes for purposes of
plotting the fit and sig-clipping.
plotfit : str or False
If this is a string, this function will make a plot for the fit to the
mag/flux time-series and writes the plot to the path specified here.
ignoreinitfail : bool
If this is True, ignores the initial failure to find a set of optimized
Fourier parameters using the global optimization function and proceeds
to do a least-squares fit anyway.
verbose : bool
If True, will indicate progress and warn of any problems.
Returns
-------
dict
This function returns a dict containing the model fit parameters, the
minimized chi-sq value and the reduced chi-sq value. The form of this
dict is mostly standardized across all functions in this module::
{
'fittype':'spline',
'fitinfo':{
'nknots': the number of knots used for the fit
'fitmags': the model fit mags,
'fitepoch': the epoch of minimum light for the fit,
},
'fitchisq': the minimized value of the fit's chi-sq,
'fitredchisq':the reduced chi-sq value,
'fitplotfile': the output fit plot if fitplot is not None,
'magseries':{
'times':input times in phase order of the model,
'phase':the phases of the model mags,
'mags':input mags/fluxes in the phase order of the model,
'errs':errs in the phase order of the model,
'magsarefluxes':input value of magsarefluxes kwarg
}
}
'''
# this is required to fit the spline correctly
if errs is None:
errs = npfull_like(mags, 0.005)
# sigclip the magnitude time series
stimes, smags, serrs = sigclip_magseries(times, mags, errs,
sigclip=sigclip,
magsarefluxes=magsarefluxes)
# get rid of zero errs
nzind = npnonzero(serrs)
stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]
# phase the mag series
phase, pmags, perrs, ptimes, mintime = (
get_phased_quantities(stimes, smags, serrs, period)
)
    # now figure out the number of knots, capped at the maxknots kwarg
nobs = len(phase)
nknots = int(npfloor(knotfraction*nobs))
nknots = maxknots if nknots > maxknots else nknots
splineknots = nplinspace(phase[0] + 0.01,
phase[-1] - 0.01,
num=nknots)
# NOTE: newer scipy needs x to be strictly increasing. this means we should
# filter out anything that doesn't have np.diff(phase) > 0.0
# FIXME: this needs to be tested
phase_diffs_ind = npdiff(phase) > 0.0
incphase_ind = npconcatenate((nparray([True]), phase_diffs_ind))
phase, pmags, perrs = (phase[incphase_ind],
pmags[incphase_ind],
perrs[incphase_ind])
# generate and fit the spline
spl = LSQUnivariateSpline(phase, pmags, t=splineknots, w=1.0/perrs)
# calculate the spline fit to the actual phases, the chisq and red-chisq
fitmags = spl(phase)
fitchisq = npsum(
((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs)
)
fitredchisq = fitchisq/(len(pmags) - nknots - 1)
if verbose:
LOGINFO(
'spline fit done. nknots = %s, '
'chisq = %.5f, reduced chisq = %.5f' %
(nknots, fitchisq, fitredchisq)
)
# figure out the time of light curve minimum (i.e. the fit epoch)
# this is when the fit mag is maximum (i.e. the faintest)
# or if magsarefluxes = True, then this is when fit flux is minimum
if not magsarefluxes:
fitmagminind = npwhere(fitmags == npmax(fitmags))
else:
fitmagminind = npwhere(fitmags == npmin(fitmags))
if len(fitmagminind[0]) > 1:
fitmagminind = (fitmagminind[0][0],)
magseriesepoch = ptimes[fitmagminind]
# assemble the returndict
returndict = {
'fittype':'spline',
'fitinfo':{
'nknots':nknots,
'fitmags':fitmags,
'fitepoch':magseriesepoch
},
'fitchisq':fitchisq,
'fitredchisq':fitredchisq,
'fitplotfile':None,
'magseries':{
'times':ptimes,
'phase':phase,
'mags':pmags,
'errs':perrs,
'magsarefluxes':magsarefluxes
},
}
# make the fit plot if required
if plotfit and isinstance(plotfit, str):
make_fit_plot(phase, pmags, perrs, fitmags,
period, mintime, magseriesepoch,
plotfit,
magsarefluxes=magsarefluxes)
returndict['fitplotfile'] = plotfit
return returndict
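# ----------------------------------------------------------------------------
# Hedged usage sketch (illustrative only; not part of the public API). This
# fits the spline above to a synthetic sinusoidal light curve whose period,
# amplitude and noise level are arbitrary assumptions for demonstration.
def _spline_fit_example():
    '''Run `spline_fit_magseries` on a fake sinusoidal magnitude series.'''
    import numpy as np
    times = np.sort(np.random.uniform(0.0, 25.0, size=1000))
    period = 3.1416
    mags = 15.0 + 0.08*np.sin(2.0*np.pi*times/period)
    mags = mags + np.random.normal(scale=0.01, size=mags.size)
    errs = np.full_like(mags, 0.01)
    return spline_fit_magseries(times, mags, errs, period,
                                knotfraction=0.01, plotfit=False,
                                verbose=False)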
#####################################################
## SAVITZKY-GOLAY FITTING TO MAGNITUDE TIME SERIES ##
#####################################################
def savgol_fit_magseries(times, mags, errs, period,
windowlength=None,
polydeg=2,
sigclip=30.0,
plotfit=False,
magsarefluxes=False,
verbose=True):
'''Fit a Savitzky-Golay filter to the magnitude/flux time series.
SG fits successive sub-sets (windows) of adjacent data points with a
low-order polynomial via least squares. At each point (magnitude), it
returns the value of the polynomial at that magnitude's time. This is made
significantly cheaper than *actually* performing least squares for each
window through linear algebra tricks that are possible when specifying the
window size and polynomial order beforehand. Numerical Recipes Ch 14.8
gives an overview, Eq. 14.8.6 is what Scipy has implemented.
The idea behind Savitzky-Golay is to preserve higher moments (>=2) of the
input data series than would be done by a simple moving window average.
Note that the filter assumes evenly spaced data, which magnitude time series
are not. By *pretending* the data points are evenly spaced, we introduce an
additional noise source in the function values. This is a relatively small
noise source provided that the changes in the magnitude values across the
full width of the N=windowlength point window is < sqrt(N/2) times the
measurement noise on a single point.
TODO:
- Find correct dof for reduced chi squared in savgol_fit_magseries
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to fit the Savitsky-Golay model to.
period : float
The period to use for the model fit.
windowlength : None or int
The length of the filter window (the number of coefficients). Must be
either positive and odd, or None. (The window is the number of points to
the left, and to the right, of whatever point is having a polynomial fit
to it locally). Bigger windows at fixed polynomial order risk lowering
the amplitude of sharp features. If None, this routine (arbitrarily)
sets the `windowlength` for phased LCs to be either the number of finite
data points divided by 300, or polydeg+3, whichever is bigger.
polydeg : int
This is the order of the polynomial used to fit the samples. Must be
less than `windowlength`. "Higher-order filters do better at preserving
feature heights and widths, but do less smoothing on broader features."
(Numerical Recipes).
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If True, will treat the input values of `mags` as fluxes for purposes of
plotting the fit and sig-clipping.
plotfit : str or False
If this is a string, this function will make a plot for the fit to the
mag/flux time-series and writes the plot to the path specified here.
verbose : bool
If True, will indicate progress and warn of any problems.
Returns
-------
dict
This function returns a dict containing the model fit parameters, the
minimized chi-sq value and the reduced chi-sq value. The form of this
dict is mostly standardized across all functions in this module::
{
'fittype':'savgol',
'fitinfo':{
'windowlength': the window length used for the fit,
'polydeg':the polynomial degree used for the fit,
'fitmags': the model fit mags,
'fitepoch': the epoch of minimum light for the fit,
},
'fitchisq': the minimized value of the fit's chi-sq,
'fitredchisq':the reduced chi-sq value,
'fitplotfile': the output fit plot if fitplot is not None,
'magseries':{
'times':input times in phase order of the model,
'phase':the phases of the model mags,
'mags':input mags/fluxes in the phase order of the model,
'errs':errs in the phase order of the model,
'magsarefluxes':input value of magsarefluxes kwarg
}
}
'''
stimes, smags, serrs = sigclip_magseries(times, mags, errs,
sigclip=sigclip,
magsarefluxes=magsarefluxes)
# get rid of zero errs
nzind = npnonzero(serrs)
stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]
phase, pmags, perrs, ptimes, mintime = (
get_phased_quantities(stimes, smags, serrs, period)
)
if not isinstance(windowlength, int):
windowlength = max(
polydeg + 3,
int(len(phase)/300)
)
if windowlength % 2 == 0:
windowlength += 1
if verbose:
LOGINFO('applying Savitzky-Golay filter with '
'window length %s and polynomial degree %s to '
'mag series with %s observations, '
'using period %.6f, folded at %.6f' % (windowlength,
polydeg,
len(pmags),
period,
mintime))
# generate the function values obtained by applying the SG filter. The
# "wrap" option is best for phase-folded LCs.
sgf = savgol_filter(pmags, windowlength, polydeg, mode='wrap')
# here the "fit" to the phases is the function produced by the
# Savitzky-Golay filter. then compute the chisq and red-chisq.
fitmags = sgf
fitchisq = npsum(
((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs)
)
# TODO: quantify dof for SG filter.
nparams = int(len(pmags)/windowlength) * polydeg
fitredchisq = fitchisq/(len(pmags) - nparams - 1)
    fitredchisq = -99.  # placeholder: correct dof for the SG filter is not yet defined (see TODO above)
if verbose:
LOGINFO(
'SG filter applied. chisq = %.5f, reduced chisq = %.5f' %
(fitchisq, fitredchisq)
)
# figure out the time of light curve minimum (i.e. the fit epoch)
# this is when the fit mag is maximum (i.e. the faintest)
# or if magsarefluxes = True, then this is when fit flux is minimum
if not magsarefluxes:
fitmagminind = npwhere(fitmags == npmax(fitmags))
else:
fitmagminind = npwhere(fitmags == npmin(fitmags))
if len(fitmagminind[0]) > 1:
fitmagminind = (fitmagminind[0][0],)
magseriesepoch = ptimes[fitmagminind]
# assemble the returndict
returndict = {
'fittype':'savgol',
'fitinfo':{
'windowlength':windowlength,
'polydeg':polydeg,
'fitmags':fitmags,
'fitepoch':magseriesepoch
},
'fitchisq':fitchisq,
'fitredchisq':fitredchisq,
'fitplotfile':None,
'magseries':{
'times':ptimes,
'phase':phase,
'mags':pmags,
'errs':perrs,
'magsarefluxes':magsarefluxes
}
}
# make the fit plot if required
if plotfit and isinstance(plotfit, str):
make_fit_plot(phase, pmags, perrs, fitmags,
period, mintime, magseriesepoch,
plotfit,
magsarefluxes=magsarefluxes)
returndict['fitplotfile'] = plotfit
return returndict
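# ----------------------------------------------------------------------------
# Hedged illustration (not part of the public API): a minimal stand-alone look
# at the underlying scipy call used above, showing `mode='wrap'` treating the
# phased series as periodic at the boundaries. The signal is arbitrary.
def _savgol_wrap_example():
    '''Smooth a noisy periodic signal the same way `savgol_fit_magseries` does.'''
    import numpy as np
    phase = np.linspace(0.0, 1.0, 201, endpoint=False)
    noisy = np.sin(2.0*np.pi*phase) + np.random.normal(scale=0.05, size=phase.size)
    smoothed = savgol_filter(noisy, window_length=21, polyorder=2, mode='wrap')
    return phase, noisy, smoothed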
##########################################################
## LEGENDRE-POLYNOMIAL FITTING TO MAGNITUDE TIME SERIES ##
##########################################################
def legendre_fit_magseries(times, mags, errs, period,
legendredeg=10,
sigclip=30.0,
plotfit=False,
magsarefluxes=False,
verbose=True):
'''Fit an arbitrary-order Legendre series, via least squares, to the
magnitude/flux time series.
This is a series of the form::
p(x) = c_0*L_0(x) + c_1*L_1(x) + c_2*L_2(x) + ... + c_n*L_n(x)
where L_i's are Legendre polynomials (also called "Legendre functions of the
first kind") and c_i's are the coefficients being fit.
This function is mainly just a wrapper to
`numpy.polynomial.legendre.Legendre.fit`.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to fit a Legendre series polynomial to.
period : float
The period to use for the Legendre fit.
legendredeg : int
This is `n` in the equation above, e.g. if you give `n=5`, you will
get 6 coefficients. This number should be much less than the number of
data points you are fitting.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If True, will treat the input values of `mags` as fluxes for purposes of
plotting the fit and sig-clipping.
plotfit : str or False
If this is a string, this function will make a plot for the fit to the
mag/flux time-series and writes the plot to the path specified here.
verbose : bool
If True, will indicate progress and warn of any problems.
Returns
-------
dict
This function returns a dict containing the model fit parameters, the
minimized chi-sq value and the reduced chi-sq value. The form of this
dict is mostly standardized across all functions in this module::
{
'fittype':'legendre',
'fitinfo':{
'legendredeg': the Legendre polynomial degree used,
'fitmags': the model fit mags,
'fitepoch': the epoch of minimum light for the fit,
},
'fitchisq': the minimized value of the fit's chi-sq,
'fitredchisq':the reduced chi-sq value,
'fitplotfile': the output fit plot if fitplot is not None,
'magseries':{
'times':input times in phase order of the model,
'phase':the phases of the model mags,
'mags':input mags/fluxes in the phase order of the model,
'errs':errs in the phase order of the model,
'magsarefluxes':input value of magsarefluxes kwarg
}
}
'''
stimes, smags, serrs = sigclip_magseries(times, mags, errs,
sigclip=sigclip,
magsarefluxes=magsarefluxes)
# get rid of zero errs
nzind = npnonzero(serrs)
stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]
phase, pmags, perrs, ptimes, mintime = (
get_phased_quantities(stimes, smags, serrs, period)
)
if verbose:
LOGINFO('fitting Legendre series with '
'maximum Legendre polynomial order %s to '
'mag series with %s observations, '
'using period %.6f, folded at %.6f' % (legendredeg,
len(pmags),
period,
mintime))
# Least squares fit of Legendre polynomial series to the data. The window
# and domain (see "Using the Convenience Classes" in the numpy
# documentation) are handled automatically, scaling the times to a minimal
# domain in [-1,1], in which Legendre polynomials are a complete basis.
p = Legendre.fit(phase, pmags, legendredeg)
coeffs = p.coef
fitmags = p(phase)
# Now compute the chisq and red-chisq.
fitchisq = npsum(
((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs)
)
nparams = legendredeg + 1
fitredchisq = fitchisq/(len(pmags) - nparams - 1)
if verbose:
LOGINFO(
'Legendre fit done. chisq = %.5f, reduced chisq = %.5f' %
(fitchisq, fitredchisq)
)
# figure out the time of light curve minimum (i.e. the fit epoch)
# this is when the fit mag is maximum (i.e. the faintest)
# or if magsarefluxes = True, then this is when fit flux is minimum
if not magsarefluxes:
fitmagminind = npwhere(fitmags == npmax(fitmags))
else:
fitmagminind = npwhere(fitmags == npmin(fitmags))
if len(fitmagminind[0]) > 1:
fitmagminind = (fitmagminind[0][0],)
magseriesepoch = ptimes[fitmagminind]
# assemble the returndict
returndict = {
'fittype':'legendre',
'fitinfo':{
'legendredeg':legendredeg,
'fitmags':fitmags,
'fitepoch':magseriesepoch,
'finalparams':coeffs,
},
'fitchisq':fitchisq,
'fitredchisq':fitredchisq,
'fitplotfile':None,
'magseries':{
'times':ptimes,
'phase':phase,
'mags':pmags,
'errs':perrs,
'magsarefluxes':magsarefluxes
}
}
# make the fit plot if required
if plotfit and isinstance(plotfit, str):
make_fit_plot(phase, pmags, perrs, fitmags,
period, mintime, magseriesepoch,
plotfit,
magsarefluxes=magsarefluxes)
returndict['fitplotfile'] = plotfit
return returndict
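# ----------------------------------------------------------------------------
# Hedged illustration (not part of the public API): as noted in the comment
# inside `legendre_fit_magseries`, `Legendre.fit` maps the input domain onto
# the window [-1, 1], where the Legendre polynomials form a complete basis.
# The series below uses arbitrary demonstration values.
def _legendre_domain_example():
    '''Show the fitted series carrying its own domain -> window mapping.'''
    import numpy as np
    phase = np.linspace(0.0, 1.0, 500)
    mags = 14.0 + 0.05*np.cos(2.0*np.pi*phase)
    series = Legendre.fit(phase, mags, deg=10)
    # series.domain is the input range; series.window is the internal [-1, 1]
    return series.domain, series.window, series(phase)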
| lgbouma/astrobase | astrobase/lcfit/nonphysical.py | Python | mit | 25,469 |
#!/usr/bin/env python3
import pickle
class Apple(object):
pass
d = dict(a = 1, b = 2, c = 3, d = Apple())
p = pickle.dumps(d, 2)
print(type(p), p)
o = pickle.loads(p)
print(type(o), o) | JShadowMan/package | python/pickle/pcikling.py | Python | mit | 204 |
#!/usr/bin/env python
"""
Module for simulation.
"""
import copy
import os
import numpy as np
import scipy.interpolate
import astropy.io.fits as pyfits
import healpy as hp
import numpy.lib.recfunctions as recfuncs
import fitsio
import ugali.observation.catalog
import ugali.observation.mask
import ugali.observation.roi
import ugali.utils.projector
import ugali.utils.stats
import ugali.analysis.scan
from ugali.utils.projector import gal2cel, cel2gal, sr2deg, mod2dist
from ugali.utils.healpix import ang2pix, pix2ang
from ugali.utils.logger import logger
from ugali.utils.config import Config
class Generator:
"""
Class for generating the parameters of the simulation.
"""
def __init__(self,config, seed=None):
self.config = Config(config)
self.seed = seed
if self.seed is not None: np.random.seed(self.seed)
def generate(self, size=1):
params = dict(self.config['simulate']['params'])
dtype = [(n,'>f4') for n in params.keys()]
data = np.zeros(size,dtype=dtype)
lon,lat = params.pop('lon'),params.pop('lat')
data['lon'],data['lat'] = self.sky(lon,lat,size)
for key,value in params.items():
if value[-1] == 'linear':
data[key] = self.linear(value[0],value[1],size)
elif value[-1] == 'log':
data[key] = self.logarithmic(value[0],value[1],size)
else:
raise Exception('...')
return data
def sky(self,lon=None,lat=None,size=1):
logger.info("Generating %i random points..."%size)
# Random longitue and latitude
lon,lat = ugali.utils.stats.sky(lon,lat,size=10*size)
# Random healpix coordinates inside footprint
nside_pixel = self.config['coords']['nside_pixel']
pixels = ang2pix(nside_pixel,lon,lat)
if np.unique(pixels).size > 1:
inside = ugali.utils.skymap.inFootprint(self.config,pixels,nside=nside_pixel)
else:
inside = np.ones(len(pixels),dtype=bool)
return lon[inside][:size],lat[inside][:size]
def linear(self,low,high,size):
return np.random.uniform(low,high,size)
def logarithmic(self,low,high,size):
if low==0 and high==0:
logger.warning("Can't sample logarithmically with boundary of zero.")
return np.zeros(size)
return 10**np.random.uniform(np.log10(low),np.log10(high),size)
def detectability(self,**kwargs):
"""
An a priori detectability proxy.
"""
distance_modulus = kwargs.get('distance_modulus')
distance = mod2dist(distance_modulus)
stellar_mass = kwargs.get('stellar_mass')
extension = kwargs.get('extension')
# Normalized to 10^3 Msolar at mod=18
norm = 10**3/mod2dist(18)**2
        detect = stellar_mass / distance**2
        detect /= norm
        return detect
def write(self, filename, data=None):
if data is None: data = self.results
logger.info("Writing %s..."%filename)
if filename.endswith('.npy'):
np.save(filename,data)
elif filename.endswith('.fits'):
# Copies data, so be careful..
out = np.rec.array(data)
out.dtype.names = np.char.upper(out.dtype.names)
hdu = pyfits.new_table(out)
hdu.writeto(filename,clobber=True)
elif filename.endswith('.txt') or filename.endswith('.dat'):
np.savetxt(filename,data)
elif filename.endswith('.csv'):
np.savetxt(filename,data,delimiter=',')
else:
raise Exception('Unrecognized file extension: %s'%filename)
def run(self, outfile=None, size=None):
if size is None: size = self.config['simulate']['size']
data = self.generate(size)
dtype=[('kernel','S18'),('ts','>f4'),('fit_kernel','S18'),('fit_ts','>f4'),
('fit_mass','>f4'),('fit_mass_err','>f4'),
('fit_distance','>f4'),('fit_distance_err','>f4')]
results = np.array(np.nan*np.ones(size),dtype=dtype)
results = recfuncs.merge_arrays([data,results],flatten=True,asrecarray=False,usemask=False)
self.results = results
if outfile: self.write(outfile,results)
for i,d in enumerate(data):
params = dict(list(zip(data.dtype.names,d)))
lon,lat = params['lon'],params['lat']
distance_modulus = params['distance_modulus']
logger.info('\n(%i/%i); (lon, lat) = (%.2f, %.2f)'%(i+1,len(data),lon,lat))
roi = ugali.analysis.loglike.createROI(self.config,lon,lat)
mask = ugali.analysis.loglike.createMask(self.config,roi)
isochrone = ugali.analysis.loglike.createIsochrone(self.config)
kernel = ugali.analysis.loglike.createKernel(self.config,lon,lat)
pix = roi.indexTarget(lon,lat)
simulator = Simulator(self.config,roi)
#catalog = simulator.simulate(seed=self.seed, **params)
catalog = simulator.simulate(**params)
#print "Catalog annulus contains:",roi.inAnnulus(simulator.catalog.lon,simulator.catalog.lat).sum()
logger.info("Simulated catalog annulus contains %i stars"%roi.inAnnulus(catalog.lon,catalog.lat).sum())
if len(catalog.lon) < 1000:
logger.error("Simulation contains too few objects; skipping...")
continue
"""
like = ugali.analysis.loglike.LogLikelihood(self.config, roi, mask, catalog, isochrone, kernel)
like.set_params(distance_modulus=params['distance_modulus'])
like.sync_params()
results[i]['ts'] = 2*like.fit_richness()[0]
print 'TS=',results[i]['ts']
like2 = ugali.analysis.loglike.LogLikelihood(self.config, roi, mask, simulator.catalog, isochrone, kernel)
like2.set_params(distance_modulus=params['distance_modulus'])
like2.sync_params()
print 'TS=',2*like2.fit_richness()[0]
"""
#return simulator,like,like2
# Index of closest distance modulus
grid = ugali.analysis.scan.GridSearch(self.config,roi,mask,catalog,isochrone,kernel)
self.catalog = catalog
self.simulator = simulator
self.grid = grid
self.loglike = self.grid.loglike
# ADW: Should allow fit_distance to float in order to model search procedure
#fit_distance = float(distance_modulus)
distance_idx = np.fabs(grid.distance_modulus_array-params['distance_modulus']).argmin()
fit_distance = grid.distance_modulus_array[distance_idx]
grid.search(coords=(lon,lat),distance_modulus=fit_distance)
logger.info(str(self.loglike))
mle = grid.mle()
results[i]['kernel'] = simulator.kernel.name
results[i]['fit_kernel'] = grid.loglike.kernel.name
results[i]['ts'] = 2*grid.log_likelihood_sparse_array[distance_idx][pix]
results[i]['fit_ts'] = 2*np.max(grid.log_likelihood_sparse_array[:,pix])
results[i]['fit_mass'] = grid.stellar_mass_conversion*mle['richness']
results[i]['fit_distance'] = fit_distance #mle['distance_modulus']
err = grid.err()
richness_err = (err['richness'][1]-err['richness'][0])/2.
results[i]['fit_mass_err'] = grid.stellar_mass_conversion*richness_err
distance_modulus_err = (err['distance_modulus'][1]-err['distance_modulus'][0])/2.
results[i]['fit_distance_err'] = distance_modulus_err
for d in dtype:
logger.info('\t%s: %s'%(d[0], results[i][d[0]]))
if i%self.config['simulate']['save']==0 and outfile:
self.write(outfile,results)
if outfile: self.write(outfile,results)
return results
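# Illustrative sketch (an assumption, not taken from a shipped configuration):
# the `simulate.params` block consumed by `Generator.generate` maps each
# simulated quantity to a (low, high, sampling) triple with 'linear' or 'log'
# sampling, plus `lon`/`lat` entries that are handed straight to
# `Generator.sky`. Written YAML-style, it looks roughly like:
#
#   simulate:
#     params:
#       lon: null                            # passed to Generator.sky()
#       lat: null
#       stellar_mass: [1.0e3, 1.0e6, 'log']
#       distance_modulus: [16., 20., 'linear']
#       extension: [0.01, 0.5, 'log']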
############################################################
class Simulator(object):
"""
Class for simulating catalog data.
"""
def __init__(self, config, roi, **kwargs):
self.config = ugali.utils.config.Config(config)
self.roi = roi
#np.random.seed(0)
params = dict(self.config)
if self.config['simulate'].get('isochrone') is None:
params['simulate']['isochrone'] = params['isochrone']
if self.config['simulate'].get('kernel') is None:
params['simulate']['kernel'] = params['kernel']
self.isochrone = ugali.analysis.loglike.createIsochrone(params)
self.kernel = ugali.analysis.loglike.createKernel(params['simulate'],lon=self.roi.lon,lat=self.roi.lat)
self.mask = ugali.analysis.loglike.createMask(self.config,self.roi)
self._create_catalog(kwargs.get('catalog'))
self.photo_err_1,self.photo_err_2 = self.mask.photo_err_1,self.mask.photo_err_2
#self._photometricErrors()
self._setup_subpix()
#self._setup_cmd()
def _create_catalog(self,catalog=None):
"""
Bundle it.
"""
if catalog is None:
catalog = ugali.analysis.loglike.createCatalog(self.config,self.roi)
cut = self.mask.restrictCatalogToObservableSpace(catalog)
self.catalog = catalog.applyCut(cut)
def _photometricErrors(self, n_per_bin=100, plot=False):
"""
Realistic photometric errors estimated from catalog objects and mask.
Extend below the magnitude threshold with a flat extrapolation.
"""
self.catalog.spatialBin(self.roi)
if len(self.catalog.mag_1) < n_per_bin:
logger.warning("Catalog contains fewer objects than requested to calculate errors.")
n_per_bin = int(len(self.catalog.mag_1) / 3)
# Band 1
mag_1_thresh = self.mask.mask_1.mask_roi_sparse[self.catalog.pixel_roi_index] - self.catalog.mag_1
sorting_indices = np.argsort(mag_1_thresh)
mag_1_thresh_sort = mag_1_thresh[sorting_indices]
mag_err_1_sort = self.catalog.mag_err_1[sorting_indices]
# ADW: Can't this be done with np.median(axis=?)
mag_1_thresh_medians = []
mag_err_1_medians = []
for i in range(0, int(len(mag_1_thresh) / float(n_per_bin))):
mag_1_thresh_medians.append(np.median(mag_1_thresh_sort[n_per_bin * i: n_per_bin * (i + 1)]))
mag_err_1_medians.append(np.median(mag_err_1_sort[n_per_bin * i: n_per_bin * (i + 1)]))
if mag_1_thresh_medians[0] > 0.:
mag_1_thresh_medians = np.insert(mag_1_thresh_medians, 0, -99.)
mag_err_1_medians = np.insert(mag_err_1_medians, 0, mag_err_1_medians[0])
self.photo_err_1 = scipy.interpolate.interp1d(mag_1_thresh_medians, mag_err_1_medians,
bounds_error=False, fill_value=mag_err_1_medians[-1])
# Band 2
mag_2_thresh = self.mask.mask_2.mask_roi_sparse[self.catalog.pixel_roi_index] - self.catalog.mag_2
sorting_indices = np.argsort(mag_2_thresh)
mag_2_thresh_sort = mag_2_thresh[sorting_indices]
mag_err_2_sort = self.catalog.mag_err_2[sorting_indices]
mag_2_thresh_medians = []
mag_err_2_medians = []
for i in range(0, int(len(mag_2_thresh) / float(n_per_bin))):
mag_2_thresh_medians.append(np.median(mag_2_thresh_sort[n_per_bin * i: n_per_bin * (i + 1)]))
mag_err_2_medians.append(np.median(mag_err_2_sort[n_per_bin * i: n_per_bin * (i + 1)]))
if mag_2_thresh_medians[0] > 0.:
mag_2_thresh_medians = np.insert(mag_2_thresh_medians, 0, -99.)
mag_err_2_medians = np.insert(mag_err_2_medians, 0, mag_err_2_medians[0])
self.photo_err_2 = scipy.interpolate.interp1d(mag_2_thresh_medians, mag_err_2_medians,
bounds_error=False, fill_value=mag_err_2_medians[-1])
def _setup_subpix(self,nside=2**16):
"""
Subpixels for random position generation.
"""
# Only setup once...
if hasattr(self,'subpix'): return
# Simulate over full ROI
self.roi_radius = self.config['coords']['roi_radius']
# Setup background spatial stuff
logger.info("Setup subpixels...")
self.nside_pixel = self.config['coords']['nside_pixel']
self.nside_subpixel = self.nside_pixel * 2**4 # Could be config parameter
epsilon = np.degrees(hp.max_pixrad(self.nside_pixel)) # Pad roi radius to cover edge healpix
subpix = ugali.utils.healpix.query_disc(self.nside_subpixel,self.roi.vec,self.roi_radius+epsilon)
superpix = ugali.utils.healpix.superpixel(subpix,self.nside_subpixel,self.nside_pixel)
self.subpix = subpix[np.in1d(superpix,self.roi.pixels)]
def _setup_cmd(self,mode='cloud-in-cells'):
"""
The purpose here is to create a more finely binned
background CMD to sample from.
"""
# Only setup once...
if hasattr(self,'bkg_lambda'): return
logger.info("Setup color...")
# In the limit theta->0: 2*pi*(1-cos(theta)) -> pi*theta**2
# (Remember to convert from sr to deg^2)
#solid_angle_roi = sr2deg(2*np.pi*(1-np.cos(np.radians(self.roi_radius))))
solid_angle_roi = self.roi.area_pixel*len(self.roi.pixels)
# Large CMD bins cause problems when simulating
config = Config(self.config)
config['color']['n_bins'] *= 5 #10
config['mag']['n_bins'] *= 1 #2
#config['mask']['minimum_solid_angle'] = 0
roi = ugali.analysis.loglike.createROI(config,self.roi.lon,self.roi.lat)
mask = ugali.analysis.loglike.createMask(config,roi)
self.bkg_centers_color = roi.centers_color
self.bkg_centers_mag = roi.centers_mag
# Background CMD has units: [objs / deg^2 / mag^2]
cmd_background = mask.backgroundCMD(self.catalog,mode)
self.bkg_lambda=cmd_background*solid_angle_roi*roi.delta_color*roi.delta_mag
np.sum(self.bkg_lambda)
# Clean up
del config, roi, mask
def toy_background(self,mc_source_id=2,seed=None):
"""
Quick uniform background generation.
"""
logger.info("Running toy background simulation...")
size = 20000
nstar = np.random.poisson(size)
#np.random.seed(0)
logger.info("Simulating %i background stars..."%nstar)
### # Random points from roi pixels
### idx = np.random.randint(len(self.roi.pixels)-1,size=nstar)
### pix = self.roi.pixels[idx]
# Random points drawn from subpixels
logger.info("Generating uniform positions...")
idx = np.random.randint(0,len(self.subpix)-1,size=nstar)
lon,lat = pix2ang(self.nside_subpixel,self.subpix[idx])
pix = ang2pix(self.nside_pixel, lon, lat)
lon,lat = pix2ang(self.nside_pixel,pix)
# Single color
#mag_1 = 19.05*np.ones(len(pix))
#mag_2 = 19.10*np.ones(len(pix))
# Uniform in color
logger.info("Generating uniform CMD...")
mag_1 = np.random.uniform(self.config['mag']['min'],self.config['mag']['max'],size=nstar)
color = np.random.uniform(self.config['color']['min'],self.config['color']['max'],size=nstar)
mag_2 = mag_1 - color
# There is probably a better way to do this step without creating the full HEALPix map
mask = -1. * np.ones(hp.nside2npix(self.nside_pixel))
mask[self.roi.pixels] = self.mask.mask_1.mask_roi_sparse
mag_lim_1 = mask[pix]
mask = -1. * np.ones(hp.nside2npix(self.nside_pixel))
mask[self.roi.pixels] = self.mask.mask_2.mask_roi_sparse
mag_lim_2 = mask[pix]
#mag_err_1 = 1.0*np.ones(len(pix))
#mag_err_2 = 1.0*np.ones(len(pix))
mag_err_1 = self.photo_err_1(mag_lim_1 - mag_1)
mag_err_2 = self.photo_err_2(mag_lim_2 - mag_2)
mc_source_id = mc_source_id * np.ones(len(mag_1))
select = (mag_lim_1>mag_1)&(mag_lim_2>mag_2)
hdu = ugali.observation.catalog.makeHDU(self.config,mag_1[select],mag_err_1[select],
mag_2[select],mag_err_2[select],
lon[select],lat[select],mc_source_id[select])
catalog = ugali.observation.catalog.Catalog(self.config, data=hdu.data)
return catalog
def background(self,mc_source_id=2,seed=None):
"""
Create a simulation of the background stellar population.
Because some stars have been clipped to generate the CMD,
this function tends to slightly underestimate (~1%) the
background as compared to the true catalog.
The simulation of background object colors relies on the
data-derived CMD. As such, it is a binned random generator
        and thus has some fundamental limitations:
        - The expected number of stars in each CMD bin is drawn from a
          Poisson distribution with mean given by the binned CMD
- Colors are drawn from the CMD of the background annulus
- The number of stars per CMD bin is randomized according to the CMD
- The colors/mags are then uniformly distributed within the bin
- This leads to trouble with large bins when the cloud-in-cells
algorithm is applied to the simulated data
- The positions are chosen randomly over the spherical cap of the ROI
- Objects that are outside of the
WARNING: The cloud-in-cells method of generating
the CMD leads to some difficulties since it disperses
objects from high-density zones to low density zones.
- Magnitudes are not randomized according to their errors
"""
if seed is not None: np.random.seed(seed)
self._setup_cmd()
# Randomize the number of stars per bin according to Poisson distribution
nstar_per_bin = np.random.poisson(lam=self.bkg_lambda)
nstar = nstar_per_bin.sum()
logger.info("Simulating %i background stars..."%nstar)
if not self.config['simulate'].get('uniform'):
logger.info("Generating colors from background CMD.")
# Distribute the stars within each CMD bin
delta_color = self.bkg_centers_color[1]-self.bkg_centers_color[0]
delta_mag = self.bkg_centers_mag[1]-self.bkg_centers_mag[0]
# Distribute points within each color-mag bins
xx,yy = np.meshgrid(self.bkg_centers_color,self.bkg_centers_mag)
color = np.repeat(xx.flatten(),repeats=nstar_per_bin.flatten())
color += np.random.uniform(-delta_color/2.,delta_color/2.,size=nstar)
mag_1 = np.repeat(yy.flatten(),repeats=nstar_per_bin.flatten())
mag_1 += np.random.uniform(-delta_mag/2.,delta_mag/2.,size=nstar)
else:
# Uniform color-magnitude distribution
logger.info("Generating uniform CMD.")
mag_1 = np.random.uniform(self.config['mag']['min'],self.config['mag']['max'],size=nstar)
color = np.random.uniform(self.config['color']['min'],self.config['color']['max'],size=nstar)
mag_2 = mag_1 - color
# Random points drawn from healpix subpixels
logger.info("Generating uniform positions...")
idx = np.random.randint(0,len(self.subpix)-1,size=nstar)
lon,lat = pix2ang(self.nside_subpixel,self.subpix[idx])
nside_pixel = self.nside_pixel
pix = ang2pix(nside_pixel, lon, lat)
# There is probably a better way to do this step without creating the full HEALPix map
mask = -1. * np.ones(hp.nside2npix(nside_pixel))
mask[self.roi.pixels] = self.mask.mask_1.mask_roi_sparse
mag_lim_1 = mask[pix]
mask = -1. * np.ones(hp.nside2npix(nside_pixel))
mask[self.roi.pixels] = self.mask.mask_2.mask_roi_sparse
mag_lim_2 = mask[pix]
mag_err_1 = self.photo_err_1(mag_lim_1 - mag_1)
mag_err_2 = self.photo_err_2(mag_lim_2 - mag_2)
mc_source_id = mc_source_id * np.ones(len(mag_1))
# ADW: Should magnitudes be randomized by the erros?
#mag_1 += (np.random.normal(size=len(mag_1)) * mag_err_1)
#mag_2 += (np.random.normal(size=len(mag_2)) * mag_err_2)
select = (mag_lim_1>mag_1)&(mag_lim_2>mag_2)
### # Make sure objects lie within the original cmd (should be done later...)
### select &= (ugali.utils.binning.take2D(self.mask.solid_angle_cmd, color, mag_1,
### self.roi.bins_color, self.roi.bins_mag) > 0)
logger.info("Clipping %i simulated background stars..."%(~select).sum())
hdu = ugali.observation.catalog.makeHDU(self.config,mag_1[select],mag_err_1[select],
mag_2[select],mag_err_2[select],
lon[select],lat[select],mc_source_id[select])
catalog = ugali.observation.catalog.Catalog(self.config, data=hdu.data)
return catalog
def satellite(self,stellar_mass,distance_modulus,mc_source_id=1,seed=None,**kwargs):
"""
Create a simulated satellite. Returns a catalog object.
"""
if seed is not None: np.random.seed(seed)
isochrone = kwargs.pop('isochrone',self.isochrone)
kernel = kwargs.pop('kernel',self.kernel)
for k,v in kwargs.items():
if k in kernel.params.keys(): setattr(kernel,k,v)
mag_1, mag_2 = isochrone.simulate(stellar_mass, distance_modulus)
lon, lat = kernel.simulate(len(mag_1))
logger.info("Simulating %i satellite stars..."%len(mag_1))
pix = ang2pix(self.config['coords']['nside_pixel'], lon, lat)
# There is probably a better way to do this step without creating the full HEALPix map
mask = -1. * np.ones(hp.nside2npix(self.config['coords']['nside_pixel']))
mask[self.roi.pixels] = self.mask.mask_1.mask_roi_sparse
mag_lim_1 = mask[pix]
mask = -1. * np.ones(hp.nside2npix(self.config['coords']['nside_pixel']))
mask[self.roi.pixels] = self.mask.mask_2.mask_roi_sparse
mag_lim_2 = mask[pix]
mag_err_1 = self.photo_err_1(mag_lim_1 - mag_1)
mag_err_2 = self.photo_err_2(mag_lim_2 - mag_2)
# Randomize magnitudes by their errors
mag_obs_1 = mag_1+np.random.normal(size=len(mag_1))*mag_err_1
mag_obs_2 = mag_2+np.random.normal(size=len(mag_2))*mag_err_2
#mag_obs_1 = mag_1
#mag_obs_2 = mag_2
#select = np.logical_and(mag_obs_1 < mag_lim_1, mag_obs_2 < mag_lim_2)
select = (mag_lim_1>mag_obs_1)&(mag_lim_2>mag_obs_2)
# Make sure objects lie within the original cmd (should also be done later...)
#select &= (ugali.utils.binning.take2D(self.mask.solid_angle_cmd, mag_obs_1 - mag_obs_2, mag_obs_1,self.roi.bins_color, self.roi.bins_mag) > 0)
#return mag_1_obs[cut], mag_2_obs[cut], lon[cut], lat[cut]
logger.info("Clipping %i simulated satellite stars..."%(~select).sum())
mc_source_id = mc_source_id * np.ones(len(mag_1))
hdu = ugali.observation.catalog.makeHDU(self.config,mag_obs_1[select],mag_err_1[select],
mag_obs_2[select],mag_err_2[select],
lon[select],lat[select],mc_source_id[select])
catalog = ugali.observation.catalog.Catalog(self.config, data=hdu.data)
return catalog
def satellite2(self,stellar_mass,distance_modulus,mc_source_id=1,seed=None,**kwargs):
"""
Create a simulated satellite. Returns a catalog object.
"""
if seed is not None: np.random.seed(seed)
isochrone = kwargs.pop('isochrone',self.isochrone)
kernel = kwargs.pop('kernel',self.kernel)
for k,v in kwargs.items():
if k in kernel.params.keys(): setattr(kernel,k,v)
mag_1, mag_2 = isochrone.simulate(stellar_mass, distance_modulus)
lon, lat = kernel.simulate(len(mag_1))
logger.info("Simulating %i satellite stars..."%len(mag_1))
pix = ang2pix(self.config['coords']['nside_pixel'], lon, lat)
# There is probably a better way to do this step without creating the full HEALPix map
mask = -1. * np.ones(hp.nside2npix(self.config['coords']['nside_pixel']))
mask[self.roi.pixels] = self.mask.mask_1.mask_roi_sparse
mag_lim_1 = mask[pix]
mask = -1. * np.ones(hp.nside2npix(self.config['coords']['nside_pixel']))
mask[self.roi.pixels] = self.mask.mask_2.mask_roi_sparse
mag_lim_2 = mask[pix]
mag_err_1 = self.mask.photo_err_1(mag_lim_1 - mag_1)
mag_err_2 = self.mask.photo_err_2(mag_lim_2 - mag_2)
# Completeness is a function of true magnitude
method = 'step'
if method is None or method == 'none':
comp = np.ones(len(mag_1))
elif self.config['catalog']['band_1_detection']:
comp=self.mask.completeness(mag_lim_1-mag_1, method=method)
elif not self.config['catalog']['band_1_detection']:
comp=self.mask.completeness(mag_lim_2-mag_2, method=method)
else:
comp_1 = self.mask.completeness(mag_lim_1-mag_1, method=method)
comp_2 = self.mask.completeness(mag_lim_2-mag_2, method=method)
comp = comp_1*comp_2
accept = comp > 1 - np.random.uniform(size=len(mag_1))
# Randomize magnitudes by their errors
mag_obs_1 = mag_1 + (np.random.normal(size=len(mag_1))*mag_err_1)
mag_obs_2 = mag_2 + (np.random.normal(size=len(mag_2))*mag_err_2)
#select = np.logical_and(mag_obs_1 < mag_lim_1, mag_obs_2 < mag_lim_2)
select = (mag_lim_1>mag_obs_1)&(mag_lim_2>mag_obs_2)&accept
### # Make sure objects lie within the original cmd (should also be done later...)
### select &= (ugali.utils.binning.take2D(self.mask.solid_angle_cmd, color, mag_1,
### self.roi.bins_color, self.roi.bins_mag) > 0)
#return mag_1_obs[cut], mag_2_obs[cut], lon[cut], lat[cut]
logger.info("Clipping %i simulated satellite stars..."%(~select).sum())
mc_source_id = mc_source_id * np.ones(len(mag_1))
hdu = ugali.observation.catalog.makeHDU(self.config,mag_obs_1[select],mag_err_1[select],
mag_obs_2[select],mag_err_2[select],
lon[select],lat[select],mc_source_id[select])
catalog = ugali.observation.catalog.Catalog(self.config, data=hdu.data)
return catalog
def simulate(self, seed=None, **kwargs):
if seed is not None: np.random.seed(seed)
logger.info("Simulating object catalog...")
catalogs = []
#catalogs.append(self.toy_background(seed=seed))
catalogs.append(self.background(seed=seed))
catalogs.append(self.satellite(seed=seed,**kwargs))
logger.info("Merging simulated catalogs...")
catalog = ugali.observation.catalog.mergeCatalogs(catalogs)
nsig = (catalog.mc_source_id == 1).sum()
nbkg = (catalog.mc_source_id == 2).sum()
logger.info("Simulated catalog contains: %i background stars"%nbkg)
logger.info("Simulated catalog contains: %i satellite stars"%nsig)
return catalog
def makeHDU(self, mag_1, mag_err_1, mag_2, mag_err_2, lon, lat, mc_source_id):
"""
Create a catalog fits file object based on input data.
ADW: This should be combined with the write_membership
function of loglike.
"""
if self.config['catalog']['coordsys'].lower() == 'cel' \
and self.config['coords']['coordsys'].lower() == 'gal':
lon, lat = ugali.utils.projector.gal2cel(lon, lat)
elif self.config['catalog']['coordsys'].lower() == 'gal' \
and self.config['coords']['coordsys'].lower() == 'cel':
lon, lat = ugali.utils.projector.cel2gal(lon, lat)
columns = [
pyfits.Column(name=self.config['catalog']['objid_field'],
format = 'D',array = np.arange(len(lon))),
pyfits.Column(name=self.config['catalog']['lon_field'],
format = 'D',array = lon),
pyfits.Column(name = self.config['catalog']['lat_field'],
format = 'D',array = lat),
pyfits.Column(name = self.config['catalog']['mag_1_field'],
format = 'E',array = mag_1),
pyfits.Column(name = self.config['catalog']['mag_err_1_field'],
format = 'E',array = mag_err_1),
pyfits.Column(name = self.config['catalog']['mag_2_field'],
format = 'E',array = mag_2),
pyfits.Column(name = self.config['catalog']['mag_err_2_field'],
format = 'E',array = mag_err_2),
pyfits.Column(name = self.config['catalog']['mc_source_id_field'],
format = 'I',array = mc_source_id),
]
hdu = pyfits.new_table(columns)
return hdu
def write(self, outfile):
"""
"""
pass
############################################################
class Analyzer(object):
"""
Class for generating the parameters of the simulation.
"""
def __init__(self, config, seed=None):
self.config = Config(config)
def create_population(self):
if self.config['simulate']['popfile']:
filename = os.path.join(self.config['simulate']['dirname'],self.config['simulate']['popfile'])
population = fitsio.read(filename)
else:
size = self.config['simulate']['size']
population = self.generate(size)
self.population = population
return self.population
def write(self, filename, data=None):
""" Write the output results """
if data is None: data = self.results
logger.info("Writing %s..."%filename)
if filename.endswith('.npy'):
np.save(filename,data)
elif filename.endswith('.fits'):
# Copies data, so be careful..
out = np.rec.array(data)
out.dtype.names = np.char.upper(out.dtype.names)
hdu = pyfits.new_table(out)
hdu.writeto(filename,clobber=True)
elif filename.endswith('.txt') or filename.endswith('.dat'):
np.savetxt(filename,data)
elif filename.endswith('.csv'):
np.savetxt(filename,data,delimiter=',')
else:
raise Exception('Unrecognized file extension: %s'%filename)
def run(self, catalog=None, outfile=None):
#if size is None: size = self.config['simulate']['size']
#data = self.generate(size)
data = self.create_population()
size = len(data)
dtype=[('kernel','S18'),('ts','>f4'),('fit_kernel','S18'),('fit_ts','>f4'),
('fit_mass','>f4'),('fit_mass_err','>f4'),
('fit_distance','>f4'),('fit_distance_err','>f4')]
results = np.array(np.nan*np.ones(size),dtype=dtype)
results = recfuncs.merge_arrays([data,results],flatten=True,asrecarray=False,usemask=False)
self.results = results
if outfile: self.write(outfile,results)
for i,d in enumerate(data):
params = dict(list(zip(data.dtype.names,d)))
lon,lat = params['ra'],params['dec']
distance_modulus = params['distance_modulus']
logger.info('\n(%i/%i); (lon, lat) = (%.2f, %.2f)'%(i+1,len(data),lon,lat))
roi = ugali.analysis.loglike.createROI(self.config,lon,lat)
mask = ugali.analysis.loglike.createMask(self.config,roi)
isochrone = ugali.analysis.loglike.createIsochrone(self.config)
kernel = ugali.analysis.loglike.createKernel(self.config,lon=lon,lat=lat)
pix = roi.indexTarget(lon,lat)
            if not self.config['simulate']['catfile']:
simulator = Simulator(self.config,roi)
#catalog = simulator.simulate(seed=self.seed, **params)
catalog = simulator.simulate(**params)
#print "Catalog annulus contains:",roi.inAnnulus(simulator.catalog.lon,simulator.catalog.lat).sum()
else:
pass
import pdb; pdb.set_trace()
logger.info("Simulated catalog annulus contains %i stars"%roi.inAnnulus(catalog.lon,catalog.lat).sum())
if len(catalog.lon) < 1000:
logger.error("Simulation contains too few objects; skipping...")
continue
"""
like = ugali.analysis.loglike.LogLikelihood(self.config, roi, mask, catalog, isochrone, kernel)
like.set_params(distance_modulus=params['distance_modulus'])
like.sync_params()
results[i]['ts'] = 2*like.fit_richness()[0]
print 'TS=',results[i]['ts']
like2 = ugali.analysis.loglike.LogLikelihood(self.config, roi, mask, simulator.catalog, isochrone, kernel)
like2.set_params(distance_modulus=params['distance_modulus'])
like2.sync_params()
print 'TS=',2*like2.fit_richness()[0]
"""
#return simulator,like,like2
# Index of closest distance modulus
grid = ugali.analysis.scan.GridSearch(self.config,roi,mask,catalog,isochrone,kernel)
self.catalog = catalog
self.simulator = simulator
self.grid = grid
self.loglike = self.grid.loglike
# ADW: Should allow fit_distance to float in order to model search procedure
#fit_distance = float(distance_modulus)
distance_idx = np.fabs(grid.distance_modulus_array-params['distance_modulus']).argmin()
fit_distance = grid.distance_modulus_array[distance_idx]
grid.search(coords=(lon,lat),distance_modulus=fit_distance)
logger.info(str(self.loglike))
mle = grid.mle()
results[i]['kernel'] = simulator.kernel.name
results[i]['fit_kernel'] = grid.loglike.kernel.name
results[i]['ts'] = 2*grid.log_likelihood_sparse_array[distance_idx][pix]
results[i]['fit_ts'] = 2*np.max(grid.log_likelihood_sparse_array[:,pix])
results[i]['fit_mass'] = grid.stellar_mass_conversion*mle['richness']
results[i]['fit_distance'] = fit_distance #mle['distance_modulus']
err = grid.err()
richness_err = (err['richness'][1]-err['richness'][0])/2.
results[i]['fit_mass_err'] = grid.stellar_mass_conversion*richness_err
distance_modulus_err = (err['distance_modulus'][1]-err['distance_modulus'][0])/2.
results[i]['fit_distance_err'] = distance_modulus_err
for d in dtype:
logger.info('\t%s: %s'%(d[0], results[i][d[0]]))
if i%self.config['simulate']['save']==0 and outfile:
self.write(outfile,results)
if outfile: self.write(outfile,results)
return results
############################################################
def satellite(isochrone, kernel, stellar_mass, distance_modulus,**kwargs):
"""
Wrapping the isochrone and kernel simulate functions.
"""
mag_1, mag_2 = isochrone.simulate(stellar_mass, distance_modulus)
lon, lat = kernel.simulate(len(mag_1))
return mag_1, mag_2, lon, lat
############################################################
if __name__ == "__main__":
import ugali.utils.parser
    description = "Script for simulating satellite populations and fitting them with the likelihood scan."
parser = ugali.utils.parser.Parser(description=description)
parser.add_config()
parser.add_argument('outfile',metavar='outfile.fits',help='Output fits file.')
parser.add_debug()
parser.add_verbose()
parser.add_seed()
opts = parser.parse_args()
config = Config(opts.config)
generator = Generator(config,opts.seed)
    results = generator.run(outfile=opts.outfile)
| kadrlica/ugali | ugali/simulation/simulator.py | Python | mit | 36,482 |
from django.shortcuts import resolve_url
from django.test import TestCase, override_settings
class TestGet(TestCase):
def setUp(self):
self.resp = self.client.get(resolve_url('layers:home'))
def test_status_code(self):
self.assertEqual(200, self.resp.status_code)
def test_template(self):
resp = self.client.get(resolve_url('layers:home'))
self.assertTemplateUsed(resp, 'layers/home.html')
def test_contents(self):
expected = '<title>Jarbas | Serenata de Amor</title>'
self.assertIn(expected, self.resp.content.decode('utf-8'))
@override_settings(GOOGLE_STREET_VIEW_API_KEY=42)
def test_google_api_key(self):
resp = self.client.get(resolve_url('layers:home'))
expected = "googleStreetViewApiKey: '42'"
self.assertIn(expected, resp.content.decode('utf-8'))
| marcusrehm/serenata-de-amor | jarbas/layers/tests/test_home_view.py | Python | mit | 859 |
"""Pylons specific code to facilitate using AuthKit with Pylons
There is a full Pylons manual in addition to the AuthKit manual which
you should read if you plan to use AuthKit with Pylons
.. Note ::
In addition to the authorize methods described here, you can also use the
default ``authkit.authorize.middleware`` function to add WSGI middleware
authorization checks to your Pylons application since Pylons has a full
WSGI middleware stack. Just add the middleware to your project's
``config/middleware.py`` file.
"""
from decorator import decorator
from pylons import request
from authkit.authorize import PermissionSetupError
from authkit.authorize import NotAuthenticatedError, NotAuthorizedError
from authkit.authorize import authorize_request as authkit_authorize_request
def authorize(permission):
"""
    A decorator for protecting a Pylons controller action. It takes the
    permission to check as its only argument and works with all types of
    permission objects.
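
    For example, a minimal sketch (``HasAuthKitRole`` is one of the standard
    AuthKit permission objects; the controller and role name here are purely
    illustrative)::

        from authkit.permissions import HasAuthKitRole

        class AdminController(BaseController):

            @authorize(HasAuthKitRole('admin'))
            def index(self):
                return Response('Only users with the admin role get here')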
"""
def validate(func, self, *args, **kwargs):
all_conf = request.environ.get('authkit.config')
if all_conf is None:
raise Exception('Authentication middleware not present')
if all_conf.get('setup.enable', True) is True:
def app(environ, start_response):
return func(self, *args, **kwargs)
return permission.check(app, request.environ, self.start_response)
else:
return func(self, *args, **kwargs)
return decorator(validate)
def authorize_request(permission):
"""
This function can be used within a controller action to ensure that no code
after the function call is executed if the user doesn't pass the permission
check specified by ``permission``.
.. Note ::
Unlike the ``authorize()`` decorator or
``authkit.authorize.middleware`` middleware, this function has no
access to the WSGI response so cannot be used to check response-based
permissions. Since almost all AuthKit permissions are request-based
this shouldn't be a big problem unless you are defining your own
advanced permission checks.
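
    Use it like this (assuming ``HasAuthKitRole`` has been imported from
    ``authkit.permissions``; the role name is purely illustrative)::

        def index(self):
            authorize_request(HasAuthKitRole('admin'))
            return Response('You are authorized')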
"""
authkit_authorize_request(request.environ, permission)
def authorized(permission):
"""
    Similar to the ``authorize_request()`` function, but rather than raising
    an exception to stop the request if a permission check fails, this
    function simply returns ``False``. This lets you test permissions in your
    code without triggering a sign in, so it can be used in a controller
    action or template.
Use like this::
if authorized(permission):
return Response('You are authorized')
else:
return Response('Access denied')
"""
try:
authorize_request(permission)
except (NotAuthorizedError, NotAuthenticatedError):
return False
else:
return True
| cedadev/AuthKit | authkit/authorize/pylons_adaptors.py | Python | mit | 3,015 |
from os import path
import sys
import rtree
import json
from polyIntersect.micro_functions.poly_intersect import esri_server2ogr
from polyIntersect.micro_functions.poly_intersect import cartodb2ogr
from polyIntersect.micro_functions.poly_intersect import json2ogr
from polyIntersect.micro_functions.poly_intersect import ogr2json
from polyIntersect.micro_functions.poly_intersect import dissolve
from polyIntersect.micro_functions.poly_intersect import intersect
from polyIntersect.micro_functions.poly_intersect import index_featureset
from polyIntersect.micro_functions.poly_intersect import buffer_to_dist
from polyIntersect.micro_functions.poly_intersect import project_local
from polyIntersect.micro_functions.poly_intersect import project_global
from polyIntersect.micro_functions.poly_intersect import get_area
from shapely.geometry.polygon import Polygon
from shapely.geometry.multipolygon import MultiPolygon
from .sample_data import DISSOLVE_GEOJSON
from .sample_data import INTERSECT_BASE_GEOJSON
from .sample_data import INTERSECT_PARTIALLY_WITHIN_GEOJSON
from .sample_data import INTERSECT_MULTIPLE_FEATURES
from .sample_data import INDONESIA_USER_POLY
from .sample_data import BRAZIL_USER_POLY
from .sample_data import AZE_TEST
fixtures = path.abspath(path.join(path.dirname(__file__), 'fixtures'))
def test_successfully_index_featureset():
featureset = json2ogr(DISSOLVE_GEOJSON)
index = index_featureset(featureset)
assert isinstance(index, rtree.index.Index)
def test_successfully_dissolve_string_field():
featureset = json2ogr(DISSOLVE_GEOJSON)
assert len(featureset['features']) == 4
geom_diss = dissolve(featureset, fields='str_value')
assert len(geom_diss['features']) == 2
def test_successfully_dissolve_int_field():
featureset = json2ogr(DISSOLVE_GEOJSON)
assert len(featureset['features']) == 4
geom_diss = dissolve(featureset, fields='int_value')
assert len(geom_diss['features']) == 2
def test_successfully_dissolve_decimal_field():
featureset = json2ogr(DISSOLVE_GEOJSON)
assert len(featureset['features']) == 4
geom_diss = dissolve(featureset, fields='dec_value')
assert len(geom_diss['features']) == 2
def test_successfully_dissolve_no_field_arg():
featureset = json2ogr(DISSOLVE_GEOJSON)
assert len(featureset['features']) == 4
geom_diss = dissolve(featureset)
assert len(geom_diss['features']) == 1
def test_maintain_crs():
featureset = json2ogr(DISSOLVE_GEOJSON)
assert len(featureset['features']) == 4
geom_diss = dissolve(featureset, fields='str_value')
assert len(geom_diss['features']) == 2
assert 'crs' in geom_diss.keys()
def test_successful_intersection():
featureset1 = json2ogr(INTERSECT_PARTIALLY_WITHIN_GEOJSON)
featureset2 = json2ogr(INTERSECT_BASE_GEOJSON)
result_featureset = intersect(featureset1, featureset2)
assert len(result_featureset['features']) == 1
assert isinstance(result_featureset['features'][0]['geometry'],
MultiPolygon)
def test_project():
featureset = json2ogr(DISSOLVE_GEOJSON)
assert len(featureset['features']) == 4
geom_projected = project_local(featureset)
assert isinstance(geom_projected, dict)
assert 'features' in geom_projected.keys()
assert (geom_projected['crs']['properties']['name']
== 'urn:ogc:def:uom:EPSG::9102')
assert (featureset['crs']['properties']['name']
!= 'urn:ogc:def:uom:EPSG::9102')
geom_projected_back = project_global(geom_projected)
assert (geom_projected_back['crs']['properties']['name']
!= 'urn:ogc:def:uom:EPSG::9102')
def test_project_already_projected():
featureset = json2ogr(DISSOLVE_GEOJSON)
assert len(featureset['features']) == 4
geom_projected1 = project_local(featureset)
try:
geom_projected2 = project_local(geom_projected1)
except ValueError as e:
assert str(e) == 'geometries have already been projected with the \
World Azimuthal Equidistant coordinate system'
def test_projected_buffer():
featureset = json2ogr(DISSOLVE_GEOJSON)
assert len(featureset['features']) == 4
geom_projected = project_local(featureset)
geom_buffered = buffer_to_dist(geom_projected, 10)
assert isinstance(geom_buffered, dict)
assert 'features' in geom_buffered.keys()
assert len(geom_buffered['features']) == 4
for f_in, f_out in zip(featureset['features'], geom_buffered['features']):
assert f_out['geometry'].area > f_in['geometry'].area
def test_not_projected_buffer():
featureset = json2ogr(DISSOLVE_GEOJSON)
assert len(featureset['features']) == 4
try:
geom_buffered = buffer_to_dist(featureset, 10)
except ValueError as e:
assert str(e) == ('geometries must be projected with the World ' +
'Azimuthal Equidistant coordinate system')
def test_json2ogr():
geom_converted_version = json2ogr(DISSOLVE_GEOJSON)
assert isinstance(geom_converted_version, dict)
assert 'features' in geom_converted_version.keys()
for f in geom_converted_version['features']:
assert isinstance(f['geometry'], Polygon)
def test_ogr2json():
geom_converted_version = json2ogr(DISSOLVE_GEOJSON)
geom_converted_back = ogr2json(geom_converted_version)
for i, f in enumerate(json.loads(geom_converted_back)['features']):
assert isinstance(f['geometry'], dict)
def test_esri_server2json():
host = 'http://gis-gfw.wri.org'
layer = 'forest_cover/MapServer/0'
layer_url = path.join(host, 'arcgis/rest/services', layer)
featureset = esri_server2ogr(layer_url, INDONESIA_USER_POLY, '')
assert 'features' in featureset.keys()
assert len(featureset['features']) > 0
assert isinstance(featureset['features'][0]['geometry'], (Polygon, MultiPolygon))
def test_cartodb2json():
url = ('https://wri-01.carto.com/tables/' +
'alliance_for_zero_extinction_sites_species_joi')
featureset = cartodb2ogr(url, AZE_TEST, 'sitename_1,species')
assert 'features' in featureset.keys()
assert len(featureset['features']) > 0
assert isinstance(featureset['features'][0]['geometry'], (Polygon, MultiPolygon))
| blueraster/poly-intersect | tests/test_analysis_funcs.py | Python | mit | 6,268 |
#!/usr/bin/env python
from __future__ import unicode_literals
'''Generate header file for nanopb from a ProtoBuf FileDescriptorSet.'''
nanopb_version = "nanopb-0.3.7"
import sys
import re
from functools import reduce
try:
# Add some dummy imports to keep packaging tools happy.
import google, distutils.util # bbfreeze seems to need these
import pkg_resources # pyinstaller / protobuf 2.5 seem to need these
except:
# Don't care, we will error out later if it is actually important.
pass
try:
import google.protobuf.text_format as text_format
import google.protobuf.descriptor_pb2 as descriptor
except:
sys.stderr.write('''
*************************************************************
*** Could not import the Google protobuf Python libraries ***
*** Try installing package 'python-protobuf' or similar. ***
*************************************************************
''' + '\n')
raise
try:
import proto.nanopb_pb2 as nanopb_pb2
import proto.plugin_pb2 as plugin_pb2
except:
sys.stderr.write('''
********************************************************************
*** Failed to import the protocol definitions for generator. ***
*** You have to run 'make' in the nanopb/generator/proto folder. ***
********************************************************************
''' + '\n')
raise
# ---------------------------------------------------------------------------
# Generation of single fields
# ---------------------------------------------------------------------------
import time
import os.path
# Values are tuple (c type, pb type, encoded size, int_size_allowed)
FieldD = descriptor.FieldDescriptorProto
datatypes = {
FieldD.TYPE_BOOL: ('bool', 'BOOL', 1, False),
FieldD.TYPE_DOUBLE: ('double', 'DOUBLE', 8, False),
FieldD.TYPE_FIXED32: ('uint32_t', 'FIXED32', 4, False),
FieldD.TYPE_FIXED64: ('uint64_t', 'FIXED64', 8, False),
FieldD.TYPE_FLOAT: ('float', 'FLOAT', 4, False),
FieldD.TYPE_INT32: ('int32_t', 'INT32', 10, True),
FieldD.TYPE_INT64: ('int64_t', 'INT64', 10, True),
FieldD.TYPE_SFIXED32: ('int32_t', 'SFIXED32', 4, False),
FieldD.TYPE_SFIXED64: ('int64_t', 'SFIXED64', 8, False),
FieldD.TYPE_SINT32: ('int32_t', 'SINT32', 5, True),
FieldD.TYPE_SINT64: ('int64_t', 'SINT64', 10, True),
FieldD.TYPE_UINT32: ('uint32_t', 'UINT32', 5, True),
FieldD.TYPE_UINT64: ('uint64_t', 'UINT64', 10, True)
}
# Integer size overrides (from .proto settings)
intsizes = {
nanopb_pb2.IS_8: 'int8_t',
nanopb_pb2.IS_16: 'int16_t',
nanopb_pb2.IS_32: 'int32_t',
nanopb_pb2.IS_64: 'int64_t',
}
# String types (for python 2 / python 3 compatibility)
try:
strtypes = (unicode, str)
except NameError:
strtypes = (str, )
class Names:
'''Keeps a set of nested names and formats them to C identifier.'''
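    # For example (illustrative): str(Names(('foo', 'bar')) + 'baz') gives
    # 'foo_bar_baz', which is how nested protobuf names are flattened into
    # C identifiers throughout this generator.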
def __init__(self, parts = ()):
if isinstance(parts, Names):
parts = parts.parts
self.parts = tuple(parts)
def __str__(self):
return '_'.join(self.parts)
def __add__(self, other):
if isinstance(other, strtypes):
return Names(self.parts + (other,))
elif isinstance(other, tuple):
return Names(self.parts + other)
else:
raise ValueError("Name parts should be of type str")
def __eq__(self, other):
return isinstance(other, Names) and self.parts == other.parts
def names_from_type_name(type_name):
'''Parse Names() from FieldDescriptorProto type_name'''
if type_name[0] != '.':
raise NotImplementedError("Lookup of non-absolute type names is not supported")
return Names(type_name[1:].split('.'))
def varint_max_size(max_value):
'''Returns the maximum number of bytes a varint can take when encoded.'''
if max_value < 0:
max_value = 2**64 - max_value
for i in range(1, 11):
if (max_value >> (i * 7)) == 0:
return i
raise ValueError("Value too large for varint: " + str(max_value))
assert varint_max_size(-1) == 10
assert varint_max_size(0) == 1
assert varint_max_size(127) == 1
assert varint_max_size(128) == 2
class EncodedSize:
'''Class used to represent the encoded size of a field or a message.
Consists of a combination of symbolic sizes and integer sizes.'''
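    # For example (illustrative): EncodedSize(5) + 'OtherMessage_size' prints
    # as '(5 + OtherMessage_size)', and its upperlimit() falls back to 2**32-1
    # because a symbolic size cannot be bounded at generation time.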
def __init__(self, value = 0, symbols = []):
if isinstance(value, EncodedSize):
self.value = value.value
self.symbols = value.symbols
elif isinstance(value, strtypes + (Names,)):
self.symbols = [str(value)]
self.value = 0
else:
self.value = value
self.symbols = symbols
def __add__(self, other):
if isinstance(other, int):
return EncodedSize(self.value + other, self.symbols)
elif isinstance(other, strtypes + (Names,)):
return EncodedSize(self.value, self.symbols + [str(other)])
elif isinstance(other, EncodedSize):
return EncodedSize(self.value + other.value, self.symbols + other.symbols)
else:
raise ValueError("Cannot add size: " + repr(other))
def __mul__(self, other):
if isinstance(other, int):
return EncodedSize(self.value * other, [str(other) + '*' + s for s in self.symbols])
else:
raise ValueError("Cannot multiply size: " + repr(other))
def __str__(self):
if not self.symbols:
return str(self.value)
else:
return '(' + str(self.value) + ' + ' + ' + '.join(self.symbols) + ')'
def upperlimit(self):
if not self.symbols:
return self.value
else:
return 2**32 - 1
class Enum:
def __init__(self, names, desc, enum_options):
'''desc is EnumDescriptorProto'''
self.options = enum_options
self.names = names + desc.name
if enum_options.long_names:
self.values = [(self.names + x.name, x.number) for x in desc.value]
else:
self.values = [(names + x.name, x.number) for x in desc.value]
self.value_longnames = [self.names + x.name for x in desc.value]
self.packed = enum_options.packed_enum
def has_negative(self):
for n, v in self.values:
if v < 0:
return True
return False
def encoded_size(self):
return max([varint_max_size(v) for n,v in self.values])
def __str__(self):
result = 'typedef enum _%s {\n' % self.names
result += ',\n'.join([" %s = %d" % x for x in self.values])
result += '\n}'
if self.packed:
result += ' pb_packed'
result += ' %s;' % self.names
result += '\n#define _%s_MIN %s' % (self.names, self.values[0][0])
result += '\n#define _%s_MAX %s' % (self.names, self.values[-1][0])
result += '\n#define _%s_ARRAYSIZE ((%s)(%s+1))' % (self.names, self.names, self.values[-1][0])
if not self.options.long_names:
# Define the long names always so that enum value references
# from other files work properly.
for i, x in enumerate(self.values):
result += '\n#define %s %s' % (self.value_longnames[i], x[0])
return result
class FieldMaxSize:
def __init__(self, worst = 0, checks = [], field_name = 'undefined'):
if isinstance(worst, list):
self.worst = max(i for i in worst if i is not None)
else:
self.worst = worst
self.worst_field = field_name
self.checks = list(checks)
def extend(self, extend, field_name = None):
self.worst = max(self.worst, extend.worst)
if self.worst == extend.worst:
self.worst_field = extend.worst_field
self.checks.extend(extend.checks)
class Field:
def __init__(self, struct_name, desc, field_options):
'''desc is FieldDescriptorProto'''
self.tag = desc.number
self.struct_name = struct_name
self.union_name = None
self.name = desc.name
self.default = None
self.max_size = None
self.max_count = None
self.array_decl = ""
self.enc_size = None
self.ctype = None
self.inline = None
if field_options.type == nanopb_pb2.FT_INLINE:
field_options.type = nanopb_pb2.FT_STATIC
self.inline = nanopb_pb2.FT_INLINE
# Parse field options
if field_options.HasField("max_size"):
self.max_size = field_options.max_size
if field_options.HasField("max_count"):
self.max_count = field_options.max_count
if desc.HasField('default_value'):
self.default = desc.default_value
# Check field rules, i.e. required/optional/repeated.
can_be_static = True
if desc.label == FieldD.LABEL_REPEATED:
self.rules = 'REPEATED'
if self.max_count is None:
can_be_static = False
else:
self.array_decl = '[%d]' % self.max_count
elif field_options.HasField("proto3"):
self.rules = 'SINGULAR'
elif desc.label == FieldD.LABEL_REQUIRED:
self.rules = 'REQUIRED'
elif desc.label == FieldD.LABEL_OPTIONAL:
self.rules = 'OPTIONAL'
else:
raise NotImplementedError(desc.label)
# Check if the field can be implemented with static allocation
# i.e. whether the data size is known.
if desc.type == FieldD.TYPE_STRING and self.max_size is None:
can_be_static = False
if desc.type == FieldD.TYPE_BYTES and self.max_size is None:
can_be_static = False
# Decide how the field data will be allocated
if field_options.type == nanopb_pb2.FT_DEFAULT:
if can_be_static:
field_options.type = nanopb_pb2.FT_STATIC
else:
field_options.type = nanopb_pb2.FT_CALLBACK
if field_options.type == nanopb_pb2.FT_STATIC and not can_be_static:
raise Exception("Field %s is defined as static, but max_size or "
"max_count is not given." % self.name)
if field_options.type == nanopb_pb2.FT_STATIC:
self.allocation = 'STATIC'
elif field_options.type == nanopb_pb2.FT_POINTER:
self.allocation = 'POINTER'
elif field_options.type == nanopb_pb2.FT_CALLBACK:
self.allocation = 'CALLBACK'
else:
raise NotImplementedError(field_options.type)
# Decide the C data type to use in the struct.
if desc.type in datatypes:
self.ctype, self.pbtype, self.enc_size, isa = datatypes[desc.type]
# Override the field size if user wants to use smaller integers
if isa and field_options.int_size != nanopb_pb2.IS_DEFAULT:
self.ctype = intsizes[field_options.int_size]
if desc.type == FieldD.TYPE_UINT32 or desc.type == FieldD.TYPE_UINT64:
self.ctype = 'u' + self.ctype;
elif desc.type == FieldD.TYPE_ENUM:
self.pbtype = 'ENUM'
self.ctype = names_from_type_name(desc.type_name)
if self.default is not None:
self.default = self.ctype + self.default
self.enc_size = None # Needs to be filled in when enum values are known
elif desc.type == FieldD.TYPE_STRING:
self.pbtype = 'STRING'
self.ctype = 'char'
if self.allocation == 'STATIC':
self.ctype = 'char'
self.array_decl += '[%d]' % self.max_size
self.enc_size = varint_max_size(self.max_size) + self.max_size
elif desc.type == FieldD.TYPE_BYTES:
self.pbtype = 'BYTES'
if self.allocation == 'STATIC':
# Inline STATIC for BYTES is like STATIC for STRING.
if self.inline:
self.ctype = 'pb_byte_t'
self.array_decl += '[%d]' % self.max_size
else:
self.ctype = self.struct_name + self.name + 't'
self.enc_size = varint_max_size(self.max_size) + self.max_size
elif self.allocation == 'POINTER':
self.ctype = 'pb_bytes_array_t'
elif desc.type == FieldD.TYPE_MESSAGE:
self.pbtype = 'MESSAGE'
self.ctype = self.submsgname = names_from_type_name(desc.type_name)
self.enc_size = None # Needs to be filled in after the message type is available
else:
raise NotImplementedError(desc.type)
def __lt__(self, other):
return self.tag < other.tag
def __str__(self):
result = ''
if self.allocation == 'POINTER':
if self.rules == 'REPEATED':
result += ' pb_size_t ' + self.name + '_count;\n'
if self.pbtype == 'MESSAGE':
# Use struct definition, so recursive submessages are possible
result += ' struct _%s *%s;' % (self.ctype, self.name)
elif self.rules == 'REPEATED' and self.pbtype in ['STRING', 'BYTES']:
# String/bytes arrays need to be defined as pointers to pointers
result += ' %s **%s;' % (self.ctype, self.name)
else:
result += ' %s *%s;' % (self.ctype, self.name)
elif self.allocation == 'CALLBACK':
result += ' pb_callback_t %s;' % self.name
else:
if self.rules == 'OPTIONAL' and self.allocation == 'STATIC':
result += ' bool has_' + self.name + ';\n'
elif self.rules == 'REPEATED' and self.allocation == 'STATIC':
result += ' pb_size_t ' + self.name + '_count;\n'
result += ' %s %s%s;' % (self.ctype, self.name, self.array_decl)
return result
def types(self):
'''Return definitions for any special types this field might need.'''
if self.pbtype == 'BYTES' and self.allocation == 'STATIC' and not self.inline:
result = 'typedef PB_BYTES_ARRAY_T(%d) %s;\n' % (self.max_size, self.ctype)
else:
result = ''
return result
def get_dependencies(self):
'''Get list of type names used by this field.'''
if self.allocation == 'STATIC':
return [str(self.ctype)]
else:
return []
def get_initializer(self, null_init, inner_init_only = False):
'''Return literal expression for this field's default value.
null_init: If True, initialize to a 0 value instead of default from .proto
inner_init_only: If True, exclude initialization for any count/has fields
'''
inner_init = None
if self.pbtype == 'MESSAGE':
if null_init:
inner_init = '%s_init_zero' % self.ctype
else:
inner_init = '%s_init_default' % self.ctype
elif self.default is None or null_init:
if self.pbtype == 'STRING':
inner_init = '""'
elif self.pbtype == 'BYTES':
if self.inline:
inner_init = '{0}'
else:
inner_init = '{0, {0}}'
elif self.pbtype in ('ENUM', 'UENUM'):
inner_init = '(%s)0' % self.ctype
else:
inner_init = '0'
else:
if self.pbtype == 'STRING':
inner_init = self.default.replace('"', '\\"')
inner_init = '"' + inner_init + '"'
elif self.pbtype == 'BYTES':
data = ['0x%02x' % ord(c) for c in self.default]
if len(data) == 0:
if self.inline:
inner_init = '{0}'
else:
inner_init = '{0, {0}}'
else:
if self.inline:
inner_init = '{%s}' % ','.join(data)
else:
inner_init = '{%d, {%s}}' % (len(data), ','.join(data))
elif self.pbtype in ['FIXED32', 'UINT32']:
inner_init = str(self.default) + 'u'
elif self.pbtype in ['FIXED64', 'UINT64']:
inner_init = str(self.default) + 'ull'
elif self.pbtype in ['SFIXED64', 'INT64']:
inner_init = str(self.default) + 'll'
else:
inner_init = str(self.default)
if inner_init_only:
return inner_init
outer_init = None
if self.allocation == 'STATIC':
if self.rules == 'REPEATED':
outer_init = '0, {'
outer_init += ', '.join([inner_init] * self.max_count)
outer_init += '}'
elif self.rules == 'OPTIONAL':
outer_init = 'false, ' + inner_init
else:
outer_init = inner_init
elif self.allocation == 'POINTER':
if self.rules == 'REPEATED':
outer_init = '0, NULL'
else:
outer_init = 'NULL'
elif self.allocation == 'CALLBACK':
if self.pbtype == 'EXTENSION':
outer_init = 'NULL'
else:
outer_init = '{{NULL}, NULL}'
return outer_init
def default_decl(self, declaration_only = False):
'''Return definition for this field's default value.'''
if self.default is None:
return None
ctype = self.ctype
default = self.get_initializer(False, True)
array_decl = ''
if self.pbtype == 'STRING':
if self.allocation != 'STATIC':
return None # Not implemented
array_decl = '[%d]' % self.max_size
elif self.pbtype == 'BYTES':
if self.allocation != 'STATIC':
return None # Not implemented
if self.inline:
array_decl = '[%d]' % self.max_size
if declaration_only:
return 'extern const %s %s_default%s;' % (ctype, self.struct_name + self.name, array_decl)
else:
return 'const %s %s_default%s = %s;' % (ctype, self.struct_name + self.name, array_decl, default)
def tags(self):
'''Return the #define for the tag number of this field.'''
identifier = '%s_%s_tag' % (self.struct_name, self.name)
return '#define %-40s %d\n' % (identifier, self.tag)
def pb_field_t(self, prev_field_name):
'''Return the pb_field_t initializer to use in the constant array.
prev_field_name is the name of the previous field or None.
'''
if self.rules == 'ONEOF':
if self.anonymous:
result = ' PB_ANONYMOUS_ONEOF_FIELD(%s, ' % self.union_name
else:
result = ' PB_ONEOF_FIELD(%s, ' % self.union_name
else:
result = ' PB_FIELD('
result += '%3d, ' % self.tag
result += '%-8s, ' % self.pbtype
result += '%s, ' % self.rules
result += '%-8s, ' % (self.allocation if not self.inline else "INLINE")
result += '%s, ' % ("FIRST" if not prev_field_name else "OTHER")
result += '%s, ' % self.struct_name
result += '%s, ' % self.name
result += '%s, ' % (prev_field_name or self.name)
if self.pbtype == 'MESSAGE':
result += '&%s_fields)' % self.submsgname
elif self.default is None:
result += '0)'
elif self.pbtype in ['BYTES', 'STRING'] and self.allocation != 'STATIC':
result += '0)' # Arbitrary size default values not implemented
elif self.rules == 'OPTEXT':
result += '0)' # Default value for extensions is not implemented
else:
result += '&%s_default)' % (self.struct_name + self.name)
return result
def get_last_field_name(self):
return self.name
def largest_field_value(self):
'''Determine if this field needs 16bit or 32bit pb_field_t structure to compile properly.
Returns numeric value or a C-expression for assert.'''
check = []
if self.pbtype == 'MESSAGE' and self.allocation == 'STATIC':
if self.rules == 'REPEATED':
check.append('pb_membersize(%s, %s[0])' % (self.struct_name, self.name))
elif self.rules == 'ONEOF':
if self.anonymous:
check.append('pb_membersize(%s, %s)' % (self.struct_name, self.name))
else:
check.append('pb_membersize(%s, %s.%s)' % (self.struct_name, self.union_name, self.name))
else:
check.append('pb_membersize(%s, %s)' % (self.struct_name, self.name))
elif self.pbtype == 'BYTES' and self.allocation == 'STATIC':
if self.max_size > 251:
check.append('pb_membersize(%s, %s)' % (self.struct_name, self.name))
return FieldMaxSize([self.tag, self.max_size, self.max_count],
check,
('%s.%s' % (self.struct_name, self.name)))
def encoded_size(self, dependencies):
'''Return the maximum size that this field can take when encoded,
including the field tag. If the size cannot be determined, returns
None.'''
if self.allocation != 'STATIC':
return None
if self.pbtype == 'MESSAGE':
encsize = None
if str(self.submsgname) in dependencies:
submsg = dependencies[str(self.submsgname)]
encsize = submsg.encoded_size(dependencies)
if encsize is not None:
# Include submessage length prefix
encsize += varint_max_size(encsize.upperlimit())
if encsize is None:
# Submessage or its size cannot be found.
# This can occur if submessage is defined in different
# file, and it or its .options could not be found.
# Instead of direct numeric value, reference the size that
# has been #defined in the other file.
encsize = EncodedSize(self.submsgname + 'size')
# We will have to make a conservative assumption on the length
# prefix size, though.
encsize += 5
elif self.pbtype in ['ENUM', 'UENUM']:
if str(self.ctype) in dependencies:
enumtype = dependencies[str(self.ctype)]
encsize = enumtype.encoded_size()
else:
# Conservative assumption
encsize = 10
elif self.enc_size is None:
raise RuntimeError("Could not determine encoded size for %s.%s"
% (self.struct_name, self.name))
else:
encsize = EncodedSize(self.enc_size)
encsize += varint_max_size(self.tag << 3) # Tag + wire type
if self.rules == 'REPEATED':
# Decoders must be always able to handle unpacked arrays.
# Therefore we have to reserve space for it, even though
# we emit packed arrays ourselves.
encsize *= self.max_count
return encsize
class ExtensionRange(Field):
def __init__(self, struct_name, range_start, field_options):
'''Implements a special pb_extension_t* field in an extensible message
structure. The range_start signifies the index at which the extensions
start. Not necessarily all tags above this are extensions, it is merely
a speed optimization.
'''
self.tag = range_start
self.struct_name = struct_name
self.name = 'extensions'
self.pbtype = 'EXTENSION'
self.rules = 'OPTIONAL'
self.allocation = 'CALLBACK'
self.ctype = 'pb_extension_t'
self.array_decl = ''
self.default = None
self.max_size = 0
self.max_count = 0
self.inline = None
def __str__(self):
return ' pb_extension_t *extensions;'
def types(self):
return ''
def tags(self):
return ''
def encoded_size(self, dependencies):
# We exclude extensions from the count, because they cannot be known
# until runtime. Other option would be to return None here, but this
# way the value remains useful if extensions are not used.
return EncodedSize(0)
class ExtensionField(Field):
def __init__(self, struct_name, desc, field_options):
self.fullname = struct_name + desc.name
self.extendee_name = names_from_type_name(desc.extendee)
Field.__init__(self, self.fullname + 'struct', desc, field_options)
if self.rules != 'OPTIONAL':
self.skip = True
else:
self.skip = False
self.rules = 'OPTEXT'
def tags(self):
'''Return the #define for the tag number of this field.'''
identifier = '%s_tag' % self.fullname
return '#define %-40s %d\n' % (identifier, self.tag)
def extension_decl(self):
'''Declaration of the extension type in the .pb.h file'''
if self.skip:
msg = '/* Extension field %s was skipped because only "optional"\n' % self.fullname
msg +=' type of extension fields is currently supported. */\n'
return msg
return ('extern const pb_extension_type_t %s; /* field type: %s */\n' %
(self.fullname, str(self).strip()))
def extension_def(self):
'''Definition of the extension type in the .pb.c file'''
if self.skip:
return ''
result = 'typedef struct {\n'
result += str(self)
result += '\n} %s;\n\n' % self.struct_name
result += ('static const pb_field_t %s_field = \n %s;\n\n' %
(self.fullname, self.pb_field_t(None)))
result += 'const pb_extension_type_t %s = {\n' % self.fullname
result += ' NULL,\n'
result += ' NULL,\n'
result += ' &%s_field\n' % self.fullname
result += '};\n'
return result
# ---------------------------------------------------------------------------
# Generation of oneofs (unions)
# ---------------------------------------------------------------------------
class OneOf(Field):
def __init__(self, struct_name, oneof_desc):
self.struct_name = struct_name
self.name = oneof_desc.name
self.ctype = 'union'
self.pbtype = 'oneof'
self.fields = []
self.allocation = 'ONEOF'
self.default = None
self.rules = 'ONEOF'
self.anonymous = False
self.inline = None
def add_field(self, field):
if field.allocation == 'CALLBACK':
raise Exception("Callback fields inside of oneof are not supported"
+ " (field %s)" % field.name)
field.union_name = self.name
field.rules = 'ONEOF'
field.anonymous = self.anonymous
self.fields.append(field)
self.fields.sort(key = lambda f: f.tag)
# Sort by the lowest tag number inside union
self.tag = min([f.tag for f in self.fields])
def __str__(self):
result = ''
if self.fields:
result += ' pb_size_t which_' + self.name + ";\n"
result += ' union {\n'
for f in self.fields:
result += ' ' + str(f).replace('\n', '\n ') + '\n'
if self.anonymous:
result += ' };'
else:
result += ' } ' + self.name + ';'
return result
def types(self):
return ''.join([f.types() for f in self.fields])
def get_dependencies(self):
deps = []
for f in self.fields:
deps += f.get_dependencies()
return deps
def get_initializer(self, null_init):
return '0, {' + self.fields[0].get_initializer(null_init) + '}'
def default_decl(self, declaration_only = False):
return None
def tags(self):
return ''.join([f.tags() for f in self.fields])
def pb_field_t(self, prev_field_name):
result = ',\n'.join([f.pb_field_t(prev_field_name) for f in self.fields])
return result
def get_last_field_name(self):
if self.anonymous:
return self.fields[-1].name
else:
return self.name + '.' + self.fields[-1].name
def largest_field_value(self):
largest = FieldMaxSize()
for f in self.fields:
largest.extend(f.largest_field_value())
return largest
def encoded_size(self, dependencies):
'''Returns the size of the largest oneof field.'''
largest = EncodedSize(0)
for f in self.fields:
size = EncodedSize(f.encoded_size(dependencies))
if size.value is None:
return None
elif size.symbols:
return None # Cannot resolve maximum of symbols
elif size.value > largest.value:
largest = size
return largest
# ---------------------------------------------------------------------------
# Generation of messages (structures)
# ---------------------------------------------------------------------------
class Message:
def __init__(self, names, desc, message_options):
self.name = names
self.fields = []
self.oneofs = {}
no_unions = []
if message_options.msgid:
self.msgid = message_options.msgid
if hasattr(desc, 'oneof_decl'):
for i, f in enumerate(desc.oneof_decl):
oneof_options = get_nanopb_suboptions(desc, message_options, self.name + f.name)
if oneof_options.no_unions:
no_unions.append(i) # No union, but add fields normally
elif oneof_options.type == nanopb_pb2.FT_IGNORE:
pass # No union and skip fields also
else:
oneof = OneOf(self.name, f)
if oneof_options.anonymous_oneof:
oneof.anonymous = True
self.oneofs[i] = oneof
self.fields.append(oneof)
for f in desc.field:
field_options = get_nanopb_suboptions(f, message_options, self.name + f.name)
if field_options.type == nanopb_pb2.FT_IGNORE:
continue
field = Field(self.name, f, field_options)
if (hasattr(f, 'oneof_index') and
f.HasField('oneof_index') and
f.oneof_index not in no_unions):
if f.oneof_index in self.oneofs:
self.oneofs[f.oneof_index].add_field(field)
else:
self.fields.append(field)
if len(desc.extension_range) > 0:
field_options = get_nanopb_suboptions(desc, message_options, self.name + 'extensions')
range_start = min([r.start for r in desc.extension_range])
if field_options.type != nanopb_pb2.FT_IGNORE:
self.fields.append(ExtensionRange(self.name, range_start, field_options))
self.packed = message_options.packed_struct
self.ordered_fields = self.fields[:]
self.ordered_fields.sort()
def get_dependencies(self):
'''Get list of type names that this structure refers to.'''
deps = []
for f in self.fields:
deps += f.get_dependencies()
return deps
def __str__(self):
result = 'typedef struct _%s {\n' % self.name
if not self.ordered_fields:
# Empty structs are not allowed in C standard.
# Therefore add a dummy field if an empty message occurs.
result += ' char dummy_field;'
result += '\n'.join([str(f) for f in self.ordered_fields])
result += '\n/* @@protoc_insertion_point(struct:%s) */' % self.name
result += '\n}'
if self.packed:
result += ' pb_packed'
result += ' %s;' % self.name
if self.packed:
result = 'PB_PACKED_STRUCT_START\n' + result
result += '\nPB_PACKED_STRUCT_END'
return result
def types(self):
return ''.join([f.types() for f in self.fields])
def get_initializer(self, null_init):
if not self.ordered_fields:
return '{0}'
parts = []
for field in self.ordered_fields:
parts.append(field.get_initializer(null_init))
return '{' + ', '.join(parts) + '}'
def default_decl(self, declaration_only = False):
result = ""
for field in self.fields:
default = field.default_decl(declaration_only)
if default is not None:
result += default + '\n'
return result
def count_required_fields(self):
'''Returns number of required fields inside this message'''
count = 0
for f in self.fields:
if not isinstance(f, OneOf):
if f.rules == 'REQUIRED':
count += 1
return count
def count_all_fields(self):
count = 0
for f in self.fields:
if isinstance(f, OneOf):
count += len(f.fields)
else:
count += 1
return count
def fields_declaration(self):
result = 'extern const pb_field_t %s_fields[%d];' % (self.name, self.count_all_fields() + 1)
return result
def fields_definition(self):
result = 'const pb_field_t %s_fields[%d] = {\n' % (self.name, self.count_all_fields() + 1)
prev = None
for field in self.ordered_fields:
result += field.pb_field_t(prev)
result += ',\n'
prev = field.get_last_field_name()
result += ' PB_LAST_FIELD\n};'
return result
def encoded_size(self, dependencies):
'''Return the maximum size that this message can take when encoded.
If the size cannot be determined, returns None.
'''
size = EncodedSize(0)
for field in self.fields:
fsize = field.encoded_size(dependencies)
if fsize is None:
return None
size += fsize
return size
# ---------------------------------------------------------------------------
# Processing of entire .proto files
# ---------------------------------------------------------------------------
def iterate_messages(desc, names = Names()):
'''Recursively find all messages. For each, yield name, DescriptorProto.'''
if hasattr(desc, 'message_type'):
submsgs = desc.message_type
else:
submsgs = desc.nested_type
for submsg in submsgs:
sub_names = names + submsg.name
yield sub_names, submsg
for x in iterate_messages(submsg, sub_names):
yield x
def iterate_extensions(desc, names = Names()):
'''Recursively find all extensions.
For each, yield name, FieldDescriptorProto.
'''
for extension in desc.extension:
yield names, extension
for subname, subdesc in iterate_messages(desc, names):
for extension in subdesc.extension:
yield subname, extension
def toposort2(data):
'''Topological sort.
From http://code.activestate.com/recipes/577413-topological-sort/
This function is under the MIT license.
'''
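    # For example (illustrative): toposort2({'b': set(['a']), 'a': set()})
    # yields 'a' before 'b', because 'a' has no remaining dependencies.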
for k, v in list(data.items()):
v.discard(k) # Ignore self dependencies
extra_items_in_deps = reduce(set.union, list(data.values()), set()) - set(data.keys())
data.update(dict([(item, set()) for item in extra_items_in_deps]))
while True:
ordered = set(item for item,dep in list(data.items()) if not dep)
if not ordered:
break
for item in sorted(ordered):
yield item
data = dict([(item, (dep - ordered)) for item,dep in list(data.items())
if item not in ordered])
assert not data, "A cyclic dependency exists amongst %r" % data
def sort_dependencies(messages):
'''Sort a list of Messages based on dependencies.'''
dependencies = {}
message_by_name = {}
for message in messages:
dependencies[str(message.name)] = set(message.get_dependencies())
message_by_name[str(message.name)] = message
for msgname in toposort2(dependencies):
if msgname in message_by_name:
yield message_by_name[msgname]
def make_identifier(headername):
'''Make #ifndef identifier that contains uppercase A-Z and digits 0-9'''
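    # For example (illustrative): make_identifier('foo/bar.pb.h') returns
    # 'FOO_BAR_PB_H', which is what the include guard macros below are built from.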
result = ""
for c in headername.upper():
if c.isalnum():
result += c
else:
result += '_'
return result
class ProtoFile:
def __init__(self, fdesc, file_options):
'''Takes a FileDescriptorProto and parses it.'''
self.fdesc = fdesc
self.file_options = file_options
self.dependencies = {}
self.parse()
# Some of types used in this file probably come from the file itself.
# Thus it has implicit dependency on itself.
self.add_dependency(self)
def parse(self):
self.enums = []
self.messages = []
self.extensions = []
if self.fdesc.package:
base_name = Names(self.fdesc.package.split('.'))
else:
base_name = Names()
for enum in self.fdesc.enum_type:
enum_options = get_nanopb_suboptions(enum, self.file_options, base_name + enum.name)
self.enums.append(Enum(base_name, enum, enum_options))
for names, message in iterate_messages(self.fdesc, base_name):
message_options = get_nanopb_suboptions(message, self.file_options, names)
if message_options.skip_message:
continue
self.messages.append(Message(names, message, message_options))
for enum in message.enum_type:
enum_options = get_nanopb_suboptions(enum, message_options, names + enum.name)
self.enums.append(Enum(names, enum, enum_options))
for names, extension in iterate_extensions(self.fdesc, base_name):
field_options = get_nanopb_suboptions(extension, self.file_options, names + extension.name)
if field_options.type != nanopb_pb2.FT_IGNORE:
self.extensions.append(ExtensionField(names, extension, field_options))
def add_dependency(self, other):
for enum in other.enums:
self.dependencies[str(enum.names)] = enum
for msg in other.messages:
self.dependencies[str(msg.name)] = msg
# Fix field default values where enum short names are used.
for enum in other.enums:
if not enum.options.long_names:
for message in self.messages:
for field in message.fields:
if field.default in enum.value_longnames:
idx = enum.value_longnames.index(field.default)
field.default = enum.values[idx][0]
# Fix field data types where enums have negative values.
for enum in other.enums:
if not enum.has_negative():
for message in self.messages:
for field in message.fields:
if field.pbtype == 'ENUM' and field.ctype == enum.names:
field.pbtype = 'UENUM'
def generate_header(self, includes, headername, options):
'''Generate content for a header file.
Generates strings, which should be concatenated and stored to file.
'''
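        # Typical use (see process_file() below): concatenate everything this
        # generator yields, e.g. ''.join(f.generate_header(includes, headerbasename, options)).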
yield '/* Automatically generated nanopb header */\n'
if options.notimestamp:
yield '/* Generated by %s */\n\n' % (nanopb_version)
else:
yield '/* Generated by %s at %s. */\n\n' % (nanopb_version, time.asctime())
if self.fdesc.package:
symbol = make_identifier(self.fdesc.package + '_' + headername)
else:
symbol = make_identifier(headername)
yield '#ifndef PB_%s_INCLUDED\n' % symbol
yield '#define PB_%s_INCLUDED\n' % symbol
try:
yield options.libformat % ('pb.h')
except TypeError:
# no %s specified - use whatever was passed in as options.libformat
yield options.libformat
yield '\n'
for incfile in includes:
noext = os.path.splitext(incfile)[0]
yield options.genformat % (noext + options.extension + '.h')
yield '\n'
yield '/* @@protoc_insertion_point(includes) */\n'
yield '#if PB_PROTO_HEADER_VERSION != 30\n'
yield '#error Regenerate this file with the current version of nanopb generator.\n'
yield '#endif\n'
yield '\n'
yield '#ifdef __cplusplus\n'
yield 'extern "C" {\n'
yield '#endif\n\n'
if self.enums:
yield '/* Enum definitions */\n'
for enum in self.enums:
yield str(enum) + '\n\n'
if self.messages:
yield '/* Struct definitions */\n'
for msg in sort_dependencies(self.messages):
yield msg.types()
yield str(msg) + '\n\n'
if self.extensions:
yield '/* Extensions */\n'
for extension in self.extensions:
yield extension.extension_decl()
yield '\n'
if self.messages:
yield '/* Default values for struct fields */\n'
for msg in self.messages:
yield msg.default_decl(True)
yield '\n'
yield '/* Initializer values for message structs */\n'
for msg in self.messages:
identifier = '%s_init_default' % msg.name
yield '#define %-40s %s\n' % (identifier, msg.get_initializer(False))
for msg in self.messages:
identifier = '%s_init_zero' % msg.name
yield '#define %-40s %s\n' % (identifier, msg.get_initializer(True))
yield '\n'
yield '/* Field tags (for use in manual encoding/decoding) */\n'
for msg in sort_dependencies(self.messages):
for field in msg.fields:
yield field.tags()
for extension in self.extensions:
yield extension.tags()
yield '\n'
yield '/* Struct field encoding specification for nanopb */\n'
for msg in self.messages:
yield msg.fields_declaration() + '\n'
yield '\n'
yield '/* Maximum encoded size of messages (where known) */\n'
for msg in self.messages:
msize = msg.encoded_size(self.dependencies)
identifier = '%s_size' % msg.name
if msize is not None:
yield '#define %-40s %s\n' % (identifier, msize)
else:
yield '/* %s depends on runtime parameters */\n' % identifier
yield '\n'
yield '/* Message IDs (where set with "msgid" option) */\n'
yield '#ifdef PB_MSGID\n'
for msg in self.messages:
if hasattr(msg,'msgid'):
yield '#define PB_MSG_%d %s\n' % (msg.msgid, msg.name)
yield '\n'
symbol = make_identifier(headername.split('.')[0])
yield '#define %s_MESSAGES \\\n' % symbol
for msg in self.messages:
m = "-1"
msize = msg.encoded_size(self.dependencies)
if msize is not None:
m = msize
if hasattr(msg,'msgid'):
yield '\tPB_MSG(%d,%s,%s) \\\n' % (msg.msgid, m, msg.name)
yield '\n'
for msg in self.messages:
if hasattr(msg,'msgid'):
yield '#define %s_msgid %d\n' % (msg.name, msg.msgid)
yield '\n'
yield '#endif\n\n'
yield '#ifdef __cplusplus\n'
yield '} /* extern "C" */\n'
yield '#endif\n'
# End of header
yield '/* @@protoc_insertion_point(eof) */\n'
yield '\n#endif\n'
def generate_source(self, headername, options):
'''Generate content for a source file.'''
yield '/* Automatically generated nanopb constant definitions */\n'
if options.notimestamp:
yield '/* Generated by %s */\n\n' % (nanopb_version)
else:
yield '/* Generated by %s at %s. */\n\n' % (nanopb_version, time.asctime())
yield options.genformat % (headername)
yield '\n'
yield '/* @@protoc_insertion_point(includes) */\n'
yield '#if PB_PROTO_HEADER_VERSION != 30\n'
yield '#error Regenerate this file with the current version of nanopb generator.\n'
yield '#endif\n'
yield '\n'
for msg in self.messages:
yield msg.default_decl(False)
yield '\n\n'
for msg in self.messages:
yield msg.fields_definition() + '\n\n'
for ext in self.extensions:
yield ext.extension_def() + '\n'
# Add checks for numeric limits
if self.messages:
largest_msg = max(self.messages, key = lambda m: m.count_required_fields())
largest_count = largest_msg.count_required_fields()
if largest_count > 64:
yield '\n/* Check that missing required fields will be properly detected */\n'
yield '#if PB_MAX_REQUIRED_FIELDS < %d\n' % largest_count
yield '#error Properly detecting missing required fields in %s requires \\\n' % largest_msg.name
yield ' setting PB_MAX_REQUIRED_FIELDS to %d or more.\n' % largest_count
yield '#endif\n'
max_field = FieldMaxSize()
checks_msgnames = []
for msg in self.messages:
checks_msgnames.append(msg.name)
for field in msg.fields:
max_field.extend(field.largest_field_value())
worst = max_field.worst
worst_field = max_field.worst_field
checks = max_field.checks
if worst > 255 or checks:
yield '\n/* Check that field information fits in pb_field_t */\n'
if worst > 65535 or checks:
yield '#if !defined(PB_FIELD_32BIT)\n'
if worst > 65535:
yield '#error Field descriptor for %s is too large. Define PB_FIELD_32BIT to fix this.\n' % worst_field
else:
assertion = ' && '.join(str(c) + ' < 65536' for c in checks)
msgs = '_'.join(str(n) for n in checks_msgnames)
yield '/* If you get an error here, it means that you need to define PB_FIELD_32BIT\n'
yield ' * compile-time option. You can do that in pb.h or on compiler command line.\n'
yield ' * \n'
yield ' * The reason you need to do this is that some of your messages contain tag\n'
yield ' * numbers or field sizes that are larger than what can fit in 8 or 16 bit\n'
yield ' * field descriptors.\n'
yield ' */\n'
yield 'PB_STATIC_ASSERT((%s), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_%s)\n'%(assertion,msgs)
yield '#endif\n\n'
if worst < 65536:
yield '#if !defined(PB_FIELD_16BIT) && !defined(PB_FIELD_32BIT)\n'
if worst > 255:
yield '#error Field descriptor for %s is too large. Define PB_FIELD_16BIT to fix this.\n' % worst_field
else:
assertion = ' && '.join(str(c) + ' < 256' for c in checks)
msgs = '_'.join(str(n) for n in checks_msgnames)
yield '/* If you get an error here, it means that you need to define PB_FIELD_16BIT\n'
yield ' * compile-time option. You can do that in pb.h or on compiler command line.\n'
yield ' * \n'
yield ' * The reason you need to do this is that some of your messages contain tag\n'
yield ' * numbers or field sizes that are larger than what can fit in the default\n'
yield ' * 8 bit descriptors.\n'
yield ' */\n'
yield 'PB_STATIC_ASSERT((%s), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_%s)\n'%(assertion,msgs)
yield '#endif\n\n'
# Add check for sizeof(double)
has_double = False
for msg in self.messages:
for field in msg.fields:
if field.ctype == 'double':
has_double = True
if has_double:
yield '\n'
yield '/* On some platforms (such as AVR), double is really float.\n'
yield ' * These are not directly supported by nanopb, but see example_avr_double.\n'
yield ' * To get rid of this error, remove any double fields from your .proto.\n'
yield ' */\n'
yield 'PB_STATIC_ASSERT(sizeof(double) == 8, DOUBLE_MUST_BE_8_BYTES)\n'
yield '\n'
yield '/* @@protoc_insertion_point(eof) */\n'
# ---------------------------------------------------------------------------
# Options parsing for the .proto files
# ---------------------------------------------------------------------------
from fnmatch import fnmatch
def read_options_file(infile):
'''Parse a separate options file to list:
[(namemask, options), ...]
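
    Each non-comment line pairs a dotted name mask with nanopb options,
    for example (illustrative only):

        MyMessage.name      max_size:40
        MyMessage.values    max_count:8 int_size:IS_16
        *.device_id         max_size:32

    Name masks are matched with fnmatch, and '#', '//' and '/* ... */'
    comments are ignored.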
'''
results = []
data = infile.read()
data = re.sub('/\*.*?\*/', '', data, flags = re.MULTILINE)
data = re.sub('//.*?$', '', data, flags = re.MULTILINE)
data = re.sub('#.*?$', '', data, flags = re.MULTILINE)
for i, line in enumerate(data.split('\n')):
line = line.strip()
if not line:
continue
parts = line.split(None, 1)
if len(parts) < 2:
sys.stderr.write("%s:%d: " % (infile.name, i + 1) +
"Option lines should have space between field name and options. " +
"Skipping line: '%s'\n" % line)
continue
opts = nanopb_pb2.NanoPBOptions()
try:
text_format.Merge(parts[1], opts)
except Exception as e:
sys.stderr.write("%s:%d: " % (infile.name, i + 1) +
"Unparseable option line: '%s'. " % line +
"Error: %s\n" % str(e))
continue
results.append((parts[0], opts))
return results
class Globals:
'''Ugly global variables, should find a good way to pass these.'''
verbose_options = False
separate_options = []
matched_namemasks = set()
def get_nanopb_suboptions(subdesc, options, name):
'''Get copy of options, and merge information from subdesc.'''
new_options = nanopb_pb2.NanoPBOptions()
new_options.CopyFrom(options)
# Handle options defined in a separate file
dotname = '.'.join(name.parts)
for namemask, options in Globals.separate_options:
if fnmatch(dotname, namemask):
Globals.matched_namemasks.add(namemask)
new_options.MergeFrom(options)
if hasattr(subdesc, 'syntax') and subdesc.syntax == "proto3":
new_options.proto3 = True
# Handle options defined in .proto
if isinstance(subdesc.options, descriptor.FieldOptions):
ext_type = nanopb_pb2.nanopb
elif isinstance(subdesc.options, descriptor.FileOptions):
ext_type = nanopb_pb2.nanopb_fileopt
elif isinstance(subdesc.options, descriptor.MessageOptions):
ext_type = nanopb_pb2.nanopb_msgopt
elif isinstance(subdesc.options, descriptor.EnumOptions):
ext_type = nanopb_pb2.nanopb_enumopt
else:
raise Exception("Unknown options type")
if subdesc.options.HasExtension(ext_type):
ext = subdesc.options.Extensions[ext_type]
new_options.MergeFrom(ext)
if Globals.verbose_options:
sys.stderr.write("Options for " + dotname + ": ")
sys.stderr.write(text_format.MessageToString(new_options) + "\n")
return new_options
# ---------------------------------------------------------------------------
# Command line interface
# ---------------------------------------------------------------------------
import sys
import os.path
from optparse import OptionParser
optparser = OptionParser(
usage = "Usage: nanopb_generator.py [options] file.pb ...",
epilog = "Compile file.pb from file.proto by: 'protoc -ofile.pb file.proto'. " +
"Output will be written to file.pb.h and file.pb.c.")
optparser.add_option("-x", dest="exclude", metavar="FILE", action="append", default=[],
help="Exclude file from generated #include list.")
optparser.add_option("-e", "--extension", dest="extension", metavar="EXTENSION", default=".pb",
help="Set extension to use instead of '.pb' for generated files. [default: %default]")
optparser.add_option("-f", "--options-file", dest="options_file", metavar="FILE", default="%s.options",
help="Set name of a separate generator options file.")
optparser.add_option("-I", "--options-path", dest="options_path", metavar="DIR",
action="append", default = [],
help="Search for .options files additionally in this path")
optparser.add_option("-D", "--output-dir", dest="output_dir",
metavar="OUTPUTDIR", default=None,
help="Output directory of .pb.h and .pb.c files")
optparser.add_option("-Q", "--generated-include-format", dest="genformat",
metavar="FORMAT", default='#include "%s"\n',
help="Set format string to use for including other .pb.h files. [default: %default]")
optparser.add_option("-L", "--library-include-format", dest="libformat",
metavar="FORMAT", default='#include <%s>\n',
help="Set format string to use for including the nanopb pb.h header. [default: %default]")
optparser.add_option("-T", "--no-timestamp", dest="notimestamp", action="store_true", default=False,
help="Don't add timestamp to .pb.h and .pb.c preambles")
optparser.add_option("-q", "--quiet", dest="quiet", action="store_true", default=False,
help="Don't print anything except errors.")
optparser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False,
help="Print more information.")
optparser.add_option("-s", dest="settings", metavar="OPTION:VALUE", action="append", default=[],
help="Set generator option (max_size, max_count etc.).")
def parse_file(filename, fdesc, options):
'''Parse a single file. Returns a ProtoFile instance.'''
toplevel_options = nanopb_pb2.NanoPBOptions()
for s in options.settings:
text_format.Merge(s, toplevel_options)
if not fdesc:
data = open(filename, 'rb').read()
fdesc = descriptor.FileDescriptorSet.FromString(data).file[0]
# Check if there is a separate .options file
had_abspath = False
try:
optfilename = options.options_file % os.path.splitext(filename)[0]
except TypeError:
# No %s specified, use the filename as-is
optfilename = options.options_file
had_abspath = True
paths = ['.'] + options.options_path
for p in paths:
if os.path.isfile(os.path.join(p, optfilename)):
optfilename = os.path.join(p, optfilename)
if options.verbose:
sys.stderr.write('Reading options from ' + optfilename + '\n')
Globals.separate_options = read_options_file(open(optfilename, "rU"))
break
else:
# If we are given a full filename and it does not exist, give an error.
# However, don't give error when we automatically look for .options file
# with the same name as .proto.
if options.verbose or had_abspath:
sys.stderr.write('Options file not found: ' + optfilename + '\n')
Globals.separate_options = []
Globals.matched_namemasks = set()
# Parse the file
file_options = get_nanopb_suboptions(fdesc, toplevel_options, Names([filename]))
f = ProtoFile(fdesc, file_options)
f.optfilename = optfilename
return f
def process_file(filename, fdesc, options, other_files = {}):
'''Process a single file.
filename: The full path to the .proto or .pb source file, as string.
fdesc: The loaded FileDescriptorSet, or None to read from the input file.
options: Command line options as they come from OptionsParser.
Returns a dict:
{'headername': Name of header file,
'headerdata': Data for the .h header file,
'sourcename': Name of the source code file,
'sourcedata': Data for the .c source code file
}
'''
f = parse_file(filename, fdesc, options)
# Provide dependencies if available
for dep in f.fdesc.dependency:
if dep in other_files:
f.add_dependency(other_files[dep])
# Decide the file names
noext = os.path.splitext(filename)[0]
headername = noext + options.extension + '.h'
sourcename = noext + options.extension + '.c'
headerbasename = os.path.basename(headername)
# List of .proto files that should not be included in the C header file
# even if they are mentioned in the source .proto.
excludes = ['nanopb.proto', 'google/protobuf/descriptor.proto'] + options.exclude
includes = [d for d in f.fdesc.dependency if d not in excludes]
headerdata = ''.join(f.generate_header(includes, headerbasename, options))
sourcedata = ''.join(f.generate_source(headerbasename, options))
# Check if there were any lines in .options that did not match a member
unmatched = [n for n,o in Globals.separate_options if n not in Globals.matched_namemasks]
if unmatched and not options.quiet:
sys.stderr.write("Following patterns in " + f.optfilename + " did not match any fields: "
+ ', '.join(unmatched) + "\n")
if not Globals.verbose_options:
sys.stderr.write("Use protoc --nanopb-out=-v:. to see a list of the field names.\n")
return {'headername': headername, 'headerdata': headerdata,
'sourcename': sourcename, 'sourcedata': sourcedata}

def main_cli():
'''Main function when invoked directly from the command line.'''
options, filenames = optparser.parse_args()
if not filenames:
optparser.print_help()
sys.exit(1)
if options.quiet:
options.verbose = False
if options.output_dir and not os.path.exists(options.output_dir):
optparser.print_help()
sys.stderr.write("\noutput_dir does not exist: %s\n" % options.output_dir)
sys.exit(1)
Globals.verbose_options = options.verbose
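    # Generate code for each input file and write the resulting header and
    # source files, optionally under --output-dir.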
for filename in filenames:
results = process_file(filename, None, options)
base_dir = options.output_dir or ''
to_write = [
(os.path.join(base_dir, results['headername']), results['headerdata']),
(os.path.join(base_dir, results['sourcename']), results['sourcedata']),
]
if not options.quiet:
paths = " and ".join([x[0] for x in to_write])
sys.stderr.write("Writing to %s\n" % paths)
for path, data in to_write:
with open(path, 'w') as f:
f.write(data)

def main_plugin():
'''Main function when invoked as a protoc plugin.'''
import io, sys
if sys.platform == "win32":
import os, msvcrt
# Set stdin and stdout to binary mode
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
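    # protoc passes a serialized CodeGeneratorRequest on stdin and expects a
    # serialized CodeGeneratorResponse on stdout.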
data = io.open(sys.stdin.fileno(), "rb").read()
request = plugin_pb2.CodeGeneratorRequest.FromString(data)
try:
# Versions of Python prior to 2.7.3 do not support unicode
# input to shlex.split(). Try to convert to str if possible.
params = str(request.parameter)
except UnicodeEncodeError:
params = request.parameter
import shlex
args = shlex.split(params)
options, dummy = optparser.parse_args(args)
Globals.verbose_options = options.verbose
response = plugin_pb2.CodeGeneratorResponse()
    # Google's protoc does not currently indicate the full path of the proto
    # files. As a workaround, always add the directory of the main file to the
    # search dirs; that works for the common case.
import os.path
options.options_path.append(os.path.dirname(request.file_to_generate[0]))
# Process any include files first, in order to have them
# available as dependencies
other_files = {}
for fdesc in request.proto_file:
other_files[fdesc.name] = parse_file(fdesc.name, fdesc, options)
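    # Generate code only for the files protoc asked us to handle, using the
    # descriptors supplied in the request.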
for filename in request.file_to_generate:
for fdesc in request.proto_file:
if fdesc.name == filename:
results = process_file(filename, fdesc, options, other_files)
f = response.file.add()
f.name = results['headername']
f.content = results['headerdata']
f = response.file.add()
f.name = results['sourcename']
f.content = results['sourcedata']
io.open(sys.stdout.fileno(), "wb").write(response.SerializeToString())

if __name__ == '__main__':
    # Check if we are running as a plugin under protoc: protoc invokes plugins
    # as executables named protoc-gen-<name>, and the --protoc-plugin argument
    # can be used to force plugin mode explicitly.
if 'protoc-gen-' in sys.argv[0] or '--protoc-plugin' in sys.argv:
main_plugin()
else:
main_cli()
| ioants/pypi-packages | ioant/ioant/proto/messages/generators/nanopb/nanopb_generator.py | Python | mit | 62,069 |
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model

# Extends stock.picking only to trigger the sale order 'delivered' event for
# orders that become fully delivered when a picking is completed.
class Picking(Model):
    _inherit="stock.picking"

    def set_done(self,ids,context={}):
sale_ids=[]
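        # Collect the sale orders referenced by this picking's lines.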
for obj in self.browse(ids):
for line in obj.lines:
rel=line.related_id
if not rel:
continue
if rel._model!="sale.order":
continue
sale_ids.append(rel.id)
sale_ids=list(set(sale_ids))
undeliv_sale_ids=[]
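        # Remember which of these orders are not yet fully delivered before the
        # picking is marked done.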
for sale in get_model("sale.order").browse(sale_ids):
if not sale.is_delivered:
undeliv_sale_ids.append(sale.id)
res=super().set_done(ids,context=context)
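        # Trigger the 'delivered' workflow event on any sale order that became
        # fully delivered as a result of this picking.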
deliv_sale_ids=[]
for sale in get_model("sale.order").browse(undeliv_sale_ids):
if sale.is_delivered:
deliv_sale_ids.append(sale.id)
if deliv_sale_ids:
get_model("sale.order").trigger(deliv_sale_ids,"delivered")
return res

Picking.register()
| sidzan/netforce | netforce_sale/netforce_sale/models/stock_picking.py | Python | mit | 2,179 |