filename | text
---|---|
the-stack_106_28803 | # -*- coding: utf-8 -*-
""" These are time functions provided for using the time-dependent solver.
Q: Why are there multiple versions of each?
A: The solver will want one list of arguments even if there are multiple
time-dependent parts to the Hamiltonian. (Say one laser is ramped on then CW
and another is a Gaussian pulse.) To distinguish the arguments we've got
multiple versions. Yes this is wasteful, I would like a better way to do
it but this works.
Thomas Ogden <[email protected]>
"""
import sys
from numpy import exp, log, sqrt, pi, sinc
sech = lambda t: 2/(exp(t) + exp(-t))
def square_1(t, args):
on_1 = args['on_1']
off_1 = args['off_1']
ampl_1 = args['ampl_1']
return ampl_1*(t >= on_1)*(t <= off_1)
def square_2(t, args):
on_2 = args['on_2']
off_2 = args['off_2']
ampl_2 = args['ampl_2']
return ampl_2*(t >= on_2)*(t <= off_2)
def gaussian_1(t, args):
ampl_1 = args['ampl_1']
width_1 = args['width_1']
centre_1 = args['centre_1']
return ampl_1*exp(-2*log(2)*((t - centre_1)/width_1)**2)
def gaussian_2(t, args):
ampl_2 = args['ampl_2']
width_2 = args['width_2']
centre_2 = args['centre_2']
return ampl_2*exp(-2*log(2)*((t - centre_2)/width_2)**2)
def gaussian_fwhm_1(t, args):
ampl_1 = args['ampl_1']
fwhm_1 = args['fwhm_1']
centre_1 = args['centre_1']
return ampl_1*exp(-4*log(2)*((t - centre_1)/fwhm_1)**2)
def gaussian_fwhm_2(t, args):
ampl_2 = args['ampl_2']
fwhm_2 = args['fwhm_2']
centre_2 = args['centre_2']
return ampl_2*exp(-4*log(2)*((t - centre_2)/fwhm_2)**2)
def gaussian_fwhm_3(t, args):
ampl_3 = args['ampl_3']
fwhm_3 = args['fwhm_3']
centre_3 = args['centre_3']
return ampl_3*exp(-4*log(2)*((t - centre_3)/fwhm_3)**2)
def gaussian_fwhm_4(t, args):
ampl_4 = args['ampl_4']
fwhm_4 = args['fwhm_4']
centre_4 = args['centre_4']
return ampl_4*exp(-4*log(2)*((t - centre_4)/fwhm_4)**2)
def gaussian_fwhm_5(t, args):
ampl_5 = args['ampl_5']
fwhm_5 = args['fwhm_5']
centre_5 = args['centre_5']
return ampl_5*exp(-4*log(2)*((t - centre_5)/fwhm_5)**2)
def gaussian_fwhm_6(t, args):
ampl_6 = args['ampl_6']
fwhm_6 = args['fwhm_6']
centre_6 = args['centre_6']
return ampl_6*exp(-4*log(2)*((t - centre_6)/fwhm_6)**2)
def gaussian_fwhm_7(t, args):
ampl_7 = args['ampl_7']
fwhm_7 = args['fwhm_7']
centre_7 = args['centre_7']
return ampl_7*exp(-4*log(2)*((t - centre_7)/fwhm_7)**2)
def ramp_on_1(t, args):
ampl_1 = args['ampl_1']
width_1 = args['width_1']
centre_1 = args['centre_1']
return ampl_1*(exp(-2*log(2)*((t - centre_1)/width_1)**2)*(t <= centre_1) +
(t > centre_1))
def ramp_on_2(t, args):
ampl_2 = args['ampl_2']
width_2 = args['width_2']
centre_2 = args['centre_2']
return ampl_2*(exp(-2*log(2)*((t - centre_2)/width_2)**2)*(t <= centre_2) +
(t > centre_2))
def ramp_on_3(t, args):
ampl_3 = args['ampl_3']
width_3 = args['width_3']
centre_3 = args['centre_3']
return ampl_3*(exp(-2*log(2)*((t - centre_3)/width_3)**2)*(t <= centre_3) +
(t > centre_3))
def ramp_off_1(t, args):
ampl_1 = args['ampl_1']
width_1 = args['width_1']
centre_1 = args['centre_1']
return ampl_1*(exp(-2*log(2)*((t - centre_1)/width_1)**2)*(t >= centre_1) +
(t < centre_1))
def ramp_off_2(t, args):
ampl_2 = args['ampl_2']
width_2 = args['width_2']
centre_2 = args['centre_2']
return ampl_2*(exp(-2*log(2)*((t - centre_2)/width_2)**2)*(t >= centre_2) +
(t < centre_2))
def ramp_off_3(t, args):
ampl_3 = args['ampl_3']
width_3 = args['width_3']
centre_3 = args['centre_3']
return ampl_3*(exp(-2*log(2)*((t - centre_3)/width_3)**2)*(t >= centre_3) +
(t < centre_3))
def ramp_onoff_1(t, args):
ampl_1 = args['ampl_1']
width_1 = args['width_1']
on_1 = args['on_1']
off_1 = args['off_1']
ramp_on = (exp(-2*log(2)*((t - on_1)/width_1)**2)*
(t <= on_1) + (t > on_1))
ramp_off = (exp(-2*log(2)*((t - off_1)/width_1)**2)*
(t >= off_1) + (t < off_1))
return ampl_1*(ramp_on + ramp_off - 1.)
def ramp_onoff_2(t, args):
ampl_2 = args['ampl_2']
width_2 = args['width_2']
on_2 = args['on_2']
off_2 = args['off_2']
ramp_on = (exp(-2*log(2)*((t - on_2)/width_2)**2)*
(t <= on_2) + (t > on_2))
ramp_off = (exp(-2*log(2)*((t - off_2)/width_2)**2)*
(t >= off_2) + (t < off_2))
return ampl_2*(ramp_on + ramp_off - 1.)
def ramp_onoff_3(t, args):
ampl_3 = args['ampl_3']
width_3 = args['width_3']
on_3 = args['on_3']
off_3 = args['off_3']
ramp_on = (exp(-2*log(2)*((t - on_3)/width_3)**2)*
(t <= on_3) + (t > on_3))
ramp_off = (exp(-2*log(2)*((t - off_3)/width_3)**2)*
(t >= off_3) + (t < off_3))
return ampl_3*(ramp_on + ramp_off - 1.)
def sinc_1(t, args):
ampl = args['ampl_1']
width = args['width_1']
return ampl*sinc(width*t)/sqrt(pi/2.)
def sech_1(t, args):
ampl_1 = args['ampl_1']
width_1 = args['width_1']
centre_1 = args['centre_1']
return ampl_1*sech((t - centre_1)/width_1)
def sech_2(t, args):
ampl_2 = args['ampl_2']
width_2 = args['width_2']
centre_2 = args['centre_2']
return ampl_2*sech((t - centre_2)/width_2)
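# Illustrative usage (a sketch, not part of the original module): every profile
# takes the solver-style signature f(t, args), so a single args dict can carry
# the parameters of several time-dependent terms at once, e.g.
#
#   args = {'ampl_1': 1.0, 'width_1': 0.5, 'centre_1': 2.0,
#           'on_2': 0.0, 'off_2': 4.0, 'ampl_2': 0.3}
#   gaussian_1(1.8, args)  # Gaussian pulse envelope for field 1
#   square_2(1.8, args)    # square (CW-style) profile for field 2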
|
the-stack_106_28805 | import converters
import math
import random
import sys
def random_real(a, b):
"""
Random real between a and b inclusively.
"""
return a + random.random() * (b - a)
def branch_length(depth):
"""
Somewhat random length of the branch. Play around
with this to achieve a desired tree structure.
"""
return math.log(depth) * random_real(.5, 1)
def branch_angle(initial_lean, max_lean):
"""
Somewhat random angle of the branch. Play around
with this to achieve a desired tree structure.
"""
return initial_lean + max_lean * random_real(-.5, .5)
def branches(x0, y0, depth, nfurcation, max_lean, initial_lean):
"""
Make a tree!
"""
# maximum depth achieved, stop adding branches
# maybe add a fruit or flower here
if not depth:
return []
angle = branch_angle(initial_lean, max_lean)
length = branch_length(depth)
# branch is the line segment (x0, y0) - (x1, y1)
x1 = x0 + length*math.sin(angle)
y1 = y0 + length*math.cos(angle)
# construct the branch
# the depth -- or inverse height -- is stored so that the
# rendering code can use it to vary the thickness of the
# branches, color, etc.
new_branches = [[depth, [[x0, y0], [x1, y1]]]]
# number of branches
n = random.randint(1, nfurcation)
# branches growing out of this branch
for _ in xrange(n):
# angle of the current branch becomes the initial lean
new_branches.extend(
branches(x1, y1, depth-1, nfurcation, max_lean, angle)
)
return new_branches
def main():
tree = branches(
# origin
0, 0,
# 11 branches from trunk to crown
11,
# at each juncture, there's either 1 or 2 branches
2,
# the branch can deviate 90/2=45 degrees in either direction
math.pi/2,
# initial lean [bias] is zero degrees
0
)
print(converters.to_mathematica(tree))
if __name__ == '__main__':
main()
|
the-stack_106_28806 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_stalled_unresponsive_tags(apps, schema_editor):
"""Add "stalled" and "unresponsive" tags."""
Tag = apps.get_model('workshops', 'Tag')
Tag.objects.create(
name='stalled',
details='Events with lost contact with the host or TTT events that '
'aren\'t running.',
)
Tag.objects.create(
name='unresponsive',
details='Events whose hosts and/or organizers aren\'t going to send '
'attendance data',
)
class Migration(migrations.Migration):
dependencies = [
('workshops', '0061_merge'),
]
operations = [
migrations.RunPython(add_stalled_unresponsive_tags),
migrations.AlterField(
model_name='event',
name='tags',
field=models.ManyToManyField(help_text="<ul><li><i>stalled</i> — for events with lost contact with the host or TTT events that aren't running.</li><li><i>unresponsive</i> – for events whose hosts and/or organizers aren't going to send us attendance data.</li></ul>", to='workshops.Tag'),
),
]
|
the-stack_106_28807 | #
# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [.25, .25, .25, .25])
b1 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0])
pad0 = Int32Scalar("pad0", 0)
act = Int32Scalar("act", 0)
stride = Int32Scalar("stride", 1)
# output dimension:
# (i1.height - f1.height + 1) x (i1.width - f1.width + 1)
output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
[1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 1.0]}
output0 = {output: # output 0
[.875, .875, .875, .875]}
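# Worked check: with a 2x2 filter of 0.25s and zero bias, every output element
# is the mean of its 2x2 input window, e.g. (1.0 + 1.0 + 1.0 + 0.5)*0.25 = 0.875,
# which matches output0 above.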
# Instantiate an example
Example((input0, output0))
|
the-stack_106_28808 | from django.test import TestCase
from django.utils.html import escape
from unittest import skip
from lists.models import Item, List
from lists.forms import ItemForm, ExistingListItemForm, EMPTY_ITEM_ERROR, DUPLICATE_ITEM_ERROR
class HomePageTest(TestCase):
def test_home_page_renders_home_template(self):
response = self.client.get('/')
self.assertTemplateUsed(response, 'home.html')
def test_home_page_uses_item_form(self):
response = self.client.get('/')
self.assertIsInstance(response.context['form'], ItemForm)
class ListViewTest(TestCase):
def test_uses_list_template(self):
list_ = List.objects.create()
response = self.client.get('/lists/%d/' % (list_.id,))
self.assertTemplateUsed(response, 'list.html')
def test_displays_only_items_for_that_list(self):
correct_list = List.objects.create()
Item.objects.create(text='itemey1', list=correct_list)
Item.objects.create(text='itemey2', list=correct_list)
other_list = List.objects.create()
Item.objects.create(text='other item1', list=other_list)
Item.objects.create(text='other item2', list=other_list)
response = self.client.get('/lists/%d/' % (correct_list.id,))
self.assertContains(response, 'itemey1')
self.assertContains(response, 'itemey2')
self.assertNotContains(response, 'other item1')
self.assertNotContains(response, 'other item2')
def test_passes_correct_list_to_template(self):
other_list = List.objects.create()
correct_list = List.objects.create()
response = self.client.get('/lists/%d/' % (correct_list.id,))
self.assertEqual(response.context['list'], correct_list)
def test_displays_all_list_items(self):
list_ = List()
list_.save()
Item.objects.create(text='item1', list=list_)
Item.objects.create(text='item2', list=list_)
response = self.client.get('/lists/%d/' % (list_.id,))
self.assertContains(response, 'item1')
self.assertContains(response, 'item2')
def test_saving_a_post_request(self):
self.client.post('/lists/new',
data={'text': 'A new list item'})
self.assertEqual(Item.objects.count(), 1)
new_item = Item.objects.first()
self.assertEqual(new_item.text, 'A new list item')
def test_redirects_after_post(self):
response = self.client.post('/lists/new',
data={'text': 'A new list item'})
new_list = List.objects.first()
self.assertRedirects(response, '/lists/%d/' % (new_list.id,))
def test_can_save_a_post_request_to_an_existing_list(self):
other_list = List.objects.create()
correct_list = List.objects.create()
self.client.post('/lists/%d/' % (correct_list.id,),
data={'text': 'A new item for an existing list'})
self.assertEqual(Item.objects.count(), 1)
new_item = Item.objects.first()
self.assertEqual(new_item.text, 'A new item for an existing list')
self.assertEqual(new_item.list, correct_list)
def test_post_redirects_to_list_view(self):
other_list = List.objects.create()
correct_list = List.objects.create()
response = self.client.post('/lists/%d/' % (correct_list.id,),
data={'text': 'A new item for an existing list'})
self.assertRedirects(response, '/lists/%d/' % (correct_list.id,))
def test_for_invalid_input_renders_home_template(self):
response = self.client.post('/lists/new',
data={'text': ''})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'home.html')
def test_validation_errors_are_shown_on_home_page(self):
response = self.client.post('/lists/new',
data={'text': ''})
self.assertContains(response, escape(EMPTY_ITEM_ERROR))
def test_for_invalid_input_passes_form_to_template(self):
response = self.client.post('/lists/new',
data={'text': ''})
self.assertIsInstance(response.context['form'], ItemForm)
def test_invalid_list_items_arent_saved(self):
self.client.post('/lists/new', data={'text': ''})
self.assertEqual(List.objects.count(), 0)
self.assertEqual(Item.objects.count(), 0)
def post_invalid_input(self):
list_ = List.objects.create()
return self.client.post('/lists/%d/' % (list_.id,),
data={'text': ''})
def test_for_invalid_input_nothing_saved_to_db(self):
self.post_invalid_input()
self.assertEqual(Item.objects.count(), 0)
def test_for_invalid_input_renders_list_template(self):
response = self.post_invalid_input()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'list.html')
def test_for_invalid_input_passes_form_to_template(self):
response = self.post_invalid_input()
self.assertIsInstance(response.context['form'], ExistingListItemForm)
def test_for_invalid_input_shows_error_on_page(self):
response = self.post_invalid_input()
self.assertContains(response, escape(EMPTY_ITEM_ERROR))
def test_duplicate_item_validation_errors_end_up_on_lists_page(self):
list1 = List.objects.create()
item1 = Item.objects.create(list=list1, text='textey')
response = self.client.post(
'/lists/%d/' % (list1.id,),
data={'text': 'textey'}
)
expected_error = escape(DUPLICATE_ITEM_ERROR)
self.assertContains(response, expected_error)
self.assertTemplateUsed(response, 'list.html')
self.assertEqual(Item.objects.all().count(), 1)
def test_displays_item_form(self):
list_ = List.objects.create()
response = self.client.get('/lists/%d/' % (list_.id,))
self.assertIsInstance(response.context['form'], ExistingListItemForm)
self.assertContains(response, 'name="text"')
|
the-stack_106_28810 | #!/usr/bin/env python
import plac
import re
import random
import json
from pathlib import Path
from collections import Counter
import thinc.extra.datasets
import spacy
import torch
from spacy.util import minibatch
import tqdm
import unicodedata
import wasabi
from spacy_pytorch_transformers.util import cyclic_triangular_rate
@plac.annotations(
model=("Model name", "positional", None, str),
input_dir=("Optional input directory", "option", "i", Path),
output_dir=("Optional output directory", "option", "o", Path),
use_test=("Whether to use the actual test set", "flag", "E"),
batch_size=("Number of docs per batch", "option", "bs", int),
learn_rate=("Learning rate", "option", "lr", float),
max_wpb=("Max words per sub-batch", "option", "wpb", int),
n_texts=("Number of texts to train from", "option", "t", int),
n_iter=("Number of training epochs", "option", "n", int),
pos_label=("Positive label for evaluation", "option", "pl", str),
)
def main(
model,
input_dir=None,
output_dir=None,
n_iter=5,
n_texts=100,
batch_size=8,
learn_rate=2e-5,
max_wpb=1000,
use_test=False,
pos_label=None,
):
spacy.util.fix_random_seed(0)
is_using_gpu = spacy.prefer_gpu()
if is_using_gpu:
torch.set_default_tensor_type("torch.cuda.FloatTensor")
if output_dir is not None:
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir()
nlp = spacy.load(model)
print(nlp.pipe_names)
print(f"Loaded model '{model}'")
textcat = nlp.create_pipe(
"pytt_textcat", config={"architecture": "softmax_last_hidden", "words_per_batch": max_wpb}
)
if input_dir is not None:
train_texts, train_cats = read_inputs(input_dir / "training.jsonl")
eval_texts, eval_cats = read_inputs(input_dir / "evaluation.jsonl")
labels = set()
for cats in train_cats + eval_cats:
labels.update(cats)
# use the first label in the set as the positive label if one isn't
# provided
for label in sorted(labels):
if not pos_label:
pos_label = label
textcat.add_label(label)
else:
# add label to text classifier
textcat.add_label("POSITIVE")
textcat.add_label("NEGATIVE")
if not pos_label:
pos_label = "POSITIVE"
# load the IMDB dataset
print("Loading IMDB data...")
if use_test:
(train_texts, train_cats), (
eval_texts,
eval_cats,
) = load_data_for_final_test(limit=n_texts)
else:
(train_texts, train_cats), (eval_texts, eval_cats) = load_data(
limit=n_texts
)
print("Labels:", textcat.labels)
print("Positive label for evaluation:", pos_label)
nlp.add_pipe(textcat, last=True)
print(f"Using {len(train_texts)} training docs, {len(eval_texts)} evaluation")
split_training_by_sentence = False
if split_training_by_sentence:
# If we're using a model that averages over sentence predictions (we are),
# there are some advantages to just labelling each sentence as an example.
# It means we can mix the sentences into different batches, so we can make
# more frequent updates. It also changes the loss somewhat, in a way that's
# not obviously better -- but it does seem to work well.
train_texts, train_cats = make_sentence_examples(nlp, train_texts, train_cats)
print(f"Extracted {len(train_texts)} training sents")
# total_words = sum(len(text.split()) for text in train_texts)
train_data = list(zip(train_texts, [{"cats": cats} for cats in train_cats]))
# Initialize the TextCategorizer, and create an optimizer.
optimizer = nlp.resume_training()
optimizer.alpha = 0.001
optimizer.pytt_weight_decay = 0.005
optimizer.L2 = 0.0
learn_rates = cyclic_triangular_rate(
learn_rate / 3, learn_rate * 3, 2 * len(train_data) // batch_size
)
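# Note on the schedule (descriptive comment, based on the call above): the
# learning rate follows a triangular cycle between learn_rate/3 and
# learn_rate*3; the third argument, 2 * len(train_data) // batch_size,
# presumably sets the cycle length in optimizer steps.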
print("Training the model...")
print("{:^5}\t{:^5}\t{:^5}\t{:^5}".format("LOSS", "P", "R", "F"))
pbar = tqdm.tqdm(total=100, leave=False)
results = []
epoch = 0
step = 0
eval_every = 100
patience = 3
while True:
# Train and evaluate
losses = Counter()
random.shuffle(train_data)
batches = minibatch(train_data, size=batch_size)
for batch in batches:
optimizer.pytt_lr = next(learn_rates)
texts, annotations = zip(*batch)
nlp.update(texts, annotations, sgd=optimizer, drop=0.1, losses=losses)
pbar.update(1)
if step and (step % eval_every) == 0:
pbar.close()
with nlp.use_params(optimizer.averages):
scores = evaluate(nlp, eval_texts, eval_cats, pos_label)
results.append((scores["textcat_f"], step, epoch))
print(
"{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}".format(
losses["pytt_textcat"],
scores["textcat_p"],
scores["textcat_r"],
scores["textcat_f"],
)
)
pbar = tqdm.tqdm(total=eval_every, leave=False)
step += 1
epoch += 1
# Stop if there has been no improvement in the last `patience` evaluation checkpoints
if results:
best_score, best_step, best_epoch = max(results)
if ((step - best_step) // eval_every) >= patience:
break
msg = wasabi.Printer()
table_widths = [2, 4, 6]
msg.info(f"Best scoring checkpoints")
msg.row(["Epoch", "Step", "Score"], widths=table_widths)
msg.row(["-" * width for width in table_widths])
for score, step, epoch in sorted(results, reverse=True)[:10]:
msg.row([epoch, step, "%.2f" % (score * 100)], widths=table_widths)
# Test the trained model
test_text = eval_texts[0]
doc = nlp(test_text)
print(test_text, doc.cats)
if output_dir is not None:
nlp.to_disk(output_dir)
print("Saved model to", output_dir)
# test the saved model
print("Loading from", output_dir)
nlp2 = spacy.load(output_dir)
doc2 = nlp2(test_text)
print(test_text, doc2.cats)
def read_inputs(input_path):
texts = []
cats = []
with input_path.open(mode="r") as file_:
for line in file_:
text, gold = json.loads(line)
text = preprocess_text(text)
texts.append(text)
cats.append(gold["cats"])
return texts, cats
def make_sentence_examples(nlp, texts, labels):
"""Treat each sentence of the document as an instance, using the doc labels."""
sents = []
sent_cats = []
for text, cats in zip(texts, labels):
doc = nlp.make_doc(text)
doc = nlp.get_pipe("sentencizer")(doc)
for sent in doc.sents:
sents.append(sent.text)
sent_cats.append(cats)
return sents, sent_cats
white_re = re.compile(r"\s\s+")
def preprocess_text(text):
text = text.replace("<s>", "<open-s-tag>")
text = text.replace("</s>", "<close-s-tag>")
text = white_re.sub(" ", text).strip()
return "".join(
c for c in unicodedata.normalize("NFD", text) if unicodedata.category(c) != "Mn"
)
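# Illustrative behaviour: preprocess_text("café  <s>x</s>") returns
# "cafe <open-s-tag>x<close-s-tag>"; the tags are escaped, repeated whitespace
# collapses to a single space, and NFD normalization drops combining accents.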
def load_data(*, limit=0, dev_size=2000):
"""Load data from the IMDB dataset, splitting off a held-out set."""
if limit != 0:
limit += dev_size
assert dev_size != 0
train_data, _ = thinc.extra.datasets.imdb(limit=limit)
assert len(train_data) > dev_size
random.shuffle(train_data)
dev_data = train_data[:dev_size]
train_data = train_data[dev_size:]
train_texts, train_labels = _prepare_partition(train_data, preprocess=False)
dev_texts, dev_labels = _prepare_partition(dev_data, preprocess=False)
return (train_texts, train_labels), (dev_texts, dev_labels)
def load_data_for_final_test(*, limit=0):
print(
"Warning: Using test data. You should use development data for most experiments."
)
train_data, test_data = thinc.extra.datasets.imdb()
random.shuffle(train_data)
train_data = train_data[-limit:]
train_texts, train_labels = _prepare_partition(train_data)
test_texts, test_labels = _prepare_partition(test_data)
return (train_texts, train_labels), (test_texts, test_labels)
def _prepare_partition(text_label_tuples, *, preprocess=False):
texts, labels = zip(*text_label_tuples)
if preprocess:
# Preprocessing can mask errors in our handling of noisy text, so
# we don't want to do it by default
texts = [preprocess_text(text) for text in texts]
cats = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)} for y in labels]
return texts, cats
def evaluate(nlp, texts, cats, pos_label):
tp = 0.0 # True positives
fp = 0.0 # False positives
fn = 0.0 # False negatives
tn = 0.0 # True negatives
total_words = sum(len(text.split()) for text in texts)
with tqdm.tqdm(total=total_words, leave=False) as pbar:
for i, doc in enumerate(nlp.pipe(texts, batch_size=8)):
gold = cats[i]
for label, score in doc.cats.items():
if label not in gold:
continue
if label != pos_label:
continue
if score >= 0.5 and gold[label] >= 0.5:
tp += 1.0
elif score >= 0.5 and gold[label] < 0.5:
fp += 1.0
elif score < 0.5 and gold[label] < 0.5:
tn += 1
elif score < 0.5 and gold[label] >= 0.5:
fn += 1
pbar.update(len(doc.text.split()))
precision = tp / (tp + fp + 1e-8)
recall = tp / (tp + fn + 1e-8)
if (precision + recall) == 0:
f_score = 0.0
else:
f_score = 2 * (precision * recall) / (precision + recall)
return {"textcat_p": precision, "textcat_r": recall, "textcat_f": f_score}
if __name__ == "__main__":
plac.call(main)
|
the-stack_106_28811 | import numpy as np
class Node(object):
def __init__(self,label,root,level_n=0):
self.children = []
self.father = None
self.root = root
self.label = label
self.name = "-"
self.level_n = level_n
self.pos = -1
def __str__(self, level=0):
image = "\t" * level + repr(self.name) +"-" +repr(self.label) + "\n"
for child in self.children:
image += child.__str__(level + 1)
return image
def __repr__(self):
return str(self.name)+"-"+str(self.label)
def add_node(self,label):
node=Node(label,self.root,self.level_n+1)
if len(self.root)<=node.level_n:
self.root.append([])
self.root[node.level_n].append(node)
node.root=self.root
self.children.append(node)
node.father=self
return node
def put_name(self):
stack = [self]
i=0
while stack:
cur_node = stack[0]
stack = stack[1:]
cur_node.name = i
i += 1
for child in cur_node.children:
stack.append(child)
def get_node(self,name):
stack = [self]
while stack:
cur_node = stack[0]
if cur_node.name == name:
return cur_node
stack = stack[1:]
for child in cur_node.children:
stack.append(child)
return None
def get_label(self,name):
aux = self.get_node(name)
if aux != None:
return aux.label
else:
return None
def pa(self,name):
aux = self.get_node(name)
if aux != None:
return aux.father
else:
return None
def ch_l(self,name,l):
aux = self.get_node(name)
if aux != None and len(aux.children) > l:
return aux.children[l].name
else:
return None
def posizione(self):
if self != None:
if self.father != None:
return self.father.children.index(self)
else:
return 0
else:
return None
def get_reverse_children(self):
children = self.children[:]
children.reverse()
return children
def make_linear_tree(self,max_child,max_level,max_label):
if(max_level>0):
for i in range(0,max_child):
self.add_node(((i*7)+3)%max_label)
self.children[i].make_linear_tree(max_child,max_level-1,max_label)
class Tree(object):
def __init__(self,label,classe,level_n=0):
self.struct = []
self.struct.append([])
self.t = Node(label,self.struct)
self.struct[0].append(self.t)
self.size=None
self.classe=classe
self.max_child=66
self.leaves_n=None
self.no_leaves_n=None
self.N_L=np.zeros(self.max_child)
self.N_I=None
self.N_II=np.zeros(self.max_child)
def __str__(self):
return str(self.classe)+"-" +self.t.__str__()
def __repr__(self):
return self.t.__repr__()
def set_name(l_t):
i=0
for level in l_t.struct:
for node in level:
node.name=i
if node.father != None:
node.pos = node.father.children.index(node)+1
else:
node.pos = 0
i=i+1
l_t.size=i
l_t.N_I=l_t.size -len(l_t.struct[-1])
def divide_leaves(self):
self.struct.append([])
stack = [self.t]
while stack:
cur_node=stack[0]
stack = stack[1:]
if not cur_node.children:
self.struct[-1].append(cur_node)
self.struct[cur_node.level_n].remove(cur_node)
else:
for child in cur_node.get_reverse_children():
stack.insert(0, child)
del self.struct[-2]
def set_N_L(self):
for node in self.struct[-1]:
self.N_L[node.pos-1]=self.N_L[node.pos-1] + 1
for level in self.struct[:-1]:
for node in level:
for child in node.children:
self.N_II[child.pos - 1] = self.N_II[child.pos - 1] + 1
|
the-stack_106_28812 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_mon_alarm
short_description: Create or delete a Rackspace Cloud Monitoring alarm.
description:
- Create or delete a Rackspace Cloud Monitoring alarm that associates an
existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with
criteria that specify what conditions will trigger which levels of
notifications. Rackspace monitoring module flow | rax_mon_entity ->
rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan ->
*rax_mon_alarm*
version_added: "2.0"
options:
state:
description:
- Ensure that the alarm with this C(label) exists or does not exist.
choices: [ "present", "absent" ]
required: false
default: present
label:
description:
- Friendly name for this alarm, used to achieve idempotence. Must be a String
between 1 and 255 characters long.
required: true
entity_id:
description:
- ID of the entity this alarm is attached to. May be acquired by registering
the value of a rax_mon_entity task.
required: true
check_id:
description:
- ID of the check that should be alerted on. May be acquired by registering
the value of a rax_mon_check task.
required: true
notification_plan_id:
description:
- ID of the notification plan to trigger if this alarm fires. May be acquired
by registering the value of a rax_mon_notification_plan task.
required: true
criteria:
description:
- Alarm DSL that describes alerting conditions and their output states. Must
be between 1 and 16384 characters long. See
http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html
for a reference on the alerting language.
disabled:
description:
- If yes, create this alarm, but leave it in an inactive state. Defaults to
no.
choices: [ "yes", "no" ]
metadata:
description:
- Arbitrary key/value pairs to accompany the alarm. Must be a hash of String
keys and values between 1 and 255 characters long.
author: Ash Wilson
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Alarm example
gather_facts: False
hosts: local
connection: local
tasks:
- name: Ensure that a specific alarm exists.
rax_mon_alarm:
credentials: ~/.rax_pub
state: present
label: uhoh
entity_id: "{{ the_entity['entity']['id'] }}"
check_id: "{{ the_check['check']['id'] }}"
notification_plan_id: "{{ defcon1['notification_plan']['id'] }}"
criteria: >
if (rate(metric['average']) > 10) {
return new AlarmStatus(WARNING);
}
return new AlarmStatus(OK);
register: the_alarm
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria,
disabled, metadata):
if len(label) < 1 or len(label) > 255:
module.fail_json(msg='label must be between 1 and 255 characters long')
if criteria and len(criteria) < 1 or len(criteria) > 16384:
module.fail_json(msg='criteria must be between 1 and 16384 characters long')
# Coerce attributes.
changed = False
alarm = None
cm = pyrax.cloud_monitoring
if not cm:
module.fail_json(msg='Failed to instantiate client. This typically '
'indicates an invalid region or an incorrectly '
'capitalized region name.')
existing = [a for a in cm.list_alarms(entity_id) if a.label == label]
if existing:
alarm = existing[0]
if state == 'present':
should_create = False
should_update = False
should_delete = False
if len(existing) > 1:
module.fail_json(msg='%s existing alarms have the label %s.' %
(len(existing), label))
if alarm:
if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id:
should_delete = should_create = True
should_update = (disabled and disabled != alarm.disabled) or \
(metadata and metadata != alarm.metadata) or \
(criteria and criteria != alarm.criteria)
if should_update and not should_delete:
cm.update_alarm(entity=entity_id, alarm=alarm,
criteria=criteria, disabled=disabled,
label=label, metadata=metadata)
changed = True
if should_delete:
alarm.delete()
changed = True
else:
should_create = True
if should_create:
alarm = cm.create_alarm(entity=entity_id, check=check_id,
notification_plan=notification_plan_id,
criteria=criteria, disabled=disabled, label=label,
metadata=metadata)
changed = True
else:
for a in existing:
a.delete()
changed = True
if alarm:
alarm_dict = {
"id": alarm.id,
"label": alarm.label,
"check_id": alarm.check_id,
"notification_plan_id": alarm.notification_plan_id,
"criteria": alarm.criteria,
"disabled": alarm.disabled,
"metadata": alarm.metadata
}
module.exit_json(changed=changed, alarm=alarm_dict)
else:
module.exit_json(changed=changed)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
label=dict(required=True),
entity_id=dict(required=True),
check_id=dict(required=True),
notification_plan_id=dict(required=True),
criteria=dict(),
disabled=dict(type='bool', default=False),
metadata=dict(type='dict')
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
state = module.params.get('state')
label = module.params.get('label')
entity_id = module.params.get('entity_id')
check_id = module.params.get('check_id')
notification_plan_id = module.params.get('notification_plan_id')
criteria = module.params.get('criteria')
disabled = module.boolean(module.params.get('disabled'))
metadata = module.params.get('metadata')
setup_rax_module(module, pyrax)
alarm(module, state, label, entity_id, check_id, notification_plan_id,
criteria, disabled, metadata)
if __name__ == '__main__':
main()
|
the-stack_106_28815 | """
Uses the Twitter API to collect the data required for inferring the relationship between two users
Requires access to the Twitter API for inference
"""
import tweepy
import numpy as np
import json
from tqdm import tqdm
def get_api_credentials():
credentials={}
with open('data/credentials.txt') as f:
for line in f:
category,key = line.strip().split(':')
credentials[category.strip()]=key.strip()
# authorization of consumer key and consumer secret
auth = tweepy.OAuthHandler(credentials['consumer_key'], credentials['consumer_secret'])
# set access to user's access key and access secret
auth.set_access_token(credentials['access_token'], credentials['access_token_secret'])
# calling the api
api = tweepy.API(auth,wait_on_rate_limit=True,wait_on_rate_limit_notify=True)
print("Loaded Tweepy instance of the Twitter API!")
return api
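# Expected layout of data/credentials.txt (one "name : value" pair per line,
# with names matching the keys read above), for example:
#   consumer_key: ...
#   consumer_secret: ...
#   access_token: ...
#   access_token_secret: ...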
def get_dyads(dyad_file='data/dyad-examples.txt'):
dyad_list=[]
with open(dyad_file) as f:
for line in f:
dyad_list.append(line.strip().split(','))
return dyad_list
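# data/dyad-examples.txt is expected to hold one dyad per line, written as
# "<user_a>,<user_b>", where each entry is a screen name or numeric user ID.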
def process_twitter_data():
"""
Processes the data collected from Tweepy into a form that can be run by the model
"""
# load the API
api = get_api_credentials()
def extract_text(tweet):
if 'full_text' in tweet:
return tweet['full_text']
elif 'text' in tweet:
return tweet['text']
else:
return None
out_list = []
list_of_dyads = get_dyads()
print("Getting the data for dyads")
for user_name_or_id_1,user_name_or_id_2 in tqdm(list_of_dyads):
# get user objects and store user info into dictionary
user1 = api.get_user(user_name_or_id_1)._json
user2 = api.get_user(user_name_or_id_2)._json
aid,bid = user1['id_str'],user2['id_str']
out_obj={'aid':aid,'bid':bid,
'a_data':{'name':[user1['screen_name'],user1['name']],'bio':user1['description'],
'direct-mention':[],'public-mention':[],'retweets':[]},
'b_data':{'name':[user2['screen_name'],user2['name']],'bio':user2['description'],
'direct-mention':[],'public-mention':[],'retweets':[]},
}
networked_users = {aid:set(),bid:set()}
vol_users = {aid:0,bid:0}
for uid1,uid2,direction in [(aid,bid,'a_data'),(bid,aid,'b_data')]:
# get all tweets from a user's timeline
all_tweets = []
oldest_id=None
while True:
tweets = api.user_timeline(user_id=uid1,count=200,include_rts=True,
max_id=oldest_id,tweet_mode='extended')
if len(tweets) == 0:
break
oldest_id = tweets[-1].id-1
all_tweets.extend([tweet._json for tweet in tweets])
relevant_tweets = []
for tweet in all_tweets:
if 'retweeted_status' in tweet:
uid_rt = tweet['retweeted_status']['user']['id_str']
networked_users[uid1].add(uid_rt)
if uid_rt == uid2:
relevant_tweets.append(('retweets', extract_text(tweet['retweeted_status'])))
elif 'quoted_status' in tweet:
uid_qt = tweet['quoted_status']['user']['id_str']
networked_users[uid1].add(uid_qt)
if uid_qt == uid2:
relevant_tweets.append(('retweets', extract_text(tweet['quoted_status'])))
else:
if tweet['in_reply_to_user_id_str']:
uid_rp = tweet['in_reply_to_user_id_str']
networked_users[uid1].add(uid_rp)
if uid_rp == uid2:
relevant_tweets.append(('direct-mention', extract_text(tweet)))
else:
uids_mn = [x['id_str'] for x in tweet['entities']['user_mentions']]
networked_users[uid1].update(uids_mn)
if uid2 in uids_mn:
relevant_tweets.append(('public-mention', extract_text(tweet)))
# update with tweets
for typ,tweet in relevant_tweets:
out_obj[direction][typ].append(tweet)
# get count-norm
arr = np.array([len(out_obj[direction]['direct-mention']),len(out_obj[direction]['public-mention']),
len(out_obj[direction]['retweets'])])
vol_users[uid1]=arr.sum()
arr = arr/max(1,arr.sum())
out_obj[direction]['count_norm']=arr.tolist()
# fill none values with dummy
for typ,V in out_obj[direction].items():
if len(V)==0:
out_obj[direction][typ]=['<None>']
# get Jaccard index based on neighbors
jacc = len(networked_users[aid] & networked_users[bid]) / len(networked_users[aid] | networked_users[bid])
# get reciprocity score based on activity
rec = 1-np.abs(vol_users[aid]-vol_users[bid])/(vol_users[aid]+vol_users[bid])
out_obj['network']=[jacc,rec]
out_list.append(out_obj)
# save out-list
print("Saving to data/sample_outputs.json...")
with open('data/sample_outputs.json','w') as outf:
for obj in out_list:
outf.write(json.dumps(obj)+'\n')
return
if __name__=='__main__':
process_twitter_data() |
the-stack_106_28816 | import ctypes
import sdl2
from event_dispatcher import EventDispatcher
class EventLoop:
def __init__(self, window):
self._event_dispatcher = EventDispatcher(self, window)
self._running = False
rotate_event = sdl2.SDL_Event()
rotate_event.type = EventDispatcher.ROTATE_EVENT
self._window = window
self._rotate_event_pointer = ctypes.byref(rotate_event)
def run(self):
self._running = True
while self._running:
if self._window.can_rotate:
sdl2.SDL_PushEvent(self._rotate_event_pointer)
self._receive_events()
def stop(self):
self._running = False
def _receive_events(self):
event = sdl2.SDL_Event()
event_pointer = ctypes.byref(event)
while self._running and sdl2.SDL_PollEvent(event_pointer) != 0:
self._event_dispatcher.dispatch(event)
|
the-stack_106_28817 | import os
from pathlib import Path
from envparse import env
# load environment variables from .env
app_dir: Path = Path(__file__).parent.parent
env_file = app_dir / ".env"
if os.path.isfile(env_file):
env.read_envfile(env_file)
BOT_API_TOKEN = env.str("BOT_API_TOKEN", default="")
SERVERLESS = env.bool("SERVERLESS", default=False)
WEBHOOK_HOST = env.str("WEBHOOK_HOST", default="")
WEBAPP_HOST = env.str("HOST", default="0.0.0.0")
WEBAPP_PORT = env.int("PORT", default=3000)
|
the-stack_106_28818 | # Setup paths for module imports
import gc
# Import required modules
from pyaedt import Emit
from pyaedt.generic.filesystem import Scratch
from pyaedt.modeler.PrimitivesEmit import EmitComponent, EmitComponents
from _unittest.conftest import scratch_path, config
try:
import pytest
except ImportError:
import _unittest_ironpython.conf_unittest as pytest
class TestClass:
def setup_class(self):
# set a scratch directory and the environment / test data
with Scratch(scratch_path) as self.local_scratch:
self.aedtapp = Emit()
def teardown_class(self):
self.aedtapp._desktop.ClearMessages("", "", 3)
assert self.aedtapp.close_project(saveproject=False)
self.local_scratch.remove()
gc.collect()
def test_objects(self):
assert self.aedtapp.solution_type
assert isinstance(self.aedtapp.existing_analysis_setups, list)
assert isinstance(self.aedtapp.setup_names, list)
assert isinstance(self.aedtapp.modeler.components, EmitComponents)
assert self.aedtapp.modeler
assert self.aedtapp.oanalysis is None
@pytest.mark.skipif(config["build_machine"], reason="Not functional in non-graphical mode")
def test_create_components(self):
radio = self.aedtapp.modeler.components.create_component(
"New Radio", "TestRadio")
assert radio.name == "TestRadio"
assert isinstance(radio, EmitComponent)
antenna = self.aedtapp.modeler.components.create_component(
"Antenna", "TestAntenna")
assert antenna.name == "TestAntenna"
assert isinstance(antenna, EmitComponent)
@pytest.mark.skipif(config["build_machine"], reason="Not functional in non-graphical mode")
@pytest.mark.skipif(config["desktopVersion"] < "2021.2", reason="Skipped on versions lower than 2021.2")
def test_connect_components(self):
radio = self.aedtapp.modeler.components.create_component("New Radio")
antenna = self.aedtapp.modeler.components.create_component("Antenna")
antenna.move_and_connect_to(radio)
antenna_port = antenna.port_names()[0] # antennas have 1 port
radio_port = radio.port_names()[0] # radios have 1 port
connected_comp, connected_port = antenna.port_connection(antenna_port)
assert connected_comp == radio.name
assert connected_port == radio_port
# Verify None,None is returned for an unconnected port
radio2 = self.aedtapp.modeler.components.create_component("New Radio")
radio2_port = radio2.port_names()[0]
connected_comp, connected_port = radio2.port_connection(radio2_port)
assert connected_comp is None
assert connected_port is None
@pytest.mark.skipif(config["build_machine"], reason="Not functional in non-graphical mode")
def test_radio_component(self):
radio = self.aedtapp.modeler.components.create_component("New Radio")
# default radio has 1 Tx channel and 1 Rx channel
assert radio.has_rx_channels()
assert radio.has_tx_channels()
# test band.enabled to confirm component properties can be get/set
assert len(radio.bands()) > 0
band = radio.bands()[0]
assert band.enabled
band.enabled = False
assert not band.enabled
|
the-stack_106_28819 | from typing import Optional, Union, Dict, Any
from algoliasearch.configs import RecommendationConfig
from algoliasearch.helpers import is_async_available
from algoliasearch.http.request_options import RequestOptions
from algoliasearch.http.requester import Requester
from algoliasearch.http.transporter import Transporter
from algoliasearch.http.verb import Verb
class RecommendationClient(object):
def __init__(self, transporter, config):
# type: (Transporter, RecommendationConfig) -> None
self._transporter = transporter
self._config = config
@staticmethod
def create(app_id=None, api_key=None, region=None):
# type: (Optional[str], Optional[str], Optional[str]) -> RecommendationClient # noqa: E501
config = RecommendationConfig(app_id, api_key, region)
return RecommendationClient.create_with_config(config)
@staticmethod
def create_with_config(config):
# type: (RecommendationConfig) -> RecommendationClient
requester = Requester()
transporter = Transporter(requester, config)
client = RecommendationClient(transporter, config)
if is_async_available():
from algoliasearch.recommendation_client_async import \
RecommendationClientAsync
from algoliasearch.http.transporter_async import \
TransporterAsync
from algoliasearch.http.requester_async import RequesterAsync
return RecommendationClientAsync(
client, TransporterAsync(RequesterAsync(), config), config
)
return client
def set_personalization_strategy(self, personalization_strategy,
request_options=None): # noqa: E501
# type: (dict, Optional[Union[dict, RequestOptions]]) -> dict
return self._transporter.write(
Verb.POST,
'1/strategies/personalization',
personalization_strategy,
request_options
)
def get_personalization_strategy(self, request_options=None):
# type: (Optional[Union[dict, RequestOptions]]) -> dict
return self._transporter.read(
Verb.GET,
'1/strategies/personalization',
None,
request_options
)
def close(self):
# type: () -> None
return self._transporter.close() # type: ignore
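# Minimal usage sketch (illustrative; the IDs and strategy body are placeholders):
#   client = RecommendationClient.create("YourApplicationID", "YourAPIKey")
#   client.set_personalization_strategy({...})        # write a strategy
#   strategy = client.get_personalization_strategy()  # read it back
#   client.close()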
|
the-stack_106_28823 | # yellowbrick.model_selection.rfecv
# Visualize the number of features selected with recursive feature elimination
#
# Author: Benjamin Bengfort
# Created: Tue Apr 03 17:31:37 2018 -0400
#
# Copyright (C) 2018 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: rfecv.py [a4599db] [email protected] $
"""
Visualize the number of features selected using recursive feature elimination
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
from yellowbrick.base import ModelVisualizer
from yellowbrick.exceptions import YellowbrickValueError
from sklearn.utils import check_X_y
from sklearn.feature_selection import RFE
from sklearn.model_selection import cross_val_score
##########################################################################
## Recursive Feature Elimination
##########################################################################
class RFECV(ModelVisualizer):
"""
Recursive Feature Elimination, Cross-Validated (RFECV) feature selection.
Selects the best subset of features for the supplied estimator by removing
0 to N features (where N is the number of features) using recursive
feature elimination, then selecting the best subset based on the
cross-validation score of the model. Recursive feature elimination
eliminates n features from a model by fitting the model multiple times and
at each step, removing the weakest features, determined by either the
``coef_`` or ``feature_importances_`` attribute of the fitted model.
The visualization plots the score relative to each subset and shows trends
in feature elimination. If the feature elimination CV score is flat, then
potentially there are not enough features in the model. An ideal curve is
when the score jumps from low to high as the number of features removed
increases, then slowly decreases again from the optimal number of
features.
Parameters
----------
estimator : a scikit-learn estimator
An object that implements ``fit`` and provides information about the
relative importance of features with either a ``coef_`` or
``feature_importances_`` attribute.
Note that the object is cloned for each validation.
ax : matplotlib.Axes object, optional
The axes object to plot the figure on.
step : int or float, optional (default=1)
If greater than or equal to 1, then step corresponds to the (integer)
number of features to remove at each iteration. If within (0.0, 1.0),
then step corresponds to the percentage (rounded down) of features to
remove at each iteration.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
see the scikit-learn
`cross-validation guide <http://scikit-learn.org/stable/modules/cross_validation.html>`_
for more information on the possible strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string or scorer callable object / function with signature
``scorer(estimator, X, y)``. See scikit-learn model evaluation
documentation for names of possible metrics.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Attributes
----------
n_features_ : int
The number of features in the selected subset
support_ : array of shape [n_features]
A mask of the selected features
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranked position of feature i. Selected features are assigned rank 1.
cv_scores_ : array of shape [n_subsets_of_features, n_splits]
The cross-validation scores for each subset of features and splits in
the cross-validation strategy.
rfe_estimator_ : sklearn.feature_selection.RFE
A fitted RFE estimator wrapping the original estimator. All estimator
functions such as ``predict()`` and ``score()`` are passed through to
this estimator (it rewraps the original model).
n_feature_subsets_ : array of shape [n_subsets_of_features]
The number of features removed on each iteration of RFE, computed by the
number of features in the dataset and the step parameter.
Notes
-----
This model wraps ``sklearn.feature_selection.RFE`` and not
``sklearn.feature_selection.RFECV`` because access to the internals of the
CV and RFE estimators is required for the visualization. The visualizer
does take similar arguments, however it does not expose the same internal
attributes.
Additionally, the RFE model can be accessed via the ``rfe_estimator_``
attribute. Once fitted, the visualizer acts as a wrapper for this
estimator and not for the original model passed to the model. This way the
visualizer model can be used to make predictions.
.. caution:: This visualizer requires a model that has either a ``coef_``
or ``feature_importances_`` attribute when fitted.
"""
def __init__(
self, estimator, ax=None, step=1, groups=None, cv=None, scoring=None, **kwargs
):
# Initialize the model visualizer
super(RFECV, self).__init__(estimator, ax=ax, **kwargs)
# Set parameters
self.step = step
self.groups = groups
self.cv = cv
self.scoring = scoring
def fit(self, X, y=None):
"""
Fits the RFECV with the wrapped model to the specified data and draws
the rfecv curve with the optimal number of features found.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression.
Returns
-------
self : instance
Returns the instance of the RFECV visualizer.
"""
X, y = check_X_y(X, y, "csr")
n_features = X.shape[1]
# This check is kind of unnecessary since RFE will do it, but it's
# nice to get it out of the way ASAP and raise a meaningful error.
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise YellowbrickValueError("step must be >0")
# Create the RFE model
rfe = RFE(self.estimator, step=step)
self.n_feature_subsets_ = np.arange(1, n_features + step, step)
# Create the cross validation params
# TODO: handle random state
cv_params = {key: self.get_params()[key] for key in ("groups", "cv", "scoring")}
# Perform cross-validation for each feature subset
scores = []
for n_features_to_select in self.n_feature_subsets_:
rfe.set_params(n_features_to_select=n_features_to_select)
scores.append(cross_val_score(rfe, X, y, **cv_params))
# Convert scores to array
self.cv_scores_ = np.array(scores)
# Find the best RFE model
bestidx = self.cv_scores_.mean(axis=1).argmax()
self.n_features_ = self.n_feature_subsets_[bestidx]
# Fit the final RFE model for the number of features
self.rfe_estimator_ = rfe
self.rfe_estimator_.set_params(n_features_to_select=self.n_features_)
self.rfe_estimator_.fit(X, y)
# Rewrap the visualizer to use the rfe estimator
self._wrapped = self.rfe_estimator_
# Hoist the RFE params to the visualizer
self.support_ = self.rfe_estimator_.support_
self.ranking_ = self.rfe_estimator_.ranking_
self.draw()
return self
def draw(self, **kwargs):
"""
Renders the rfecv curve.
"""
# Compute the curves
x = self.n_feature_subsets_
means = self.cv_scores_.mean(axis=1)
sigmas = self.cv_scores_.std(axis=1)
# Plot one standard deviation above and below the mean
self.ax.fill_between(x, means - sigmas, means + sigmas, alpha=0.25)
# Plot the curve
self.ax.plot(x, means, "o-")
# Plot the maximum number of features
self.ax.axvline(
self.n_features_,
c="k",
ls="--",
label="n_features = {}\nscore = {:0.3f}".format(
self.n_features_, self.cv_scores_.mean(axis=1).max()
),
)
return self.ax
def finalize(self, **kwargs):
"""
Add the title, legend, and other visual final touches to the plot.
"""
# Set the title of the figure
self.set_title("RFECV for {}".format(self.name))
# Add the legend
self.ax.legend(frameon=True, loc="best")
# Set the axis labels
self.ax.set_xlabel("Number of Features Selected")
self.ax.set_ylabel("Score")
##########################################################################
## Quick Methods
##########################################################################
def rfecv(
estimator,
X,
y,
ax=None,
step=1,
groups=None,
cv=None,
scoring=None,
show=True,
**kwargs
):
"""
Performs recursive feature elimination with cross-validation to determine
an optimal number of features for a model. Visualizes the feature subsets
with respect to the cross-validation score.
This helper function is a quick wrapper to utilize the RFECV visualizer
for one-off analysis.
Parameters
----------
estimator : a scikit-learn estimator
An object that implements ``fit`` and provides information about the
relative importance of features with either a ``coef_`` or
``feature_importances_`` attribute.
Note that the object is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression.
ax : matplotlib.Axes object, optional
The axes object to plot the figure on.
step : int or float, optional (default=1)
If greater than or equal to 1, then step corresponds to the (integer)
number of features to remove at each iteration. If within (0.0, 1.0),
then step corresponds to the percentage (rounded down) of features to
remove at each iteration.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
see the scikit-learn
`cross-validation guide <http://scikit-learn.org/stable/modules/cross_validation.html>`_
for more information on the possible strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string or scorer callable object / function with signature
``scorer(estimator, X, y)``. See scikit-learn model evaluation
documentation for names of possible metrics.
show: bool, default: True
If True, calls ``show()``, which in turn calls ``plt.show()`` however you cannot
call ``plt.savefig`` from this signature, nor ``clear_figure``. If False, simply
calls ``finalize()``
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers. These arguments are
also passed to the `show()` method, e.g. can pass a path to save the
figure to.
Returns
-------
viz : RFECV
Returns the fitted, finalized visualizer.
"""
# Initialize the visualizer
oz = RFECV(
estimator, ax=ax, step=step, groups=groups, cv=cv, scoring=scoring, show=show
)
# Fit and show the visualizer
oz.fit(X, y)
if show:
oz.show()
else:
oz.finalize()
# Return the visualizer object
return oz
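# Illustrative use of the quick method (a sketch; any estimator exposing
# ``coef_`` or ``feature_importances_`` works, a linear SVC is one example):
#
#   from sklearn.svm import SVC
#   from sklearn.datasets import make_classification
#   X, y = make_classification(n_samples=200, n_features=20, random_state=0)
#   viz = rfecv(SVC(kernel="linear"), X, y, cv=3)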
|
the-stack_106_28824 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class route_func(nn.Module):
r"""CondConv: Conditionally Parameterized Convolutions for Efficient Inference
https://papers.nips.cc/paper/8412-condconv-conditionally-parameterized-convolutions-for-efficient-inference.pdf
Args:
c_in (int): Number of channels in the input image
num_experts (int): Number of experts for mixture. Default: 1
"""
def __init__(self, c_in, num_experts):
super(route_func, self).__init__()
self.avgpool = nn.AdaptiveAvgPool2d(output_size=1)
self.fc = nn.Linear(c_in, num_experts)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
x = self.sigmoid(x)
return x
class CondConv2d(nn.Module):
r"""CondConv: Conditionally Parameterized Convolutions for Efficient Inference
https://papers.nips.cc/paper/8412-condconv-conditionally-parameterized-convolutions-for-efficient-inference.pdf
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
num_experts (int): Number of experts for mixture. Default: 1
"""
def __init__(self, in_channels, out_channels, kernel_size,
stride=1, padding=0, dilation=1, groups=1, bias=True,
num_experts=1, norm=None):
super(CondConv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.num_experts = num_experts
self.weight = nn.Parameter(
torch.Tensor(num_experts, out_channels, in_channels // groups, kernel_size, kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(num_experts, out_channels))
else:
self.register_parameter('bias', None)
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
self.norm = norm
def forward(self, x, routing_weight):
b, c_in, h, w = x.size()
k, c_out, c_in, kh, kw = self.weight.size()
x = x.view(1, -1, h, w)
weight = self.weight.view(k, -1)
combined_weight = torch.mm(routing_weight, weight).view(-1, c_in, kh, kw)
if self.bias is not None:
combined_bias = torch.mm(routing_weight, self.bias).view(-1)
output = F.conv2d(
x, weight=combined_weight, bias=combined_bias, stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=self.groups * b)
else:
output = F.conv2d(
x, weight=combined_weight, bias=None, stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=self.groups * b)
output = output.view(b, c_out, output.size(-2), output.size(-1))
if self.norm is not None:
output = self.norm(output)
return output |
the-stack_106_28826 | # copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import paddle
import os, sys
import copy as cp
import cv2
import math
try:
import ppdet
except ImportError as e:
print(
f"{e}, [paddledet] package and it's dependencies is required for AVA.")
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
from paddlevideo.modeling.builder import build_model
from paddlevideo.utils import get_config
from paddlevideo.loader.builder import build_dataloader, build_dataset, build_pipeline
from paddlevideo.metrics.ava_utils import read_labelmap
import time
from os import path as osp
import numpy as np
from paddlevideo.utils import get_config
import pickle
from paddlevideo.utils import (get_logger, load, mkdir, save)
import shutil
FONTFACE = cv2.FONT_HERSHEY_DUPLEX
FONTSCALE = 0.5
FONTCOLOR = (255, 255, 255) # BGR, white
MSGCOLOR = (128, 128, 128) # BGR, gray
THICKNESS = 1
LINETYPE = 1
def hex2color(h):
"""Convert the 6-digit hex string to tuple of 3 int value (RGB)"""
return (int(h[:2], 16), int(h[2:4], 16), int(h[4:], 16))
plate_blue = '03045e-023e8a-0077b6-0096c7-00b4d8-48cae4'
plate_blue = plate_blue.split('-')
plate_blue = [hex2color(h) for h in plate_blue]
plate_green = '004b23-006400-007200-008000-38b000-70e000'
plate_green = plate_green.split('-')
plate_green = [hex2color(h) for h in plate_green]
def abbrev(name):
"""Get the abbreviation of label name:
'take (an object) from (a person)' -> 'take ... from ...'
"""
while name.find('(') != -1:
st, ed = name.find('('), name.find(')')
name = name[:st] + '...' + name[ed + 1:]
return name
# annotations are the prediction results
def visualize(frames, annotations, plate=plate_blue, max_num=5):
"""Visualize frames with predicted annotations.
Args:
frames (list[np.ndarray]): Frames for visualization, note that
len(frames) % len(annotations) should be 0.
annotations (list[list[tuple]]): The predicted results.
plate (str): The plate used for visualization. Default: plate_blue.
max_num (int): Max number of labels to visualize for a person box.
            Default: 5. Currently this must not be greater than 5.
Returns:
list[np.ndarray]: Visualized frames.
"""
assert max_num + 1 <= len(plate)
plate = [x[::-1] for x in plate]
frames_ = cp.deepcopy(frames)
nf, na = len(frames), len(annotations)
assert nf % na == 0
nfpa = len(frames) // len(annotations)
anno = None
h, w, _ = frames[0].shape
    # proposals are normalized; scale them back to real pixel coordinates
scale_ratio = np.array([w, h, w, h])
for i in range(na):
anno = annotations[i]
if anno is None:
continue
for j in range(nfpa):
ind = i * nfpa + j
frame = frames_[ind]
for ann in anno:
box = ann[0]
label = ann[1]
if not len(label):
continue
score = ann[2]
box = (box * scale_ratio).astype(np.int64)
st, ed = tuple(box[:2]), tuple(box[2:])
cv2.rectangle(frame, st, ed, plate[0], 2)
for k, lb in enumerate(label):
if k >= max_num:
break
text = abbrev(lb)
text = ': '.join([text, str(score[k])])
location = (0 + st[0], 18 + k * 18 + st[1])
textsize = cv2.getTextSize(text, FONTFACE, FONTSCALE,
THICKNESS)[0]
textwidth = textsize[0]
diag0 = (location[0] + textwidth, location[1] - 14)
diag1 = (location[0], location[1] + 2)
cv2.rectangle(frame, diag0, diag1, plate[k + 1], -1)
cv2.putText(frame, text, location, FONTFACE, FONTSCALE,
FONTCOLOR, THICKNESS, LINETYPE)
return frames_
def frame_extraction(video_path, target_dir):
"""Extract frames given video_path.
Args:
video_path (str): The video_path.
"""
if not os.path.exists(target_dir):
os.makedirs(target_dir, exist_ok=True)
# Should be able to handle videos up to several hours
frame_tmpl = osp.join(target_dir, '{:05d}.jpg')
vid = cv2.VideoCapture(video_path)
    FPS = int(vid.get(5))  # 5 == cv2.CAP_PROP_FPS
frames = []
frame_paths = []
flag, frame = vid.read()
index = 1
while flag:
frames.append(frame)
frame_path = frame_tmpl.format(index)
frame_paths.append(frame_path)
cv2.imwrite(frame_path, frame)
index += 1
flag, frame = vid.read()
return frame_paths, frames, FPS
def parse_args():
def str2bool(v):
return v.lower() in ("true", "t", "1")
# general params
parser = argparse.ArgumentParser("PaddleVideo Inference model script")
parser.add_argument('-c',
'--config',
type=str,
default='configs/example.yaml',
help='config file path')
parser.add_argument('--video_path', help='video file/url')
parser.add_argument('-o',
'--override',
action='append',
default=[],
help='config options to be overridden')
parser.add_argument('-w',
'--weights',
type=str,
help='weights for finetuning or testing')
#detection_model_name
parser.add_argument('--detection_model_name',
help='the name of detection model ')
# detection_model_weights
parser.add_argument('--detection_model_weights',
help='the weights path of detection model ')
# params for predict
parser.add_argument('--out-filename',
default='ava_det_demo.mp4',
help='output filename')
parser.add_argument('--predict-stepsize',
default=8,
type=int,
help='give out a prediction per n frames')
parser.add_argument(
'--output-stepsize',
default=4,
type=int,
help=('show one frame per n frames in the demo, we should have: '
'predict_stepsize % output_stepsize == 0'))
parser.add_argument('--output-fps',
default=6,
type=int,
help='the fps of demo video output')
return parser.parse_args()
# Results for one frame, sorted by descending probability.
def pack_result(human_detection, result):
"""Short summary.
Args:
human_detection (np.ndarray): Human detection result.
result (type): The predicted label of each human proposal.
Returns:
tuple: Tuple of human proposal, label name and label score.
"""
results = []
if result is None:
return None
for prop, res in zip(human_detection, result):
res.sort(key=lambda x: -x[1])
results.append((prop, [x[0] for x in res], [x[1] for x in res]))
return results
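# Illustrative sketch (not part of the original file): for one proposal box and its
# per-class (label, score) pairs,
#   pack_result(np.array([[0.1, 0.2, 0.5, 0.9]]), [[('stand', 0.9), ('walk', 0.3)]])
# returns [(array([0.1, 0.2, 0.5, 0.9]), ['stand', 'walk'], [0.9, 0.3])],
# i.e. labels and scores are re-ordered by descending score.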
# Build the results dict required by the data-processing pipeline
def get_timestep_result(frame_dir, timestamp, clip_len, frame_interval, FPS):
result = {}
result["frame_dir"] = frame_dir
frame_num = len(os.listdir(frame_dir))
dir_name = frame_dir.split("/")[-1]
result["video_id"] = dir_name
result['timestamp'] = timestamp
timestamp_str = '{:04d}'.format(timestamp)
img_key = dir_name + "," + timestamp_str
result['img_key'] = img_key
result['shot_info'] = (1, frame_num)
result['fps'] = FPS
result['suffix'] = '{:05}.jpg'
result['timestamp_start'] = 1
result['timestamp_end'] = int(frame_num / result['fps'])
return result
def detection_inference(frame_paths, output_dir, model_name, weights_path):
"""Detect human boxes given frame paths.
Args:
frame_paths (list[str]): The paths of frames to do detection inference.
Returns:
list[np.ndarray]: The human detection results.
"""
detection_cfg = ppdet.model_zoo.get_config_file(model_name)
detection_cfg = ppdet.core.workspace.load_config(detection_cfg)
detection_trainer = ppdet.engine.Trainer(detection_cfg, mode='test')
detection_trainer.load_weights(weights_path)
print('Performing Human Detection for each frame')
detection_trainer.predict(frame_paths, output_dir=output_dir, save_txt=True)
print("finish object detection")
results = []
for frame_path in frame_paths:
(file_dir, file_name) = os.path.split(frame_path)
(file_path, ext) = os.path.splitext(frame_path)
txt_file_name = file_name.replace(ext, ".txt")
txt_path = os.path.join(output_dir, txt_file_name)
results.append(txt_path)
return results
def get_detection_result(txt_file_path, img_h, img_w, person_det_score_thr):
"""
    Parse a detection result file to obtain the human bounding boxes (proposals)
    and their confidence scores (scores) for one image.
    txt_file_path: path to the detection result file
    img_h: image height
    img_w: image width
    person_det_score_thr: minimum score for a person detection to be kept
"""
proposals = []
scores = []
with open(txt_file_path, 'r') as detection_file:
lines = detection_file.readlines()
for line in lines: # person 0.9842637181282043 0.0 469.1407470703125 944.7770385742188 831.806396484375
items = line.split(" ")
            if items[0] != 'person':  # keep person detections only
continue
score = items[1]
if (float)(score) < person_det_score_thr:
continue
x1 = (float(items[2])) / img_w
y1 = ((float)(items[3])) / img_h
box_w = ((float)(items[4]))
box_h = ((float)(items[5]))
x2 = (float(items[2]) + box_w) / img_w
y2 = (float(items[3]) + box_h) / img_h
scores.append(score)
proposals.append([x1, y1, x2, y2])
return np.array(proposals), np.array(scores)
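# Illustrative sketch (not part of the original file): a detection line such as
#   person 0.98 10.0 20.0 50.0 100.0
# in an image with img_w=200 and img_h=400 becomes the normalized corner box
# [10/200, 20/400, (10+50)/200, (20+100)/400] = [0.05, 0.05, 0.3, 0.3];
# note that the score '0.98' is appended as a string.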
@paddle.no_grad()
def main(args):
config = get_config(args.config, show=False) #parse config file
# extract frames from video
video_path = args.video_path
frame_dir = 'tmp_frames'
frame_paths, frames, FPS = frame_extraction(video_path, frame_dir)
    num_frame = len(frame_paths)  # approximately video duration in seconds * FPS
assert num_frame != 0
print("Frame Number:", num_frame)
    # frame height and width
h, w, _ = frames[0].shape
# Get clip_len, frame_interval and calculate center index of each clip
    data_process_pipeline = build_pipeline(config.PIPELINE.test)  # test-time data processing pipeline
clip_len = config.PIPELINE.test.sample['clip_len']
assert clip_len % 2 == 0, 'We would like to have an even clip_len'
frame_interval = config.PIPELINE.test.sample['frame_interval']
    # one key frame is sampled per second here
window_size = clip_len * frame_interval
timestamps = np.arange(window_size // 2, (num_frame + 1 - window_size // 2),
args.predict_stepsize)
print("timetamps number:", len(timestamps))
# get selected frame list according to timestamps
selected_frame_list = []
for timestamp in timestamps:
selected_frame_list.append(frame_paths[timestamp - 1])
# Load label_map
label_map_path = config.DATASET.test['label_file']
categories, class_whitelist = read_labelmap(open(label_map_path))
label_map = {}
for item in categories:
id = item['id']
name = item['name']
label_map[id] = name
# Construct model.
if config.MODEL.backbone.get('pretrained'):
config.MODEL.backbone.pretrained = '' # disable pretrain model init
model = build_model(config.MODEL)
model.eval()
state_dicts = load(args.weights)
model.set_state_dict(state_dicts)
detection_result_dir = 'tmp_detection'
detection_model_name = args.detection_model_name
detection_model_weights = args.detection_model_weights
detection_txt_list = detection_inference(selected_frame_list,
detection_result_dir,
detection_model_name,
detection_model_weights)
assert len(detection_txt_list) == len(timestamps)
print('Performing SpatioTemporal Action Detection for each clip')
human_detections = []
predictions = []
index = 0
for timestamp, detection_txt_path in zip(timestamps, detection_txt_list):
proposals, scores = get_detection_result(
detection_txt_path, h, w,
(float)(config.DATASET.test['person_det_score_thr']))
if proposals.shape[0] == 0:
predictions.append(None)
human_detections.append(None)
continue
human_detections.append(proposals)
result = get_timestep_result(frame_dir,
timestamp,
clip_len,
frame_interval,
FPS=FPS)
result["proposals"] = proposals
result["scores"] = scores
new_result = data_process_pipeline(result)
proposals = new_result['proposals']
img_slow = new_result['imgs'][0]
img_slow = img_slow[np.newaxis, :]
img_fast = new_result['imgs'][1]
img_fast = img_fast[np.newaxis, :]
proposals = proposals[np.newaxis, :]
scores = scores[np.newaxis, :]
img_shape = np.asarray(new_result['img_shape'])
img_shape = img_shape[np.newaxis, :]
data = [
paddle.to_tensor(img_slow, dtype='float32'),
paddle.to_tensor(img_fast, dtype='float32'),
paddle.to_tensor(proposals, dtype='float32'), scores,
paddle.to_tensor(img_shape, dtype='int32')
]
with paddle.no_grad():
result = model(data, mode='infer')
result = result[0]
prediction = []
person_num = proposals.shape[1]
# N proposals
for i in range(person_num):
prediction.append([])
# Perform action score thr
for i in range(len(result)):
if i + 1 not in class_whitelist:
continue
for j in range(person_num):
if result[i][j, 4] > config.MODEL.head['action_thr']:
                    prediction[j].append((label_map[i + 1], result[i][j, 4]))
predictions.append(prediction)
index = index + 1
if index % 10 == 0:
print(index, "/", len(timestamps))
results = []
for human_detection, prediction in zip(human_detections, predictions):
results.append(pack_result(human_detection, prediction))
def dense_timestamps(timestamps, n):
"""Make it nx frames."""
old_frame_interval = (timestamps[1] - timestamps[0])
start = timestamps[0] - old_frame_interval / n * (n - 1) / 2
new_frame_inds = np.arange(
len(timestamps) * n) * old_frame_interval / n + start
        return new_frame_inds.astype(np.int64)  # np.int was removed in NumPy 1.24+
dense_n = int(args.predict_stepsize / args.output_stepsize) #30
frames = [
cv2.imread(frame_paths[i - 1])
for i in dense_timestamps(timestamps, dense_n)
]
vis_frames = visualize(frames, results)
try:
import moviepy.editor as mpy
except ImportError:
raise ImportError('Please install moviepy to enable output file')
vid = mpy.ImageSequenceClip([x[:, :, ::-1] for x in vis_frames],
fps=args.output_fps)
vid.write_videofile(args.out_filename)
print("finish write !")
# delete tmp files and dirs
shutil.rmtree(frame_dir)
shutil.rmtree(detection_result_dir)
if __name__ == '__main__':
    args = parse_args()  # parse command-line arguments
main(args)
|
the-stack_106_28827 | import pytest
from bayesian.factor_graph import *
def f_prize_door(prize_door):
return 1.0 / 3
def f_guest_door(guest_door):
return 1.0 / 3
def f_monty_door(prize_door, guest_door, monty_door):
if prize_door == guest_door:
if prize_door == monty_door:
return 0
else:
return 0.5
elif prize_door == monty_door:
return 0
elif guest_door == monty_door:
return 0
return 1
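# Illustrative check (not part of the original test) of the Monty CPT above:
#   f_monty_door('A', 'A', 'B') == 0.5  # prize behind the guest's door: Monty picks either other door
#   f_monty_door('B', 'A', 'C') == 1    # prize elsewhere: Monty's choice is forced
#   f_monty_door('B', 'A', 'B') == 0    # Monty never opens the prize door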
def pytest_funcarg__monty_graph(request):
g = build_graph(
f_prize_door,
f_guest_door,
f_monty_door,
domains=dict(
prize_door=['A', 'B', 'C'],
guest_door=['A', 'B', 'C'],
monty_door=['A', 'B', 'C']))
return g
class TestPersistance():
def test_create_sqlite_db_when_inference_method_changed(self, monty_graph):
assert monty_graph.inference_method == 'sumproduct'
# Now switch the inference_method to sample_db...
monty_graph.inference_method = 'sample_db'
assert monty_graph.inference_method == 'sample_db'
|
the-stack_106_28828 | BACKEND = 'sqlite'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS = (
'django_pivot.tests.pivot',
)
SITE_ID = 1
SECRET_KEY = 'secret'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
)
|
the-stack_106_28829 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ConnectedMachineConfiguration(Configuration):
"""Configuration for ConnectedMachine.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(ConnectedMachineConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2021-05-20"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-hybridcompute/{}'.format(VERSION))
self._configure(**kwargs)
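    # Illustrative usage sketch (assumption, not part of the generated code); normally the
    # ConnectedMachine client builds this configuration for you:
    #   from azure.identity.aio import DefaultAzureCredential
    #   config = ConnectedMachineConfiguration(DefaultAzureCredential(), "<subscription-id>")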
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
|
the-stack_106_28834 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Utilities related to loss functions."""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.engine import keras_tensor
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.losses.Reduction', v1=[])
class ReductionV2:
"""Types of loss reduction.
Contains the following values:
* `AUTO`: Indicates that the reduction option will be determined by the usage
context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
used with `tf.distribute.Strategy`, outside of built-in training loops such
as `tf.keras` `compile` and `fit`, we expect reduction value to be
`SUM` or `NONE`. Using `AUTO` in that case will raise an error.
* `NONE`: No **additional** reduction is applied to the output of the wrapped
loss function. When non-scalar losses are returned to Keras functions like
`fit`/`evaluate`, the unreduced vector loss is passed to the optimizer
but the reported loss will be a scalar value.
Caution: **Verify the shape of the outputs when using** `Reduction.NONE`.
The builtin loss functions wrapped by the loss classes reduce
one dimension (`axis=-1`, or `axis` if specified by loss function).
`Reduction.NONE` just means that no **additional** reduction is applied by
the class wrapper. For categorical losses with an example input shape of
`[batch, W, H, n_classes]` the `n_classes` dimension is reduced. For
pointwise losses you must include a dummy axis so that `[batch, W, H, 1]`
is reduced to `[batch, W, H]`. Without the dummy axis `[batch, W, H]`
will be incorrectly reduced to `[batch, W]`.
* `SUM`: Scalar sum of weighted losses.
* `SUM_OVER_BATCH_SIZE`: Scalar `SUM` divided by number of elements in losses.
This reduction type is not supported when used with
`tf.distribute.Strategy` outside of built-in training loops like `tf.keras`
`compile`/`fit`.
You can implement 'SUM_OVER_BATCH_SIZE' using global batch size like:
```
with strategy.scope():
loss_obj = tf.keras.losses.CategoricalCrossentropy(
reduction=tf.keras.losses.Reduction.NONE)
....
loss = tf.reduce_sum(loss_obj(labels, predictions)) *
(1. / global_batch_size)
```
Please see the [custom training guide](
https://www.tensorflow.org/tutorials/distribute/custom_training) for more
details on this.
"""
AUTO = 'auto'
NONE = 'none'
SUM = 'sum'
SUM_OVER_BATCH_SIZE = 'sum_over_batch_size'
@classmethod
def all(cls):
return (cls.AUTO, cls.NONE, cls.SUM, cls.SUM_OVER_BATCH_SIZE)
@classmethod
def validate(cls, key):
if key not in cls.all():
raise ValueError(
f'Invalid Reduction Key: {key}. Expected keys are "{cls.all()}"')
def remove_squeezable_dimensions(
labels, predictions, expected_rank_diff=0, name=None):
"""Squeeze last dim if ranks differ from expected by exactly 1.
In the common case where we expect shapes to match, `expected_rank_diff`
defaults to 0, and we squeeze the last dimension of the larger rank if they
differ by 1.
But, for example, if `labels` contains class IDs and `predictions` contains 1
probability per class, we expect `predictions` to have 1 more dimension than
`labels`, so `expected_rank_diff` would be 1. In this case, we'd squeeze
`labels` if `rank(predictions) - rank(labels) == 0`, and
`predictions` if `rank(predictions) - rank(labels) == 2`.
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
labels: Label values, a `Tensor` whose dimensions match `predictions`.
predictions: Predicted values, a `Tensor` of arbitrary dimensions.
expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.
name: Name of the op.
Returns:
Tuple of `labels` and `predictions`, possibly with last dim squeezed.
"""
with backend.name_scope(name or 'remove_squeezable_dimensions'):
if not isinstance(predictions, tf.RaggedTensor):
predictions = tf.convert_to_tensor(predictions)
if not isinstance(labels, tf.RaggedTensor):
labels = tf.convert_to_tensor(labels)
predictions_shape = predictions.shape
predictions_rank = predictions_shape.ndims
labels_shape = labels.shape
labels_rank = labels_shape.ndims
if (labels_rank is not None) and (predictions_rank is not None):
# Use static rank.
rank_diff = predictions_rank - labels_rank
if (rank_diff == expected_rank_diff + 1 and
predictions_shape.dims[-1].is_compatible_with(1)):
predictions = tf.squeeze(predictions, [-1])
elif (rank_diff == expected_rank_diff - 1 and
labels_shape.dims[-1].is_compatible_with(1)):
labels = tf.squeeze(labels, [-1])
return labels, predictions
# Use dynamic rank.
rank_diff = tf.rank(predictions) - tf.rank(labels)
if (predictions_rank is None) or (
predictions_shape.dims[-1].is_compatible_with(1)):
predictions = tf.cond(
tf.equal(expected_rank_diff + 1, rank_diff),
lambda: tf.squeeze(predictions, [-1]),
lambda: predictions)
if (labels_rank is None) or (
labels_shape.dims[-1].is_compatible_with(1)):
labels = tf.cond(
tf.equal(expected_rank_diff - 1, rank_diff),
lambda: tf.squeeze(labels, [-1]),
lambda: labels)
return labels, predictions
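# Illustrative shape example (not part of the original module): with labels of shape
# (batch, 1) and predictions of shape (batch,), the trailing 1 is squeezed from labels
# so both tensors come back with shape (batch,).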
def squeeze_or_expand_dimensions(y_pred, y_true=None, sample_weight=None):
"""Squeeze or expand last dimension if needed.
1. Squeezes last dim of `y_pred` or `y_true` if their rank differs by 1
(using `remove_squeezable_dimensions`).
2. Squeezes or expands last dim of `sample_weight` if its rank differs by 1
from the new rank of `y_pred`.
If `sample_weight` is scalar, it is kept scalar.
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
y_pred: Predicted values, a `Tensor` of arbitrary dimensions.
y_true: Optional label `Tensor` whose dimensions match `y_pred`.
sample_weight: Optional weight scalar or `Tensor` whose dimensions match
`y_pred`.
Returns:
Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has
the last dimension squeezed,
`sample_weight` could be extended by one dimension.
If `sample_weight` is None, (y_pred, y_true) is returned.
"""
y_pred_shape = y_pred.shape
y_pred_rank = y_pred_shape.ndims
if y_true is not None:
# If sparse matrix is provided as `y_true`, the last dimension in `y_pred`
# may be > 1. Eg: y_true = [0, 1, 2] (shape=(3,)),
# y_pred = [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]] (shape=(3, 3))
# In this case, we should not try to remove squeezable dimension.
y_true_shape = y_true.shape
y_true_rank = y_true_shape.ndims
if (y_true_rank is not None) and (y_pred_rank is not None):
# Use static rank for `y_true` and `y_pred`.
if (y_pred_rank - y_true_rank != 1) or y_pred_shape[-1] == 1:
y_true, y_pred = remove_squeezable_dimensions(
y_true, y_pred)
else:
# Use dynamic rank.
rank_diff = tf.rank(y_pred) - tf.rank(y_true)
squeeze_dims = lambda: remove_squeezable_dimensions( # pylint: disable=g-long-lambda
y_true, y_pred)
is_last_dim_1 = tf.equal(1, tf.shape(y_pred)[-1])
maybe_squeeze_dims = lambda: tf.cond( # pylint: disable=g-long-lambda
is_last_dim_1, squeeze_dims, lambda: (y_true, y_pred))
y_true, y_pred = tf.cond(
tf.equal(1, rank_diff), maybe_squeeze_dims, squeeze_dims)
if sample_weight is None:
return y_pred, y_true
weights_shape = sample_weight.shape
weights_rank = weights_shape.ndims
if weights_rank == 0: # If weights is scalar, do nothing.
return y_pred, y_true, sample_weight
if (y_pred_rank is not None) and (weights_rank is not None):
# Use static rank.
if weights_rank - y_pred_rank == 1:
sample_weight = tf.squeeze(sample_weight, [-1])
elif y_pred_rank - weights_rank == 1:
sample_weight = tf.expand_dims(sample_weight, [-1])
return y_pred, y_true, sample_weight
# Use dynamic rank.
weights_rank_tensor = tf.rank(sample_weight)
rank_diff = weights_rank_tensor - tf.rank(y_pred)
maybe_squeeze_weights = lambda: tf.squeeze(sample_weight, [-1])
def _maybe_expand_weights():
expand_weights = lambda: tf.expand_dims(sample_weight, [-1])
return tf.cond(
tf.equal(rank_diff, -1), expand_weights, lambda: sample_weight)
def _maybe_adjust_weights():
return tf.cond(
tf.equal(rank_diff, 1), maybe_squeeze_weights,
_maybe_expand_weights)
# squeeze or expand last dim of `sample_weight` if its rank differs by 1
# from the new rank of `y_pred`.
sample_weight = tf.cond(
tf.equal(weights_rank_tensor, 0), lambda: sample_weight,
_maybe_adjust_weights)
return y_pred, y_true, sample_weight
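# Illustrative shape example (not part of the original module): with y_pred and y_true of
# shape (batch, 1) and sample_weight of shape (batch,), y_pred and y_true are returned
# unchanged while sample_weight is expanded to (batch, 1) so it broadcasts against the
# per-sample losses.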
def _safe_mean(losses, num_present):
"""Computes a safe mean of the losses.
Args:
losses: `Tensor` whose elements contain individual loss measurements.
num_present: The number of measurable elements in `losses`.
Returns:
A scalar representing the mean of `losses`. If `num_present` is zero,
then zero is returned.
"""
total_loss = tf.reduce_sum(losses)
return tf.math.divide_no_nan(total_loss, num_present, name='value')
def _num_elements(losses):
"""Computes the number of elements in `losses` tensor."""
with backend.name_scope('num_elements') as scope:
return tf.cast(tf.size(losses, name=scope), dtype=losses.dtype)
def reduce_weighted_loss(weighted_losses,
reduction=ReductionV2.SUM_OVER_BATCH_SIZE):
"""Reduces the individual weighted loss measurements."""
if reduction == ReductionV2.NONE:
loss = weighted_losses
else:
loss = tf.reduce_sum(weighted_losses)
if reduction == ReductionV2.SUM_OVER_BATCH_SIZE:
loss = _safe_mean(loss, _num_elements(weighted_losses))
return loss
@keras_export('keras.__internal__.losses.compute_weighted_loss', v1=[])
def compute_weighted_loss(losses,
sample_weight=None,
reduction=ReductionV2.SUM_OVER_BATCH_SIZE,
name=None):
"""Computes the weighted loss.
Args:
losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
sample_weight: Optional `Tensor` whose rank is either 0, or the same rank as
`losses`, or be broadcastable to `losses`.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `SUM_OVER_BATCH_SIZE`.
name: Optional name for the op.
Raises:
ValueError: If the shape of `sample_weight` is not compatible with `losses`.
Returns:
Weighted loss `Tensor` of the same type as `losses`. If `reduction` is
`NONE`, this has the same shape as `losses`; otherwise, it is scalar.
"""
ReductionV2.validate(reduction)
# If this function is called directly, then we just default 'AUTO' to
# 'SUM_OVER_BATCH_SIZE'. Eg. Canned estimator use cases.
if reduction == ReductionV2.AUTO:
reduction = ReductionV2.SUM_OVER_BATCH_SIZE
if sample_weight is None:
sample_weight = 1.0
with backend.name_scope(name or 'weighted_loss'):
# Save the `reduction` argument for loss normalization when distributing
# to multiple replicas. Used only for estimator + v1 optimizer flow.
tf.compat.v1.get_default_graph()._last_loss_reduction = reduction # pylint: disable=protected-access
if not isinstance(losses,
(keras_tensor.KerasTensor, tf.RaggedTensor)):
losses = tf.convert_to_tensor(losses)
input_dtype = losses.dtype
if not isinstance(sample_weight,
(keras_tensor.KerasTensor, tf.RaggedTensor)):
sample_weight = tf.convert_to_tensor(sample_weight)
# TODO(psv): Handle casting here in a better way, eg. if losses is float64
# we do not want to lose precision.
losses = tf.cast(losses, 'float32')
sample_weight = tf.cast(sample_weight, 'float32')
# Update dimensions of `sample_weight` to match with `losses` if possible.
losses, _, sample_weight = squeeze_or_expand_dimensions( # pylint: disable=unbalanced-tuple-unpacking
losses, None, sample_weight)
weighted_losses = tf.multiply(losses, sample_weight)
# Apply reduction function to the individual weighted losses.
loss = reduce_weighted_loss(weighted_losses, reduction)
# Convert the result back to the input type.
loss = tf.cast(loss, input_dtype)
return loss
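# Illustrative usage sketch (not part of the original module):
#   losses = tf.constant([1.0, 2.0, 3.0, 4.0])
#   compute_weighted_loss(losses, sample_weight=tf.constant([1.0, 1.0, 0.0, 0.0]))
#   # -> (1 + 2 + 0 + 0) / 4 = 0.75 under the default SUM_OVER_BATCH_SIZE reduction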
def scale_loss_for_distribution(loss_value):
"""Scales and returns the given loss value by the number of replicas."""
num_replicas = (
tf.distribute.get_strategy().num_replicas_in_sync)
if num_replicas > 1:
loss_value *= (1. / num_replicas)
return loss_value
def cast_losses_to_common_dtype(losses):
"""Cast a list of losses to a common dtype.
If any loss is floating-point, they will all be casted to the most-precise
floating-point loss. Otherwise the losses are not casted. We also skip casting
losses if there are any complex losses.
Args:
losses: A list of losses.
Returns:
`losses`, but they have been casted to a common dtype.
"""
highest_float = None
for loss in losses:
if loss.dtype.is_floating:
if highest_float is None or loss.dtype.size > highest_float.size:
highest_float = loss.dtype
elif {loss.dtype, highest_float} == {'bfloat16', 'float16'}:
highest_float = 'float32'
if loss.dtype.is_complex:
return losses # If we find any complex losses, do not cast any losses
if highest_float:
losses = [tf.cast(loss, highest_float) for loss in losses]
return losses
|
the-stack_106_28835 | # (c) 2012, Daniel Hokka Zakrisson <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import subprocess
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_text
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
ret = []
for term in terms:
p = subprocess.Popen(term, cwd=self._loader.get_basedir(), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode == 0:
ret.extend([to_text(l) for l in stdout.splitlines()])
else:
raise AnsibleError("lookup_plugin.lines(%s) returned %d" % (term, p.returncode))
return ret
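# Illustrative playbook usage (assumption: this is the 'lines' lookup plugin, which runs
# each term as a shell command and returns its stdout split into lines):
#   - debug:
#       msg: "{{ item }}"
#     with_lines: cat /etc/hosts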
|
the-stack_106_28836 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from setuptools import setup, find_packages
import os
import sys
from sphinx_intl import __version__
install_requires = [
'setuptools',
'six',
'polib',
'sphinx',
]
if sys.version_info < (2, 7):
install_requires.append('ordereddict')
extras_require = {
'test': [
'nose',
'flake8',
],
}
if sys.version_info < (2, 6):
extras_require['transifex'] = ['transifex_client==0.8']
else:
extras_require['transifex'] = ['transifex_client']
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst')) as f:
README = f.read()
description = \
'Sphinx utility that make it easy to translate and to apply translation.'
setup(
name='sphinx-intl',
version=__version__,
description=description,
long_description=README,
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Other Environment",
"License :: OSI Approved :: BSD License",
"Topic :: Documentation",
"Topic :: Software Development :: Documentation",
"Topic :: Text Processing :: General",
"Topic :: Utilities",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
],
author="Takayuki SHIMIZUKAWA",
author_email="[email protected]",
url="https://bitbucket.org/shimizukawa/sphinx-intl",
namespace_packages=[],
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
extras_require=extras_require,
entry_points="""\
[console_scripts]
sphinx-intl = sphinx_intl.commands:main
""",
)
|
the-stack_106_28838 | # -*- coding: utf-8 -*-
"""
DEPRECATE
TODO: Rename to ibeis/init/commands.py
The AID configuration selection is getting a major update right now
"""
from __future__ import absolute_import, division, print_function
import utool as ut
import numpy as np # NOQA
import six
from ibeis import params
(print, rrr, profile) = ut.inject2(__name__, '[main_helpers]')
VERB_TESTDATA = ut.get_argflag(('--verbose-testdata', '--verbtd'))
VERB_MAIN_HELPERS = ut.get_argflag(('--verbose-main-helpers', '--verbmhelp')) or ut.VERBOSE or VERB_TESTDATA
def define_named_aid_cfgs():
"""
Definitions for common aid configurations
TODO: potentially move to experiment configs
"""
from ibeis.expt import annotation_configs
named_defaults_dict = ut.dict_take(annotation_configs.__dict__, annotation_configs.TEST_NAMES)
named_qcfg_defaults = dict(zip(annotation_configs.TEST_NAMES, ut.get_list_column(named_defaults_dict, 'qcfg')))
named_dcfg_defaults = dict(zip(annotation_configs.TEST_NAMES, ut.get_list_column(named_defaults_dict, 'dcfg')))
alias_keys = annotation_configs.alias_keys
named_cfg_dict = {
'qcfg': named_qcfg_defaults,
'dcfg': named_dcfg_defaults,
}
return named_cfg_dict, alias_keys
def get_commandline_aidcfg():
"""
    Parse the command line for "THE NEW AND IMPROVED" canonical annotation
configuration dictionaries
CommandLine:
python -m ibeis.init.main_helpers --exec-get_commandline_aidcfg
python -m ibeis.init.main_helpers --exec-get_commandline_aidcfg --qcfg default:shuffle=True,index=0:25 --dcfg default
python -m ibeis.init.main_helpers --exec-get_commandline_aidcfg --qcfg default --dcfg default
python -m ibeis.init.main_helpers --exec-get_commandline_aidcfg --qcfg controlled --dcfg controlled
python -m ibeis.init.main_helpers --exec-get_commandline_aidcfg --acfg controlled
python -m ibeis.init.main_helpers --exec-get_commandline_aidcfg --acfg varydbsize
python -m ibeis.init.main_helpers --exec-get_commandline_aidcfg --acfg controlled:qindex=0:10
--aidcfg=controlled=True,species=primary
--aidcfg=controlled=True,species=primary,annot_per_name=2
--aidcfg=controlled=True,species=primary,annot_per_name=3
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.init.main_helpers import * # NOQA
>>> aidcfg = get_commandline_aidcfg()
>>> print('aidcfg = ' + ut.repr2(aidcfg))
"""
def parse_cfgstr_list2(cfgstr_list, named_dcfgs_dict, cfgtype=None, alias_keys=None):
"""
Parse a genetic cfgstr --flag name1:custom_args1 name2:custom_args2
"""
cfg_list = []
for cfgstr in cfgstr_list:
cfgstr_split = cfgstr.split(':')
cfgname = cfgstr_split[0]
cfg = named_dcfgs_dict[cfgname].copy()
# Parse dict out of a string
if len(cfgstr_split) > 1:
cfgstr_options = ':'.join(cfgstr_split[1:]).split(',')
cfg_options = ut.parse_cfgstr_list(cfgstr_options, smartcast=True, oldmode=False)
else:
cfg_options = {}
# Hack for q/d specific configs
if cfgtype is not None:
for key in list(cfg_options.keys()):
# check if key is nonstandard
if not (key in cfg or key in alias_keys):
                    # does removing prefix make it standard?
prefix = cfgtype[0]
if key.startswith(prefix):
key_ = key[len(prefix):]
if key_ in cfg or key_ in alias_keys:
# remove prefix
cfg_options[key_] = cfg_options[key]
try:
assert key[1:] in cfg or key[1:] in alias_keys, 'key=%r, key[1:] =%r' % (key, key[1:] )
except AssertionError as ex:
ut.printex(ex, 'error', keys=['key', 'cfg', 'alias_keys'])
raise
del cfg_options[key]
# Remap keynames based on aliases
if alias_keys is not None:
for key in alias_keys.keys():
if key in cfg_options:
# use standard new key
cfg_options[alias_keys[key]] = cfg_options[key]
                    # remove old aliased key
del cfg_options[key]
# Finalize configuration dict
cfg = ut.update_existing(cfg, cfg_options, copy=True, assert_exists=True)
cfg['_cfgtype'] = cfgtype
cfg['_cfgname'] = cfgname
cfg['_cfgstr'] = cfgstr
cfg_list.append((cfgname, cfg))
break # FIXME: do more than one eventually
return cfg
named_cfg_dict, alias_keys = define_named_aid_cfgs()
# Parse the cfgstr list from the command line
qcfgstr_list, has_qcfg = ut.get_argval('--qcfg', type_=list, default=['default'], return_specified=True)
dcfgstr_list, has_dcfg = ut.get_argval('--dcfg', type_=list, default=['default'], return_specified=True)
if not has_qcfg and not has_dcfg:
# TODO: Specify both with one flag
acfgstr_list = ut.get_argval(('--aidcfg', '--acfg', '-a'), type_=list, default=['default'])
aidcfg = {}
aidcfg['qcfg'] = parse_cfgstr_list2(acfgstr_list, named_cfg_dict['qcfg'], 'qcfg', alias_keys)
aidcfg['dcfg'] = parse_cfgstr_list2(acfgstr_list, named_cfg_dict['dcfg'], 'dcfg', alias_keys)
else:
aidcfg = {}
aidcfg['qcfg'] = parse_cfgstr_list2(qcfgstr_list, named_cfg_dict['qcfg'], 'qcfg', alias_keys)
aidcfg['dcfg'] = parse_cfgstr_list2(dcfgstr_list, named_cfg_dict['dcfg'], 'dcfg', alias_keys)
return aidcfg
def ensure_flatiterable(input_):
if isinstance(input_, six.string_types):
input_ = ut.fuzzy_int(input_)
if isinstance(input_, int) or not ut.isiterable(input_):
return [input_]
elif isinstance(input_, (list, tuple)):
#print(input_)
if len(input_) > 0 and ut.isiterable(input_[0]):
return ut.flatten(input_)
return input_
else:
        raise TypeError('cannot ensure %r input_=%r is iterable' % (type(input_), input_))
def ensure_flatlistlike(input_):
#if isinstance(input_, slice):
# pass
iter_ = ensure_flatiterable(input_)
return list(iter_)
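# Illustrative examples (not part of the original module):
#   ensure_flatlistlike(4)             -> [4]
#   ensure_flatlistlike([[1, 2], [3]]) -> [1, 2, 3]  (flattened because items are iterable)
#   ensure_flatlistlike([1, 2, 3])     -> [1, 2, 3]  (already flat, returned as-is)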
def get_test_qaids(ibs, default_qaids=None, return_annot_info=False, aidcfg=None):
"""
Gets test annot_rowids based on command line arguments
    DEPRECATE
Args:
ibs (IBEISController): ibeis controller object
default_qaids (None): if list then used only if no other aids are available (default = [1])
as a string it mimics the command line
Returns:
list: available_qaids
CommandLine:
python -m ibeis.init.main_helpers --test-get_test_qaids
python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_Master0
python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_Master0 --qaid 1
python -m ibeis.init.main_helpers --test-get_test_qaids --allgt --db PZ_MTEST
python -m ibeis.init.main_helpers --test-get_test_qaids --qaid 4 5 8 --verbmhelp
python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_MTEST
python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_MTEST --qaid 2 --verbmhelp
python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_MTEST --qaid 2
python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_Master0 --qindex 0:10 --verbmhelp
python -m ibeis.init.main_helpers --exec-get_test_qaids --controlled --db PZ_Master0 --exec-mode
python -m ibeis.init.main_helpers --exec-get_test_qaids --db testdb1 --allgt --qindex 0:256
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.init.main_helpers import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='testdb1')
>>> default_qaids = None
>>> available_qaids = get_test_qaids(ibs, default_qaids)
>>> ibeis.other.dbinfo.get_dbinfo(ibs, aid_list=available_qaids, with_contrib=False, short=True)
>>> result = 'available_qaids = ' + ut.obj_str(available_qaids, truncate=True, nl=False)
>>> print('len(available_qaids) = %d' % len(available_qaids))
>>> print(result)
available_qaids = [1]
"""
qaid_request_info = {}
if VERB_MAIN_HELPERS:
print('[get_test_qaids] + --- GET_TEST_QAIDS ---')
# Old version of this function
if VERB_MAIN_HELPERS:
print('[get_test_qaids] + --- GET_TEST_QAIDS ---')
print('[get_test_qaids] * default_qaids = %s' % (ut.obj_str(default_qaids, truncate=True, nl=False)))
valid_aids = ibs.get_valid_aids()
if len(valid_aids) == 0:
print('[get_test_qaids] WARNING no annotations available')
# ---- INCLUDING STEP
if VERB_MAIN_HELPERS:
print('[get_test_qaids] * include step')
available_qaids = []
#ut.get_argflag(('--all-cases', '--all'))
#ut.get_argflag(('--all-gt-cases', '--allgt'))
#ut.get_argflag(('--all-hard-cases', '--allhard'))
#ut.get_argflag(('--qaid', '--qaids'))
#ut.get_argflag('--controlled') or ut.get_argflag('--controlled_qaids')
#not ut.get_argflag('--junk')
ALL_CASES = params.args.all_cases or default_qaids == 'all'
GT_CASES = params.args.all_gt_cases or default_qaids == 'gt'
HARD_CASES = params.args.all_hard_cases or ut.get_argflag(('--all-hard-cases', '--allhard', '--hard'))
NO_JUNK = not ut.get_argflag('--junk')
CONTROLLED_CASES = ut.get_argflag('--controlled') or ut.get_argflag('--controlled_qaids')
NO_REVIEWED = ut.get_argflag('--unreviewed')
species = ut.get_argval('--species')
#QAID = params.args.qaid
QAID = ut.get_argval('--qaid', type_='fuzzy_subset', default=None)
QINDEX = params.args.qindex
QSHUFFLE = ut.get_argval('--qshuffle')
if QAID is not None:
if VERB_MAIN_HELPERS:
print('[get_test_qaids] * Including cmdline specified qaids')
try:
args_qaid = ensure_flatlistlike(QAID)
except Exception:
args_qaid = QAID
available_qaids.extend(args_qaid)
qaid_request_info['custom_commandline'] = args_qaid
if ALL_CASES:
if VERB_MAIN_HELPERS:
print('[get_test_qaids] * Including all qaids')
available_qaids.extend(valid_aids)
qaid_request_info['all_cases'] = True
if HARD_CASES:
if VERB_MAIN_HELPERS:
print('[get_test_qaids] * Including hard qaids')
is_hard_list = ibs.get_annot_is_hard(valid_aids)
hard_aids = ut.compress(valid_aids, is_hard_list)
available_qaids.extend(hard_aids)
qaid_request_info['hard_cases'] = True
if GT_CASES:
if VERB_MAIN_HELPERS:
print('[get_test_qaids] * Including groundtruth qaids')
has_gt_list = ibs.get_annot_has_groundtruth(valid_aids)
hasgt_aids = ut.compress(valid_aids, has_gt_list)
print('[get_test_qaids] Adding all %d/%d ground-truthed test cases' % (len(hasgt_aids), len(valid_aids)))
available_qaids.extend(hasgt_aids)
qaid_request_info['gt_cases'] = True
if CONTROLLED_CASES:
if VERB_MAIN_HELPERS:
print('[get_test_qaids] * Including controlled qaids')
from ibeis.other import ibsfuncs
# Override all other gts with controlled
controlled_qaids = ibsfuncs.get_two_annots_per_name_and_singletons(ibs, onlygt=True)
available_qaids.extend(controlled_qaids)
qaid_request_info['controlled'] = True
else:
qaid_request_info['controlled'] = False
# ---- CHECK_DEFAULTS QUERY
if VERB_MAIN_HELPERS:
print('[get_test_qaids] * len(available_qaids) = %r' % (len(available_qaids)))
if len(available_qaids) == 0:
print('[get_test_qaids] * ... defaulting, no available qaids on command line.')
if default_qaids is None:
default_qaids = valid_aids[0:1]
qaid_request_info['default_one'] = True
elif isinstance(default_qaids, six.string_types):
if default_qaids == 'gt' or default_qaids == 'allgt':
default_qaids = ibs.get_valid_aids(hasgt=True)
qaid_request_info['default_gt'] = True
available_qaids = default_qaids
else:
if VERB_MAIN_HELPERS:
print('[get_test_qaids] * ... not defaulting')
available_qaids = ut.unique_ordered(available_qaids)
# ---- EXCLUSION STEP
if VERB_MAIN_HELPERS:
print('[get_test_qaids] * len(available_qaids) = %r' % (len(available_qaids)))
print('[get_test_qaids] * exclude step')
if NO_JUNK:
if VERB_MAIN_HELPERS:
print('[get_test_qaids] * Filtering junk')
available_qaids = ibs.filter_junk_annotations(available_qaids)
qaid_request_info['has_junk'] = False
if NO_REVIEWED:
if VERB_MAIN_HELPERS:
print('[get_test_qaids] * Filtering unreviewed')
isreviewed_list = ibs.get_annot_has_reviewed_matching_aids(available_qaids)
available_qaids = ut.filterfalse_items(available_qaids, isreviewed_list)
qaid_request_info['has_unreviewed'] = False
if species is not None:
if species == 'primary':
if VERB_MAIN_HELPERS:
                print('[get_test_qaids] * Finding primary species')
#species = ibs.get_primary_database_species(available_qaids)
species = ibs.get_primary_database_species()
qaid_request_info['primary_species'] = True
if VERB_MAIN_HELPERS:
print('[get_test_qaids] * Filtering to species=%r' % (species,))
isvalid_list = np.array(ibs.get_annot_species(available_qaids)) == species
available_qaids = ut.compress(available_qaids, isvalid_list)
qaid_request_info['species_filter'] = species
if VERB_MAIN_HELPERS:
print('[get_test_qaids] * len(available_qaids) = %r' % (len(available_qaids)))
print('[get_test_qaids] * subindex step')
# ---- INDEX SUBSET
#ut.get_argval('--qshuffle')
if QSHUFFLE:
        # Deterministic shuffling
available_qaids = ut.take(available_qaids, ut.random_indexes(len(available_qaids), seed=42))
qaid_request_info['shuffled'] = True
# Sample a large pool of chosen query qindexes
if QINDEX is not None:
# FIXME: should use a slice of the list or a sublist
qindexes = ensure_flatlistlike(QINDEX)
_test_qaids = [available_qaids[qx] for qx in qindexes if qx < len(available_qaids)]
print('[get_test_qaids] Chose subset of size %d/%d' % (len(_test_qaids), len(available_qaids)))
available_qaids = _test_qaids
qaid_request_info['subset'] = qindexes
if VERB_MAIN_HELPERS:
print('[get_test_qaids] * len(available_qaids) = %r' % (len(available_qaids)))
print('[get_test_qaids] L ___ GET_TEST_QAIDS ___')
if return_annot_info:
return available_qaids, qaid_request_info
else:
return available_qaids
def get_test_daids(ibs, default_daids='all', qaid_list=None, return_annot_info=False, aidcfg=None):
""" Gets database annot_rowids based on command line arguments
    DEPRECATE
CommandLine:
python dev.py --db PZ_MTEST -t best --exclude-query --qaid 72 -r 0 -c 0 --show --va --vf --dump-extra
Args:
ibs (IBEISController): ibeis controller object
default_daids (str): (default = 'all')
qaid_list (list): list of chosen qaids that may affect daids (default = None)
Returns:
list: available_daids
CommandLine:
python -m ibeis.init.main_helpers --test-get_test_daids
python -m ibeis.init.main_helpers --test-get_test_daids --db PZ_MTEST --verbmhelp
python -m ibeis.init.main_helpers --test-get_test_daids --db PZ_MTEST --exclude-query
python -m ibeis.init.main_helpers --test-get_test_daids --db PZ_MTEST --daid-exclude 2 3 4
python -m ibeis.init.main_helpers --test-get_test_daids --db PZ_MTEST --species=zebra_grevys
python -m ibeis.init.main_helpers --test-get_test_daids --db PZ_Master0 --species=zebra_grevys
python -m ibeis.init.main_helpers --test-get_test_daids --db PZ_Master0 --controlled --verbmhelp
python -m ibeis.init.main_helpers --exec-get_test_daids --controlled --db PZ_Master0 --exec-mode
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.init.main_helpers import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='testdb1')
>>> default_daids = 'all'
>>> qaid_list = [1]
>>> available_daids = get_test_daids(ibs, default_daids, qaid_list)
>>> ibeis.other.dbinfo.get_dbinfo(ibs, aid_list=available_daids, with_contrib=False, short=True)
>>> result = 'available_daids = ' + ut.obj_str(available_daids, truncate=True, nl=False)
>>> print('len(available_daids) %d' % len(available_daids))
>>> print(result)
available_daids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
"""
daid_request_info = {}
if VERB_MAIN_HELPERS:
print('[get_test_daids] + --- GET_TEST_DAIDS ---')
print('[get_test_daids] * default_daids = %s' % (ut.obj_str(default_daids, truncate=True, nl=False)))
print('[get_test_daids] * qaid_list = %s' % (ut.obj_str(qaid_list, truncate=True, nl=False)))
# ---- INCLUDING STEP
if VERB_MAIN_HELPERS:
print('[get_test_daids] * include step')
available_daids = []
CONTROLLED_CASES = ut.get_argflag('--controlled') or ut.get_argflag('--controlled_daids')
DSHUFFLE = ut.get_argval('--dshuffle')
DINDEX = params.args.dindex
NO_JUNK = not ut.get_argflag('--junk')
EXCLUDE_QUERY = ut.get_argflag('--exclude-query')
#daids_exclude = params.args.daid_exclude
daids_exclude = None
if CONTROLLED_CASES:
print('[get_test_daids] * Including controlled daids')
from ibeis.other import ibsfuncs
controlled_daids = ibsfuncs.get_two_annots_per_name_and_singletons(ibs, onlygt=False)
available_daids.extend(controlled_daids)
daid_request_info['controlled'] = True
else:
daid_request_info['controlled'] = False
# ---- CHECK_DEFAULTS DATA
if VERB_MAIN_HELPERS:
print('[get_test_daids] * len(available_daids) = %r' % (len(available_daids)))
if len(available_daids) == 0:
print('[get_test_daids] * ... defaulting, no available daids on command line.')
if isinstance(default_daids, six.string_types):
if default_daids == 'all':
default_daids = ibs.get_valid_aids()
daid_request_info['default_daids'] = 'all'
elif default_daids == 'gt':
default_daids = ut.flatten(ibs.get_annot_groundtruth(qaid_list))
daid_request_info['default_daids'] = 'gt'
#available_qaids = valid_aids[0:1]
assert not isinstance(available_daids, six.string_types)
available_daids = default_daids
else:
if VERB_MAIN_HELPERS:
print('[get_test_daids] * ... not defaulting')
available_daids = ut.unique_ordered(available_daids)
# ---- EXCLUSION STEP
if VERB_MAIN_HELPERS:
print('[get_test_daids] * len(available_daids) = %r' % (len(available_daids)))
print('[get_test_daids] * exclude step')
species = ut.get_argval('--species', type_=str, default=None)
if NO_JUNK:
if VERB_MAIN_HELPERS:
print('[get_test_daids] * Filtering junk')
available_daids = ibs.filter_junk_annotations(available_daids)
if EXCLUDE_QUERY:
if VERB_MAIN_HELPERS:
print('[get_test_daids] * Excluding query qaids')
assert qaid_list is not None, 'must specify qaids to exclude'
available_daids = ut.setdiff_ordered(available_daids, qaid_list)
if daids_exclude is not None:
if VERB_MAIN_HELPERS:
print('[get_test_daids] * Excluding specified daids')
available_daids = ut.setdiff_ordered(available_daids, daids_exclude)
if species is not None:
if species == 'primary':
if VERB_MAIN_HELPERS:
                print('[get_test_daids] * Finding primary species')
#species = ibs.get_primary_database_species(available_daids)
species = ibs.get_primary_database_species()
if VERB_MAIN_HELPERS:
print('[get_test_daids] * Filtering to species=%r' % (species,))
import numpy as np
isvalid_list = np.array(ibs.get_annot_species(available_daids)) == species
available_daids = ut.compress(available_daids, isvalid_list)
# ---- SUBINDEXING STEP
if VERB_MAIN_HELPERS:
print('[get_test_daids] * len(available_daids) = %r' % (len(available_daids)))
print('[get_test_daids] * subindex step')
#ut.get_argval('--qshuffle')
if DSHUFFLE:
        # Deterministic shuffling
available_daids = ut.take(available_daids, ut.random_indexes(len(available_daids), seed=43))
daid_request_info['shuffled'] = True
if DINDEX is not None:
dindexes = ensure_flatlistlike(DINDEX)
_test_daids = [available_daids[dx] for dx in dindexes if dx < len(available_daids)]
print('[get_test_daids] Chose subset of size %d/%d' % (len(_test_daids), len(available_daids)))
available_daids = _test_daids
if VERB_MAIN_HELPERS:
print('[get_test_daids] * len(available_daids) = %r' % (len(available_daids)))
print('[get_test_daids] L ___ GET_TEST_DAIDS ___')
if return_annot_info:
return available_daids, daid_request_info
else:
return available_daids
|
the-stack_106_28840 | #!/usr/bin/env python3
# Copyright (c) 2014-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 1 creates an address for each type of
import RPC call and node 0 sends BTC to it. Then other nodes import the
addresses, and the test makes listtransactions and getbalance calls to confirm
that the importing node either did or did not execute rescans picking up the
send transactions.
In the second part of the test, node 0 sends more BTC to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (start_nodes, connect_nodes, sync_blocks, assert_equal, set_node_times)
from decimal import Decimal
import collections
import enum
import itertools
Call = enum.Enum("Call", "single multi")
Data = enum.Enum("Data", "address pub priv")
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data rescan prune")):
"""Helper for importing one key and verifying scanned transactions."""
def do_import(self, timestamp):
"""Call one key import RPC."""
if self.call == Call.single:
if self.data == Data.address:
response, error = try_rpc(self.node.importaddress, self.address["address"], self.label,
self.rescan == Rescan.yes)
elif self.data == Data.pub:
response, error = try_rpc(self.node.importpubkey, self.address["pubkey"], self.label,
self.rescan == Rescan.yes)
elif self.data == Data.priv:
response, error = try_rpc(self.node.importprivkey, self.key, self.label, self.rescan == Rescan.yes)
assert_equal(response, None)
assert_equal(error, {'message': 'Rescan is disabled in pruned mode',
'code': -4} if self.expect_disabled else None)
elif self.call == Call.multi:
response = self.node.importmulti([{
"scriptPubKey": {
"address": self.address["address"]
},
"timestamp": timestamp + RESCAN_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
"pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
"keys": [self.key] if self.data == Data.priv else [],
"label": self.label,
"watchonly": self.data != Data.priv
}], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)})
assert_equal(response, [{"success": True}])
def check(self, txid=None, amount=None, confirmations=None):
"""Verify that getbalance/listtransactions return expected values."""
balance = self.node.getbalance(self.label, 0, True)
assert_equal(balance, self.expected_balance)
txs = self.node.listtransactions(self.label, 10000, 0, True)
assert_equal(len(txs), self.expected_txs)
if txid is not None:
tx, = [tx for tx in txs if tx["txid"] == txid]
assert_equal(tx["account"], self.label)
assert_equal(tx["address"], self.address["address"])
assert_equal(tx["amount"], amount)
assert_equal(tx["category"], "receive")
assert_equal(tx["label"], self.label)
assert_equal(tx["txid"], txid)
assert_equal(tx["confirmations"], confirmations)
assert_equal("trusted" not in tx, True)
# Verify the transaction is correctly marked watchonly depending on
# whether the transaction pays to an imported public key or
# imported private key. The test setup ensures that transaction
# inputs will not be from watchonly keys (important because
# involvesWatchonly will be true if either the transaction output
# or inputs are watchonly).
if self.data != Data.priv:
assert_equal(tx["involvesWatchonly"], True)
else:
assert_equal("involvesWatchonly" not in tx, True)
# List of Variants for each way a key or address could be imported.
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))]
# List of nodes to import keys to. Half the nodes will have pruning disabled,
# half will have it enabled. Different nodes will be used for imports that are
# expected to cause rescans, and imports that are not expected to cause
# rescans, in order to prevent rescans during later imports picking up
# transactions associated with earlier imports. This makes it easier to keep
# track of expected balances and transactions.
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
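# For reference, the product above expands to four nodes (derived here for
# readability; this expansion is not spelled out in the original test):
#   [ImportNode(prune=False, rescan=False), ImportNode(prune=False, rescan=True),
#    ImportNode(prune=True, rescan=False), ImportNode(prune=True, rescan=True)]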
# Rescans start at the earliest block up to 2 hours before the key timestamp.
RESCAN_WINDOW = 2 * 60 * 60
class ImportRescanTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2 + len(IMPORT_NODES)
def setup_network(self):
extra_args = [["-debug=1"] for _ in range(self.num_nodes)]
for i, import_node in enumerate(IMPORT_NODES, 2):
if import_node.prune:
extra_args[i] += ["-prune=1"]
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
for i in range(1, self.num_nodes):
connect_nodes(self.nodes[i], 0)
def run_test(self):
# Create one transaction on node 0 with a unique amount and label for
# each possible type of wallet import RPC.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.label = "label {} {}".format(i, variant)
variant.address = self.nodes[1].validateaddress(self.nodes[1].getnewaddress(variant.label))
variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
variant.initial_amount = 10 - (i + 1) / 4.0
variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
# Generate a block containing the initial transactions, then another
# block further in the future (past the rescan window).
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
set_node_times(self.nodes, timestamp + RESCAN_WINDOW + 1)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# For each variation of wallet key import, invoke the import RPC and
# check the results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
variant.do_import(timestamp)
if expect_rescan:
variant.expected_balance = variant.initial_amount
variant.expected_txs = 1
variant.check(variant.initial_txid, variant.initial_amount, 2)
else:
variant.expected_balance = 0
variant.expected_txs = 0
variant.check()
# Create new transactions sending to each address.
fee = self.nodes[0].getnetworkinfo()["relayfee"]
for i, variant in enumerate(IMPORT_VARIANTS):
variant.sent_amount = 10 - (2 * i + 1) / 8.0
variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
# Generate a block containing the new transactions.
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
sync_blocks(self.nodes)
# Check the latest results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
if not variant.expect_disabled:
variant.expected_balance += variant.sent_amount
variant.expected_txs += 1
variant.check(variant.sent_txid, variant.sent_amount, 1)
else:
variant.check()
def try_rpc(func, *args, **kwargs):
try:
return func(*args, **kwargs), None
except JSONRPCException as e:
return None, e.error
if __name__ == "__main__":
ImportRescanTest().main()
|
the-stack_106_28841 | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
from distributed.client import _get_global_client
class DaskTask:
@classmethod
def deploy(cls, func, num_return_vals, kwargs):
client = _get_global_client()
remote_task_future = client.submit(func, **kwargs)
return [
client.submit(lambda l, i: l[i], remote_task_future, i)
for i in range(num_return_vals)
]
@classmethod
def materialize(cls, future):
client = _get_global_client()
return client.gather(future)
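# Rough usage sketch (illustrative only): it assumes a dask.distributed Client
# has already been created so that _get_global_client() can find it, and
# `make_parts` is a made-up example function, not part of Modin's API.
#
#   from distributed import Client
#   client = Client()                         # becomes the global client
#
#   def make_parts(n):
#       return [i * i for i in range(n)]      # any function returning an indexable result
#
#   part_futures = DaskTask.deploy(make_parts, num_return_vals=3, kwargs={"n": 3})
#   parts = [DaskTask.materialize(f) for f in part_futures]   # -> [0, 1, 4]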
|
the-stack_106_28844 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains locators (transform) data part implementation
"""
from __future__ import print_function, division, absolute_import
import os
import re
import logging
from tpDcc import dcc
from tpDcc.core import dcc as core_dcc
from tpDcc.libs.datalibrary.core import consts, datapart, mirrortable
logger = logging.getLogger(consts.LIB_ID)
class MirrorTableData(datapart.DataPart):
DATA_TYPE = 'dcc.mirror'
MENU_ICON = 'mirror'
MENU_NAME = 'Mirror Table'
PRIORITY = 17
EXTENSION = '.mirror'
_has_trait = re.compile(r'\.mirror$', re.I)
def __init__(self, *args, **kwargs):
super(MirrorTableData, self).__init__(*args, **kwargs)
self._validated_objects = list()
@classmethod
def can_represent(cls, identifier, only_extension=False):
if MirrorTableData._has_trait.search(identifier):
if only_extension:
return True
if os.path.isfile(identifier):
return True
return False
@classmethod
def supported_dccs(cls):
return [core_dcc.Dccs.Maya]
def label(self):
return os.path.basename(self.identifier())
def icon(self):
return 'mirror'
def extension(self):
return '.mirror'
def type(self):
return 'dcc.mirror'
def menu_name(self):
return 'Mirror Table'
def load_schema(self):
mirror_table = mirrortable.MirrorTable().from_path(self.format_identifier())
return [
{
'name': 'Left',
'value': mirror_table.left_side()
},
{
'name': 'Right',
'value': mirror_table.right_side()
},
{
'name': 'optionsGroup',
'title': 'Options',
'type': 'group',
'order': 2
},
{
'name': 'keysOption',
'title': 'keys',
'type': 'radio',
'value': 'Selected Range',
'items': ['All Keys', 'Selected Range'],
'persistent': True
},
{
'name': 'option',
'type': 'enum',
'default': 'swap',
'items': ['swap', 'left to right', 'right to left'],
'persistent': True
}
]
def save_schema(self):
return [
{
'name': 'mirrorPlane',
'type': 'buttonGroup',
'default': 'YZ',
'layout': 'vertical',
'items': ['YZ', 'XY', 'XZ']
},
{
'name': 'leftSide',
'type': 'string',
'layout': 'vertical',
'menu': {'name': '0'}
},
{
'name': 'rightSide',
'type': 'string',
'layout': 'vertical',
'menu': {'name': '0'}
}
]
def save_validator(self, **kwargs):
results = list()
objects = dcc.client().selected_nodes() or list()
dirty = kwargs.get('fieldChanged') in ['leftSide', 'rightSide']
dirty = dirty or self._validated_objects != objects
if dirty:
self._validated_objects = objects
left_side = kwargs.get('leftSide', '')
if not left_side:
left_side = mirrortable.MirrorTable().find_left_side(objects)
right_side = kwargs.get('rightSide', '')
if not right_side:
right_side = mirrortable.MirrorTable().find_right_side(objects)
mirror_table = mirrortable.MirrorTable().from_objects([], left_side=left_side, right_side=right_side)
results.extend([
{
'name': 'leftSide',
'value': left_side,
'menu': {'name': str(mirror_table.left_count(objects))}
},
{
'name': 'rightSide',
'value': right_side,
'menu': {'name': str(mirror_table.right_count(objects))}
}
])
return results
def functionality(self):
return dict(
save=self.save,
import_data=self.import_data
)
def save(self, *args, **kwargs):
filepath = self.format_identifier()
if not filepath:
logger.warning('Impossible to save Mirror Table file because save file path not defined!')
return False
objects = kwargs.get('objects', None)
if not objects:
objects = dcc.client().selected_nodes(full_path=True)
logger.debug('Saving {} | {}'.format(filepath, kwargs))
mirrortable.save_mirror_table(
filepath, objects,
left_side=kwargs.get('leftSide'),
right_side=kwargs.get('rightSide'),
mirror_plane=kwargs.get('mirrorPlane')
)
logger.debug('Saved {} successfully!'.format(filepath))
return True
def import_data(self, *args, **kwargs):
        filepath = self.format_identifier()
        if not filepath:
            logger.warning('Impossible to load Mirror Table because save file path not defined!')
            return False
        if not os.path.isfile(filepath):
            logger.warning('Impossible to load Mirror Table because file "{}" does not exist!'.format(filepath))
            return False
logger.debug('Loading {} | {}'.format(filepath, kwargs))
mirror_table = mirrortable.MirrorTable().from_path(self.format_identifier())
mirror_table.load(
objects=kwargs.get('objects'), namespaces=kwargs.get('namespaces'),
option=kwargs.get('option'), keys_option=kwargs.get('keysOption'), time=kwargs.get('time'))
logger.debug('Loaded {} successfully!'.format(filepath))
return True
|
the-stack_106_28849 | # import necessary libraries
import io
import random
import string # to process standard python strings
import warnings
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
warnings.filterwarnings('ignore')
import nltk
from nltk.stem import WordNetLemmatizer
nltk.download('popular', quiet=True) # for downloading packages
# # uncomment the following only the first time
# nltk.download('punkt') # first-time use only
# nltk.download('wordnet') # first-time use only
# Reading in the corpus
with open('dialogue1.txt', 'r', encoding='utf8', errors='ignore') as fin:
raw = fin.read().lower()
# Tokenisation
sent_tokens = nltk.sent_tokenize(raw) # converts to list of sentences
word_tokens = nltk.word_tokenize(raw) # converts to list of words
# Preprocessing
lemmer = WordNetLemmatizer()
def LemTokens(tokens):
return [lemmer.lemmatize(token) for token in tokens]
remove_punct_dict = dict((ord(punct), None) for punct in string.punctuation)
def LemNormalize(text):
return LemTokens(nltk.word_tokenize(text.lower().translate(remove_punct_dict)))
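# For illustration (this example is ours, not part of the original script):
#   LemNormalize("Dogs are running!")  ->  ['dog', 'are', 'running']
# i.e. the text is lower-cased, punctuation is stripped, then it is tokenised
# and lemmatised before being fed to the TF-IDF vectorizer below.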
def response(user_response):
    """Return the corpus sentence most similar to the user input (TF-IDF + cosine similarity)."""
    sent_tokens.append(user_response)
    TfidfVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english')
    tfidf = TfidfVec.fit_transform(sent_tokens)
    vals = cosine_similarity(tfidf[-1], tfidf)
    idx = vals.argsort()[0][-2]
    flat = vals.flatten()
    flat.sort()
    req_tfidf = flat[-2]
    if req_tfidf == 0:
        bot_response = "I am sorry! I don't understand you"
    else:
        bot_response = sent_tokens[idx]
    sent_tokens.pop()  # drop the user input again so the corpus does not grow with every message
    return bot_response
import logging
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
# Define a few command handlers. These usually take the two arguments update and
# context. Error handlers also receive the raised TelegramError object in error.
def start(update, context):
"""Send a message when the command /start is issued."""
intro = "My name is Captain. I will answer your queries about Vivekanand College Kolhapur"
update.message.reply_text(intro)
def help(update, context):
"""Send a message when the command /help is issued."""
update.message.reply_text('What can I do for you!')
def echo(update, context):
"""Echo the user message."""
msg = response(update.message.text)
update.message.reply_text(msg)
def error(update, context):
"""Log Errors caused by Updates."""
logger.warning('Update "%s" caused error "%s"', update, context.error)
def main():
"""Start the bot."""
# Create the Updater and pass it your bot's token.
# Make sure to set use_context=True to use the new context based callbacks
# Post version 12 this will no longer be necessary
    tokentel = "Telegram token"  # replace with the bot token issued by @BotFather
updater = Updater(tokentel, use_context=True)
# Get the dispatcher to register handlers
dp = updater.dispatcher
# on different commands - answer in Telegram
dp.add_handler(CommandHandler("start", start))
dp.add_handler(CommandHandler("help", help))
# on noncommand i.e message - echo the message on Telegram
dp.add_handler(MessageHandler(Filters.text, echo))
# log all errors
dp.add_error_handler(error)
# Start the Bot
updater.start_polling()
# Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
main()
|
the-stack_106_28851 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Student progress trackers."""
__author__ = 'Sean Lip ([email protected])'
import datetime
import logging
import os
from collections import defaultdict
import transforms
from common import utils
from models import QuestionDAO
from models import QuestionGroupDAO
from models import StudentPropertyEntity
from tools import verify
# Names of component tags that are tracked for progress calculations.
TRACKABLE_COMPONENTS = [
'question',
'question-group',
]
class UnitLessonCompletionTracker(object):
"""Tracks student completion for a unit/lesson-based linear course."""
PROPERTY_KEY = 'linear-course-completion'
# Here are representative examples of the keys for the various entities
# used in this class:
# Unit 1: u.1
# Unit 1, Lesson 1: u.1.l.1
# Unit 1, Lesson 1, Activity 0: u.1.l.1.a.0
# Unit 1, Lesson 1, Activity 0, Block 4: u.1.l.1.a.0.b.4
# Assessment 'Pre': s.Pre
# At the moment, we do not divide assessments into blocks.
#
# The following keys were added in v1.5:
# Unit 1, Lesson 1, HTML: u.1.l.1.h.0
# Unit 1, Lesson 1, HTML, Component with instanceid id: u.1.l.1.h.0.c.id
#
# The number after the 'h' and 'a' codes is always zero, since a lesson may
# have at most one HTML body and one activity.
#
# IMPORTANT NOTE: The values of the keys mean different things depending on
# whether the entity is a composite entity or not.
# If it is a composite entity (unit, lesson, activity), then the value is
# - 0 if none of its sub-entities has been completed
# - 1 if some, but not all, of its sub-entities have been completed
# - 2 if all its sub-entities have been completed.
# If it is not a composite entity (i.e. block, assessment, component), then
# the value is just the number of times the event has been triggered.
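    # For illustration only (the ids below are made up), a stored progress
    # value might decode to:
    #   {'u.1': 2, 'u.1.l.1': 2, 'u.1.l.1.h.0': 2,
    #    'u.1.l.1.h.0.c.xy12': 3, 's.Pre': 1}
    # i.e. unit 1 and its first lesson are fully completed (state 2), the
    # component with instance id 'xy12' has been triggered three times, and
    # assessment 'Pre' has been submitted once.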
# Constants for recording the state of composite entities.
# TODO(sll): Change these to enums.
NOT_STARTED_STATE = 0
IN_PROGRESS_STATE = 1
COMPLETED_STATE = 2
MULTIPLE_CHOICE = 'multiple choice'
MULTIPLE_CHOICE_GROUP = 'multiple choice group'
QUESTION_GROUP = 'question-group'
QUESTION = 'question'
EVENT_CODE_MAPPING = {
'course': 'r',
'course_forced': 'r',
'unit': 'u',
'unit_forced': 'u',
'lesson': 'l',
'activity': 'a',
'html': 'h',
'block': 'b',
'assessment': 's',
'component': 'c',
'custom_unit': 'x'
}
COMPOSITE_ENTITIES = [
EVENT_CODE_MAPPING['course'],
EVENT_CODE_MAPPING['unit'],
EVENT_CODE_MAPPING['lesson'],
EVENT_CODE_MAPPING['activity'],
EVENT_CODE_MAPPING['html'],
EVENT_CODE_MAPPING['custom_unit']
]
POST_UPDATE_PROGRESS_HOOK = []
def __init__(self, course):
self._course = course
self._progress_by_user_id = {}
def _get_course(self):
return self._course
def get_activity_as_python(self, unit_id, lesson_id):
"""Gets the corresponding activity as a Python object."""
root_name = 'activity'
course = self._get_course()
activity_text = course.app_context.fs.get(
os.path.join(course.app_context.get_home(),
course.get_activity_filename(unit_id, lesson_id)))
content, noverify_text = verify.convert_javascript_to_python(
activity_text, root_name)
activity = verify.evaluate_python_expression_from_text(
content, root_name, verify.Activity().scope, noverify_text)
return activity
def _get_course_key(self):
return '%s.0' % (
self.EVENT_CODE_MAPPING['course'],
)
def _get_unit_key(self, unit_id):
return '%s.%s' % (self.EVENT_CODE_MAPPING['unit'], unit_id)
def _get_custom_unit_key(self, unit_id):
return '%s.%s' % (self.EVENT_CODE_MAPPING['custom_unit'], unit_id)
def _get_lesson_key(self, unit_id, lesson_id):
return '%s.%s.%s.%s' % (
self.EVENT_CODE_MAPPING['unit'], unit_id,
self.EVENT_CODE_MAPPING['lesson'], lesson_id
)
def _get_activity_key(self, unit_id, lesson_id):
return '%s.%s.%s.%s.%s.%s' % (
self.EVENT_CODE_MAPPING['unit'], unit_id,
self.EVENT_CODE_MAPPING['lesson'], lesson_id,
self.EVENT_CODE_MAPPING['activity'], 0
)
def _get_html_key(self, unit_id, lesson_id):
return '%s.%s.%s.%s.%s.%s' % (
self.EVENT_CODE_MAPPING['unit'], unit_id,
self.EVENT_CODE_MAPPING['lesson'], lesson_id,
self.EVENT_CODE_MAPPING['html'], 0
)
def _get_component_key(self, unit_id, lesson_id, component_id):
return '%s.%s.%s.%s.%s.%s.%s.%s' % (
self.EVENT_CODE_MAPPING['unit'], unit_id,
self.EVENT_CODE_MAPPING['lesson'], lesson_id,
self.EVENT_CODE_MAPPING['html'], 0,
self.EVENT_CODE_MAPPING['component'], component_id
)
def _get_block_key(self, unit_id, lesson_id, block_id):
return '%s.%s.%s.%s.%s.%s.%s.%s' % (
self.EVENT_CODE_MAPPING['unit'], unit_id,
self.EVENT_CODE_MAPPING['lesson'], lesson_id,
self.EVENT_CODE_MAPPING['activity'], 0,
self.EVENT_CODE_MAPPING['block'], block_id
)
def _get_assessment_key(self, assessment_id):
assessment_key = '%s.%s' % (
self.EVENT_CODE_MAPPING['assessment'], assessment_id)
# If this assessment is used as a "lesson" within a unit, prepend
# the unit identifier.
parent_unit = self._get_course().get_parent_unit(assessment_id)
if parent_unit:
assessment_key = '.'.join([self._get_unit_key(parent_unit.unit_id),
assessment_key])
return assessment_key
def get_entity_type_from_key(self, progress_entity_key):
return progress_entity_key.split('.')[-2]
def determine_if_composite_entity(self, progress_entity_key):
return self.get_entity_type_from_key(
progress_entity_key) in self.COMPOSITE_ENTITIES
def get_valid_component_ids(self, unit_id, lesson_id):
"""Returns a list of cpt ids representing trackable components."""
components = []
for cpt_name in TRACKABLE_COMPONENTS:
all_cpts = self._get_course().get_components_with_name(
unit_id, lesson_id, cpt_name)
components += [
cpt['instanceid'] for cpt in all_cpts if cpt['instanceid']]
return components
def get_valid_block_ids(self, unit_id, lesson_id):
"""Returns a list of block ids representing interactive activities."""
valid_blocks_data = self._get_valid_blocks_data(unit_id, lesson_id)
return [block[0] for block in valid_blocks_data]
def get_valid_blocks(self, unit_id, lesson_id):
"""Returns a list of blocks representing interactive activities."""
valid_blocks_data = self._get_valid_blocks_data(unit_id, lesson_id)
return [block[1] for block in valid_blocks_data]
def _get_valid_blocks_data(self, unit_id, lesson_id):
"""Returns a list of (b_id, block) representing trackable activities."""
valid_blocks = []
# Check if activity exists before calling get_activity_as_python.
unit = self._get_course().find_unit_by_id(unit_id)
lesson = self._get_course().find_lesson_by_id(unit, lesson_id)
if unit and lesson and lesson.activity:
# Get the activity corresponding to this unit/lesson combination.
activity = self.get_activity_as_python(unit_id, lesson_id)
for block_id in range(len(activity['activity'])):
block = activity['activity'][block_id]
if isinstance(block, dict):
valid_blocks.append((block_id, block))
return valid_blocks
def get_id_to_questions_dict(self):
"""Returns a dict that maps each question to a list of its answers.
Returns:
A dict that represents the questions in lessons. The keys of this
dict are question ids, and the corresponding values are dicts, each
containing the following five key-value pairs:
            - answer_counts: a list of 0's with length corresponding to number of
choices a question has.
- location: str. href value of the location of the question in the
course.
- num_attempts: int. Number of attempts for this question. This is
used as the denominator when calculating the average score for a
question. This value may differ from the sum of the elements in
'answers' because of event entities that record an answer but
not a score.
- score: int. Aggregated value of the scores.
- label: str. Human readable identifier for this question.
"""
id_to_questions = {}
for unit in self._get_course().get_units_of_type(verify.UNIT_TYPE_UNIT):
unit_id = unit.unit_id
for lesson in self._get_course().get_lessons(unit_id):
lesson_id = lesson.lesson_id
# Add mapping dicts for questions in old-style activities.
if lesson.activity:
blocks = self._get_valid_blocks_data(unit_id, lesson_id)
for block_index, (block_id, block) in enumerate(blocks):
if block['questionType'] == self.MULTIPLE_CHOICE:
# Old style question.
id_to_questions.update(
self._create_old_style_question_dict(
block, block_id, block_index, unit, lesson))
elif (block['questionType'] ==
self.MULTIPLE_CHOICE_GROUP):
# Old style multiple choice group.
for ind, q in enumerate(block['questionsList']):
id_to_questions.update(
self._create_old_style_question_dict(
q, block_id, block_index, unit,
lesson, index=ind))
# Add mapping dicts for CBv1.5 style questions.
if lesson.objectives:
for cpt in self._get_course().get_question_components(
unit_id, lesson_id):
# CB v1.5 style questions.
id_to_questions.update(
self._create_v15_lesson_question_dict(
cpt, unit, lesson))
for cpt in self._get_course().get_question_group_components(
unit_id, lesson_id):
# CB v1.5 style question groups.
id_to_questions.update(
self._create_v15_lesson_question_group_dict(
cpt, unit, lesson))
return id_to_questions
def get_id_to_assessments_dict(self):
"""Returns a dict that maps each question to a list of its answers.
Returns:
A dict that represents the questions in assessments. The keys of
this dict are question ids, and the corresponding values are dicts,
each containing the following five key-value pairs:
            - answer_counts: a list of 0's with length corresponding to number of
choices a question has.
- location: str. href value of the location of the question in the
course.
- num_attempts: int. Number of attempts for this question. This is
used as the denominator when calculating the average score for a
question. This value may differ from the sum of the elements in
'answers' because of event entities that record an answer but
not a score.
- score: int. Aggregated value of the scores.
- label: str. Human readable identifier for this question.
"""
id_to_assessments = {}
for assessment in self._get_course().get_assessment_list():
if not self._get_course().needs_human_grader(assessment):
assessment_components = self._get_course(
).get_assessment_components(assessment.unit_id)
# CB v1.5 style assessments.
for cpt in assessment_components:
if cpt['cpt_name'] == self.QUESTION_GROUP:
id_to_assessments.update(
self._create_v15_assessment_question_group_dict(
cpt, assessment))
elif cpt['cpt_name'] == self.QUESTION:
id_to_assessments.update(
self._create_v15_assessment_question_dict(
cpt, assessment))
# Old style javascript assessments.
try:
content = self._get_course().get_assessment_content(
assessment)
id_to_assessments.update(
self._create_old_style_assessment_dict(
content['assessment'], assessment))
except AttributeError:
# Assessment file does not exist.
continue
return id_to_assessments
def _get_link_for_assessment(self, assessment_id):
return 'assessment?name=%s' % (assessment_id)
def _get_link_for_activity(self, unit_id, lesson_id):
return 'activity?unit=%s&lesson=%s' % (unit_id, lesson_id)
def _get_link_for_lesson(self, unit_id, lesson_id):
return 'unit?unit=%s&lesson=%s' % (unit_id, lesson_id)
def _create_v15_question_dict(self, q_id, label, link, num_choices):
"""Returns a dict that represents CB v1.5 style question."""
return {
q_id: {
'answer_counts': [0] * num_choices,
'label': label,
'location': link,
'score': 0,
'num_attempts': 0
}
}
def _create_v15_lesson_question_dict(self, cpt, unit, lesson):
try:
question = QuestionDAO.load(cpt['quid'])
if question.type == question.MULTIPLE_CHOICE:
q_id = 'u.%s.l.%s.c.%s' % (
unit.unit_id, lesson.lesson_id, cpt['instanceid'])
lesson_label = str(lesson.index or lesson.title)
if lesson_label.startswith('Lesson '):
lesson_label = lesson_label.replace('Lesson ', '', 1)
label = 'Unit %s Lesson %s, Question %s' % (
unit.index, lesson_label, question.description)
link = self._get_link_for_lesson(unit.unit_id, lesson.lesson_id)
num_choices = len(question.dict['choices'])
return self._create_v15_question_dict(
q_id, label, link, num_choices)
else:
return {}
except Exception as e: # pylint: disable=broad-except
logging.error(
'Failed to process the question data. '
'Error: %s, data: %s', e, cpt)
return {}
def _create_v15_lesson_question_group_dict(self, cpt, unit, lesson):
try:
question_group = QuestionGroupDAO.load(cpt['qgid'])
questions = {}
for ind, quid in enumerate(question_group.question_ids):
question = QuestionDAO.load(quid)
if question.type == question.MULTIPLE_CHOICE:
q_id = 'u.%s.l.%s.c.%s.i.%s' % (
unit.unit_id, lesson.lesson_id, cpt['instanceid'], ind)
lesson_label = str(lesson.index or lesson.title)
if lesson_label.startswith('Lesson '):
lesson_label = lesson_label.replace('Lesson ', '', 1)
label = ('Unit %s Lesson %s, Question Group %s Question %s'
% (unit.index, lesson_label,
question_group.description,
question.description))
link = self._get_link_for_lesson(
unit.unit_id, lesson.lesson_id)
num_choices = len(question.dict['choices'])
questions.update(self._create_v15_question_dict(
q_id, label, link, num_choices))
return questions
except Exception as e: # pylint: disable=broad-except
logging.error(
'Failed to process the question data. '
'Error: %s, data: %s', e, cpt)
return {}
def _create_v15_assessment_question_group_dict(self, cpt, assessment):
try:
question_group = QuestionGroupDAO.load(cpt['qgid'])
questions = {}
for ind, quid in enumerate(question_group.question_ids):
question = QuestionDAO.load(quid)
if question.type == question.MULTIPLE_CHOICE:
q_id = 's.%s.c.%s.i.%s' % (
assessment.unit_id, cpt['instanceid'], ind)
label = '%s, Question Group %s Question %s' % (
assessment.title, question_group.description,
question.description)
link = self._get_link_for_assessment(assessment.unit_id)
num_choices = len(question.dict['choices'])
questions.update(
self._create_v15_question_dict(
q_id, label, link, num_choices))
return questions
except Exception as e: # pylint: disable=broad-except
logging.error(
'Failed to process the question data. '
'Error: %s, data: %s', e, cpt)
return {}
def _create_v15_assessment_question_dict(self, cpt, assessment):
try:
question = QuestionDAO.load(cpt['quid'])
if question.type == question.MULTIPLE_CHOICE:
q_id = 's.%s.c.%s' % (assessment.unit_id, cpt['instanceid'])
label = '%s, Question %s' % (
assessment.title, question.description)
link = self._get_link_for_assessment(assessment.unit_id)
num_choices = len(question.dict['choices'])
return self._create_v15_question_dict(
q_id, label, link, num_choices)
else:
return {}
except Exception as e: # pylint: disable=broad-except
logging.error(
'Failed to process the question data. '
'Error: %s, data: %s', e, cpt)
return {}
def _create_old_style_question_dict(self, block, block_id, block_index,
unit, lesson, index=None):
try:
if index is not None:
# Question is in a multiple choice group.
b_id = 'u.%s.l.%s.b.%s.i.%s' % (
unit.unit_id, lesson.lesson_id, block_id, index)
label = 'Unit %s Lesson %s Activity, Item %s Part %s' % (
unit.index, lesson.index, block_index + 1, index + 1)
else:
b_id = 'u.%s.l.%s.b.%s' % (
unit.unit_id, lesson.lesson_id, block_id)
label = 'Unit %s Lesson %s Activity, Item %s' % (
unit.index, lesson.index, block_index + 1)
return {
b_id: {
'answer_counts': [0] * len(block['choices']),
'label': label,
'location': self._get_link_for_activity(
unit.unit_id, lesson.lesson_id),
'score': 0,
'num_attempts': 0
}
}
except Exception as e: # pylint: disable=broad-except
logging.error(
'Failed to process the question data. '
'Error: %s, data: %s', e, block)
return {}
def _create_old_style_assessment_dict(self, content, assessment):
try:
questions = {}
for ind, question in enumerate(content['questionsList']):
if 'choices' in question:
questions.update(
{
's.%s.i.%s' % (assessment.unit_id, ind): {
'answer_counts': [0] * len(question['choices']),
'label': '%s, Question %s' % (
assessment.title, ind + 1),
'location': self._get_link_for_assessment(
assessment.unit_id),
'score': 0,
'num_attempts': 0
}
}
)
return questions
except Exception as e: # pylint: disable=broad-except
logging.error(
'Failed to process the question data. '
'Error: %s, data: %s', e, content)
return {}
def _update_course(self, progress, student):
event_key = self._get_course_key()
if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
return
self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)
course = self._get_course()
units, lessons = course.get_track_matching_student(student)
for unit in units:
if course.get_parent_unit(unit.unit_id):
# Completion of an assessment-as-lesson rolls up to its
# containing unit; it is not considered for overall course
# completion (except insofar as assessment completion
# contributes to the completion of its owning unit)
pass
else:
if unit.type == verify.UNIT_TYPE_ASSESSMENT:
if not self.is_assessment_completed(progress, unit.unit_id):
return
elif unit.type == verify.UNIT_TYPE_UNIT:
unit_state = self.get_unit_status(progress, unit.unit_id)
if unit_state != self.COMPLETED_STATE:
return
self._set_entity_value(progress, event_key, self.COMPLETED_STATE)
def _update_course_forced(self, progress):
"""Force state of course to completed."""
event_key = self._get_course_key()
self._set_entity_value(progress, event_key, self.COMPLETED_STATE)
def _update_unit(self, progress, event_key):
"""Updates a unit's progress if all its lessons have been completed."""
split_event_key = event_key.split('.')
assert len(split_event_key) == 2
unit_id = split_event_key[1]
if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
return
# Record that at least one lesson in this unit has been completed.
self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)
# Check if all lessons in this unit have been completed.
lessons = self._get_course().get_lessons(unit_id)
for lesson in lessons:
if (self.get_lesson_status(
progress,
unit_id, lesson.lesson_id) != self.COMPLETED_STATE):
return
# Check whether pre/post assessments in this unit have been completed.
unit = self._get_course().find_unit_by_id(unit_id)
pre_assessment_id = unit.pre_assessment
if (pre_assessment_id and
not self.get_assessment_status(progress, pre_assessment_id)):
return
post_assessment_id = unit.post_assessment
if (post_assessment_id and
not self.get_assessment_status(progress, post_assessment_id)):
return
# Record that all lessons in this unit have been completed.
self._set_entity_value(progress, event_key, self.COMPLETED_STATE)
def _update_unit_forced(self, progress, event_key):
"""Force-mark a unit as completed, ignoring normal criteria."""
# Record that all lessons in this unit have been completed.
self._set_entity_value(progress, event_key, self.COMPLETED_STATE)
def _update_lesson(self, progress, event_key):
"""Updates a lesson's progress based on the progress of its children."""
split_event_key = event_key.split('.')
assert len(split_event_key) == 4
unit_id = split_event_key[1]
lesson_id = split_event_key[3]
if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
return
# Record that at least one part of this lesson has been completed.
self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)
lessons = self._get_course().get_lessons(unit_id)
for lesson in lessons:
if str(lesson.lesson_id) == lesson_id and lesson:
# Is the activity completed?
if (lesson.activity and self.get_activity_status(
progress, unit_id, lesson_id) != self.COMPLETED_STATE):
return
# Are all components of the lesson completed?
if (self.get_html_status(
progress, unit_id, lesson_id) != self.COMPLETED_STATE):
return
# Record that all activities in this lesson have been completed.
self._set_entity_value(progress, event_key, self.COMPLETED_STATE)
def _update_activity(self, progress, event_key):
"""Updates activity's progress when all interactive blocks are done."""
split_event_key = event_key.split('.')
assert len(split_event_key) == 6
unit_id = split_event_key[1]
lesson_id = split_event_key[3]
if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
return
# Record that at least one block in this activity has been completed.
self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)
valid_block_ids = self.get_valid_block_ids(unit_id, lesson_id)
for block_id in valid_block_ids:
if not self.is_block_completed(
progress, unit_id, lesson_id, block_id):
return
# Record that all blocks in this activity have been completed.
self._set_entity_value(progress, event_key, self.COMPLETED_STATE)
def _update_html(self, progress, event_key):
"""Updates html's progress when all interactive blocks are done."""
split_event_key = event_key.split('.')
assert len(split_event_key) == 6
unit_id = split_event_key[1]
lesson_id = split_event_key[3]
if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
return
# Record that at least one block in this activity has been completed.
self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)
cpt_ids = self.get_valid_component_ids(unit_id, lesson_id)
for cpt_id in cpt_ids:
if not self.is_component_completed(
progress, unit_id, lesson_id, cpt_id):
return
# Record that all blocks in this activity have been completed.
self._set_entity_value(progress, event_key, self.COMPLETED_STATE)
def _update_custom_unit(self, student, event_key, state):
"""Update custom unit."""
if student.is_transient:
return
progress = self.get_or_create_progress(student)
current_state = self._get_entity_value(progress, event_key)
if current_state == state or current_state == self.COMPLETED_STATE:
return
self._set_entity_value(progress, event_key, state)
progress.updated_on = datetime.datetime.now()
progress.put()
UPDATER_MAPPING = {
'activity': _update_activity,
'course': _update_course,
'course_forced': _update_course_forced,
'html': _update_html,
'lesson': _update_lesson,
'unit': _update_unit,
'unit_forced': _update_unit_forced,
}
# Dependencies for recording derived events. The key is the current
# event, and the value is a tuple, each element of which contains:
# - the dependent entity to be updated
# - the transformation to apply to the id of the current event to get the
# id for the derived parent event
DERIVED_EVENTS = {
'block': (
{
'entity': 'activity',
'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2]))
},
),
'activity': (
{
'entity': 'lesson',
'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2]))
},
),
'lesson': (
{
'entity': 'unit',
'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2]))
},
),
'component': (
{
'entity': 'html',
'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2]))
},
),
'html': (
{
'entity': 'lesson',
'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2]))
},
),
'assessment': (
{
'entity': 'unit',
'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2]))
},
),
}
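    # Illustrative roll-up (ids are made up): completing block 'u.1.l.5.a.0.b.3'
    # derives the activity key 'u.1.l.5.a.0'; the activity derives the lesson
    # key 'u.1.l.5', which in turn derives the unit key 'u.1'.  On the HTML
    # side, a component key 'u.1.l.5.h.0.c.xy12' rolls up to 'u.1.l.5.h.0' and
    # then to 'u.1.l.5' in the same way.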
def force_course_completed(self, student):
self._put_event(
student, 'course_forced', self._get_course_key())
def force_unit_completed(self, student, unit_id):
"""Records that the given student has completed a unit.
NOTE: This should not generally be used directly. The definition
of completing a unit is generally taken to be completion of all
parts of all components of a unit (assessments, lessons,
activities in lessons, etc. Directly marking a unit as complete
is provided only for manual marking where the student feels "done",
but has not taken a fully completionist approach to the material.
Args:
student: A logged-in, registered student object.
unit_id: The ID of the unit to be marked as complete.
"""
self._put_event(
student, 'unit_forced', self._get_unit_key(unit_id))
def put_activity_completed(self, student, unit_id, lesson_id):
"""Records that the given student has completed an activity."""
if not self._get_course().is_valid_unit_lesson_id(unit_id, lesson_id):
return
self._put_event(
student, 'activity', self._get_activity_key(unit_id, lesson_id))
def put_html_completed(self, student, unit_id, lesson_id):
"""Records that the given student has completed a lesson page."""
if not self._get_course().is_valid_unit_lesson_id(unit_id, lesson_id):
return
self._put_event(
student, 'html', self._get_html_key(unit_id, lesson_id))
def put_block_completed(self, student, unit_id, lesson_id, block_id):
"""Records that the given student has completed an activity block."""
if not self._get_course().is_valid_unit_lesson_id(unit_id, lesson_id):
return
if block_id not in self.get_valid_block_ids(unit_id, lesson_id):
return
self._put_event(
student,
'block',
self._get_block_key(unit_id, lesson_id, block_id)
)
def put_component_completed(self, student, unit_id, lesson_id, cpt_id):
"""Records completion of a component in a lesson body."""
if not self._get_course().is_valid_unit_lesson_id(unit_id, lesson_id):
return
if cpt_id not in self.get_valid_component_ids(unit_id, lesson_id):
return
self._put_event(
student,
'component',
self._get_component_key(unit_id, lesson_id, cpt_id)
)
def put_assessment_completed(self, student, assessment_id):
"""Records that the given student has completed the given assessment."""
if not self._get_course().is_valid_assessment_id(assessment_id):
return
self._put_event(
student, 'assessment', self._get_assessment_key(assessment_id))
def put_custom_unit_completed(self, student, unit_id):
"""Records that the student has completed the given custom_unit."""
if not self._get_course().is_valid_custom_unit(unit_id):
return
self._update_custom_unit(
student, self._get_custom_unit_key(unit_id),
self.COMPLETED_STATE)
def put_custom_unit_in_progress(self, student, unit_id):
"""Records that the given student has started the given custom_unit."""
if not self._get_course().is_valid_custom_unit(unit_id):
return
self._update_custom_unit(
student, self._get_custom_unit_key(unit_id),
self.IN_PROGRESS_STATE)
def put_activity_accessed(self, student, unit_id, lesson_id):
"""Records that the given student has accessed this activity."""
# This method currently exists because we need to mark activities
# without interactive blocks as 'completed' when they are accessed.
if not self.get_valid_block_ids(unit_id, lesson_id):
self.put_activity_completed(student, unit_id, lesson_id)
def put_html_accessed(self, student, unit_id, lesson_id):
"""Records that the given student has accessed this lesson page."""
# This method currently exists because we need to mark lesson bodies
# without interactive blocks as 'completed' when they are accessed.
if not self.get_valid_component_ids(unit_id, lesson_id):
self.put_html_completed(student, unit_id, lesson_id)
def _put_event(self, student, event_entity, event_key):
"""Starts a cascade of updates in response to an event taking place."""
if student.is_transient or event_entity not in self.EVENT_CODE_MAPPING:
return
progress = self.get_or_create_progress(student)
self._update_event(
student, progress, event_entity, event_key, direct_update=True)
progress.updated_on = datetime.datetime.now()
progress.put()
def _update_event(self, student, progress, event_entity, event_key,
direct_update=False):
"""Updates statistics for the given event, and for derived events.
Args:
student: the student
progress: the StudentProgressEntity for the student
event_entity: the name of the affected entity (unit, lesson, etc.)
event_key: the key for the recorded event
direct_update: True if this event is being updated explicitly; False
if it is being auto-updated.
"""
if direct_update or event_entity not in self.UPDATER_MAPPING:
if event_entity in self.UPDATER_MAPPING:
# This is a derived event, so directly mark it as completed.
self._set_entity_value(
progress, event_key, self.COMPLETED_STATE)
else:
# This is not a derived event, so increment its counter by one.
self._inc(progress, event_key)
else:
self.UPDATER_MAPPING[event_entity](self, progress, event_key)
if event_entity in self.DERIVED_EVENTS:
for derived_event in self.DERIVED_EVENTS[event_entity]:
parent_event_key = derived_event['generate_parent_id'](
event_key)
if parent_event_key:
# Event entities may contribute upwards to more than one
# kind of container. Only pass the notification up to the
# handler that our event_key indicates we actually have.
leaf_type = self.get_entity_type_from_key(parent_event_key)
event_entity = derived_event['entity']
if leaf_type == self.EVENT_CODE_MAPPING[event_entity]:
self._update_event(
student=student,
progress=progress,
event_entity=event_entity,
event_key=parent_event_key)
else:
# Only update course status when we are at the top of
# a containment list
self._update_course(progress, student)
else:
# Or only update course status when we are doing something not
# in derived events (Unit, typically).
self._update_course(progress, student)
utils.run_hooks(self.POST_UPDATE_PROGRESS_HOOK, self._get_course(),
student, progress, event_entity, event_key)
def get_course_status(self, progress):
return self._get_entity_value(progress, self._get_course_key())
def get_unit_status(self, progress, unit_id):
return self._get_entity_value(progress, self._get_unit_key(unit_id))
def get_custom_unit_status(self, progress, unit_id):
return self._get_entity_value(
progress, self._get_custom_unit_key(unit_id))
def get_lesson_status(self, progress, unit_id, lesson_id):
return self._get_entity_value(
progress, self._get_lesson_key(unit_id, lesson_id))
def get_activity_status(self, progress, unit_id, lesson_id):
return self._get_entity_value(
progress, self._get_activity_key(unit_id, lesson_id))
def get_html_status(self, progress, unit_id, lesson_id):
return self._get_entity_value(
progress, self._get_html_key(unit_id, lesson_id))
def get_block_status(self, progress, unit_id, lesson_id, block_id):
return self._get_entity_value(
progress, self._get_block_key(unit_id, lesson_id, block_id))
def get_assessment_status(self, progress, assessment_id):
return self._get_entity_value(
progress, self._get_assessment_key(assessment_id))
def is_block_completed(self, progress, unit_id, lesson_id, block_id):
value = self._get_entity_value(
progress, self._get_block_key(unit_id, lesson_id, block_id))
return value is not None and value > 0
def is_component_completed(self, progress, unit_id, lesson_id, cpt_id):
value = self._get_entity_value(
progress, self._get_component_key(unit_id, lesson_id, cpt_id))
return value is not None and value > 0
def is_assessment_completed(self, progress, assessment_id):
value = self._get_entity_value(
progress, self._get_assessment_key(assessment_id))
return value is not None and value > 0
def is_custom_unit_completed(self, progress, unit_id):
value = self.get_custom_unit_status(progress, unit_id)
return self.COMPLETED_STATE == value
def get_or_create_progress(self, student):
if student.user_id in self._progress_by_user_id:
# Use per-instance cache of student progress entities. This is
# necessary due to multiple calls to this function during
# callbacks to POST_UPDATE_PROGRESS_HOOK functions. Note that
# this relies on callers being disciplined about getting the
# progress entity via Course.get_progress_tracker().
return self._progress_by_user_id[student.user_id]
progress = StudentPropertyEntity.get(student, self.PROPERTY_KEY)
if not progress:
progress = StudentPropertyEntity.create(
student=student, property_name=self.PROPERTY_KEY)
progress.put()
self._progress_by_user_id[student.user_id] = progress
return progress
def get_course_progress(self, student):
"""Return [NOT_STARTED|IN_PROGRESS|COMPLETED]_STATE for course."""
progress = self.get_or_create_progress(student)
return self.get_course_status(progress) or self.NOT_STARTED_STATE
def get_unit_progress(self, student, progress=None):
"""Returns a dict with the states of each unit."""
if student.is_transient:
return {}
units = self._get_course().get_units()
if progress is None:
progress = self.get_or_create_progress(student)
result = {}
for unit in units:
if unit.type == verify.UNIT_TYPE_ASSESSMENT:
result[unit.unit_id] = self.is_assessment_completed(
progress, unit.unit_id)
elif unit.type == verify.UNIT_TYPE_UNIT:
value = self.get_unit_status(progress, unit.unit_id)
result[unit.unit_id] = value or 0
elif unit.type == verify.UNIT_TYPE_CUSTOM:
value = self.get_custom_unit_status(progress, unit.unit_id)
result[unit.unit_id] = value or 0
return result
def get_unit_percent_complete(self, student):
"""Returns a dict with each unit's completion in [0.0, 1.0]."""
if student.is_transient:
return {}
course = self._get_course()
units = course.get_units()
assessment_scores = {int(s['id']): s['score'] / 100.0
for s in course.get_all_scores(student)}
result = {}
progress = self.get_or_create_progress(student)
for unit in units:
# Assessments are scored as themselves.
if unit.type == verify.UNIT_TYPE_ASSESSMENT:
result[unit.unit_id] = assessment_scores[unit.unit_id]
elif unit.type == verify.UNIT_TYPE_UNIT:
if (unit.pre_assessment and
assessment_scores[unit.pre_assessment] >= 1.0):
# Use pre-assessment iff it exists and student scored 100%
result[unit.unit_id] = 1.0
else:
# Otherwise, count % completion on lessons within unit.
num_completed = 0
lesson_progress = self.get_lesson_progress(
student, unit.unit_id, progress=progress)
if not lesson_progress:
result[unit.unit_id] = 0.0
else:
for lesson in lesson_progress.values():
if lesson['has_activity']:
# Lessons that have activities must be
# activity-complete as well as HTML complete.
if (lesson['html'] == self.COMPLETED_STATE and
lesson['activity'] == self.COMPLETED_STATE):
num_completed += 1
else:
# Lessons without activities just need HTML
if lesson['html'] == self.COMPLETED_STATE:
num_completed += 1
result[unit.unit_id] = round(
num_completed / float(len(lesson_progress)), 3)
return result
def get_lesson_progress(self, student, unit_id, progress=None):
"""Returns a dict saying which lessons in this unit are completed."""
if student.is_transient:
return {}
lessons = self._get_course().get_lessons(unit_id)
if progress is None:
progress = self.get_or_create_progress(student)
result = {}
for lesson in lessons:
result[lesson.lesson_id] = {
'html': self.get_html_status(
progress, unit_id, lesson.lesson_id) or 0,
'activity': self.get_activity_status(
progress, unit_id, lesson.lesson_id) or 0,
'has_activity': lesson.has_activity,
}
return result
def get_component_progress(self, student, unit_id, lesson_id, cpt_id):
"""Returns the progress status of the given component."""
if student.is_transient:
return 0
progress = self.get_or_create_progress(student)
return self.is_component_completed(
progress, unit_id, lesson_id, cpt_id) or 0
def _get_entity_value(self, progress, event_key):
if not progress.value:
return None
return transforms.loads(progress.value).get(event_key)
def _set_entity_value(self, student_property, key, value):
"""Sets the integer value of a student property.
Note: this method does not commit the change. The calling method should
call put() on the StudentPropertyEntity.
Args:
student_property: the StudentPropertyEntity
key: the student property whose value should be incremented
value: the value to increment this property by
"""
try:
progress_dict = transforms.loads(student_property.value)
except (AttributeError, TypeError):
progress_dict = {}
progress_dict[key] = value
student_property.value = transforms.dumps(progress_dict)
def _inc(self, student_property, key, value=1):
"""Increments the integer value of a student property.
Note: this method does not commit the change. The calling method should
call put() on the StudentPropertyEntity.
Args:
student_property: the StudentPropertyEntity
key: the student property whose value should be incremented
value: the value to increment this property by
"""
try:
progress_dict = transforms.loads(student_property.value)
except (AttributeError, TypeError):
progress_dict = {}
if key not in progress_dict:
progress_dict[key] = 0
progress_dict[key] += value
student_property.value = transforms.dumps(progress_dict)
@classmethod
def get_elements_from_key(cls, key):
"""Decomposes the key into a dictionary with its values.
Args:
key: a string, the key of an element of the progress. For
example, u.1.l.5.h.0
Returns:
A dictionary mapping each element in the key to its value. For
the key u.1.l.5.h.0 the result is:
{
'unit': 1,
'lesson': 5,
'html': 0,
'unit_forced': 1
}
"""
reversed_event_mapping = defaultdict(lambda: [])
for full_type, value in cls.EVENT_CODE_MAPPING.iteritems():
reversed_event_mapping[value].append(full_type)
key_elements = key.split('.')
assert len(key_elements) % 2 == 0
result = {}
for index in range(0, len(key_elements), 2):
element_type = key_elements[index]
element_value = key_elements[index + 1]
full_element_types = reversed_event_mapping.get(element_type)
for full_element_type in full_element_types:
result[full_element_type] = element_value
return result
class ProgressStats(object):
"""Defines the course structure definition for course progress tracking."""
def __init__(self, course):
self._course = course
self._tracker = UnitLessonCompletionTracker(course)
def compute_entity_dict(self, entity, parent_ids):
"""Computes the course structure dictionary.
Args:
entity: str. Represents for which level of entity the dict is being
computed. Valid entity levels are defined as keys to the dict
defined below, COURSE_STRUCTURE_DICT.
parent_ids: list of ids necessary to get children of the current
entity.
Returns:
A nested dictionary representing the structure of the course.
Every other level of the dictionary consists of a key, the label of
the entity level defined by EVENT_CODE_MAPPING in
UnitLessonCompletionTracker, whose value is a dictionary
INSTANCES_DICT. The keys of INSTANCES_DICT are instance_ids of the
corresponding entities, and the values are the entity_dicts of the
instance's children, in addition to a field called 'label'. Label
represents the user-facing name of the entity rather than
its intrinsic id. If one of these values is empty, this means
that the corresponding entity has no children.
Ex:
A Course with the following outlined structure:
Pre Assessment
Unit 1
Lesson 1
Unit 2
will have the following dictionary representation:
{
's': [{
'child_id': 1,
'child_val': {'label': 'Pre Assessment'}
}],
'u': [{
'child_id': 2,
'child_val': {
'l': [{
'child_id': 3,
'child_val': {'label': 1}}],
'label': 1}, {
'child_id': 4,
'child_val': {'label': 2}
}]
'label': 'UNTITLED COURSE'
}
"""
entity_dict = {'label': self._get_label(entity, parent_ids)}
for child_entity, get_children_ids in self.COURSE_STRUCTURE_DICT[
entity]['children']:
child_entity_list = []
for child_id in get_children_ids(self, *parent_ids):
new_parent_ids = parent_ids + [child_id]
child_entity_list.append({
'child_id': child_id,
'child_val': self.compute_entity_dict(
child_entity, new_parent_ids)
})
entity_dict[UnitLessonCompletionTracker.EVENT_CODE_MAPPING[
child_entity]] = child_entity_list
return entity_dict
def _get_course(self):
return self._course
def _get_unit_ids_of_type_unit(self):
units = self._get_course().get_units_of_type(verify.UNIT_TYPE_UNIT)
return [unit.unit_id for unit in units]
def _get_assessment_ids(self):
contained = set()
for unit in self._get_course().get_units_of_type(verify.UNIT_TYPE_UNIT):
if unit.pre_assessment:
contained.add(unit.pre_assessment)
if unit.post_assessment:
contained.add(unit.post_assessment)
assessments = self._get_course().get_assessment_list()
return [a.unit_id for a in assessments if a.unit_id not in contained]
def _get_lesson_ids(self, unit_id):
lessons = self._get_course().get_lessons(unit_id)
return [lesson.lesson_id for lesson in lessons]
def _get_activity_ids(self, unit_id, lesson_id):
unit = self._get_course().find_unit_by_id(unit_id)
if self._get_course().find_lesson_by_id(unit, lesson_id).activity:
return [0]
return []
def _get_html_ids(self, unused_unit_id, unused_lesson_id):
return [0]
def _get_block_ids(self, unit_id, lesson_id, unused_activity_id):
return self._tracker.get_valid_block_ids(unit_id, lesson_id)
def _get_component_ids(self, unit_id, lesson_id, unused_html_id):
return self._tracker.get_valid_component_ids(unit_id, lesson_id)
def _get_label(self, entity, parent_ids):
return self.ENTITY_TO_HUMAN_READABLE_NAME_DICT[entity](
self, *parent_ids)
def _get_course_label(self):
# pylint: disable=protected-access
return self._get_course().app_context.get_environ()['course']['title']
def _get_unit_label(self, unit_id):
unit = self._get_course().find_unit_by_id(unit_id)
return 'Unit %s' % unit.index
def _get_assessment_label(self, unit_id, assessment_id=None):
if not assessment_id:
assessment_id = unit_id
assessment = self._get_course().find_unit_by_id(assessment_id)
return assessment.title
def _get_lesson_label(self, unit_id, lesson_id):
unit = self._get_course().find_unit_by_id(unit_id)
lesson = self._get_course().find_lesson_by_id(unit, lesson_id)
return lesson.index
def _get_activity_label(self, unit_id, lesson_id, unused_activity_id):
return str('L%s.%s' % (
self._get_course().find_unit_by_id(unit_id).index,
self._get_lesson_label(unit_id, lesson_id)))
def _get_html_label(self, unit_id, lesson_id, unused_html_id):
return self._get_activity_label(unit_id, lesson_id, unused_html_id)
def _get_block_label(self, unit_id, lesson_id, unused_activity_id,
block_id):
return str('L%s.%s.%s' % (
self._get_course().find_unit_by_id(unit_id).index,
self._get_lesson_label(unit_id, lesson_id),
block_id))
def _get_component_label(self, unit_id, lesson_id, unused_html_id,
component_id):
return self._get_block_label(
unit_id, lesson_id, unused_html_id, component_id)
def _get_pre_post_assessments(self, unit_id):
ret = []
unit = self._get_course().find_unit_by_id(unit_id)
if unit.pre_assessment:
ret.append(unit.pre_assessment)
if unit.post_assessment:
ret.append(unit.post_assessment)
return ret
# Outlines the structure of the course. The key is the entity level, and
# its value is a dictionary with following keys and its values:
# 'children': list of tuples. Each tuple consists of string representation
# of the child entity(ex: 'lesson') and a function to get the
# children elements. If the entity does not have children, the
# value will be an empty list.
# 'id': instance_id of the entity. If the entity is represented by a class
# with an id attribute(ex: units), string representation of the
# attribute is stored here. If the entity is defined by a dictionary
# (ex: components), then the value is the string 'None'.
#
COURSE_STRUCTURE_DICT = {
'course': {
'children': [('unit', _get_unit_ids_of_type_unit),
('assessment', _get_assessment_ids)],
},
'unit': {
'children': [('lesson', _get_lesson_ids),
('assessment', _get_pre_post_assessments)]
},
'assessment': {
'children': [],
},
'lesson': {
'children': [('activity', _get_activity_ids),
('html', _get_html_ids)],
},
'activity': {
'children': [('block', _get_block_ids)],
},
'html': {
'children': [('component', _get_component_ids)],
},
'block': {
'children': [],
},
'component': {
'children': [],
}
}
ENTITY_TO_HUMAN_READABLE_NAME_DICT = {
'course': _get_course_label,
'unit': _get_unit_label,
'assessment': _get_assessment_label,
'lesson': _get_lesson_label,
'activity': _get_activity_label,
'html': _get_html_label,
'block': _get_block_label,
'component': _get_component_label
}
|
the-stack_106_28852 | # !/usr/bin/env python
# -*- coding:utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://www.tensorflow.org/get_started/mnist/beginners
# https://codelabs.developers.google.com/codelabs/cloud-tensorflow-mnist
#
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
tf.set_random_seed(0)
# data
mnist = read_data_sets("../MNIST_data/", one_hot=True)
print("MNIST data ready for analysis!\n") # get data ready
batch_size = 100 # how many imgs in each batch?
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# the full test set stays available as mnist.test.images / mnist.test.labels (shapes below)
'''
In [15]: shape(batch_xs)
Out[15]: (100, 784)
In [16]: shape(batch_ys)
Out[16]: (100, 10)
In [17]: shape(mnist.test.images)
Out[17]: (10000, 784)
In [18]: shape(mnist.test.labels)
Out[18]: (10000, 10)
'''
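# --- Illustrative continuation (not part of the original file) ---------------
# The script above stops after loading the data.  A minimal softmax-regression
# model in the spirit of the "MNIST for beginners" tutorial linked in the
# header could look like the sketch below; the variable names (X, Y_, W, b) and
# the learning rate are our choices, not taken from this file.
X = tf.placeholder(tf.float32, [None, 784])    # flattened 28x28 images
Y_ = tf.placeholder(tf.float32, [None, 10])    # one-hot labels
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
Y = tf.nn.softmax(tf.matmul(X, W) + b)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(Y_ * tf.log(Y), axis=1))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
correct = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        xs, ys = mnist.train.next_batch(batch_size)
        sess.run(train_step, feed_dict={X: xs, Y_: ys})
    print(sess.run(accuracy, feed_dict={X: mnist.test.images, Y_: mnist.test.labels}))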
|
the-stack_106_28856 | import pytest
from plenum.cli.helper import getClientGrams
from plenum.common.constants import NAME, VERSION, TYPE, KEYS
from plenum.test.cli.helper import assertCliTokens
from plenum.test.cli.test_command_reg_ex import getMatchedVariables
from prompt_toolkit.contrib.regular_languages.compiler import compile
from sovrin_client.cli.helper import getNewClientGrams
from sovrin_common.constants import REF
from sovrin_common.roles import Roles
@pytest.fixture(scope="module")
def grammar():
grams = getClientGrams() + getNewClientGrams()
return compile("".join(grams))
def testSendNymWithRole(grammar):
dest = "LNAyBZUjvLF7duhrNtOWgdAKs18nHdbJUxJLT39iEGU="
role = Roles.TRUST_ANCHOR.name
matchedVars = getMatchedVariables(
grammar, "send NYM dest={} role={}".format(dest, role))
assertCliTokens(matchedVars, {
"send_nym": "send NYM", "dest_id": dest, "role": role})
def testSendNymWithoutRole(grammar):
dest = "LNAyBZUjvLF7duhrNtOWgdAKs18nHdbJUxJLT39iEGU="
matchedVars = getMatchedVariables(grammar, 'send NYM dest={}'.format(dest))
assertCliTokens(matchedVars, {
"send_nym": "send NYM", "dest_id": dest})
def testSendNymVerkey(grammar):
dest = "LNAyBZUjvLF7duhrNtOWgdAKs18nHdbJUxJLT39iEGU="
role = Roles.TRUST_ANCHOR.name
verkey = "LNAyBZUjvLF7duhrNtOWgdAKs18nHdbJUxJLT39iEGU="
# Test with verkey
matchedVars = getMatchedVariables(
grammar, "send NYM dest={} role={} verkey={}".format(dest, role, verkey))
assertCliTokens(matchedVars, {
"send_nym": "send NYM", "dest_id": dest,
"role": role, "new_ver_key": verkey
})
# Test without verkey
matchedVars = getMatchedVariables(
grammar,
"send NYM dest={} role={}".format(dest, role))
assertCliTokens(matchedVars, {
"send_nym": "send NYM", "dest_id": dest, "role": role
})
# Verkey being empty string is not supported
with pytest.raises(AssertionError):
matchedVars = getMatchedVariables(
grammar,
"send NYM dest={} role={} verkey={}".format(dest, role, ''))
def testGetNym(grammar):
dest = "LNAyBZUjvLF7duhrNtOWgdAKs18nHdbJUxJLT39iEGU="
matchedVars = getMatchedVariables(
grammar, "send GET_NYM dest={}".format(dest))
assertCliTokens(matchedVars, {
"send_get_nym": "send GET_NYM", "dest_id": dest})
def testSendSchema(grammar):
name = "Degree"
version = "1.0"
keys = "undergrad,last_name,first_name,birth_date,postgrad,expiry_date"
matchedVars = getMatchedVariables(grammar,
'send SCHEMA name={} version={} keys={}'
.format(name, version, keys))
assertCliTokens(matchedVars, {
"send_schema": "send SCHEMA", NAME: name, VERSION: version, KEYS: keys})
def test_send_get_schema(grammar):
dest = "LNAyBZUjvLF7duhrNtOWgdAKs18nHdbJUxJLT39iEGU="
name = "Degree"
version = "1.0"
matchedVars = getMatchedVariables(grammar,
'send GET_SCHEMA dest={} name={} version={}'
.format(dest, name, version))
assertCliTokens(matchedVars, {
"send_get_schema": "send GET_SCHEMA", NAME: name, VERSION: version})
def testSendAttribRegEx(grammar):
dest = "LNAyBZUjvLF7duhrNtOWgdAKs18nHdbJUxJLT39iEGU="
raw = '{"legal org": "BRIGHAM YOUNG UNIVERSITY, PROVO, UT", ' \
'"email": "[email protected]"}'
matchedVars = getMatchedVariables(grammar,
'send ATTRIB dest={} raw={}'.format(dest, raw))
assertCliTokens(matchedVars, {
"send_attrib": "send ATTRIB", "dest_id": dest, "raw": raw})
def test_send_get_attrib_regex(grammar):
dest = "LNAyBZUjvLF7duhrNtOWgdAKs18nHdbJUxJLT39iEGU="
raw = 'legal'
matchedVars = getMatchedVariables(grammar,
'send GET_ATTR dest={} raw={}'.format(dest, raw))
assertCliTokens(matchedVars, {
"send_get_attr": "send GET_ATTR", "dest_id": dest, "raw": raw})
def testAddAttrRegEx(grammar):
getMatchedVariables(
grammar,
"add attribute first_name=Tyler,last_name=Ruff,birth_date=12/17/1991,undergrad=True,postgrad=True,expiry_date=12/31/2101 for Tyler")
def testAddAttrProverRegEx(grammar):
getMatchedVariables(grammar,
"attribute known to BYU first_name=Tyler, last_name=Ruff, birth_date=12/17/1991, undergrad=True, postgrad=True, expiry_date=12/31/2101")
def testSendClaimDefRegEx(grammar):
matchedVars = getMatchedVariables(grammar, "send CLAIM_DEF ref=15 signature_type=CL")
from sovrin_common.constants import SIGNATURE_TYPE
assertCliTokens(matchedVars, {
"send_claim_def": "send CLAIM_DEF", REF: "15", SIGNATURE_TYPE: "CL"})
def test_send_get_claim_def_regex(grammar):
matchedVars = getMatchedVariables(grammar, "send GET_CLAIM_DEF ref=15 signature_type=CL")
from sovrin_common.constants import SIGNATURE_TYPE
assertCliTokens(matchedVars, {
"send_get_claim_def": "send GET_CLAIM_DEF", REF: "15", SIGNATURE_TYPE: "CL"})
def testShowFileCommandRegEx(grammar):
matchedVars = getMatchedVariables(grammar,
"show sample/faber-invitation.sovrin")
assertCliTokens(matchedVars, {
"show_file": "show", "file_path": "sample/faber-invitation.sovrin"})
matchedVars = getMatchedVariables(grammar,
"show sample/faber-invitation.sovrin ")
assertCliTokens(matchedVars, {
"show_file": "show", "file_path": "sample/faber-invitation.sovrin"})
def testLoadFileCommandRegEx(grammar):
matchedVars = getMatchedVariables(grammar,
"load sample/faber-invitation.sovrin")
assertCliTokens(matchedVars, {
"load_file": "load", "file_path": "sample/faber-invitation.sovrin"})
matchedVars = getMatchedVariables(grammar,
"load sample/faber-invitation.sovrin ")
assertCliTokens(matchedVars, {
"load_file": "load", "file_path": "sample/faber-invitation.sovrin"})
def testShowLinkRegEx(grammar):
matchedVars = getMatchedVariables(grammar, "show link faber")
assertCliTokens(matchedVars, {"show_link": "show link",
"link_name": "faber"})
matchedVars = getMatchedVariables(grammar, "show link faber college")
assertCliTokens(matchedVars, {"show_link": "show link",
"link_name": "faber college"})
matchedVars = getMatchedVariables(grammar, "show link faber college ")
assertCliTokens(matchedVars, {"show_link": "show link",
"link_name": "faber college "})
def testConnectRegEx(grammar):
getMatchedVariables(grammar, "connect dummy")
getMatchedVariables(grammar, "connect test")
getMatchedVariables(grammar, "connect live")
def testSyncLinkRegEx(grammar):
matchedVars = getMatchedVariables(grammar, "sync faber")
assertCliTokens(matchedVars, {"sync_link": "sync", "link_name": "faber"})
matchedVars = getMatchedVariables(grammar, 'sync "faber"')
assertCliTokens(matchedVars, {"sync_link": "sync", "link_name": '"faber"'})
matchedVars = getMatchedVariables(grammar, 'sync "faber" ')
assertCliTokens(matchedVars, {"sync_link": "sync", "link_name": '"faber" '})
def testPingTargetRegEx(grammar):
matchedVars = getMatchedVariables(grammar, "ping faber")
assertCliTokens(matchedVars, {"ping": "ping", "target_name": "faber"})
def testAcceptInvitationLinkRegEx(grammar):
matchedVars = getMatchedVariables(grammar, "accept invitation from faber")
assertCliTokens(matchedVars, {"accept_link_invite": "accept invitation from",
"link_name": "faber"})
matchedVars = getMatchedVariables(grammar, 'accept invitation from "faber"')
assertCliTokens(matchedVars, {"accept_link_invite": "accept invitation from",
"link_name": '"faber"'})
matchedVars = getMatchedVariables(grammar, 'accept invitation from "faber" ')
assertCliTokens(matchedVars, {"accept_link_invite": "accept invitation from",
"link_name": '"faber" '})
def testShowClaimRegEx(grammar):
matchedVars = getMatchedVariables(grammar, "show claim Transcript")
assertCliTokens(matchedVars, {"show_claim": "show claim",
"claim_name": "Transcript"})
matchedVars = getMatchedVariables(grammar, 'show claim "Transcript"')
assertCliTokens(matchedVars, {"show_claim": "show claim",
"claim_name": '"Transcript"'})
def testRequestClaimRegEx(grammar):
matchedVars = getMatchedVariables(grammar, "request claim Transcript")
assertCliTokens(matchedVars, {"req_claim": "request claim",
"claim_name": "Transcript"})
matchedVars = getMatchedVariables(grammar, 'request claim "Transcript"')
assertCliTokens(matchedVars, {"req_claim": "request claim",
"claim_name": '"Transcript"'})
def testProofReqRegEx(grammar):
matchedVars = getMatchedVariables(grammar,
"show proof request Transcript")
assertCliTokens(matchedVars, {"show_proof_req": "show proof request",
"proof_req_name": "Transcript"})
matchedVars = getMatchedVariables(grammar,
"show proof request Transcript ")
assertCliTokens(matchedVars, {"show_proof_req": "show proof request",
"proof_req_name": "Transcript "})
def testSendProofReqRegEx(grammar):
matchedVars = getMatchedVariables(grammar,
"send proof-request Over-21 to JaneDoe")
assertCliTokens(matchedVars, {"send_proof_req": "send proof-request",
"proof_request_name": "Over-21",
"target": " JaneDoe"})
def testSetAttribute(grammar):
matchedVars = getMatchedVariables(
grammar, "set first_name to Alice")
assertCliTokens(matchedVars, {
"set_attr": "set", "attr_name": "first_name", "attr_value": "Alice"})
def testSendProof(grammar):
getMatchedVariables(grammar, 'send proof Job-Application to Acme')
def testSendPoolUpgrade(grammar):
# Testing for start
getMatchedVariables(grammar, "send POOL_UPGRADE name=upgrade-13 "
"version=0.0.6 sha256=f284bdc3c1c9e24a494e285cb387c69510f28de51c15bb93179d9c7f28705398 action=start "
"schedule={'AtDfpKFe1RPgcr5nnYBw1Wxkgyn8Zjyh5MzFoEUTeoV3': "
"'2017-01-25T12:49:05.258870+00:00', "
"'4yC546FFzorLPgTNTc6V43DnpFrR8uHvtunBxb2Suaa2': "
"'2017-01-25T12:33:53.258870+00:00', "
"'JpYerf4CssDrH76z7jyQPJLnZ1vwYgvKbvcp16AB5RQ': "
"'2017-01-25T12:44:01.258870+00:00', "
"'DG5M4zFm33Shrhjj6JB7nmx9BoNJUq219UXDfvwBDPe2': "
"'2017-01-25T12:38:57.258870+00:00'} "
"timeout=10")
# Testing for cancel
getMatchedVariables(grammar, 'send POOL_UPGRADE name=upgrade-13 version=0.0.6 '
'sha256=aad1242 action=cancel '
'justification="not gonna give you"')
def testDisconnect(grammar):
matchedVars = getMatchedVariables(
grammar, "disconnect")
assertCliTokens(matchedVars, {"disconn": "disconnect"})
def testNewIdentifier(grammar):
matchedVars = getMatchedVariables(
grammar, "new identifier")
assertCliTokens(matchedVars, {"new_id": "new identifier",
"id": None,
"seed": None, "alias": None})
matchedVars = getMatchedVariables(
grammar, "new identifier as myalis")
assertCliTokens(matchedVars,
{"new_id": "new identifier", "id": None,
"seed": None, "alias": "myalis"})
matchedVars = getMatchedVariables(
grammar, "new identifier 4QxzWk3ajdnEA37NdNU5Kt")
assertCliTokens(matchedVars, {"new_id": "new identifier",
"id": "4QxzWk3ajdnEA37NdNU5Kt",
"seed": None, "alias": None})
matchedVars = getMatchedVariables(
grammar, "new identifier 4QxzWk3ajdnEA37NdNU5Kt "
"with seed aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
assertCliTokens(matchedVars, {"new_id": "new identifier",
"id": "4QxzWk3ajdnEA37NdNU5Kt",
"seed": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"alias": None})
def testAddGenTxnRegEx(grammar):
matchedVars = getMatchedVariables(grammar,
"add genesis transaction NYM dest=2ru5PcgeQzxF7QZYwQgDkG2K13PRqyigVw99zMYg8eML")
assertCliTokens(matchedVars, {"add_genesis": "add genesis transaction NYM", "dest": "dest=",
"dest_id": "2ru5PcgeQzxF7QZYwQgDkG2K13PRqyigVw99zMYg8eML", "role": None,
"ver_key": None})
matchedVars = getMatchedVariables(grammar,
"add genesis transaction NYM dest=2ru5PcgeQzxF7QZYwQgDkG2K13PRqyigVw99zMYg8eML role={role}".format(
role=Roles.STEWARD.name))
assertCliTokens(matchedVars, {"add_genesis": "add genesis transaction NYM", "dest": "dest=",
"dest_id": "2ru5PcgeQzxF7QZYwQgDkG2K13PRqyigVw99zMYg8eML", "role": Roles.STEWARD.name,
"ver_key": None})
matchedVars = getMatchedVariables(grammar,
'add genesis transaction NODE for 2ru5PcgeQzxF7QZYwQgDkG2K13PRqyigVw99zMYg8eML '
'by FvDi9xQZd1CZitbK15BNKFbA7izCdXZjvxf91u3rQVzW with data '
'{"node_ip": "localhost", "node_port": "9701", "client_ip": "localhost", "client_port": "9702", "alias": "AliceNode"}')
assertCliTokens(matchedVars, {"add_gen_txn": "add genesis transaction", "type": "NODE",
"dest": "2ru5PcgeQzxF7QZYwQgDkG2K13PRqyigVw99zMYg8eML",
"identifier": "FvDi9xQZd1CZitbK15BNKFbA7izCdXZjvxf91u3rQVzW", "role": None,
"data": '{"node_ip": "localhost", "node_port": "9701", "client_ip": "localhost", "client_port": "9702", "alias": "AliceNode"}'})
def testReqAvailClaims(grammar):
matchedVars = getMatchedVariables(grammar,
"request available claims from Faber")
assertCliTokens(matchedVars, {
"req_avail_claims": "request available claims from",
"link_name": "Faber"
})
|
the-stack_106_28861 | # using https://github.com/RaRe-Technologies/gensim-data
import pandas as pd
import numpy as np
import joblib
import os
label2mid = joblib.load(os.path.join('data','label2mid.joblib'))
def get_manual_bias_labels(biases):
df = pd.read_csv('bias_labels.csv')
biases = [bias if type(bias)==str else bias[0] for bias in biases]
dd = {bias: [label for label in df[bias] if label is not np.nan] for bias in biases if bias!='zero'}
assert {label for sublist in dd.values() for label in sublist} <= set(label2mid)
return dd
if __name__ == '__main__':
import gensim.downloader as api
from gensim.models import KeyedVectors
threshold = 0.4
topk = 3
sim = 'manual' #'pairdirection', '3cosadd', '3cosmul', '2cosadd', '2cosmul', 'manual', 'manualreject'
find_missing = True
biases = [('zero',), ('gender','woman','man'), ('war','war','peace'), ('money','rich','poor'), ('love','love','hate'), ('fear',)]
model_name, prefix = 'fasttext-wiki-news-subwords-300', ''
#model_name, prefix = 'glove-wiki-gigaword-300', ''
#model_name, prefix = 'conceptnet-numberbatch-17-06-300', '/c/en/'
#model_name, prefix = 'word2vec-google-news-300', ''
model_vectors = api.load(model_name) # load pre-trained word-vectors from gensim-data
print('model loaded')
def norm(label):
return prefix + label.lower().split(' (')[0].replace(' ', '-')
norm_labels = [norm(label) for label in label2mid]
secondary_labels = []
def compound_norm(label):
global missing_map
norm_label = norm(label)
if find_missing and norm_label not in model_vectors and '-' in norm_label:
candidate = norm_label.split('-')[1]
if candidate in model_vectors:
norm_label = candidate
if norm_label in norm_labels and label not in secondary_labels:
secondary_labels.append(label)
return norm_label
labels, vectors = [list(a) for a in zip(*[(label, model_vectors[compound_norm(label)]) for label in label2mid if compound_norm(label) in model_vectors])]
assert len(labels)==len(set(labels))
print ('found %d out of %d labels in vocab'%(len(labels), len(label2mid)))
if len(labels)<len(label2mid):
print('missing labels:', sorted(label for label in label2mid if label not in labels))
uniques = len(set(tuple(vector) for vector in vectors))
if len(labels)>uniques:
print('note: there are %d duplicate vectors'%(len(vectors)-uniques))
print('secondary labels:',secondary_labels)
shifts, shift_vectors = [list(a) for a in zip(*[(label, model_vectors[compound_norm(label)]) for sublist in biases for label in sublist[1:] if label not in labels])]
word_vectors = KeyedVectors(model_vectors.vector_size)
word_vectors.add(labels+shifts, vectors+shift_vectors)
def get_auto_bias_labels(biases, topn=None):
return {bias[0]: word_vectors.similar_by_vector(word_vectors[bias[1]]-word_vectors[bias[2]], topn=len(labels), restrict_vocab=len(labels))[:topn] for bias in biases if bias!='zero'}
#note: effectively this could be less than topn due to possible secondary labels
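    # pair_direction ranks each vocabulary word w by the cosine between
    # (positive[1] - negative[0]) and (w - positive[0]), i.e. it looks for words
    # whose offset from positive[0] points in the same direction as the bias pair.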
def pair_direction(positive, negative, topn=None):
vocab = [word for word in word_vectors.vocab if word not in positive+negative+shifts+secondary_labels]
vector_1 = word_vectors[positive[1]] - word_vectors[negative[0]]
vectors_all = np.asarray([word_vectors[word] - word_vectors[positive[0]] for word in vocab])
scores = word_vectors.cosine_similarities(vector_1, vectors_all)
ind = np.argsort(-scores)
return np.asarray(list(zip(vocab,scores)))[ind][:topn].tolist()
def get_bias(label, bias, sim, threshold=None):
func = word_vectors.most_similar
if sim == 'pairdirection':
func = pair_direction
elif sim.endswith('mul'):
func = word_vectors.most_similar_cosmul
if not sim.startswith('2') and len(bias)>1 and bias[0] in labels and np.allclose(word_vectors[label], word_vectors[bias[1]]):
biased = [bias[0]]
else:
biased = func(positive=[label, bias[0]], negative=None if sim.startswith('2') else [bias[1]], topn=(topk+len(secondary_labels) if threshold is None else len(labels))+len(shifts))
biased = [biased_label[0] for biased_label in biased if biased_label[0] not in shifts+secondary_labels and (threshold is None or word_vectors.similarity(label, biased_label[0]) >= threshold)]
biased = biased[:topk] if len(biased)>0 else label
return biased
rows = []
bias_labels = get_manual_bias_labels(biases)
with open('bias_scores.txt', 'w') as f:
for label in labels:
row = []
for bias in biases:
biased = [label]
if sim.startswith('manual') and bias[0]!='zero':
vocab = [word for word in bias_labels[bias[0]] if word in labels]
vectors_all = np.asarray([word_vectors[word] for word in vocab])
vector_1 = word_vectors[label]
if sim.endswith('reject') and len(bias)>2:
bias_vector = word_vectors[bias[1]]-word_vectors[bias[2]]
bias_vector /= np.linalg.norm(bias_vector)
vectors_all -= np.outer(np.dot(vectors_all, bias_vector), bias_vector)
vector_1 = vector_1 - np.dot(vector_1, bias_vector)*bias_vector
scores = word_vectors.cosine_similarities(vector_1, vectors_all)
scores, vocab = zip(*sorted(zip(scores, vocab), key=lambda x: (-x[0], label!=x[1], x[1] in secondary_labels))[:topk])
f.write('%s %s '%(label,bias[0])+' '.join('%.4f %s'%(scores[k],vocab[k]) for k in range(len(vocab))) + '\n')
if scores[0] >= threshold:
biased = [v for v,s in zip(vocab,scores) if s>threshold]
elif len(bias)>1:
biased = get_bias(label, bias[1:], sim, threshold)
row.append('/'.join(biased))
rows.append(row)
pd.DataFrame(rows, columns=[bias[0] for bias in biases]).to_csv('biases.csv', index=False)
|
the-stack_106_28863 | """
Line colors with a custom CPT
-----------------------------
The color of the lines made by :meth:`pygmt.Figure.plot` can be set according
to a custom CPT and assigned with the ``pen`` parameter.
The custom CPT can be used by setting the plot command's ``cmap`` parameter to
``True``. The ``zvalue`` parameter sets the z-value (color) to be used from the
custom CPT, and the line color is set as the z-value by using **+z** when
setting the ``pen`` color.
"""
import numpy as np
import pygmt
# Create a list of values between 20 and 30 with 0.2 intervals
x = np.arange(start=20, stop=30, step=0.2)
fig = pygmt.Figure()
fig.basemap(frame=["WSne", "af"], region=[20, 30, -10, 10])
# Create a custom CPT with the batlow CPT and 10 discrete z-values (colors),
# use color_model="+c0-9" to write the color palette in categorical format and
# add labels (0) to (9) for the colorbar legend
pygmt.makecpt(cmap="batlow", series=[0, 9, 1], color_model="+c0-9")
# Plot 10 lines and set a different z-value for each line
for zvalue in range(0, 10):
y = zvalue * np.sin(x)
fig.plot(x=x, y=y, cmap=True, zvalue=zvalue, pen="thick,+z,-")
# Color bar to show the custom CPT and the associated z-values
fig.colorbar()
fig.show()
|
the-stack_106_28865 | import os
import time
from jina import DocumentArray, Executor, requests
class TagTextExecutor(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.pod_uid = os.environ['POD_UID']
@requests
def process(self, docs: DocumentArray, *args, **kwargs):
for doc in docs:
doc.tags['replica_uid'] = self.pod_uid
doc.tags['time'] = time.time()
doc.text += f'_{self.pod_uid}'
return docs
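# Usage sketch (illustrative, not part of the original file): POD_UID must be set in
# the environment before the executor is instantiated, e.g.
#   os.environ['POD_UID'] = 'pod-0'
#   f = Flow().add(uses=TagTextExecutor)   # assumes jina's Flow API is available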
|
the-stack_106_28866 | """
Created on Mon Jul 31 07:07:07 2017
@author: Juan Carlos Entizne
@email: e.entizne[at]dundee.ac.uk
"""
import sys
import argparse
from logging import getLogger
from modules import srQC_args as srQC # short-reads Quality-Control (srQC)
description = \
"Description:\n" + \
"RTDmaker is a computational pipeline to generate High-Quality transcriptome annotations, known as Reference-Transcript-Datasets (RTDs). " \
"Currently, RTDmaker is made of one module, 'ShortReads', that process transcriptome annotations assembled from RNA-seq data. " \
"Contact: [email protected]\n"
parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawTextHelpFormatter, prog='RTDmaker')
# Version
parser.add_argument('-v', '--version', action='version', version='0.1.5')
subparsers = parser.add_subparsers()
# Parser of the module for RNA-seq analysis
ShortReads_Subparser = subparsers.add_parser("ShortReads", parents=[srQC.parser],
help="Quality Control of transcriptome assemblies from RNA-seq data.")
ShortReads_Subparser.set_defaults(which="ShortReads")
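# Illustrative invocation (the actual ShortReads options are defined in modules/srQC_args;
# the script filename used below is an assumption):
#   python RTDmaker.py ShortReads --help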
# Setting logging preferences
logger = getLogger(__name__)
def main():
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
try:
args = parser.parse_args()
# Setting modules parser
if args.which == "ShortReads":
srQC.parser = parser
srQC.main()
else:
print(f"Command {args.which} not recognized.")
sys.exit(1)
except Exception as e:
logger.error(f"{e}")
sys.exit(1)
if __name__ == "__main__":
main()
|
the-stack_106_28867 | import sublime, sublime_plugin
from .simplenote import Simplenote
import functools
import time
import copy
import pickle
from collections import deque
from os import path, makedirs, remove, listdir
from datetime import datetime
from threading import Semaphore, Lock
from .operations import NoteCreator, MultipleNoteContentDownloader, GetNotesDelta, NoteDeleter, NoteUpdater
def cmp_to_key(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
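# Usage (as done later in this file): notes.sort(key=cmp_to_key(sort_notes), reverse=True)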
def sort_notes(a_note, b_note):
if 'pinned' in a_note['systemtags']:
return 1
elif 'pinned' in b_note['systemtags']:
return -1
else:
date_a = datetime.fromtimestamp(float(a_note['modifydate']))
date_b = datetime.fromtimestamp(float(b_note['modifydate']))
return (date_a > date_b) - (date_a < date_b)
def show_message(message):
if not message:
message = ''
for window in sublime.windows():
for currentView in window.views():
currentView.set_status('QuickSimplenote', message)
def remove_status():
show_message(None)
def write_note_to_path(note, filepath):
f = open(filepath, 'wb')
try:
content = note['content']
f.write(content.encode('utf-8'))
except KeyError:
pass
f.close()
def open_note(note, window=None):
if not window:
window = sublime.active_window()
filepath = get_path_for_note(note)
write_note_to_path(note, filepath)
return window.open_file(filepath)
def get_filename_for_note(note):
# Take out invalid characters from title and use that as base for the name
import string
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
note_name = get_note_name(note)
base = ''.join(c for c in note_name if c in valid_chars)
# Determine extension based on title
extension_map = settings.get('title_extension_map')
extension = ''
if extension_map:
for item in extension_map:
import re
pattern = re.compile(item['title_regex'], re.UNICODE)
if re.search(pattern, note_name):
extension = '.' + item['extension']
break
return base + ' (' + note['key'] + ')' + extension
def get_path_for_note(note):
return path.join(temp_path, get_filename_for_note(note))
def get_note_from_path(view_filepath):
note = None
if view_filepath:
if path.dirname(view_filepath) == temp_path:
note_filename = path.split(view_filepath)[1]
note = [note for note in notes if get_filename_for_note(note) == note_filename]
if not note:
import re
pattern = re.compile(r'\((.*?)\)')
results = re.findall(pattern, note_filename)
if results:
noteKey = results[ len(results) - 1]
note = [note for note in notes if note['key'] == noteKey]
if note:
note = note[0]
return note
def get_note_name(note):
try:
content = note['content']
except Exception as e:
return 'untitled'
index = content.find('\n');
if index > -1:
title = content[:index]
else:
if content:
title = content
else:
title = 'untitled'
return title
def handle_open_filename_change(old_file_path, updated_note):
new_file_path = get_path_for_note(updated_note)
old_note_view = None
new_view = None
# If name changed
if old_file_path != new_file_path:
# Save the current active view because we might lose the focus
old_active_view = sublime.active_window().active_view()
# Search for the view of the open note
for view_list in [window.views() for window in sublime.windows()]:
for view in view_list:
if view.file_name() == old_file_path:
old_note_view = view
break
# If found
if old_note_view:
# Open the note in a new view
new_view = open_note(updated_note, old_note_view.window())
# Close the old dirty note
old_note_view_id = old_note_view.id()
old_active_view_id = old_active_view.id()
if old_note_view.window():
old_note_window_id = old_note_view.window().id()
else:
                old_note_window_id = sublime.active_window().id() # Sometimes this happens on Sublime 2...
close_view(old_note_view)
# Focus on the new view or on the previous one depending
# on where we were
if old_note_view_id == old_active_view_id:
old_note_window = [window for window in sublime.windows() if window.id() == old_note_window_id]
if old_note_window:
old_note_window[0].focus_view(new_view)
else:
sublime.active_window().focus_view(old_active_view)
try:
remove(old_file_path)
except OSError as e:
pass
return True
return False
def close_view(view):
view.set_scratch(True)
view_window = view.window()
if not view_window:
view_window = sublime.active_window()
view_window.focus_view(view)
view_window.run_command("close_file")
def synch_note_resume(existing_note_entry, updated_note_resume):
for key in updated_note_resume:
existing_note_entry[key] = updated_note_resume[key]
def update_note(existing_note, updated_note):
synch_note_resume(existing_note, updated_note)
existing_note['local_modifydate'] = time.time()
existing_note['needs_update'] = False
existing_note['filename'] = get_filename_for_note(existing_note)
def load_notes():
notes = []
try:
with open(path.join(package_path, 'note_cache'),'rb') as cache_file:
notes = pickle.load(cache_file, encoding='utf-8')
except (EOFError, IOError) as e:
pass
return notes
def save_notes(notes):
with open(path.join(package_path, 'note_cache'),'w+b') as cache_file:
pickle.dump(notes, cache_file)
class OperationManager:
_instance = None
_lock = Lock()
@classmethod
def instance(cls):
if not cls._instance:
with cls._lock:
if not cls._instance:
cls._instance = OperationManager()
return cls._instance
def __init__(self):
self.operations = deque([])
self.running = False
self.current_operation = None
def is_running(self):
return self.running
def add_operation(self, operation):
self.operations.append(operation)
if (not self.running):
self.run()
def check_operations(self):
if self.current_operation == None:
return
# If it's still running, update the status
if self.current_operation.is_alive():
text = self.current_operation.get_update_run_text()
else:
# If not running, show finished text
# call callback with result and do the
# next operation
text = self.current_operation.get_run_finished_text()
self.current_operation.join()
if len( self.operations ) > 0:
self.start_next_operation()
else:
self.running = False
sublime.set_timeout(remove_status, 1000)
show_message(text)
if self.running:
sublime.set_timeout(self.check_operations, 1000)
def run(self):
self.start_next_operation()
sublime.set_timeout(self.check_operations, 1000)
self.running = True
def start_next_operation(self):
self.current_operation = self.operations.popleft()
self.current_operation.start()
class HandleNoteViewCommand(sublime_plugin.EventListener):
waiting_to_save = []
def on_modified(self, view):
def flush_saves():
if OperationManager.instance().is_running():
sublime.set_timeout(flush_saves, 1000)
return
for entry in HandleNoteViewCommand.waiting_to_save:
if entry['note_key'] == note['key']:
with entry['lock']:
entry['count'] = entry['count'] - 1
if entry['count'] == 0:
view.run_command("save")
break
view_filepath = view.file_name()
note = get_note_from_path(view_filepath)
if note:
debounce_time = settings.get('autosave_debounce_time')
if not debounce_time:
return
debounce_time = debounce_time * 1000
found = False
for entry in HandleNoteViewCommand.waiting_to_save:
if entry['note_key'] == note['key']:
with entry['lock']:
entry['count'] = entry['count'] + 1
found = True
break
if not found:
new_entry = {}
new_entry['note_key'] = note['key']
new_entry['lock'] = Lock()
new_entry['count'] = 1
HandleNoteViewCommand.waiting_to_save.append(new_entry)
sublime.set_timeout(flush_saves, debounce_time)
def on_load(self, view):
view_filepath = view.file_name()
note = get_note_from_path(view_filepath)
syntax = settings.get('note_syntax')
if note and syntax:
view.set_syntax_file(syntax)
def get_current_content(self, view):
return view.substr(sublime.Region(0, view.size()))
def handle_note_changed(self, modified_note_resume, content, old_file_path, open_view):
global notes
# We get all the resume data back. We have to merge it
# with our data (extended fields and content)
for note in notes:
if note['key'] == modified_note_resume['key']:
# Set content to the updated one
# or to the view's content if we don't have any update
updated_from_server = False
if not 'content' in modified_note_resume:
modified_note_resume['content'] = content
else:
updated_from_server = True
update_note(note, modified_note_resume) # Update all fields
name_changed = handle_open_filename_change(old_file_path, note)
# If we didn't reopen the view with the name changed, but the content has changed
# we have to update the view anyway
if updated_from_server and not name_changed:
filepath = get_path_for_note(note)
write_note_to_path(note, filepath)
sublime.set_timeout(functools.partial(open_view.run_command, 'revert'), 0)
break
notes.sort(key=cmp_to_key(sort_notes), reverse=True)
save_notes(notes)
def on_post_save(self, view):
view_filepath = view.file_name()
note = get_note_from_path(view_filepath)
if note:
# Update with new content
updated_note = copy.deepcopy(note)
# Handle when the note changes elsewhere and the user goes to that tab:
# sublime reloads the view, it's handled as changed and sent here
if 'content' in updated_note and updated_note['content'] == self.get_current_content(view):
return
updated_note['content'] = self.get_current_content(view)
# Send update
update_op = NoteUpdater(note=updated_note, simplenote_instance=simplenote_instance)
update_op.set_callback(self.handle_note_changed,
{'content': updated_note['content'],
'old_file_path': view_filepath,
'open_view': view})
OperationManager.instance().add_operation(update_op)
class ShowQuickSimplenoteNotesCommand(sublime_plugin.ApplicationCommand):
def handle_selected(self, selected_index):
if not selected_index > -1:
return
selected_note = notes[selected_index]
open_note(selected_note)
def run(self):
if not started:
if not start():
return
i = 0
keys = []
for note in notes:
i += 1
title = get_note_name(note)
keys.append(title)
sublime.active_window().show_quick_panel(keys, self.handle_selected)
class StartQuickSimplenoteSyncCommand(sublime_plugin.ApplicationCommand):
def set_result(self, new_notes):
global notes
notes = new_notes
notes.sort(key=cmp_to_key(sort_notes), reverse=True)
def merge_delta(self, updated_note_resume, existing_notes):
# Here we create the note_resume we use on the rest of the app.
# The note_resume we store consists of:
# The note resume as it comes from the simplenote api.
# The title, filename and last modified date of the local cache entry
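        # Illustrative entry (field names as used in this file; values are made up):
        #   {'key': 'agtzaW1wbGUtbm90ZX...', 'modifydate': '1500000000.0', 'systemtags': [],
        #    'local_modifydate': 1500000100.2, 'needs_update': False,
        #    'filename': 'Groceries (agtzaW1wbGUtbm90ZX...).txt'}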
# Look at the new resume and find existing entries
for current_updated_note_resume in updated_note_resume:
existing_note_entry = None
for existing_note in existing_notes:
if existing_note['key'] == current_updated_note_resume['key']:
existing_note_entry = existing_note
break
# If we have it already
if existing_note_entry:
# Mark for update if needed
try:
# Note with old content
if existing_note_entry['local_modifydate'] < float(current_updated_note_resume['modifydate']):
synch_note_resume(existing_note_entry, current_updated_note_resume)
existing_note_entry['needs_update'] = True
else:
# Up to date note
existing_note_entry['needs_update'] = False
except KeyError as e:
# Note that never got the content downloaded:
existing_note_entry['needs_update'] = True
# New note
else:
new_note_entry = {'needs_update': True}
synch_note_resume(new_note_entry, current_updated_note_resume)
existing_notes.append(new_note_entry)
# Look at the existing notes to find deletions
updated_note_resume_keys = [note['key'] for note in updated_note_resume]
deleted_notes = [deleted_note for deleted_note in existing_notes if deleted_note['key'] not in updated_note_resume_keys]
for deleted_note in deleted_notes:
existing_notes.remove(deleted_note)
save_notes(existing_notes)
self.notes_synch(existing_notes)
def notes_synch(self, notes):
# Here we synch updated notes in order of priority.
# Open notes:
# Locally unsaved
# Locally saved
# Other notes in order of modifydate and priority
open_files_dirty = []
open_files_ok = []
for view_list in [window.views() for window in sublime.windows()]:
for view in view_list:
if view.file_name() == None:
continue
if view.is_dirty():
open_files_dirty.append(path.split(view.file_name())[1])
else:
open_files_ok.append(path.split(view.file_name())[1])
# Classify notes
lu = []
ls = []
others = []
for note in notes:
if not note['needs_update']:
continue
try:
filename = note['filename']
except KeyError as e:
others.append(note)
continue
if filename in open_files_dirty:
lu.append(note)
elif filename in open_files_ok:
ls.append(note)
else:
others.append(note)
# Sorted by priority/importance
lu.sort(key=cmp_to_key(sort_notes), reverse=True)
ls.sort(key=cmp_to_key(sort_notes), reverse=True)
others.sort(key=cmp_to_key(sort_notes), reverse=True)
# Start updates
sem = Semaphore(3)
show_message('QuickSimplenote: Downloading content')
if lu:
down_op = MultipleNoteContentDownloader(sem, simplenote_instance=simplenote_instance, notes=lu)
down_op.set_callback(self.merge_open, {'existing_notes':notes, 'dirty':True})
OperationManager.instance().add_operation(down_op)
if ls:
down_op = MultipleNoteContentDownloader(sem, simplenote_instance=simplenote_instance, notes=ls)
down_op.set_callback(self.merge_open, {'existing_notes':notes})
OperationManager.instance().add_operation(down_op)
if others:
down_op = MultipleNoteContentDownloader(sem, simplenote_instance=simplenote_instance, notes=others)
down_op.set_callback(self.merge_notes, {'existing_notes':notes})
OperationManager.instance().add_operation(down_op)
def merge_open(self, updated_notes, existing_notes, dirty=False):
global settings
auto_overwrite_on_conflict = settings.get('on_conflict_use_server')
do_nothing_on_conflict = settings.get('on_conflict_leave_alone')
update = False
# If it's not a conflict or it's a conflict we can resolve
if ( not dirty ) or ( dirty and not do_nothing_on_conflict ):
# If we don't have an overwrite policy, ask the user
if ( not auto_overwrite_on_conflict ) and dirty and len( updated_notes ) > 0:
note_names = '\n'.join([get_note_name(updated_note) for updated_note in updated_notes])
update = sublime.ok_cancel_dialog('Note(s):\n%s\nAre in conflict. Overwrite?' % note_names, 'Overwrite')
if ( not dirty ) or update or auto_overwrite_on_conflict:
# Update notes if the change is clean, or we were asked to update
for note in existing_notes:
for updated_note in updated_notes:
# If we find the updated note
if note['key'] == updated_note['key']:
old_file_path = get_path_for_note(note)
new_file_path = get_path_for_note(updated_note)
# Update contents
write_note_to_path(updated_note, new_file_path)
# Handle filename change (note has the old filename value)
handle_open_filename_change(old_file_path, updated_note)
# Reload view of the note if it's selected
for view in [window.active_view() for window in sublime.windows()]:
if view.file_name() == new_file_path:
sublime.set_timeout(functools.partial(view.run_command, 'revert'), 0)
break
# Merge
self.merge_notes(updated_notes, existing_notes)
def merge_notes(self, updated_notes, existing_notes):
# Merge
for note in existing_notes:
if not note['needs_update']:
continue
for updated_note in updated_notes:
if note['key'] == updated_note['key']:
update_note(note, updated_note)
save_notes(existing_notes)
self.set_result(existing_notes)
def run(self):
show_message('QuickSimplenote: Synching')
get_delta_op = GetNotesDelta(simplenote_instance=simplenote_instance)
get_delta_op.set_callback(self.merge_delta, {'existing_notes':notes})
OperationManager.instance().add_operation(get_delta_op)
class CreateQuickSimplenoteNoteCommand(sublime_plugin.ApplicationCommand):
def handle_new_note(self, result):
if result:
global notes
update_note(result, result)
notes.append(result)
notes.sort(key=cmp_to_key(sort_notes), reverse=True)
save_notes(notes)
open_note(result)
def run(self):
creation_op = NoteCreator(simplenote_instance=simplenote_instance)
creation_op.set_callback(self.handle_new_note)
OperationManager.instance().add_operation(creation_op)
class DeleteQuickSimplenoteNoteCommand(sublime_plugin.ApplicationCommand):
def handle_deletion(self, result):
global notes
notes.remove(self.note)
save_notes(notes)
try:
remove(get_path_for_note(self.note))
except OSError as e:
pass
close_view(self.note_view)
def run(self):
self.note_view = sublime.active_window().active_view()
self.note = get_note_from_path(self.note_view.file_name())
if self.note:
deletion_op = NoteDeleter(note=self.note, simplenote_instance=simplenote_instance)
deletion_op.set_callback(self.handle_deletion)
OperationManager.instance().add_operation(deletion_op)
def sync():
if not OperationManager.instance().is_running():
print('QuickSimplenote: Syncing: %s' % time.time())
sublime.run_command('start_quick_simplenote_sync');
else:
print('QuickSimplenote: Sync ommited %s' % time.time())
sync_every = settings.get('sync_every')
    if sync_every and sync_every > 0:
sublime.set_timeout(sync, sync_every * 1000)
def start():
global started, simplenote_instance, settings
username = settings.get('username')
password = settings.get('password')
if (username and password):
simplenote_instance = Simplenote(username, password)
sync()
started = True
else:
filepath = path.join(package_path, 'quick_simplenote.sublime-settings')
sublime.active_window().open_file(filepath)
show_message('QuickSimplenote: Please configure username/password')
sublime.set_timeout(remove_status, 2000)
started = False
return started
def reload_if_needed():
global settings, started, reload_calls
# Sublime calls this twice for some reason :(
reload_calls = reload_calls + 1
if (reload_calls % 2 != 0):
return
if settings.get('autostart'):
sublime.set_timeout(start, 2000) # I know...
print('QuickSimplenote: Autostarting')
def plugin_loaded():
global package_path, temp_path, settings, notes
package_path = path.join(sublime.packages_path(), "QuickSimplenote")
settings = sublime.load_settings('quick_simplenote.sublime-settings')
temp_path = settings.get('temp_path')
if not temp_path:
temp_path = path.join(package_path, "temp")
notes = load_notes()
note_files = [note['filename'] for note in notes]
if not path.exists(temp_path):
makedirs(temp_path)
for f in listdir(temp_path):
if f not in note_files:
remove(path.join(temp_path, f))
settings.clear_on_change('username')
settings.clear_on_change('password')
settings.clear_on_change('temp_path')
settings.add_on_change('username', reload_if_needed)
settings.add_on_change('password', reload_if_needed)
settings.add_on_change('temp_path', reload_if_needed)
reload_if_needed()
reload_calls = -1
simplenote_instance = None
started = False
notes = []
|
the-stack_106_28868 | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
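# Illustrative bitcoin.conf contents (only the keys used by this script; values are made up):
#   rpcuser=someuser
#   rpcpassword=somepassword
#   testnet=1
#   rpcport=33090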
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 33090 if testnet else 23090
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
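# Illustrative behaviour: with needed=Decimal("1.5") and inputs carrying amounts 1.0 and 1.0
# (in that order), both are selected and the returned change is Decimal("0.5");
# selection is greedy in the order the inputs appear.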
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        if kb > 1 and (total_in-total_out) < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and (total_in-total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
the-stack_106_28869 | import threading
from .test_utils import skip_unless_module
from pulsar.client import amqp_exchange
TEST_CONNECTION = "memory://test_amqp"
@skip_unless_module("kombu")
def test_amqp():
manager1_exchange = amqp_exchange.PulsarExchange(TEST_CONNECTION, "manager_test")
manager3_exchange = amqp_exchange.PulsarExchange(TEST_CONNECTION, "manager3_test")
manager2_exchange = amqp_exchange.PulsarExchange(TEST_CONNECTION, "manager2_test")
thread1 = TestThread("manager_test", manager1_exchange)
thread2 = TestThread("manager2_test", manager2_exchange)
thread3 = TestThread("manager3_test", manager3_exchange)
thread1.start()
thread2.start()
thread3.start()
manager1_exchange.publish("manager_test", "cow1")
manager2_exchange.publish("manager2_test", "cow2")
manager3_exchange.publish("manager3_test", "cow3")
thread1.join(1)
thread2.join(1)
thread3.join(1)
assert thread1.message == "cow1", thread1.message
assert thread2.message == "cow2", thread2.message
assert thread3.message == "cow3", thread3.message
class TestThread(threading.Thread):
def __init__(self, queue_name, exchange):
super(TestThread, self).__init__()
self.queue_name = queue_name
self.daemon = True
self.exchange = exchange
self.message = None
    def __nonzero__(self):
        return self.message is None
    # Python 3 looks for __bool__, not __nonzero__; alias it so `check=self` works there too
    __bool__ = __nonzero__
def run(self):
def callback(body, message):
self.message = body
message.ack()
self.exchange.consume(self.queue_name, callback=callback, check=self)
|
the-stack_106_28870 | '''
Author: Geeticka Chauhan
Performs pre-processing on a csv file independent of the dataset (once converters have been applied).
Refer to notebooks/Data-Preprocessing for more details. These methods are used in the
non-'_original' notebooks for all datasets.
'''
import os
import pandas as pd
from ast import literal_eval
from tqdm.auto import tqdm
import nltk
from nltk.corpus import stopwords, wordnet as wn
class Preprocess():
def __init__(self, dataset_name, preprocessing_types, entity_name="ENTITY") -> None:
self.dataset_name = dataset_name
self.preprocessing_types = preprocessing_types
# important global variables for identifying the location of entities
self.entity1 = 'E'
self.entity2 = 'EOTHER'
self.entity_either = 'EEITHER'
self.entity_name = entity_name
self.output_path = os.path.join("benchmark", dataset_name)
        nltk.download('stopwords')
        nltk.download('wordnet')  # wordnet is imported above as `wn`
'''
The methods below are for the preprocessing type 1
'''
# separate the indexes of entity 1 and entity 2 by what is intersecting
# and what is not
def get_common_and_separate_entities(self, e1_indexes, e2_indexes):
e1_indexes = set(e1_indexes)
e2_indexes = set(e2_indexes)
common_indexes = e1_indexes.intersection(e2_indexes)
only_e1_indexes = list(e1_indexes.difference(common_indexes))
only_e2_indexes = list(e2_indexes.difference(common_indexes))
return only_e1_indexes, only_e2_indexes, list(common_indexes)
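    # Illustrative example: e1_indexes=[(2, 3)], e2_indexes=[(2, 3), (7, 7)]
    # -> only_e1=[], only_e2=[(7, 7)], common=[(2, 3)]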
# given an entity replacement dictionary like {'0:0': 'entity1'}
# provide more information related to the location of the entity
def entity_replacement_dict_with_entity_location(self, entity_replacement_dict, replacement_type,
only_e1_indexes, only_e2_indexes, common_indexes):
def update_dict_with_indexes(new_entity_replacement_dict, only_indexes, start, end):
for i in only_indexes:
key = str(i[0]) + ':' + str(i[-1])
new_entity_replacement_dict[key]['start'] = start
new_entity_replacement_dict[key]['end'] = end
return new_entity_replacement_dict
new_entity_replacement_dict = {}
# below is just for initialization purposes, when start and end is none, means we are not
# inserting anything before or after those words in the sentence
for key in entity_replacement_dict.keys():
new_entity_replacement_dict[key] = {'replace_by': entity_replacement_dict[key],
'start': None, 'end': None}
new_entity_replacement_dict = update_dict_with_indexes(new_entity_replacement_dict, only_e1_indexes,
self.entity1 + 'START', self.entity1 + 'END')
new_entity_replacement_dict = update_dict_with_indexes(new_entity_replacement_dict, only_e2_indexes,
self.entity2 + 'START', self.entity2 + 'END')
new_entity_replacement_dict = update_dict_with_indexes(new_entity_replacement_dict, common_indexes,
self.entity_either + 'START', self.entity_either + 'END')
return new_entity_replacement_dict
# given an entity replacement dictionary like {'0:0': 'entity1'}
# provide more information related to the location of the entity
def entity_replacement_dict_with_entity_location_embed(self, entity_replacement_dict, embed,
only_e1_indexes, only_e2_indexes, common_indexes):
        def update_dict_with_indexes(new_entity_replacement_dict, only_indexes):
            # note: unlike the non-embed variant above, this leaves 'start'/'end' as None,
            # so no START/END markers are attached to embedding replacements; the loop only
            # mirrors the structure of that variant and does not modify the dict
            for i in only_indexes:
                key = str(i[0]) + ':' + str(i[-1])
            return new_entity_replacement_dict
new_entity_replacement_dict = {}
# below is just for initialization purposes, when start and end is none, means we are not
# inserting anything before or after those words in the sentence
for key in entity_replacement_dict.keys():
idx = int(key.split(':')[-1])
new_entity_replacement_dict[key] = {'replace_by': embed[idx],
'start': None, 'end': None}
new_entity_replacement_dict = update_dict_with_indexes(new_entity_replacement_dict, only_e1_indexes)
new_entity_replacement_dict = update_dict_with_indexes(new_entity_replacement_dict, only_e2_indexes)
new_entity_replacement_dict = update_dict_with_indexes(new_entity_replacement_dict, common_indexes)
return new_entity_replacement_dict
###
### Helper functions
###
#given string 12:30, return 12, 30 as a tuple of ints
def parse_position(self, position):
positions = position.split(':')
return int(positions[0]), int(positions[1])
def sort_position_keys(self, entity_replacement_dict):
positions = list(entity_replacement_dict.keys())
sorted_positions = sorted(positions, key=lambda x: int(x.split(':')[0]))
return sorted_positions
# remove any additional whitespace within a line
def remove_whitespace(self, line):
return str(" ".join(line.split()).strip())
def list_to_string(self, sentence):
return " ".join(sentence)
# adapted from tag_sentence method in converter_ddi
# note that white spaces are added in the new sentence on purpose
def replace_with_concept(self, row, replacement_type):
sentence = row.tokenized_sentence.split(" ")
upos = row.upos_sentence.split(" ")
deps = row.deps_sentence.split(" ")
ner = row.ner_sentence.split(" ")
e1_indexes = row.metadata['e1']['word_index']
e2_indexes = row.metadata['e2']['word_index'] # assuming that within the same entity indexes, no overlap
new_sentence = ''
new_upos = ''
new_deps = ''
new_ner = ''
only_e1_indexes, only_e2_indexes, common_indexes = \
self.get_common_and_separate_entities(e1_indexes, e2_indexes)
entity_replacement_dict = row.metadata['entity_replacement'] # assuming no overlaps in replacement
new_entity_replacement_dict = self.entity_replacement_dict_with_entity_location(entity_replacement_dict, replacement_type,
only_e1_indexes, only_e2_indexes,
common_indexes)
repl_dict = new_entity_replacement_dict # just using proxy because names are long
new_entity_replacement_dict_upos = self.entity_replacement_dict_with_entity_location_embed(entity_replacement_dict, upos,
only_e1_indexes, only_e2_indexes,
common_indexes)
repl_dict_upos = new_entity_replacement_dict_upos # just using proxy because names are long
new_entity_replacement_dict_deps = self.entity_replacement_dict_with_entity_location_embed(entity_replacement_dict, deps,
only_e1_indexes, only_e2_indexes,
common_indexes)
repl_dict_deps = new_entity_replacement_dict_deps # just using proxy because names are long
new_entity_replacement_dict_ner = self.entity_replacement_dict_with_entity_location_embed(entity_replacement_dict, ner,
only_e1_indexes, only_e2_indexes,
common_indexes)
repl_dict_ner = new_entity_replacement_dict_ner # just using proxy because names are long
sorted_positions = self.sort_position_keys(new_entity_replacement_dict)
print("repl_dict:",repl_dict)
print("new_entity_replacement_dict_upos:",new_entity_replacement_dict_upos)
for i in range(len(sorted_positions)):
curr_pos = sorted_positions[i]
curr_start_pos, curr_end_pos = self.parse_position(curr_pos)
start_replace = '' if repl_dict[curr_pos]['start'] is None else repl_dict[curr_pos]['start'].upper()
end_replace = '' if repl_dict[curr_pos]['end'] is None else repl_dict[curr_pos]['end'].upper()
between_replace = repl_dict[curr_pos]['replace_by'][replacement_type].upper() # between the entity replacement
start_replace_other = ''
end_replace_other = ''
between_replace_upos = repl_dict_upos[curr_pos]['replace_by'] # between the entity replacement
between_replace_deps = repl_dict_deps[curr_pos]['replace_by'] # between the entity replacement
between_replace_ner = repl_dict_ner[curr_pos]['replace_by'] # between the entity replacement
if i == 0:
new_sentence += self.list_to_string(sentence[:curr_start_pos]) + ' ' + start_replace + ' ' + \
between_replace + ' ' + end_replace + ' '
new_upos += self.list_to_string(upos[:curr_start_pos]) + ' ' + start_replace_other + ' ' + \
between_replace_upos + ' ' + end_replace_other + ' '
new_deps += self.list_to_string(deps[:curr_start_pos]) + ' ' + start_replace_other + ' ' + \
between_replace_deps + ' ' + end_replace_other + ' '
new_ner += self.list_to_string(ner[:curr_start_pos]) + ' ' + start_replace_other + ' ' + \
between_replace_ner + ' ' + end_replace_other + ' '
else:
prev_pos = sorted_positions[i-1]
_, prev_end_pos = self.parse_position(prev_pos)
                # "middle" is the stretch of tokens between the previous segment and the current one
                middle = self.list_to_string(sentence[prev_end_pos+1 : curr_start_pos])
                middle_upos = self.list_to_string(upos[prev_end_pos+1 : curr_start_pos])
                middle_deps = self.list_to_string(deps[prev_end_pos+1 : curr_start_pos])
                middle_ner = self.list_to_string(ner[prev_end_pos+1 : curr_start_pos])
if middle == '':
middle = ' '
new_sentence += middle + ' ' + start_replace + ' ' + between_replace + ' ' + end_replace + ' '
if i == len(sorted_positions) - 1 and curr_end_pos < len(sentence) - 1:
new_sentence += ' ' + self.list_to_string(sentence[curr_end_pos+1:])
if middle_upos == '':
middle_upos = ' '
new_upos += middle_upos + ' ' + start_replace_other + ' ' + between_replace_upos + ' ' + end_replace_other + ' '
if i == len(sorted_positions) - 1 and curr_end_pos < len(upos) - 1:
new_upos += ' ' + self.list_to_string(upos[curr_end_pos+1:])
if middle_deps == '':
middle_deps = ' '
new_deps += middle_deps + ' ' + start_replace_other + ' ' + between_replace_deps + ' ' + end_replace_other + ' '
if i == len(sorted_positions) - 1 and curr_end_pos < len(deps) - 1:
new_deps += ' ' + self.list_to_string(deps[curr_end_pos+1:])
if middle_ner == '':
middle_ner = ' '
new_ner += middle_ner + ' ' + start_replace_other + ' ' + between_replace_ner + ' ' + end_replace_other + ' '
if i == len(sorted_positions) - 1 and curr_end_pos < len(ner) - 1:
new_ner += ' ' + self.list_to_string(ner[curr_end_pos+1:])
new_sentence = self.remove_whitespace(new_sentence)
new_upos = self.remove_whitespace(new_upos)
new_deps = self.remove_whitespace(new_deps)
new_ner = self.remove_whitespace(new_ner)
# print(len(new_sentence.split()))
# print(new_upos.split())
# print(new_deps.split())
#assert len(new_deps.split()) + 4 == len(new_sentence.split()) == len(new_upos.split()) + 4 == len(new_ner.split()) + 4
return pd.Series([new_sentence, new_upos, new_deps, new_ner])
'''
    Preprocessing Type 2: Removal of stop words and punctuation, and the replacement of digits
'''
# gives a dictionary signifying the location of the different entities in the sentence
def get_entity_location_dict(self, only_e1_indexes, only_e2_indexes, common_indexes):
entity_location_dict = {}
def update_dict_with_indexes(entity_location_dict, only_indexes, start, end):
            # print(only_indexes)
for i in only_indexes:
key = str(i[0]) + ':' + str(i[-1])
entity_location_dict[key] = {'start': start, 'end': end}
return entity_location_dict
entity_location_dict = update_dict_with_indexes(entity_location_dict, only_e1_indexes,
self.entity1 + 'START', self.entity1 + 'END')
entity_location_dict = update_dict_with_indexes(entity_location_dict, only_e2_indexes,
self.entity2 + 'START', self.entity2 + 'END')
entity_location_dict = update_dict_with_indexes(entity_location_dict, common_indexes,
self.entity_either + 'START', self.entity_either + 'END')
return entity_location_dict
# given the index information of the entities, return the sentence with
# tags ESTART EEND etc to signify the location of the entities
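    # Illustrative sketch (hypothetical tokens and entity names, not from the original data):
    # with sentence = ['aspirin', 'relieves', 'pain'], e1_indexes = [(0, 0)], e2_indexes = [(2, 2)],
    # self.entity1 == 'DRUG' and self.entity2 == 'EFFECT', the method below returns roughly
    #   'DRUGSTART aspirin DRUGEND relieves EFFECTSTART pain EFFECTEND'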
def get_new_sentence_with_entity_replacement(self, sentence, e1_indexes, e2_indexes):
new_sentence = ''
only_e1_indexes, only_e2_indexes, common_indexes = \
self.get_common_and_separate_entities(e1_indexes, e2_indexes)
#if not 'ESTART' in sentence and not 'EEND' in sentence:
entity_loc_dict = self.get_entity_location_dict(only_e1_indexes, only_e2_indexes, common_indexes)
sorted_positions = self.sort_position_keys(entity_loc_dict)
for i in range(len(sorted_positions)):
curr_pos = sorted_positions[i]
curr_start_pos, curr_end_pos = self.parse_position(curr_pos)
start_replace = entity_loc_dict[curr_pos]['start']
end_replace = entity_loc_dict[curr_pos]['end']
if i == 0:
new_sentence += self.list_to_string(sentence[:curr_start_pos]) + ' ' + start_replace + ' ' + \
self.list_to_string(sentence[curr_start_pos : curr_end_pos + 1]) + ' ' + end_replace + ' '
else:
prev_pos = sorted_positions[i-1]
_, prev_end_pos = self.parse_position(prev_pos)
middle = self.list_to_string(sentence[prev_end_pos+1 : curr_start_pos])
if middle == '':
middle = ' '
new_sentence += middle + ' ' + start_replace + ' ' + \
self.list_to_string(sentence[curr_start_pos: curr_end_pos+1]) + ' ' + end_replace + ' '
if i == len(sorted_positions) - 1 and curr_end_pos < len(sentence) - 1:
new_sentence += ' ' + self.list_to_string(sentence[curr_end_pos+1:])
new_sentence = self.remove_whitespace(new_sentence)
# TODO write some code to do the replacement
return new_sentence
# preprocessing 2: remove the stop words and punctuation from the data
# and replace all digits
# TODO: might be nice to give an option to specify whether to remove the stop words or not
# this is a low priority part though
def generate_new_sentence(self, sentence, upos, deps, ner, index_dict):
new_sentence = []
new_upos = []
new_deps = []
new_ner = []
if isinstance(sentence, str):
sentence = sentence.split(" ")
idx = 0
entity_start_or_end = 0
for i in range(len(sentence)):
word = sentence[i]
if word.endswith('END') or word.endswith('START'):
new_sentence.append(word)
entity_start_or_end += 1
continue
if not index_dict[i]['keep']:
idx += 1
continue # don't append when it is a stop word or punctuation
if index_dict[i]['replace_with'] is not None:
words_length = len(index_dict[i]['replace_with'].split('_'))
new_sentence.extend(index_dict[i]['replace_with'].split('_'))
new_upos.extend([upos[idx]]*words_length)
new_deps.extend([deps[idx]]*words_length)
new_ner.extend([ner[idx]]*words_length)
idx += 1
continue
new_sentence.append(word)
new_upos.append(upos[idx])
new_deps.append(deps[idx])
new_ner.append(ner[idx])
idx += 1
# print((new_sentence), (new_upos), (new_deps), (new_ner))
# print(len(new_sentence), len(new_upos), len(new_deps), len(new_ner))
assert len(new_sentence) == len(new_upos) + entity_start_or_end and \
len(new_sentence) == len(new_deps) + entity_start_or_end and \
len(new_sentence) == len(new_ner) + entity_start_or_end
return self.list_to_string(new_sentence), self.list_to_string(new_upos), self.list_to_string(new_deps), self.list_to_string(new_ner)
def generate_tagged_sentence(self, row):
sentence = row.tokenized_sentence.split(" ")
#print("sentence:",sentence)
e1_indexes = row.metadata['e1']['word_index']
e2_indexes = row.metadata['e2']['word_index']
if not 'ESTART' in sentence and not 'EEND' in sentence:
sentence = self.get_new_sentence_with_entity_replacement(sentence, e1_indexes, e2_indexes)
return sentence
def replace_digit_punctuation_stop_word_brackets(self, row):
if "tagged_sentence" in row:
sentence = row.tagged_sentence.split(" ")
else:
sentence = row.tokenized_sentence.split(" ")
upos = row.upos_sentence.split(" ")
deps = row.deps_sentence.split(" ")
ner = row.ner_sentence.split(" ")
# print("sentence:",sentence)
# print("sentence:",len(sentence))
# print("upos:",len(upos))
# print("upos:",upos)
# print("deps:",len(deps))
#assert len(deps) == len(sentence) and len(deps) == len(upos)
e1_indexes = row.metadata['e1']['word_index']
e2_indexes = row.metadata['e2']['word_index']
if not 'ESTART' in sentence and not 'EEND' in sentence:
sentence = self.get_new_sentence_with_entity_replacement(sentence, e1_indexes, e2_indexes)
if isinstance(sentence,str):
sentence = sentence.split(" ")
# detection of stop words, punctuations and digits
index_to_keep_dict = {} # index: {keep that token or not, replace_with}
#tokenizedSentence = sentence.lower().split()
#doc2 = nlpSpacy(nlpSpacy.vocab, words=tokenizedSentence)
#doc = self.nlp(sentence)
#nlp.tagger(doc)
#nlp.parser(doc)
stop_words = set(stopwords.words('english'))
stop_words.remove('o')
brackets = False
wordnet = self.preprocessing_types["wordnet"]
idx = 0
#print("sentence:",sentence)
for i, word in enumerate(sentence):
#print(word, idx, upos[idx], len(sentence))
is_entity = False
word_index = i
for tup in e1_indexes:
if word_index >= tup[0] and word_index <= tup[1]:
is_entity = True
index_to_keep_dict[i] = {'keep': True, 'replace_with': None}
for tup in e2_indexes:
if word_index >= tup[0] and word_index <= tup[1]:
is_entity = True
index_to_keep_dict[i] = {'keep': True, 'replace_with': None}
if is_entity:
#idx += 1
continue
stop_word = word in stop_words and self.preprocessing_types["stopword"]
punct = not word.endswith('END') and upos[idx] == 'PUNCT' and self.preprocessing_types["punct"]
num = not word.endswith('END') and upos[idx] == "NUM" and self.preprocessing_types["digit"]
if not brackets and self.preprocessing_types["brackets"]:
brackets = "(" == word or "[" == word
if stop_word:
index_to_keep_dict[word_index] = {'keep': False, 'replace_with': None}
elif brackets:
index_to_keep_dict[word_index] = {'keep': False, 'replace_with': None}
brackets = not (")" == word) and not ("]" == word)
elif punct:
index_to_keep_dict[word_index] = {'keep': False, 'replace_with': None}
elif num:
index_to_keep_dict[word_index] = {'keep': True, 'replace_with': 'NUMBER'}
else:
index_to_keep_dict[word_index] = {'keep': True, 'replace_with': None}
if not word.endswith('START') and not word.endswith('END') and idx < len(upos) - 1:
idx += 1
# generation of the new sentence based on the above findings
new_sentence, new_upos, new_deps, new_ner = self.generate_new_sentence(sentence, upos, deps, ner, index_to_keep_dict)
return pd.Series([new_sentence, new_upos, new_deps, new_ner])
'''
Preprocessing Type 3 part 1: NER
'''
# a method to check for overlap between the ner_dict that is created
def check_for_overlap(self, ner_dict):
def expand_key(string): # a string that looks like '2:2' to [2]
start = int(string.split(':')[0])
end = int(string.split(':')[1])
return list(range(start, end+1))
expanded_keys = [expand_key(key) for key in ner_dict.keys()]
for i1, item in enumerate(expanded_keys):
for i2 in range(i1 + 1, len(expanded_keys)):
if set(item).intersection(expanded_keys[i2]):
return True # overlap is true
for i2 in range(0, i1):
if set(item).intersection(expanded_keys[i2]):
return True
return False
###
### Helper functions for the NER replacement
###
def overlap_index(self, index1, index2):
def expand(index):
start = int(index[0])
end = int(index[1])
return list(range(start, end+1))
expand_index1 = expand(index1)
expand_index2 = expand(index2)
if set(expand_index1).intersection(set(expand_index2)):
return True
else: return False
# for indexes that look like (1,1) and (2,2) check if the left is fully included in the right
def fully_included(self, index1, index2):
if int(index1[0]) >= int(index2[0]) and int(index1[1]) <= int(index2[1]): return True
else: return False
    def beginning_overlap(self, index1, index2): # index1 starts before index2 but does not extend past its end
if int(index1[0]) < int(index2[0]) and int(index1[1]) <= int(index2[1]): return True
else: return False
    def end_overlap(self, index1, index2): # index1 ends after index2 but does not start before it
if int(index1[0]) >= int(index2[0]) and int(index1[1]) > int(index2[1]): return True
else: return False
def beginning_and_end_overlap(self, index1, index2):
if int(index1[0]) < int(index2[0]) and int(index1[1]) > int(index2[1]): return True
else:
return False
#else there is no overlap
# taken from https://stackoverflow.com/questions/46548902/converting-elements-of-list-of-nested-lists-from-string-to-integer-in-python
def list_to_int(self, lists):
        return [int(el) if not isinstance(el, list) else self.list_to_int(el) for el in lists]
def correct_entity_indexes_with_ner(self, ner_dict, e_index):
for i in range(len(e_index)): # we are reading tuples here
for key in ner_dict.keys():
indexes = e_index[i]
index2 = indexes
index1 = self.parse_position(key) # checking if ner is fully included etc
if not self.overlap_index(index1, index2): # don't do below if there is no overlap
continue
if self.beginning_overlap(index1, index2):
e_index[i] = (index1[0], e_index[i][1])
elif self.end_overlap(index1, index2):
e_index[i] = (e_index[i][0], index1[1])
elif self.beginning_and_end_overlap(index1, index2):
e_index[i] = (index1[0], index1[1]) # else you don't change or do anything
return e_index
# given all of these dictionaries, return the ner replacement dictionary
def get_ner_replacement_dictionary(self, only_e1_index, only_e2_index, common_indexes, ner_dict):
#print("only_e1_index:",only_e1_index)
#print("only_e2_index:",only_e2_index)
        # print(ner_dict)
def update_dict_with_entity(e_index, ner_repl_dict, entity_name):
for indexes in e_index:
print("e_index:",e_index)
#key = str(idx) + ':' + str(idx)
#key_start = str(idx) + ':' + str(idx) + ':' + entity_name + 'START'
key1 = str(indexes[0]) + ':' + str(indexes[0])# + ':' + entity_name + 'START'
key1start = str(indexes[0]) + ':' + str(indexes[0]) + ':' + entity_name + 'START'
ner_repl_dict[key1] = {'replace_by': ner_dict[key1], 'insert': None}
ner_repl_dict[key1start] = {'replace_by': None, 'insert': entity_name + 'START'}
key2 = str(int(indexes[-1]) - 1) + ':' + str(int(indexes[-1]) - 1)
key2end = str(int(indexes[-1]) - 1) + ':' + str(int(indexes[-1]) - 1) + ':' + entity_name + 'END'
ner_repl_dict[key2] = {'replace_by': ner_dict[key2], 'insert': None}
ner_repl_dict[key2end] = {'replace_by': None, 'insert': entity_name + 'END'}
# if len(e_index) > 1:
# key2 = str(int(indexes[-1]) + 1) + ':' + str(int(indexes[-1]) + 1)# + entity_name + 'END'
# ner_repl_dict[key2] = {'replace_by': ner_dict[key2], 'insert': None}#entity_name + 'END'}
#ner_repl_dict[key2] = {'replace_by': ner_dict[key2], 'insert': None}
return ner_repl_dict
        # Only the NER spans themselves are stored as replacement spans; for the ENTITYSTART and
        # ENTITYEND tags, the span records which token the tag should be inserted before.
ner_repl_dict = {}
#for key in ner_dict:
# ner_repl_dict[key] = {'replace_by': ner_dict[key], 'insert': None}
ner_repl_dict = update_dict_with_entity(only_e1_index, ner_repl_dict, self.entity1)
ner_repl_dict = update_dict_with_entity(only_e2_index, ner_repl_dict, self.entity2)
ner_repl_dict = update_dict_with_entity(common_indexes, ner_repl_dict, self.entity_either)
return ner_repl_dict
# this function is different from the sort_position_keys because
# we care about sorting not just by the beginning token, but also by the length that the span contains
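    # Illustrative example (hypothetical keys): ['3:3:DRUGEND', '3:3:DRUGSTART', '3:4', '2:2']
    # is sorted to ['2:2', '3:3:DRUGEND', '3:3:DRUGSTART', '3:4'] by the function below, i.e.
    # primarily by start position, then by span length, with END tags before START tags on a tie.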
def ner_sort_position_keys(self, ner_repl_dict): # this can potentially replace sort_position_keys
# but only if the application of this function does not change the preprocessed CSVs generated
def len_key(key):
pos = self.parse_position(key)
return pos[1] - pos[0] + 1
def start_or_end(key):
# handle the case where the ending tag of the entity is in the same place as the
#starting tag of another entity - this happens when two entities are next to each other
if len(key.split(':')) <= 2: # means that this is a named entity
return 3
start_or_end = key.split(':')[2]
if start_or_end.endswith('END'): # ending spans should get priority
return 1
elif start_or_end.endswith('START'):
return 2
positions = list(ner_repl_dict.keys())
sorted_positions = sorted(positions, key=lambda x: (self.parse_position(x)[0], len_key(x), start_or_end(x)))
return sorted_positions
    # given an NER-tagged sentence string, build a dict mapping 'i:i' positions to NER labels
def get_ner_dict(self, ner_sentence):
#nlp = spacy.load(spacy_model_name)
ner_tokenized = ner_sentence.split(" ")
#doc = Doc(nlp.vocab, words=tokenizedSentence)
#doc = nlp(tokenizedSentence)
#nlp.tagger(doc)
#nlp.parser(doc)
#nlp.entity(doc) # run NER
ner_dict = {} # first test for overlaps within ner
#tokens = [token for sent in doc.sentences for token in sent.tokens]
#for sent in doc.sentences:
# print(sent.tokens)
for i, ner in enumerate(ner_tokenized):
key = str(i) + ':' + str(i)
ner_dict[key] = ner
return ner_dict
def convert_indexes_to_int(self, e_idx):
new_e_idx = []
for indexes in e_idx:
t = (int(indexes[0]), int(indexes[1]))
new_e_idx.append(t)
return new_e_idx
def replace_ner(self, row, check_ner_overlap=False): # similar to concept_replace, with some caveats
#print("sentence:",row.tokenized_sentence)
sentence = row.tokenized_sentence.split()
# upos = row.upos_sentence.split()
# deps = row.deps_sentence.split()
# ner = row.ner_sentence.split()
e1_indexes = row.metadata['e1']['word_index']
e2_indexes = row.metadata['e2']['word_index']
e1_indexes = self.convert_indexes_to_int(e1_indexes)
e2_indexes = self.convert_indexes_to_int(e2_indexes)
#print("e1_indexes, e2_indexes:",e1_indexes, e2_indexes)
only_e1_indexes, only_e2_indexes, common_indexes = \
self.get_common_and_separate_entities(e1_indexes, e2_indexes)
#print("e1_indexes, e2_indexes:",e1_indexes, e2_indexes)
ner_dict = self.get_ner_dict(row.ner_sentence)
        # print(sentence)
        # print("ner_dict:", ner_dict)
if check_ner_overlap and self.check_for_overlap(ner_dict):
print("There is overlap", ner_dict) # only need to check this once
        # The code below works only if there is no overlap within ner_dict, so make sure of that first.
        # Overlaps between an NER span and the e1/e2 indexes are a problem, and they come in two types:
        # Type 1: the NER span overlaps the beginning or end of e1 or e2. Here we keep the NER label
        #         unchanged but extend the e1/e2 index to the beginning or end of the NER span.
        # Type 2: the NER span lies completely inside the entity. Then it is enough to record at which
        #         token to insert ENTITYSTART and ENTITYEND.
        # Type 1 needs the index correction below; Type 2 is handled while the new sentence is built.
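        # For example (hypothetical indexes): an NER span at '2:4' overlapping the beginning of
        # e1 = (3, 5) is a Type 1 case, so the correction below extends e1 to (2, 5).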
only_e1_indexes = self.correct_entity_indexes_with_ner(ner_dict, only_e1_indexes)
only_e2_indexes = self.correct_entity_indexes_with_ner(ner_dict, only_e2_indexes)
#print("e1_indexes, e2_indexes:",e1_indexes, e2_indexes)
common_indexes = self.correct_entity_indexes_with_ner(ner_dict, common_indexes)
# below needs to be done in case there was again a shift that might have caused both e1 and e2 to have
# the same spans
only_e1_indexes, only_e2_indexes, common_indexes2 = \
self.get_common_and_separate_entities(only_e1_indexes, only_e2_indexes)
# print("e1_indexes, e2_indexes:",e1_indexes, e2_indexes)
#print("common_indexes:",common_indexes2)
common_indexes.extend(common_indexes2)
ner_repl_dict = self.get_ner_replacement_dictionary(only_e1_indexes, only_e2_indexes, common_indexes,
ner_dict)
sorted_positions = self.ner_sort_position_keys(ner_repl_dict)
# print("sorted_positions:",sorted_positions)
# print("ner_repl_dict:",ner_repl_dict)
new_sentence = ''
new_upos = ''
new_deps = ''
new_ner = ''
#print("ner_repl_dict:",ner_repl_dict)
#print("sorted_positions:",sorted_positions)
for i in range(len(sorted_positions)):
curr_pos = sorted_positions[i]
curr_start_pos, curr_end_pos = self.parse_position(curr_pos)
curr_dict = ner_repl_dict[curr_pos]
start_insert = '' if curr_dict['insert'] is None else curr_dict['insert'].upper()
between_replace = '' if curr_dict['replace_by'] is None else curr_dict['replace_by']
if i == 0:
new_sentence += self.list_to_string(sentence[:curr_start_pos]) + ' ' + start_insert + ' ' + \
between_replace + ' '
else:
prev_pos = sorted_positions[i-1]
_, prev_end_pos = self.parse_position(prev_pos)
if ner_repl_dict[prev_pos]['insert'] is None: # means middle will be starting from prev_pos + 1
middle = self.list_to_string(sentence[prev_end_pos+1 : curr_start_pos])
else: # means middle needs to start from the prev_pos
middle = self.list_to_string(sentence[prev_end_pos: curr_start_pos])
if middle == '':
middle = ' '
new_sentence += middle + ' ' + start_insert + ' ' + between_replace + ' '
if i == len(sorted_positions) - 1 and curr_end_pos < len(sentence) - 1:
position = curr_end_pos + 1 if curr_dict['insert'] is None else curr_end_pos
new_sentence += ' ' + self.list_to_string(sentence[position:])
new_sentence = self.remove_whitespace(new_sentence)
# doc = self.nlp(new_sentence)
# words = [word for sent in doc.sentences for word in sent.words]
# upos = [word.upos for word in words]
# deps = [word.deprel for word in words]
# print(new_sentence, upos, deps)
return new_sentence
'''
Below methods do entity detection from the tagged sentences, i.e. a sentence that contains
ESTART, EEND etc, use that to detect the locations of the respective entities and remove the tags
from the sentence to return something clean
'''
# below is taken directly from the ddi converter and
    # removes the first occurrence of the start and end tags, and returns their location
def get_entity_start_and_end(self, entity_start, entity_end, tokens):
e_start = tokens.index(entity_start)
e_end = tokens.index(entity_end) - 2 # 2 tags will be eliminated
between_tags = 0
for index in range(e_start + 1, e_end + 2):
            # we want to check between the start and end for occurrences of other tags
if tokens[index].endswith('START') or tokens[index].endswith('END'):
between_tags += 1
e_end -= between_tags
        # only eliminate the first occurrence of the entity_start and entity_end
new_tokens = []
entity_start_seen = 0
entity_end_seen = 0
for i, x in enumerate(tokens):
if x == entity_start:
entity_start_seen += 1
if x == entity_end:
entity_end_seen += 1
if x == entity_start and entity_start_seen == 1:
continue
if x == entity_end and entity_end_seen == 1:
continue
new_tokens.append(x)
return (e_start, e_end), new_tokens
# based upon the method in converter for DDI, this will do removal of the entity tags and keep
# track of where they are located in the sentence
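    # Illustrative sketch (hypothetical tags, assuming self.entity1 == 'DRUG' and self.entity2 == 'EFFECT'):
    # for tokens ['DRUGSTART', 'aspirin', 'DRUGEND', 'relieves', 'EFFECTSTART', 'pain', 'EFFECTEND']
    # the method below returns e1_idx = [(0, 0)], e2_idx = [(2, 2)] and the cleaned token list
    # ['aspirin', 'relieves', 'pain'].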
def get_entity_positions_and_replacement_sentence(self, tokens):
e1_idx = []
e2_idx = []
tokens_for_indexing = tokens
for token in tokens:
if token.endswith('START') and len(token) > len('START'):
ending_token = token[:-5] + 'END'
#print("tokens_for_indexing1:",tokens_for_indexing)
e_idx, tokens_for_indexing = \
self.get_entity_start_and_end(token, ending_token, tokens_for_indexing)
#print("tokens_for_indexing2:",tokens_for_indexing)
if token == self.entity1 + 'START' or token == self.entity_either + 'START':
e1_idx.append(e_idx)
if token == self.entity2 + 'START' or token == self.entity_either + 'START':
e2_idx.append(e_idx)
return e1_idx, e2_idx, tokens_for_indexing
#TODO unify the preprocessing code with actually writing to a dataframe so that experiments can be started
# Read the original dataframe, generate the replacement sentence and then from that, you should just
# call the get_entity_positions_and_replacement_sentence
# might be good to just have one method to do this because it seems like the tasks are kinda similar
# just different methods to call for preprocessing 1 vs 2
'''
Returns the dataframe after doing the preprocessing
'''
# update the metadata and the sentence with the preprocessed version
def update_metadata_sentence(self, row):
tagged_sentence = row.tagged_sentence
#print("tagged_sentence:",tagged_sentence.split())
e1_idx, e2_idx, tokens_for_indexing = \
self.get_entity_positions_and_replacement_sentence(tagged_sentence.split())
#print(e1_idx, e2_idx, tokens_for_indexing)
new_sentence = self.list_to_string(tokens_for_indexing)
metadata = row.metadata
metadata['e1']['word_index'] = e1_idx
metadata['e2']['word_index'] = e2_idx
metadata['e1']['word'] = " ".join(tokens_for_indexing[e1_idx[0][0]: e1_idx[0][1]+1])
metadata['e2']['word'] = " ".join(tokens_for_indexing[e2_idx[0][0]: e2_idx[0][1]+1])
metadata.pop('entity_replacement', None) # remove the entity replacement dictionary from metadata
row.tokenized_sentence = new_sentence
row.metadata = metadata
return row
# to streamline the reading of the dataframe
def read_dataframe(self, directory):
df = pd.read_csv(directory, sep='\t')
def literal_eval_metadata(row):
metadata = row.metadata
metadata = literal_eval(metadata)
return metadata
df['metadata'] = df.apply(literal_eval_metadata, axis=1)
# metadata is a dictionary which is written into the csv format as a string
# but in order to be treated as a dictionary it needs to be evaluated
return df
    # Give this preprocessing function the location of the original dataframe; it reads it and
    # applies the configured preprocessing type:
    # 1: replace with all concepts in the sentence, 2: replace the stop words, punctuation and digits,
    # 3: replace only punctuation and digits
def preprocess(self, df_directory):
tqdm.pandas()
df = self.read_dataframe(df_directory)
none = True
for p in self.preprocessing_types:
if self.preprocessing_types[p]:
none = False
break
if none:
df['tagged_sentence'] = df.progress_apply(self.generate_tagged_sentence, axis=1)
print("Updating metadata sentence:")
df = df.progress_apply(self.update_metadata_sentence, axis=1)
else:
if self.preprocessing_types["ner_blinding"]:
print("NER blinding preprocessing:")
df[['tagged_sentence', 'upos_sentence', 'deps_sentence', 'ner_sentence']] = df.progress_apply(self.replace_with_concept, replacement_type='ner', axis=1)
print("Updating metadata sentence:")
df = df.progress_apply(self.update_metadata_sentence, axis=1)
elif self.preprocessing_types["entity_blinding"]:
print("Entity blinding preprocessing:")
df[['tagged_sentence', 'upos_sentence', 'deps_sentence', 'ner_sentence']] = df.progress_apply(self.replace_with_concept, replacement_type='entity', axis=1) # along the column axis
print("Updating metadata sentence:")
df = df.progress_apply(self.update_metadata_sentence, axis=1)
if self.preprocessing_types["digit"] or \
self.preprocessing_types["punct"] or \
self.preprocessing_types["stopword"] or \
self.preprocessing_types["brackets"]:
print("Digit, punctuaction, brackets, stopword or wordnet preprocessing:")
df[['tagged_sentence', 'upos_sentence', 'deps_sentence', 'ner_sentence']] = \
df.progress_apply(self.replace_digit_punctuation_stop_word_brackets, axis=1)
#print(df)
print("Updating metadata sentence:")
df = df.progress_apply(self.update_metadata_sentence, axis=1)
df = df.rename({'tokenized_sentence': 'preprocessed_sentence'}, axis=1)
df = df.drop(['tagged_sentence'], axis=1)
return df
def flatten_list_of_tuples(self, a):
return list(sum(a, ()))
def write_into_txt(self, df, directory):
print("Unique relations: \t", df['relation_type'].unique())
null_row = df[df["relation_type"].isnull()]
if null_row.empty:
idx_null_row = None
else:
idx_null_row = null_row.index.values[0]
with open(directory, 'w') as outfile:
for i in tqdm(range(0, len(df))):
                entry = {}
head = {}
tail = {}
if idx_null_row is not None and i == idx_null_row:
continue
row = df.iloc[i]
metadata = row.metadata
# TODO: need to change below in order to contain a sorted list of the positions
e1 = self.flatten_list_of_tuples(metadata['e1']['word_index'])
e2 = self.flatten_list_of_tuples(metadata['e2']['word_index'])
e1 = sorted(e1)
e2 = sorted(e2)
head["name"] = metadata['e1']['word']
head["pos"] = [e1[0], e1[1]+1]
tail["name"] = metadata['e2']['word']
tail["pos"] = [e2[0], e2[1]+1]
try:
tokenized_sentence = row.tokenized_sentence
except AttributeError:
tokenized_sentence = row.preprocessed_sentence
if type(tokenized_sentence) is not str:
continue
tokenized_sentence = tokenized_sentence.split(" ")
dict["token"] = tokenized_sentence
dict["h"] = head
dict["t"] = tail
dict["pos"] = row.upos_sentence.split(" ")
dict["deps"] = row.deps_sentence.split(" ")
dict["ner"] = row.ner_sentence.split(" ")
dict["relation"] = row.relation_type
outfile.write(str(dict)+"\n")
outfile.close()
|
the-stack_106_28871 | #!/usr/bin/env python
#
# Stats.py -- Simple statistics class: computes mean, sigma, min, max, rms.
#
# Author: Brian Wilson
# @(#) Stats.py 1.0 2003/11/24
#
# Implemented by saving five accumulators:
# no of points, mean, sum of squares of diffs from mean, min, and max.
# Methods:
# add -- add a data point to the accumulating stats
# calc -- compute the five statistics: n, mean, std dev, min, max, rms
# label -- set the label for printing
# format -- set the float format for printing
# __repr__ -- generates one-line string version of statistics for easy printing
# reset -- zero the accumulators
# addm -- add an array of data points to the accumulators (add multiple)
#
# See tests at end of file for example usage.
#
from math import sqrt
class Stats:
"""Simple statistics class that computes mean, std dev, min, max, and rms."""
def __init__(self, label=None, format=None):
"""Create Stats object, optionally set print label and float format string."""
self.reset()
self.labelStr = label
self.formatStr = format
def add(self, val):
"""Add one data point to the accumulators."""
self.n += 1
dval = val - self.mean # difference from current mean
self.mean += dval/self.n # update mean
dval = val - self.mean # diff from new mean
self.sumsq += dval*dval # update sum of squares
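        # Online (single-pass) update: the mean is nudged toward val by dval/n and sumsq
        # accumulates squared deviations from the running mean, so no samples need to be stored.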
if (self.n == 1):
self.min = val
self.max = val
else:
self.min = min(self.min, val)
self.max = max(self.max, val)
return self
def calc(self):
"""Calculate the statistics for the data added so far.
Returns tuple of six values: n, mean, sigma, min, max, rms.
"""
sigma = 0.
rms = 0.
if (self.n > 0):
if (self.n >= 2):
sd2 = self.sumsq / (self.n-1)
if (sd2 > 0.):
sigma = sqrt(sd2)
else:
sigma = 0.
rms = sqrt(self.mean*self.mean + self.sumsq/self.n)
return (self.n, self.mean, sigma, self.min, self.max, rms)
def label(self, str):
"""Label the statistics for printing."""
self.labelStr = str
return self
def format(self, str):
"""Set the float format to be used in printing stats."""
self.formatStr = str
return self
def __repr__(self):
"""One-line stats representation for simple printing."""
if (self.labelStr == None or self.labelStr == ""):
self.labelStr = "Stats"
line = self.labelStr + ": "
if self.formatStr:
a = [self.formatStr for i in range(5)]
a.insert(0, '%d')
format = ' '.join(a)
line += format % self.calc()
else:
line += "%d %f %f %f %f %f" % self.calc()
return line
def reset(self):
"""Reset the accumulators to start over."""
self.n = 0
self.mean = 0.0
self.sumsq = 0.0
self.min = 0.0
self.max = 0.0
self.labelStr = None
self.formatStr = None
return self
def addm(self, seq):
"""Add multiple - add a sequence of data points all at once."""
for val in seq:
self.add(val)
return self
if __name__ == '__main__':
def test():
"""
>>> print Stats()
Stats: 0 0.000000 0.000000 0.000000 0.000000 0.000000
>>> def f(s):
... for v in [2.3, 4.5, 1.8, 6.2, 3.5]: s.add(v)
... s.label('test2')
... return s
>>> print f( Stats() )
test2: 5 3.660000 1.468279 1.800000 6.200000 3.888480
>>> print Stats().label('test3').addm([2.3, 4.5, 1.8, 6.2, 3.5])
test3: 5 3.660000 1.468279 1.800000 6.200000 3.888480
>>> print Stats('test4').format('%5.2f').addm([2.3, 4.5, 1.8, 6.2, 3.5])
test4: 5 3.66 1.47 1.80 6.20 3.89
>>> print Stats('test5', '%4.1f').addm([2.3, 4.5, 1.8, 6.2, 3.5])
test5: 5 3.7 1.5 1.8 6.2 3.9
"""
import doctest
doctest.testmod()
|
the-stack_106_28873 | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, VARCHAR, Text
from sqlalchemy.exc import StatementError
import sys
Base = declarative_base()
class Recipe(Base):
__tablename__ = "recipe"
id = Column(Integer, primary_key=True)
name = Column(VARCHAR(), nullable=False)
recipe = Column(Text, nullable=False)
def __init__(self, name, recipe):
self.name = name
self.recipe = recipe
def __repr__(self):
return f"Recipe<(name={self.name}, recipe={self.recipe})>"
def store(session, recipe_name, recipe_text):
try:
recipe = Recipe(recipe_name, recipe_text)
session.add(recipe)
session.commit()
except StatementError as se:
        sys.stderr.write(str(se))
session.rollback()
def fetch(session, recipe_name):
try:
recipe = session.query(Recipe.recipe).filter_by(name=recipe_name)
return recipe[0][0]
except StatementError as se:
        sys.stderr.write(str(se))
def fetch_all(session):
try:
recipes = session.query(Recipe.name).all()
return recipes
except StatementError as se:
        sys.stderr.write(str(se))
def delete(session, recipe_name):
try:
recipes = session.query(Recipe).filter_by(name=recipe_name)
for recipe in recipes:
session.delete(recipe)
session.commit()
except StatementError as se:
        sys.stderr.write(str(se))
session.rollback()
def find(session, name_part):
try:
recipes = session.query(Recipe.name).filter(Recipe.name.like(f"%{name_part}%"))
return recipes
except StatementError as se:
        sys.stderr.write(str(se))
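# Minimal usage sketch (illustrative only; the engine URL and session setup below are
# assumptions, not part of this module):
#   from sqlalchemy import create_engine
#   from sqlalchemy.orm import sessionmaker
#   engine = create_engine("sqlite:///recipes.db")
#   Base.metadata.create_all(engine)
#   session = sessionmaker(bind=engine)()
#   store(session, "pancakes", "Mix flour, milk and eggs; fry in a hot pan.")
#   print(fetch(session, "pancakes"))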
|
the-stack_106_28874 | import time
from rauth import OAuth1Session
from st2common.util import isotime
from st2reactor.sensor.base import PollingSensor
__all__ = [
'CubeSensorsMeasurementsSensor'
]
BASE_URL = 'https://api.cubesensors.com/v1'
FIELD_CONVERT_FUNCS = {
'temp': lambda value: (float(value) / 100)
}
class CubeSensorsMeasurementsSensor(PollingSensor):
DATASTORE_KEY_NAME = 'last_measurements_timestamp'
def __init__(self, sensor_service, config=None, poll_interval=None):
super(CubeSensorsMeasurementsSensor, self).__init__(sensor_service=sensor_service,
config=config,
poll_interval=poll_interval)
self._device_uids = self._config['sensor'].get('device_uids', [])
self._logger = self._sensor_service.get_logger(__name__)
self._device_info_cache = {}
self._last_measurement_timestamps = {} # maps device_uid -> last mes. timestamp
def setup(self):
if not self._device_uids:
raise ValueError('No "device_uids" configured!')
self._session = self._get_session()
        # TODO: cache device names
# Populate device info cache
for device_uid in self._device_uids:
data = self._get_device_info(device_uid=device_uid)
self._device_info_cache[device_uid] = data
def poll(self):
for device_uid in self._device_uids:
result = self._get_measurements(device_uid=device_uid)
if not result:
continue
self._handle_result(device_uid=device_uid, result=result)
def cleanup(self):
pass
def add_trigger(self, trigger):
pass
def update_trigger(self, trigger):
pass
def remove_trigger(self, trigger):
pass
def _handle_result(self, device_uid, result):
existing_last_measurement_timestamp = self._get_last_measurement_timestamp(
device_uid=device_uid)
new_last_measurement_timestamp = isotime.parse(result['time'])
new_last_measurement_timestamp = int(time.mktime(
new_last_measurement_timestamp.timetuple()))
if (existing_last_measurement_timestamp and
new_last_measurement_timestamp <= existing_last_measurement_timestamp):
# We have already seen this measurement, skip it
self._logger.debug(('No new measurements, skipping results we have already seen'
'for device %s' % (device_uid)))
return
# Dispatch trigger
self._dispatch_trigger(device_uid=device_uid, result=result)
# Store last measurement timestamp
self._set_last_measurement_timestamp(
device_uid=device_uid, last_measurement_timestamp=new_last_measurement_timestamp)
def _get_last_measurement_timestamp(self, device_uid):
"""
Retrieve last measurement timestamp for a particular device.
:rtype: ``int``
"""
last_measurement_timestamp = self._last_measurement_timestamps.get(device_uid, None)
if not last_measurement_timestamp:
name = self._get_datastore_key_name(device_uid=device_uid)
value = self._sensor_service.get_value(name=name)
self._last_measurement_timestamps[device_uid] = int(value) if value else 0
return self._last_measurement_timestamps[device_uid]
def _set_last_measurement_timestamp(self, device_uid, last_measurement_timestamp):
"""
Store a last measurement timestamp for a particular device.
"""
self._last_measurement_timestamps[device_uid] = last_measurement_timestamp
name = self._get_datastore_key_name(device_uid=device_uid)
        value = str(last_measurement_timestamp)
self._sensor_service.set_value(name=name, value=value)
return last_measurement_timestamp
def _get_datastore_key_name(self, device_uid):
name = self.DATASTORE_KEY_NAME + '.' + device_uid
return name
def _dispatch_trigger(self, device_uid, result):
trigger = 'cubesensors.measurements'
device_info = self._device_info_cache.get(device_uid, {})
device_name = device_info.get('extra', {}).get('name', 'unknown')
payload = {
'device_uid': device_uid,
'device_name': device_name,
'measurements': result
}
self._sensor_service.dispatch(trigger=trigger, payload=payload)
def _get_device_info(self, device_uid):
response = self._session.get('%s/devices/%s' % (BASE_URL, device_uid))
data = response.json()
return data['device']
def _get_measurements(self, device_uid):
"""
Retrieve measurements for a particular device.
"""
response = self._session.get('%s/devices/%s/current' % (BASE_URL, device_uid))
data = response.json()
values = data['results'][0]
field_list = data['field_list']
result = {}
for index, field_name in enumerate(field_list):
value = values[index]
convert_func = FIELD_CONVERT_FUNCS.get(field_name, None)
if convert_func:
value = convert_func(value=value)
result[field_name] = value
return result
def _get_session(self):
session = OAuth1Session(consumer_key=self._config['consumer_key'],
consumer_secret=self._config['consumer_secret'],
access_token=self._config['access_token'],
access_token_secret=self._config['access_token_secret'])
return session
|
the-stack_106_28875 | import click
import wandb
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.datasets as datasets
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from PIL import Image
import PIL
import os
from numpy import random
import numpy as np
import pytorch_fid.fid_score
from torch.utils.tensorboard import SummaryWriter
# Discriminator judges whether an image is real or fake
class Discriminator(nn.Module):
def __init__(self, img_dim):
super().__init__()
self.disc = nn.Sequential(
nn.Linear(img_dim, 128),
nn.LeakyReLU(0.1),
nn.Linear(128,1), # output single value (fake 0, real 1)
nn.Sigmoid(), # ensure 0 or 1 at output
)
def forward(self, x):
return self.disc(x)
# Generator creates the fakes
# generator uses a prior distribution of noise to
# produce a wide range of diverse samples.
class Generator(nn.Module):
def __init__(self, z_dim, img_dim): # z_ dim is noise dimension
super().__init__()
self.gen = nn.Sequential(
nn.Linear(z_dim, 256),
nn.LeakyReLU(0.1),
nn.Linear(256, img_dim), # 28x28x1 -> 784
nn.Tanh(), # ensure output of pixel vas is between -1 and 1
)
def forward(self, x):
return self.gen(x)
@click.command()
@click.option('--num_epochs', default=50, help='Number of epochs to train.')
@click.option('--report_tensorboard', default=False, help='Use tensorboard for reporting.')
@click.option('--report_wandb', default=False, help='Use weights & biases for reporting.')
@click.option('--calculate_fid', default=False, help='Calculate the Frechet inception distance metric between fakes and reals.')
def train(num_epochs, report_tensorboard, report_wandb, calculate_fid):
runid = random.randint(9999999)
device = "cuda" if torch.cuda.is_available() else "cpu"
learning_rate = 3e-4
z_dim = 64
image_dim = 28 * 28 * 1
batch_size = 32
fid_samples = 2048
disc = Discriminator(image_dim).to(device)
gen = Generator(z_dim, image_dim).to(device)
fixed_noise = torch.randn((batch_size, z_dim)).to(device)
tforms = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5,), (0.5))]
)
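    # Normalize((0.5,), (0.5)) maps MNIST pixels from [0, 1] to [-1, 1],
    # matching the Tanh output range of the generator.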
dataset = datasets.MNIST(root="dataset/", transform=tforms, download=True)
loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
fid_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
opt_disc = optim.Adam(disc.parameters(), lr=learning_rate)
opt_gen = optim.Adam(gen.parameters(), lr=learning_rate)
criterion = nn.BCELoss()
writer_fake = SummaryWriter(f"runs/GAN_MNIST/fake")
writer_real = SummaryWriter(f"runs/GAN_MNIST/real")
if report_wandb:
wandb.init(project="mini-gans", entity="taptoi")
wandb.config = {
"learning_rate": learning_rate,
"epochs": num_epochs,
"batch_size": batch_size
}
image_func = lambda im : Image.fromarray(im, 'L')
if calculate_fid:
if not os.path.exists(f'fid/run{runid}/reals'):
os.makedirs(f'fid/run{runid}/reals')
for batch_idx, (fid_reals, _) in enumerate(fid_loader):
if(batch_idx * batch_size == fid_samples):
break
reals = fid_reals.reshape(-1, 1, 28, 28).cpu().detach().numpy()
for i in range(batch_size):
image_func(reals[i][0].astype('uint8')).save(f"fid/run{runid}/reals/real{i + batch_idx * batch_size:04d}.png")
for epoch in range(num_epochs):
for batch_idx, (real, _) in enumerate(loader):
real = real.view(-1, 784).to(device)
batch_size = real.shape[0]
            ### Train Discriminator: max log(D(real)) + log(1 - D(G(z)))
# sample minibatch of noise samples from noise prior p_g(z)
noise = torch.randn(batch_size, z_dim).to(device)
# sample minibatch of examples from data generating distribution
fake = gen(noise)
disc_real = disc(real).view(-1)
lossD_real = criterion(disc_real, torch.ones_like(disc_real))
disc_fake = disc(fake).view(-1)
lossD_fake = criterion(disc_fake, torch.zeros_like(disc_fake))
lossD = 0.5 * (lossD_real + lossD_fake) / 2
disc.zero_grad()
lossD.backward(retain_graph=True) # to preserve fake computation
opt_disc.step()
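            ### Train Generator: min log(1 - D(G(z))) <-> max log(D(G(z)))
            # (non-saturating generator loss, computed on the same fake batch)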
output = disc(fake).view(-1)
lossG = criterion(output, torch.ones_like(output))
gen.zero_grad()
lossG.backward()
opt_gen.step()
if calculate_fid:
if batch_idx == 0:
# generate fakes datasets for FID calculation:
if not os.path.exists(f'fid/run{runid}/fakes'):
os.makedirs(f'fid/run{runid}/fakes')
with torch.no_grad():
for batch in range(round(fid_samples / batch_size)):
noise_input = torch.randn(batch_size, z_dim).to(device)
fakes = gen(noise_input).reshape(-1, 1, 28, 28)
for i in range(batch_size):
torchvision.utils.save_image(fakes[i][0], f"fid/run{runid}/fakes/fake{i + batch * batch_size:04d}.png",normalize=True)
path_fakes = f"fid/run{runid}/fakes"
path_reals = f"fid/run{runid}/reals"
fid_value = pytorch_fid.fid_score.calculate_fid_given_paths((path_fakes, path_reals), 50, device, fid_samples, 8)
if(report_wandb):
wandb.log({"FID": fid_value})
print("FID value: ", fid_value)
if report_tensorboard:
if batch_idx == 0:
print(
f"Epoch [{epoch + 1}/{num_epochs}] \ "
f"Loss D: {lossD:.4f}, Loss G: {lossG:.4f}"
)
with torch.no_grad():
fakes = gen(fixed_noise).reshape(-1, 1, 28, 28)
reals = real.reshape(-1, 1, 28, 28)
img_grid_fake = torchvision.utils.make_grid(fakes, normalize=True)
img_grid_real = torchvision.utils.make_grid(reals, normalize=True)
writer_fake.add_image(
"Mnist fakes", img_grid_fake, global_step=epoch
)
writer_real.add_image(
"Mnist reals", img_grid_real, global_step=epoch
)
if report_wandb:
if batch_idx == 0:
print(
f"Epoch [{epoch + 1}/{num_epochs}] \ "
f"Loss D: {lossD:.4f}, Loss G: {lossG:.4f}"
)
wandb.log({"lossD": lossD,
"lossG": lossG,
"epoch": epoch,
})
fakes = gen(fixed_noise).reshape(-1, 1, 28, 28)
reals = real.reshape(-1, 1, 28, 28)
img_grid_fake = torchvision.utils.make_grid(fakes, normalize=True)
img_grid_real = torchvision.utils.make_grid(reals, normalize=True)
wdb_fakes = wandb.Image(img_grid_fake, caption="Fakes")
wdb_reals = wandb.Image(img_grid_real, caption="Reals")
wandb.log({"fakes": wdb_fakes})
wandb.log({"reals": wdb_reals})
if __name__ == '__main__':
train() |
the-stack_106_28876 | from django.urls import path
from django.views.decorators.cache import cache_page
from django.views.generic import TemplateView
from ovu.core.views import home_view, about_us_view, componentes_list_view, componente_detail, \
dato_create_view, indicador_modal_detail_view, indicador_tabla_porcentual, informe_view
from ovu.core.views.indicadores import indicador_tablas, indicador_graficas, indicador_descargar
app_name = "core"
urlpatterns = [
# path("", TemplateView.as_view(template_name="core/index.html"), name="home"),
path('', home_view, name='home'),
path('about_us', about_us_view, name='about_us'),
path('faq', TemplateView.as_view(template_name="core/faq.html"), name='faq'),
path('componentes', (cache_page(60 * 0))(componentes_list_view), name='componentes'),
path('componente/<int:pk>', (componente_detail), name='componente_detalles'),
# Datos
path('dato/<int:componente>', dato_create_view, name='dato_add'),
# Indicadores
path('indicador/<int:pk>', indicador_modal_detail_view, name='indicador_modal_detail'),
path('indicador/tp/<int:pk>', (indicador_tablas), name='indicador_tablas'),
path('indicador/tg/<int:pk>', (cache_page(60 * 0))(indicador_graficas), name='indicador_graficas'),
path('indicador/tp/<int:pk>', (indicador_tabla_porcentual), name='indicador_tabla_porcentual'),
path('indicador/descargar/<int:indicador>', indicador_descargar, name='indicador_descargar'),
# Informes
path('informes', informe_view, name='informes'),
]
|
the-stack_106_28877 | """
Implementation of the FFTlog algorithm, very much inspired by mcfit (https://github.com/eelregit/mcfit) and implementation in
https://github.com/sfschen/velocileptors/blob/master/velocileptors/Utils/spherical_bessel_transform_fftw.py
"""
import os
import numpy as np
from scipy.special import gamma, loggamma
class FFTlog(object):
r"""
Implementation of the FFTlog algorithm presented in https://jila.colorado.edu/~ajsh/FFTLog/, which computes the generic integral:
.. math::
G(y) = \int_{0}^{\infty} x dx F(x) K(xy)
with :math:`F(x)` input function, :math:`K(xy)` a kernel.
This transform is (mathematically) invariant under a power law transformation:
.. math::
G_{q}(y) = \int_{0}^{\infty} x dx F_{q}(x) K_{q}(xy)
where :math:`F_{q}(x) = G(x)x^{-q}`, :math:`K_{q}(t) = K(t)t^{q}` and :math:`G_{q}(y) = G(y)y^{q}`.
"""
def __init__(self, x, kernel, q=0, minfolds=2, lowring=True, xy=1, check_level=0, engine='numpy', **engine_kwargs):
r"""
Initialize :class:`FFTlog`, which can perform several transforms at once.
Parameters
----------
x : array_like
Input log-spaced coordinates. Must be strictly increasing.
If 1D, is broadcast to the number of provided kernels.
kernel : callable, list of callables
Mellin transform of the kernel:
.. math:: U_{K}(z) = \int_{0}^{\infty} t^{z-1} K(t) dt
If a list of kernels is provided, will perform all transforms at once.
q : float, list of floats, default=0
Power-law tilt(s) to regularise integration.
minfolds : int, default=2
            The padded FFT size is ``2**n``, with the minimum :math:`n` chosen such that ``2**n > minfolds * x.size``.
lowring : bool, default=True
If ``True`` set output coordinates according to the low-ringing condition, otherwise set it with ``xy``.
xy : float, list of floats, default=1
Enforce the reciprocal product (i.e. ``x[0] * y[-1]``) of the input ``x`` and output ``y`` coordinates.
check_level : int, default=0
If non-zero run sanity checks on input.
engine : string, default='numpy'
FFT engine. See :meth:`set_fft_engine`.
engine_kwargs : dict
Arguments for FFT engine.
Note
----
Kernel definition is different from that of https://jila.colorado.edu/~ajsh/FFTLog/, which uses (eq. 10):
.. math:: U_{K}(z) = \int_{0}^{\infty} t^{z} K(t) dt
Therefore, one should use :math:`q = 1` for Bessel functions to match :math:`q = 0` in https://jila.colorado.edu/~ajsh/FFTLog/.
"""
self.inparallel = isinstance(kernel, (tuple, list))
if not self.inparallel:
kernel = [kernel]
self.kernel = list(kernel)
if np.ndim(q) == 0:
q = [q] * self.nparallel
self.q = list(q)
self.x = np.asarray(x)
if not self.inparallel:
self.x = self.x[None, :]
elif self.x.ndim == 1:
self.x = np.tile(self.x[None, :], (self.nparallel, 1))
if np.ndim(xy) == 0:
xy = [xy] * self.nparallel
self.xy = list(xy)
self.check_level = check_level
if self.check_level:
if len(self.x) != self.nparallel:
raise ValueError('x and kernel must of same length')
if len(self.q) != self.nparallel:
raise ValueError('q and kernel must be lists of same length')
if len(self.xy) != self.nparallel:
raise ValueError('xy and kernel must be lists of same length')
self.minfolds = minfolds
self.lowring = lowring
self.setup()
self.set_fft_engine(engine, **engine_kwargs)
def set_fft_engine(self, engine='numpy', **engine_kwargs):
"""
Set up FFT engine.
See :func:`get_fft_engine`
Parameters
----------
engine : BaseEngine, string, default='numpy'
FFT engine, or one of ['numpy', 'fftw'].
engine_kwargs : dict
Arguments for FFT engine.
"""
self._engine = get_fft_engine(engine, size=self.padded_size, nparallel=self.nparallel, **engine_kwargs)
@property
def nparallel(self):
"""Number of transforms performed in parallel."""
return len(self.kernel)
def setup(self):
"""Set up u funtions."""
self.size = self.x.shape[-1]
self.delta = np.log(self.x[:, -1] / self.x[:, 0]) / (self.size - 1)
nfolds = (self.size * self.minfolds - 1).bit_length()
self.padded_size = 2**nfolds
npad = self.padded_size - self.size
self.padded_size_in_left, self.padded_size_in_right = npad // 2, npad - npad // 2
self.padded_size_out_left, self.padded_size_out_right = npad - npad // 2, npad // 2
if self.check_level:
if not np.allclose(np.log(self.x[:, 1:] / self.x[:, :-1]), self.delta, rtol=1e-3):
raise ValueError('Input x must be log-spaced')
if self.padded_size < self.size:
raise ValueError('Convolution size must be larger than input x size')
if self.lowring:
self.lnxy = np.array([delta / np.pi * np.angle(kernel(q + 1j * np.pi / delta)) for kernel, delta, q in zip(self.kernel, self.delta, self.q)])
else:
self.lnxy = np.log(self.xy) + self.delta
self.y = np.exp(self.lnxy - self.delta)[:, None] / self.x[:, ::-1]
m = np.arange(0, self.padded_size // 2 + 1)
self.padded_u, self.padded_prefactor, self.padded_postfactor = [], [], []
self.padded_x = pad(self.x, (self.padded_size_in_left, self.padded_size_in_right), axis=-1, extrap='log')
self.padded_y = pad(self.y, (self.padded_size_out_left, self.padded_size_out_right), axis=-1, extrap='log')
prev_kernel, prev_q, prev_delta, prev_u = None, None, None, None
for kernel, padded_x, padded_y, lnxy, delta, q in zip(self.kernel, self.padded_x, self.padded_y, self.lnxy, self.delta, self.q):
self.padded_prefactor.append(padded_x**(-q))
self.padded_postfactor.append(padded_y**(-q))
if kernel is prev_kernel and q == prev_q and delta == prev_delta:
u = prev_u
else:
u = prev_u = kernel(q + 2j * np.pi / self.padded_size / delta * m)
self.padded_u.append(u * np.exp(-2j * np.pi * lnxy / self.padded_size / delta * m))
prev_kernel, prev_q, prev_delta = kernel, q, delta
self.padded_u = np.array(self.padded_u)
self.padded_prefactor = np.array(self.padded_prefactor)
self.padded_postfactor = np.array(self.padded_postfactor)
def __call__(self, fun, extrap=0, keep_padding=False):
"""
Perform the transforms.
Parameters
----------
fun : array_like
Function to be transformed.
Last dimensions should match (:attr:`nparallel`,len(x)) where ``len(x)`` is the size of the input x-coordinates.
(if :attr:`nparallel` is 1, the only requirement is the last dimension to be (len(x))).
extrap : float, string, default=0
How to extrapolate function outside of ``x`` range to fit the integration range.
If 'log', performs a log-log extrapolation.
If 'edge', pad ``fun`` with its edge values.
Else, pad ``fun`` with the provided value.
Pass a tuple to differentiate between left and right sides.
keep_padding : bool, default=False
Whether to return function padded to the number of points in the integral.
By default, crop it to its original size.
Returns
-------
y : array
Array of new coordinates.
fftloged : array
Transformed function.
"""
padded_fun = pad(fun, (self.padded_size_in_left, self.padded_size_in_right), axis=-1, extrap=extrap)
fftloged = self._engine.backward(self._engine.forward(padded_fun * self.padded_prefactor) * self.padded_u) * self.padded_postfactor
if not keep_padding:
y, fftloged = self.y, fftloged[..., self.padded_size_out_left:self.padded_size_out_left + self.size]
else:
y, fftloged = self.padded_y, fftloged
if not self.inparallel:
y = y[0]
fftloged.shape = fun.shape if not keep_padding else fun.shape[:-1] + (self.padded_size,)
return y, fftloged
def inv(self):
"""Inverse the transform."""
self.x, self.y = self.y, self.x
        self.padded_x, self.padded_y = self.padded_y, self.padded_x
self.padded_prefactor, self.padded_postfactor = 1 / self.padded_postfactor, 1 / self.padded_prefactor
self.padded_u = 1 / self.padded_u.conj()
class HankelTransform(FFTlog):
"""
Hankel transform implementation using :class:`FFTlog`.
It relies on Bessel function kernels.
"""
def __init__(self, x, nu=0, **kwargs):
"""
Initialize Hankel transform.
Parameters
----------
x : array_like
Input log-spaced coordinates.
If 1D, is broadcast to the number of provided ``nu``.
nu : int, list of int, default=0
Order of Bessel functions.
If a list is provided, will perform all transforms at once.
kwargs : dict
Arguments for :class:`FFTlog`.
"""
if np.ndim(nu) == 0:
kernel = BesselJKernel(nu)
else:
kernel = [BesselJKernel(nu_) for nu_ in nu]
FFTlog.__init__(self, x, kernel, **kwargs)
self.padded_prefactor *= self.padded_x**2
class PowerToCorrelation(FFTlog):
r"""
Power spectrum to correlation function transform, defined as:
.. math::
\xi_{\ell}(s) = \frac{(-i)^{\ell}}{2 \pi^{2}} \int dk k^{2} P_{\ell}(k) j_{\ell}(ks)
"""
def __init__(self, k, ell=0, q=0, complex=False, **kwargs):
"""
Initialize power to correlation transform.
Parameters
----------
k : array_like
Input log-spaced wavenumbers.
If 1D, is broadcast to the number of provided ``ell``.
ell : int, list of int, default=0
Poles. If a list is provided, will perform all transforms at once.
q : float, list of floats, default=0
Power-law tilt(s) to regularise integration.
complex : bool, default=False
``False`` assumes the imaginary part of odd power spectrum poles is provided.
kwargs : dict
Arguments for :class:`FFTlog`.
"""
if np.ndim(ell) == 0:
kernel = SphericalBesselJKernel(ell)
else:
kernel = [SphericalBesselJKernel(ell_) for ell_ in ell]
FFTlog.__init__(self, k, kernel, q=1.5 + q, **kwargs)
self.padded_prefactor *= self.padded_x**3 / (2 * np.pi)**1.5
# Convention is (-i)^ell/(2 pi^2)
ell = np.atleast_1d(ell)
if complex:
phase = (-1j) ** ell
else:
# Prefactor is (-i)^ell, but we take in the imaginary part of odd power spectra, hence:
# (-i)^ell = (-1)^(ell/2) if ell is even
# (-i)^ell i = (-1)^(ell//2) if ell is odd
phase = (-1)**(ell // 2)
# Not in-place as phase (and hence padded_postfactor) may be complex instead of float
self.padded_postfactor = self.padded_postfactor * phase[:, None]
class CorrelationToPower(FFTlog):
r"""
Correlation function to power spectrum transform, defined as:
.. math::
P_{\ell}(k) = 4 \pi i^{\ell} \int ds s^{2} \xi_{\ell}(s) j_{\ell}(ks)
"""
def __init__(self, s, ell=0, q=0, complex=False, **kwargs):
"""
Initialize power to correlation transform.
Parameters
----------
s : array_like
Input log-spaced separations.
If 1D, is broadcast to the number of provided ``ell``.
ell : int, list of int, default=0
Poles. If a list is provided, will perform all transforms at once.
q : float, list of floats, default=0
Power-law tilt(s) to regularise integration.
complex : bool, default=False
``False`` returns the real part of even poles, and the imaginary part of odd poles.
kwargs : dict
Arguments for :class:`FFTlog`.
"""
if np.ndim(ell) == 0:
kernel = SphericalBesselJKernel(ell)
else:
kernel = [SphericalBesselJKernel(ell_) for ell_ in ell]
FFTlog.__init__(self, s, kernel, q=1.5 + q, **kwargs)
self.padded_prefactor *= self.padded_x**3 * (2 * np.pi)**1.5
# Convention is 4 \pi i^ell, and we return imaginary part of odd poles
ell = np.atleast_1d(ell)
if complex:
phase = (-1j) ** ell
else:
# We return imaginary part of odd poles
phase = (-1)**(ell // 2)
self.padded_postfactor = self.padded_postfactor * phase[:, None]
class TophatVariance(FFTlog):
"""
Variance in tophat window.
It relies on tophat kernel.
"""
def __init__(self, k, q=0, **kwargs):
"""
Initialize tophat variance transform.
Parameters
----------
k : array_like
Input log-spaced wavenumbers.
If 1D, is broadcast to the number of provided ``ell``.
q : float, list of floats, default=0
Power-law tilt(s) to regularise integration.
kwargs : dict
Arguments for :class:`FFTlog`.
"""
kernel = TophatSqKernel(ndim=3)
FFTlog.__init__(self, k, kernel, q=1.5 + q, **kwargs)
self.padded_prefactor *= self.padded_x**3 / (2 * np.pi**2)
class GaussianVariance(FFTlog):
"""
Variance in Gaussian window.
It relies on Gaussian kernel.
"""
def __init__(self, k, q=0, **kwargs):
"""
Initialize Gaussian variance transform.
Parameters
----------
k : array_like
Input log-spaced wavenumbers.
If 1D, is broadcast to the number of provided ``ell``.
q : float, list of floats, default=0
Power-law tilt(s) to regularise integration.
kwargs : dict
Arguments for :class:`FFTlog`.
"""
kernel = GaussianSqKernel()
FFTlog.__init__(self, k, kernel, q=1.5 + q, **kwargs)
self.padded_prefactor *= self.padded_x**3 / (2 * np.pi**2)
def pad(array, pad_width, axis=-1, extrap=0):
"""
Pad array along ``axis``.
Parameters
----------
array : array_like
Input array to be padded.
pad_width : int, tuple of ints
Number of points to be added on both sides of the array.
Pass a tuple to differentiate between left and right sides.
axis : int, default=-1
Axis along which padding is to be applied.
extrap : string, float, default=0
If 'log', performs a log-log extrapolation.
If 'edge', pad ``array`` with its edge values.
Else, pad ``array`` with the provided value.
Pass a tuple to differentiate between left and right sides.
Returns
-------
array : array
Padded array.
"""
array = np.asarray(array)
try:
pad_width_left, pad_width_right = pad_width
except (TypeError, ValueError):
pad_width_left = pad_width_right = pad_width
try:
extrap_left, extrap_right = extrap
except (TypeError, ValueError):
extrap_left = extrap_right = extrap
axis = axis % array.ndim
to_axis = [1] * array.ndim
to_axis[axis] = -1
if extrap_left == 'edge':
end = np.take(array, [0], axis=axis)
pad_left = np.repeat(end, pad_width_left, axis=axis)
elif extrap_left == 'log':
end = np.take(array, [0], axis=axis)
ratio = np.take(array, [1], axis=axis) / end
exp = np.arange(-pad_width_left, 0).reshape(to_axis)
pad_left = end * ratio ** exp
else:
pad_left = np.full(array.shape[:axis] + (pad_width_left,) + array.shape[axis + 1:], extrap_left)
if extrap_right == 'edge':
end = np.take(array, [-1], axis=axis)
pad_right = np.repeat(end, pad_width_right, axis=axis)
elif extrap_right == 'log':
end = np.take(array, [-1], axis=axis)
ratio = np.take(array, [-2], axis=axis) / end
exp = np.arange(1, pad_width_right + 1).reshape(to_axis)
pad_right = end / ratio ** exp
else:
pad_right = np.full(array.shape[:axis] + (pad_width_right,) + array.shape[axis + 1:], extrap_right)
return np.concatenate([pad_left, array, pad_right], axis=axis)
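# Hedged usage sketch (``pk`` is a placeholder array): extend a power spectrum with power-law
# ("log") extrapolation on the left and edge padding on the right before an FFTlog transform.
#
#     padded_pk = pad(pk, pad_width=(512, 512), axis=-1, extrap=('log', 'edge'))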
class BaseFFTEngine(object):
"""Base FFT engine."""
def __init__(self, size, nparallel=1, nthreads=None):
"""
Initialize FFT engine.
Parameters
----------
size : int
Array size.
nparallel : int, default=1
Number of FFTs to be performed in parallel.
nthreads : int, default=None
Number of threads.
"""
self.size = size
self.nparallel = nparallel
if nthreads is not None:
os.environ['OMP_NUM_THREADS'] = str(nthreads)
self.nthreads = int(os.environ.get('OMP_NUM_THREADS', 1))
class NumpyFFTEngine(BaseFFTEngine):
"""FFT engine based on :mod:`numpy.fft`."""
def forward(self, fun):
"""Forward transform of ``fun``."""
return np.fft.rfft(fun, axis=-1)
def backward(self, fun):
"""Backward transform of ``fun``."""
return np.fft.irfft(fun.conj(), n=self.size, axis=-1)
def apply_along_last_axes(func, array, naxes=1, toret=None):
"""Apply callable ``func`` over the last ``naxes`` of ``array``."""
if toret is None:
toret = np.empty_like(array)
shape_bak = array.shape
array.shape = (-1,) + shape_bak[-naxes:]
newshape_bak = toret.shape
toret.shape = (-1,) + newshape_bak[-naxes:]
for iarr, arr in enumerate(array):
toret[iarr] = func(arr)
array.shape = shape_bak
toret.shape = newshape_bak
return toret
try:
    import pyfftw
except ImportError:
    pyfftw = None
class FFTWEngine(BaseFFTEngine):
"""FFT engine based on :mod:`pyfftw`."""
def __init__(self, size, nparallel=1, nthreads=None, wisdom=None, plan='measure'):
"""
Initialize :mod:`pyfftw` engine.
Parameters
----------
size : int
Array size.
nparallel : int, default=1
Number of FFTs to be performed in parallel.
nthreads : int, default=None
Number of threads.
wisdom : string, tuple, default=None
:mod:`pyfftw` wisdom, to speed up initialization of FFTs.
            If a string, should be a path to the saved FFT wisdom (written with :func:`numpy.save`).
If a tuple, directly corresponds to the wisdom.
plan : string, default='measure'
Choices are ['estimate', 'measure', 'patient', 'exhaustive'].
            These are in increasing order of effort spent during the planning stage to create the fastest possible transform.
Usually 'measure' is a good compromise.
"""
if pyfftw is None:
raise NotImplementedError('Install pyfftw to use {}'.format(self.__class__.__name__))
super(FFTWEngine, self).__init__(size, nparallel=nparallel, nthreads=nthreads)
plan = plan.lower()
allowed_plans = ['estimate', 'measure', 'patient', 'exhaustive']
if plan not in allowed_plans:
raise ValueError('Plan {} unknown'.format(plan))
plan = 'FFTW_{}'.format(plan.upper())
if isinstance(wisdom, str):
wisdom = tuple(np.load(wisdom))
if wisdom is not None:
pyfftw.import_wisdom(wisdom)
else:
pyfftw.forget_wisdom()
# flags = ('FFTW_DESTROY_INPUT','FFTW_MEASURE')
self.fftw_f = pyfftw.empty_aligned((self.nparallel, self.size), dtype='float64')
self.fftw_fk = pyfftw.empty_aligned((self.nparallel, self.size // 2 + 1), dtype='complex128')
self.fftw_gk = pyfftw.empty_aligned((self.nparallel, self.size // 2 + 1), dtype='complex128')
self.fftw_g = pyfftw.empty_aligned((self.nparallel, self.size), dtype='float64')
# pyfftw.config.NUM_THREADS = threads
self.fftw_forward_object = pyfftw.FFTW(self.fftw_f, self.fftw_fk, direction='FFTW_FORWARD', flags=(plan,), threads=self.nthreads)
self.fftw_backward_object = pyfftw.FFTW(self.fftw_gk, self.fftw_g, direction='FFTW_BACKWARD', flags=(plan,), threads=self.nthreads)
def forward(self, fun):
"""Forward transform of ``fun``."""
if fun.ndim > 1 and fun.shape[:-1] != (self.nparallel,):
            # if nparallel matches, apply along the two last axes, else only the last axis (nparallel should be 1)
toret = np.empty_like(self.fftw_fk, shape=fun.shape[:-1] + self.fftw_fk.shape[-1:])
return apply_along_last_axes(self.forward, fun, naxes=1 + (fun.shape[-2] == self.nparallel), toret=toret)
# fun.shape = self.fftw_f.shape
self.fftw_f[...] = fun
return self.fftw_forward_object(normalise_idft=True)
def backward(self, fun):
"""Backward transform of ``fun``."""
if fun.ndim > 1 and fun.shape[:-1] != (self.nparallel,):
toret = np.empty_like(self.fftw_g, shape=fun.shape[:-1] + self.fftw_g.shape[-1:])
return apply_along_last_axes(self.backward, fun, naxes=1 + (fun.shape[-2] == self.nparallel), toret=toret)
# fun.shape = self.fftw_gk.shape
self.fftw_gk[...] = np.conj(fun)
return self.fftw_backward_object(normalise_idft=True)
def get_fft_engine(engine, *args, **kwargs):
"""
Return FFT engine.
Parameters
----------
engine : BaseFFTEngine, string
FFT engine, or one of ['numpy', 'fftw'].
args, kwargs : tuple, dict
Arguments for FFT engine.
Returns
-------
engine : BaseFFTEngine
"""
if isinstance(engine, str):
if engine.lower() == 'numpy':
return NumpyFFTEngine(*args, **kwargs)
if engine.lower() == 'fftw':
return FFTWEngine(*args, **kwargs)
raise ValueError('FFT engine {} is unknown'.format(engine))
return engine
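# Hedged usage sketch (illustrative sizes): select the FFT backend by name; 'fftw' requires
# pyfftw to be installed, otherwise FFTWEngine raises NotImplementedError.
#
#     engine = get_fft_engine('numpy', size=1024, nparallel=2)
#     fk = engine.forward(np.random.standard_normal((2, 1024)))
#     f = engine.backward(fk)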
class BaseKernel(object):
"""Base kernel."""
def __call__(self, z):
return self.eval(z)
def __eq__(self, other):
return other.__class__ == self.__class__
class BaseBesselKernel(BaseKernel):
"""Base Bessel kernel."""
def __init__(self, nu):
self.nu = nu
def __eq__(self, other):
return other.__class__ == self.__class__ and other.nu == self.nu
class BesselJKernel(BaseBesselKernel):
"""(Mellin transform of) Bessel kernel."""
def eval(self, z):
return np.exp(np.log(2) * (z - 1) + loggamma(0.5 * (self.nu + z)) - loggamma(0.5 * (2 + self.nu - z)))
class SphericalBesselJKernel(BaseBesselKernel):
"""(Mellin transform of) spherical Bessel kernel."""
def eval(self, z):
return np.exp(np.log(2) * (z - 1.5) + loggamma(0.5 * (self.nu + z)) - loggamma(0.5 * (3 + self.nu - z)))
class BaseTophatKernel(BaseKernel):
"""Base tophat kernel."""
def __init__(self, ndim=1):
self.ndim = ndim
def __eq__(self, other):
return other.__class__ == self.__class__ and other.ndim == self.ndim
class TophatKernel(BaseTophatKernel):
"""(Mellin transform of) tophat kernel."""
def eval(self, z):
return np.exp(np.log(2) * (z - 1) + loggamma(1 + 0.5 * self.ndim) + loggamma(0.5 * z) - loggamma(0.5 * (2 + self.ndim - z)))
class TophatSqKernel(BaseTophatKernel):
"""(Mellin transform of) square of tophat kernel."""
def __init__(self, ndim=1):
self.ndim = ndim
if self.ndim == 1:
def eval(z):
return -0.25 * np.sqrt(np.pi) * np.exp(loggamma(0.5 * (z - 2)) - loggamma(0.5 * (3 - z)))
elif self.ndim == 3:
def eval(z):
return 2.25 * np.sqrt(np.pi) * (z - 2) / (z - 6) * np.exp(loggamma(0.5 * (z - 4)) - loggamma(0.5 * (5 - z)))
else:
def eval(z):
return np.exp(np.log(2) * (self.ndim - 1) + 2 * loggamma(1 + 0.5 * self.ndim)
+ loggamma(0.5 * (1 + self.ndim - z)) + loggamma(0.5 * z)
- loggamma(1 + self.ndim - 0.5 * z) - loggamma(0.5 * (2 + self.ndim - z))) / np.sqrt(np.pi)
self.eval = eval
class GaussianKernel(BaseKernel):
"""(Mellin transform of) Gaussian kernel."""
def eval(self, z):
return 2**(0.5 * z - 1) * gamma(0.5 * z)
class GaussianSqKernel(BaseKernel):
"""(Mellin transform of) square of Gaussian kernel."""
def eval(self, z):
return 0.5 * gamma(0.5 * z)
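# Hedged sketch (illustrative): each kernel returns the Mellin transform of the corresponding
# real-space kernel, evaluated on the complex contour used by FFTlog.
#
#     kernel = TophatSqKernel(ndim=3)
#     z = 1.5 + 1j * np.linspace(-10., 10., 5)
#     values = kernel(z)  # dispatches to kernel.eval(z)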
|
the-stack_106_28880 | import os
import shutil
import pytest
from ramp_database.model import Model
from ramp_database.testing import add_users
from ramp_database.tools.team import sign_up_team
from ramp_database.tools.submission import get_submissions
from ramp_database.tools.submission import submit_starting_kits
from ramp_database.utils import session_scope
from ramp_database.utils import setup_db
from ramp_database.tools.event import get_problem
from ramp_engine.local import CondaEnvWorker
from ramp_engine.dispatcher import Dispatcher
from ramp_utils import generate_ramp_config
from ramp_utils import read_config
from ramp_utils.testing import database_config_template
from ramp_utils.testing import ramp_config_template
from ramp_utils.deploy import deploy_ramp_event
@pytest.fixture
def session_scope_function(database_connection):
database_config = read_config(database_config_template())
ramp_config = read_config(ramp_config_template())
try:
yield
finally:
        # FIXME: we are recreating the deployment directory but it should be
        # replaced by the creation of a temporary folder.
deployment_dir = os.path.commonpath(
[ramp_config["ramp"]["kit_dir"], ramp_config["ramp"]["data_dir"]]
)
shutil.rmtree(deployment_dir, ignore_errors=True)
db, _ = setup_db(database_config["sqlalchemy"])
Model.metadata.drop_all(db)
def test_deploy_ramp_event_options(session_scope_function):
database_config = read_config(database_config_template())
ramp_config = generate_ramp_config(read_config(ramp_config_template()))
deploy_ramp_event(database_config_template(), ramp_config_template())
# deploy again by forcing the deployment
deploy_ramp_event(database_config_template(), ramp_config_template(), force=True)
    # do not deploy the kit, to trigger the error in the problem when we don't
    # force the deployment
msg_err = "The RAMP problem already exists in the database."
with pytest.raises(ValueError, match=msg_err):
with session_scope(database_config["sqlalchemy"]) as session:
# if one of the ramp-kit or ramp-data folders changed
problem = get_problem(session, "iris")
problem.path_ramp_kit = problem.path_ramp_kit + "_xxx"
session.commit()
deploy_ramp_event(
database_config_template(),
ramp_config_template(),
setup_ramp_repo=False,
force=False,
)
problem = get_problem(session, "iris")
problem.path_ramp_kit = ramp_config["ramp_kit_dir"]
problem.path_ramp_data = problem.path_ramp_data + "_xxx"
session.commit()
deploy_ramp_event(
database_config_template(),
ramp_config_template(),
setup_ramp_repo=False,
force=False,
)
msg_err = "Attempting to overwrite existing event"
with pytest.raises(ValueError, match=msg_err):
with session_scope(database_config["sqlalchemy"]) as session:
# if the problem is the same, then the event should be overwritten
problem = get_problem(session, "iris")
problem.path_ramp_kit = ramp_config["ramp_kit_dir"]
problem.path_ramp_data = ramp_config["ramp_data_dir"]
session.commit()
deploy_ramp_event(
database_config_template(),
ramp_config_template(),
setup_ramp_repo=False,
force=False,
)
def test_deploy_ramp_event(session_scope_function):
database_config = read_config(database_config_template())
event_config_filename = ramp_config_template()
event_config = read_config(event_config_filename)
ramp_config = generate_ramp_config(event_config)
deploy_ramp_event(database_config_template(), ramp_config_template())
# check that we created the archive
assert os.path.isfile(
os.path.join(
ramp_config["ramp_kit_dir"],
"events_archived",
ramp_config["event_name"] + ".zip",
)
)
# simulate that we add users and sign-up for the event and that they
# submitted the starting kit
with session_scope(database_config["sqlalchemy"]) as session:
add_users(session)
sign_up_team(session, ramp_config["event_name"], "test_user")
submit_starting_kits(
session,
ramp_config["event_name"],
"test_user",
ramp_config["ramp_kit_submissions_dir"],
)
    # run the dispatcher on the events which are in the database
dispatcher = Dispatcher(
config=database_config,
event_config=event_config,
worker=CondaEnvWorker,
n_workers=-1,
hunger_policy="exit",
)
dispatcher.launch()
    # the iris kit contains a submission which should fail for a user
with session_scope(database_config["sqlalchemy"]) as session:
submission = get_submissions(
session, event_config["ramp"]["event_name"], "training_error"
)
assert len(submission) == 1
|
the-stack_106_28881 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable
"""Transposed convolution in python"""
import numpy as np
import scipy.signal
import tvm.topi.testing
from tvm.topi.nn.util import get_pad_tuple
def conv2d_transpose_nchw_python(a_np, w_np, stride, padding, output_padding):
"""Transposed convolution operator in NCHW layout.
Parameters
----------
a_np : numpy.ndarray
4-D with shape [batch, in_channel, in_height, in_width]
w_np : numpy.ndarray
4-D with shape [in_channel, num_filter, filter_height, filter_width]
stride : int or a list/tuple of two ints
Stride size, or [stride_height, stride_width]
padding : int or str
Padding size, or ['VALID', 'SAME']
output_padding : int or a list/tuple of two ints
Use to disambiguate the output shape.
Returns
-------
b_np : np.ndarray
4-D with shape [batch, out_channel, out_height, out_width]
"""
batch, in_c, in_h, in_w = a_np.shape
_, out_c, filter_h, filter_w = w_np.shape
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(output_padding, int):
opad_h = opad_w = output_padding
else:
opad_h, opad_w = output_padding
assert opad_h < stride_h and opad_w < stride_w
# dilate stage
dilated_a_np = tvm.topi.testing.dilate_python(a_np, [1, 1, stride_h, stride_w])
# padding stage
fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w))
bpad_top = filter_h - 1 - fpad_top
bpad_bottom = filter_h - 1 - fpad_bottom + opad_h
bpad_left = filter_w - 1 - fpad_left
bpad_right = filter_w - 1 - fpad_right + opad_w
padded_a_np = np.zeros(
(
batch,
in_c,
dilated_a_np.shape[2] + bpad_top + bpad_bottom,
dilated_a_np.shape[3] + bpad_left + bpad_right,
)
)
padded_a_np[
:,
:,
bpad_top : dilated_a_np.shape[2] + bpad_top,
bpad_left : dilated_a_np.shape[3] + bpad_left,
] = dilated_a_np
# convolution stage
out_h = (in_h - 1) * stride_h - fpad_top - fpad_bottom + filter_h + opad_h
out_w = (in_w - 1) * stride_w - fpad_left - fpad_right + filter_w + opad_w
b_np = np.zeros((batch, out_c, out_h, out_w))
for n in range(batch):
for f in range(out_c):
for c in range(in_c):
out = scipy.signal.convolve2d(padded_a_np[n, c], w_np[c, f], mode="valid")
b_np[n, f] += out
return b_np
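# Hedged shape sketch (illustrative sizes, not part of the original tests): with in_h = in_w = 8,
# stride = 2, padding = 1, a 4x4 kernel and no output padding, the formula above gives
# (8 - 1) * 2 - 1 - 1 + 4 = 16, so
#     a = np.random.uniform(size=(1, 3, 8, 8)).astype("float32")
#     w = np.random.uniform(size=(3, 16, 4, 4)).astype("float32")
#     conv2d_transpose_nchw_python(a, w, stride=2, padding=1, output_padding=0).shape
# evaluates to (1, 16, 16, 16).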
def conv2d_transpose_nhwc_python(
a_nhwc, weight, weight_format, stride, padding, output_padding=(0, 0)
):
"""Transposed convolution operator in NHWC layout.
Parameters
----------
a_nhwc : numpy.ndarray
4-D with shape [batch, in_height, in_width, in_channel]
weight : numpy.ndarray
4-D in formats HWIO, HWOI, OIHW or IOHW
weight_format : str
['HWIO', 'HWOI', 'OIHW', 'IOHW']
stride : int or a list/tuple of two ints
Stride size, or [stride_height, stride_width]
padding : int or str
        Padding size, or ['VALID', 'SAME']
    output_padding : int or a list/tuple of two ints
        Use to disambiguate the output shape.
    Returns
    -------
    b_np : np.ndarray
        4-D with shape [batch, out_height, out_width, out_channel]
"""
assert a_nhwc.ndim == 4, "a_nhwc number of dimensions should be 4"
assert weight.ndim == 4, "weight number of dimensions should be 4"
a_nchw = np.transpose(a_nhwc, (0, 3, 1, 2))
# conv2d_transpose_nchw_python needs kernel layout to be IOHW
if weight_format == "HWIO":
w_iohw = np.transpose(weight, (2, 3, 0, 1))
elif weight_format == "HWOI":
w_iohw = np.transpose(weight, (3, 2, 0, 1))
elif weight_format == "OIHW":
w_iohw = np.transpose(weight, (1, 0, 2, 3))
elif weight_format == "IOHW":
w_iohw = weight
else:
raise ValueError("Valid weight_formats are HWIO, HWOI, OIHW or IOHW")
res_nchw = conv2d_transpose_nchw_python(
a_nchw, w_iohw, stride, padding, output_padding=output_padding
)
res_nhwc = np.transpose(res_nchw, (0, 2, 3, 1))
return res_nhwc
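if __name__ == "__main__":
    # Hedged smoke test (illustrative sizes only, not part of the original test suite):
    # the NHWC wrapper should agree with the NCHW reference up to layout/weight transposes.
    a_nhwc = np.random.uniform(size=(1, 8, 8, 3)).astype("float32")
    w_hwio = np.random.uniform(size=(4, 4, 3, 16)).astype("float32")
    out = conv2d_transpose_nhwc_python(a_nhwc, w_hwio, "HWIO", stride=2, padding=1)
    print(out.shape)  # expected (1, 16, 16, 16) for these sizes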
|
the-stack_106_28883 | """
Validate and clean request parameters for our endpoints using Django forms
"""
from datetime import MAXYEAR
from functools import reduce
from django import forms
from django.conf import settings
from django.utils.translation import get_language
import arrow
from .defaults import QUERY_ANALYZERS, RELATED_CONTENT_BOOST
from .filter_definitions import FILTERS, AvailabilityFilterDefinition
# Instantiate filter fields for each filter defined in settings
# It is of the form:
# {
# subjects: (
# ArrayField(required=False, base_type=forms.CharField(max_length=50)),
# True,
# ),
# subjects_include: (
# CharField(required=False, max_length=20),
# False,
# ),
# languages: (
# MultipleChoiceField(required=False, choices=LANGUAGES),
# True,
# ),
# }
# Where the first item is an instance of the form field and the second is a boolean
# indicating whether this formfield expects a list or a single value.
FILTER_FIELDS = {
key: value
for filter_definition in FILTERS.values()
for key, value in filter_definition.get_form_fields().items()
}
class SearchForm(forms.Form):
"""Validate the query string params in a search request."""
OBJECTS, FILTERS = "objects", "filters"
SCOPE_CHOICES = ((OBJECTS, "Objects"), (FILTERS, "Filters"))
limit = forms.IntegerField(required=False, min_value=1, initial=10)
query = forms.CharField(required=False, min_length=3, max_length=100)
offset = forms.IntegerField(required=False, min_value=0, initial=0)
scope = forms.ChoiceField(required=False, choices=SCOPE_CHOICES)
class CourseSearchForm(SearchForm):
"""
Validate the query string params in the course search request, connect them to filter
definitions and generate Elasticsearch queries.
"""
def __init__(self, *args, data=None, **kwargs):
"""
Adapt the search form to handle filters:
- Fix the QueryDict value getter to properly handle multi-value parameters,
- Add a field instance to the form for each filter,
- Define the `states` property as it is used by several methods.
"""
# QueryDict/MultiValueDict breaks lists: we need to fix it
data_fixed = (
{
k: data.getlist(k)
# Form fields are marked to expect lists as input or not as explained above
if (k in FILTER_FIELDS and FILTER_FIELDS[k][1] is True) else v[0]
for k, v in data.lists()
}
if data
else {}
)
super().__init__(data=data_fixed, *args, **kwargs)
self.fields.update({k: v[0] for k, v in FILTER_FIELDS.items()})
self.states = None
def clean_availability(self):
"""
Calculate and set the list of states relevant with the current availability filter.
e.g. if we filter on OPEN courses, only the course runs in state 0 (ongoing open) or
1 (future open) should be considered for sorting and computation of the course's state.
"""
availabilities = self.cleaned_data.get("availability", [])
if AvailabilityFilterDefinition.OPEN in availabilities:
self.states = [0, 1]
elif AvailabilityFilterDefinition.ONGOING in availabilities:
self.states = [0, 4]
elif AvailabilityFilterDefinition.COMING_SOON in availabilities:
self.states = [1, 2, 3]
elif AvailabilityFilterDefinition.ARCHIVED in availabilities:
self.states = [5]
return availabilities
def get_script_fields(self):
"""
        Build the part of the Elasticsearch query that defines script fields, i.e. fields that cannot
        be indexed because they are dynamic and should be calculated at query time:
- ms_since_epoch: evolves with time to stay relevant on course dates even if the ES
instance and/or the Django server are long running.
- languages and states: depend on the filters applied by the user so that we only take into
consideration course runs that are interesting for this search.
- use_case: the script is used both for sorting and field computations because most of the
        code is common and there is no other way to share code.
Note: we use script storage to save time on the script compilation, which is an expensive
operation. We'll only do it once at bootstrap time.
"""
return {
"state": {
"script": {
"id": "state",
"params": {
"languages": self.cleaned_data.get("languages") or None,
"ms_since_epoch": arrow.utcnow().timestamp * 1000,
"states": self.states,
"use_case": "field",
},
}
}
}
def get_sorting_script(self):
"""
        Build the part of the Elasticsearch query that defines sorting. We use a script for sorting
because we sort courses based on the complex and dynamic status of their course runs which
are under a nested field. The parameters passed to the script are up-to-date at query time:
- ms_since_epoch: evolves with time to stay relevant on course dates even if the ES
instance and/or the Django server are long running,
- languages and states: depend on the filters applied by the user so that we only take into
consideration course runs that are interesting for this search,
- max_date: passed as parameter to optimize script compilation,
- use_case: the script is used both for sorting and field computations because most of the
        code is common and there is no other way to share code.
Call the relevant sorting script for courses lists, regenerating the parameters on each
call. This will allow the ms_since_epoch value to stay relevant even if the ES instance
and/or the Django server are long running.
The list of languages and states are passed to the script because the context of the
search defines which course runs are relevant or not for sorting.
Note: we use script storage to save time on the script compilation, which is an expensive
operation. We'll only do it once at bootstrap time.
"""
return {
"_script": {
"order": "asc",
"script": {
"id": "state",
"params": {
"languages": self.cleaned_data.get("languages") or None,
"max_date": arrow.get(MAXYEAR, 12, 31).timestamp * 1000,
"ms_since_epoch": arrow.utcnow().timestamp * 1000,
"states": self.states,
"use_case": "sorting",
},
},
"type": "number",
}
}
def get_queries(self):
"""
Aggregate queries from each filter definition.
The full text query is treated directly as it does not require heavy lifting.
Returns:
--------
List[Dict]: a list of dictionaries each mapping the name of a filter with a query
fragment that composes the global Elasticsearch query. For example:
[
{
"key": "new",
"fragment": [{'term': {'is_new': True}}]
}
...
]
"""
# Always filter out courses that are not flagged for listing
queries = [{"key": "is_listed", "fragment": [{"term": {"is_listed": True}}]}]
# Add the query fragments of each filter definition to the list of queries
for filter_definition in FILTERS.values():
queries = queries + filter_definition.get_query_fragment(self.cleaned_data)
# Full text search is a regular (multilingual) match query
full_text = self.cleaned_data.get("query")
if full_text:
related_content_boost = getattr(
settings, "RICHIE_RELATED_CONTENT_BOOST", RELATED_CONTENT_BOOST
)
lang = get_language()
queries.append(
{
"key": "query",
"fragment": [
{
"multi_match": {
"analyzer": QUERY_ANALYZERS[lang],
"fields": [
"description.*",
"title.*",
f"categories_names.*^{related_content_boost}",
f"organizations_names.*^{related_content_boost}",
f"persons_names.*^{related_content_boost}",
],
"query": full_text,
"type": "cross_fields",
}
}
],
}
)
return queries
def build_es_query(self):
"""
Build the actual Elasticsearch search query and aggregation query from the fragments
returned by each filter definition.
Returns:
--------
Tuple:
- limit (int): the maximum number of results to be returned by Elasticsearch,
- offset (int): the offset from which results are returned (for pagination),
- query (Dict): the raw Elasticsearch query as per:
https://elastic.co/guide/en/elasticsearch/reference/current/search.html
- aggs (Dict): the raw Elasticsearch aggregation as per:
https://elastic.co/guide/en/elasticsearch/reference/current/search-aggregations.html
"""
# queries is an array of individual queries that will be combined through "bool" before
# we pass them to ES. See the docs on bool queries.
# https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-bool-query.html
queries = self.get_queries()
# Concatenate all the sub-queries lists together to form the queries list
query = {
"bool": {
"must":
# queries => map(pluck("fragment")) => flatten()
[clause for kf_pair in queries for clause in kf_pair["fragment"]]
}
}
# Concatenate our hardcoded filters query fragments with organizations and categories
# terms aggregations build on-the-fly
aggs = {
"all_courses": {
"global": {},
"aggregations": reduce(
# Merge all the partial aggregations dicts together
lambda acc, aggs_fragment: {**acc, **aggs_fragment},
# Generate a partial aggregations dict (an aggs_fragment) for each filter
[
filter.get_aggs_fragment(queries, self.cleaned_data)
for filter in FILTERS.values()
],
{},
),
}
}
return (
self.cleaned_data.get("limit"),
self.cleaned_data.get("offset") or 0,
query,
aggs,
)
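# Hedged usage sketch (illustrative; ``request`` and ``ES_CLIENT`` are assumptions standing
# in for the actual view plumbing):
#
#     form = CourseSearchForm(data=request.query_params)
#     if form.is_valid():
#         limit, offset, query, aggs = form.build_es_query()
#         ES_CLIENT.search(index="courses", body={"query": query, "aggs": aggs},
#                          size=limit, from_=offset)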
class ItemSearchForm(SearchForm):
"""Generate Elasticsearch queries for the category/organization indices."""
def __init__(self, *args, data=None, **kwargs):
"""Fix the QueryDict value getter to properly handle multi-value parameters."""
# QueryDict/MultiValueDict breaks lists: we need to fix it
data_fixed = {k: v[0] for k, v in data.lists()} if data else {}
super().__init__(data=data_fixed, *args, **kwargs)
def build_es_query(self, kind=None):
"""
Build the actual Elasticsearch search query for category/organization indices.
Returns:
--------
Tuple:
- limit (int): the maximum number of results to be returned by Elasticsearch,
- offset (int): the offset from which results are returned (for pagination),
- query (Dict): the raw Elasticsearch query as per:
https://elastic.co/guide/en/elasticsearch/reference/current/search.html
"""
# Create an array to put the clauses of our bool/must query
clauses = []
# Add a term filter by kind for the objects that need it (eg. `kind="subjects"`)
if kind:
clauses.append({"term": {"kind": kind}})
# Add a match on the name field if it was handed by the client
full_text = self.cleaned_data.get("query")
if full_text:
clauses.append(
{
"multi_match": {
"analyzer": QUERY_ANALYZERS[get_language()],
"fields": ["title.*"],
"query": full_text,
}
}
)
# Build the query around the clauses if there are any
if clauses:
query = {"query": {"bool": {"must": clauses}}}
# Build a match_all query by default
else:
query = {"query": {"match_all": {}}}
return (
self.cleaned_data.get("limit"),
self.cleaned_data.get("offset") or 0,
query,
)
|
the-stack_106_28884 | import random
import time
import warnings
import sys
import argparse
import shutil
import os.path as osp
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.optim import SGD
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
import torchvision.transforms as T
import torch.nn.functional as F
sys.path.append('../../..')
from dalib.modules.domain_discriminator import DomainDiscriminator
from common.modules.classifier import Classifier
from dalib.adaptation.dann import DomainAdversarialLoss, ImageClassifier
import common.vision.datasets.partial as datasets
from common.vision.datasets.partial import default_partial as partial
import common.vision.models as models
from common.vision.transforms import ResizeImage
from common.utils.data import ForeverDataIterator
from common.utils.metric import accuracy, ConfusionMatrix
from common.utils.meter import AverageMeter, ProgressMeter
from common.utils.logger import CompleteLogger
from common.utils.analysis import collect_feature, tsne, a_distance
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(args: argparse.Namespace):
logger = CompleteLogger(args.log, args.phase)
print(args)
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
cudnn.benchmark = True
# Data loading code
normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
if args.center_crop:
train_transform = T.Compose([
ResizeImage(256),
T.CenterCrop(224),
T.RandomHorizontalFlip(),
T.ToTensor(),
normalize
])
else:
train_transform = T.Compose([
ResizeImage(256),
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.ToTensor(),
normalize
])
val_transform = T.Compose([
ResizeImage(256),
T.CenterCrop(224),
T.ToTensor(),
normalize
])
dataset = datasets.__dict__[args.data]
partial_dataset = partial(dataset)
train_source_dataset = dataset(root=args.root, task=args.source, download=True, transform=train_transform)
train_source_loader = DataLoader(train_source_dataset, batch_size=args.batch_size,
shuffle=True, num_workers=args.workers, drop_last=True)
train_target_dataset = partial_dataset(root=args.root, task=args.target, download=True, transform=train_transform)
train_target_loader = DataLoader(train_target_dataset, batch_size=args.batch_size,
shuffle=True, num_workers=args.workers, drop_last=True)
val_dataset = partial_dataset(root=args.root, task=args.target, download=True, transform=val_transform)
val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
if args.data == 'DomainNet':
test_dataset = partial_dataset(root=args.root, task=args.target, split='test', download=True,
transform=val_transform)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
else:
test_loader = val_loader
train_source_iter = ForeverDataIterator(train_source_loader)
train_target_iter = ForeverDataIterator(train_target_loader)
# create model
print("=> using pre-trained model '{}'".format(args.arch))
num_classes = train_source_dataset.num_classes
backbone = models.__dict__[args.arch](pretrained=True)
if args.data == 'ImageNetCaltech':
classifier = Classifier(backbone, num_classes, head=backbone.copy_head()).to(device)
else:
classifier = ImageClassifier(backbone, num_classes, args.bottleneck_dim).to(device)
domain_discri = DomainDiscriminator(in_feature=classifier.features_dim, hidden_size=1024).to(device)
# define optimizer and lr scheduler
optimizer = SGD(classifier.get_parameters() + domain_discri.get_parameters(),
args.lr, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=True)
lr_scheduler = LambdaLR(optimizer, lambda x: args.lr * (1. + args.lr_gamma * float(x)) ** (-args.lr_decay))
# define loss function
domain_adv = DomainAdversarialLoss(domain_discri).to(device)
# resume from the best checkpoint
if args.phase != 'train':
checkpoint = torch.load(logger.get_checkpoint_path('best'), map_location='cpu')
classifier.load_state_dict(checkpoint)
    # analyse the model
if args.phase == 'analysis':
# extract features from both domains
feature_extractor = nn.Sequential(classifier.backbone, classifier.bottleneck).to(device)
source_feature = collect_feature(train_source_loader, feature_extractor, device)
target_feature = collect_feature(train_target_loader, feature_extractor, device)
# plot t-SNE
tSNE_filename = osp.join(logger.visualize_directory, 'TSNE.png')
tsne.visualize(source_feature, target_feature, tSNE_filename)
print("Saving t-SNE to", tSNE_filename)
# calculate A-distance, which is a measure for distribution discrepancy
A_distance = a_distance.calculate(source_feature, target_feature, device)
print("A-distance =", A_distance)
return
if args.phase == 'test':
acc1 = validate(test_loader, classifier, args)
print(acc1)
return
# start training
best_acc1 = 0.
for epoch in range(args.epochs):
# train for one epoch
train(train_source_iter, train_target_iter, classifier, domain_adv, optimizer,
lr_scheduler, epoch, args)
# evaluate on validation set
acc1 = validate(val_loader, classifier, args)
# remember best acc@1 and save checkpoint
torch.save(classifier.state_dict(), logger.get_checkpoint_path('latest'))
if acc1 > best_acc1:
shutil.copy(logger.get_checkpoint_path('latest'), logger.get_checkpoint_path('best'))
best_acc1 = max(acc1, best_acc1)
print("best_acc1 = {:3.1f}".format(best_acc1))
# evaluate on test set
classifier.load_state_dict(torch.load(logger.get_checkpoint_path('best')))
acc1 = validate(test_loader, classifier, args)
print("test_acc1 = {:3.1f}".format(acc1))
logger.close()
def train(train_source_iter: ForeverDataIterator, train_target_iter: ForeverDataIterator,
model: ImageClassifier, domain_adv: DomainAdversarialLoss, optimizer: SGD,
lr_scheduler: LambdaLR, epoch: int, args: argparse.Namespace):
batch_time = AverageMeter('Time', ':5.2f')
data_time = AverageMeter('Data', ':5.2f')
losses = AverageMeter('Loss', ':6.2f')
cls_accs = AverageMeter('Cls Acc', ':3.1f')
tgt_accs = AverageMeter('Tgt Acc', ':3.1f')
domain_accs = AverageMeter('Domain Acc', ':3.1f')
progress = ProgressMeter(
args.iters_per_epoch,
[batch_time, data_time, losses, cls_accs, tgt_accs, domain_accs],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
domain_adv.train()
end = time.time()
for i in range(args.iters_per_epoch):
x_s, labels_s = next(train_source_iter)
x_t, labels_t = next(train_target_iter)
x_s = x_s.to(device)
x_t = x_t.to(device)
labels_s = labels_s.to(device)
labels_t = labels_t.to(device)
# measure data loading time
data_time.update(time.time() - end)
# compute output
x = torch.cat((x_s, x_t), dim=0)
y, f = model(x)
y_s, y_t = y.chunk(2, dim=0)
f_s, f_t = f.chunk(2, dim=0)
cls_loss = F.cross_entropy(y_s, labels_s)
transfer_loss = domain_adv(f_s, f_t)
domain_acc = domain_adv.domain_discriminator_accuracy
loss = cls_loss + transfer_loss * args.trade_off
cls_acc = accuracy(y_s, labels_s)[0]
tgt_acc = accuracy(y_t, labels_t)[0]
losses.update(loss.item(), x_s.size(0))
cls_accs.update(cls_acc.item(), x_s.size(0))
tgt_accs.update(tgt_acc.item(), x_s.size(0))
domain_accs.update(domain_acc.item(), x_s.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr_scheduler.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
def validate(val_loader: DataLoader, model: ImageClassifier, args: argparse.Namespace) -> float:
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
if args.per_class_eval:
classes = val_loader.dataset.classes
confmat = ConfusionMatrix(len(classes))
else:
confmat = None
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
images = images.to(device)
target = target.to(device)
# compute output
output, _ = model(images)
loss = F.cross_entropy(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if confmat:
confmat.update(target, output.argmax(1))
losses.update(loss.item(), images.size(0))
top1.update(acc1.item(), images.size(0))
top5.update(acc5.item(), images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
if confmat:
print(confmat.format(classes))
return top1.avg
if __name__ == '__main__':
architecture_names = sorted(
name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name])
)
dataset_names = sorted(
name for name in datasets.__dict__
if not name.startswith("__") and callable(datasets.__dict__[name])
)
parser = argparse.ArgumentParser(description='DANN for Unsupervised Domain Adaptation')
# dataset parameters
parser.add_argument('root', metavar='DIR',
help='root path of dataset')
parser.add_argument('-d', '--data', metavar='DATA', default='Office31',
help='dataset: ' + ' | '.join(dataset_names) +
' (default: Office31)')
parser.add_argument('-s', '--source', help='source domain(s)')
parser.add_argument('-t', '--target', help='target domain(s)')
parser.add_argument('--center-crop', default=False, action='store_true',
help='whether use center crop during training')
# model parameters
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
choices=architecture_names,
help='backbone architecture: ' +
' | '.join(architecture_names) +
' (default: resnet18)')
parser.add_argument('--bottleneck-dim', default=256, type=int,
help='Dimension of bottleneck')
parser.add_argument('--trade-off', default=1., type=float,
help='the trade-off hyper-parameter for transfer loss')
# training parameters
parser.add_argument('-b', '--batch-size', default=36, type=int,
metavar='N',
help='mini-batch size (default: 36)')
parser.add_argument('--lr', '--learning-rate', default=0.002, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--lr-gamma', default=0.001, type=float, help='parameter for lr scheduler')
parser.add_argument('--lr-decay', default=0.75, type=float, help='parameter for lr scheduler')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-3, type=float,
                        metavar='W', help='weight decay (default: 1e-3)',
                        dest='weight_decay')
    parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
                        help='number of data loading workers (default: 2)')
parser.add_argument('--epochs', default=20, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('-i', '--iters-per-epoch', default=1000, type=int,
help='Number of iterations per epoch')
parser.add_argument('-p', '--print-freq', default=100, type=int,
metavar='N', help='print frequency (default: 100)')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--per-class-eval', action='store_true',
help='whether output per-class accuracy during evaluation')
parser.add_argument("--log", type=str, default='dann',
help="Where to save logs, checkpoints and debugging images.")
parser.add_argument("--phase", type=str, default='train', choices=['train', 'test', 'analysis'],
help="When phase is 'test', only test the model."
"When phase is 'analysis', only analysis the model.")
args = parser.parse_args()
main(args)
|
the-stack_106_28885 | import logging as log
import yaml
import os
import cv2 as cv
import json
import sys
import time
# application parameters
app_name = ""
app_version = "0.0.0"
matcher_directory = "repo"
matcher_tolerance = 0.6
extraction_layers = []
haarcascade_face_cascade = cv.CascadeClassifier()
haarcascade_eyes_cascade = cv.CascadeClassifier()
haarcascade_nose_cascade = cv.CascadeClassifier()
haarcascade_mouth_cascade = cv.CascadeClassifier()
fps = 20
camera_port = 0
caffemodel_net = None
caffemodel_confidence_threshold = 1.0
dlib_frame_resize_enabled = False
dlib_frame_resize_scale = 1
dlib_model = "hog"
dlib_upsamples = 1
image_enabled = False
image_output_directory = "../data"
image_type = "png"
image_jpg_quality = 95
image_png_compression = 3
image_ppm_binary_format_flag = 1
image_pgm_binary_format_flag = 1
image_pbm_binary_format_flag = 1
cv_image_params = []
# constants
PROGRAM_START_TIMESTAMP = time.time()
def set_logging(log_level="INFO", log_file="app.log", log_timestamp=True):
if (log_level == "DEBUG"):
log_level = log.DEBUG
elif (log_level == "INFO"):
log_level = log.INFO
elif (log_level == "WARNING"):
log_level = log.WARNING
elif (log_level == "ERROR"):
log_level = log.ERROR
elif (log_level == "CRITICAL"):
log_level = log.CRITICAL
else:
print("ERROR: invalid log level was specified log_level=" + str(log_level))
exit(0)
root = log.getLogger()
root.setLevel(log_level)
formatter = log.Formatter('%(asctime)s [%(levelname)s]: %(message)s')
streamHandler = log.StreamHandler(sys.stdout)
streamHandler.setLevel(log_level)
streamHandler.setFormatter(formatter)
root.addHandler(streamHandler)
if(log_timestamp):
index = log_file.rfind(".")
log_file_path = log_file[:index] + "." + str(int(PROGRAM_START_TIMESTAMP)) + log_file[index:]
os.makedirs("log", exist_ok=True)
fileHandler = log.FileHandler(log_file_path)
fileHandler.setLevel(log_level)
fileHandler.setFormatter(formatter)
root.addHandler(fileHandler)
return
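# Hedged usage sketch (illustrative values): configure console plus timestamped file logging
# before loading the application config.
#
#     set_logging(log_level="DEBUG", log_file="log/app.log", log_timestamp=True)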
def load(config_file_name):
global app_name
global app_version
global extraction_layers
global camera_port
global haarcascade_face_cascade
global haarcascade_eyes_cascade
global haarcascade_nose_cascade
global haarcascade_mouth_cascade
global caffemodel_net
global caffemodel_confidence_threshold
global dlib_frame_resize_enabled
global dlib_frame_resize_scale
global dlib_model
global dlib_upsamples
global image_enabled
global image_output_directory
global image_type
global image_jpg_quality
global image_png_compression
global image_ppm_binary_format_flag
global image_pgm_binary_format_flag
global image_pbm_binary_format_flag
    global matcher_directory
    global matcher_tolerance
global cv_image_params
loaded_config = None
with open(config_file_name, "r") as config_file:
try:
loaded_config = yaml.safe_load(config_file)
except yaml.YAMLError as e:
log.error(e)
logging_config = loaded_config["logging"]
log_enabled = bool(logging_config["enabled"])
log_level = str(logging_config["level"])
log_file = str(logging_config["file"])
log_timestamp = bool(logging_config["timestamp"])
if log_enabled:
set_logging(log_level, log_file, log_timestamp)
log.info("Loading config ...")
app_name = str(loaded_config["name"])
app_version = str(loaded_config["version"])
camera_port = int(loaded_config["camera_port"])
image_config = loaded_config["image"]
image_enabled = bool(image_config["enabled"])
image_output_directory = os.path.join(
str(image_config["output_directory"]),
str(int(PROGRAM_START_TIMESTAMP))
)
image_type = str(image_config["type"])
image_jpg_quality = int(image_config["jpg"]["quality"])
image_png_compression = int(image_config["png"]["compression"])
if image_enabled:
os.makedirs(image_output_directory, exist_ok=True)
if image_type == "jpg":
cv_image_params = [int(cv.IMWRITE_JPEG_QUALITY), image_jpg_quality]
elif image_type == "png":
cv_image_params = [int(cv.IMWRITE_PNG_COMPRESSION), image_png_compression]
elif image_type == "ppm":
cv_image_params = [int(cv.IMWRITE_PXM_BINARY), image_ppm_binary_format_flag]
elif image_type == "pgm":
cv_image_params = [int(cv.IMWRITE_PXM_BINARY), image_pgm_binary_format_flag]
elif image_type == "pbm":
cv_image_params = [int(cv.IMWRITE_PXM_BINARY), image_pbm_binary_format_flag]
else:
log.error("Invalid image type: %s" % (image_type))
exit(0)
matcher_config = loaded_config["matcher"]
matcher_directory = str(matcher_config["directory"])
matcher_tolerance = float(matcher_config["tolerance"])
extraction_layers = matcher_config["extraction_layers"]
dlib_config = matcher_config["dlib"]
frame_resize_config = dlib_config["frame_resize"]
dlib_frame_resize_enabled = bool(frame_resize_config["enabled"])
dlib_frame_resize_scale = float(frame_resize_config["scale"])
dlib_model = str(dlib_config["model"])
dlib_upsamples = int(dlib_config["upsamples"])
haarcascade_config = matcher_config["haarcascade"]
haarcascade_face_cascade_path = os.path.join("config", haarcascade_config["face_cascade_file"])
haarcascade_eyes_cascade_path = os.path.join("config", haarcascade_config["eyes_cascade_file"])
haarcascade_nose_cascade_path = os.path.join("config", haarcascade_config["nose_cascade_file"])
haarcascade_mouth_cascade_path = os.path.join("config", haarcascade_config["mouth_cascade_file"])
caffemodel_config = matcher_config["caffemodel"]
caffemodel_prototxt_file = os.path.join("config", caffemodel_config["prototxt_file"])
caffemodel_model_file = os.path.join("config", caffemodel_config["caffemodel_file"])
caffemodel_confidence_threshold = float(caffemodel_config["confidence_threshold"])
caffemodel_net = cv.dnn.readNetFromCaffe(caffemodel_prototxt_file, caffemodel_model_file)
if not haarcascade_face_cascade.load(cv.samples.findFile(haarcascade_face_cascade_path)):
log.error('cv cannot load face cascade file = ' + str(haarcascade_face_cascade_path))
exit(0)
if not haarcascade_eyes_cascade.load(cv.samples.findFile(haarcascade_eyes_cascade_path)):
log.error('cv cannot load eyes cascade file = ' + str(haarcascade_eyes_cascade_path))
exit(0)
if not haarcascade_nose_cascade.load(cv.samples.findFile(haarcascade_nose_cascade_path)):
log.error('cv cannot load nose cascade file = ' + str(haarcascade_nose_cascade_path))
exit(0)
if not haarcascade_mouth_cascade.load(cv.samples.findFile(haarcascade_mouth_cascade_path)):
log.error('cv cannot load mouth cascade file = ' + str(haarcascade_mouth_cascade_path))
exit(0)
log.info("\n" + json.dumps(loaded_config, indent=4))
log.info("Config loading complete.\n")
return
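# Hedged usage sketch (module and file names are assumptions): typical start-up sequence,
# assuming this module is importable as ``config`` and a YAML file with the keys read above
# exists on disk.
#
#     import config
#     config.load("config/config.yml")
#     capture = cv.VideoCapture(config.camera_port)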
|
the-stack_106_28886 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
#################eval lstm example on aclImdb########################
python eval.py --ckpt_path=./lstm-20-390.ckpt
"""
import argparse
import os
import numpy as np
from src.config import lstm_cfg as cfg
from src.dataset import lstm_create_dataset, convert_to_mindrecord
from mindspore import Tensor, nn, Model, context
from mindspore.model_zoo.lstm import SentimentNet
from mindspore.nn import Accuracy
from mindspore.train.callback import LossMonitor
from mindspore.train.serialization import load_checkpoint, load_param_into_net
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='MindSpore LSTM Example')
parser.add_argument('--preprocess', type=str, default='false', choices=['true', 'false'],
help='whether to preprocess data.')
parser.add_argument('--aclimdb_path', type=str, default="./aclImdb",
help='path where the dataset is stored.')
parser.add_argument('--glove_path', type=str, default="./glove",
help='path where the GloVe is stored.')
parser.add_argument('--preprocess_path', type=str, default="./preprocess",
help='path where the pre-process data is stored.')
parser.add_argument('--ckpt_path', type=str, default=None,
help='the checkpoint file path used to evaluate model.')
parser.add_argument('--device_target', type=str, default="GPU", choices=['GPU', 'CPU'],
help='the target device to run, support "GPU", "CPU". Default: "GPU".')
args = parser.parse_args()
context.set_context(
mode=context.GRAPH_MODE,
save_graphs=False,
device_target=args.device_target)
if args.preprocess == "true":
print("============== Starting Data Pre-processing ==============")
convert_to_mindrecord(cfg.embed_size, args.aclimdb_path, args.preprocess_path, args.glove_path)
embedding_table = np.loadtxt(os.path.join(args.preprocess_path, "weight.txt")).astype(np.float32)
network = SentimentNet(vocab_size=embedding_table.shape[0],
embed_size=cfg.embed_size,
num_hiddens=cfg.num_hiddens,
num_layers=cfg.num_layers,
bidirectional=cfg.bidirectional,
num_classes=cfg.num_classes,
weight=Tensor(embedding_table),
batch_size=cfg.batch_size)
loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
opt = nn.Momentum(network.trainable_params(), cfg.learning_rate, cfg.momentum)
loss_cb = LossMonitor()
model = Model(network, loss, opt, {'acc': Accuracy()})
print("============== Starting Testing ==============")
ds_eval = lstm_create_dataset(args.preprocess_path, cfg.batch_size, training=False)
param_dict = load_checkpoint(args.ckpt_path)
load_param_into_net(network, param_dict)
if args.device_target == "CPU":
acc = model.eval(ds_eval, dataset_sink_mode=False)
else:
acc = model.eval(ds_eval)
print("============== Accuracy:{} ==============".format(acc))
|
the-stack_106_28888 | import os
from flask import Flask, render_template, request
from pycardano import (
Address,
Asset,
BlockFrostChainContext,
MultiAsset,
Network,
Transaction,
TransactionBuilder,
TransactionOutput,
TransactionWitnessSet,
Value,
)
app = Flask(__name__)
block_frost_project_id = os.environ.get("BLOCKFROST_ID")
# Use BlockFrostChainContext for simplicity. You can also implement your own chain context.
chain_context = BlockFrostChainContext(block_frost_project_id, network=Network.TESTNET)
def build_transaction(data):
input_addresses = [
Address.from_primitive(bytes.fromhex(sender)) for sender in data["senders"]
]
change_address = Address.from_primitive(bytes.fromhex(data["change_address"]))
transaction_outputs = [
TransactionOutput.from_primitive([address, int(amount) * 1000000])
for address, amount in data["recipients"]
]
print(f"Input addresses: {input_addresses}")
print(f"Transaction outputs: {transaction_outputs}")
print(f"Change address: {change_address}")
builder = TransactionBuilder(chain_context)
for input_address in input_addresses:
builder.add_input_address(input_address)
for transaction_output in transaction_outputs:
builder.add_output(transaction_output)
tx_body = builder.build(change_address=change_address)
# It seems like cardano-serialization-lib will sort policy and asset names by their lengths first, and then
# by their value in lexicographical order. Therefore, without sorting, Nami wallet can potentially
    # reconstruct a different transaction where the order of assets is altered, therefore resulting in a mismatch
# between the signature it creates and the transaction we sent to it.
# Notice that it uses a BTreeMap to store values: https://github.com/Emurgo/cardano-serialization-lib/blob/10.0.4/rust/src/serialization.rs#L3354
# The short-term solution is to sort policies and asset names in the same way as cardano-serialization-lib, so the
# restored transaction in Nami wallet will be identical to the one we sent.
# Long-term solution is to create an issue in cardano-serialization-lib and ask it to respect the order from CBOR.
for output in tx_body.outputs:
if isinstance(output.amount, Value):
for policy in list(output.amount.multi_asset.keys()):
# Sort each asset in current policy
asset = output.amount.multi_asset[policy]
sorted_asset = Asset(
sorted(
asset.items(), key=lambda x: (len(x[0].payload), x[0].payload)
)
)
output.amount.multi_asset[policy] = sorted_asset
# Sort policies
output.amount.multi_asset = MultiAsset(
sorted(output.amount.multi_asset.items(), key=lambda x: x[0].payload)
)
return Transaction(tx_body, TransactionWitnessSet())
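# Hedged sketch of the JSON payload build_transaction() expects (all values below are made
# up): "senders" and "change_address" are hex-encoded address bytes (e.g. as returned by a
# CIP-30 wallet's getUsedAddresses()/getChangeAddress()), and each recipient is an
# [address, amount] pair with the amount in ADA, converted to lovelace above.
#
#     {
#         "senders": ["00a1b2c3..."],
#         "change_address": "00a1b2c3...",
#         "recipients": [["addr_test1qabc...", "5"]]
#     }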
def compose_tx_and_witness(data):
tx = Transaction.from_cbor(data["tx"])
witness = TransactionWitnessSet.from_cbor(data["witness"])
tx.transaction_witness_set = witness
return tx
@app.route("/")
def home_page():
return render_template("index.html")
@app.route("/build_tx", methods=["POST"])
def build_tx():
tx = build_transaction(request.json)
cbor_hex = tx.to_cbor()
print(cbor_hex)
return {"tx": cbor_hex}
@app.route("/submit_tx", methods=["POST"])
def submit_tx():
tx = compose_tx_and_witness(request.json)
tx_id = tx.transaction_body.hash().hex()
print(f"Transaction: \n {tx}")
print(f"Transaction cbor: {tx.to_cbor()}")
print(f"Transaction ID: {tx_id}")
chain_context.submit_tx(tx.to_cbor())
return {"tx_id": tx_id}
|
the-stack_106_28889 | """
Work with *.cab files
"""
from ctypes import pythonapi
from ctypes import cdll
from ctypes import cast
import ctypes as _ctypes
libc = cdll[_ctypes.util.find_library('c')]
libcab = cdll[_ctypes.util.find_library('cabinet')]
PyMem_Malloc = pythonapi.PyMem_Malloc
PyMem_Malloc.restype = _ctypes.c_size_t
PyMem_Malloc.argtypes = [_ctypes.c_size_t]
strncpy = libc.strncpy
strncpy.restype = _ctypes.c_char_p
strncpy.argtypes = [_ctypes.c_char_p, _ctypes.c_char_p, _ctypes.c_size_t]
HOOKFUNC = _ctypes.CFUNCTYPE(_ctypes.c_char_p, _ctypes.c_void_p, _ctypes.c_void_p, _ctypes.c_char_p)
# typedef struct {
# DWORD cbStruct;
# DWORD dwReserved1;
# DWORD dwReserved2;
# DWORD dwFileVersionMS;
# DWORD dwFileVersionLS;
# } CABINETDLLVERSIONINFO, *PCABINETDLLVERSIONINFO;
class CABINETDLLVERSIONINFO(_ctypes.Structure):
    _fields_ = [('cbStruct', _ctypes.c_ulong),
                ('dwReserved1', _ctypes.c_ulong),
                ('dwReserved2', _ctypes.c_ulong),
                ('dwFileVersionMS', _ctypes.c_ulong),
                ('dwFileVersionLS', _ctypes.c_ulong)]
libcab.DllGetVersion.restype = CABINETDLLVERSIONINFO
|
the-stack_106_28890 | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module has different wrappers for the data structures."""
class PaginationResult:
"""PaginationResult wraps a data about a certain page in pagination."""
def __init__(self, model_class, items, pagination):
self.model_class = model_class
self.items = items
self.pagination = pagination
self.total = items.count()
def make_api_structure(self):
"""Makes API structure, converatable to JSON."""
if self.pagination["all"]:
return self.response_all()
return self.response_paginated()
def response_all(self):
return {
"total": self.total,
"per_page": self.total,
"page": 1,
"items": list(self.modelize(self.items))
}
def response_paginated(self):
items = self.items
page_items_before = self.pagination["per_page"] \
* (self.pagination["page"] - 1)
if page_items_before:
items = items.skip(page_items_before)
items = items.limit(self.pagination["per_page"])
return {
"items": list(self.modelize(items)),
"page": self.pagination["page"],
"per_page": self.pagination["per_page"],
"total": self.total
}
def modelize(self, items):
for item in items:
model = self.model_class()
model.update_from_db_document(item)
yield model
|
the-stack_106_28891 | errors = {
"profile_missing": {"errors": "profile with this username does not exist",
"status": 404},
"bad_image": {"errors": "Ensure that the file is an image",
"status": 400},
"follow_exists": {"errors": "You already follow this user",
"status": 403},
"unfollow_failed": {"errors": "You do not follow this user",
"status": 400},
"self_follow": {"errors": "You cannot follow yourself",
"status": 403},
"subscriptions_missing": {"error": "No subscriptions found for this user",
"status": 404},
"notification_missing": {"error": "This notification does not exist",
"status": 404},
"not_owner": {
"error": "You do not have the permission to perform this action",
"status": 403},
}
|
the-stack_106_28892 | #!/share/software/user/open/python/3.6.1/bin/python3
from src.ModelDriver import *
## MODIFY THESE PARAMS FOR SPECIFIC RUN ###
X_train = "/oak/stanford/groups/aboettig/Aparna/NNreviews/TestRobustness/jitterData/train_5.23.18_JitterRad-20.0_jitterPerc-0.25_xyz.txt"
Y_train = "/oak/stanford/groups/aboettig/Aparna/NNproject/clean_data/train_5.23.18_unbalanced_unaugmented_rna_2.txt"
X_dev = "/oak/stanford/groups/aboettig/Aparna/NNproject/clean_data/dev_5.23.18_unbalanced_unaugmented_xyz.txt"
Y_dev = "/oak/stanford/groups/aboettig/Aparna/NNproject/clean_data/dev_5.23.18_unbalanced_unaugmented_rna_2.txt"
version = 0
specific_info = "RF_AbdA_train_5.23.18_JitterRad-20.0_jitterPerc-0.5"
architecture = "rf"
num_estimators = 900
min_sample_split = 15
max_depth = None
max_leaf_nodes = 3
random_state = 0
class_weight = "balanced"
n_jobs = -1
tag = specific_info
## END OF PARAMS TO MODIFY ##
PARAMETERS = {
"X_train" : X_train,
"Y_train" : Y_train,
"X_dev" : X_dev,
"Y_dev" : Y_dev,
"architecture" : architecture,
"num_estimators" : num_estimators,
"min_sample_split" : min_sample_split,
"max_depth" : max_depth,
"max_leaf_nodes" : max_leaf_nodes,
"random_state" : random_state,
"class_weight" : class_weight,
"n_jobs" : n_jobs,
"tag" : tag,
"print_cost" : True
}
modelDriver = ModelDriver(PARAMETERS)
modelDriver.load()
modelDriver.init_model()
modelDriver.run_model()
|
the-stack_106_28893 | import collections
import time
import numpy as np
# from forceDAQ.force import *
from .force import *
_ForceSensorSetting = collections.namedtuple('ForceSensorSetting',
'device_name_prefix device_ids sensor_names remote_control '
'ask_filename calibration_folder '
' zip_data write_Fx write_Fy '
'write_Fz write_Tx write_Ty write_Tz write_trigger1 '
'write_trigger2 reverse_scaling convert_to_forces priority')
class ForceSensor:
def __init__(self, settings):
self.settings = settings
sensors = []
for d_id, sn in zip(settings.device_ids, settings.sensor_names):
try:
reverse_parameter_names = settings.reverse_scaling[str(d_id)]
except:
reverse_parameter_names = []
sensors.append(SensorSettings(device_id = d_id,
device_name_prefix=settings.device_name_prefix,
sensor_name = sn,
calibration_folder=settings.calibration_folder,
reverse_parameter_names=reverse_parameter_names,
rate = 1000, # This could be changed later, let's see
convert_to_FT=settings.convert_to_forces))
self.n_sensors = len(sensors)
self._last_processed_smpl = [0] * self.n_sensors
self.recorder = DataRecorder(sensors,
poll_udp_connection=True,
write_deviceid = len(settings.device_ids)>1,
write_Fx = settings.write_Fx,
write_Fy = settings.write_Fy,
write_Fz = settings.write_Fz,
write_Tx = settings.write_Tx,
write_Ty = settings.write_Ty,
write_Tz = settings.write_Tz,
write_trigger1= settings.write_trigger1,
write_trigger2= settings.write_trigger2,
polling_priority=settings.priority)
time.sleep(0.5)
# self.sensor_type = []
# for proc in self.recorder.force_sensor_processes:
# self.sensor_type+=[proc.sensor_type]
self.recorder.determine_biases(n_samples=500)
def start_recording(self):
self.recorder.start_recording()
def pause_recording(self):
return self.recorder.pause_recording()
def get_data(self, num_samples):
# st_time = time.time()
# print('Start: 0')
# self.recorder.start_recording()
# print('Rec_start: {}'.format(time.time()-st_time))
data = [[]]*self.n_sensors
data_time = [[]] * self.n_sensors
# For multiple sensors, k will need to be redefined differently
k = 0
while k<num_samples:
# if pause_recording:
# app_timer.wait(100)
udp = self.recorder.process_and_write_udp_events()
while len(udp)>0:
udp_event = udp.pop(0)
udp_data = udp_event.byte_string
# print(udp_data)
check_new = self.check_new_samples()
# print(check_new)
for s in check_new:
new_data = list(self.recorder.force_sensor_processes[s].get_Fxyz())
data[s] += [new_data]
data_time[s] += [time.time()]
k+=1
# print(new_data)
# print('Rec_pause: {}'.format(time.time()-st_time))
# self.recorder.pause_recording()
# print('Rec_end: {}'.format(time.time()-st_time))
# app_timer.wait(500)
return data, data_time
def check_new_samples(self):
"""returns list of sensors with new samples"""
rtn = []
for i,cnt in enumerate(map(SensorProcess.get_sample_cnt, self.recorder.force_sensor_processes)):
if self._last_processed_smpl[i] < cnt:
# new sample
self._last_processed_smpl[i] = cnt
rtn.append(i)
return rtn
def quit(self):
self.recorder.quit()
if __name__ == '__main__':
settings = _ForceSensorSetting(device_name_prefix="Dev",
device_ids = [1],
sensor_names = ["FT29531"],
calibration_folder="./",
reverse_scaling = {1: ["Fz"], 2:["Fz"]}, # key: device_id, parameter. E.g.:if x & z dimension of sensor 1 and z dimension of sensor 2 has to be flipped use {1: ["Fx", "Fz"], 2: ["Fz"]}
remote_control=False, ask_filename= False, write_Fx=True,
write_Fy=True, write_Fz=True, write_Tx=False, write_Ty=False,
write_Tz=False, write_trigger1=True, write_trigger2=False,
zip_data=True, convert_to_forces=True,
priority='normal')
a = ForceSensor(settings)
a.get_data(100)
a.recorder.quit()
|
the-stack_106_28895 | from IPython.display import display, Markdown, HTML
import ipywidgets as widgets
from os import listdir, path
from .bank import Bank
import io
from contextlib import redirect_stdout
from . import VERSION
from html import escape as escape_html
def run(bank=None):
if bank is None:
bank = Bank()
menu_dropdown = widgets.Dropdown(
options=[
('',''),
('Outcomes', 'outcome'),
('Bank', 'bank'),
],
description='Menu:',
)
submenu = widgets.Output()
menu_dropdown.observe(change_submenu(submenu,bank),names='value')
display(Markdown(f"## {bank.title}"))
display(menu_dropdown)
display(submenu)
display(Markdown("---"))
display(Markdown(f"`CheckIt Dashboard v{VERSION}`"))
def change_submenu(submenu,bank):
@submenu.capture(clear_output=True)
def callback(value):
if value['new'] == 'outcome':
outcome_submenu(bank)
elif value['new'] == 'bank':
bank_submenu(bank)
return callback
def outcome_submenu(bank):
options = [
(f"{o.slug}: {o.title}",o) for o in bank.outcomes()
]
outcomes_dropdown = widgets.Dropdown(options=options,
description='Outcome:')
preview_button = widgets.Button(description="Create preview")
build_button = widgets.Button(description="Generate seeds")
description = widgets.Output()
generated = widgets.Output()
output = widgets.Output()
def reset(*args,only_generated=False):
o = outcomes_dropdown.value
if not only_generated:
description.clear_output()
with description:
display(Markdown(f"**Description:** {escape_html(o.description)}"))
output.clear_output()
generated.clear_output()
with generated:
display(Markdown(f"*Last generated on:* `{o.generated_on()}`"))
def preview(*args):
o = outcomes_dropdown.value
output.clear_output()
with output:
display(Markdown(f"*Creating preview...*"))
preview = o.HTML_preview()
output.clear_output()
display(HTML(preview))
def build(*args):
o = outcomes_dropdown.value
output.clear_output()
with output:
display(Markdown("Generating 10,000 seeds..."))
o.generate_exercises(regenerate=True)
reset(only_generated=True)
with output:
display(Markdown("Done!"))
outcomes_dropdown.observe(reset,names="value")
preview_button.on_click(preview)
build_button.on_click(build)
display(widgets.HBox([outcomes_dropdown,preview_button,build_button]))
display(description)
display(generated)
display(output)
reset()
def bank_submenu(bank):
options = [
(f"{o.slug}: {o.title}",o) for o in bank.outcomes()
]
outcomes_select = widgets.SelectMultiple(
options=options,
value=[o[1] for o in options],
rows=min(6,len(options)),
description='Outcomes:',
)
amount_input = widgets.BoundedIntText(
value=300,
min=1,
max=1000,
step=1,
description='# Exercises:'
)
publicity_dropdown = widgets.Dropdown(options=[("Private",False),("Public",True)],
description='Publicity:')
build_label = widgets.Label(value="Build:")
viewer_button = widgets.Button(description="Viewer")
canvas_button = widgets.Button(description="Canvas")
brightspace_button = widgets.Button(description="Brightspace")
moodle_button = widgets.Button(description="Moodle")
buttons = widgets.HBox([build_label,viewer_button,canvas_button,brightspace_button,moodle_button])
output = widgets.Output()
def viewer(*args):
p = publicity_dropdown.value
r = not p
a = amount_input.value
output.clear_output()
with output:
display(Markdown("Building Viewer..."))
bank.write_json(public=p,amount=a,randomized=r)
display(Markdown("Done!"))
viewer_button.on_click(viewer)
display(outcomes_select)
display(publicity_dropdown)
display(amount_input)
display(buttons)
display(output)
#
# bank_slugs = [f for f in listdir('banks') if not path.isfile(path.join('banks', f))]
# bank_slugs.sort()
# bank_dropdown_options = ['']+bank_slugs
# bank_dropdown = widgets.Dropdown(options=bank_dropdown_options)
# build_button = widgets.Button(description="Build bank files")
# build_amount_widget = widgets.BoundedIntText(
# value=300,
# min=1,
# max=1000,
# step=1,
# description='Count:',
# )
# build_public_dropdown = widgets.Dropdown(options=[("Non-public",False),("Public",True)])
# def bank_dropdown_callback(c=None):
# bank_output.clear_output()
# if bank_dropdown.value != bank_dropdown_options[0]:
# f = io.StringIO()
# with redirect_stdout(f):
# bank = Bank(bank_dropdown.value)
# bank_errors = f.getvalue()
# boilerplate_button = widgets.Button(description="Create missing outcome files",layout=widgets.Layout(width="auto"))
# def write_boilerplate(c=None):
# bank.write_outcomes_boilerplate()
# boilerplate_button.description = boilerplate_button.description + " - Done!"
# boilerplate_button.on_click(write_boilerplate)
# bank_suboutput = widgets.Output()
# def build_bank(c=None):
# bank_suboutput.clear_output()
# with bank_suboutput:
# bank.generate_exercises(public=build_public_dropdown.value,amount=build_amount_widget.value,regenerate=True)
# print("Now building all output formats...")
# f = io.StringIO()
# with redirect_stdout(f):
# bank.build(public=build_public_dropdown.value,amount=build_amount_widget.value,regenerate=False)
# display(Markdown(f.getvalue()))
# build_button.on_click(build_bank)
# outcomes_dropdown = widgets.Dropdown(options=[(f"{o.slug}: {o.title}",o) for o in bank.outcomes])
# def preview_outcome(c=None):
# bank_suboutput.clear_output()
# with bank_suboutput:
# display(HTML(f"<strong>Description:</strong>" +
# f"<em>{outcomes_dropdown.value.description}</em>"))
# display(HTML(outcomes_dropdown.value.HTML_preview()))
# outcome_button = widgets.Button(description="Preview exercise")
# outcome_button.on_click(preview_outcome)
# with bank_output:
# display(Markdown(f'### {bank.title}'))
# display(HTML(bank_errors))
# display(boilerplate_button)
# display(widgets.HBox([build_button,build_public_dropdown,build_amount_widget]))
# display(widgets.HBox([outcome_button,outcomes_dropdown]))
# display(bank_suboutput)
# bank_dropdown.observe(bank_dropdown_callback,names='value')
# display(Markdown("### Select a bank directory"))
# display(bank_dropdown)
# display(bank_output)
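# Hedged usage sketch (added for illustration; the package/import path
# "checkit" is an assumption based on the relative imports above):
#
#     from checkit.dashboard import run
#     run()            # builds a default Bank() and renders the widget menu
#     run(Bank())      # or pass an existing Bank explicitly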
|
the-stack_106_28896 | """
Train the model on RANDOM DATA
Reference : Barnes et al. [2020, JAMES]
Author : Zachary M. Labe
Date : 19 October 2020
"""
### Import packages
import math
import time
import matplotlib.pyplot as plt
import numpy as np
import keras.backend as K
from keras.layers import Dense, Activation
from keras import regularizers
from keras import metrics
from keras import optimizers
from keras.models import Sequential
import tensorflow.keras as keras
import tensorflow as tf
import pandas as pd
import innvestigate
import random
import scipy.stats as stats
import cmocean as cmocean
import calc_Utilities as UT
import calc_dataFunctions as df
import calc_Stats as dSS
import calc_LRP as LRP
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=DeprecationWarning)
### Prevent tensorflow 2.+ deprecation warnings
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
### LRP param
DEFAULT_NUM_BWO_ITERATIONS = 200
DEFAULT_BWO_LEARNING_RATE = .01
### Plotting defaults
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
###############################################################################
###############################################################################
###############################################################################
### Data preliminaries
directorydataLLL = '/Users/zlabe/Data/LENS/monthly'
directorydataBB = '/Users/zlabe/Data/BEST/'
directorydataEE = '/Users/zlabe/Data/ERA5/'
datasetsingle = ['RANDOM']
seasons = ['annual']
timerandom = np.arange(1920,2080+1,1)
yearsall = [timerandom]
directoriesall = [directorydataLLL]
### Set counter
SAMPLEQ = 100
### Begin model
valslopesexperi = []
valrrexperi = []
lrpmapsall = []
for sis,singlesimulation in enumerate(datasetsingle):
for seas in range(len(seasons)):
lrpmapstime = []
valslopes = []
valrr = []
for isample in range(SAMPLEQ):
###############################################################################
###############################################################################
###############################################################################
### ANN preliminaries
variq = 'T2M'
monthlychoice = seasons[seas]
reg_name = 'Globe'
lat_bounds,lon_bounds = UT.regions(reg_name)
directoryfigure = '/Users/zlabe/Desktop/SINGLE_v2.0/Histogram/'
experiment_result = pd.DataFrame(columns=['actual iters','hiddens','cascade',
'RMSE Train','RMSE Test',
'ridge penalty','zero mean',
'zero merid mean','land only?','ocean only?'])
### Define primary dataset to use
dataset = singlesimulation
modelType = dataset
### Whether to test and plot the results using obs data
test_on_obs = True
dataset_obs = '20CRv3'
if dataset_obs == '20CRv3':
year_obsall = np.arange(yearsall[sis].min(),2015+1,1)
elif dataset_obs == 'ERA5':
year_obsall = np.arange(1979,2019+1,1)
if monthlychoice == 'DJF':
obsyearstart = year_obsall.min()+1
year_obs = year_obsall[1:]
else:
obsyearstart = year_obsall.min()
year_obs = year_obsall
### Remove the annual mean? True to subtract it from dataset ##########
rm_annual_mean = False #################################################
if rm_annual_mean == True:
directoryfigure = '/Users/zlabe/Desktop/SINGLE_v1.2/rm_annual_mean/'
### Remove the meridional mean? True to subtract it from dataset ######
rm_merid_mean = False #################################################
if rm_merid_mean == True:
directoryfigure = '/Users/zlabe/Desktop/SINGLE_v1.2/rm_merid_mean/'
### Calculate only over land? True if land ############################
land_only = False ######################################################
if land_only == True:
directoryfigure = '/Users/zlabe/Desktop/SINGLE_v1.2/land_only/'
### Calculate only over ocean? True if ocean ##########################
ocean_only = False #####################################################
if ocean_only == True:
directoryfigure = '/Users/zlabe/Desktop/SINGLE_v1.2/ocean_only/'
### Remove the ensemble mean? True to subtract it from dataset ##########
rm_ensemble_mean = False ##############################################
if rm_ensemble_mean == True:
directoryfigure = '/Users/zlabe/Desktop/SINGLE_v1.2/rm_ensemble_mean/'
### Split the data into training and testing sets? value of 1 will use all
### data as training, .8 will use 80% training, 20% testing; etc.
segment_data_factor = .8
### iterations is for the # of sample runs the model will use. Must be a
### list, but can be a list with only one object
iterations = [150]
### Hiddens corresponds to the number of hidden layers the nnet will use - 0
### for linear model, or a list [10, 20, 5] for multiple layers of nodes
### (10 nodes in first layer, 20 in second, etc); The "loop" part
### allows you to loop through multiple architectures. For example,
### hiddens_loop = [[2,4],[0],[1 1 1]] would produce three separate NNs, the
### first with 2 hidden layers of 2 and 4 nodes, the next the linear model,
### and the next would be 3 hidden layers of 1 node each.
### Set useGPU to True to use the GPU, but only if you selected the GPU
### Runtime in the menu at the top of this page
useGPU = False
### Set Cascade to True to utilize the nnet's cascade function
cascade = False
### Plot within the training loop - may want to set to False when testing out
### larger sets of parameters
plot_in_train = False
###############################################################################
###############################################################################
###############################################################################
### Read in model and observational/reanalysis data
def read_primary_dataset(variq,dataset,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
data,lats,lons = df.readFiles(variq,dataset,monthlychoice)
datar,lats,lons = df.getRegion(data,lats,lons,lat_bounds,lon_bounds)
print('\nOur dataset: ',dataset,' is shaped',data.shape)
return datar,lats,lons
def read_obs_dataset(variq,dataset_obs,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
data_obs,lats_obs,lons_obs = df.readFiles(variq,dataset_obs,monthlychoice)
data_obs,lats_obs,lons_obs = df.getRegion(data_obs,lats_obs,lons_obs,
lat_bounds,lon_bounds)
if dataset_obs == '20CRv3':
if monthlychoice == 'DJF':
year20cr = np.arange(1837,2015+1)
else:
year20cr = np.arange(1836,2015+1)
year_obsall = np.arange(yearsall[sis].min(),yearsall[sis].max()+1,1)
yearqq = np.where((year20cr >= year_obsall.min()) & (year20cr <= year_obsall.max()))[0]
data_obs = data_obs[yearqq,:,:]
print('our OBS dataset: ',dataset_obs,' is shaped',data_obs.shape)
return data_obs,lats_obs,lons_obs
###############################################################################
###############################################################################
###############################################################################
### Select data to test, train on
def segment_data(data,fac = segment_data_factor):
global random_segment_seed,trainIndices, estIndices
if random_segment_seed == None:
random_segment_seed = int(int(np.random.randint(1, 100000)))
np.random.seed(random_segment_seed)
if fac < 1 :
nrows = data.shape[0]
segment_train = int(np.round(nrows * fac))
segment_test = nrows - segment_train
print('Training on',segment_train,'ensembles, testing on',segment_test)
### Picking out random ensembles
i = 0
trainIndices = list()
while i < segment_train:
line = np.random.randint(0, nrows)
if line not in trainIndices:
trainIndices.append(line)
i += 1
else:
pass
i = 0
testIndices = list()
while i < segment_test:
line = np.random.randint(0, nrows)
if line not in trainIndices:
if line not in testIndices:
testIndices.append(line)
i += 1
else:
pass
### Random ensembles are picked
if debug:
print('Training on ensembles: ',trainIndices)
print('Testing on ensembles: ',testIndices)
### Training segment----------
data_train = ''
for ensemble in trainIndices:
this_row = data[ensemble, :, :, :]
this_row = this_row.reshape(-1,data.shape[1],data.shape[2],
data.shape[3])
if data_train == '':
data_train = np.empty_like(this_row)
data_train = np.vstack((data_train,this_row))
data_train = data_train[1:, :, :, :]
if debug:
print('org data - shape', data.shape)
print('training data - shape', data_train.shape)
### Reshape into X and T
Xtrain = data_train.reshape((data_train.shape[0] * data_train.shape[1]),
(data_train.shape[2] * data_train.shape[3]))
Ttrain = np.tile((np.arange(data_train.shape[1]) + yearsall[sis].min()).reshape(data_train.shape[1],1),
(data_train.shape[0],1))
Xtrain_shape = (data_train.shape[0],data_train.shape[1])
### Testing segment----------
data_test = ''
for ensemble in testIndices:
this_row = data[ensemble, :, :, :]
this_row = this_row.reshape(-1,data.shape[1],data.shape[2],
data.shape[3])
if data_test == '':
data_test = np.empty_like(this_row)
data_test = np.vstack((data_test, this_row))
data_test = data_test[1:, :, :, :]
if debug:
print('testing data', data_test.shape)
### Reshape into X and T
Xtest = data_test.reshape((data_test.shape[0] * data_test.shape[1]),
(data_test.shape[2] * data_test.shape[3]))
Ttest = np.tile((np.arange(data_test.shape[1]) + yearsall[sis].min()).reshape(data_test.shape[1],1),
(data_test.shape[0], 1))
else:
trainIndices = np.arange(0,np.shape(data)[0])
testIndices = np.arange(0,np.shape(data)[0])
print('Training on ensembles: ',trainIndices)
print('Testing on ensembles: ',testIndices)
data_train = data
data_test = data
Xtrain = data_train.reshape((data_train.shape[0] * data_train.shape[1]),
(data_train.shape[2] * data_train.shape[3]))
Ttrain = np.tile((np.arange(data_train.shape[1]) + yearsall[sis].min()).reshape(data_train.shape[1],1),
(data_train.shape[0],1))
Xtrain_shape = (data_train.shape[0], data_train.shape[1])
Xtest = data_test.reshape((data_test.shape[0] * data_test.shape[1]),
(data_test.shape[2] * data_test.shape[3]))
Ttest = np.tile((np.arange(data_test.shape[1]) + yearsall[sis].min()).reshape(data_test.shape[1],1),
(data_test.shape[0],1))
Xtest_shape = (data_test.shape[0], data_test.shape[1])
data_train_shape = data_train.shape[1]
data_test_shape = data_test.shape[1]
### 'unlock' the random seed
np.random.seed(None)
return Xtrain,Ttrain,Xtest,Ttest,Xtest_shape,Xtrain_shape,data_train_shape,data_test_shape,testIndices
###############################################################################
###############################################################################
###############################################################################
### Data management
def shape_obs(data_obs,year_obs):
Xtest_obs = np.reshape(data_obs,(data_obs.shape[0],
(data_obs.shape[1]*data_obs.shape[2])))
Ttest_obs = np.arange(data_obs.shape[0]) + year_obs[0]
return Xtest_obs,Ttest_obs
def consolidate_data():
'''Delete the global variables data and data_obs once the other
variables have been sliced from them. Only run after segment_data and
shape_obs, since this deletes the globals to free memory.'''
global data
global data_obs
del data
del data_obs
###############################################################################
###############################################################################
###############################################################################
### Plotting functions
def adjust_spines(ax, spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 5))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
def plot_prediction (Ttest, test_output, Ttest_obs, obs_output):
### Predictions
plt.figure(figsize=(16,4))
plt.subplot(1, 2, 1)
plt.title('Predicted vs Actual Year for Testing')
plt.xlabel('Actual Year')
plt.ylabel('Predicted Year')
plt.plot(Ttest, test_output, 'o', color='black', label='GCM')
if test_on_obs == True:
plt.plot(Ttest_obs, obs_output,'o',color='deepskyblue',label='obs')
a = min(min(Ttest), min(test_output))
b = max(max(Ttest), max(test_output))
plt.plot((a,b), (a,b), '-', lw=3, alpha=0.7, color='gray')
#plt.axis('square')
plt.xlim(a * .995, b * 1.005)
plt.ylim(a * .995, b * 1.005)
plt.legend()
plt.show()
def plot_training_error(nnet):
### Training error (nnet)
plt.subplot(1, 2, 2)
plt.plot(nnet.getErrors(), color='black')
plt.title('Training Error per Iteration')
plt.xlabel('Training Iteration')
plt.ylabel('Training Error')
plt.show()
def plot_rmse(train_output,Ttrain,test_output,Ttest,data_train_shape,data_test_shape):
### rmse (train_output, Ttrain, test_output, Ttest, data_train_shape, data_test_shape)
plt.figure(figsize=(16, 4))
plt.subplot(1, 2, 1)
rmse_by_year_train = np.sqrt(np.mean(((train_output - Ttrain)**2).reshape(Xtrain_shape),
axis=0))
xs_train = (np.arange(data_train_shape) + yearsall[sis].min())
rmse_by_year_test = np.sqrt(np.mean(((test_output - Ttest)**2).reshape(Xtest_shape),
axis=0))
xs_test = (np.arange(data_test_shape) + yearsall[sis].min())
plt.title('RMSE by year')
plt.xlabel('year')
plt.ylabel('error')
plt.plot(xs_train,rmse_by_year_train,label = 'training error',
color='gold',linewidth=1.5)
plt.plot(xs_test,rmse_by_year_test,label='test error',
color='forestgreen',linewidth=0.7)
plt.legend()
if test_on_obs == True:
plt.subplot(1,2,2)
error_by_year_test_obs = obs_output - Ttest_obs
plt.plot(Ttest_obs,error_by_year_test_obs,label='obs error',
color='deepskyblue',linewidth=2.)
plt.title('Error by year for obs')
plt.xlabel('year')
plt.ylabel('error')
plt.legend()
plt.plot((1979,2020), (0,0), color='gray', linewidth=2.)
plt.xlim(1979,2020)
plt.show()
def plot_weights(nnet, lats, lons, basemap):
# plot maps of the NN weights
plt.figure(figsize=(16, 6))
ploti = 0
nUnitsFirstLayer = nnet.layers[0].nUnits
for i in range(nUnitsFirstLayer):
ploti += 1
plt.subplot(np.ceil(nUnitsFirstLayer/3), 3, ploti)
maxWeightMag = nnet.layers[0].W[1:, i].abs().max().item()
df.drawOnGlobe(((nnet.layers[0].W[1:, i]).cpu().data.numpy()).reshape(len(lats),
len(lons)),
lats,lons,basemap,vmin=-maxWeightMag,vmax=maxWeightMag,
cmap=cmocean.cm.balance)
if(hiddens[0]==0):
plt.title('Linear Weights')
else:
plt.title('First Layer, Unit {}'.format(i+1))
if(cascade is True and hiddens[0]!=0):
plt.figure(figsize=(16, 6))
ploti += 1
plt.subplot(np.ceil(nUnitsFirstLayer/3), 3, ploti)
maxWeightMag = nnet.layers[-1].W[1:Xtrain.shape[1]+1, 0].abs().max().item()
df.drawOnGlobe(((nnet.layers[-1].W[1:Xtrain.shape[1]+1, 0]).cpu().data.numpy()).reshape(len(lats),
len(lons)),
lats,lons,basemap,vmin=-maxWeightMag,
vmax=maxWeightMag,cmap=cmocean.cm.balance)
plt.title('Linear Weights')
plt.tight_layout()
def plot_results(plots = 4):
### Calls all our plot functions together
global nnet,train_output,test_output,obs_output,Ttest,Ttrain,Xtrain_shape,Xtest_shape,data_train_shape,data_test_shape,Ttest_obs,lats,lons,basemap
if plots >=1:
plot_prediction(Ttest, test_output, Ttest_obs, obs_output)
if plots >= 2:
plot_training_error(nnet)
plot_rmse(train_output, Ttrain, test_output, Ttest, data_train_shape, data_test_shape)
if plots == 4:
plot_weights(nnet, lats, lons, basemap)
plt.show()
def plot_classifier_output(class_prob,test_class_prob,Xtest_shape,Xtrain_shape):
prob = class_prob[-1].reshape(Xtrain_shape)
plt.figure(figsize=(14, 6))
plt.plot((np.arange(Xtest_shape[1]) + yearsall[sis].min()),
prob[:,:,1].T, '-',alpha = .7)
plt.plot((np.arange(Xtest_shape[1]) + yearsall[sis].min()),
(np.mean(prob[:, :, 1], axis = 0).reshape(180, -1)),
'b-',linewidth=3.5, alpha = .5, label = 'ensemble average')
plt.title('Classifier Output by Ensemble using Training Data')
plt.xlabel('year')
plt.yticks((0, 1), ['Pre-Baseline', 'Post-Baseline'])
plt.legend()
plt.show()
tprob = test_class_prob[0].reshape(Xtest_shape)
plt.figure(figsize=(14, 6))
plt.plot(((np.arange(Xtest_shape[1]) + yearsall[sis].min())),tprob[:,:,1].T,'-',
alpha = .7)
plt.plot((np.arange(Xtest_shape[1]) + yearsall[sis].min()),
(np.mean(tprob[:, :, 1], axis = 0).reshape(180, -1)),
'r-',linewidth=4,alpha = .5,label = 'ensemble average')
plt.title('Classifier Output by Ensemble using Test Data')
plt.xlabel('year')
plt.yticks((0, 1), ['Pre-Baseline', 'Post-Baseline'])
plt.legend()
plt.show()
def beginFinalPlot(YpredTrain,YpredTest,Ytrain,Ytest,testIndices,years,yearsObs,YpredObs):
"""
Plot prediction of year
"""
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
fig = plt.figure()
ax = plt.subplot(111)
adjust_spines(ax, ['left', 'bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_color('dimgrey')
ax.spines['bottom'].set_color('dimgrey')
ax.spines['left'].set_linewidth(2)
ax.spines['bottom'].set_linewidth(2)
ax.tick_params('both',length=4,width=2,which='major',color='dimgrey')
train_output_rs = YpredTrain.reshape(len(trainIndices),
len(years))
test_output_rs = YpredTest.reshape(len(testIndices),
len(years))
xs_test = (np.arange(np.shape(test_output_rs)[1]) + yearsall[sis].min())
for i in range(0,train_output_rs.shape[0]):
if i == train_output_rs.shape[0]-1:
p3=plt.plot(xs_test,train_output_rs[i,:],'o',
markersize=4,color='lightgray',clip_on=False,
alpha=0.4,markeredgecolor='k',markeredgewidth=0.4,
label=r'\textbf{%s - Training Data}' % singlesimulation)
else:
p3=plt.plot(xs_test,train_output_rs[i,:],'o',
markersize=4,color='lightgray',clip_on=False,
alpha=0.4,markeredgecolor='k',markeredgewidth=0.4)
for i in range(0,test_output_rs.shape[0]):
if i == test_output_rs.shape[0]-1:
p4=plt.plot(xs_test,test_output_rs[i,:],'o',
markersize=4,color='crimson',clip_on=False,alpha=0.3,
markeredgecolor='crimson',markeredgewidth=0.4,
label=r'\textbf{%s - Testing Data}' % singlesimulation)
else:
p4=plt.plot(xs_test,test_output_rs[i,:],'o',
markersize=4,color='crimson',clip_on=False,alpha=0.3,
markeredgecolor='crimson',markeredgewidth=0.4)
if rm_ensemble_mean == False:
iy = np.where(yearsObs>=obsyearstart)[0]
plt.plot(yearsObs[iy],YpredObs[iy],'x',color='deepskyblue',
label=r'\textbf{Reanalysis}',clip_on=False)
plt.xlabel(r'\textbf{ACTUAL YEAR}',fontsize=10,color='dimgrey')
plt.ylabel(r'\textbf{PREDICTED YEAR}',fontsize=10,color='dimgrey')
plt.plot(np.arange(yearsall[sis].min(),yearsall[sis].max()+1,1),np.arange(yearsall[sis].min(),yearsall[sis].max()+1,1),'-',
color='black',linewidth=2,clip_on=False)
plt.xticks(np.arange(yearsall[sis].min(),2101,20),map(str,np.arange(yearsall[sis].min(),2101,20)),size=6)
plt.yticks(np.arange(yearsall[sis].min(),2101,20),map(str,np.arange(yearsall[sis].min(),2101,20)),size=6)
plt.xlim([yearsall[sis].min(),yearsall[sis].max()])
plt.ylim([yearsall[sis].min(),yearsall[sis].max()])
plt.title(r'\textbf{[ %s ] $\bf{\longrightarrow}$ RMSE Train = %s; RMSE Test = %s}' % (variq,np.round(dSS.rmse(YpredTrain[:,],
Ytrain[:,0]),1),np.round(dSS.rmse(YpredTest[:,],
Ytest[:,0]),
decimals=1)),
color='k',
fontsize=15)
iyears = np.where(Ytest<2000)[0]
plt.text(yearsall[sis].max(),yearsall[sis].min()+5, r'\textbf{Test RMSE before 2000 = %s}' % (np.round(dSS.rmse(YpredTest[iyears,],
Ytest[iyears,0]),
decimals=1)),
fontsize=5,ha='right')
iyears = np.where(Ytest>=2000)[0]
plt.text(yearsall[sis].max(),yearsall[sis].min(), r'\textbf{Test RMSE after 2000 = %s}' % (np.round(dSS.rmse(YpredTest[iyears,],
Ytest[iyears,0]),
decimals=1)),
fontsize=5,ha='right')
leg = plt.legend(shadow=False,fontsize=7,loc='upper left',
bbox_to_anchor=(-0.01,1),fancybox=True,ncol=1,frameon=False,
handlelength=1,handletextpad=0.5)
savefigName = modelType+'_'+variq+'_scatterPred_'+savename
# plt.annotate(savename,(0,.98),xycoords='figure fraction',
# fontsize=5,
# color='gray')
plt.savefig(directoryfigure+savefigName+'_%s_land%s_ocean%s.png' % (monthlychoice,land_only,ocean_only),
dpi=300)
print(np.round(np.corrcoef(yearsObs,YpredObs)[0,1],2))
return
###############################################################################
###############################################################################
###############################################################################
### Neural Network Creation & Training
def movingAverageInputMaps(data,avgHalfChunk):
print(np.shape(data))
dataAvg = np.zeros(data.shape)
halfChunk = 2
for iy in np.arange(0,data.shape[1]):
yRange = np.arange(iy-halfChunk,iy+halfChunk+1)
yRange[yRange<0] = -99
yRange[yRange>=data.shape[1]] = -99
yRange = yRange[yRange>=0]
dataAvg[:,iy,:,:] = np.nanmean(data[:,yRange,:,:],axis=1)
return dataAvg
class TimeHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.times = []
def on_epoch_begin(self, epoch, logs={}):
self.epoch_time_start = time.time()
def on_epoch_end(self, epoch, logs={}):
self.times.append(time.time() - self.epoch_time_start)
def defineNN(hidden, input_shape, output_shape, ridgePenalty):
model = Sequential()
### Initialize first layer
if hidden[0]==0:
### Model is linear
model.add(Dense(1,input_shape=(input_shape,),
activation='linear',use_bias=True,
kernel_regularizer=regularizers.l1_l2(l1=0.00,l2=ridgePenalty),
bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),
kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))
print('\nTHIS IS A LINEAR NN!\n')
else:
### Model is a single node with activation function
model.add(Dense(hidden[0],input_shape=(input_shape,),
activation=actFun, use_bias=True,
kernel_regularizer=regularizers.l1_l2(l1=0.00,l2=ridgePenalty),
bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),
kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))
### Initialize other layers
for layer in hidden[1:]:
model.add(Dense(layer,activation=actFun,
use_bias=True,
kernel_regularizer=regularizers.l1_l2(l1=0.00, l2=0.00),
bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),
kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))
print('\nTHIS IS A ANN!\n')
#### Initialize output layer
model.add(Dense(output_shape,activation=None,use_bias=True,
kernel_regularizer=regularizers.l1_l2(l1=0.00, l2=0.0),
bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),
kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))
### Add softmax layer at the end
model.add(Activation('softmax'))
return model
def trainNN(model, Xtrain, Ytrain, niter=500, verbose=False):
global lr_here, batch_size
lr_here = .01
model.compile(optimizer=optimizers.SGD(lr=lr_here,
momentum=0.9,nesterov=True), #Adadelta .Adam()
loss = 'binary_crossentropy',
metrics=[metrics.categorical_accuracy],)
### Declare the relevant model parameters
batch_size = 32 # np.shape(Xtrain)[0] ### This doesn't seem to affect much in this case
print('----ANN Training: learning rate = '+str(lr_here)+'; activation = '+actFun+'; batch = '+str(batch_size) + '----')
time_callback = TimeHistory()
history = model.fit(Xtrain,Ytrain,batch_size=batch_size,epochs=niter,
shuffle=True,verbose=verbose,
callbacks=[time_callback],
validation_split=0.)
print('******** done training ***********')
return model, history
def test_train_loopClass(Xtrain,Ytrain,Xtest,Ytest,iterations,ridge_penalty,hiddens,plot_in_train=True):
"""or loops to iterate through training iterations, ridge penalty,
and hidden layer list
"""
results = {}
global nnet,random_network_seed
for niter in iterations:
for penalty in ridge_penalty:
for hidden in hiddens:
### Check / use random seed
if random_network_seed == None:
np.random.seed(None)
random_network_seed = int(np.random.randint(1, 100000))
np.random.seed(random_network_seed)
random.seed(random_network_seed)
tf.set_random_seed(0)
### Standardize the data
Xtrain,Xtest,stdVals = dSS.standardize_data(Xtrain,Xtest)
Xmean,Xstd = stdVals
### Define the model
model = defineNN(hidden,
input_shape=np.shape(Xtrain)[1],
output_shape=np.shape(Ytrain)[1],
ridgePenalty=penalty)
### Train the net
model, history = trainNN(model,Xtrain,
Ytrain,niter=niter,verbose=0)
### After training, use the network with training data to
### check that we don't have any errors and output RMSE
rmse_train = dSS.rmse(convert_fuzzyDecade_toYear(Ytrain,startYear,
classChunk),
convert_fuzzyDecade_toYear(model.predict(Xtrain),
startYear,
classChunk))
if type(Ytest) != bool:
rmse_test = 0.
rmse_test = dSS.rmse(convert_fuzzyDecade_toYear(Ytest,
startYear,classChunk),
convert_fuzzyDecade_toYear(model.predict(Xtest),
startYear,
classChunk))
else:
rmse_test = False
this_result = {'iters': niter,
'hiddens' : hidden,
'RMSE Train' : rmse_train,
'RMSE Test' : rmse_test,
'ridge penalty': penalty,
'zero mean' : rm_annual_mean,
'zero merid mean' : rm_merid_mean,
'land only?' : land_only,
'ocean only?' : ocean_only,
'Segment Seed' : random_segment_seed,
'Network Seed' : random_network_seed }
results.update(this_result)
global experiment_result
experiment_result = experiment_result.append(results,
ignore_index=True)
#if True to plot each iter's graphs.
if plot_in_train == True:
plt.figure(figsize = (16,6))
plt.subplot(1,2,1)
plt.plot(history.history['loss'],label = 'training')
plt.title(history.history['loss'][-1])
plt.xlabel('epoch')
plt.xlim(2,len(history.history['loss'])-1)
plt.legend()
plt.subplot(1,2,2)
plt.plot(convert_fuzzyDecade_toYear(Ytrain,startYear,
classChunk),
convert_fuzzyDecade_toYear(model.predict(Xtrain),
startYear,
classChunk),'o',
color='gray')
plt.plot(convert_fuzzyDecade_toYear(Ytest,startYear,
classChunk),
convert_fuzzyDecade_toYear(model.predict(Xtest),
startYear,
classChunk),'x',
color='red')
plt.plot([startYear,yearsall[sis].max()],[startYear,yearsall[sis].max()],'--k')
plt.yticks(np.arange(yearsall[sis].min(),yearsall[sis].max(),10))
plt.xticks(np.arange(yearsall[sis].min(),yearsall[sis].max(),10))
plt.grid(True)
plt.show()
#'unlock' the random seed
np.random.seed(None)
random.seed(None)
tf.set_random_seed(None)
return experiment_result, model
def convert_fuzzyDecade(data,startYear,classChunk):
years = np.arange(startYear-classChunk*2,yearsall[sis].max()+classChunk*2)
chunks = years[::int(classChunk)] + classChunk/2
labels = np.zeros((np.shape(data)[0],len(chunks)))
for iy,y in enumerate(data):
norm = stats.uniform.pdf(years,loc=y-classChunk/2.,scale=classChunk)
vec = []
for sy in years[::classChunk]:
j=np.logical_and(years>sy,years<sy+classChunk)
vec.append(np.sum(norm[j]))
vec = np.asarray(vec)
vec[vec<.0001] = 0. # This should not matter
vec = vec/np.sum(vec)
labels[iy,:] = vec
return labels, chunks
def convert_fuzzyDecade_toYear(label,startYear,classChunk):
years = np.arange(startYear-classChunk*2,yearsall[sis].max()+classChunk*2)
chunks = years[::int(classChunk)] + classChunk/2
return np.sum(label*chunks,axis=1)
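# Added illustration (hand-worked, assuming startYear=1920 and classChunk=10):
# convert_fuzzyDecade spreads a single year across neighbouring decade bins by
# integrating a uniform pdf of width classChunk centred on that year.  For the
# year 1923 the label vector puts ~0.2 of its mass in the 1910-1920 bin
# (centre 1915) and ~0.8 in the 1920-1930 bin (centre 1925); taking the
# probability-weighted mean of the bin centres in convert_fuzzyDecade_toYear,
# 0.2*1915 + 0.8*1925, recovers 1923.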
def invert_year_output(ypred,startYear):
if(option4):
inverted_years = convert_fuzzyDecade_toYear(ypred,startYear,classChunk)
else:
inverted_years = invert_year_outputChunk(ypred,startYear)
return inverted_years
def invert_year_outputChunk(ypred,startYear):
if(len(np.shape(ypred))==1):
maxIndices = np.where(ypred==np.max(ypred))[0]
if(len(maxIndices)>classChunkHalf):
maxIndex = maxIndices[classChunkHalf]
else:
maxIndex = maxIndices[0]
inverted = maxIndex + startYear - classChunkHalf
else:
inverted = np.zeros((np.shape(ypred)[0],))
for ind in np.arange(0,np.shape(ypred)[0]):
maxIndices = np.where(ypred[ind]==np.max(ypred[ind]))[0]
if(len(maxIndices)>classChunkHalf):
maxIndex = maxIndices[classChunkHalf]
else:
maxIndex = maxIndices[0]
inverted[ind] = maxIndex + startYear - classChunkHalf
return inverted
###############################################################################
###############################################################################
###############################################################################
### Results
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
K.clear_session()
### Parameters
debug = True
NNType = 'ANN'
classChunkHalf = 5
classChunk = 10
iSeed = 8#10#8
avgHalfChunk = 0
option4 = True
biasBool = False
if NNType == 'ANN':
hiddensList = [[20,20]]
ridge_penalty = [0.01]
actFun = 'relu'
elif NNType == 'linear':
hiddensList = [[0]]
ridge_penalty = [0.]
actFun = 'linear'
expList = [(0)] # (0,1)
expN = np.size(expList)
iterations = [500] # [500]#[1500]
random_segment = True
foldsN = 1
for avgHalfChunk in (0,): # ([1,5,10]):#([1,2,5,10]):
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# K.get_session()
K.clear_session()
for loop in ([0]): # (0,1,2,3,4,5):
# get info about the region
lat_bounds,lon_bounds = UT.regions(reg_name)
data_all,lats,lons = read_primary_dataset(variq,dataset,
lat_bounds,
lon_bounds)
data_obs_all,lats_obs,lons_obs = read_obs_dataset(variq,dataset_obs,
lat_bounds,
lon_bounds)
test_output_mat = np.empty((np.max(expList)+1,
foldsN,180*int(np.round(np.shape(data_all)[0]*(1.0-segment_data_factor)))))
for exp in expList:
# get the data together
data, data_obs, = data_all, data_obs_all,
if rm_annual_mean == True:
data, data_obs = dSS.remove_annual_mean(data,data_obs,
lats,lons,
lats_obs,lons_obs)
print('*Removed annual mean*')
if rm_merid_mean == True:
data, data_obs = dSS.remove_merid_mean(data,data_obs,
lats,lons,
lats_obs,lons_obs)
print('*Removed meridian mean*')
if rm_ensemble_mean == True:
data = dSS.remove_ensemble_mean(data)
print('*Removed ensemble mean*')
if land_only == True:
data, data_obs = dSS.remove_ocean(data,data_obs)
if ocean_only == True:
data, data_obs = dSS.remove_land(data,data_obs)
for ih in np.arange(0,len(hiddensList)):
hiddens = [hiddensList[ih]]
if hiddens[0][0]==0:
annType = 'linear'
elif hiddens[0][0]==1 and len(hiddens)==1:
annType = 'layers1'
else:
annType = 'layers10x10'
if(avgHalfChunk!=0):
data = movingAverageInputMaps(data,avgHalfChunk)
# ### Loop over folds
for loop in np.arange(0,foldsN):
K.clear_session()
#---------------------------
random_segment_seed = None #34515
#---------------------------
Xtrain,Ytrain,Xtest,Ytest,Xtest_shape,Xtrain_shape,data_train_shape,data_test_shape,testIndices = segment_data(data,segment_data_factor)
# Convert year into decadal class
startYear = Ytrain[0] # define startYear for GLOBAL USE
YtrainClassMulti, decadeChunks = convert_fuzzyDecade(Ytrain,
startYear,
classChunk)
YtestClassMulti, __ = convert_fuzzyDecade(Ytest,
startYear,
classChunk)
# For use later
XtrainS,XtestS,stdVals = dSS.standardize_data(Xtrain,Xtest)
Xmean, Xstd = stdVals
#---------------------------
random_network_seed = None #87750
#---------------------------
# Create and train network
exp_result,model = test_train_loopClass(Xtrain,
YtrainClassMulti,
Xtest,
YtestClassMulti,
iterations=iterations,
ridge_penalty=ridge_penalty,
hiddens=hiddensList,
plot_in_train=plot_in_train)
model.summary()
################################################################################################################################################
# save the model
dirname = '/Users/zlabe/Documents/Research/InternalSignal/savedModels/'
savename = modelType+'_'+variq+'_kerasMultiClassBinaryOption4_Chunk'+ str(classChunk)+'_' + NNType + '_L2_'+ str(ridge_penalty[0])+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(iterations[0]) + '_' + str(hiddensList[0][0]) + 'x' + str(hiddensList[0][-1]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ str(random_network_seed)
savenameModelTestTrain = modelType+'_'+variq+'_modelTrainTest_SegSeed'+str(random_segment_seed)+'_NetSeed'+str(random_network_seed)
if(reg_name=='Globe'):
regSave = ''
else:
regSave = '_' + reg_name
if(rm_annual_mean==True):
savename = savename + '_AnnualMeanRemoved'
savenameModelTestTrain = savenameModelTestTrain + '_AnnualMeanRemoved'
if(rm_ensemble_mean==True):
savename = savename + '_EnsembleMeanRemoved'
savenameModelTestTrain = savenameModelTestTrain + '_EnsembleMeanRemoved'
if(avgHalfChunk!=0):
savename = savename + '_avgHalfChunk' + str(avgHalfChunk)
savenameModelTestTrain = savenameModelTestTrain + '_avgHalfChunk' + str(avgHalfChunk)
savename = savename + regSave
model.save(dirname + savename + '.h5')
np.savez(dirname + savenameModelTestTrain + '.npz',trainModels=trainIndices,testModels=testIndices,Xtrain=Xtrain,Ytrain=Ytrain,Xtest=Xtest,Ytest=Ytest,Xmean=Xmean,Xstd=Xstd,lats=lats,lons=lons)
print('saving ' + savename)
###############################################################
### Make final plot
### Get obs
dataOBSERVATIONS = data_obs
latsOBSERVATIONS = lats_obs
lonsOBSERVATIONS = lons_obs
def findStringMiddle(start,end,s):
return s[s.find(start)+len(start):s.rfind(end)]
if(avgHalfChunk!=0):
dataOBSERVATIONS = movingAverageInputMaps(dataOBSERVATIONS,avgHalfChunk)
Xobs = dataOBSERVATIONS.reshape(dataOBSERVATIONS.shape[0],dataOBSERVATIONS.shape[1]*dataOBSERVATIONS.shape[2])
yearsObs = np.arange(dataOBSERVATIONS.shape[0]) + obsyearstart
annType = 'class'
if monthlychoice == 'DJF':
startYear = yearsall[sis].min()+1
endYear = yearsall[sis].max()
else:
startYear = yearsall[sis].min()
endYear = yearsall[sis].max()
years = np.arange(startYear,endYear+1,1)
Xmeanobs = np.nanmean(Xobs,axis=0)
Xstdobs = np.nanstd(Xobs,axis=0)
XobsS = (Xobs-Xmeanobs)/Xstdobs
XobsS[np.isnan(XobsS)] = 0
if(annType=='class'):
### Chunk by individual year
YpredObs = convert_fuzzyDecade_toYear(model.predict(XobsS),
startYear,
classChunk)
YpredTrain = convert_fuzzyDecade_toYear(model.predict((Xtrain-Xmean)/Xstd),
startYear,
classChunk)
YpredTest = convert_fuzzyDecade_toYear(model.predict((Xtest-Xmean)/Xstd),
startYear,
classChunk)
### Chunk by multidecadal
Ytrainchunk = model.predict((Xtrain-Xmean)/Xstd)
Ytestchunk = model.predict((Xtest-Xmean)/Xstd)
YObschunk = model.predict(XobsS)
YtrainClassMulti = YtrainClassMulti
YtestClassMulti = YtestClassMulti
##############################################################################
##############################################################################
##############################################################################
## Visualizing through LRP
summaryDT,summaryDTFreq,summaryNanCount=LRP.deepTaylorAnalysis(model,
np.append(XtrainS,XtestS,axis=0),
np.append(Ytrain,Ytest,axis=0),
biasBool,annType,classChunk,
startYear)
# for training data only
summaryDTTrain,summaryDTFreqTrain,summaryNanCountTrain=LRP.deepTaylorAnalysis(
model,XtrainS,Ytrain,biasBool,
annType,classChunk,startYear)
biasBool = False
model_nosoftmax = innvestigate.utils.model_wo_softmax(model)
analyzer10=innvestigate.analyzer.relevance_based.relevance_analyzer.LRPAlphaBeta(model_nosoftmax,
alpha=1,beta=0,bias=biasBool)
analyzer_output=analyzer10.analyze(XobsS)
analyzer_output=analyzer_output/np.nansum(analyzer_output,axis=1)[:,np.newaxis]
### Scale LRP
for scale in (0,):#(0,1):
if(scale==1):
summaryDTScaled = summaryDT*Xstd
else:
summaryDTScaled = summaryDT
x_perc = np.zeros(summaryDTScaled.shape)*np.nan
for itime in np.arange(0,summaryDTScaled.shape[0]):
x = summaryDTScaled[itime,:]
if(np.isnan(x[0])):
continue
x_perc[itime,:] = (stats.rankdata(x)-1)/len(x)
numLats = lats.shape[0]
numLons = lons.shape[0]
perclrp = x_perc.reshape(np.shape(summaryDTScaled)[0],numLats,numLons)
lrp = summaryDTScaled.reshape(np.shape(summaryDTScaled)[0],numLats,numLons)*1000
## Define variable for analysis
print('\n\n------------------------')
print(variq,'= Variable!')
print(monthlychoice,'= Time!')
print(reg_name,'= Region!')
print(lat_bounds,lon_bounds)
print(dataset,'= Model!')
print(dataset_obs,'= Observations!\n')
print(rm_annual_mean,'= rm_annual_mean')
print(rm_merid_mean,'= rm_merid_mean')
print(rm_ensemble_mean,'= rm_ensemble_mean')
print(land_only,'= land_only')
print(ocean_only,'= ocean_only')
### Select observations to save
obsactual = yearsObs
obspredic = YpredObs
### Regression
slopeobs,interceptobs,r_valueobs,p_valueobs,std_errobs = stats.linregress(obsactual,obspredic)
### Append slopes
valslopes.append(slopeobs)
valrr.append(r_valueobs)
### Append lrp averaged over all years
lrpyearmean = np.nanmean(lrp,axis=0)
lrpmapstime.append(lrpyearmean)
print('\n\n<<<<<<<<<< COMPLETED ITERATION = %s >>>>>>>>>>>\n\n' % (isample+1))
valslopesexperi.append(valslopes)
valrrexperi.append(valrr)
lrpmapsall.append(lrpmapstime)
### See statistics for observations
modelslopes = np.asarray(valslopesexperi)
modelr = np.asarray(valrrexperi)
modelr2 = modelr**2
lrpmapsallarray = np.asarray(lrpmapsall)
### Save the arrays
directorydataoutput = '/Users/zlabe/Documents/Research/InternalSignal/Data/'
np.savetxt(directorydataoutput + 'Slopes_20CRv3-RANDOM_%s_RANDOMSEED_20ens.txt' % SAMPLEQ,modelslopes)
np.savetxt(directorydataoutput + 'R2_20CRv3-RANDOM_%s_RANDOMSEED_20ens.txt' % SAMPLEQ,modelr2)
##############################################################################
##############################################################################
##############################################################################
def netcdfLENS(lats,lons,var,directory,SAMPLEQ):
print('\n>>> Using netcdf4LENS function!')
from netCDF4 import Dataset
import numpy as np
name = 'LRP_Maps_%s_20ens_RANDOM.nc' % SAMPLEQ
filename = directory + name
ncfile = Dataset(filename,'w',format='NETCDF4')
ncfile.description = 'LRP maps for random sampling'
### Dimensions
ncfile.createDimension('model',var.shape[0])
ncfile.createDimension('samples',var.shape[1])
ncfile.createDimension('lat',var.shape[2])
ncfile.createDimension('lon',var.shape[3])
### Variables
model = ncfile.createVariable('model','f4',('model'))
samples = ncfile.createVariable('samples','f4',('samples'))
latitude = ncfile.createVariable('lat','f4',('lat'))
longitude = ncfile.createVariable('lon','f4',('lon'))
varns = ncfile.createVariable('LRP','f4',('model','samples','lat','lon'))
### Units
varns.units = 'unitless relevance'
ncfile.title = 'LRP relevance'
ncfile.institution = 'Colorado State University'
ncfile.references = 'Barnes et al. [2020]'
### Data
model[:] = np.arange(var.shape[0])
samples[:] = np.arange(var.shape[1])
latitude[:] = lats
longitude[:] = lons
varns[:] = var
ncfile.close()
print('*Completed: Created netCDF4 File!')
netcdfLENS(lats,lons,lrpmapsallarray,directorydataoutput,SAMPLEQ) |
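# Hedged read-back sketch (added for illustration; follows the dimensions and
# variable names written by netcdfLENS above):
#
#     from netCDF4 import Dataset
#     nc = Dataset(directorydataoutput + 'LRP_Maps_%s_20ens_RANDOM.nc' % SAMPLEQ)
#     lrp_read = nc.variables['LRP'][:]   # (model, samples, lat, lon)
#     lat_read = nc.variables['lat'][:]
#     lon_read = nc.variables['lon'][:]
#     nc.close()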
the-stack_106_28897 | import numpy as np
import pandas as pd
from scipy import integrate
import matplotlib.pyplot as plt
class Model(list):
"""
The model class is central. It inherits from a list. Reactions are appended to this list to build the model.
Upon creating a new object logging can be turned off by passing in logging=False
1. Create model, append reactions and set time and species.
2. setup_model()
3. run_model()
Attributes:
species (dict): The starting species concentrations. For example {'Substrate_1' : 100}
species_distributions (dict): The starting species concentrations, with uncertainty using probability distributions from scipy.
For example {'Substrate_1' : norm(100, 10)}
parameters (dict): Parameters. These are loaded from the appended reactions upon running setup_model(). For example {'param_1' : 100}
parameter_distributions (dict): Parameter scipy distributions. These are loaded from the appended reactions upon running setup_model(). For example {'param_1' : norm(100, 10)}
y (numpy array): a numpy array of 2 dimensions. Time by substrate. Filled upon running run_model()
The first dimension gives a list of all the substrate concentrations at that timepoint.
The first dimension is the same size as self.time.
Each index in self.time relates to an index in the first dimension of y.
logging (bool): True gives text feedback upon running some commands
start (int): Model start time
end (int): Model end time
steps (int): The number of timpoints in the model output
mxsteps (int): mxsteps used by scipy.integrate.odeint
time (np.linspace(self.start, self.end, self.steps)): The timepoints of the model
"""
def __init__(self, logging=False):
# Model inherits from list - reaction classes are held in this self list.
super(Model, self).__init__()
""" Time """
self.start = 0
self.end = 100
self.steps = 100
self.mxsteps = 10000
self.time = np.linspace(self.start, self.end, self.steps)
""" Species - used to reset the model, or as the bounds to run ua/sa """
self.species = {}
self.species_distributions = {}
""" Parameters - used to reset the model, or as the bounds to run ua/sa. Set by self.set_parameters_from_reactions() """
self.parameters = {}
self.parameter_distributions = {}
""" Species and parameters used when the model is ran. These are changed each run when doing ua/sa """
self.run_model_species = {}
self.run_model_species_names = []
self.run_model_species_starting_values = []
self.run_model_parameters = {}
self.y = []
self.logging = logging
# Time
def set_time(self, start, end, steps):
"""
This function sets the time parameters for the model. This is how long the model will simulate
Args:
start (int): the start time - usually 0
end (int): the end time (default is 100)
steps (int): the number of timepoints for the output
"""
self.start = start
self.end = end
self.steps = steps
self.time = np.linspace(self.start, self.end, self.steps)
# Setup Model
def set_parameters_from_reactions(self):
"""
Sets all the parameter variables from those set in the reaction classes attached to the model
For each reaction_class, updates self.parameters and self.parameter_distributions with the dictionaries held in each reaction_class.
This will add new keys, or overwrite existing ones.
Where only a distribution is set, the median value of this distribution will be used for the parameter value.
Called by self.setup_model()
"""
self.run_model_parameters = {}
if self.logging == True:
print('-- Setting default parameters, using means of distributions where undefined: --')
for reaction_class in self:
reaction_class.set_parameter_defaults_to_mean()
if self.logging==True:
print(reaction_class.parameters)
# if parameter not already set in model, load it from reaction
for name in reaction_class.parameters:
if name not in self.parameters:
self.parameters[name] = reaction_class.parameters[name]
# if parameter_distribution not already set in model, load it from reaction
for name in reaction_class.parameter_distributions:
if name not in self.parameter_distributions:
self.parameter_distributions[name] = reaction_class.parameter_distributions[name]
# if parameter not set in model, and hasn't been loaded from reaction, take mean of model_distribution
for name in self.parameter_distributions:
if name not in self.parameters:
if type(self.parameter_distributions[name]) == list or type(self.parameter_distributions[name]) == tuple:
self.parameters[name] = (self.parameter_distributions[name][0] + self.parameter_distributions[name][1]) / 2
else:
self.parameters[name] = self.parameter_distributions[name].mean()
if self.logging == True:
print(str(name) + ' - ' + str(self.parameters[name]))
self.run_model_parameters.update(self.parameters)
def update_species(self, species_dict):
"""
This func is used by to update starting species values used by the model
Called by: self.setup_model() and self.reset_model_to_defaults()
"""
self.run_model_species.update(species_dict)
self.run_model_species_names = list(self.run_model_species.keys())
self.run_model_species_starting_values = list(self.run_model_species.values())
def load_species_from_reactions(self):
"""
Loads species which are present in one of the reaction_classes but not in
either self.species or self.species_distributions. Loads them as self.species[name] = 0.
Called by self.setup_model()
"""
if self.logging == True:
print('-- Load unspecified species as default = 0 --')
for reaction in self:
for substrate in reaction.substrates + reaction.products + reaction.reaction_substrate_names:
if substrate not in self.species:
self.species[substrate] = 0
if self.logging == True:
print(str(substrate) + ' ', end='')
if self.logging == True:
print()
def set_species_defaults_to_mean(self):
"""
For any species defined in self.species_distributions, but not in self.species,
set self.species[name] to the median of self.species_distributions[name]
Called by self.setup_model()
"""
if self.logging==True:
print('-- Setting default species, using means of distributions where undefined: --')
for name in self.species_distributions:
if name not in self.species:
if type(self.species_distributions[name]) == list or type(self.species_distributions[name]) == tuple:
self.species[name] = (self.species_distributions[name][0] + self.species_distributions[name][1])/2
else:
self.species[name] = self.species_distributions[name].mean()
if self.logging==True:
print(str(name) + ' - ' + str(self.species[name]))
def setup_model(self):
"""
Run methods to setup the model.
1. set_species_defaults_to_mean()
2. load_species_from_reactions()
3. update_species(self.species)
4. set_parameters_from_reactions()
"""
# Species
self.set_species_defaults_to_mean()
self.load_species_from_reactions()
self.update_species(self.species)
# Parameters
self.set_parameters_from_reactions()
# Reset the model
def reset_reaction_indexes(self):
"""
Called at the end of run_model() to reset the indexes of the substrates and parameters in the reaction classes.
May not be necessary - need to look into this.
"""
for reaction_class in self:
reaction_class.reset_reaction()
def reset_model_to_defaults(self):
"""
Reset the model back to the default settings
This uses self.species and self.parameters to set the run_model attributes, which are used when calling run_model
When running ua the run_model attributes are the ones that are changed.
"""
self.update_species(self.species)
self.run_model_parameters = self.parameters
self.y = []
# Run the model
def deriv(self, y, t):
"""
deriv function called by integrate.odeint(self.deriv, y0, self.time)
For each step when the model is run, the rate for each reaction is calculated and changes in substrates and products calculated.
These are returned by this function as y_prime, which are added to y which is returned by run_model
Args:
y (list): ordered list of substrate values at this current timepoint. Has the same order as self.run_model_species_names
t (float): time; not used in this function but required by the scipy.integrate.odeint callback signature
Returns:
yprime - array of derivatives in the same order as y, giving the rates of change at this timepoint.
"""
yprime = np.zeros(len(y))
for reaction_class in self:
yprime += reaction_class.reaction(y, self.run_model_species_names, self.run_model_parameters)
return yprime
def run_model(self):
"""
Runs the model and outputs y
Uses self.run_model_species, run_model_species_names, self.run_model_species_starting_values and self.run_model_parameters.
These are loaded by calling self.setup_model() before running.
Outputs saved to self.y
"""
y0 = np.array(self.run_model_species_starting_values)
self.y = integrate.odeint(self.deriv, y0, self.time, mxstep=self.mxsteps)
self.reset_reaction_indexes()
return self.y
# Export results as dataframe and plot
def results_dataframe(self):
"""
Gives the results of a model run as a dataframe
Returns:
Pandas dataframe of results
"""
ys_at_t = {'Time' : self.time}
for i in range(len(self.run_model_species_names)):
name = self.run_model_species_names[i]
ys_at_t[name] = []
for t in range(len(self.time)):
ys_at_t[name].append(self.y[t][i])
df = pd.DataFrame(ys_at_t)
return df
def plot_substrate(self, substrate, plot=False, units=['','']):
"""
Plot a graph of substrate concentration vs time.
Args:
substrate (str): Name of substrate to plot
            plot (bool): Default False. If True calls plt.show()
            units (list): [y axis label, x axis label], passed to plt.ylabel and plt.xlabel
"""
ys_at_t = []
i = self.run_model_species_names.index(substrate)
for t in range(len(self.time)):
ys_at_t.append(self.y[t][i])
plt.plot(self.time, ys_at_t, label=substrate)
plt.ylabel(units[0])
plt.xlabel(units[1])
plt.legend()
if plot == True:
plt.show()
    # Check parameters when constraining parameter space
    def check_parameter_limits(self):
        """
        Check whether the current run_model parameters fall within the sampling limits of every reaction.
        Returns:
            bool: True if all reactions are within their sampling limits, False otherwise.
        """
        all_within_limits = True
        for reaction_class in self:
            if reaction_class.sampling_limits(self.run_model_parameters) == False:
                all_within_limits = False
        return all_within_limits
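# Illustrative usage sketch (hedged): assumes `model` is an instance of this
# model class with reaction objects already appended and species / parameter
# distributions defined - those names are placeholders, not part of this module.
#
#     model.setup_model()                  # fill in species and parameter defaults
#     y = model.run_model()                # integrate with scipy.integrate.odeint
#     df = model.results_dataframe()       # results as a pandas DataFrame
#     model.plot_substrate('S1', plot=True, units=['Concentration', 'Time'])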
the-stack_106_28899
#
# MIT License
#
# Copyright (c) 2020 - Present Aaron Ma,
# Copyright (c) 2018 - 2020 Udacity, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import ast
from PIL import Image
import torchvision.transforms as transforms
from torch.autograd import Variable
import torchvision.models as models
from torch import __version__
resnet18 = models.resnet18(pretrained=True)
alexnet = models.alexnet(pretrained=True)
vgg16 = models.vgg16(pretrained=True)
models = {'resnet': resnet18, 'alexnet': alexnet, 'vgg': vgg16}
# obtain ImageNet labels
with open('imagenet1000_clsid_to_human.txt') as imagenet_classes_file:
imagenet_classes_dict = ast.literal_eval(imagenet_classes_file.read())
def classifier(img_path, model_name):
# load the image
img_pil = Image.open(img_path)
# define transforms
preprocess = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
# preprocess the image
img_tensor = preprocess(img_pil)
    # add a batch dimension to the tensor
img_tensor.unsqueeze_(0)
    # wrap input in a Variable - no longer needed for
    # v 0.4 & higher; code changed 04/26/2018 by Jennifer S. to handle PyTorch upgrade
pytorch_ver = __version__.split('.')
    # pytorch versions 0.4 & higher - Variable is deprecated and a tensor is returned
    # directly. So to address the tensor as output (not a wrapper) and to mimic the
    # effect of setting volatile = True (because we are using pretrained models
    # for inference) we can set requires_grad to False. Here we just call
    # requires_grad_(False) on our tensor
if int(pytorch_ver[0]) > 0 or int(pytorch_ver[1]) >= 4:
img_tensor.requires_grad_(False)
    # pytorch versions less than 0.4 - uses Variable because it is not yet deprecated
    else:
        # wrap input in a Variable (volatile=True since we only run inference)
        data = Variable(img_tensor, volatile = True)
# apply model to input
model = models[model_name]
# puts model in evaluation mode
    # instead of (default) training mode
model = model.eval()
# apply data to model - adjusted based upon version to account for
# operating on a Tensor for version 0.4 & higher.
if int(pytorch_ver[0]) > 0 or int(pytorch_ver[1]) >= 4:
output = model(img_tensor)
# pytorch versions less than 0.4
else:
# apply data to model
output = model(data)
# return index corresponding to predicted class
pred_idx = output.data.numpy().argmax()
return imagenet_classes_dict[pred_idx]
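# Illustrative usage (hedged): the image path below is a placeholder, not a file
# shipped with this script.
#
#     label = classifier('example_dog.jpg', 'vgg')
#     print(label)   # prints the predicted ImageNet class description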
the-stack_106_28900
import time
import logging
import ipc.enums
# from pebl.robots.pebl import memory as mem # NOTE disabled because this relies on a closed source library
log = logging.getLogger(__name__)
class TimedLoop(object):
"""
Decorator class to run a function repeatedly with some specified period.
Use:
- write a function which is meant to be called on a loop.
- decorate the function with this class, specifying dt
- call the function in a while or for loop
Example:
# This program will print 'loop_func ran' every .75 seconds
@TimedLoop(dt=.75)
def loop_func():
print('loop_func ran')
while True:
loop_func()
This decorator uses an internal counter in order to properly maintain the
timing, i.e. slow down if ahead and try to catch up if behind. In order to
maintain a proper counter while running multiple looping functions, the
same TimedLoop object must be applied to all functions.
Example:
# This program will print 'funcA ran', 'funcB ran', 'funcC ran' with .75
# second intervals in between.
TL = TimedLoop(dt=.75)
    @TL
def funcA():
print("funcA ran")
@TL
def funcB():
print("funcB ran")
@TL
def funcC():
print("funcC ran")
while True:
funcA()
funcB()
funcC()
"""
def __init__(self, dt):
self.i = 0
self.dt = dt
self.init_time = None
def __call__(self, loop_func, *args, **kwargs):
def wrapper(*args, **kwargs):
# increment call count
self.i += 1
try:
# calculate start time and desired end time
des_end_time = self.i * self.dt + self.init_time
except TypeError: # raised if init_time has not been initialized
self.init_time = time.perf_counter()
des_end_time = self.i * self.dt + self.init_time
# call function
res = loop_func(*args, **kwargs)
# sleep until desired end time or just continue
current_time = time.perf_counter()
# if des_end_time < current_time:
# log.warning(f'Exceeded alloted loop iteration time: {des_end_time - current_time}')
time.sleep(max(des_end_time - current_time, 0))
# return result
return res
return wrapper
def change_dt(self, new_dt):
"""
        Change the dt that the TimedLoop targets between each successive call
        to its wrapped function.
:param new_dt:
:return:
"""
self.i = 0
self.dt = new_dt
self.init_time = None
def reset_start_time(self):
"""
        Reset the start time and iteration counter.
:return:
"""
self.i = 0
self.init_time = None
class SynchronizedTimedLoop(TimedLoop):
def __init__(self, dt, iterations_per_update, timestep_lookup_fn):
"""
Extends the TimedLoop functionality to let you also dynamically change the
dt of the TimedLoop based on an external 'real-time factor'.
:param dt: timestep
        :param iterations_per_update: number of iterations between calls to the
            timestep lookup function
        :param timestep_lookup_fn: a function to call to look up the desired
            timestep (derived from the real-time factor)
"""
super().__init__(dt)
self.iterations_per_update = iterations_per_update
self.timestep_lookup_fn = timestep_lookup_fn
def __call__(self, loop_func, *args, **kwargs):
def wrapper(*args, **kwargs):
# increment call count
self.i += 1
try:
# calculate start time and desired end time
des_end_time = self.i * self.dt + self.init_time
except TypeError: # raised if init_time has not been initialized
self.init_time = time.perf_counter()
des_end_time = self.i * self.dt + self.init_time
# call function
res = loop_func(*args, **kwargs)
# sleep until desired end time or just continue
current_time = time.perf_counter()
# if current_time > des_end_time:
# print(f'Exceeded alloted loop iteration time: {des_end_time - current_time}')
try:
time.sleep(max(des_end_time - current_time, 0))
except OverflowError:
print("OVERFLOW: {}".format(max(des_end_time - current_time,
0)))
print(des_end_time, current_time)
if self.i >= self.iterations_per_update:
self.synchronize()
# return result
return res
return wrapper
def synchronize(self):
"""
Use given lookup function to synchronize
:return:
"""
desired_dt = self.timestep_lookup_fn()
self.change_dt(desired_dt)
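    # Worked example (illustration): with control_loop_period = 0.01 s and a
    # reported real_time_factor of 0.5 (simulation running at half speed), the
    # default lookup function returns 0.01 / 0.5 = 0.02 s, so the loop slows
    # down to stay in step with the simulation clock.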
def get_timestep_lookup_function_by_player(control_loop_period, player):
"""
Create customized timestep lookup function for a Synchronized TimedLoop
based on the player provided.
:param control_loop_period: duration of cycle of the timed loop. [s]
    :param player: one of ipc.enums.PLAYER.DXL, ipc.enums.PLAYER.BULLET or ipc.enums.PLAYER.VREP
:return: a customized function that takes no arguments and returns a
desired_dt based on the scaling from the real_time_factor
"""
    # NOTE: `mem` is unavailable unless the closed-source import above is re-enabled
    if player in (ipc.enums.PLAYER.DXL, ipc.enums.PLAYER.BULLET):
        memoryloc = mem.DATA_CLOCK
    elif player == ipc.enums.PLAYER.VREP:
        memoryloc = mem.DATA_VREP
else:
raise ValueError(
"Non-recognized player enum specified for player device. "
"It should be either settings.PLAYER.DXL or settings.PLAYER.VREP.")
timestep_fn = _get_timestep_lookup_function(control_loop_period,
memoryloc,
rtf_key='real_time_factor')
return timestep_fn
def get_synchronized_timed_loop(control_loop_period,
player,
iterations_per_update=100,):
"""
    Creates an instance of SynchronizedTimedLoop. Returns an empty sleep
    function that halts execution until the next time interval, together with
    the SynchronizedTimedLoop instance.
:param control_loop_period: duration of cycle of the timed loop. [s]
    :param player: one of ipc.enums.PLAYER.DXL, ipc.enums.PLAYER.BULLET or ipc.enums.PLAYER.VREP
:param iterations_per_update: number of iterations before syncing
:return: sleep_fn, SynchronizedTimedLoop instance
"""
lookup_fn = get_timestep_lookup_function_by_player(
control_loop_period=control_loop_period,
player=player
)
stl = SynchronizedTimedLoop(control_loop_period,
iterations_per_update=iterations_per_update,
timestep_lookup_fn=lookup_fn
)
@stl
def sleep_fn():
pass
return sleep_fn, stl
def get_timed_loop(control_loop_period,
synchronize,
player=None,
iterations_per_update=100):
"""
Function to create timed loop functions and have the option to synchronize
with an external clock.
:param control_loop_period: duration of cycle of the timed loop. [s]
:param synchronize: whether to slow down the timed loop to match the
speed of the player (usually used for simulations)
    :param player: one of ipc.enums.PLAYER.DXL, ipc.enums.PLAYER.BULLET or ipc.enums.PLAYER.VREP
:param iterations_per_update: number of iterations before syncing
:return:
"""
if synchronize:
return get_synchronized_timed_loop(
control_loop_period=control_loop_period,
player=player,
iterations_per_update=iterations_per_update
)
else:
        logging.debug('non-synchronized timed loop')
tl = TimedLoop(dt=control_loop_period)
@tl
def sleep_fn():
pass
return sleep_fn, tl
def _get_timestep_lookup_function(control_loop_period, memory_loc, rtf_key):
"""
Creates a customized timestep lookup function for a SynchronizedTimedLoop
:param control_loop_period: duration in [s] of each timed loop
:param memory_loc: location in shared memory to get() data from
:param rtf_key: key for dictionary to plug into memory location
:return: a customized function that takes no arguments and returns a
desired_dt based on the scaling from the real_time_factor
"""
def timestep_lookup_function():
data = memory_loc.get()
real_time_factor = data[rtf_key][0]
desired_dt = control_loop_period / real_time_factor
return desired_dt
    return timestep_lookup_function
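# Minimal usage sketch (hedged): synchronize=False, so no player or shared-memory
# dependency is needed; `do_control_step` is a placeholder for user code.
#
#     sleep_fn, loop = get_timed_loop(control_loop_period=0.01, synchronize=False)
#     for _ in range(100):
#         do_control_step()
#         sleep_fn()   # blocks until the next 10 ms boundary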
the-stack_106_28904
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 RoBERTa model."""
import math
import warnings
from typing import Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
DUMMY_INPUTS,
MULTIPLE_CHOICE_DUMMY_INPUTS,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from ...modeling_tf_outputs import (
TFBaseModelOutputWithPastAndCrossAttentions,
TFBaseModelOutputWithPoolingAndCrossAttentions,
TFCausalLMOutputWithCrossAttentions,
TFMaskedLMOutput,
TFMultipleChoiceModelOutput,
TFQuestionAnsweringModelOutput,
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
TFCausalLanguageModelingLoss,
TFMaskedLanguageModelingLoss,
TFModelInputType,
TFMultipleChoiceLoss,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFTokenClassificationLoss,
get_initializer,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_roberta import RobertaConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "roberta-base"
_CONFIG_FOR_DOC = "RobertaConfig"
_TOKENIZER_FOR_DOC = "RobertaTokenizer"
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
"roberta-base",
"roberta-large",
"roberta-large-mnli",
"distilroberta-base",
# See all RoBERTa models at https://huggingface.co/models?filter=roberta
]
class TFRobertaEmbeddings(tf.keras.layers.Layer):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.padding_idx = 1
self.vocab_size = config.vocab_size
self.type_vocab_size = config.type_vocab_size
self.hidden_size = config.hidden_size
self.max_position_embeddings = config.max_position_embeddings
self.initializer_range = config.initializer_range
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def build(self, input_shape: tf.TensorShape):
with tf.name_scope("word_embeddings"):
self.weight = self.add_weight(
name="weight",
shape=[self.vocab_size, self.hidden_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("token_type_embeddings"):
self.token_type_embeddings = self.add_weight(
name="embeddings",
shape=[self.type_vocab_size, self.hidden_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("position_embeddings"):
self.position_embeddings = self.add_weight(
name="embeddings",
shape=[self.max_position_embeddings, self.hidden_size],
initializer=get_initializer(self.initializer_range),
)
super().build(input_shape)
def create_position_ids_from_input_ids(self, input_ids, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
symbols are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
input_ids: tf.Tensor
Returns: tf.Tensor
"""
mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype)
incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask
return incremental_indices + self.padding_idx
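    # Worked example (illustration): with padding_idx == 1 and
    # input_ids = [[0, 5, 7, 1, 1]], the mask is [[1, 1, 1, 0, 0]], the masked
    # cumulative sum is [[1, 2, 3, 0, 0]], and the returned position ids are
    # [[2, 3, 4, 1, 1]]: real tokens count up from padding_idx + 1 while padded
    # positions stay at padding_idx.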
def call(
self,
input_ids=None,
position_ids=None,
token_type_ids=None,
inputs_embeds=None,
past_key_values_length=0,
training=False,
):
"""
Applies embedding based on inputs tensor.
Returns:
final_embeddings (`tf.Tensor`): output embedding tensor.
"""
assert not (input_ids is None and inputs_embeds is None)
if input_ids is not None:
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if token_type_ids is None:
token_type_ids = tf.fill(dims=input_shape, value=0)
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = self.create_position_ids_from_input_ids(
input_ids=input_ids, past_key_values_length=past_key_values_length
)
else:
position_ids = tf.expand_dims(
tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0
)
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
final_embeddings = inputs_embeds + position_embeds + token_type_embeds
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings
# Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Roberta
class TFRobertaPooler(tf.keras.layers.Layer):
def __init__(self, config: RobertaConfig, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
units=config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
activation="tanh",
name="dense",
)
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(inputs=first_token_tensor)
return pooled_output
# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->Roberta
class TFRobertaSelfAttention(tf.keras.layers.Layer):
def __init__(self, config: RobertaConfig, **kwargs):
super().__init__(**kwargs)
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number "
f"of attention heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
self.query = tf.keras.layers.Dense(
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
)
self.key = tf.keras.layers.Dense(
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
)
self.value = tf.keras.layers.Dense(
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
)
self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
# Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
# Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
return tf.transpose(tensor, perm=[0, 2, 1, 3])
def call(
self,
hidden_states: tf.Tensor,
attention_mask: tf.Tensor,
head_mask: tf.Tensor,
encoder_hidden_states: tf.Tensor,
encoder_attention_mask: tf.Tensor,
past_key_value: Tuple[tf.Tensor],
output_attentions: bool,
training: bool = False,
) -> Tuple[tf.Tensor]:
batch_size = shape_list(hidden_states)[0]
mixed_query_layer = self.query(inputs=hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size)
value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size)
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
            key_layer = tf.concat([past_key_value[0], key_layer], axis=2)
            value_layer = tf.concat([past_key_value[1], value_layer], axis=2)
else:
key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
if self.is_decoder:
# if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
# (batch size, num_heads, seq_len_q, seq_len_k)
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
attention_scores = tf.divide(attention_scores, dk)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in TFRobertaModel call() function)
attention_scores = tf.add(attention_scores, attention_mask)
# Normalize the attention scores to probabilities.
attention_probs = tf.nn.softmax(logits=attention_scores, axis=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(inputs=attention_probs, training=training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = tf.multiply(attention_probs, head_mask)
attention_output = tf.matmul(attention_probs, value_layer)
attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
# (batch_size, seq_len_q, all_head_size)
attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->Roberta
class TFRobertaSelfOutput(tf.keras.layers.Layer):
def __init__(self, config: RobertaConfig, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.dropout(inputs=hidden_states, training=training)
hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->Roberta
class TFRobertaAttention(tf.keras.layers.Layer):
def __init__(self, config: RobertaConfig, **kwargs):
super().__init__(**kwargs)
self.self_attention = TFRobertaSelfAttention(config, name="self")
self.dense_output = TFRobertaSelfOutput(config, name="output")
def prune_heads(self, heads):
raise NotImplementedError
def call(
self,
input_tensor: tf.Tensor,
attention_mask: tf.Tensor,
head_mask: tf.Tensor,
encoder_hidden_states: tf.Tensor,
encoder_attention_mask: tf.Tensor,
past_key_value: Tuple[tf.Tensor],
output_attentions: bool,
training: bool = False,
) -> Tuple[tf.Tensor]:
self_outputs = self.self_attention(
hidden_states=input_tensor,
attention_mask=attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_value=past_key_value,
output_attentions=output_attentions,
training=training,
)
attention_output = self.dense_output(
hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
)
# add attentions (possibly with past_key_value) if we output them
outputs = (attention_output,) + self_outputs[1:]
return outputs
# Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->Roberta
class TFRobertaIntermediate(tf.keras.layers.Layer):
def __init__(self, config: RobertaConfig, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = get_tf_activation(config.hidden_act)
else:
self.intermediate_act_fn = config.hidden_act
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->Roberta
class TFRobertaOutput(tf.keras.layers.Layer):
def __init__(self, config: RobertaConfig, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.dropout(inputs=hidden_states, training=training)
hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->Roberta
class TFRobertaLayer(tf.keras.layers.Layer):
def __init__(self, config: RobertaConfig, **kwargs):
super().__init__(**kwargs)
self.attention = TFRobertaAttention(config, name="attention")
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = TFRobertaAttention(config, name="crossattention")
self.intermediate = TFRobertaIntermediate(config, name="intermediate")
self.bert_output = TFRobertaOutput(config, name="output")
def call(
self,
hidden_states: tf.Tensor,
attention_mask: tf.Tensor,
head_mask: tf.Tensor,
encoder_hidden_states: Optional[tf.Tensor],
encoder_attention_mask: Optional[tf.Tensor],
past_key_value: Optional[Tuple[tf.Tensor]],
output_attentions: bool,
training: bool = False,
) -> Tuple[tf.Tensor]:
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
input_tensor=hidden_states,
attention_mask=attention_mask,
head_mask=head_mask,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=self_attn_past_key_value,
output_attentions=output_attentions,
training=training,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers "
"by setting `config.add_cross_attention=True`"
)
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
input_tensor=attention_output,
attention_mask=attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_value=cross_attn_past_key_value,
output_attentions=output_attentions,
training=training,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
intermediate_output = self.intermediate(hidden_states=attention_output)
layer_output = self.bert_output(
hidden_states=intermediate_output, input_tensor=attention_output, training=training
)
outputs = (layer_output,) + outputs # add attentions if we output them
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
# Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->Roberta
class TFRobertaEncoder(tf.keras.layers.Layer):
def __init__(self, config: RobertaConfig, **kwargs):
super().__init__(**kwargs)
self.config = config
self.layer = [TFRobertaLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
def call(
self,
hidden_states: tf.Tensor,
attention_mask: tf.Tensor,
head_mask: tf.Tensor,
encoder_hidden_states: Optional[tf.Tensor],
encoder_attention_mask: Optional[tf.Tensor],
past_key_values: Optional[Tuple[Tuple[tf.Tensor]]],
use_cache: Optional[bool],
output_attentions: bool,
output_hidden_states: bool,
return_dict: bool,
training: bool = False,
) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
past_key_value = past_key_values[i] if past_key_values is not None else None
layer_outputs = layer_module(
hidden_states=hidden_states,
attention_mask=attention_mask,
head_mask=head_mask[i],
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_value=past_key_value,
output_attentions=output_attentions,
training=training,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if self.config.add_cross_attention and encoder_hidden_states is not None:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None
)
return TFBaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_attentions,
cross_attentions=all_cross_attentions,
)
@keras_serializable
class TFRobertaMainLayer(tf.keras.layers.Layer):
config_class = RobertaConfig
def __init__(self, config, add_pooling_layer=True, **kwargs):
super().__init__(**kwargs)
self.config = config
self.is_decoder = config.is_decoder
self.num_hidden_layers = config.num_hidden_layers
self.initializer_range = config.initializer_range
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.return_dict = config.use_return_dict
self.encoder = TFRobertaEncoder(config, name="encoder")
self.pooler = TFRobertaPooler(config, name="pooler") if add_pooling_layer else None
# The embeddings must be the last declaration in order to follow the weights order
self.embeddings = TFRobertaEmbeddings(config, name="embeddings")
# Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.get_input_embeddings
def get_input_embeddings(self) -> tf.keras.layers.Layer:
return self.embeddings
# Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.set_input_embeddings
def set_input_embeddings(self, value: tf.Variable):
self.embeddings.weight = value
self.embeddings.vocab_size = shape_list(value)[0]
# Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer._prune_heads
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
raise NotImplementedError
# Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.call
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
encoder_hidden_states: Optional[Union[np.ndarray, tf.Tensor]] = None,
encoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
**kwargs,
) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if not self.config.is_decoder:
inputs["use_cache"] = False
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
if inputs["past_key_values"] is None:
past_key_values_length = 0
inputs["past_key_values"] = [None] * len(self.encoder.layer)
else:
past_key_values_length = shape_list(inputs["past_key_values"][0][0])[-2]
if inputs["attention_mask"] is None:
inputs["attention_mask"] = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1)
if inputs["token_type_ids"] is None:
inputs["token_type_ids"] = tf.fill(dims=input_shape, value=0)
embedding_output = self.embeddings(
input_ids=inputs["input_ids"],
position_ids=inputs["position_ids"],
token_type_ids=inputs["token_type_ids"],
inputs_embeds=inputs["inputs_embeds"],
past_key_values_length=past_key_values_length,
training=inputs["training"],
)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask_shape = shape_list(inputs["attention_mask"])
mask_seq_length = seq_length + past_key_values_length
# Copied from `modeling_tf_t5.py`
# Provided a padding mask of dimensions [batch_size, mask_seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
if self.is_decoder:
seq_ids = tf.range(mask_seq_length)
causal_mask = tf.less_equal(
tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)),
seq_ids[None, :, None],
)
causal_mask = tf.cast(causal_mask, dtype=inputs["attention_mask"].dtype)
extended_attention_mask = causal_mask * inputs["attention_mask"][:, None, :]
attention_mask_shape = shape_list(extended_attention_mask)
extended_attention_mask = tf.reshape(
extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2])
)
else:
extended_attention_mask = tf.reshape(
inputs["attention_mask"], (attention_mask_shape[0], 1, 1, attention_mask_shape[1])
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
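        # Illustration: a padding-mask row of [1, 1, 0] becomes [0.0, 0.0, -10000.0]
        # here, so masked positions receive a large negative bias before the softmax
        # inside the attention layers.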
# Copied from `modeling_tf_t5.py` with -1e9 -> -10000
if self.is_decoder and inputs["encoder_attention_mask"] is not None:
            # If a 2D or 3D attention mask is provided for the cross-attention
            # we need to make it broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
inputs["encoder_attention_mask"] = tf.cast(
inputs["encoder_attention_mask"], dtype=extended_attention_mask.dtype
)
num_dims_encoder_attention_mask = len(shape_list(inputs["encoder_attention_mask"]))
if num_dims_encoder_attention_mask == 3:
encoder_extended_attention_mask = inputs["encoder_attention_mask"][:, None, :, :]
if num_dims_encoder_attention_mask == 2:
encoder_extended_attention_mask = inputs["encoder_attention_mask"][:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask,
# tf.transpose(encoder_extended_attention_mask, perm=(-1, -2)))
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if inputs["head_mask"] is not None:
raise NotImplementedError
else:
inputs["head_mask"] = [None] * self.config.num_hidden_layers
encoder_outputs = self.encoder(
hidden_states=embedding_output,
attention_mask=extended_attention_mask,
head_mask=inputs["head_mask"],
encoder_hidden_states=inputs["encoder_hidden_states"],
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=inputs["past_key_values"],
use_cache=inputs["use_cache"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None
if not inputs["return_dict"]:
return (
sequence_output,
pooled_output,
) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
class TFRobertaPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = RobertaConfig
base_model_prefix = "roberta"
@property
# Copied from transformers.models.bert.modeling_tf_bert.TFBertPreTrainedModel.dummy_inputs
def dummy_inputs(self):
"""
Dummy inputs to build the network.
Returns:
`Dict[str, tf.Tensor]`: The dummy inputs.
"""
dummy = {"input_ids": tf.constant(DUMMY_INPUTS)}
# Add `encoder_hidden_states` to make the cross-attention layers' weights initialized
if self.config.add_cross_attention:
batch_size, seq_len = tf.constant(DUMMY_INPUTS).shape
shape = (batch_size, seq_len) + (self.config.hidden_size,)
h = tf.random.uniform(shape=shape)
dummy["encoder_hidden_states"] = h
return dummy
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
ROBERTA_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the
tensors in the first argument of the model call function: `model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the
first positional argument :
- a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
</Tip>
Parameters:
config ([`RobertaConfig`]): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
ROBERTA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`RobertaTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
[`PreTrainedTokenizer.encode`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
in eager mode, in graph mode the value will always be set to True.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
ROBERTA_START_DOCSTRING,
)
class TFRobertaModel(TFRobertaPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.roberta = TFRobertaMainLayer(config, name="roberta")
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFBaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
r"""
encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`)
contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*, defaults to `True`):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`). Set to `False` during training, `True` during generation
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.roberta(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
encoder_hidden_states=inputs["encoder_hidden_states"],
encoder_attention_mask=inputs["encoder_attention_mask"],
past_key_values=inputs["past_key_values"],
use_cache=inputs["use_cache"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
# Copied from transformers.models.bert.modeling_tf_bert.TFBertModel.serving_output
def serving_output(
self, output: TFBaseModelOutputWithPoolingAndCrossAttentions
) -> TFBaseModelOutputWithPoolingAndCrossAttentions:
output_cache = self.config.use_cache and self.config.is_decoder
pkv = tf.convert_to_tensor(output.past_key_values) if output_cache else None
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
cross_attns = tf.convert_to_tensor(output.cross_attentions) if output.cross_attentions is not None else None
if not (self.config.output_attentions and self.config.add_cross_attention):
cross_attns = None
return TFBaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=output.last_hidden_state,
pooler_output=output.pooler_output,
past_key_values=pkv,
hidden_states=hs,
attentions=attns,
cross_attentions=cross_attns,
)
class TFRobertaLMHead(tf.keras.layers.Layer):
"""Roberta Head for masked language modeling."""
def __init__(self, config, input_embeddings, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.hidden_size = config.hidden_size
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
self.act = get_tf_activation("gelu")
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = input_embeddings
def build(self, input_shape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
super().build(input_shape)
def get_output_embeddings(self):
return self.decoder
def set_output_embeddings(self, value):
self.decoder.weight = value
self.decoder.vocab_size = shape_list(value)[0]
def get_bias(self):
return {"bias": self.bias}
def set_bias(self, value):
self.bias = value["bias"]
self.vocab_size = shape_list(value["bias"])[0]
def call(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.layer_norm(hidden_states)
# project back to size of vocabulary with bias
seq_length = shape_list(tensor=hidden_states)[1]
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True)
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])
hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
return hidden_states
@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top.""", ROBERTA_START_DOCSTRING)
class TFRobertaForMaskedLM(TFRobertaPreTrainedModel, TFMaskedLanguageModelingLoss):
# names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.roberta = TFRobertaMainLayer(config, add_pooling_layer=False, name="roberta")
self.lm_head = TFRobertaLMHead(config, self.roberta.embeddings, name="lm_head")
def get_lm_head(self):
return self.lm_head
def get_prefix_bias_name(self):
warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
return self.name + "/" + self.lm_head.name
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.roberta(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
loss = None if inputs["labels"] is None else self.hf_compute_loss(inputs["labels"], prediction_scores)
if not inputs["return_dict"]:
output = (prediction_scores,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFMaskedLMOutput(
loss=loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
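# Illustrative usage sketch (hedged): weights and tokenizer are fetched from the
# Hugging Face hub; the example sentence is arbitrary.
#
#     from transformers import RobertaTokenizer, TFRobertaForMaskedLM
#     tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
#     model = TFRobertaForMaskedLM.from_pretrained("roberta-base")
#     inputs = tokenizer("The capital of France is <mask>.", return_tensors="tf")
#     logits = model(**inputs).logits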
class TFRobertaForCausalLM(TFRobertaPreTrainedModel, TFCausalLanguageModelingLoss):
# names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"]
def __init__(self, config: RobertaConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
if not config.is_decoder:
logger.warning("If you want to use `TFRobertaLMHeadModel` as a standalone, add `is_decoder=True.`")
self.roberta = TFRobertaMainLayer(config, add_pooling_layer=False, name="roberta")
self.lm_head = TFRobertaLMHead(config, input_embeddings=self.roberta.embeddings, name="lm_head")
def get_lm_head(self):
return self.lm_head
def get_prefix_bias_name(self):
warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
return self.name + "/" + self.lm_head.name
# Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel.prepare_inputs_for_generation
def prepare_inputs_for_generation(self, inputs, past=None, attention_mask=None, **model_kwargs):
# cut decoder_input_ids if past is used
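        # (sketch of the intent: only the newest token id needs to be fed here, because earlier
        # positions are already encoded in the cached `past_key_values`)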
if past:
inputs = tf.expand_dims(inputs[:, -1], -1)
return {
"input_ids": inputs,
"attention_mask": attention_mask,
"past_key_values": past,
"use_cache": model_kwargs["use_cache"],
}
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFCausalLMOutputWithCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]:
r"""
encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
        past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*, defaults to `True`):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`). Set to `False` during training, `True` during generation
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
config.vocab_size - 1]`.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.roberta(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
encoder_hidden_states=inputs["encoder_hidden_states"],
encoder_attention_mask=inputs["encoder_attention_mask"],
past_key_values=inputs["past_key_values"],
use_cache=inputs["use_cache"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
logits = self.lm_head(hidden_states=sequence_output)
loss = None
if inputs["labels"] is not None:
# shift labels to the left and cut last logit token
logits = logits[:, :-1]
labels = inputs["labels"][:, 1:]
loss = self.hf_compute_loss(labels=labels, logits=logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFCausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel.serving_output
def serving_output(self, output: TFCausalLMOutputWithCrossAttentions) -> TFCausalLMOutputWithCrossAttentions:
output_cache = self.config.use_cache and self.config.is_decoder
pkv = tf.convert_to_tensor(output.past_key_values) if output_cache else None
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
cross_attns = tf.convert_to_tensor(output.cross_attentions) if output.cross_attentions is not None else None
if not (self.config.output_attentions and self.config.add_cross_attention):
cross_attns = None
return TFCausalLMOutputWithCrossAttentions(
logits=output.logits, past_key_values=pkv, hidden_states=hs, attentions=attns, cross_attentions=cross_attns
)
class TFRobertaClassificationHead(tf.keras.layers.Layer):
"""Head for sentence-level classification tasks."""
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
activation="tanh",
name="dense",
)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = tf.keras.layers.Dropout(classifier_dropout)
self.out_proj = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
)
def call(self, features, training=False):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x, training=training)
x = self.dense(x)
x = self.dropout(x, training=training)
x = self.out_proj(x)
return x
@add_start_docstrings(
"""
RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
ROBERTA_START_DOCSTRING,
)
class TFRobertaForSequenceClassification(TFRobertaPreTrainedModel, TFSequenceClassificationLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.roberta = TFRobertaMainLayer(config, add_pooling_layer=False, name="roberta")
self.classifier = TFRobertaClassificationHead(config, name="classifier")
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.roberta(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output, training=inputs["training"])
loss = None if inputs["labels"] is None else self.hf_compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
ROBERTA_START_DOCSTRING,
)
class TFRobertaForMultipleChoice(TFRobertaPreTrainedModel, TFMultipleChoiceLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"lm_head"]
_keys_to_ignore_on_load_missing = [r"dropout"]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.roberta = TFRobertaMainLayer(config, name="roberta")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
self.classifier = tf.keras.layers.Dense(
1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@property
def dummy_inputs(self):
"""
Dummy inputs to build the network.
Returns:
tf.Tensor with dummy inputs
"""
return {"input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)}
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in
            `[0, ..., num_choices - 1]` where `num_choices` is the size of the second dimension of the input tensors.
            (See `input_ids` above.)
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None:
num_choices = shape_list(inputs["input_ids"])[1]
seq_length = shape_list(inputs["input_ids"])[2]
else:
            num_choices = shape_list(inputs["inputs_embeds"])[1]
            seq_length = shape_list(inputs["inputs_embeds"])[2]
flat_input_ids = tf.reshape(inputs["input_ids"], (-1, seq_length)) if inputs["input_ids"] is not None else None
flat_attention_mask = (
tf.reshape(inputs["attention_mask"], (-1, seq_length)) if inputs["attention_mask"] is not None else None
)
flat_token_type_ids = (
tf.reshape(inputs["token_type_ids"], (-1, seq_length)) if inputs["token_type_ids"] is not None else None
)
flat_position_ids = (
tf.reshape(inputs["position_ids"], (-1, seq_length)) if inputs["position_ids"] is not None else None
)
outputs = self.roberta(
flat_input_ids,
flat_attention_mask,
flat_token_type_ids,
flat_position_ids,
inputs["head_mask"],
inputs["inputs_embeds"],
inputs["output_attentions"],
inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output, training=inputs["training"])
logits = self.classifier(pooled_output)
reshaped_logits = tf.reshape(logits, (-1, num_choices))
loss = None if inputs["labels"] is None else self.hf_compute_loss(inputs["labels"], reshaped_logits)
if not inputs["return_dict"]:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving_output
def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
RoBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
ROBERTA_START_DOCSTRING,
)
class TFRobertaForTokenClassification(TFRobertaPreTrainedModel, TFTokenClassificationLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"]
_keys_to_ignore_on_load_missing = [r"dropout"]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.roberta = TFRobertaMainLayer(config, add_pooling_layer=False, name="roberta")
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = tf.keras.layers.Dropout(classifier_dropout)
self.classifier = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFTokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.roberta(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output, training=inputs["training"])
logits = self.classifier(sequence_output)
loss = None if inputs["labels"] is None else self.hf_compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output
def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
RoBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
ROBERTA_START_DOCSTRING,
)
class TFRobertaForQuestionAnswering(TFRobertaPreTrainedModel, TFQuestionAnsweringLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.roberta = TFRobertaMainLayer(config, add_pooling_layer=False, name="roberta")
self.qa_outputs = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
)
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
start_positions=None,
end_positions=None,
training=False,
**kwargs,
):
r"""
start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account for computing the loss.
end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account for computing the loss.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
start_positions=start_positions,
end_positions=end_positions,
training=training,
kwargs_call=kwargs,
)
outputs = self.roberta(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = tf.split(logits, 2, axis=-1)
start_logits = tf.squeeze(start_logits, axis=-1)
end_logits = tf.squeeze(end_logits, axis=-1)
loss = None
if inputs["start_positions"] is not None and inputs["end_positions"] is not None:
labels = {"start_position": inputs["start_positions"]}
labels["end_position"] = inputs["end_positions"]
loss = self.hf_compute_loss(labels, (start_logits, end_logits))
if not inputs["return_dict"]:
output = (start_logits, end_logits) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFQuestionAnsweringModelOutput(
loss=loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output
def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFQuestionAnsweringModelOutput(
start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns
)
|
the-stack_106_28905 | """cli.py - Command line argument parser.
"""
import sys
import errno
import os
import argparse
import logging
import tempfile
import json
import time
from j2lint import NAME, VERSION, DESCRIPTION
from j2lint.linter.collection import RulesCollection
from j2lint.linter.runner import Runner
from j2lint.utils import get_files
from j2lint.logger import logger, add_handler
from j2lint.settings import settings
RULES_DIR = os.path.dirname(os.path.realpath(__file__)) + "/rules"
IGNORE_RULES = ['jinja-syntax-error',
'single-space-decorator',
'operator-enclosed-by-spaces',
'jinja-statements-single-space',
'jinja-statements-indentation',
'jinja-statements-no-tabs',
'single-statement-per-line',
'jinja-statements-delimeter',
'jinja-variable-lower-case',
'jinja-variable-format',
'S0', 'S1', 'S2', 'S3', 'S4',
'S5', 'S6', 'S7', 'V1', 'V2'
]
def create_parser():
"""Initializes a new argument parser object
Returns:
Object: Argument parser object
"""
parser = argparse.ArgumentParser(prog=NAME, description=DESCRIPTION)
parser.add_argument(dest='files', metavar='FILE', nargs='*', default=[],
help='files or directories to lint')
parser.add_argument('-i', '--ignore', nargs='*',
choices=IGNORE_RULES, default=[], help='rules to ignore')
parser.add_argument('-l', '--list', default=False,
action='store_true', help='list of lint rules')
parser.add_argument('-r', '--rules_dir', dest='rules_dir', action='append',
default=[RULES_DIR], help='rules directory')
parser.add_argument('-v', '--verbose', default=False,
action='store_true', help='verbose output for lint issues')
parser.add_argument('-d', '--debug', default=False,
action='store_true', help='enable debug logs')
parser.add_argument('-j', '--json', default=False,
action='store_true', help='enable JSON output')
parser.add_argument('-s', '--stdin', default=False,
action='store_true', help='accept template from STDIN')
parser.add_argument('--log', default=False,
action='store_true', help='enable logging')
parser.add_argument('-ver', '--version', default=False,
action='store_true', help='Version of j2lint')
parser.add_argument('-stdout', '--vv', default=False,
action='store_true', help='stdout logging')
parser.add_argument('-sout', '--vvv', default=False,
action='store_true', help='stdout debug logging')
return parser
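# Illustrative invocation (sketch, not part of the original CLI):
#   options = create_parser().parse_args(['templates/', '--ignore', 'S0', '--json'])
#   options.files -> ['templates/'], options.ignore -> ['S0'], options.json -> True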
def sort_issues(issues):
"""Sorted list of issues
Args:
issues (list): list of issue dictionaries
Returns:
list: list of sorted issue dictionaries
"""
issues.sort(
key=lambda issue: (
issue.filename,
issue.linenumber,
issue.rule.id
)
)
return issues
def run(args=None):
"""Runs jinja2 linter
Args:
args ([string], optional): Command line arguments. Defaults to None.
Returns:
int: 0 on success
"""
parser = create_parser()
options = parser.parse_args(args if args is not None else sys.argv[1:])
# Enable logs
if not options.log and not options.vv and not options.vvv:
logging.disable(sys.maxsize)
else:
if options.vv:
# Enable logs on console
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
handlers=[
logging.StreamHandler(sys.stdout)
]
)
time.sleep(1)
if options.vvv:
# Enable debug logs on console
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
handlers=[
logging.StreamHandler(sys.stdout)
]
)
time.sleep(1)
if options.log:
add_handler(logger)
# Enable debug logs
if options.debug:
logger.setLevel(logging.DEBUG)
time.sleep(1)
logger.debug("Lint options selected {}".format(options))
stdin_filename = None
file_or_dir_names = set(options.files)
checked_files = set()
if options.stdin and not sys.stdin.isatty():
with tempfile.NamedTemporaryFile('w', suffix='.j2', delete=False) as stdin_tmpfile:
stdin_tmpfile.write(sys.stdin.read())
stdin_filename = stdin_tmpfile.name
file_or_dir_names.add(stdin_filename)
# Collect the rules from the configuration
collection = RulesCollection(options.verbose)
for rulesdir in options.rules_dir:
collection.extend(RulesCollection.create_from_directory(
rulesdir, options.ignore))
# List lint rules
if options.list:
rules = "Jinja2 lint rules\n{}\n".format(collection)
print(rules)
logger.debug(rules)
return 0
# Version of j2lint
if options.version:
print(f"Jinja2-Linter Version {VERSION}")
return 0
# Print help message
if not file_or_dir_names:
parser.print_help(file=sys.stderr)
return 1
# Print verbose output for linting
if options.verbose:
settings.verbose = True
logger.debug("Verbose mode enabled")
if options.json:
settings.output = "json"
logger.debug("JSON output enabled")
lint_issues = {}
files = get_files(file_or_dir_names)
# Get linting issues
for file_name in files:
runner = Runner(collection, file_name, checked_files)
if file_name not in lint_issues:
lint_issues[file_name] = []
lint_issues[file_name].extend(runner.run())
# Remove temporary file
if stdin_filename:
os.unlink(stdin_filename)
# Sort and print linting issues
found_issues = False
json_output = []
if lint_issues:
for key, issues in lint_issues.items():
if len(issues):
if not found_issues:
found_issues = True
if not options.json:
print("Jinja2 linting issues found")
sorted_issues = sort_issues(issues)
if options.json:
json_output.extend([json.loads(str(issue))
for issue in sorted_issues])
else:
print("************ File {}".format(key))
for issue in sorted_issues:
print(issue)
if options.json:
print(json.dumps(json_output))
elif not found_issues:
print("Linting complete. No problems found.")
if found_issues:
return 2
return 0
|
the-stack_106_28908 | from dataclasses import dataclass, field
from typing import List, Optional
from output.models.ms_data.particles.particles_q030_xsd.particles_q030_imp import E2 as ParticlesQ030ImpE2
from output.models.ms_data.particles.particles_q030_xsd.particles_q030_imp2 import E2
__NAMESPACE__ = "http://xsdtesting"
@dataclass
class B:
foo: Optional[object] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
foo_bar_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "foo bar",
"max_occurs": 4,
}
)
@dataclass
class R:
foo: Optional[object] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
foo_bar_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "foo bar",
"max_occurs": 4,
}
)
e2: List[ParticlesQ030ImpE2] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "foo",
"max_occurs": 2,
"sequential": True,
}
)
bar_e2: List[E2] = field(
default_factory=list,
metadata={
"name": "e2",
"type": "Element",
"namespace": "bar",
"max_occurs": 2,
"sequential": True,
}
)
@dataclass
class Doc:
class Meta:
name = "doc"
namespace = "http://xsdtesting"
elem: Optional[R] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
|
the-stack_106_28910 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 PPMessage.
# Guijin Ding, [email protected]
#
#
from .basehandler import BaseHandler
from ppmessage.api.error import API_ERR
from ppmessage.core.constant import API_LEVEL
from ppmessage.db.models import PredefinedScript
from ppmessage.db.models import PredefinedScriptGroup
import json
import logging
class PPRemovePredefinedScriptGroup(BaseHandler):
def _remove(self):
_request = json.loads(self.request.body)
_group_uuid = _request.get("group_uuid")
if _group_uuid == None or len(_group_uuid) == 0:
self.setErrorCode(API_ERR.NO_PARA)
return
_group = PredefinedScriptGroup(uuid=_group_uuid)
_group.async_delete()
_group.delete_redis_keys(self.application.redis)
_key = PredefinedScript.__tablename__ + ".group_uuid." + _group_uuid
_scripts = self.application.redis.smembers(_key)
self.application.redis.delete(_key)
_key = PredefinedScript.__tablename__ + ".group_uuid.None"
for _script in _scripts:
self.application.redis.sadd(_key, _script)
return
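    # Note (sketch of the intent): scripts that belonged to the removed group are re-attached above to
    # the "group_uuid.None" redis set, so they remain reachable after their group is deleted.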
def initialize(self):
self.addPermission(app_uuid=True)
self.addPermission(api_level=API_LEVEL.PPCONSOLE)
self.addPermission(api_level=API_LEVEL.THIRD_PARTY_CONSOLE)
return
def _Task(self):
super(PPRemovePredefinedScriptGroup, self)._Task()
self._remove()
return
|
the-stack_106_28916 | import logging
import re
import threading
import time
from bot import download_dict, download_dict_lock
LOGGER = logging.getLogger(__name__)
URL_REGEX = r"(?:(?:https?|ftp):\/\/)?[\w/\-?=%.]+\.[\w/\-?=%.]+"
SIZE_UNITS = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
class setInterval:
def __init__(self, interval, action):
self.interval = interval
self.action = action
self.stopEvent = threading.Event()
thread = threading.Thread(target=self.__setInterval)
thread.start()
def __setInterval(self):
nextTime = time.time() + self.interval
while not self.stopEvent.wait(nextTime - time.time()):
nextTime += self.interval
self.action()
def cancel(self):
self.stopEvent.set()
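# Illustrative usage of setInterval (sketch using names from this module):
#   heartbeat = setInterval(5, lambda: LOGGER.info("still alive"))  # runs the action every 5 seconds
#   heartbeat.cancel()                                              # stops the background timer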
def get_readable_file_size(size_in_bytes) -> str:
if size_in_bytes is None:
return '0B'
index = 0
while size_in_bytes >= 1024:
size_in_bytes /= 1024
index += 1
try:
return f'{round(size_in_bytes, 2)}{SIZE_UNITS[index]}'
except IndexError:
return 'File too large'
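# Example values (sketch): get_readable_file_size(1536) -> '1.5KB', get_readable_file_size(None) -> '0B'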
def get_readable_time(seconds: int) -> str:
result = ''
(days, remainder) = divmod(seconds, 86400)
days = int(days)
if days != 0:
result += f'{days}d'
(hours, remainder) = divmod(remainder, 3600)
hours = int(hours)
if hours != 0:
result += f'{hours}h'
(minutes, seconds) = divmod(remainder, 60)
minutes = int(minutes)
if minutes != 0:
result += f'{minutes}m'
seconds = int(seconds)
result += f'{seconds}s'
return result
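# Example value (sketch): get_readable_time(3725) -> '1h2m5s'; zero-valued units other than seconds are omitted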
def is_url(url: str):
url = re.findall(URL_REGEX, url)
if url:
return True
return False
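# Example values (sketch): is_url('https://example.com/a.zip') -> True, is_url('no link here') -> False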
|
the-stack_106_28918 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'CoderDojo Mobile Toolbox'
copyright = '2018, CoderDojo Brianza, CoderDojo Lissone'
author = 'CoderDojo Brianza, CoderDojo Lissone'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'CoderDojoMobileToolboxdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CoderDojoMobileToolbox.tex', 'CoderDojo Mobile Toolbox Documentation',
'CoderDojo Brianza, CoderDojo Lissone', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'coderdojomobiletoolbox', 'CoderDojo Mobile Toolbox Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CoderDojoMobileToolbox', 'CoderDojo Mobile Toolbox Documentation',
author, 'CoderDojoMobileToolbox', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration ------------------------------------------------- |
the-stack_106_28920 | import adv_test
import adv
from adv import *
def module():
return Fritz
class Fritz(adv.Adv):
def prerun(this):
this.stance = 0
this.s2fscharge = 0
def s2_proc(this, e):
this.s2fscharge = 3
def fs_proc(this, e):
if this.s2fscharge > 0:
this.s2fscharge -= 1
this.dmg_make("o_s2fs",0.57*3+0.29)
if __name__ == '__main__':
conf = {}
conf['acl'] = """
`s1, seq=5 and cancel or fsc
`s2
`s3, seq=5 and cancel or fsc
`fs, seq=5
"""
adv_test.test(module(), conf, verbose=0)
|
the-stack_106_28921 | from copy import deepcopy
from datetime import datetime
from datetime import timezone
from email.utils import mktime_tz
from email.utils import parsedate_tz
from io import BytesIO
from itertools import chain
from mimetypes import guess_type
from typing import Callable
from typing import Iterable
from typing import List
from typing import Optional
from typing import Tuple
from bs4 import BeautifulSoup
from PIL import Image
from pyzmail import PyzMessage
from pyzmail.parse import MailPart
from requests import Response
from requests import get as http_get
from opwen_email_server.config import MAX_HEIGHT_IMAGES
from opwen_email_server.config import MAX_WIDTH_IMAGES
from opwen_email_server.constants import mailbox
from opwen_email_server.utils.log import LogMixin
from opwen_email_server.utils.serialization import to_base64
def _parse_body(message: PyzMessage, default_charset: str = 'ascii') -> str:
body_parts = (message.html_part, message.text_part)
for part in body_parts:
if part is None:
continue
payload = part.get_payload()
if payload is None:
continue
charset = part.charset or default_charset
return payload.decode(charset, errors='replace')
return ''
def _parse_attachments(mailparts: Iterable[MailPart]) -> Iterable[dict]:
attachment_parts = (part for part in mailparts if not part.is_body)
for part in attachment_parts:
filename = part.sanitized_filename
payload = part.get_payload()
attachment_id = part.content_id
if filename and payload:
attachment = {'filename': filename, 'content': payload}
if attachment_id:
attachment['cid'] = attachment_id
yield attachment
def _parse_addresses(message: PyzMessage, address_type: str) -> List[str]:
return sorted(email for _, email in message.get_addresses(address_type) if email)
def _parse_address(message: PyzMessage, address_type: str) -> Optional[str]:
return next(iter(_parse_addresses(message, address_type)), None)
def _parse_sent_at(message: PyzMessage) -> Optional[str]:
rfc_822 = message.get_decoded_header('date')
if not rfc_822:
return None
date_tz = parsedate_tz(rfc_822)
if not date_tz:
return None
timestamp = mktime_tz(date_tz)
# noinspection PyUnresolvedReferences
date_utc = datetime.fromtimestamp(timestamp, timezone.utc)
return date_utc.strftime('%Y-%m-%d %H:%M')
def parse_mime_email(mime_email: str) -> dict:
message = PyzMessage.factory(mime_email)
return {
'sent_at': _parse_sent_at(message),
'to': _parse_addresses(message, 'to'),
'cc': _parse_addresses(message, 'cc'),
'bcc': _parse_addresses(message, 'bcc'),
'from': _parse_address(message, 'from'),
'subject': message.get_subject(),
'body': _parse_body(message),
'attachments': list(_parse_attachments(message.mailparts)),
}
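# Illustrative usage (sketch): the returned dict mirrors the keys assembled above, e.g.
#   email = parse_mime_email(raw_mime_string)
#   email['from'], email['to'], email['subject'], email['body'], email['attachments']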
def format_attachments(email: dict) -> dict:
attachments = email.get('attachments', [])
if not attachments:
return email
formatted_attachments = deepcopy(attachments)
is_any_attachment_changed = False
for i, attachment in enumerate(attachments):
filename = attachment.get('filename', '')
content = attachment.get('content', b'')
formatted_content = _format_attachment(filename, content)
if content != formatted_content:
formatted_attachments[i]['content'] = formatted_content
is_any_attachment_changed = True
if not is_any_attachment_changed:
return email
new_email = dict(email)
new_email['attachments'] = formatted_attachments
return new_email
def _format_attachment(filename: str, content: bytes) -> bytes:
attachment_type = guess_type(filename)[0]
if not attachment_type:
return content
if 'image' in attachment_type.lower():
content = _change_image_size(content)
return content
def get_recipients(email: dict) -> Iterable[str]:
return chain(email.get('to') or [], email.get('cc') or [], email.get('bcc') or [])
def get_domains(email: dict) -> Iterable[str]:
return frozenset(get_domain(address) for address in get_recipients(email))
def get_domain(address: str) -> str:
return address.split('@')[-1]
def ensure_has_sent_at(email: dict):
if not email.get('sent_at'):
email['sent_at'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M')
def _get_image_type(response: Response, url: str) -> Optional[str]:
content_type = response.headers.get('Content-Type')
if not content_type:
content_type = guess_type(url.split('?')[0])[0]
return content_type
def _is_already_small(size: Tuple[int, int]) -> bool:
width, height = size
return width <= MAX_WIDTH_IMAGES and height <= MAX_HEIGHT_IMAGES
def _change_image_size(image_content_bytes: bytes) -> bytes:
image_bytes = BytesIO(image_content_bytes)
image_bytes.seek(0)
image = Image.open(image_bytes)
if _is_already_small(image.size):
return image_content_bytes
new_size = (MAX_WIDTH_IMAGES, MAX_HEIGHT_IMAGES)
image.thumbnail(new_size, Image.ANTIALIAS)
new_image = BytesIO()
image.save(new_image, image.format)
new_image.seek(0)
new_image_bytes = new_image.read()
return new_image_bytes
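# Sketch of the behaviour above: images larger than MAX_WIDTH_IMAGES x MAX_HEIGHT_IMAGES are re-encoded
# as thumbnails that fit within those bounds (aspect ratio and format preserved); smaller images are
# returned unchanged.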
def _fetch_image_to_base64(image_url: str) -> Optional[str]:
response = http_get(image_url)
if not response.ok:
return None
image_type = _get_image_type(response, image_url)
if not image_type:
return None
if not response.content:
return None
small_image_bytes = _change_image_size(response.content)
small_image_base64 = to_base64(small_image_bytes)
return f'data:{image_type};base64,{small_image_base64}'
def _is_valid_url(url: Optional[str]) -> bool:
if not url:
return False
has_http_prefix = url.startswith('http://')
has_https_prefix = url.startswith('https://')
return has_http_prefix or has_https_prefix
def format_inline_images(email: dict, on_error: Callable) -> dict:
email_body = email.get('body', '')
if not email_body:
return email
soup = BeautifulSoup(email_body, 'html.parser')
image_tags = soup.find_all('img')
if not image_tags:
return email
for image_tag in image_tags:
image_url = image_tag.get('src')
if not _is_valid_url(image_url):
continue
try:
encoded_image = _fetch_image_to_base64(image_url)
except Exception as ex:
on_error('Unable to inline image %s: %s', image_url, ex)
else:
if encoded_image:
image_tag['src'] = encoded_image
new_email = dict(email)
new_email['body'] = str(soup)
return new_email
def descending_timestamp(email_sent_at: str) -> str:
return str(mailbox.FUTURE_TIMESTAMP - int(datetime.fromisoformat(email_sent_at).timestamp()))
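# Sketch of the intent (assumption): keys derived from mailbox.FUTURE_TIMESTAMP shrink as `sent_at`
# grows, so listing stored emails by this key yields the most recent ones first.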
class MimeEmailParser(LogMixin):
def __call__(self, mime_email: str) -> dict:
email = parse_mime_email(mime_email)
email = format_attachments(email)
email = format_inline_images(email, self.log_warning)
return email
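# Illustrative usage (sketch):
#   parse = MimeEmailParser()
#   email = parse(raw_mime_string)  # parsed, attachments resized, remote <img> sources inlined as base64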
|
the-stack_106_28923 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""
Defines an rst directive to auto-document AiiDA processes.
"""
from docutils import nodes
from docutils.core import publish_doctree
from docutils.parsers.rst import Directive, directives
from sphinx import addnodes
from sphinx.ext.autodoc import ClassDocumenter
from plumpy.ports import OutputPort
from aiida.common.utils import get_object_from_string
def setup_extension(app):
app.add_directive_to_domain('py', AiidaProcessDocumenter.directivetype, AiidaProcessDirective)
app.add_autodocumenter(AiidaProcessDocumenter)
class AiidaProcessDocumenter(ClassDocumenter):
"""Sphinx Documenter class for AiiDA Processes."""
directivetype = 'aiida-process'
objtype = 'process'
priority = 10
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
from aiida.engine import Process
        # only actual AiiDA Process classes should be picked up by this documenter
        return isinstance(member, type) and issubclass(member, Process)
class AiidaProcessDirective(Directive):
"""
Directive to auto-document AiiDA processes.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
HIDE_UNSTORED_INPUTS_FLAG = 'hide-nondb-inputs'
EXPAND_NAMESPACES_FLAG = 'expand-namespaces'
option_spec = {
'module': directives.unchanged,
HIDE_UNSTORED_INPUTS_FLAG: directives.flag,
EXPAND_NAMESPACES_FLAG: directives.flag
}
signature = 'Process'
annotation = 'process'
has_content = True
def run(self):
self.initialize()
return self.build_node_tree()
def initialize(self):
"""Set internal attributes of the class.
Includes importing the process class.
"""
# pylint: disable=attribute-defined-outside-init
from aiida.manage.configuration import load_profile
load_profile()
self.class_name = self.arguments[0].split('(')[0]
self.module_name = self.options['module']
self.process_name = f'{self.module_name}.{self.class_name}'
self.process = get_object_from_string(self.process_name)
try:
self.process_spec = self.process.spec()
except Exception as exc:
raise RuntimeError(f"Error while building the spec for process '{self.process_name}': '{exc!r}.'") from exc
def build_node_tree(self):
"""Returns the docutils node tree."""
process_node = addnodes.desc(desctype='class', domain='py', noindex=False, objtype='class')
process_node += self.build_signature()
process_node += self.build_content()
return [process_node]
def build_signature(self):
"""Returns the signature of the process."""
signature = addnodes.desc_signature(first=False, fullname=self.signature)
signature += addnodes.desc_annotation(text=self.annotation)
signature += addnodes.desc_addname(text=f'{self.module_name}.')
signature += addnodes.desc_name(text=self.class_name)
return signature
def build_content(self):
"""
Returns the main content (docstring, inputs, outputs) of the documentation.
"""
content = addnodes.desc_content()
content += nodes.paragraph(text=self.process.__doc__)
content += self.build_doctree(
title='Inputs:',
port_namespace=self.process_spec.inputs,
)
content += self.build_doctree(title='Outputs:', port_namespace=self.process_spec.outputs)
if hasattr(self.process_spec, 'get_outline'):
outline = self.process_spec.get_outline()
if outline is not None:
content += self.build_outline_doctree(outline=outline)
return content
def build_doctree(self, title, port_namespace):
"""
Returns a doctree for a given port namespace, including a title.
"""
paragraph = nodes.paragraph()
paragraph += nodes.strong(text=title)
namespace_doctree = self.build_portnamespace_doctree(port_namespace)
if namespace_doctree:
paragraph += namespace_doctree
else:
paragraph += nodes.paragraph(text='None defined.')
return paragraph
def build_portnamespace_doctree(self, port_namespace):
"""
Builds the doctree for a port namespace.
"""
from aiida.engine.processes.ports import InputPort, PortNamespace
if not port_namespace:
return None
result = nodes.bullet_list(bullet='*')
for name, port in sorted(port_namespace.items()):
item = nodes.list_item()
if _is_non_db(port) and self.HIDE_UNSTORED_INPUTS_FLAG in self.options:
continue
if isinstance(port, (InputPort, OutputPort)):
item.extend(self.build_port_content(name, port))
elif isinstance(port, PortNamespace):
item += addnodes.literal_strong(text=name)
item += nodes.Text(', ')
item += nodes.emphasis(text='Namespace')
if port.help is not None:
item += nodes.Text(' -- ')
item.extend(publish_doctree(port.help)[0].children)
sub_doctree = self.build_portnamespace_doctree(port)
if sub_doctree:
from sphinxcontrib.details.directive import details, summary
sub_item = details(opened=self.EXPAND_NAMESPACES_FLAG in self.options)
sub_item += summary(text='Namespace Ports')
sub_item += sub_doctree
item += sub_item
else:
raise NotImplementedError
result += item
return result
def build_port_content(self, name, port):
"""
Build the content that describes a single port.
"""
res = []
res.append(addnodes.literal_strong(text=name))
res.append(nodes.Text(', '))
res.append(nodes.emphasis(text=self.format_valid_types(port.valid_type)))
res.append(nodes.Text(', '))
res.append(nodes.Text('required' if port.required else 'optional'))
if _is_non_db(port):
res.append(nodes.Text(', '))
res.append(nodes.emphasis(text='non_db'))
if port.help:
res.append(nodes.Text(' -- '))
# publish_doctree returns <document: <paragraph...>>.
# Here we only want the content (children) of the paragraph.
res.extend(publish_doctree(port.help)[0].children)
return res
@staticmethod
def format_valid_types(valid_type):
"""Format valid types."""
try:
return valid_type.__name__
except AttributeError:
try:
return f"({', '.join(v.__name__ for v in valid_type)})"
except (AttributeError, TypeError):
return str(valid_type)
def build_outline_doctree(self, outline):
"""Build the doctree for a spec outline."""
paragraph = nodes.paragraph()
paragraph += nodes.strong(text='Outline:')
outline_str = '\n'.join(self.build_outline_lines(outline.get_description(), indent=0))
paragraph += nodes.literal_block(outline_str, outline_str)
return paragraph
def build_outline_lines(self, outline, indent):
"""Return a list of lines which describe the process outline."""
from collections.abc import Mapping, Iterable
indent_str = ' ' * indent
res = []
if isinstance(outline, str):
res.append(indent_str + outline)
else:
if isinstance(outline, Mapping):
for key, outline_part in outline.items():
res.append(indent_str + key)
res.extend(self.build_outline_lines(outline_part, indent=indent + 4))
else:
assert isinstance(outline, Iterable)
for outline_part in outline:
res.extend(self.build_outline_lines(outline_part, indent=indent))
return res
def _is_non_db(port):
return getattr(port, 'non_db', False)
|
the-stack_106_28924 | from .base import AkiObject
from ..data.company import Company
from typing import NamedTuple
from decimal import Decimal
class AkiCompany(AkiObject):
__tuple: Company = None
def __init__(self, company_tuple: NamedTuple):
self.__original = company_tuple
cnpj_basico = getattr(company_tuple, "cnpj_basico", "0")
razao_social = getattr(company_tuple, "razao_social", "")
natureza_legal = getattr(company_tuple, "natureza_legal", "0000")
qualificacao_responsavel = getattr(company_tuple, "qualificacao_responsavel", "00")
capital_social = str(getattr(company_tuple, "capital_social", "0.0")).replace(",", ".")
porte = getattr(company_tuple, "porte", "1")
if porte == "":
porte = "1"
ente_federativo = getattr(company_tuple, "ente_federativo", "")
self.__tuple = Company(
index=int(cnpj_basico),
cnpj_basico=str(cnpj_basico).ljust(14, "X"),
razao_social=str(razao_social),
natureza_legal=str(natureza_legal),
qualificacao_responsavel=str(qualificacao_responsavel),
capital_social=Decimal(capital_social),
porte=int(porte),
ente_federativo=str(ente_federativo)
)
@property
def tuple(self) -> Company:
return self.__tuple
|
the-stack_106_28925 | import io
import time
import picamera
import picamera.array
import cv2
from keras.applications.resnet50 import ResNet50
from keras.preprocessing.image import array_to_img
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
t1= time.clock()
model = ResNet50(weights='imagenet')
t2= time.clock()
print('load resnet50: %.3f s' % (t2 - t1))
WINDOW_WIDTH = 640 #854
WINDOW_HEIGHT = 480
ORG_WIDTH = 640 #1280
ORG_HEIGHT = 480 #720
windowName = 'Pi NoIR'
cv2.namedWindow(windowName)
cv2.resizeWindow(windowName, WINDOW_WIDTH, WINDOW_HEIGHT)
w_offset = int((ORG_WIDTH - ORG_HEIGHT)/2)
with picamera.PiCamera() as camera:
camera.start_preview()
camera.resolution = (ORG_WIDTH, ORG_HEIGHT)
time.sleep(2)
# str = 'not yet classified'
while True:
t1= time.clock()
with picamera.array.PiRGBArray(camera) as stream:
camera.capture(stream, 'bgr')
image = stream.array
if WINDOW_WIDTH != ORG_WIDTH:
image = cv2.resize(image, (WINDOW_WIDTH, WINDOW_HEIGHT), interpolation = cv2.INTER_AREA)
#cv2.putText(image, str,(0, CAMERA_HEIGHT - 30), cv2.FONT_HERSHEY_PLAIN, 2, (0,0,255))
cv2.imshow(windowName, image)
t2 = time.clock()
# print('capture image : %.3f s' % (t2 - t1))
key = cv2.waitKey(12)
#press Esc(27) to quit, press c(99) to classify
if key==27:
break
elif key==99:
print('classifying image...')
t2 = time.clock()
image = image[: ,w_offset:w_offset + ORG_HEIGHT, :]
image = cv2.resize(image, (224, 224), interpolation = cv2.INTER_AREA)
x = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
array_to_img(x).save('classified.jpg')
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
print('Predicted:', decode_predictions(preds))
# str = '{}'.format(decode_predictions(preds)[0][0][1:])
t3 = time.clock()
print('inference : %.3f s' % (t3 - t2))
cv2.destroyAllWindows() |
the-stack_106_28927 | import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name='colorsrc',
parent_name='contour.hoverlabel.font',
**kwargs
):
super(ColorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'info'),
**kwargs
)
|
the-stack_106_28928 | r"""
Implements the a multivariate Gaussian approximation to a uniform simplex
distribution.
The expected values may be computed explicitly for this distribution.
"""
import numpy as np
from smm.rvs.normalrv import NormalRV
from smm.rvs.basesimplexrv import BaseSimplexRV
from scipy.linalg import eigh
class NormalSimplexRV(NormalRV, BaseSimplexRV):
r"""
A class to generate points randomly from a normal approximation to a
simplex distribution in :math:`\mathbb{R}^m`.
It is initialised by providing a set of vertices, S. The random variable
is then a multi-variate Gaussian with mean
.. math:: \mu_S = \frac1{|S|}\sum_{v\in S} e_v
and variance
.. math::
\Sigma_S = \frac1{|S|}\sum_{v\in S}(e_v - \mu_S)(e_v - \mu_S)^t
= \frac1{|S|}\sum_{v\in S} e_ve_v^t - \mu_S\mu_S^t
The eigenvalues of :math:`\Sigma_S` are 0 and :math:`\frac1{|S|}` with
multiplicities :math:`m-k` and :math:`k` respectively, where
:math:`k=|S|-1`.
The support of the random variable is the k-dimensional plane containing
:math:`e_v` for :math:`v\in S`.
"""
def __init__(self, m, S, alpha=1.0):
"""
Initialises the NormalSimplexRV class.
This involves computing the defining matrix for a NormalRV and
handling inheritance from both BaseSimplexRV and NormalRV.
Parameters
----------
m : int
the number of hidden features.
S : list of elements in range(m)
the vertices of the simplex.
alpha : float
factor to multiply covariance by.
"""
# Note, there is a diamond of classes here, with BaseRV at the other
# vertex. So BaseRV.__init__ is called twice. This could be fixed
# by using super(), however this gets complicated because the various
# __init__'s take different arguments. As it is, so little is done
# in BaseRV.__init__ that running it twice has little penalty and no
# adverse consequences. I hope... :-)
BaseSimplexRV.__init__(self, m, S)
self.k = len(self.support) - 1
self.covar *= alpha # Modify covariance according to scaling factor
self.alpha = alpha
if self.k > 0:
v, M = eigh(self.covar, eigvals=(m-self.k, m-1))
M *= np.sqrt(v)
else:
M = np.zeros((m, 0))
NormalRV.__init__(self, self.mean, M.T)
# Check that we didn't make a mistake
assert np.allclose(self.M.T.dot(self.M), self.covar)
def __str__(self):
return BaseSimplexRV.__str__(self)
def __repr__(self):
return BaseSimplexRV.__repr__(self)
def __eq__(self, other):
"""
Two simplices are equal if they have the same ambient dimension,
the same list of (ordered) vertices and the same scaling factor.
"""
return BaseSimplexRV.__eq__(self, other) and self.alpha == other.alpha
def __hash__(self):
"""
Two simplices hash to the same value if they have the same ambient
dimension and the same set of vertices, notice that self.S is sorted
in __init__.
"""
return hash((self.m, self.S, self.alpha))
__all__ = ["NormalSimplexRV"]
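# Illustrative construction (sketch, relying only on attributes referenced above):
#   rv = NormalSimplexRV(m=4, S=[0, 1, 3])  # 2-simplex on vertices e_0, e_1, e_3 in R^4
#   rv.mean is the barycentre (e_0 + e_1 + e_3) / 3 and rv.covar has rank k = 2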
|
the-stack_106_28929 |
import sys
from os.path import join as path_join
from os.path import dirname
from sys import path as sys_path
# assume script in brat tools/ directory, extend path to find sentencesplit.py
sys_path.append(path_join(dirname(__file__), '.'))
sys.path.append('.')
import torch
import argparse
import numpy as np
from features import Features
from sklearn.metrics import *
from torch.autograd import Variable
import rules
import fasttext
from model import NeuralClassifier
from config_ctc import parameters_ctc
from collections import Counter
from torch.optim import lr_scheduler
fasttext_model = fasttext.load_model('/root/Project/data/fasttext.bin')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if device.type == 'cuda':
torch.set_default_tensor_type('torch.cuda.FloatTensor')
RESOURCES = {
"train": "/root/Project/data/train_freq.txt",
"gigaword_word": "/root/Project/data/gigaword_gt_2.bin",
"gigaword_char": "/root/Project/data/gigaword_char_unique.bin",
"stackoverflow_char": "/root/Project/data/no_eng_char_uniq.bin",
"stackoverflow_word": "/root/Project/data/words.bin",
"cs": "/root/Project/data/sorted_semantic_scholar_words.txt"
}
def eval(predictions, gold_labels, phase):
# print(predictions)
print("--------------------",phase,"--------------------")
precision = round(precision_score(gold_labels, predictions) * 100.0, 4)
recall = round(recall_score(gold_labels, predictions) * 100.0, 4)
f1 = round(f1_score(gold_labels, predictions) * 100.0, 4)
print("P: ", precision, " R:", recall, " F: ", f1)
print(classification_report(gold_labels, predictions))
print("-------------------------------------------------")
def get_word_dict_pre_embeds(train_file, test_file):
word_id=0
id_to_word={}
word_to_id={}
word_to_vec={}
for line in open(train_file, encoding='utf-8'):
word=line.split()[0]
if word not in word_to_id:
word=word.strip()
word_to_id[word]=word_id
id_to_word[word_id]=word
word_to_vec[word]=fasttext_model[word]
word_id+=1
for line in open(test_file, encoding='utf-8'):
word=line.split()[0]
if word not in word_to_id:
word=word.strip()
word_to_id[word]=word_id
id_to_word[word_id]=word
word_to_vec[word]=fasttext_model[word]
word_id+=1
vocab_size = len(word_to_id)
return vocab_size, word_to_id, id_to_word, word_to_vec
def popluate_word_id_from_file(file_name, word_to_id):
list_of_ids=[]
for line in open(file_name, encoding='utf-8'):
word=line.split()[0].strip()
word_one_hot_vec= np.zeros(len(word_to_id))
word_id=word_to_id[word]
word_one_hot_vec[word_id]=1.0
# list_of_ids.append(word_one_hot_vec)
list_of_ids.append(word_id)
arr2d = np.array(list_of_ids)
# print(arr2d.shape)
return arr2d
def popluate_word_id_from_token(token, word_to_id):
list_of_ids=[]
word=token.split()[0].strip()
if word not in word_to_id:
word= "**UNK**"
word_one_hot_vec= np.zeros(len(word_to_id))
word_id=word_to_id[word]
word_one_hot_vec[word_id]=1.0
# list_of_ids.append(word_one_hot_vec)
list_of_ids.append(word_id)
arr2d = np.array(list_of_ids)
# print(arr2d.shape)
return arr2d
def get_train_test_word_id(train_file, test_file, word_to_id):
train_ids=popluate_word_id_from_file(train_file, word_to_id)
test_ids=popluate_word_id_from_file(test_file, word_to_id)
return train_ids, test_ids
def prediction_on_token_input(ctc_ip_token, ctc_classifier, vocab_size, word_to_id, id_to_word, word_to_vec, features):
ctc_tokens, ctc_features, ctc_labels = features.get_features_from_token(ctc_ip_token, False)
ctc_ids=popluate_word_id_from_token(ctc_ip_token, word_to_id)
ctc_x = Variable(torch.FloatTensor(ctc_features))
ctc_x_words = Variable(torch.LongTensor(ctc_ids))
ctc_y = Variable(torch.LongTensor(ctc_labels))
ctc_scores, ctc_preds = ctc_classifier(ctc_features, ctc_x_words)
preds=[]
# fp = open("ctc_ops.tsv", "w")
# fp.write("token"+"\t"+"true_label"+"\t"+"pred_label"+"\t"+"scores"+"\n")
for tok, gold, pred, sc in zip(ctc_tokens, ctc_labels, ctc_preds, ctc_scores):
if rules.IS_NUMBER(tok):
pred=1
if rules.IS_URL(tok):
pred=0
if pred==1:
# print(tok, pred)
pred=1
preds.append(pred)
# for tok, gold, pred, sc in zip(ctc_tokens, ctc_labels, ctc_preds, ctc_scores):
# fp.write(tok + "\t" + str(pred) + "\n")
# fp.close()
# print(preds[0])
return preds[0]
def prediction_on_file_input(ctc_input_file, ctc_classifier, vocab_size, word_to_id, id_to_word, word_to_vec, features):
ctc_tokens, ctc_features, ctc_labels = features.get_features(ctc_input_file, False)
ctc_ids=popluate_word_id_from_file(ctc_input_file, word_to_id)
ctc_x = Variable(torch.FloatTensor(ctc_features))
ctc_x_words = Variable(torch.LongTensor(ctc_ids))
ctc_y = Variable(torch.LongTensor(ctc_labels))
ctc_scores, ctc_preds = ctc_classifier(ctc_features, ctc_x_words)
preds=[]
fp = open("ctc_ops.tsv", "w", encoding='utf-8')
# fp.write("token"+"\t"+"true_label"+"\t"+"pred_label"+"\t"+"scores"+"\n")
for tok, gold, pred, sc in zip(ctc_tokens, ctc_labels, ctc_preds, ctc_scores):
if rules.IS_NUMBER(tok):
pred=1
if rules.IS_URL(tok):
pred=0
if pred==1:
# print(tok, pred)
pred=1
preds.append(pred)
for tok, gold, pred, sc in zip(ctc_tokens, ctc_labels, ctc_preds, ctc_scores):
fp.write(tok + "\t" + str(pred) + "\n")
fp.close()
def train_ctc_model(train_file, test_file):
""" Function of training Code Recognizer """
# training and test dataset (default)
train_file = parameters_ctc['train_file']
test_file = parameters_ctc['test_file']
# extract features from two language models trained on Gigaword and StackOverflow
features = Features(RESOURCES)
train_tokens, train_features, train_labels = features.get_features(train_file, True)
test_tokens, test_features, test_labels = features.get_features(test_file, False)
# fastText embedding
vocab_size, word_to_id, id_to_word, word_to_vec = get_word_dict_pre_embeds(train_file, test_file)
train_ids, test_ids = get_train_test_word_id(train_file, test_file, word_to_id)
# transform each ngram probability into a k-dimensional vector using Gaussian binning
word_embeds = np.random.uniform(-np.sqrt(0.06), np.sqrt(0.06), (vocab_size, parameters_ctc['word_dim']))
for word in word_to_vec:
word_embeds[word_to_id[word]]=word_to_vec[word]
# concatenate the outputs with fastText embedding
ctc_classifier = NeuralClassifier(len(train_features[0]), max(train_labels) + 1, vocab_size, word_embeds)
ctc_classifier.to(device)
# binary classifier
optimizer = torch.optim.Adam(ctc_classifier.parameters(), lr=parameters_ctc["LR"])
step_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.8)
# prepare dataset
train_x = Variable(torch.FloatTensor(train_features).to(device))
train_x_words = Variable(torch.LongTensor(train_ids).to(device))
train_y = Variable(torch.LongTensor(train_labels).to(device))
test_x = Variable(torch.FloatTensor(test_features).to(device))
test_x_words = Variable(torch.LongTensor(test_ids).to(device))
test_y = Variable(torch.LongTensor(test_labels).to(device))
# training
for epoch in range(parameters_ctc['epochs']):
loss = ctc_classifier.CrossEntropy(train_features, train_x_words, train_y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_scores, train_preds = ctc_classifier(train_features, train_x_words)
test_scores, test_preds = ctc_classifier(test_features, test_x_words)
eval(test_preds, test_labels, "test")
return ctc_classifier, vocab_size, word_to_id, id_to_word, word_to_vec, features
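# The comments above mention transforming each n-gram probability into a k-dimensional
# vector via Gaussian binning; that step is implemented inside the Features class, which
# is not shown in this file. The helper below is only a sketch of such a binning: the
# bin centres and width are illustrative assumptions, not the project's actual settings.
def gaussian_binning_sketch(value, num_bins=10, low=0.0, high=1.0):
    """Map a scalar in [low, high] to a soft membership vector over num_bins Gaussian bins."""
    centers = np.linspace(low, high, num_bins)
    sigma = (high - low) / num_bins  # assumed bin width
    scores = np.exp(-0.5 * ((value - centers) / sigma) ** 2)
    return scores / scores.sum()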
if __name__ == '__main__':
train_file=parameters_ctc['train_file']
test_file=parameters_ctc['test_file']
    ctc_classifier, vocab_size, word_to_id, id_to_word, word_to_vec, features = train_ctc_model(train_file, test_file)
ip_token = "app"
op_ctc = prediction_on_token_input(ip_token, ctc_classifier, vocab_size, word_to_id, id_to_word, word_to_vec, features)
print(op_ctc)
ip_token = "commit-2"
op_ctc =prediction_on_token_input(ip_token, ctc_classifier, vocab_size, word_to_id, id_to_word, word_to_vec, features)
print(op_ctc) |
the-stack_106_28930 | import unittest
from pathlib import Path
from filecmp import cmp
from striprtf.striprtf import rtf_to_text
RTF_DIR = Path.cwd() / 'tests' / 'rtf'
TEXT_DIR = Path.cwd() / 'tests' / 'text'
class TestSimple(unittest.TestCase):
def test_sample(self):
example_rtf = RTF_DIR / 'sample_3.rtf'
example_txt = TEXT_DIR / 'sample_3.txt'
with example_rtf.open() as source:
result = rtf_to_text(source.read())
print(result)
with example_txt.open() as destination:
self.assertEqual(destination.read(), result)
|
the-stack_106_28932 | import datetime
import pandas as pd
import numpy as np
from lifetimes.utils import summary_data_from_transaction_data
df_columns = pd.read_table('../data/db_dict.txt')
def txt_to_df(directory):
headers = df_columns['field'][df_columns['table'] == 'atk_transaction'].tolist()
df = pd.read_table(directory, header=None, names=headers, dtype = {'customer_id': str})
df['cnt_trans'] = 1
df = df[df['customer_id'] != '0']
df = df[df['customer_id'] != 'UNKOWN']
return df
def df_transform(df, site_id=0):
'''
input:
site_id = type int
'''
# splits df by site if site_id is provided
if site_id != 0:
df = df[df.site_id == site_id]
df.sort_values(by='created',ascending=True, axis=0, inplace=True)
# check if created column is datetime, else convert
    if not isinstance(df.created.iloc[0], datetime.date):
df['created'] = pd.to_datetime(df['created'])
df['created'] = df['created'].dt.date
    # drop unused columns (DataFrame.ix has been removed from pandas; use .loc)
    df = df.loc[:, ['customer_id', 'site_id', 'created', 'cnt_trans', 'amount_usd']]
return df
def test_holdout(summary_train_df, holdoutratio=0.2, high_freq_split=4):
'''
    Splits summary_train_df into a holdout set on which the trained model can be evaluated. (Not implemented yet.)
'''
pass
def train_test_split(df, first_purch_weeks, train_end_week=39, train_start_date=''):
'''
    train_start_date = start of the training data, needs to be type string "yyyy-mm-dd"
    first_purch_weeks = only customers with a transaction within the first purch weeks (counted from train_start_date) are kept for training
    train_end_week = number of weeks after t0 used to calculate T and to split data into train and test, type int
    Set up raw data columns as input for the model
    - t0 = 2015-04-18
    - t = 12 weeks (similar to paper)
    - first_purch
    - T = t - first_purch; or time a customer could make a repeated transaction
    - x = repeated transactions (= total frequency - 1), 0 if no repeated transaction occurred
    - t_x = recency, time of last transaction - time of first purchase
    Exclude any customers that don't have a purchase within the first purchase weeks
'''
if train_start_date == '':
df['t0'] = df.created.min()
else:
df['t0'] = pd.to_datetime(train_start_date)
df['t0'] = df['t0'].dt.date
# split df into train and test based on transaction dates
train_end_date = (df['t0'] + datetime.timedelta(days=train_end_week*7)).iloc[0]
test_df = df[df.created > train_end_date]
# create column to split test data by predicted weeks
test_df['test_weeks'] = (test_df['created'] - train_end_date).apply(lambda x: x.days/ 7.)
# import pdb; pdb.set_trace()
train_df = df[df.created <= train_end_date]
# summary_train_df = summary_data_from_transaction_data(train_df, 'customer_id', 'created', 'amount_usd', freq='D', observation_period_end=train_end_date.strftime('%Y-%m-%d'))
#
# create input data from train_df, counting all customer transactions and other required inputs
train_df['first_purch'] = train_df['created']
train_df['t_x'] = train_df['created']
train_df.sort_values(by='created',ascending=True, axis=0, inplace=True)
summary_train_df = train_df.groupby(['customer_id', 'site_id', 't0']).agg({'cnt_trans' : np.sum, 'first_purch' : np.min, 't_x' : np.max, 'amount_usd': np.mean}).reset_index()
# summary_train_df, summary_holdout_df = train_holdout(summary_train_df)
# create cohort df for customers that have transactions in the first purch weeks
first_purch_cutoff = summary_train_df['t0'].iloc[0] + datetime.timedelta(days=first_purch_weeks*7)
summary_train_df = summary_train_df[summary_train_df.first_purch < first_purch_cutoff]
# convert all transactions to repeated transactions
summary_train_df['cnt_trans'] = summary_train_df['cnt_trans'] - 1
# T: time (days) to make repeated transactions in training period
summary_train_df['T'] = (train_end_date - summary_train_df['first_purch']).apply(lambda x: x.days/ 7.)
# t_x: time (days) of last transaction
summary_train_df['t_x'] = (summary_train_df['t_x'] - summary_train_df['first_purch']).apply(lambda x: x.days/ 7.)
summary_train_df = summary_train_df.rename(columns={'cnt_trans': 'frequency', 't_x': 'recency'})
return summary_train_df, test_df, first_purch_weeks, train_end_date
def undersampling(train_df, threshold=0.2, split=2):
train_df_minor = train_df[train_df.x < split]
train_df_major = train_df[train_df.x >= split]
undersampled_df = train_df
ratio = len(train_df_major) / float(len(undersampled_df))
np.random.seed(10)
while ratio < threshold:
drop_indices = np.random.choice(train_df_minor.index, 5, replace=False)
undersampled_df = undersampled_df.drop(drop_indices)
train_df_minor = train_df_minor.drop(drop_indices)
ratio = len(train_df_major) / float(len(undersampled_df))
return undersampled_df
def oversampling(train_df, threshold=0.15, split=3):
train_df_minor = train_df[train_df.x < split]
train_df_major = train_df[train_df.x >= split]
oversampled_df = train_df
ratio = len(train_df_major) / float(len(oversampled_df))
np.random.seed(10)
while ratio < threshold:
train_df_major = oversampled_df[oversampled_df.x >= split]
oversampled_df = oversampled_df.append(train_df_major.sample(n=5, replace=True))
ratio = len(train_df_major) / float(len(oversampled_df))
return oversampled_df
# def train_test_split_transactions_only(df, train_start_date='', first_purch_weeks, train_end_week=52):
# if train_start_date == '':
# df['t0'] = df.created.min()
# else:
# df['t0'] = pd.to_datetime(train_start_date)
# df['t0'] = df['t0'].dt.date
# # split df into train and test based on transaction dates
# train_end_date = (df['t0'] + datetime.timedelta(days=train_end_week*7)).iloc[0]
# test_df = df[df.created > train_end_date]
# # create column to split test data by predicted weeks
# test_df['test_weeks'] = (test_df['created'] - train_end_date).apply(lambda x: x.days/ 7.)
#
# train_df = df[df.created <= train_end_date]
# # create input data from train_df, counting all customer transactions and other required inputs
# train_df['first_purch'] = train_df['created']
# train_df['t_x'] = train_df['created']
#
# train_df.sort_values(by='created',ascending=True, axis=0, inplace=True)
def split_by_freq(train_df, freq_split):
'''
splits train_df into high_freq and low_freq, to better train the model
'''
train_df_high_freq = train_df[train_df['frequency'] > freq_split]
train_df_low_freq = train_df[train_df['frequency'] <= freq_split]
return train_df_high_freq, train_df_low_freq
def split_test_df_by_pred_period(df, pred_weeks):
'''
splits df into only the transactions that we are trying to predict
used to calculate RRS for example
'''
df = df[df['test_weeks'] <= pred_weeks]
test_transactions = df.groupby(['customer_id'])['cnt_trans'].sum().reset_index()
test_dict = test_transactions.set_index('customer_id')['cnt_trans'].to_dict()
return test_dict
# if __name__ == '__main__':
#
# df = txt_to_df('../data/atk_transaction.txt')
# transformed_df = df_transform(df, 23395)
# train_df, test_df, first_purch_weeks = train_test_split(transformed_df, first_purch_weeks=2, train_end_week=10)
# check ID '415306753', '382184409'
# export_df_to_csv(train_df)
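# Sketch (added for illustration): how the summary produced by train_test_split could feed
# a BTYD model from the lifetimes package (whose utils module is imported above). Column
# names match the ones created above (frequency, recency, T); the choice of BetaGeoFitter
# is an assumption, not necessarily the model used in this project.
def fit_bg_nbd_sketch(summary_train_df, pred_weeks=12):
    from lifetimes import BetaGeoFitter
    bgf = BetaGeoFitter(penalizer_coef=0.0)
    bgf.fit(summary_train_df['frequency'], summary_train_df['recency'], summary_train_df['T'])
    # expected number of repeated transactions per customer over the next pred_weeks weeks
    return bgf.conditional_expected_number_of_purchases_up_to_time(
        pred_weeks, summary_train_df['frequency'], summary_train_df['recency'], summary_train_df['T'])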
|
the-stack_106_28933 | # -*- mode: python; coding: utf-8 -*-
# Copyright 2019 the .Net Foundation
# Distributed under the terms of the revised (3-clause) BSD license.
"""Note! This test suite will hit the network!
"""
import pytest
from xml.etree import ElementTree
from .. import Client
INF = float('inf')
NAN = float('nan')
def _assert_xml_trees_equal(path, e1, e2, care_text_tags):
"Derived from https://stackoverflow.com/a/24349916/3760486"
assert e1.tag == e2.tag, \
'at XML path {0}, tags {1} and {2} differed'.format(path, e1.tag, e2.tag)
# We only sometimes care about this; often it's just whitespace
if e1.tag in care_text_tags:
assert e1.text == e2.text, \
'at XML path {0}, texts {1!r} and {2!r} differed'.format(path, e1.text, e2.text)
# We never care about this, right?
#assert e1.tail == e2.tail, \
# 'at XML path {0}, tails {1!r} and {2!r} differed'.format(path, e1.tail, e2.tail)
assert e1.attrib == e2.attrib, \
'at XML path {0}, attributes {1!r} and {2!r} differed'.format(path, e1.attrib, e2.attrib)
assert len(e1) == len(e2), \
'at XML path {0}, number of children {1} and {2} differed'.format(path, len(e1), len(e2))
subpath = '{0}>{1}'.format(path, e1.tag)
for c1, c2 in zip (e1, e2):
_assert_xml_trees_equal(subpath, c1, c2, care_text_tags)
def assert_xml_trees_equal(e1, e2, care_text_tags=()):
_assert_xml_trees_equal('(root)', e1, e2, care_text_tags)
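# Minimal smoke check for the helper above, added as an illustrative sketch rather than
# part of the original suite: whitespace-only differences are ignored by default, while
# an attribute change trips the assertion.
def test_xml_trees_equal_helper_smoke():
    a = ElementTree.fromstring('<Folder Name="x"><Place RA="0" /></Folder>')
    b = ElementTree.fromstring('<Folder Name="x">\n  <Place RA="0" />\n</Folder>')
    assert_xml_trees_equal(a, b)
    c = ElementTree.fromstring('<Folder Name="y"><Place RA="0" /></Folder>')
    with pytest.raises(AssertionError):
        assert_xml_trees_equal(a, c)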
@pytest.fixture
def client():
return Client()
@pytest.fixture
def login(client):
"Return a valid login request object."
return client.login()
def test_login_basic(login):
assert login.invalidity_reason() is None
login.send()
@pytest.fixture
def showimage(client):
"Return a valid ShowImage request object."
return client.show_image('http://localhost/image.jpg', 'name')
SHOWIMAGE_BAD_SETTINGS = [
('credits', b'\xff not unicodable'),
('credits_url', u'http://olé/not_ascii_unicode_url'),
('credits_url', b'http://host/\x81/not_ascii_bytes_url'),
('credits_url', 'not_absolute_url'),
('dec_deg', -90.00001),
('dec_deg', 90.00001),
('dec_deg', NAN),
('dec_deg', INF),
('dec_deg', 'not numeric'),
('image_url', None),
('image_url', u'http://olé/not_ascii_unicode_url'),
('image_url', b'http://host/\x81/not_ascii_bytes_url'),
('image_url', 'not_absolute_url'),
('name', None),
('ra_deg', NAN),
('ra_deg', INF),
('ra_deg', 'not numeric'),
('reverse_parity', 1), # only bools allowed
('reverse_parity', 't'), # only bools allowed
('rotation_deg', NAN),
('rotation_deg', INF),
('rotation_deg', 'not numeric'),
('scale_arcsec', 0.),
('scale_arcsec', NAN),
('scale_arcsec', INF),
('scale_arcsec', 'not numeric'),
('thumbnail_url', u'http://olé/not_ascii_unicode_url'),
('thumbnail_url', b'http://host/\x81/not_ascii_bytes_url'),
('thumbnail_url', 'not_absolute_url'),
('x_offset_pixels', NAN),
('x_offset_pixels', INF),
('x_offset_pixels', 'not numeric'),
('y_offset_pixels', NAN),
('y_offset_pixels', INF),
('y_offset_pixels', 'not numeric'),
]
@pytest.mark.parametrize(('attr', 'val'), SHOWIMAGE_BAD_SETTINGS)
def test_showimage_invalid_settings(showimage, attr, val):
setattr(showimage, attr, val)
assert showimage.invalidity_reason() is not None
SHOWIMAGE_GOOD_SETTINGS = [
('credits', b'unicodable bytes'),
('credits', u'unicode é'),
('credits', None),
('credits_url', b'http://localhost/absolute_bytes_url'),
('credits_url', u'//localhost/absolute_unicode_url'),
('credits_url', None),
('dec_deg', -90),
('dec_deg', 90),
('image_url', b'http://localhost/absolute_bytes_url'),
('image_url', u'//localhost/absolute_unicode_url'),
('name', b'unicodable bytes'),
('name', u'unicode é'),
('ra_deg', -720.),
('ra_deg', 980.),
('reverse_parity', False),
('reverse_parity', True),
('rotation_deg', -1),
('scale_arcsec', -1.),
('thumbnail_url', b'http://localhost/absolute_bytes_url'),
('thumbnail_url', u'//localhost/absolute_unicode_url'),
('thumbnail_url', None),
('x_offset_pixels', -1.),
('x_offset_pixels', 0),
('y_offset_pixels', -1.),
('y_offset_pixels', 0),
]
@pytest.mark.parametrize(('attr', 'val'), SHOWIMAGE_GOOD_SETTINGS)
def test_showimage_valid_settings(showimage, attr, val):
setattr(showimage, attr, val)
assert showimage.invalidity_reason() is None
SHOWIMAGE_CARE_TEXT_TAGS = set(('Credits', 'CreditsUrl'))
def _make_showimage_result(credurl='', name='name'):
return '''<?xml version="1.0" encoding="UTF-8"?>
<Folder Name="{name}" Group="Goto">
<Place Name="{name}" RA="0" Dec="0" ZoomLevel="0" DataSetType="Sky" Opacity="100"
Thumbnail="" Constellation="">
<ForegroundImageSet>
<ImageSet DataSetType="Sky" BandPass="Visible" Url="http://localhost/image.jpg"
TileLevels="0" WidthFactor="2" Rotation="0" Projection="SkyImage"
FileType=".tif" CenterY="0" CenterX="0" BottomsUp="False" OffsetX="0"
OffsetY="0" BaseTileLevel="0" BaseDegreesPerTile="0.000277777777777778">
<Credits></Credits>
<CreditsUrl>{credurl}</CreditsUrl>
</ImageSet>
</ForegroundImageSet>
</Place>
</Folder>
'''.format(credurl=credurl, name=name)
SHOWIMAGE_RESULTS = [
(dict(), _make_showimage_result()),
(dict(name='test&xml"esc'), _make_showimage_result(name='test&xml"esc')),
(dict(credits_url='http://a/b&c'), _make_showimage_result(credurl='http://a/b&c')),
]
@pytest.mark.parametrize(('attrs', 'expected'), SHOWIMAGE_RESULTS)
def test_showimage_results(showimage, attrs, expected):
expected = ElementTree.fromstring(expected)
for name, value in attrs.items():
setattr(showimage, name, value)
found_text = showimage.send()
found = ElementTree.fromstring(found_text)
assert_xml_trees_equal(expected, found, SHOWIMAGE_CARE_TEXT_TAGS)
@pytest.fixture
def tileimage(client):
"Return a valid TileImage request object."
return client.tile_image('http://www.spitzer.caltech.edu/uploaded_files/images/0009/0848/sig12-011.jpg')
TILEIMAGE_BAD_SETTINGS = [
('credits', b'\xff not unicodable'),
('credits_url', u'http://olé/not_ascii_unicode_url'),
('credits_url', b'http://host/\x81/not_ascii_bytes_url'),
('credits_url', 'not_absolute_url'),
('dec_deg', -90.00001),
('dec_deg', 90.00001),
('dec_deg', NAN),
('dec_deg', INF),
('dec_deg', 'not numeric'),
('image_url', None),
('image_url', u'http://olé/not_ascii_unicode_url'),
('image_url', b'http://host/\x81/not_ascii_bytes_url'),
('image_url', 'not_absolute_url'),
('ra_deg', NAN),
('ra_deg', INF),
('ra_deg', 'not numeric'),
('rotation_deg', NAN),
('rotation_deg', INF),
('rotation_deg', 'not numeric'),
('scale_deg', 0.),
('scale_deg', NAN),
('scale_deg', INF),
('scale_deg', 'not numeric'),
('thumbnail_url', u'http://olé/not_ascii_unicode_url'),
('thumbnail_url', b'http://host/\x81/not_ascii_bytes_url'),
('thumbnail_url', 'not_absolute_url'),
('x_offset_deg', NAN),
('x_offset_deg', INF),
('x_offset_deg', 'not numeric'),
('y_offset_deg', NAN),
('y_offset_deg', INF),
('y_offset_deg', 'not numeric'),
]
@pytest.mark.parametrize(('attr', 'val'), TILEIMAGE_BAD_SETTINGS)
def test_tileimage_invalid_settings(tileimage, attr, val):
setattr(tileimage, attr, val)
assert tileimage.invalidity_reason() is not None
TILEIMAGE_GOOD_SETTINGS = [
('credits', b'unicodable bytes'),
('credits', u'unicode é'),
('credits', None),
('credits_url', b'http://localhost/absolute_bytes_url'),
('credits_url', u'//localhost/absolute_unicode_url'),
('credits_url', None),
('dec_deg', 90),
('dec_deg', 90),
('dec_deg', None),
('image_url', b'http://localhost/absolute_bytes_url'),
('image_url', u'//localhost/absolute_unicode_url'),
('ra_deg', -720.),
('ra_deg', 980.),
('ra_deg', None),
('rotation_deg', -1),
('rotation_deg', None),
('scale_deg', -1.),
('scale_deg', None),
('thumbnail_url', b'http://localhost/absolute_bytes_url'),
('thumbnail_url', u'//localhost/absolute_unicode_url'),
('thumbnail_url', None),
('x_offset_deg', -1.),
('x_offset_deg', 0),
('x_offset_deg', None),
('y_offset_deg', -1.),
('y_offset_deg', 0),
('y_offset_deg', None),
]
@pytest.mark.parametrize(('attr', 'val'), TILEIMAGE_GOOD_SETTINGS)
def test_tileimage_valid_settings(tileimage, attr, val):
setattr(tileimage, attr, val)
assert tileimage.invalidity_reason() is None
TILEIMAGE_CARE_TEXT_TAGS = set(('Credits', 'CreditsUrl'))
def _make_tileimage_result(credits='', credurl='', ident=None, name='Image File'):
return '''<Folder Name="{name}" Group="Explorer">
<Place Name="{name}" RA="0" Dec="0" ZoomLevel="32768" DataSetType="Sky"
Opacity="100" Thumbnail="http://www.worldwidetelescope.org/wwtweb/tilethumb.aspx?name={ident}"
Constellation="">
<ForegroundImageSet>
<ImageSet DataSetType="Sky" Name="{name}" BandPass="Visible"
Url="http://www.worldwidetelescope.org/wwtweb/GetTile.aspx?q={{1}},{{2}},{{3}},{ident}"
TileLevels="5" WidthFactor="1" Rotation="0" Projection="Tan" FileType=".png"
CenterY="0" CenterX="0" BottomsUp="False" OffsetX="0" OffsetY="0"
BaseTileLevel="0" BaseDegreesPerTile="8192">
<Credits>{credits}</Credits>
<CreditsUrl>{credurl}</CreditsUrl>
<ThumbnailUrl>http://www.worldwidetelescope.org/wwtweb/tilethumb.aspx?name={ident}</ThumbnailUrl>
</ImageSet>
</ForegroundImageSet>
</Place>
</Folder>
'''.format(credits=credits, credurl=credurl, ident=ident, name=name)
TILEIMAGE_RESULTS = [
(dict(), _make_tileimage_result(
credits = ' NASA/JPL-Caltech',
credurl = 'http://www.spitzer.caltech.edu/images/5259-sig12-011-The-Helix-Nebula-Unraveling-at-the-Seams',
ident = '1176481368',
name = 'Helix Nebula',
)),
]
@pytest.mark.parametrize(('attrs', 'expected'), TILEIMAGE_RESULTS)
def test_tileimage_results(tileimage, attrs, expected):
expected = ElementTree.fromstring(expected)
for name, value in attrs.items():
setattr(tileimage, name, value)
found_text = tileimage.send()
found = ElementTree.fromstring(found_text)
assert_xml_trees_equal(expected, found, TILEIMAGE_CARE_TEXT_TAGS)
|
the-stack_106_28934 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import threading
import sys
from modules.exception.parameternotfoundexception import ParameterNotFoundException
from modules.subscriber import eventsubscriber
from modules.publisher import cartridgeagentpublisher
from modules.event.instance.notifier.events import *
from modules.event.tenant.events import *
from modules.event.topology.events import *
from modules.tenant.tenantcontext import *
from modules.topology.topologycontext import *
from modules.datapublisher.logpublisher import *
from modules.config import cartridgeagentconfiguration
from modules.extensions import defaultextensionhandler
class CartridgeAgent(threading.Thread):
extension_handler = defaultextensionhandler.DefaultExtensionHandler()
def __init__(self):
threading.Thread.__init__(self)
mb_ip = cartridgeagentconfiguration.CartridgeAgentConfiguration().read_property(cartridgeagentconstants.MB_IP)
mb_port = cartridgeagentconfiguration.CartridgeAgentConfiguration().read_property(cartridgeagentconstants.MB_PORT)
self.__instance_event_subscriber = eventsubscriber.EventSubscriber(
cartridgeagentconstants.INSTANCE_NOTIFIER_TOPIC,
mb_ip,
mb_port)
self.__tenant_event_subscriber = eventsubscriber.EventSubscriber(
cartridgeagentconstants.TENANT_TOPIC,
mb_ip,
mb_port)
self.__topology_event_subscriber = eventsubscriber.EventSubscriber(
cartridgeagentconstants.TOPOLOGY_TOPIC,
mb_ip,
mb_port)
self.__tenant_context_initialized = False
self.log_publish_manager = None
self.terminated = False
self.log = LogFactory().get_log(__name__)
self.cartridge_agent_config = CartridgeAgentConfiguration()
def run(self):
self.log.info("Starting Cartridge Agent...")
#Check if required properties are set
self.validate_required_properties()
#Start instance notifier listener thread
self.subscribe_to_topics_and_register_listeners()
#Start topology event receiver thread
self.register_topology_event_listeners()
#Start tenant event receiver thread
self.register_tenant_event_listeners()
        #wait for instance spawned event
while not self.cartridge_agent_config.initialized:
self.log.debug("Waiting for cartridge agent to be initialized...")
time.sleep(1)
#Execute instance started shell script
CartridgeAgent.extension_handler.on_instance_started_event()
#Publish instance started event
cartridgeagentpublisher.publish_instance_started_event()
#Execute start servers extension
try:
CartridgeAgent.extension_handler.start_server_extension()
except:
self.log.exception("Error processing start servers event")
#Wait for all ports to be active
cartridgeagentutils.wait_until_ports_active(
self.cartridge_agent_config.listen_address,
self.cartridge_agent_config.ports,
int(self.cartridge_agent_config.read_property("port.check.timeout", critical=False))
)
# check if artifact management is required before publishing instance activated event
repo_url = self.cartridge_agent_config.repo_url
if repo_url is None or str(repo_url).strip() == "":
self.log.info("No artifact repository found")
CartridgeAgent.extension_handler.on_instance_activated_event()
cartridgeagentpublisher.publish_instance_activated_event()
else:
self.log.info("Artifact repository found, waiting for artifact updated event to checkout artifacts: [repo_url] %s", repo_url)
persistence_mappping_payload = self.cartridge_agent_config.persistence_mappings
if persistence_mappping_payload is not None:
CartridgeAgent.extension_handler.volume_mount_extension(persistence_mappping_payload)
# start log publishing thread
if DataPublisherConfiguration.get_instance().enabled:
log_file_paths = self.cartridge_agent_config.log_file_paths
if log_file_paths is None:
self.log.exception("No valid log file paths found, no logs will be published")
else:
self.log_publish_manager = LogPublisherManager(log_file_paths)
self.log_publish_manager.start()
while not self.terminated:
time.sleep(1)
if DataPublisherConfiguration.get_instance().enabled:
self.log_publish_manager.terminate_all_publishers()
def terminate(self):
"""
Allows the CartridgeAgent thread to be terminated
:return: void
"""
self.terminated = True
def validate_required_properties(self):
"""
Checks if required properties are set
:return: void
"""
#PARAM_FILE_PATH
try:
self.cartridge_agent_config.read_property(cartridgeagentconstants.PARAM_FILE_PATH)
except ParameterNotFoundException:
self.log.error("System property not found: %r" % cartridgeagentconstants.PARAM_FILE_PATH)
return
#EXTENSIONS_DIR
try:
self.cartridge_agent_config.read_property(cartridgeagentconstants.EXTENSIONS_DIR)
except ParameterNotFoundException:
self.log.error("System property not found: %r" % cartridgeagentconstants.EXTENSIONS_DIR)
return
def subscribe_to_topics_and_register_listeners(self):
self.log.debug("Starting instance notifier event message receiver thread")
self.__instance_event_subscriber.register_handler("ArtifactUpdatedEvent", self.on_artifact_updated)
self.__instance_event_subscriber.register_handler("InstanceCleanupMemberEvent", self.on_instance_cleanup_member)
self.__instance_event_subscriber.register_handler("InstanceCleanupClusterEvent", self.on_instance_cleanup_cluster)
self.__instance_event_subscriber.start()
self.log.info("Instance notifier event message receiver thread started")
# wait till subscribed to continue
while not self.__instance_event_subscriber.is_subscribed():
time.sleep(2)
def on_artifact_updated(self, msg):
event_obj = ArtifactUpdatedEvent.create_from_json(msg.payload)
CartridgeAgent.extension_handler.on_artifact_updated_event(event_obj)
def on_instance_cleanup_member(self, msg):
member_in_payload = self.cartridge_agent_config.member_id
event_obj = InstanceCleanupMemberEvent.create_from_json(msg.payload)
member_in_event = event_obj.member_id
if member_in_payload == member_in_event:
CartridgeAgent.extension_handler.on_instance_cleanup_member_event(event_obj)
def on_instance_cleanup_cluster(self, msg):
event_obj = InstanceCleanupClusterEvent.create_from_json(msg.payload)
cluster_in_payload = self.cartridge_agent_config.cluster_id
cluster_in_event = event_obj.cluster_id
instance_in_payload = self.cartridge_agent_config.cluster_instance_id
instance_in_event = event_obj.cluster_instance_id
if cluster_in_event == cluster_in_payload and instance_in_payload == instance_in_event:
CartridgeAgent.extension_handler.on_instance_cleanup_cluster_event(event_obj)
def register_topology_event_listeners(self):
self.log.debug("Starting topology event message receiver thread")
self.__topology_event_subscriber.register_handler("MemberActivatedEvent", self.on_member_activated)
self.__topology_event_subscriber.register_handler("MemberTerminatedEvent", self.on_member_terminated)
self.__topology_event_subscriber.register_handler("MemberSuspendedEvent", self.on_member_suspended)
self.__topology_event_subscriber.register_handler("CompleteTopologyEvent", self.on_complete_topology)
self.__topology_event_subscriber.register_handler("MemberStartedEvent", self.on_member_started)
self.__topology_event_subscriber.register_handler("MemberCreatedEvent", self.on_member_created)
self.__topology_event_subscriber.register_handler("MemberInitializedEvent", self.on_member_initialized)
self.__topology_event_subscriber.start()
self.log.info("Cartridge agent topology receiver thread started")
def on_member_created(self, msg):
self.log.debug("Member created event received: %r" % msg.payload)
def on_member_initialized(self, msg):
self.log.debug("Member initialized event received: %r" % msg.payload)
if self.cartridge_agent_config.initialized:
return
event_obj = MemberInitializedEvent.create_from_json(msg.payload)
try:
CartridgeAgent.extension_handler.on_member_initialized_event(event_obj)
except:
self.log.exception("Error processing member initialized event")
def on_member_activated(self, msg):
self.log.debug("Member activated event received: %r" % msg.payload)
if not self.cartridge_agent_config.initialized:
return
event_obj = MemberActivatedEvent.create_from_json(msg.payload)
try:
CartridgeAgent.extension_handler.on_member_activated_event(event_obj)
except:
self.log.exception("Error processing member activated event")
def on_member_terminated(self, msg):
self.log.debug("Member terminated event received: %r" % msg.payload)
if not self.cartridge_agent_config.initialized:
return
event_obj = MemberTerminatedEvent.create_from_json(msg.payload)
try:
CartridgeAgent.extension_handler.on_member_terminated_event(event_obj)
except:
self.log.exception("Error processing member terminated event")
def on_member_suspended(self, msg):
self.log.debug("Member suspended event received: %r" % msg.payload)
if not self.cartridge_agent_config.initialized:
return
event_obj = MemberSuspendedEvent.create_from_json(msg.payload)
try:
CartridgeAgent.extension_handler.on_member_suspended_event(event_obj)
except:
self.log.exception("Error processing member suspended event")
def on_complete_topology(self, msg):
if not self.cartridge_agent_config.initialized:
self.log.debug("Complete topology event received")
event_obj = CompleteTopologyEvent.create_from_json(msg.payload)
TopologyContext.update(event_obj.topology)
try:
CartridgeAgent.extension_handler.on_complete_topology_event(event_obj)
except:
self.log.exception("Error processing complete topology event")
else:
self.log.info("Complete topology event updating task disabled")
def on_member_started(self, msg):
self.log.debug("Member started event received: %r" % msg.payload)
if not self.cartridge_agent_config.initialized:
return
event_obj = MemberStartedEvent.create_from_json(msg.payload)
try:
CartridgeAgent.extension_handler.on_member_started_event(event_obj)
except:
self.log.exception("Error processing member started event")
def register_tenant_event_listeners(self):
self.log.debug("Starting tenant event message receiver thread")
self.__tenant_event_subscriber.register_handler("SubscriptionDomainAddedEvent", self.on_subscription_domain_added)
self.__tenant_event_subscriber.register_handler("SubscriptionDomainsRemovedEvent", self.on_subscription_domain_removed)
self.__tenant_event_subscriber.register_handler("CompleteTenantEvent", self.on_complete_tenant)
self.__tenant_event_subscriber.register_handler("TenantSubscribedEvent", self.on_tenant_subscribed)
self.__tenant_event_subscriber.register_handler("TenantUnSubscribedEvent", self.on_tenant_unsubscribed)
self.__tenant_event_subscriber.start()
self.log.info("Tenant event message receiver thread started")
def on_subscription_domain_added(self, msg):
self.log.debug("Subscription domain added event received : %r" % msg.payload)
event_obj = SubscriptionDomainAddedEvent.create_from_json(msg.payload)
try:
CartridgeAgent.extension_handler.on_subscription_domain_added_event(event_obj)
except:
self.log.exception("Error processing subscription domains added event")
def on_subscription_domain_removed(self, msg):
self.log.debug("Subscription domain removed event received : %r" % msg.payload)
event_obj = SubscriptionDomainRemovedEvent.create_from_json(msg.payload)
try:
CartridgeAgent.extension_handler.on_subscription_domain_removed_event(event_obj)
except:
self.log.exception("Error processing subscription domains removed event")
def on_complete_tenant(self, msg):
if not self.__tenant_context_initialized:
self.log.debug("Complete tenant event received")
event_obj = CompleteTenantEvent.create_from_json(msg.payload)
TenantContext.update(event_obj.tenants)
try:
CartridgeAgent.extension_handler.on_complete_tenant_event(event_obj)
self.__tenant_context_initialized = True
except:
self.log.exception("Error processing complete tenant event")
else:
self.log.info("Complete tenant event updating task disabled")
def on_tenant_subscribed(self, msg):
self.log.debug("Tenant subscribed event received: %r" % msg.payload)
event_obj = TenantSubscribedEvent.create_from_json(msg.payload)
try:
CartridgeAgent.extension_handler.on_tenant_subscribed_event(event_obj)
except:
self.log.exception("Error processing tenant subscribed event")
def on_tenant_unsubscribed(self, msg):
self.log.debug("Tenant unSubscribed event received: %r" % msg.payload)
event_obj = TenantUnsubscribedEvent.create_from_json(msg.payload)
try:
CartridgeAgent.extension_handler.on_tenant_unsubscribed_event(event_obj)
except:
self.log.exception("Error processing tenant unSubscribed event")
def uncaught_exception_mg(exctype, value, tb):
log = LogFactory().get_log(__name__)
log.exception("UNCAUGHT EXCEPTION:", value)
def main():
sys.excepthook = uncaught_exception_mg
cartridge_agent = CartridgeAgent()
log = LogFactory().get_log(__name__)
try:
log.debug("Starting cartridge agent")
cartridge_agent.start()
except:
log.exception("Cartridge Agent Exception")
cartridge_agent.terminate()
if __name__ == "__main__":
main()
|
the-stack_106_28935 | """
Online Bayesian Linear Regression
---------------------------------
Figure 8.1
An example showing the online nature of Bayesian regression. The upper panel
shows the four points used in regression, drawn from the line
y = theta_1 x + theta_0 with theta_1 = 1 and theta_0 = 0. The lower panel shows
the posterior pdf in the (theta_1, theta_0) plane as each point is added in
sequence. For clarity, the implied dark regions for sigma > 3 have been
removed. The fourth point is an upper-limit measurement of y, and the resulting
posterior cuts off half the parameter space.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.plotting.mcmc import convert_to_stdev
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
if "setup_text_plots" not in globals():
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Set up the data and errors
np.random.seed(13)
a = 1
b = 0
x = np.array([-1, 0.44, -0.16])
y = a * x + b
dy = np.array([0.25, 0.22, 0.2])
y = np.random.normal(y, dy)
# add a fourth point which is a lower bound
x4 = 1.0
y4 = a * x4 + b + 0.2
#------------------------------------------------------------
# Compute the likelihoods for each point
a_range = np.linspace(0, 2, 80)
b_range = np.linspace(-1, 1, 80)
logL = -((a_range[:, None, None] * x + b_range[None, :, None] - y) / dy) ** 2
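# convert_to_stdev maps the log-likelihood grid onto cumulative probability levels, so
# contouring the result at 0.683/0.955/0.997 below picks out 1-, 2- and 3-sigma regions.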
sigma = [convert_to_stdev(logL[:, :, i]) for i in range(3)]
# compute best-fit from first three points
logL_together = logL.sum(-1)
i, j = np.where(logL_together == np.max(logL_together))
amax = a_range[i[0]]
bmax = b_range[j[0]]
#------------------------------------------------------------
# Plot the first figure: the points and errorbars
fig1 = plt.figure(figsize=(5, 3.75))
ax1 = fig1.add_subplot(111)
# Draw the true and best-fit lines
xfit = np.array([-1.5, 1.5])
ax1.plot(xfit, a * xfit + b, ':k', label='True fit')
ax1.plot(xfit, amax * xfit + bmax, '--k', label='fit to $\{x_1, x_2, x_3\}$')
ax1.legend(loc=2)
ax1.errorbar(x, y, dy, fmt='ok')
ax1.errorbar([x4], [y4], [[0.5], [0]], fmt='_k', lolims=True)
for i in range(3):
ax1.text(x[i] + 0.05, y[i] - 0.3, "$x_{%i}$" % (i + 1))
ax1.text(x4 + 0.05, y4 - 0.5, "$x_4$")
ax1.set_xlabel('$x$')
ax1.set_ylabel('$y$')
ax1.set_xlim(-1.5, 1.5)
ax1.set_ylim(-2, 2)
#------------------------------------------------------------
# Plot the second figure: likelihoods for each point
fig2 = plt.figure(figsize=(5, 5))
fig2.subplots_adjust(hspace=0.05, wspace=0.05)
# plot likelihood contours
for i in range(4):
ax = fig2.add_subplot(221 + i)
for j in range(min(i + 1, 3)):
ax.contourf(a_range, b_range, sigma[j].T,
levels=(0, 0.683, 0.955, 0.997),
cmap=plt.cm.binary, alpha=0.5)
# plot the excluded area from the fourth point
axpb = a_range[:, None] * x4 + b_range[None, :]
mask = y4 < axpb
fig2.axes[3].fill_between(a_range, y4 - x4 * a_range, 2, color='k', alpha=0.5)
# plot ellipses
for i in range(1, 4):
ax = fig2.axes[i]
logL_together = logL[:, :, :i + 1].sum(-1)
if i == 3:
logL_together[mask] = -np.inf
sigma_together = convert_to_stdev(logL_together)
ax.contour(a_range, b_range, sigma_together.T,
levels=(0.683, 0.955, 0.997),
colors='k')
# Label and adjust axes
for i in range(4):
ax = fig2.axes[i]
ax.text(1.98, -0.98, "$x_{%i}$" % (i + 1), ha='right', va='bottom')
ax.plot([0, 2], [0, 0], ':k', lw=1)
ax.plot([1, 1], [-1, 1], ':k', lw=1)
ax.set_xlim(0.001, 2)
ax.set_ylim(-0.999, 1)
if i in (1, 3):
ax.yaxis.set_major_formatter(plt.NullFormatter())
if i in (0, 1):
ax.xaxis.set_major_formatter(plt.NullFormatter())
if i in (0, 2):
ax.set_ylabel(r'$\theta_0$')
if i in (2, 3):
ax.set_xlabel(r'$\theta_1$')
plt.show()
|
the-stack_106_28936 | from sys import stderr
from numpy.core.fromnumeric import std
from numpy.lib.utils import source
from pcbnewTransition import pcbnew, isV6
from kikit.panelize_ui_impl import loadPresetChain, obtainPreset
from kikit import panelize_ui
from kikit.panelize import appendItem
from kikit.common import PKG_BASE
import kikit.panelize_ui_sections
import wx
import json
import tempfile
import shutil
import os
from threading import Thread
from itertools import chain
class ExceptionThread(Thread):
def run(self):
self.exception = None
try:
super().run()
except Exception as e:
self.exception = e
def pcbnewPythonPath():
return os.path.dirname(pcbnew.__file__)
def presetDifferential(source, target):
result = {}
for sectionName, section in target.items():
if sectionName not in source:
result[sectionName] = section
continue
updateKeys = {}
sourceSection = source[sectionName]
for key, value in section.items():
if key not in sourceSection or str(sourceSection[key]).lower() != str(value).lower():
updateKeys[key] = value
if len(updateKeys) > 0:
result[sectionName] = updateKeys
return result
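# Illustrative behaviour of presetDifferential (added comment): only keys whose stringified
# values differ case-insensitively from the source are kept, grouped by section, e.g.
#   presetDifferential({"layout": {"rows": "1", "cols": "1"}},
#                      {"layout": {"rows": "1", "cols": "4"}})
#   returns {"layout": {"cols": "4"}}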
def transplateBoard(source, target):
items = chain(
list(target.GetDrawings()),
list(target.GetFootprints()),
list(target.GetTracks()),
list(target.Zones()))
for x in items:
target.Remove(x)
for x in source.GetDrawings():
appendItem(target, x)
for x in source.GetFootprints():
appendItem(target, x)
for x in source.GetTracks():
appendItem(target, x)
for x in source.Zones():
appendItem(target, x)
if isV6():
d = target.GetDesignSettings()
d.CloneFrom(source.GetDesignSettings())
else:
target.SetDesignSettings(source.GetDesignSettings())
target.SetProperties(source.GetProperties())
target.SetPageSettings(source.GetPageSettings())
target.SetTitleBlock(source.GetTitleBlock())
target.SetZoneSettings(source.GetZoneSettings())
class SFile():
def __init__(self, nameFilter):
self.nameFilter = nameFilter
self.description = ""
self.isGuiRelevant = lambda section: True
def validate(self, x):
return x
class ParameterWidgetBase:
def __init__(self, parent, name, parameter):
self.name = name
self.parameter = parameter
self.label = wx.StaticText(parent,
label=name,
size=wx.Size(150, -1),
style=wx.ALIGN_RIGHT)
self.label.SetToolTip(parameter.description)
def showIfRelevant(self, preset):
relevant = self.parameter.isGuiRelevant(preset)
self.label.Show(relevant)
self.widget.Show(relevant)
class TextWidget(ParameterWidgetBase):
def __init__(self, parent, name, parameter, onChange):
super().__init__(parent, name, parameter)
self.widget = wx.TextCtrl(
parent, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0)
self.widget.Bind(wx.EVT_TEXT, onChange)
def setValue(self, value):
self.widget.ChangeValue(str(value))
def getValue(self):
return self.widget.GetValue()
class ChoiceWidget(ParameterWidgetBase):
def __init__(self, parent, name, parameter, onChange):
super().__init__(parent, name, parameter)
self.widget = wx.Choice(parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize,
parameter.vals, 0)
self.widget.SetSelection(0)
self.widget.Bind(wx.EVT_CHOICE, onChange)
def setValue(self, value):
for i, option in enumerate(self.parameter.vals):
if option.lower() == str(value).lower():
self.widget.SetSelection(i)
break
def getValue(self):
return self.parameter.vals[self.widget.GetSelection()]
class InputFileWidget(ParameterWidgetBase):
def __init__(self, parent, name, parameter, onChange):
super().__init__(parent, name, parameter)
self.widget = wx.FilePickerCtrl(
parent, wx.ID_ANY, wx.EmptyString, name,
parameter.nameFilter, wx.DefaultPosition, wx.DefaultSize, wx.FLP_DEFAULT_STYLE)
self.widget.Bind(wx.EVT_FILEPICKER_CHANGED, onChange)
def getValue(self):
return self.widget.GetPath()
def obtainParameterWidget(parameter):
if isinstance(parameter, kikit.panelize_ui_sections.SChoiceBase):
return ChoiceWidget
if isinstance(parameter, SFile):
return InputFileWidget
return TextWidget
class SectionGui():
def __init__(self, parent, name, section, onResize, onChange):
self.name = name
self.container = wx.CollapsiblePane(
parent, wx.ID_ANY, name, wx.DefaultPosition, wx.DefaultSize,
wx.CP_DEFAULT_STYLE)
self.container.Collapse(False)
self.container.Bind(wx.EVT_COLLAPSIBLEPANE_CHANGED, onResize)
self.container.SetSizeHints(wx.DefaultSize, wx.DefaultSize)
self.container.GetPane().SetSizeHints(wx.DefaultSize, wx.DefaultSize)
self.itemGrid = wx.FlexGridSizer(0, 2, 2, 2)
self.itemGrid.AddGrowableCol(1)
self.itemGrid.SetFlexibleDirection(wx.BOTH)
self.itemGrid.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
self.items = {
name: obtainParameterWidget(param)(
self.container.GetPane(), name, param, onChange)
for name, param in section.items()
}
for widget in self.items.values():
self.itemGrid.Add(widget.label, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL | wx.EXPAND | wx.RIGHT, 5)
self.itemGrid.Add(widget.widget, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL | wx.EXPAND | wx.RIGHT, 5)
self.container.GetPane().SetSizer(self.itemGrid)
self.onResize()
def onResize(self):
self.itemGrid.Layout()
self.container.GetPane().Fit()
self.container.Fit()
def populateInitialValue(self, values):
for name, widget in self.items.items():
if name not in values:
continue
widget.setValue(values[name])
def collectPreset(self):
return {name: widget.getValue() for name, widget in self.items.items()}
def showOnlyRelevantFields(self):
preset = self.collectPreset()
for name, widget in self.items.items():
if name not in preset:
continue
widget.showIfRelevant(preset)
self.onResize()
def collectReleventPreset(self):
preset = self.collectPreset()
return {name: widget.getValue()
for name, widget in self.items.items()
if widget.parameter.isGuiRelevant(preset)}
class PanelizeDialog(wx.Dialog):
def __init__(self, parent=None, board=None):
wx.Dialog.__init__(
self, parent, title=f'Panelize a board (version {kikit.__version__})',
style=wx.DEFAULT_DIALOG_STYLE)
self.Bind(wx.EVT_CLOSE, self.OnClose, id=self.GetId())
topMostBoxSizer = wx.BoxSizer(wx.VERTICAL)
middleSizer = wx.BoxSizer(wx.HORIZONTAL)
maxDisplayArea = wx.Display().GetClientArea()
self.maxDialogSize = wx.Size(
min(500, maxDisplayArea.Width),
min(800, maxDisplayArea.Height - 200))
self.scrollWindow = wx.ScrolledWindow(
self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.VSCROLL)
self.scrollWindow.SetSizeHints(self.maxDialogSize, wx.Size(self.maxDialogSize.width, -1))
self.scrollWindow.SetScrollRate(5, 5)
self._buildSections(self.scrollWindow)
middleSizer.Add(self.scrollWindow, 0, wx.EXPAND | wx.ALL, 5)
self._buildOutputSections(middleSizer)
topMostBoxSizer.Add(middleSizer, 1, wx.EXPAND | wx.ALL, 5)
self._buildBottomButtons(topMostBoxSizer)
self.SetSizer(topMostBoxSizer)
self.populateInitialValue()
self.buildOutputSections()
self.showOnlyRelevantFields()
self.OnResize()
def _buildOutputSections(self, sizer):
internalSizer = wx.BoxSizer(wx.VERTICAL)
cliLabel = wx.StaticText(self, label="KiKit CLI command:",
size=wx.DefaultSize, style=wx.ALIGN_LEFT)
internalSizer.Add(cliLabel, 0, wx.EXPAND | wx.ALL, 2)
self.kikitCmdWidget = wx.TextCtrl(
self, wx.ID_ANY, "KiKit Command", wx.DefaultPosition, wx.DefaultSize,
wx.TE_MULTILINE | wx.TE_READONLY)
self.kikitCmdWidget.SetSizeHints(
wx.Size(self.maxDialogSize.width,
self.maxDialogSize.height // 2),
wx.Size(self.maxDialogSize.width, -1))
cmdFont = self.kikitCmdWidget.GetFont()
cmdFont.SetFamily(wx.FONTFAMILY_TELETYPE)
self.kikitCmdWidget.SetFont(cmdFont)
internalSizer.Add(self.kikitCmdWidget, 0, wx.EXPAND | wx.ALL, 2)
jsonLabel = wx.StaticText(self, label="KiKit JSON preset (contains only changed keys):",
size=wx.DefaultSize, style=wx.ALIGN_LEFT)
internalSizer.Add(jsonLabel, 0, wx.EXPAND | wx.ALL, 2)
self.kikitJsonWidget = wx.TextCtrl(
self, wx.ID_ANY, "KiKit JSON", wx.DefaultPosition, wx.DefaultSize,
wx.TE_MULTILINE | wx.TE_READONLY)
self.kikitJsonWidget.SetSizeHints(
wx.Size(self.maxDialogSize.width,
self.maxDialogSize.height // 2),
wx.Size(self.maxDialogSize.width, -1))
cmdFont = self.kikitJsonWidget.GetFont()
cmdFont.SetFamily(wx.FONTFAMILY_TELETYPE)
self.kikitJsonWidget.SetFont(cmdFont)
internalSizer.Add(self.kikitJsonWidget, 0, wx.EXPAND | wx.ALL, 2)
sizer.Add(internalSizer, 0, wx.EXPAND | wx.ALL, 2)
def _buildSections(self, parentWindow):
sectionsSizer = wx.BoxSizer(wx.VERTICAL)
sections = {
"Input": {
"Input file": SFile("*.kicad_pcb")
}
}
sections.update(kikit.panelize_ui_sections.availableSections)
self.sections = {
name: SectionGui(parentWindow, name, section,
lambda evt: self.OnResize(), lambda evt: self.OnChange())
for name, section in sections.items()
}
for section in self.sections.values():
sectionsSizer.Add(section.container, 0, wx.ALL | wx.EXPAND, 5)
parentWindow.SetSizer(sectionsSizer)
def _buildBottomButtons(self, parentSizer):
button_box = wx.BoxSizer(wx.HORIZONTAL)
closeButton = wx.Button(self, label='Close')
self.Bind(wx.EVT_BUTTON, self.OnClose, id=closeButton.GetId())
button_box.Add(closeButton, 1, wx.RIGHT, 10)
self.okButton = wx.Button(self, label='Panelize')
self.Bind(wx.EVT_BUTTON, self.OnPanelize, id=self.okButton.GetId())
button_box.Add(self.okButton, 1)
parentSizer.Add(button_box, 0, wx.ALIGN_RIGHT |
wx.LEFT | wx.RIGHT | wx.BOTTOM, 20)
def OnResize(self):
for section in self.sections.values():
section.onResize()
self.scrollWindow.GetSizer().Layout()
self.scrollWindow.FitInside()
self.scrollWindow.Fit()
self.GetSizer().Layout()
self.Fit()
def OnClose(self, event):
self.EndModal(0)
def OnPanelize(self, event):
# You might be wondering, why we specify delete=False. The reason is
# Windows - the file cannot be opened for the second time. So we use
# this only to get a valid temporary name. This is why we close the file
# ASAP and only use its name
with tempfile.NamedTemporaryFile(suffix=".kicad_pcb", delete=False) as f:
try:
fname = f.name
f.close()
progressDlg = wx.ProgressDialog(
"Running kikit", "Running kikit, please wait")
progressDlg.Show()
progressDlg.Pulse()
args = self.kikitArgs()
preset = obtainPreset([], **args)
input = self.sections["Input"].items["Input file"].getValue()
if len(input) == 0:
dlg = wx.MessageDialog(
None, f"No input file specified", "Error", wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
output = fname
thread = ExceptionThread(target=panelize_ui.doPanelization,
args=(input, output, preset))
thread.daemon = True
thread.start()
while True:
progressDlg.Pulse()
thread.join(timeout=1)
if not thread.is_alive():
break
if thread.exception:
raise thread.exception
# KiCAD 6 does something strange here, so we will load
# an empty file if we read it directly, but we can always make
# a copy and read that:
with tempfile.NamedTemporaryFile(suffix=".kicad_pcb", delete=False) as tp:
tpname = tp.name
tp.close()
shutil.copy(f.name, tpname)
panel = pcbnew.LoadBoard(tpname)
transplateBoard(panel, self.board)
except Exception as e:
dlg = wx.MessageDialog(
None, f"Cannot perform:\n\n{e}", "Error", wx.OK)
dlg.ShowModal()
dlg.Destroy()
finally:
progressDlg.Hide()
progressDlg.Destroy()
try:
os.remove(fname)
os.remove(tpname)
except Exception:
pass
pcbnew.Refresh()
def populateInitialValue(self):
preset = loadPresetChain([":default"])
for name, section in self.sections.items():
if name.lower() not in preset:
continue
section.populateInitialValue(preset[name.lower()])
def showOnlyRelevantFields(self):
for section in self.sections.values():
section.showOnlyRelevantFields()
def collectPreset(self):
preset = loadPresetChain([":default"])
for name, section in self.sections.items():
if name.lower() not in preset:
continue
preset[name.lower()].update(section.collectPreset())
return preset
def collectReleventPreset(self):
preset = {}
for name, section in self.sections.items():
preset[name.lower()] = section.collectReleventPreset()
del preset["input"]
return preset
def OnChange(self):
self.showOnlyRelevantFields()
self.OnResize()
self.buildOutputSections()
def buildOutputSections(self):
defaultPreset = loadPresetChain([":default"])
preset = self.collectReleventPreset()
presetUpdates = presetDifferential(defaultPreset, preset)
self.kikitJsonWidget.ChangeValue(json.dumps(presetUpdates, indent=4))
kikitCommand = "kikit panelize \\\n"
for section, values in presetUpdates.items():
if len(values) == 0:
continue
attrs = "; ".join(
[f"{key}: {value}" for key, value in values.items()])
kikitCommand += f" --{section} '{attrs}' \\\n"
inputFilename = self.sections["Input"].items["Input file"].getValue()
if len(inputFilename) == 0:
inputFilename = "<missingInput>"
kikitCommand += f" {inputFilename} panel.kicad_pcb"
self.kikitCmdWidget.ChangeValue(kikitCommand)
def kikitArgs(self):
defaultPreset = loadPresetChain([":default"])
preset = self.collectReleventPreset()
presetUpdates = presetDifferential(defaultPreset, preset)
args = {}
for section, values in presetUpdates.items():
if len(values) == 0:
continue
args[section] = values
return args
class PanelizePlugin(pcbnew.ActionPlugin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dialog = None
def defaults(self):
self.name = "KiKit: Panelize PCB"
self.category = "KiKit"
self.description = "Create a panel"
self.icon_file_name = os.path.join(PKG_BASE, "resources", "graphics", "panelizeIcon_24x24.png")
self.show_toolbar_button = True
def Run(self):
try:
if self.dialog is None:
self.dialog = PanelizeDialog()
board = pcbnew.GetBoard()
self.dialog.board = board
self.dialog.ShowModal()
except Exception as e:
dlg = wx.MessageDialog(
None, f"Cannot perform: {e}", "Error", wx.OK)
dlg.ShowModal()
dlg.Destroy()
plugin = PanelizePlugin
if __name__ == "__main__":
# Run test dialog
app = wx.App()
dialog = PanelizeDialog()
dialog.ShowModal()
app.MainLoop()
|
the-stack_106_28937 | import numpy as np
from growth_procs import direction_to,\
normalize_length,\
get_entity,\
prepare_next_front
L_NORM=3.0
def extend_front(front,seed,constellation) :
# attract by a different neuron, get information
other_entities = get_entity("cell_type_2",constellation)
if not len(other_entities) == 0:
dir_to_entity = direction_to(front,other_entities,what="nearest")
dir_to_entity = normalize_length(dir_to_entity,1.0)
else: # during the first extension cycle, no distal info is known locally
dir_to_entity = np.array([.5,.0,.0])
new_pos = front.xyz + normalize_length(dir_to_entity,L_NORM)
new_front = prepare_next_front(front,new_pos,set_radius=1.5)
return [new_front]
|
the-stack_106_28938 | #!/usr/bin/env python
import base_filters
COPY_GOOGLE_DOC_KEY = '1KMoI6pkllZPbHV--HvMa3BgxNgrO8bVgQKCfWwJ3FuA'
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
|
the-stack_106_28940 | # Copyright 2020 (c) Cognizant Digital Business, Evolutionary AI. All rights reserved. Issued under the Apache 2.0 License.
import os
import argparse
import numpy as np
import pandas as pd
from copy import deepcopy
import neat
# Path to file containing neat prescriptors. Here we simply use a
# recent checkpoint of the population from train_prescriptor.py,
# but this is likely not the most complementary set of prescriptors.
# Many approaches can be taken to generate/collect more diverse sets.
# Note: this set can contain up to 10 prescriptors for evaluation.
from covid_xprize.examples.prescriptors.neat.utils import prepare_historical_df, CASES_COL, IP_COLS, IP_MAX_VALUES, \
add_geo_id, get_predictions, PRED_CASES_COL
# Number of days the prescriptors look at in the past.
NB_LOOKBACK_DAYS = 14
def prescribe(start_date_str: str,
end_date_str: str,
path_to_prior_ips_file: str,
path_to_cost_file: str,
output_file_path,
prescriptors_file,
) -> None:
print('output file:', output_file_path, ' file:', prescriptors_file)
start_date = pd.to_datetime(start_date_str, format='%Y-%m-%d')
end_date = pd.to_datetime(end_date_str, format='%Y-%m-%d')
# Load historical data with basic preprocessing
print("Loading historical data...")
df = prepare_historical_df()
# Restrict it to dates before the start_date
df = df[df['Date'] <= start_date]
# Fill in any missing case data using predictor given ips_df.
# todo: ignore ips_df for now, and instead assume we have case
# data for all days and geos up until the start_date.
# Create historical data arrays for all geos
past_cases = {}
past_ips = {}
for geo in df['GeoID'].unique():
geo_df = df[df['GeoID'] == geo]
past_cases[geo] = np.maximum(0, np.array(geo_df[CASES_COL]))
past_ips[geo] = np.array(geo_df[IP_COLS])
# Gather values for scaling network output
ip_max_values_arr = np.array([IP_MAX_VALUES[ip] for ip in IP_COLS])
# Load prescriptors
checkpoint = neat.Checkpointer.restore_checkpoint(prescriptors_file)
prescriptors = checkpoint.population.values()
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
'config-prescriptor')
# Load IP costs to condition prescriptions
cost_df = pd.read_csv(path_to_cost_file)
cost_df['RegionName'] = cost_df['RegionName'].fillna("")
cost_df = add_geo_id(cost_df)
geo_costs = {}
for geo in cost_df['GeoID'].unique():
costs = cost_df[cost_df['GeoID'] == geo]
cost_arr = np.array(costs[IP_COLS])[0]
geo_costs[geo] = cost_arr
# Generate prescriptions
prescription_dfs = []
for prescription_idx, prescriptor in enumerate(prescriptors):
print("Generating prescription", prescription_idx, "...")
# Create net from genome
net = neat.nn.FeedForwardNetwork.create(prescriptor, config)
# Set up dictionary for keeping track of prescription
df_dict = {'CountryName': [], 'RegionName': [], 'Date': []}
for ip_col in sorted(IP_MAX_VALUES.keys()):
df_dict[ip_col] = []
# Set initial data
eval_past_cases = deepcopy(past_cases)
eval_past_ips = deepcopy(past_ips)
# Generate prescriptions one day at a time, feeding resulting
# predictions from the predictor back into the prescriptor.
for date in pd.date_range(start_date, end_date):
date_str = date.strftime("%Y-%m-%d")
# Get prescription for all regions
for geo in df['GeoID'].unique():
# Prepare input data. Here we use log to place cases
# on a reasonable scale; many other approaches are possible.
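# For example, 0 new cases maps to log(0 + 1) = 0.0 and 999 cases to log(1000),
# about 6.9, keeping the network inputs in a small numeric range.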
X_cases = np.log(eval_past_cases[geo][-NB_LOOKBACK_DAYS:] + 1)
X_ips = eval_past_ips[geo][-NB_LOOKBACK_DAYS:]
X_costs = geo_costs[geo]
X = np.concatenate([X_cases.flatten(),
X_ips.flatten(),
X_costs])
# Get prescription
prescribed_ips = net.activate(X)
# Map prescription to integer outputs
prescribed_ips = (prescribed_ips * ip_max_values_arr).round()
# Add it to prescription dictionary
country_name, region_name = geo.split('__')
if region_name == 'nan':
region_name = np.nan
df_dict['CountryName'].append(country_name)
df_dict['RegionName'].append(region_name)
df_dict['Date'].append(date_str)
for ip_col, prescribed_ip in zip(IP_COLS, prescribed_ips):
df_dict[ip_col].append(prescribed_ip)
# Create dataframe from prescriptions
pres_df = pd.DataFrame(df_dict)
# Make prediction given prescription for all countries
pred_df = get_predictions(start_date_str, date_str, pres_df)
# Update past data with new day of prescriptions and predictions
pres_df['GeoID'] = pres_df['CountryName'] + '__' + pres_df['RegionName'].astype(str)
pred_df['RegionName'] = pred_df['RegionName'].fillna("")
pred_df['GeoID'] = pred_df['CountryName'] + '__' + pred_df['RegionName'].astype(str)
new_pres_df = pres_df[pres_df['Date'] == date_str]
new_pred_df = pred_df[pred_df['Date'] == date_str]
for geo in df['GeoID'].unique():
geo_pres = new_pres_df[new_pres_df['GeoID'] == geo]
geo_pred = new_pred_df[new_pred_df['GeoID'] == geo]
# Append array of prescriptions
pres_arr = np.array([geo_pres[ip_col].values[0] for ip_col in IP_COLS]).reshape(1,-1)
eval_past_ips[geo] = np.concatenate([eval_past_ips[geo], pres_arr])
# It is possible that the predictor does not return values for some regions.
# To make sure we generate full prescriptions, this script continues anyway.
# Geos that are ignored in this way by the predictor will not be used in
# quantitative evaluation. A list of such geos can be found in unused_geos.txt.
if len(geo_pred) != 0:
eval_past_cases[geo] = np.append(eval_past_cases[geo],
geo_pred[PRED_CASES_COL].values[0])
# Add prescription df to list of all prescriptions for this submission
pres_df['PrescriptionIndex'] = prescription_idx
prescription_dfs.append(pres_df)
# Combine dfs for all prescriptions into a single df for the submission
prescription_df = pd.concat(prescription_dfs)
# Create the output path
os.makedirs(os.path.dirname(output_file_path), exist_ok=True)
# Save to a csv file
prescription_df.to_csv(output_file_path, index=False)
print('Prescriptions saved to', output_file_path)
return
import subprocess
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--start_date",
dest="start_date",
type=str,
required=True,
help="Start date from which to prescribe, included, as YYYY-MM-DD."
"For example 2020-08-01")
parser.add_argument("-e", "--end_date",
dest="end_date",
type=str,
required=True,
help="End date for the last prescription, included, as YYYY-MM-DD."
"For example 2020-08-31")
parser.add_argument("-ip", "--interventions_past",
dest="prior_ips_file",
type=str,
required=True,
help="The path to a .csv file of previous intervention plans")
parser.add_argument("-c", "--intervention_costs",
dest="cost_file",
type=str,
required=True,
help="Path to a .csv file containing the cost of each IP for each geo")
parser.add_argument("-o", "--output_file",
dest="output_file",
type=str,
required=True,
help="The path to an intervention plan .csv file")
args = parser.parse_args()
print(f"Generating prescriptions from {args.start_date} to {args.end_date}...")
cp_path = "/content/covid-xprize/covid_xprize/examples/prescriptors/neat/run3cp"
for file_name in sorted(os.listdir(cp_path)):
print("prescribing", file_name)
prescribe(args.start_date, args.end_date, args.prior_ips_file, args.cost_file, f"run3csv/{file_name}.csv", f"run3cp/{file_name}")
rc = subprocess.call("/content/covid-xprize/covid_xprize/examples/prescriptors/neat/pushtogit.sh")
print("Done!")
|
the-stack_106_28941 | from notest.notest_lib import notest_run
import logging
logging.basicConfig(level=logging.INFO)
args = {
# 'config_file': '../examples/config.json',
# 'default_base_url': None,
'override_config_variable_binds': {
'title': 'GodQ-override'
},
# 'ext_dir': None,
'loop_interval': 1,
# 'request_client': None,
# 'working_directory': '../examples',
'test_structure': [{'config': {'default_base_url': 'http://localhost:5000',
'generators': [{'id': {'start': 10,
'type': 'number_sequence'}}],
'testset': 'Quickstart app tests',
'variable_binds': {'done': 'true',
'title': 'GodQ'}}},
{'test': {'expected_status': [201],
'method': 'POST',
'name': 'post ready task',
'url': '/delay_task',
'body': '$title'}},
{'test': {'expected_status': 200,
'headers': {'Content-Type': 'application/json',
'Token': 123},
'loop_until': [{'extract_test': {'jsonpath_mini': 'state',
'test': 'exists'}},
{'compare': {'comparator': 'str_eq',
'expected': 'ready',
'jsonpath_mini': 'state'}}],
'method': 'GET',
'name': 'get ready task',
'url': '/delay_task',
'body': '{"title": "$title"}'}}]
}
total_results = notest_run(args)
print("TestCase Count: {}".format(total_results.test_count))
print("Failure Count: {}".format(total_results.failure_count))
print("Failure List: {}".format(total_results.get_failures())) |
the-stack_106_28942 | # Copyright 2022 The Sigstore Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
from locust import HttpUser, task, constant_throughput, events
import jwt
@events.init_command_line_parser.add_listener
def _(parser):
parser.add_argument("--token", type=str, env_var="LOCUST_OIDC_TOKEN", default="", help="OIDC token for authentication with Fulcio")
parser.add_argument("--max-qps-per-user", type=float, env_var="LOCUST_MAX_QPS_PER_USER", default=1.0, help="Maximum QPS per user")
class FulcioUser(HttpUser):
# FulcioUser represents an instance of a user making requests.
# Maximum number of requests per second per user. For example, to reach 25 QPS,
# run Locust with 25 users with a constant throughput of 1.
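# Hypothetical invocation reaching ~25 QPS (file name and host are placeholders):
#   locust -f locustfile.py --headless -u 25 -r 25 --host https://fulcio.example.com \
#          --token "$TOKEN" --max-qps-per-user 1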
def wait_time(self):
return constant_throughput(self.environment.parsed_options.max_qps_per_user)(self)
@task
def create_cert(self):
# create_cert generates a keypair and makes a request to Fulcio to fetch a certificate.
# Static ID token. This avoids hitting the OIDC provider with each request to fetch a new token.
token = self.environment.parsed_options.token
# Generate keypair for challenge.
privkey = ec.generate_private_key(ec.SECP256R1)
pubkey = privkey.public_key()
pubbytes = pubkey.public_bytes(Encoding.DER, PublicFormat.SubjectPublicKeyInfo)
content = base64.b64encode(pubbytes).decode("utf-8")
# Fetch identity of token and sign.
email = jwt.decode(token, options={"verify_signature":False})['email']
data = email.encode()
signature = privkey.sign(data, ec.ECDSA(hashes.SHA256()))
challenge = base64.b64encode(signature).decode("utf-8")
json = {"publicKey": {"content": content,"algorithm":"ecdsa"},"signedEmailAddress":challenge}
response = self.client.post("/api/v1/signingCert", json=json, headers={"Authorization": f"Bearer {token}", "Content-Type":"application/json"})
print("Response status code:", response.status_code)
|
the-stack_106_28947 | # -*- coding: utf-8 -*-
"""
Testing phrasedml.
"""
from __future__ import print_function
import tellurium as te
ant = '''
model myModel
S1 -> S2; k1*S1
S1 = 10; S2 = 0
k1 = 1
end
'''
phrasedml = '''
model1 = model "myModel"
sim1 = simulate uniform(0, 5, 100)
task1 = run sim1 on model1
plot "Figure 1" time vs S1, S2
'''
# create experiment
exp = te.experiment(ant, phrasedml)
exp.execute()
exp.printPython()
|
the-stack_106_28948 | from PIL import Image, ImageColor
import numpy as np
import matplotlib.pyplot as plt
from opensimplex import OpenSimplex
import skimage.transform as tf
class ImageMap:
def __init__(self, in_map, mapping=None):
if not isinstance(in_map, np.ndarray):
in_map = plt.imread(in_map)[:, :, :3] # RGBA -> RGB
self.map = in_map
self.height = self.map.shape[0]
self.width = self.map.shape[1]
self.mapping = mapping
def read_rgb(self):
return Image.fromarray((self.map * 255).astype('uint8'), 'RGB')
def read_l(self):
return Image.fromarray((self.map * 255).astype('uint8'), 'L')
def apply_mask(self, mask, weight):
if len(self.map.shape) == 3:
mask = mask[:, :, None]
result = ImageMap(self.map * (1 - weight) + mask * weight)
result.normalize()
return result
def apply_circular_mask(self, weight, n=1.25):
interpolation = lambda x: x ** n
mask = np.outer(
self.create_gradient(self.height, f=interpolation, two_dir=True),
self.create_gradient(self.width, f=interpolation, two_dir=True),
)
return self.apply_mask(mask, weight)
def apply_square_mask(self, weight, edge_size):
mask = np.ones([self.height, self.width])
gradient = self.create_gradient(edge_size)
for i in range(self.height):
mask[i, :edge_size] *= gradient
mask[i, self.width - edge_size :] *= gradient[::-1]
for i in range(self.width):
mask[:edge_size, i] *= gradient
mask[self.height - edge_size :, i] *= gradient[::-1]
return self.apply_mask(mask, weight)
def create_gradient(self, size, f=lambda x: x, two_dir=False):
"""
f : [0, 1] -> [0, 1]
"""
gradient = np.zeros([size])
if two_dir:
size = size // 2
for i in range(size):
gradient[i] = f(i / size)
if two_dir:
gradient[-i - 1] = f(i / size)
return gradient
def resize(self, new_dims):
return ImageMap(tf.resize(self.map, new_dims), self.mapping)
def normalize(self):
self.map -= np.min(self.map)
self.map /= np.max(self.map)
def colorize(self):
colorized = Image.new('RGB', (self.width, self.height))
for i in range(self.height):
for j in range(self.width):
for m in self.mapping:
if self.map[i, j] <= m.upper_bound:
colorized.putpixel((j, i), m.color)
break
return colorized
def texturize(self, blend_factor=0.08):
texturized = np.zeros([self.height, self.width, 3])
divisor = np.zeros_like(texturized)
no_blend_count = sum([int(not m.blend) for m in self.mapping])
for i, m in enumerate(self.mapping):
mask = (m.upper_bound >= self.map).astype(int)
mask *= (self.map >= m.lower_bound).astype(int)
if i >= no_blend_count and blend_factor != 0:
# generate boolean mask of edges
# special cases: first and last layer
if i < len(self.mapping):
blend_mask = (m.upper_bound + blend_factor >= self.map).astype(int)
else:
blend_mask = np.ones_like(mask)
if i >= no_blend_count + 1:
blend_mask *= (self.map >= (m.lower_bound - blend_factor)).astype(int)
else:
blend_mask *= (self.map >= m.lower_bound).astype(int)
blend_mask -= mask
# make mask relative to edges
blend_mask = blend_mask.astype(float)
blend_mask *= self.map
blend_mask[blend_mask != 0] -= (m.lower_bound + m.upper_bound) / 2
blend_mask = abs(blend_mask)
# normalize mask and transform 0s to 1s and 1s to 0s
blend_mask[blend_mask != 0] -= np.min(blend_mask[blend_mask != 0])
blend_mask /= np.max(blend_mask)
blend_mask[blend_mask != 0] -= 1
blend_mask *= -1
mask = mask.astype(float)
mask += blend_mask
layer = m.texture.make_composite((self.height, self.width)).map
texturized += layer * mask[:, :, None]
divisor += mask[:, :, None]
result = ImageMap(texturized)
result.map /= divisor
return result.read_rgb()
def blank_like(self):
return ImageMap(np.ones([self.height, self.width]))
class Texture:
def __init__(self, path, block_size, copy_overlap=1):
self.name = path.split('/')[-1].replace('.png', '')
self.path = path
self.original = ImageMap(self.path)
self.block_size = block_size
self.blocks = self._get_blocks(copy_overlap)
def make_composite(self, size, paste_overlap=2):
return ImageMap(self._create(size, paste_overlap))
def _get_blocks(self, overlap_factor):
blocks = []
block_inc = int(self.block_size / overlap_factor)
for i in range(0, self.original.height - self.block_size, block_inc):
for j in range(0, self.original.width - self.block_size, block_inc):
blocks.append(
self.original.map[i : i + self.block_size, j : j + self.block_size].astype(
np.float64
)
)
return blocks
def random_sample(self):
return self.blocks[int(np.random.rand() * len(self.blocks))]
def _create(self, img_size, overlap_factor):
img_size = [x + 2 for x in img_size]
block_overlap = int(self.block_size / overlap_factor)
img = np.zeros((img_size[0], img_size[1], 3))
window = np.outer(np.hanning(self.block_size), np.hanning(self.block_size))
divisor = np.zeros_like(img) + 1e-10
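# The 2-D Hann window tapers each block toward its edges; windowed blocks are summed
# into img while divisor accumulates the window weights, so the final division blends
# overlapping blocks smoothly (overlap-add quilting).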
def set_pixels(coords, incs, end):
adj_window = window[: end[0], : end[1], None]
adj_block = block[: end[0], : end[1]]
img[coords[0] : coords[0] + incs[0], coords[1] : coords[1] + incs[1]] += (
adj_window * adj_block
)
divisor[coords[0] : coords[0] + incs[0], coords[1] : coords[1] + incs[1]] += adj_window
for i in range(0, img_size[1], block_overlap):
for j in range(0, img_size[0], block_overlap):
block = self.blocks[int(np.random.rand() * len(self.blocks))]
# if on the bottom or right edges of the image, block must be cropped
if i > img_size[1] - self.block_size or j > img_size[0] - self.block_size:
gap = [min(img_size[1] - i, self.block_size), min(img_size[0] - j, self.block_size)]
set_pixels([i, j], gap, gap)
else:
set_pixels([i, j], [self.block_size] * 2, [self.block_size] * 2)
return (img / divisor)[1:-1, 1:-1]
class NoiseMap(ImageMap):
"""
Useful resources
https://www.youtube.com/watch?v=eaXk97ujbPQ
https://medium.com/@travall/procedural-2d-island-generation-noise-functions-13976bddeaf9
https://www.redblobgames.com/maps/terrain-from-noise/
"""
def __init__(self, dimensions, flatness=1, octaves=None, show_components=False):
self.width = dimensions[0]
self.height = dimensions[1]
if octaves is None:
self.octaves = int(np.log2(self.width))
else:
self.octaves = octaves
self.show_components = show_components
if self.show_components:
self.layers = [Image.new('L', (self.width, self.height)) for _ in range(self.octaves)]
self.generate_noise_map(flatness)
def generate_noise_map(self, flatness):
self.map = np.zeros([self.height, self.width])
divisor = 0
for n in range(self.octaves):
simplex = OpenSimplex(int(np.random.rand() * 1e5))
frequency = 2 ** n / 1e2
amplitude = 1 / frequency
divisor += amplitude
for i in range(self.height):
for j in range(self.width):
rand = simplex.noise2d(x=frequency * i, y=frequency * j)
self.map[i, j] += ((rand + 1) / 2) * amplitude
if self.show_components:
self.layers[n].putpixel((j, i), int(255 * ((rand + 1) / 2)))
if self.show_components:
for x in self.layers:
x.show()
quit()
self.map /= divisor
self.map = self.map ** flatness
self.normalize()
class Mapping:
biomes = None
def __init__(self, lower_bound, upper_bound, color, name, blend=True):
if not Mapping.biomes:
Mapping.biomes = self.create_biomes()
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.name = name
self.blend = blend
self.color = color if type(color) == tuple else ImageColor.getrgb(color)
for b in Mapping.biomes:
if b.name == name:
self.texture = b
break
def create_biomes(self):
biomes = []
for biome in ['desert', 'grass', 'snow', 'stone', 'coast']:
biomes.append(Texture('images/samples/' + biome + '.png', 10, copy_overlap=1.5))
for biome in ['hills', 'forest']:
biomes.append(Texture('images/samples/' + biome + '.png', 15))
biomes.append(Texture('images/samples/ocean.png', 50))
return biomes
class GeneratedIsland:
def __init__(self, size, flatness):
self.size = size
self.terrain = NoiseMap(size, flatness=flatness)
self.moisture = NoiseMap(size)
def create_mapping(self, mapping):
self.terrain.mapping = []
for i in range(len(mapping)):
m = mapping[i]
lower_bound = 0.0 if i == 0 else mapping[i - 1][0]
blend = False if m[2] in ['coast', 'ocean'] else True
self.terrain.mapping.append(Mapping(lower_bound, m[0], m[1], m[2], blend))
# def resize(self, new_dims):
# result = self.terrain.resize(new_dims)
# mask = ImageMap(result.map > self.terrain.mapping[0].upper_bound).resize(new_dims)
# print(mask.map)
# return ImageMap(result.map * (mask.map >= 1.0), self.terrain.mapping)
class BigIsland(GeneratedIsland):
def __init__(self, size, flatness=0.5):
super().__init__(size, flatness)
self.shape = NoiseMap(size)
self.shape = self.shape.apply_circular_mask(0.75)
self.shape.map = (self.shape.map > 0.3).astype(int) # convert into boolean array
self.terrain = self.terrain.apply_circular_mask(0.4)
self.terrain = self.terrain.apply_mask(self.moisture.map, 0.3)
self.terrain.map *= self.shape.map
super().create_mapping(
[
[0.3, '#135AD4', 'ocean'],
[0.4, '#F1DA7A', 'desert'],
[0.5, '#CF8C36', 'hills'],
[0.6, '#0ADD08', 'grass'],
[0.8, '#228B22', 'forest'],
[0.9, '#516572', 'stone'],
[1.0, '#FFFFFF', 'snow'],
]
)
class SmallIsland(GeneratedIsland):
def __init__(self, size, flatness=0.7):
super().__init__(size, flatness)
self.terrain = self.terrain.apply_circular_mask(0.75)
self.moisture = self.moisture.apply_circular_mask(0.4)
self.terrain = self.terrain.apply_mask(self.moisture.map, 0.4)
super().create_mapping(
[
[0.4, '#135AD4', 'ocean'],
[0.5, '#7BC8F6', 'coast'],
[0.6, '#F1DA7A', 'desert'],
[0.8, '#0ADD08', 'grass'],
[0.9, '#228B22', 'forest'],
[1.0, '#516572', 'stone'],
]
)
class Continent:
def __init__(self, name, path, coordinates, size=None):
self.name = name
self.image = ImageMap(path)
if size:
self.image = self.image.resize(size)
self.coordinates = coordinates
class World:
def __init__(self, width):
self.width = width
self.height = int(self.width / 2)
self.image = Image.new('RGB', (self.width, self.height))
def small(self):
return self.image.resize((800, int(800 * 2 / 3)))
def smooth_paste(self, inimage, coordinates, edge_size=None):
if edge_size is None:
edge_size = inimage.width // 40
mask = inimage.blank_like().apply_square_mask(1, edge_size=edge_size)
self.image.paste(inimage.read_rgb(), coordinates, mask=mask.read_l())
def stitch_world_map():
world = World(2400)
ocean_texture = Texture('images/samples/ocean.png', 50).make_composite((300, 300))
# storm = Texture('images/samples/storm.png')
for i in range(-20, world.width, ocean_texture.width - 20):
for j in range(-20, world.height, ocean_texture.height - 20):
world.smooth_paste(ocean_texture, (i, j))
piskus = Continent('Piskus', 'images/map/piskus.png', [0, 500])
erebos = Continent(
'Erebos', 'images/map/erebos.png', [piskus.coordinates[0] + 350, piskus.coordinates[1] - 500]
)
orestes = Continent(
'Orestes', 'images/map/orestes.png', [erebos.coordinates[0] + 625, erebos.coordinates[1] + 250]
)
kestren = Continent(
'Kestren',
'images/map/kestren.png',
[orestes.coordinates[0] + 900, erebos.coordinates[1] + 250],
(204, 300),
)
offset_x, offset_y = 50, 50
for cont in [piskus, erebos, orestes, kestren]:
location = (cont.coordinates[0] + offset_x, cont.coordinates[1] + offset_y)
world.smooth_paste(cont.image, location)
return world
world.image.show()
# world.small().save('images/map/world_map.png')
def main():
island = BigIsland((200, 200))
island.terrain.colorize().show()
scaled_island = island.terrain.resize((1000, 1000))
scaled_island.texturize(0).show()
scaled_island.texturize().show()
# world = stitch_world_map()
# world.image.show()
if __name__ == '__main__':
main()
|
the-stack_106_28951 | # Copyright 2020 The KNIX Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from random import randint
import sys
import time
import unittest
sys.path.append("../")
from mfn_test_utils import MFNTest
class SessionSupportTest(unittest.TestCase):
@classmethod
def setUpClass(self):
# 1. parse and obtain workflow
self._test = MFNTest(workflow_filename="wf_session_all.json")
time.sleep(3)
def setUp(self):
self._session_id = self._setup_new_session()
time.sleep(5)
#@unittest.skip("")
def test01_setup_session(self):
self._clear_workflow_logs()
session_info = self._get_session_info()
#print(session_info)
if "session_id" not in session_info:
self._test.report(False, "setup_session: session_id", True, "session_id" in session_info)
elif session_info["session_id"] != self._session_id:
self._test.report(False, "setup_session: session_id equality", True, session_info["session_id"] == self._session_id)
elif "session_function_ids" not in session_info:
self._test.report(False, "setup_session: session_function_ids", True, "session_function_ids" in session_info)
else:
self._test.report(True, "setup_session", True, True)
#@unittest.skip("")
def test02_stop_session_with_message_to_session(self):
self._clear_workflow_logs()
old_session_id = self._session_id
self._stop_session()
time.sleep(5)
self._session_id = None
session_info = self._get_session_info()
#print(session_info)
if "session_id" not in session_info:
self._test.report(False, "stop_session: session_id", True, "session_id" in session_info)
elif "session_function_ids" not in session_info:
self._test.report(False, "stop_session: session_function_ids", True, "session_function_ids" in session_info)
elif session_info["session_id"] == old_session_id:
self._test.report(False, "stop_session: session_id change", False, session_info["session_id"] == old_session_id)
elif len(session_info["session_function_ids"]) != 0:
self._test.report(False, "stop_session: session_function_ids count", True, len(session_info["session_function_ids"]) == 0)
else:
self._test.report(True, "stop_session", True, True)
# stopping a session involves sending a message to the entire session
self._test.report(True, "send_message_to_session", True, True)
#@unittest.skip("")
def test03_send_message_with_function_name(self):
success = True
update_message = "config2"
self._send_message_to_function_with_name("sessionFunction1", update_message)
time.sleep(5)
new_telemetry_content = self._get_telemetry_content()
if len(new_telemetry_content) != 2:
self._test.report(False, "telemetry_content: length", 2, len(new_telemetry_content))
success = False
for function_info in new_telemetry_content:
function_name = function_info.split(":")[0]
for telemetry in new_telemetry_content[function_info]:
if function_name == "sessionFunction1":
if telemetry != "telemetry_" + function_name + "::doSomethingElse()":
self._test.report(False, "telemetry_content", "telemetry_" + function_name + "::doSomethingElse()", telemetry)
success = False
else:
if telemetry != "telemetry_" + function_name + "::doStuff()":
self._test.report(False, "telemetry_content", "telemetry_" + function_name + "::doStuff()", telemetry)
success = False
if success:
self._test.report(True, "send_message_to_function", True, True)
#@unittest.skip("")
def test04_send_message_with_session_function_id(self):
success = True
session_info = self._get_session_info()
# pick a session function and use its id
picked_function_id = session_info["session_function_ids"][randint(0,1)]
update_message = "config2"
self._send_message_to_function_with_id(picked_function_id, update_message)
time.sleep(5)
new_telemetry_content = self._get_telemetry_content()
if len(new_telemetry_content) != 2:
self._test.report(False, "telemetry_content: length", 2, len(new_telemetry_content))
success = False
for function_info in new_telemetry_content:
info = function_info.split(":")
function_name = info[0]
function_id = info[2]
for telemetry in new_telemetry_content[function_info]:
if function_id == picked_function_id:
if telemetry != "telemetry_" + function_name + "::doSomethingElse()":
self._test.report(False, "telemetry_content", "telemetry_" + function_name + "::doSomethingElse()", telemetry)
success = False
else:
if telemetry != "telemetry_" + function_name + "::doStuff()":
self._test.report(False, "telemetry_content", "telemetry_" + function_name + "::doStuff()", telemetry)
success = False
if success:
self._test.report(True, "send_message_to_function", True, True)
#@unittest.skip("")
def test05_session_alias_operations(self):
session_alias_summary = self._get_session_alias_summary()
session_info = self._get_session_info()
function_id_list = session_info["session_function_ids"]
if "session" not in session_alias_summary:
self._test.report(False, "alias_summary: session", True, "session" in session_alias_summary)
elif self._session_id not in session_alias_summary["session"]:
self._test.report(False, "alias_summary: session_id", True, self._session_id in session_alias_summary["session"])
elif session_alias_summary["session"][self._session_id] != "":
self._test.report(False, "alias_summary: alias", True, session_alias_summary["session"][self._session_id] is None)
else:
self._test.report(True, "alias_summary (session): init", True, True)
success = True
if "session_functions" not in session_alias_summary:
self._test.report(False, "alias_summary: session functions 2", True, "session_functions" in session_alias_summary)
else:
for function_id in function_id_list:
if function_id not in session_alias_summary["session_functions"]:
self._test.report(False, "alias_summary: function id", True, function_id in session_alias_summary["session_functions"])
success = False
elif session_alias_summary["session_functions"][function_id] != "":
self._test.report(False, "alias_summary: function alias 1", True, session_alias_summary["session_functions"][function_id] is None)
success = False
if success:
self._test.report(True, "alias_summary (functions): init", True, True)
# session alias operations
message = {}
message["session_id"] = self._session_id
message["alias_type"] = "session"
message["action"] = "--set-alias"
message["alias"] = "mySessionAlias"
self._send_message(message)
time.sleep(5)
session_alias_summary = self._get_session_alias_summary()
if "session" not in session_alias_summary:
self._test.report(False, "alias_summary: session 2", True, "session" in session_alias_summary)
elif self._session_id not in session_alias_summary["session"]:
self._test.report(False, "alias_summary: session_id 2", True, self._session_id in session_alias_summary["session"])
elif session_alias_summary["session"][self._session_id] != "mySessionAlias":
self._test.report(False, "alias_summary: alias 2", True, session_alias_summary["session"][self._session_id] == "mySessionAlias")
else:
self._test.report(True, "alias_summary (session): set", True, True)
message["action"] = "--unset-alias"
# unset session alias
self._send_message(message)
# session function alias operations
time.sleep(5)
message = {}
message["sessionId"] = self._session_id
message["alias_type"] = "function"
message["action"] = "--set-alias"
for function_id in function_id_list:
message["function_id"] = function_id
message["alias"] = "alias_" + function_id
self._send_message(message)
time.sleep(5)
session_alias_summary = self._get_session_alias_summary()
if "session" not in session_alias_summary:
self._test.report(False, "alias_summary: session 3", True, "session" in session_alias_summary)
elif self._session_id not in session_alias_summary["session"]:
self._test.report(False, "alias_summary: session_id 3", True, self._session_id in session_alias_summary["session"])
elif session_alias_summary["session"][self._session_id] != "":
self._test.report(False, "alias_summary: alias 3", True, session_alias_summary["session"][self._session_id] is None)
else:
self._test.report(True, "alias_summary (session): unset", True, True)
success = True
if "session_functions" not in session_alias_summary:
self._test.report(False, "alias_summary: session functions 4", True, "session_functions" in session_alias_summary)
else:
for function_id in function_id_list:
if function_id not in session_alias_summary["session_functions"]:
self._test.report(False, "alias_summary: function id", True, function_id in session_alias_summary["session_functions"])
success = False
elif session_alias_summary["session_functions"][function_id] != "alias_" + function_id:
self._test.report(False, "alias_summary: function alias 2", True, session_alias_summary["session_functions"][function_id] == "alias_" + function_id)
success = False
if success:
self._test.report(True, "alias_summary (functions): set", True, True)
# pick a session function and use its id
picked_function_id = session_info["session_function_ids"][randint(0,1)]
update_message = "config2"
self._send_message_to_function_with_alias("alias_" + picked_function_id, update_message)
time.sleep(5)
success = True
new_telemetry_content = self._get_telemetry_content()
if len(new_telemetry_content) != 2:
self._test.report(False, "telemetry_content: length", 2, len(new_telemetry_content))
success = False
for function_info in new_telemetry_content:
info = function_info.split(":")
function_name = info[0]
function_id = info[2]
for telemetry in new_telemetry_content[function_info]:
if function_id == picked_function_id:
if telemetry != "telemetry_" + function_name + "::doSomethingElse()":
self._test.report(False, "telemetry_content", "telemetry_" + function_name + "::doSomethingElse()", telemetry)
success = False
else:
if telemetry != "telemetry_" + function_name + "::doStuff()":
self._test.report(False, "telemetry_content", "telemetry_" + function_name + "::doStuff()", telemetry)
success = False
if success:
self._test.report(True, "send_message_to_function_with_alias", True, True)
message["action"] = "--unset-alias"
for function_id in function_id_list:
message["function_id"] = function_id
self._send_message(message)
session_info = self._get_session_info()
function_id_list = session_info["session_function_ids"]
session_alias_summary = self._get_session_alias_summary()
success = True
if "session_functions" not in session_alias_summary:
self._test.report(False, "alias_summary: session functions 6", True, "session_functions" in session_alias_summary)
else:
for function_id in function_id_list:
if function_id not in session_alias_summary["session_functions"]:
self._test.report(False, "alias_summary: function id", True, function_id in session_alias_summary["session_functions"])
success = False
elif session_alias_summary["session_functions"][function_id] != "":
self._test.report(False, "alias_summary: function alias 3", True, session_alias_summary["session_functions"][function_id] is None)
success = False
if success:
self._test.report(True, "alias_summary (functions): unset", True, True)
#@unittest.skip("")
def test06_send_heartbeat_update_message(self):
# old heartbeat was 10000ms, new heartbeat should be 5000ms
# defined in the workflow description
old_heartbeat_interval = 10000.0
new_heartbeat_interval = 5000.0
should_be_ratio = round(old_heartbeat_interval / new_heartbeat_interval, 1)
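# With the values above, should_be_ratio == round(10000.0 / 5000.0, 1) == 2.0,
# i.e. heartbeats should arrive twice as often after the update.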
old_heartbeat_timestamps = self._get_heartbeat_timestamps()
old_interval_map = self._get_heartbeat_intervals(old_heartbeat_timestamps)
#HBUPDATEMSG="{\"action\":\"--update-heartbeat\",\"heartbeat_parameters\":{\"heartbeat_interval_ms\":2000,\"heartbeat_function\":\"heartbeatHandler\"}}"
heartbeat_parameters = {}
heartbeat_parameters["heartbeat_method"] = "function"
heartbeat_parameters["heartbeat_interval_ms"] = new_heartbeat_interval
heartbeat_parameters["heartbeat_function"] = "heartbeatHandler"
message = {}
message["action"] = "--update-heartbeat"
message["heartbeat_parameters"] = heartbeat_parameters
self._send_message_to_entire_session(message)
new_heartbeat_timestamps = self._get_heartbeat_timestamps()
new_interval_map = self._get_heartbeat_intervals(new_heartbeat_timestamps)
#print("ratio should be: " + str(should_be_ratio))
if len(old_interval_map) != len(new_interval_map):
self._test.report(False, "heartbeat_intervals: length", True, len(old_interval_map) == len(new_interval_map))
else:
for function_info in new_interval_map:
ratio = old_interval_map[function_info] / new_interval_map[function_info]
# sometimes fails when run on a loaded system due to scheduling
#print(function_info[:10] + " ratio: " + str(ratio) + " old: " + str(old_interval_map[function_info]) + " new: " + str(new_interval_map[function_info]))
if round(ratio, 1) == should_be_ratio:
self._test.report(True, "heartbeat_intervals: ratio (almost equal)", True, round(ratio) == should_be_ratio)
elif old_interval_map[function_info] > new_interval_map[function_info]:
self._test.report(True, "heartbeat_intervals: ratio 2", True, old_interval_map[function_info] > new_interval_map[function_info])
else:
self._test.report(False, "heartbeat_intervals: ratio 3", True, old_interval_map[function_info] > new_interval_map[function_info])
def tearDown(self):
if self._session_id is not None:
self._stop_session()
@classmethod
def tearDownClass(self):
self._test.undeploy_workflow()
self._test.cleanup()
####################
# internal functions
####################
def _setup_new_session(self):
#MESSAGE="{\"action\":\"--create-new-session\",\"session\":[{\"name\":\"sessionFunction1\",\"parameters\":\"config1\"},{\"name\":\"sessionFunction2\",\"parameters\":\"config1\"}]}"
message = {}
message["action"] = "--create-new-session"
session_function_list = []
session_function1 = {}
session_function1["name"] = "sessionFunction1"
session_function1["parameters"] = "config1"
session_function_list.append(session_function1)
session_function2 = {}
session_function2["name"] = "sessionFunction2"
session_function2["parameters"] = "config1"
session_function_list.append(session_function2)
message["session"] = session_function_list
session_id = self._send_message(message)
return session_id
def _stop_session(self):
#STOPMSG="{\"action\":\"--stop\"}"
stop_message = {}
stop_message["action"] = "--stop"
self._send_message_to_entire_session(stop_message)
def _get_heartbeat_intervals(self, heartbeat_timestamps):
heartbeat_intervals = {}
for function_info in heartbeat_timestamps:
timestamps = heartbeat_timestamps[function_info]
size = len(timestamps)
ts_list = []
for ts in timestamps:
ts_list.append(float(ts))
total_diff = 0.0
ts_list.sort()
for i in range(size-1):
ts_diff = ts_list[i+1] - ts_list[i]
#print(str(i) + " " + function_info + " ts_diff: " + str(ts_diff))
total_diff += ts_diff
heartbeat_intervals[function_info] = total_diff/(size-1)
diff = ts_list[-1] - ts_list[0]
heartbeat_intervals[function_info + "_first_last_average"] = diff/(size-1)
return heartbeat_intervals
def _get_log_lines(self, contained_text):
workflow_logs = self._get_workflow_logs()
#progress_log = workflow_logs["progress"]
#print(progress_log)
log = workflow_logs["log"]
log_lines = log.split("\n")
#last_timestamp = workflow_logs["timestamp"]
#asctime = datetime.utcfromtimestamp(last_timestamp/1000.0/1000.0)
#print("Log last timestamp: " + str(type(last_timestamp)) + " " + str(last_timestamp) + " " + str(asctime))
lines = []
for line in log_lines:
if line.find("[FunctionWorker]") != -1:
continue
if line.find("[__mfn_progress]") != -1:
continue
if line.find(contained_text) != -1:
lines.append(line)
return lines
def _get_heartbeat_timestamps(self):
self._clear_workflow_logs()
# allow some time for heartbeat content to accumulate
time.sleep(30)
heartbeat_lines = self._get_log_lines("[heartbeatHandler]")
heartbeat_timestamps = {}
for line in heartbeat_lines:
line = line.split(" ")[-1]
line = line.strip()
if line == "":
continue
fields = line.split("@")
timestamp = fields[1]
function_info = fields[0]
if function_info not in heartbeat_timestamps:
heartbeat_timestamps[function_info] = []
heartbeat_timestamps[function_info].append(timestamp)
return heartbeat_timestamps
def _get_telemetry_content(self):
self._clear_workflow_logs()
# allow some time for telemetry content to accumulate
time.sleep(30)
telemetry_lines = self._get_log_lines("[telemetryHandler]")
telemetry_content = {}
for line in telemetry_lines:
line = line.split(" ")[-1]
line = line.strip()
if line == "":
continue
fields = line.split("@")
function_info = fields[0]
telemetry = fields[1].split(">")[-1]
if function_info not in telemetry_content:
telemetry_content[function_info] = []
telemetry_content[function_info].append(telemetry)
return telemetry_content
def _get_session_info(self):
#MESSAGE="{\"sessionId\":\"$SESSIONID\",\"action\":\"--get-session-info\"}"
message = {}
message["sessionId"] = self._session_id
message["action"] = "--get-session-info"
session_info = self._send_message(message)
return session_info
def _get_session_alias_summary(self):
message = {}
message["sessionId"] = self._session_id
message["action"] = "--get-session-alias-summary"
session_alias_summary = self._send_message(message)
return session_alias_summary
def _send_message_to_entire_session(self, update_message):
#MESSAGE="{\"immediate\":false,\"sessionId\":\"$SESSIONID\",\"action\":\"--update-session\",\"messageType\":\"$MSGTYPE\",\"sessionUpdateParams\":$DATA}"
message = {}
message["immediate"] = False
message["sessionId"] = self._session_id
message["action"] = "--update-session"
message["messageType"] = "session"
message["sessionUpdateParams"] = update_message
self._send_message(message)
def _send_message_to_function_with_name(self, function_name, update_message):
#MESSAGE="{\"immediate\":true,\"sessionId\":\"$SESSIONID\",\"action\":\"--update-session\",\"messageType\":\"$MSGTYPE\",\"messageToFunction\":\"sessionFunction1\",\"sessionUpdateParams\":$DATA}"
message = {}
message["immediate"] = False
message["sessionId"] = self._session_id
message["action"] = "--update-session"
message["messageType"] = "name"
message["messageToFunction"] = function_name
message["sessionUpdateParams"] = update_message
self._send_message(message)
def _send_message_to_function_with_id(self, function_id, update_message):
#MESSAGE="{\"immediate\":true,\"sessionId\":\"$SESSIONID\",\"action\":\"--update-session-function\",\"sessionFunctionId\":\"$SGID\",\"sessionUpdateParams\":$DATA}"
message = {}
message["immediate"] = False
message["sessionId"] = self._session_id
message["action"] = "--update-session-function"
message["sessionFunctionId"] = function_id
message["sessionUpdateParams"] = update_message
self._send_message(message)
def _send_message_to_function_with_alias(self, function_alias, update_message):
message = {}
message["immediate"] = False
message["sessionId"] = self._session_id
message["action"] = "--update-session-function-with-alias"
message["alias"] = function_alias
message["sessionUpdateParams"] = update_message
self._send_message(message)
def _get_workflow_logs(self):
logs = self._test.get_workflow_logs()
return logs
def _send_message(self, message):
response = self._test.execute(message)
return response
def _clear_workflow_logs(self):
self._test.clear_workflow_logs()
def main():
unittest.main()
if __name__ == '__main__':
main()
|
the-stack_106_28952 | from __future__ import print_function, absolute_import, division
import sys
import inspect
import socket
import numpy as np
from sklearn.utils import check_random_state
from sklearn.model_selection import ParameterGrid
import math
try:
from hyperopt import (Trials, tpe, fmin, STATUS_OK, STATUS_RUNNING,
STATUS_FAIL)
except ImportError:
# hyperopt is optional, but required for hyperopt_tpe()
pass
try:
from GPy import kern
from GPy.kern import RBF, Fixed, Bias
from GPy.util.linalg import tdot
from GPy.models import GPRegression
from scipy.optimize import minimize
from scipy.stats import norm
# If the GPy modules fail we won't do this unnecessarily.
from .entry_point import load_entry_point
KERNEL_BASE_CLASS = kern.src.kern.Kern
except:
# GPy is optional, but required for gp
GPRegression = kern = minimize = None
pass
from .search_space import EnumVariable
try:
from SALib.sample import sobol_sequence as ss
except:
ss = None
pass
DEFAULT_TIMEOUT = socket._GLOBAL_DEFAULT_TIMEOUT
class BaseStrategy(object):
short_name = None
def suggest(self, history, searchspace):
"""
Parameters
----------
history : list of 3-tuples
History of past function evaluations. Each element in history
should be a tuple `(params, score, status)`, where `params` is a
dict mapping parameter names to values
searchspace : SearchSpace
Instance of search_space.SearchSpace
random_state : integer or numpy.RandomState, optional
The random seed for sampling. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Returns
-------
new_params : dict
"""
raise NotImplementedError()
@staticmethod
def is_repeated_suggestion(params, history):
"""
Parameters
----------
params : dict
Trial param set
history : list of 3-tuples
History of past function evaluations. Each element in history
should be a tuple `(params, score, status)`, where `params` is a
dict mapping parameter names to values
Returns
-------
is_repeated_suggestion : bool
"""
if any(params == hparams and hstatus == 'SUCCEEDED' for hparams, hscore, hstatus in history):
return True
else:
return False
class SobolSearch(BaseStrategy):
short_name = 'sobol'
_SKIP = int(1e4)
def __init__(self, length=1000):
#TODO length should be n_trials. But this doesn't seem to be accessible to strategies without major re-write.
self.sequence = None
self.length = length
self.n_dims = 0
self.offset = 0
self.counter = 0
def _set_sequence(self):
#TODO could get rid of first part of sequence
self.sequence = ss.sample(self.length + self._SKIP, self.n_dims)
def _from_unit_cube(self, result, searchspace):
# TODO this should be a method common to both Sobol and GP.
# Note that Sobol only deals with float-valued variables, so we have
# a transform step on either side, where int and enum valued variables
# are transformed before calling Sobol, and then the result suggested by
# Sobol needs to be reverse-transformed.
out = {}
for gpvalue, var in zip(result, searchspace):
out[var.name] = var.point_from_gp(float(gpvalue))
return out
def suggest(self, history, searchspace):
if 'SALib' not in sys.modules:
raise ImportError('No module named SALib')
if self.sequence is None:
self.n_dims = searchspace.n_dims
self.offset = len(history) + self._SKIP
self._set_sequence()
try:
points = self.sequence[self.offset+ self.counter]
self.counter += 1
except IndexError:
raise RuntimeError('Increase sobol sequence length')
return self._from_unit_cube(points, searchspace)
class RandomSearch(BaseStrategy):
short_name = 'random'
def __init__(self, seed=None):
self.seed = seed
def suggest(self, history, searchspace):
"""Randomly suggest params from searchspace.
"""
return searchspace.rvs(self.seed)
class HyperoptTPE(BaseStrategy):
short_name = 'hyperopt_tpe'
def __init__(self, seed=None, gamma=0.25, seeds=20):
self.seed = seed
self.gamma = gamma
self.seeds = seeds
def suggest(self, history, searchspace):
"""
Suggest params to maximize an objective function based on the
function evaluation history using a tree of Parzen estimators (TPE),
as implemented in the hyperopt package.
Use of this function requires that hyperopt be installed.
"""
# This function is very odd, because as far as I can tell there's
# no real documented API for any of the internals of hyperopt. Its
# execution model is that hyperopt calls your objective function
# (instead of merely providing you with suggested points, and then
# you calling the function yourself), and it's very tricky (for me)
# to use the internal hyperopt data structures to get these predictions
# out directly.
# so the path we take in this function is to construct a synthetic
# hyperopt.Trials database from the `history`, and then call
# hyperopt.fmin with a dummy objective function that logs the value
# used, and then return that value to our client.
# The form of the hyperopt.Trials database isn't really documented in
# the code -- most of this comes from reverse engineering it, by
# running fmin() on a simple function and then inspecting the form of
# the resulting trials object.
if 'hyperopt' not in sys.modules:
raise ImportError('No module named hyperopt')
random = check_random_state(self.seed)
hp_searchspace = searchspace.to_hyperopt()
trials = Trials()
for i, (params, scores, status) in enumerate(history):
if status == 'SUCCEEDED':
# we're doing maximization, hyperopt.fmin() does minimization,
# so we need to swap the sign
result = {'loss': -np.mean(scores), 'status': STATUS_OK}
elif status == 'PENDING':
result = {'status': STATUS_RUNNING}
elif status == 'FAILED':
result = {'status': STATUS_FAIL}
else:
raise RuntimeError('unrecognized status: %s' % status)
# the vals key in the trials dict is basically just the params
# dict, but enum variables (hyperopt hp.choice() nodes) are
# different, because the index of the parameter is specified
# in vals, not the parameter itself.
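# Illustrative shape (hypothetical variable names): for an enum variable 'kernel' with
# choices ['linear', 'rbf'] and params['kernel'] == 'rbf', vals['kernel'] == [1];
# for a float variable 'C' with params['C'] == 0.3, vals['C'] == [0.3].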
vals = {}
for var in searchspace:
if isinstance(var, EnumVariable):
# get the index in the choices of the parameter, and use
# that.
matches = [i for i, c in enumerate(var.choices)
if c == params[var.name]]
assert len(matches) == 1
vals[var.name] = matches
else:
# the other big difference is that all of the param values
# are wrapped in length-1 lists.
vals[var.name] = [params[var.name]]
trials.insert_trial_doc({
'misc': {
'cmd': ('domain_attachment', 'FMinIter_Domain'),
'idxs': dict((k, [i]) for k in hp_searchspace.keys()),
'tid': i,
'vals': vals,
'workdir': None},
'result': result,
'tid': i,
# bunch of fixed fields that hyperopt seems to require
'owner': None, 'spec': None, 'state': 2, 'book_time': None,
'exp_key': None, 'refresh_time': None, 'version': 0
})
trials.refresh()
chosen_params_container = []
def suggest(*args, **kwargs):
return tpe.suggest(*args, **kwargs, gamma=self.gamma, n_startup_jobs=self.seeds)
def mock_fn(x):
# http://stackoverflow.com/a/3190783/1079728
# to get around no nonlocal keyword in python2
chosen_params_container.append(x)
return 0
fmin(fn=mock_fn, algo=suggest, space=hp_searchspace, trials=trials,
max_evals=len(trials.trials)+1,
**self._hyperopt_fmin_random_kwarg(random))
chosen_params = chosen_params_container[0]
return chosen_params
@staticmethod
def _hyperopt_fmin_random_kwarg(random):
if 'rstate' in inspect.getargspec(fmin).args:
# 0.0.3-dev version uses this argument
kwargs = {'rstate': random, 'allow_trials_fmin': False}
elif 'rseed' in inspect.getargspec(fmin).args:
# 0.0.2 version uses different argument
kwargs = {'rseed': random.randint(2**32-1)}
return kwargs
class GP(BaseStrategy):
short_name = 'gp'
def __init__(self, kernels=None, acquisition=None, seed=None, seeds=1, n_iter=50,
n_init = 20, sobol_init=False, optimize_best=False, max_iter=1E5,
predict_from_gp=True):
self.seed = seed
self.seeds = seeds
self.max_iter = int(max_iter)
self.n_iter = int(n_iter)
self.n_init = int(n_init)
self.sobol_init = bool(sobol_init)
self.optimize_best = bool(optimize_best)
self.predict_from_gp = bool(predict_from_gp)
self.model = None
self.n_dims = None
self.kernel = None
self.x_best = None
self.y_best = None
self.transformed = False
if kernels is None:
kernels = [{'name': 'GPy.kern.Matern52', 'params': {'ARD': True},
'options': {'independent': False}}]
self._kerns = kernels
if acquisition is None:
acquisition = {'name': 'osprey', 'params': {}}
self.acquisition_function = acquisition
self._acquisition_function = None
self._set_acquisition()
def _create_kernel(self):
# Check kernels
kernels = self._kerns
if not isinstance(kernels, list):
raise RuntimeError('Must provide enumeration of kernels')
for kernel in kernels:
if sorted(list(kernel.keys())) != ['name', 'options', 'params']:
raise RuntimeError(
'strategy/params/kernels must contain keys: "name", "options", "params"')
# Turn into entry points.
# TODO use eval to allow user to specify internal variables for kernels (e.g. V) in config file.
kernels = []
for kern in self._kerns:
params = kern['params']
options = kern['options']
name = kern['name']
kernel_ep = load_entry_point(name, 'strategy/params/kernels')
if issubclass(kernel_ep, KERNEL_BASE_CLASS):
if options['independent']:
#TODO Catch errors here? Estimator entry points don't catch instantiation errors
kernel = np.sum([kernel_ep(1, active_dims=[i], **params) for i in range(self.n_dims)])
else:
kernel = kernel_ep(self.n_dims, **params)
if not isinstance(kernel, KERNEL_BASE_CLASS):
raise RuntimeError('strategy/params/kernel must load a'
'GPy derived Kernel')
kernels.append(kernel)
self.kernel = np.sum(kernels)
def _fit_model(self, X, Y):
if max(Y) < 0:
self.transformed = True
else:
self.transformed = False
Y_trans = self._transform_score(Y)
model = GPRegression(X, Y_trans, self.kernel)
# Catch fitting error
try:
model.optimize_restarts(num_restarts=self.n_init, verbose=False)
self.model = model
except np.linalg.linalg.LinAlgError:
self.model = None
def _transform_score(self, Y):
if self.transformed:
return -np.log(-Y)
return Y
def _back_transform_score(self, Y):
if self.transformed:
return - np.exp(-Y)
return Y
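# Note: the two transforms are inverses on negative scores, e.g.
# Y = -10.0 -> -log(10.0) (about -2.303) -> -exp(2.303) (about -10.0).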
def _get_init(self):
if self.sobol_init:
return self._get_sobol_points()
return self._get_random_points()
def _get_random_points(self):
return np.random.random((self.n_iter, self.n_dims))
def _get_sobol_points(self):
return ss.sample(self.n_iter + np.random.randint(1000), self.n_dims)[-self.n_iter:]
def _is_var_positive(self, var):
if np.any(var < 0):
# RuntimeError may be overkill
raise RuntimeError('Negative variance predicted from regression model.')
else:
return True
#def _ei(self, x, y_mean, y_var):
# y_std = np.sqrt(y_var)
# z = (y_mean - self._transform_score(self.y_best))/y_std
# result = y_std*(z*norm.cdf(z) + norm.pdf(z))
# return result
def _ei(self, x, y_mean, y_var, kappa=0.01):
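# Expected improvement with an exploration offset kappa: with
# sigma = sqrt(y_var + y_best_var) and z = (y_mean - transformed y_best - kappa) / sigma,
# this returns sigma * (z * Phi(z) + phi(z)), Phi/phi being the standard normal CDF/PDF.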
y_std = np.sqrt(y_var + self.y_best_var)
z = (y_mean - self._transform_score(self.y_best) - kappa)/y_std
result = y_std*(z*norm.cdf(z) + norm.pdf(z))
return result
def _ucb(self, x, y_mean, y_var, kappa=1.0):
result = y_mean + kappa*np.sqrt(y_var + self.y_best_var)
return result
def _osprey(self, x, y_mean, y_var):
return (y_mean+y_var).flatten()
def get_gp_best(self):
# Objective function
def z(x):
X = x.reshape(-1, self.n_dims)
y_mean, y_var = self.model.predict(X)
return -y_mean
best_observation = self.model.X[self.model.Y.argmax(axis=0)].flatten()
res = minimize(z, best_observation, bounds=self.n_dims*[(0., 1.)],
options={'maxiter': self.max_iter, 'disp': 0})
return res.x
def _optimize_acquisition(self):
# Objective function
def z(x):
# TODO make spread of points around x and take mean value.
X = x.reshape(-1, self.n_dims)
y_mean, y_var = self.model.predict(X)
# This code is for debug/testing phase only.
# Ideally we should test for negative variance regardless of the AF.
# However, we want to recover the original functionality of Osprey, hence the conditional block.
# TODO remove this.
if self.acquisition_function['name'] in ['osprey', 'ucb']:
af = self._acquisition_function(X, y_mean=y_mean, y_var=y_var)
else:
if self._is_var_positive(y_var):
af = self._acquisition_function(X, y_mean=y_mean, y_var=y_var)
else:
return 0
return (-1)*af
init_tries = self._get_init()
# Optimization loop
acquisition_fns = []
candidates = []
for i in range(self.n_iter):
init = init_tries[i]
if self.max_iter > 0:
res = minimize(z, init, bounds=self.n_dims*[(0., 1.)],
options={'maxiter': self.max_iter, 'disp': 0})
candidates.append(res.x)
acquisition_fns.append(res.fun)
else:
candidates.append(init)
acquisition_fns.append(z(init))
# Choose the best
acquisition_fns = np.array(acquisition_fns).flatten()
candidates = np.array(candidates)
best_index = int(np.argmin(acquisition_fns))
best_candidate = candidates[best_index]
return best_candidate
def _set_acquisition(self):
if isinstance(self.acquisition_function, list):
raise RuntimeError('Must specify only one acquisition function')
if sorted(self.acquisition_function.keys()) != ['name', 'params']:
raise RuntimeError('strategy/params/acquisition must contain keys '
'"name" and "params"')
if self.acquisition_function['name'] not in ['ei', 'ucb', 'osprey']:
raise RuntimeError('strategy/params/acquisition name must be one of '
'"ei", "ucb", "osprey"')
if 'params' in self.acquisition_function \
and 'kappa' in self.acquisition_function['params']:
self.acquisition_function['params']['kappa'] = \
float(self.acquisition_function['params']['kappa'])
f = eval('self._'+self.acquisition_function['name'])
def g(x, y_mean, y_var):
return f(x, y_mean, y_var, **self.acquisition_function['params'])
self._acquisition_function = g
def _get_data(self, history, searchspace):
X = []
Y = []
V = []
ignore = []
for param_dict, scores, status in history:
# transform points into the GP domain. This invloves bringing
# int and enum variables to floating point, etc.
if status == 'FAILED':
# not sure how to deal with these yet
continue
point = searchspace.point_to_gp(param_dict)
if status == 'SUCCEEDED':
X.append(point)
Y.append(np.mean(scores))
V.append(np.var(scores))
elif status == 'PENDING':
ignore.append(point)
else:
raise RuntimeError('unrecognized status: %s' % status)
return (np.array(X).reshape(-1, self.n_dims),
np.array(Y).reshape(-1, 1),
np.array(V).reshape(-1, 1),
np.array(ignore).reshape(-1, self.n_dims))
def _from_gp(self, result, searchspace):
# Note that GP only deals with float-valued variables, so we have
# a transform step on either side, where int and enum valued variables
# are transformed before calling gp, and then the result suggested by
# GP needs to be reverse-transformed.
out = {}
for gpvalue, var in zip(result, searchspace):
out[var.name] = var.point_from_gp(float(gpvalue))
return out
    def _is_within(self, point, X, tol=1E-2):
        # True if `point` lies within Euclidean distance `tol` of any row of X
        # (distances are computed per stored point, hence axis=1).
        return bool(np.any(np.sqrt(((point - X) ** 2).sum(axis=1)) <= tol))
def suggest(self, history, searchspace, max_tries=5):
if not GPRegression:
raise ImportError('No module named GPy')
if not minimize:
raise ImportError('No module named SciPy')
if len(history) < self.seeds:
return RandomSearch().suggest(history, searchspace)
self.n_dims = searchspace.n_dims
X, Y, V, ignore = self._get_data(history, searchspace)
if len(Y) < self.seeds:
return RandomSearch().suggest(history, searchspace)
# TODO make _create_kernel accept optional args.
self._create_kernel()
self._fit_model(X, Y)
# Catch fitting error
if self.model is None:
return RandomSearch().suggest(history, searchspace)
if self.optimize_best:
x_best = self.get_gp_best()
y_best, self.y_best_var = self.model.predict(x_best.reshape(-1, self.n_dims))
else:
best_idx = self.model.Y.argmax(axis=0)
x_best = self.model.X[best_idx].flatten()
if self.predict_from_gp:
y_best, self.y_best_var = self.model.predict(x_best.reshape(-1, self.n_dims))
else:
y_best = self.model.Y[best_idx].flatten()[0]
self.y_best_var = 0
self.y_best = self._back_transform_score(y_best)
self.x_best = self._from_gp(x_best, searchspace)
suggestion = self._optimize_acquisition()
if suggestion in ignore or self._is_within(suggestion, X):
return RandomSearch().suggest(history, searchspace)
return self._from_gp(suggestion, searchspace)
class GridSearch(BaseStrategy):
short_name = 'grid'
def __init__(self):
self.param_grid = None
self.current = -1
def suggest(self, history, searchspace):
# Convert searchspace to param_grid
if self.param_grid is None:
if not all(isinstance(v, EnumVariable) for v in searchspace):
raise RuntimeError("GridSearchStrategy is defined only for all-enum search space")
self.param_grid = ParameterGrid(dict((v.name, v.choices) for v in searchspace))
        # NOTE: there is no way to signal that the grid has been exhausted,
        # so the user should choose the number of evaluations accordingly.
self.current += 1
return self.param_grid[self.current % len(self.param_grid)]
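# Minimal usage sketch (hypothetical objects: `history` holds
# (param_dict, scores, status) tuples as consumed above, and `searchspace`
# comes from the surrounding project):
#   strategy = GridSearch()
#   params = strategy.suggest(history, searchspace)
# Each call advances `self.current` and cycles through the all-enum grid.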
|
the-stack_106_28955 | from django.utils import timezone
from digitalsky_provider.models import DigitalSkyLog
from gcs_operations.models import FlightPlan, FlightOperation, Transaction, FlightPermission, FlightLog
from pki_framework.models import AerobridgeCredential
from registry.models import Person, Address, Activity, Authorization, Operator, Contact, Test, TypeCertificate, \
Manufacturer, Engine, Firmware, Pilot, TestValidity, Aircraft
from .test_setup import TestModels
class TestModelsCreate(TestModels):
fixtures = ['Activity', 'Address', 'Authorization', 'Engine', 'Manufacturer', 'Operator', 'Person', 'Test',
'TypeCertificate', 'Pilot', 'FlightPlan', 'FlightOperation', 'Aircraft', 'Transaction']
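    # Each test below follows the same pattern: build an unsaved instance,
    # assert it is absent from the queryset, call save(), then assert it is
    # present and that its foreign keys point at the expected fixture rows.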
def test_digitalsky_provider_digitalsky_log_create(self):
digitalsky_log = DigitalSkyLog(txn=Transaction.objects.first(), response_code=self.faker.numerify('###'),
response=self.faker.sentence(), timestamp=timezone.now())
self.assertNotIn(digitalsky_log, DigitalSkyLog.objects.all())
digitalsky_log.save()
self.assertIn(digitalsky_log, DigitalSkyLog.objects.all())
self.assertEqual(digitalsky_log.txn, Transaction.objects.first())
def test_gcs_operations_flight_plan_create(self):
flight_plan = FlightPlan(name=self.faker.word(), geo_json=self.faker.sentence(), start_datetime=timezone.now(),
end_datetime=timezone.now() + timezone.timedelta(minutes=30))
self.assertNotIn(flight_plan, FlightPlan.objects.all())
flight_plan.save()
self.assertIn(flight_plan, FlightPlan.objects.all())
def test_gcs_operations_flight_operation_create(self):
flight_operation = FlightOperation(name=self.faker.word(), drone=Aircraft.objects.first(),
flight_plan=FlightPlan.objects.first(), purpose=Activity.objects.first(),
type_of_operation=self.faker.pyint(min_value=0, max_value=len(
FlightOperation.OPERATION_TYPES) - 1))
self.assertNotIn(flight_operation, FlightOperation.objects.all())
flight_operation.save()
self.assertIn(flight_operation, FlightOperation.objects.all())
self.assertEqual(flight_operation.drone, Aircraft.objects.first())
self.assertEqual(flight_operation.flight_plan, FlightPlan.objects.first())
self.assertEqual(flight_operation.purpose, Activity.objects.first())
def test_gcs_operations_transaction_create(self):
transaction = Transaction(prefix=self.faker.word(), aircraft=Aircraft.objects.first())
self.assertNotIn(transaction, Transaction.objects.all())
transaction.save()
self.assertIn(transaction, Transaction.objects.all())
self.assertEqual(transaction.aircraft, Aircraft.objects.first())
def test_gcs_operations_flight_permission_create(self):
flight_permission = FlightPermission(operation=FlightOperation.objects.first(), is_successful=True,
artefact=self.faker.text())
self.assertNotIn(flight_permission, FlightPermission.objects.all())
flight_permission.save()
self.assertIn(flight_permission, FlightPermission.objects.all())
self.assertEqual(flight_permission.operation, FlightOperation.objects.first())
def test_gcs_operations_flight_log_create(self):
flight_log = FlightLog(operation=FlightOperation.objects.first(), signed_log=self.faker.uri_path(),
raw_log=self.faker.uri_path(), is_submitted=True)
self.assertNotIn(flight_log, FlightLog.objects.all())
flight_log.save()
self.assertIn(flight_log, FlightLog.objects.all())
self.assertEqual(flight_log.operation, FlightOperation.objects.first())
def test_registry_person_create(self):
person = Person(first_name=self.faker.first_name(), last_name=self.faker.last_name(), email=self.faker.email(),
phone_number=self.faker.numerify('+' + '#' * 9),
identification_number=self.faker.numerify('#' * 15),
social_security_number=self.faker.ssn(), date_of_birth=self.faker.date_of_birth())
self.assertNotIn(person, Person.objects.all())
person.save()
self.assertIn(person, Person.objects.all())
def test_registry_address_create(self):
fake_address = self.faker.address().split('\n')
address = Address(address_line_1=fake_address[0], address_line_2=fake_address[1],
postcode=self.faker.postcode(), city=self.faker.city(), state=self.faker.state(),
country='IN')
self.assertNotIn(address, Address.objects.all())
address.save()
self.assertIn(address, Address.objects.all())
def test_registry_activity_create(self):
activity = Activity(name=self.faker.word(), activity_type=self.faker.pyint(min_value=0, max_value=len(
Activity.ACTIVITYTYPE_CHOICES) - 1))
self.assertNotIn(activity, Activity.objects.all())
activity.save()
self.assertIn(activity, Activity.objects.all())
def test_registry_authorization_create(self):
authorization = Authorization(title=self.faker.sentence(), operation_max_height=self.faker.pyint(),
operation_altitude_system=self.faker.pyint(min_value=0, max_value=len(
Authorization.ALTITUDE_SYSTEM) - 1),
airspace_type=self.faker.pyint(min_value=0, max_value=len(
Authorization.AIRSPACE_CHOICES) - 1),
permit_to_fly_above_crowd=self.faker.pybool(),
operation_area_type=self.faker.pyint(min_value=0, max_value=len(
Authorization.AREATYPE_CHOICES) - 1),
risk_type=self.faker.pyint(min_value=0, max_value=len(
Authorization.RISKCLASS_CHOICES) - 1),
authorization_type=self.faker.pyint(min_value=0, max_value=len(
Authorization.AUTHTYPE_CHOICES) - 1))
self.assertNotIn(authorization, Authorization.objects.all())
authorization.save()
self.assertIn(authorization, Authorization.objects.all())
def test_registry_operator_create(self):
        operator = Operator(company_name=self.faker.company(), website=self.faker.url(), email=self.faker.company_email(),
phone_number=self.faker.numerify('+' + '#' * 9),
operator_type=self.faker.pyint(min_value=0, max_value=len(
Operator.OPTYPE_CHOICES) - 1), address=Address.objects.first(),
vat_number=self.faker.numerify('+' + '#' * 24),
insurance_number=self.faker.numerify('+' + '#' * 24), country='IN')
self.assertNotIn(operator, Operator.objects.all())
operator.save()
self.assertIn(operator, Operator.objects.all())
self.assertEqual(operator.address, Address.objects.first())
self.assertNotIn(Authorization.objects.first(), operator.operational_authorizations.all())
operator.operational_authorizations.add(Authorization.objects.first())
self.assertIn(Authorization.objects.first(), operator.operational_authorizations.all())
self.assertNotIn(Activity.objects.first(), operator.authorized_activities.all())
operator.authorized_activities.add(Activity.objects.first())
self.assertIn(Activity.objects.first(), operator.authorized_activities.all())
def test_registry_contact_create(self):
contact = Contact(operator=Operator.objects.first(), person=Person.objects.first(),
address=Address.objects.first(),
role_type=self.faker.pyint(min_value=0, max_value=len(Contact.ROLE_CHOICES) - 1))
self.assertNotIn(contact, Contact.objects.all())
contact.save()
self.assertIn(contact, Contact.objects.all())
self.assertEqual(contact.operator, Operator.objects.first())
self.assertEqual(contact.person, Person.objects.first())
self.assertEqual(contact.address, Address.objects.first())
def test_registry_test_create(self):
test = Test(test_type=self.faker.pyint(min_value=0, max_value=len(Test.TESTTYPE_CHOICES) - 1),
taken_at=self.faker.pyint(min_value=0, max_value=len(Test.TAKEN_AT_CHOICES) - 1),
name=self.faker.name())
self.assertNotIn(test, Test.objects.all())
test.save()
self.assertIn(test, Test.objects.all())
def test_registry_pilot_create(self):
pilot = Pilot(operator=Operator.objects.first(), person=Person.objects.first(), photo=self.faker.uri(),
photo_small=self.faker.uri(), address=Address.objects.first(),
identification_photo=self.faker.uri(), identification_photo_small=self.faker.uri())
self.assertNotIn(pilot, Pilot.objects.all())
pilot.save()
self.assertIn(pilot, Pilot.objects.all())
self.assertEqual(pilot.operator, Operator.objects.first())
self.assertEqual(pilot.person, Person.objects.first())
self.assertEqual(pilot.address, Address.objects.first())
self.assertNotIn(Test.objects.first(), pilot.tests.all())
pilot.tests.add(Test.objects.first())
self.assertIn(Test.objects.first(), pilot.tests.all())
def test_registry_testValidity_create(self):
test_validity = TestValidity(test=Test.objects.first(), pilot=Pilot.objects.first(), taken_at=timezone.now(),
expiration=timezone.now() + timezone.timedelta(days=365 * 5))
self.assertNotIn(test_validity, TestValidity.objects.all())
test_validity.save()
self.assertIn(test_validity, TestValidity.objects.all())
self.assertEqual(test_validity.test, Test.objects.first())
self.assertEqual(test_validity.pilot, Pilot.objects.first())
def test_registry_typeCertificate_create(self):
type_certificate = TypeCertificate(type_certificate_id=self.faker.numerify('#' * 100),
type_certificate_issuing_country=self.faker.country(),
type_certificate_holder=self.faker.name(),
type_certificate_holder_country=self.faker.country())
self.assertNotIn(type_certificate, TypeCertificate.objects.all())
type_certificate.save()
self.assertIn(type_certificate, TypeCertificate.objects.all())
def test_registry_manufacturer_create(self):
manufacturer = Manufacturer(full_name=self.faker.company(), common_name=self.faker.company_suffix(),
address=Address.objects.first(), acronym=self.faker.word(), role=self.faker.word(),
country=self.faker.country_code(), digital_sky_id=self.faker.numerify('#' * 20))
self.assertNotIn(manufacturer, Manufacturer.objects.all())
manufacturer.save()
self.assertIn(manufacturer, Manufacturer.objects.all())
def test_registry_engine_create(self):
engine = Engine(power=self.faker.pyfloat(min_value=0, max_value=100.00, right_digits=2),
count=self.faker.pyint(min_value=0, max_value=50), engine_type=self.faker.word(),
propellor=self.faker.sentence())
self.assertNotIn(engine, Engine.objects.all())
engine.save()
self.assertIn(engine, Engine.objects.all())
def test_registry_firmware_create(self):
firmware = Firmware(binary_file_url=self.faker.uri(), public_key=self.faker.text(),
version=self.faker.pyfloat(min_value=0, max_value=10.00, right_digits=2),
manufacturer=Manufacturer.objects.first(), friendly_name=self.faker.name())
self.assertNotIn(firmware, Firmware.objects.all())
firmware.save()
self.assertIn(firmware, Firmware.objects.all())
def test_registry_aircraft_create(self):
aircraft = Aircraft(operator=Operator.objects.first(), mass=self.faker.pyint(min_value=0, max_value=50),
is_airworthy=True, make=self.faker.sentence(), master_series=self.faker.sentence(),
series=self.faker.sentence(), popular_name=self.faker.word(),
manufacturer=Manufacturer.objects.first(),
category=self.faker.pyint(min_value=0, max_value=len(Aircraft.AIRCRAFT_CATEGORY) - 1),
registration_mark=self.faker.numerify('#' * 10),
sub_category=self.faker.pyint(min_value=0, max_value=len(
Aircraft.AIRCRAFT_SUB_CATEGORY) - 1),
icao_aircraft_type_designator=self.faker.numerify('#' * 4),
max_certified_takeoff_weight=self.faker.pyfloat(min_value=0, max_value=50.00,
right_digits=2),
max_height_attainable=self.faker.pyfloat(min_value=0, max_value=160.00, right_digits=2),
compatible_payload=self.faker.text(max_nb_chars=20), commission_date=timezone.now(),
type_certificate=TypeCertificate.objects.first(), model=self.faker.text(),
esn=self.faker.numerify('#' * 20), digital_sky_uin_number=self.faker.numerify('#' * 30),
maci_number=self.faker.mac_address(),
flight_controller_number=self.faker.numerify('#' * 60),
controller_public_key=self.faker.text(),
operating_frequency=self.faker.pyfloat(min_value=0, max_value=500.00, right_digits=2),
status=self.faker.pyint(min_value=0, max_value=len(Aircraft.STATUS_CHOICES) - 1),
photo=self.faker.uri(), photo_small=self.faker.uri(), identification_photo=self.faker.uri(),
identification_photo_small=self.faker.uri(), engine=Engine.objects.first(),
is_registered=True,
fuel_capacity=self.faker.pyfloat(min_value=0, max_value=50, right_digits=1),
max_endurance=self.faker.pyfloat(min_value=0, max_value=20, right_digits=2),
max_range=self.faker.pyfloat(min_value=0, max_value=100.00, right_digits=2),
max_speed=self.faker.pyfloat(min_value=0, max_value=70.00, right_digits=2),
dimension_length=self.faker.pyfloat(min_value=0, max_value=200.00, right_digits=2),
dimension_breadth=self.faker.pyfloat(min_value=0, max_value=200.00, right_digits=2),
dimension_height=self.faker.pyfloat(min_value=0, max_value=20.00, right_digits=2),
manufactured_at=timezone.now(), dot_permission_document=self.faker.uri(),
operataions_manual_document=self.faker.uri()
)
self.assertNotIn(aircraft, Aircraft.objects.all())
aircraft.save()
self.assertIn(aircraft, Aircraft.objects.all())
self.assertEqual(aircraft.operator, Operator.objects.first())
self.assertEqual(aircraft.manufacturer, Manufacturer.objects.first())
self.assertEqual(aircraft.type_certificate, TypeCertificate.objects.first())
self.assertEqual(aircraft.engine, Engine.objects.first())
def test_pki_framework_aerobridge_credentials_create(self):
aerobridge_credentials = AerobridgeCredential(name=self.faker.name(),
token_type=self.faker.pyint(min_value=0, max_value=len(
AerobridgeCredential.TOKEN_TYPE) - 1),
association=self.faker.pyint(min_value=0, max_value=len(
AerobridgeCredential.KEY_ENVIRONMENT) - 1),
token=self.faker.binary(length=1024), is_active=True)
self.assertNotIn(aerobridge_credentials, AerobridgeCredential.objects.all())
aerobridge_credentials.save()
self.assertIn(aerobridge_credentials, AerobridgeCredential.objects.all())
|
the-stack_106_28956 | """
Dueling Double DQN
Zhiang Chen, Jan 3 2018
MIT License
"""
import tensorflow as tf
import numpy as np
class Dueling_DDQN(object):
def __init__(self,
n_action,
n_feature,
learning_rate,
batch_size,
gamma,
e_greedy
):
"""
1. get hyperparameters
2. set placeholders
3. build networks
4. build an optimizer
5. save the model
6. initialize a session
"""
# 1. get hyperparameters
self.n_action = n_action
self.n_feature = n_feature
self.learning_rate = learning_rate
self.batch_size = batch_size
self.gamma = gamma
self.e_greedy = e_greedy
# 2. set placeholders
self.state = tf.placeholder(tf.float32, [self.batch_size, n_feature], name='state')
self.state_ = tf.placeholder(tf.float32, [self.batch_size, n_feature], name='state_')
self.G = tf.placeholder(tf.float32, [self.batch_size, 1], 'return')
self.action = tf.placeholder(tf.int32, [self.batch_size, 1], 'action')
self.is_training = tf.placeholder(tf.bool, name='is_training')
# 3. build networks
self.beh_a_value, self.beh_s_value, self.beh_adv = self._build_network(input=self.state, scope='behavior')
self.tar_a_value, self.tar_s_value, self.tar_adv = self._build_network(input=self.state_, scope='target')
        # Fetch each network's variables by scope name so the target network
        # can be synchronised with the behaviour network.
        t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target')
        b_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='behavior')
self.replace_target_op = [tf.assign(t, b) for t, b in zip(t_params, b_params)]
# 4. build an optimizer
self.loss, self.opt = self._build_optimizer()
# 5. save the model
self.saver = tf.train.Saver()
# 6. initialize a session
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
tf.summary.FileWriter("logs/", self.sess.graph)
# tensorboard --logdir=./logs
def _build_network(self, input, scope, trainable=True):
hid_num1 = 200
hid_num2 = 20
with tf.variable_scope(scope):
hidden1 = tf.layers.dense(input, hid_num1, activation=tf.nn.relu, name='fc1', trainable=trainable,
kernel_initializer=tf.contrib.layers.xavier_initializer())
hidden2 = tf.layers.dense(hidden1, hid_num2, activation=tf.nn.relu, name='fc2', trainable=trainable,
kernel_initializer=tf.contrib.layers.xavier_initializer())
adv = tf.layers.dense(hidden2, self.n_action, activation=None, name='advantages', trainable=trainable,
kernel_initializer=tf.contrib.layers.xavier_initializer())
hidden1_ = tf.layers.dense(input, hid_num1, activation=tf.nn.relu, name='fc1_', trainable=trainable,
kernel_initializer=tf.contrib.layers.xavier_initializer())
s_value = tf.layers.dense(hidden1_, 1, activation=None, name='state_value', trainable=trainable,
kernel_initializer=tf.contrib.layers.xavier_initializer())
with tf.variable_scope('action_value'):
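                # Dueling aggregation: combine the state-value and advantage
                # streams, subtracting the mean advantage so V(s) and A(s, a)
                # are identifiable.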
a_value = s_value + (adv - tf.reduce_mean(adv, axis=1, keep_dims=True))
return a_value, s_value, adv
def _build_optimizer(self):
with tf.variable_scope('optimization'):
with tf.variable_scope('loss'):
batch_index = tf.range(self.batch_size, dtype=tf.int32)
batch_index = tf.reshape(batch_index, [-1,1])
indices = tf.concat([batch_index, self.action], axis=1)
Q = tf.gather_nd(self.beh_a_value, indices)
Q = tf.reshape(Q,[-1,1])
loss = tf.reduce_mean(tf.squared_difference(self.G, Q))
with tf.variable_scope('optimizer'):
opt = tf.train.RMSPropOptimizer(self.learning_rate).minimize(loss)
return loss, opt
def choose_beh_action(self, state):
state = state[np.newaxis, :]
        if np.random.uniform() < self.e_greedy:  # exploit: greedy action with probability e_greedy
actions_value = self.sess.run(self.beh_a_value, feed_dict={self.state: state})
action = np.argmax(actions_value)
else:
action = np.random.randint(0, self.n_action)
return action
def choose_tar_action(self, state):
state = state[np.newaxis, :]
actions_value = self.sess.run(self.tar_a_value, feed_dict={self.state: state})
return np.argmax(actions_value)
def learn(self, state, action, G):
state = state[np.newaxis, :]
        loss, _ = self.sess.run([self.loss, self.opt], feed_dict={self.state: state, self.action: action, self.G: G})
def save_model(self, loc='model'):
save_path = self.saver.save(self.sess, "./" + loc + "/model.ckpt")
print("Model saved in file: %s" % save_path)
def restore_model(self, loc='model'):
print("Restored model")
self.saver.restore(self.sess, "./" + loc + "/model.ckpt")
def replace_target(self):
self.sess.run(self.replace_target_op)
if __name__ == '__main__':
net = Dueling_DDQN(n_action=8, n_feature=6, learning_rate=0.001,
batch_size=64, gamma=0.9, e_greedy=0.9)
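    # Hedged usage sketch (the environment, batching and return computation
    # are placeholders, not part of this module):
    #   a = net.choose_beh_action(state)       # epsilon-greedy behaviour policy
    #   net.learn(states, actions, returns)    # one RMSProp step on a batch
    #   net.replace_target()                   # periodically sync target network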
|
the-stack_106_28958 | import cv2 as cv
from configparser import ConfigParser
from utils.capture_stream import CaptureStream
from utils.bounding_box import BoundingBox
# Detect objects from a live camera feed
def main():
# create parser instance
config = ConfigParser()
# Read detection.cfg
config.read('config/detection.cfg')
crop = config.get('blob', 'crop')
swap_rb = config.get('blob', 'swap_rb')
scale_factor = config.get('blob', 'scale_factor')
output_width = config.get('blob', 'output_width')
output_length = config.get('blob', 'output_length')
confidence = config.get('blob', 'confidence')
threshold = config.get('blob', 'threshold')
yolov_labels = config.get('yolov3', 'yolov3_labels')
yolov_weights = config.get('yolov3', 'yolov3_test_weights_file')
yolov_config = config.get('yolov3', 'yolov3_config_file')
device_id = config.get('camera', 'device_id')
frame_wait_ms = config.get('camera', 'frame_wait_ms')
frame_pause_interval = config.get('camera', 'frame_pause_interval')
bounding_box = BoundingBox(scale_factor, output_length, output_width, swap_rb, crop,
yolov_labels, yolov_config, yolov_weights, confidence, threshold)
capture_stream = CaptureStream(device_id)
camera = capture_stream.get_camera_stream()
frame_count = 1
if camera.isOpened():
while True:
# Read each frame
ret_val, frame = camera.read()
# Draw new bounding box
if frame_count == 1 or frame_count % int(frame_pause_interval) == 0:
input_height, input_width = frame.shape[:2]
blob = bounding_box.get_blob(frame)
output_layers = bounding_box.get_output_layers(blob)
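                # Decode boxes, confidences and class ids from the YOLO output
                # layers, then suppress weak/overlapping detections before drawing.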
boxes, confidences, class_identifiers = bounding_box.get_bounding_boxes(output_layers, input_height, input_width)
suppression = bounding_box.suppress_weak_overlapping_boxes(boxes, confidences)
frame_with_boxes = bounding_box.draw_boxes_labels(frame, boxes, confidences, class_identifiers, suppression)
# Use existing bounding box
else:
frame_with_boxes = bounding_box.draw_boxes_labels(frame, boxes, confidences, class_identifiers, suppression)
frame_count += 1
cv.imshow("device: " + str(device_id), frame_with_boxes)
# End camera capture with escape key
interrupt_key = cv.waitKey(int(frame_wait_ms))
if interrupt_key == 27:
break
# Cleanup
capture_stream.close_camera_stream()
capture_stream.destory_all_windows()
if __name__ == "__main__":
main()
|
the-stack_106_28959 | from flask import Blueprint, jsonify, request
from flask_cors import cross_origin
from server import aql
info_bp = Blueprint("info_bp", __name__)
@info_bp.route("/info/image")
@cross_origin()
def fetch_image():
img_id = request.args.get("id")
if img_id and img_id.isdigit():
data = aql.fetch_image_info(img_id)
return jsonify({"data": data}), 200
else:
return jsonify("User must pass image ID to view."), 400
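# Example (hypothetical id value): GET /info/image?id=42 -> {"data": ...}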
@info_bp.route("/info/randomtags")
@cross_origin()
def fetch_random_tags():
keyword = aql.fetch_surprise_tags()
if keyword:
return jsonify({"keyword": keyword}), 200
else:
return jsonify("Error fetching surprise keys"), 500
@info_bp.route("/info/metrics")
@cross_origin()
def fetch_metrics():
data = aql.fetch_db_metrics()
if data:
return jsonify({"data": data}), 200
else:
return jsonify("Error fetching metrics"), 500
|
the-stack_106_28962 | from algoritmia.datastructures.digraphs import UndirectedGraph
from algoritmia.datastructures.mergefindsets import MergeFindSet
from algoritmia.datastructures.queues import Fifo
from random import shuffle
from victor.lab3._aux.labyrinthviewer import LabyrinthViewer
def create_labyrinth(rows, cols, n=0):
# for i in range(rows):
# for j in range(cols):
# vertices.append((i, j))
vertices = [(row, col) for row in range(rows)
for col in range(cols)]
print(vertices)
mfs = MergeFindSet()
for elem in vertices:
mfs.add(elem)
edges = []
for row, col in vertices:
if row+1 < rows:
edges.append(((row, col), (row+1, col)))
if col+1 < cols:
            edges.append(((row, col), (row, col + 1)))
shuffle(edges)
contador = 0
corridors = []
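    # Kruskal-style construction: shuffled edges are accepted whenever they
    # join two disjoint sets, which yields a random spanning tree (a perfect
    # maze); up to n extra edges are then kept to open additional corridors.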
for u, v in edges:
if mfs.find(u) != mfs.find(v):
mfs.merge(u, v)
corridors.append((u, v))
elif n > contador:
corridors.append((u, v))
            contador += 1
return UndirectedGraph(E=corridors)
def shortest_path(g, source, target):
def rec_aristas(u, v):
cola.push((u, u))
visitados.add(u)
while len(cola) > 0:
u, v = cola.pop()
aristas.append((u, v))
for suc in g.succs(v):
if suc not in visitados:
visitados.add(suc)
cola.push((v, suc))
aristas = []
cola = Fifo()
visitados = set()
rec_aristas(source, target)
return busca_camino(aristas, target)
def path(g, source, target):
def rec_aristas(u, v):
visitados.add(v)
listaAristas.append((u, v))
for suc in g.succs(v):
if suc not in visitados:
rec_aristas(v, suc)
listaAristas = []
visitados = set()
rec_aristas(source, source)
return busca_camino(listaAristas, target)
def busca_camino(listaAristas, target):
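    # Build a back-pointer map (child -> parent) from the traversal edges,
    # walk it from `target` back to the root, then reverse to obtain the path.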
bp = {}
for (u, v) in listaAristas:
bp[v] = u
camino = []
camino.append(target)
    v = target
while v != bp[v]:
v = bp[v]
camino.append(v)
camino.reverse()
return camino
if __name__ == '__main__':
alto = 100
ancho = 120
laberinto = create_labyrinth(alto, ancho, 1000)
    # Required: create a LabyrinthViewer, passing it the labyrinth graph
lv = LabyrinthViewer(laberinto, canvas_width=800, canvas_height=600, margin=10)
camino = path(laberinto, (0, 0), (alto - 1, ancho - 1))
caminoCorto = shortest_path(laberinto, (0,0), (alto-1, ancho-1))
lv.add_path(camino)
lv.add_path(caminoCorto, offset=3, color='Blue')
lv.run() |
the-stack_106_28963 | #!usr/bin/env python
#-*- coding:utf-8 _*-
"""
@version:
author:Sleepy
@time: 2019/02/02
@file: DataTable.py
@function:
@modify:
"""
from os import sys, path
root_path = path.dirname(path.dirname(path.abspath(__file__)))
from Utiltity.common import *
from Database.SqlRw import SqlAccess
class AliasTable:
"""
    Table field : alias name | standard name | comments
    An alias name can only map to one standard name.
    An alias name can NOT also be a standard name.
    A standard name can be mapped to multiple alias names.
    When we meet a new name, its standard name is empty: it is in ISOLATED mode.
    When we assign a standard name to an ISOLATED alias name, we need to update all the fields that include this name.
    When we rename a standard name, we need to update all the fields that include this name.
"""
class Participant:
def name(self) -> str: pass
def get_using_names(self) -> [str]: pass
def on_std_name_updating(self, old_name: str, new_name: str) -> (bool, str):
"""
This function will be invoked before name updating.
:param old_name: The old name
:param new_name: The new name
            :return: (True if the rename is allowed, False otherwise;
                      the reason why this name cannot be changed)
"""
nop(self)
nop(old_name)
nop(new_name)
return True, ''
def on_std_name_removed(self, name: str): pass
def on_std_name_updated(self, old_name: str, new_name: str): pass
TABLE = 'AliasTable'
FIELD = ['alias_name', 'standard_name', 'comments']
def __init__(self, sql_db: SqlAccess):
self.__sql_db = sql_db
self.__has_update = False
self.__participants = []
self.__using_name_list = []
self.__alias_standard_table = {} # Key: alias_name; Value: standard_name
self.__standard_alias_table = {} # Key: standard_name; Value: [alias_name]
def init(self, auto: bool) -> bool:
if auto:
if not self.load_from_db():
print('Error: Load Aliases Table Fail!')
return False
return True
def register_participant(self, participant: Participant):
self.__participants.append(participant)
# ------------------------------------------------------------------------------------------------------------------
def reset(self):
self.__has_update = False
self.__using_name_list = []
self.__alias_standard_table = {}
self.__standard_alias_table = {}
def reload(self):
self.load_from_db()
def collect_names(self):
names = []
for participant in self.__participants:
name = participant.get_using_names()
names.extend(name)
self.__using_name_list = list(set(names))
def check_naming_error(self):
pass
# ------------------------------------------------------------------------------------------------------------------
def add_alias(self, alias_name: str, standard_name: str) -> bool:
# """
# Add or re-assign a alias name to a standard name.
# > alias_name is empty: Illegal parameter, return False
# > alias_name is also a standard name: Illegal case, return False
# > standard_name is empty: Reset the alias-standard linkage
# > alias_name exists but haven't linked to a standard name: New assignment, check update all related fields.
# :param alias_name: Must not be an empty string.
# :param standard_name: Empty string to reset the alias-standard linkage.
# :param reason: The reason that add this mapping.
# :return: True if updated else False
# """
if alias_name == '' or alias_name == standard_name:
return False
if alias_name in self.__standard_alias_table.keys():
return False
self.__alias_standard_table[alias_name] = standard_name
if standard_name not in self.__standard_alias_table.keys():
self.__standard_alias_table[standard_name] = []
if alias_name not in self.__standard_alias_table[standard_name]:
self.__standard_alias_table[standard_name].append(alias_name)
self.__has_update = True
return True
# exists_std_name = self.__alias_standard_table[alias_name]
# if exists_std_name == standard_name:
# return True
# if exists_std_name == '':
# updated = self.__handle_name_change(alias_name, standard_name, alias_name)
# else:
# self.__alias_standard_table[alias_name] = standard_name
# updated = True
# if updated:
# self.__has_update = True
# return updated
def del_alias(self, alias_name: str) -> bool:
if alias_name == '':
return False
if alias_name in self.__alias_standard_table.keys():
del self.__alias_standard_table[alias_name]
self.__has_update = True
for key in list(self.__standard_alias_table.keys()):
val = self.__standard_alias_table[key]
if alias_name in val:
val.remove(alias_name)
self.__has_update = True
if len(val) == 0:
del self.__standard_alias_table[key]
return True
def update_standard_name(self, standard_name: str, standard_name_new: str) -> (bool, str):
update, reason = self.__query_name_updating(standard_name, standard_name_new)
if not update:
return update, reason
for alias in self.__alias_standard_table.keys():
if self.__alias_standard_table[alias] == standard_name:
self.__alias_standard_table[alias] = standard_name_new
self.__has_update = True
if standard_name_new not in self.__standard_alias_table.keys():
self.__standard_alias_table[standard_name_new] = {}
if standard_name in self.__standard_alias_table.keys():
# Merge alias and remove duplicates, finally del old standard name
alias = self.__standard_alias_table[standard_name]
alias.extend(self.__standard_alias_table[standard_name_new])
self.__standard_alias_table[standard_name_new] = list(set(alias))
del self.__standard_alias_table[standard_name]
self.__notify_name_updated(standard_name, standard_name_new)
return True, ''
def del_standard_name(self, standard_name: str):
if standard_name in self.__standard_alias_table:
del self.__standard_alias_table[standard_name]
self.__has_update = True
for alias in self.__alias_standard_table.keys():
if self.__alias_standard_table[alias] == standard_name:
self.__alias_standard_table[alias] = ''
self.__has_update = True
if standard_name in self.__using_name_list:
self.__using_name_list.remove(standard_name)
self.__notify_name_removed(standard_name)
# ------------------------------------------------------------------------------------------------------------------
def standardize(self, name: list or str) -> list or str:
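        """Map a name (or a list of names) to its standard form.
        Unknown names map to '' and unsupported input types return None."""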
if isinstance(name, str):
return self.__do_standardize(name)
elif isinstance(name, list):
return [self.__do_standardize(n) for n in name]
return None
def get_standard_name(self, name: str) -> str:
if name in self.__standard_alias_table.keys():
return name
return self.__alias_standard_table.get(name, '')
def get_alias_standard_table(self) -> dict:
return self.__alias_standard_table
def get_standard_name_list(self) -> list:
all_names = []
all_names.extend(self.__using_name_list)
all_names.extend(list(self.__standard_alias_table.keys()))
return list(set(all_names))
def get_uncategorized_name_list(self) -> list:
tmp_list = []
for key in self.__alias_standard_table.keys():
if self.__alias_standard_table[key] == '':
tmp_list.append(key)
return tmp_list
# --------------------------------------------------- Load/Save ---------------------------------------------------
def check_save(self):
if self.__has_update:
self.__has_update = not self.dump_to_db()
def load_from_db(self) -> bool:
self.reset()
tmp_list = self.__sql_db.ListFromDB(
AliasTable.TABLE, AliasTable.FIELD)
if tmp_list is None or len(tmp_list) == 0:
return False
for alias, standard, comments in tmp_list:
self.add_alias(alias, standard)
self.__has_update = False
return True
def dump_to_db(self) -> bool:
tmp_list = []
for alias in self.__alias_standard_table.keys():
standard = self.__alias_standard_table[alias]
tmp_list.append(alias)
tmp_list.append(standard)
tmp_list.append('')
self.__sql_db.ListToDB(
AliasTable.TABLE, tmp_list, -1, 3,
AliasTable.FIELD)
return True
def load_from_csv(self, file_name: str):
"""
Load alias - standard name mapping from a csv
The CSV file should have columns named: 'alias' and 'standard'
:param file_name: The CSV file that you want to load
:return: True if OK. Else False
"""
df = pd.read_csv(file_name, header=0)
column_alias_name = df['alias']
column_standard_name = df['standard']
for alias, standard in zip(column_alias_name, column_standard_name):
# self.add_alias(self.__trim_name(alias), standard)
try:
self.add_alias(alias.strip(), standard.strip())
except Exception as e:
self.add_alias(str(alias).strip(), str(standard).strip())
finally:
pass
return True
    def dump_to_csv(self, file_name: str) -> bool:
        # Dump the standard -> alias mapping plus any uncategorized names.
        tmp_list = []
        for standard, aliases in self.__standard_alias_table.items():
            tmp_list.append(standard)
            tmp_list.append('|'.join(aliases))
        for n in self.get_uncategorized_name_list():
            tmp_list.append('-')
            tmp_list.append(n)
        df = pd.DataFrame(np.array(tmp_list).reshape(-1, 2))
        df.columns = ['standard_name', 'alias_name']
try:
df.to_csv(file_name, encoding='utf_8_sig')
return True
except Exception as e:
print(e)
return False
finally:
pass
# ---------------------------------------------------- private -----------------------------------------------------
def __query_name_updating(self, old_std_name: str, standard_name: str):
for participant in self.__participants:
update, reason = participant.on_std_name_updating(old_std_name, standard_name)
if not update:
return False, reason
return True, ''
def __notify_name_updated(self, old_std_name: str, standard_name: str):
for participant in self.__participants:
participant.on_std_name_updated(old_std_name, standard_name)
def __notify_name_removed(self, standard_name: str):
for participant in self.__participants:
participant.on_std_name_removed(standard_name)
# def __handle_name_change(self, alias_name: str, standard_name, old_std_name: str) -> (bool, str):
# for listener in self.__listeners:
# update, reason = listener.on_std_name_updating(old_std_name, standard_name)
# if not update:
# return False, reason
#
# self.__alias_standard_table[alias_name] = standard_name
# if standard_name not in self.__standard_alias_table.keys():
# self.__standard_alias_table.keys().append(standard_name)
#
# for listener in self.__listeners:
# listener.on_std_name_updated(old_std_name, standard_name)
# return True, ''
def __do_standardize(self, name: str):
alias_name = name.strip()
standard_name = self.get_standard_name(alias_name)
return standard_name
def __trim_name(self, name: str) -> str:
name = name.strip()
name = self.__trim_space(name)
name = self.__trim_unit(name)
return name
def __trim_space(self, name) -> str:
TRIM_LIST = [' ']
return self.__list_trim(name, TRIM_LIST)
def __trim_unit(self, name: str) -> str:
# How to make it better? Regex? Semantic Analysis?
TRIM_LIST = ['(万元)', '(万元)']
return self.__list_trim(name, TRIM_LIST)
@staticmethod
def __list_trim(name: str, trim_list: [str]) -> str:
for t in trim_list:
name = name.replace(t, '')
return name
# ----------------------------------------- Cache for Quick Indexing -----------------------------------------
def __find_alias(self, alias_name: str) -> str:
pass
# --------------------------------------------------- Build ---------------------------------------------------
# def RebuildTable(self):
# self.__alias_standard_table.clear()
# self.__alias_standard_table.clear()
# self.__update_from_internet()
# # self.__update_from_local()
# return self.SaveTable()
# def __update_from_internet(self) -> bool:
# df = self.__fetch_standard_table()
# if '英文表达法' not in df.columns and '会计科目名称' not in df.columns:
# print('Cannot find the column in web.')
# return False
# column_alias_name = df['英文表达法']
# column_standard_name = df['会计科目名称']
# for s, a in zip(column_standard_name, column_alias_name):
# self.__add_alias(self.__trim_name(s), a)
# return True
#
# def __update_from_local(self) -> bool:
# df = pd.read_csv('Utiltity/NameTable.csv', header=0)
# column_alias_name = df['英文']
# column_standard_name = df['中文']
# for s, a in zip(column_standard_name, column_alias_name):
# self.__add_alias(self.__trim_name(s), a)
# return True
# @staticmethod
# def __fetch_standard_table() -> pd.DataFrame:
# # From baike.baidu.com
# soup = Utiltity.common.GetWebAsSoap(
# 'https://baike.baidu.com/item/%E4%BC%9A%E8%AE%A1%E7%A7%91%E7%9B%AE%E4%B8%AD%E8%8B%B1%E6%96%87%E5%AF%B9%E7%85%A7%20%EF%BC%88%E5%8C%97%E4%BA%AC%E5%B8%82%E5%AE%A1%E8%AE%A1%E5%B1%80%E5%8F%91%E5%B8%83%EF%BC%89',
# 'utf-8')
# table = soup.find('table', {'log-set-param': 'table_view'})
# if table is None:
# return None
#
# tr_list = table.findAll('tr')
# if len(tr_list) == 0:
# return None
#
# df_list = []
# for tr in tr_list:
# tmp_list = []
# td_list = tr.findAll('td')
# if len(td_list) != 5:
# continue
# for td in td_list:
# div = td.find('div')
# if div is not None:
# tmp_list.append(div.string.strip())
# else:
# tmp_list.append('')
# df_list.extend(tmp_list)
#
# df = pd.DataFrame(np.array(df_list).reshape(-1, 5))
# df.columns = df.iloc[0]
# df = df[1:]
#
# # print(df)
# return df
# ----------------------------------------------------- Test Code ------------------------------------------------------
class TestAliasParticipant(AliasTable.Participant):
def __init__(self):
self.old_std_name = ''
self.new_std_name = ''
self.del_std_name = ''
self.invoke_count = {
'on_std_name_updating': 0,
'on_std_name_removed': 0,
'on_std_name_updated': 0,
}
self.reject = False
self.reject_reason = ''
def name(self) -> str:
nop(self)
return 'test_participant'
def get_using_names(self) -> [str]:
nop(self)
return []
def on_std_name_updating(self, old_name: str, new_name: str) -> (bool, str):
self.old_std_name = old_name
self.new_std_name = new_name
self.invoke_count['on_std_name_updating'] += 1
return not self.reject, self.reject_reason
def on_std_name_removed(self, name: str):
self.del_std_name = name
self.invoke_count['on_std_name_removed'] += 1
def on_std_name_updated(self, old_name: str, new_name: str):
self.old_std_name = old_name
self.new_std_name = new_name
self.invoke_count['on_std_name_updated'] += 1
def __default_prepare_test() -> (AliasTable, TestAliasParticipant):
data_path = root_path + '/Data/'
sql_db = SqlAccess(data_path + 'sAsUtility.db')
alias_table = AliasTable(sql_db)
participant = TestAliasParticipant()
alias_table.register_participant(participant)
return alias_table, participant
def __default_prepare_test_data(alias_table: AliasTable):
pass
def test_alias_multiple_mapping():
alias_table, participant = __default_prepare_test()
alias_table.add_alias('A1', 'S1')
assert(alias_table.standardize('A1') == 'S1')
alias_table.add_alias('A1', 'S2')
assert(alias_table.standardize('A1') == 'S2')
alias_table.add_alias('A1', 'S3')
assert(alias_table.standardize('A1') == 'S3')
alias_table.add_alias('A2', 'S3')
alias_table.add_alias('A3', 'S3')
assert(alias_table.standardize('A1') == 'S3')
assert(alias_table.standardize('A2') == 'S3')
assert(alias_table.standardize('A3') == 'S3')
def test_add_remove_alias():
alias_table, participant = __default_prepare_test()
alias_table.add_alias('A1', 'S1')
alias_table.add_alias('A2', 'S1')
alias_table.add_alias('A3', 'S3')
alias_table.del_alias('A2')
assert(alias_table.standardize('A1') == 'S1')
assert(alias_table.standardize('A2') == '')
assert(alias_table.standardize('A3') == 'S3')
def test_operation_denied():
alias_table, participant = __default_prepare_test()
alias_table.add_alias('A1', 'S1')
assert(alias_table.add_alias('S1', 'S2') is False)
assert(alias_table.add_alias('A2', 'A2') is False)
def test_update_standard_name_allowed():
alias_table, participant = __default_prepare_test()
alias_table.add_alias('A1', 'S1')
alias_table.add_alias('A2', 'S1')
alias_table.add_alias('A3', 'S2')
alias_table.update_standard_name('S1', 'S2')
assert(participant.invoke_count['on_std_name_updating'] == 1)
assert(participant.invoke_count['on_std_name_updated'] == 1)
assert(alias_table.standardize('A1') == 'S2')
assert(alias_table.standardize('A2') == 'S2')
assert(alias_table.standardize('A3') == 'S2')
def test_del_standard_name():
alias_table, participant = __default_prepare_test()
alias_table.add_alias('A1', 'S1')
alias_table.add_alias('A2', 'S2')
alias_table.add_alias('A3', 'S3')
alias_table.del_standard_name('S1')
assert(participant.invoke_count['on_std_name_removed'] == 1)
assert(participant.del_std_name == 'S1')
assert(alias_table.standardize('A1') == '')
def test_entry():
test_alias_multiple_mapping()
test_add_remove_alias()
test_operation_denied()
test_update_standard_name_allowed()
test_del_standard_name()
pass
# ----------------------------------------------------- File Entry -----------------------------------------------------
def main():
test_entry()
    # If the program reaches here, all tests passed.
    print('All tests passed.')
# ------------------------------------------------- Exception Handling -------------------------------------------------
def exception_hook(type, value, tback):
# log the exception here
print('Exception hook triggered.')
print(type)
print(value)
print(tback)
# then call the default handler
sys.__excepthook__(type, value, tback)
sys.excepthook = exception_hook
if __name__ == "__main__":
try:
main()
except Exception as e:
print('Error =>', e)
print('Error =>', traceback.format_exc())
exit()
finally:
pass
|
the-stack_106_28970 | import functools
import re
from itertools import chain
from django.conf import settings
from django.db import models
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.operations.models import AlterModelOptions
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.utils import (
COMPILED_REGEX_TYPE,
RegexObject,
resolve_relation,
)
from django.utils.topological_sort import stable_topological_sort
class MigrationAutodetector:
"""
Take a pair of ProjectStates and compare them to see what the first would
need doing to make it match the second (the second usually being the
project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
def __init__(self, from_state, to_state, questioner=None):
self.from_state = from_state
self.to_state = to_state
self.questioner = questioner or MigrationQuestioner()
self.existing_apps = {app for app, model in from_state.models}
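    # Hedged usage sketch (mirrors how the makemigrations command drives this
    # class; `loader`, `apps` and `questioner` come from the caller, not from
    # this module):
    #   autodetector = MigrationAutodetector(loader.project_state(),
    #                                        ProjectState.from_apps(apps),
    #                                        questioner)
    #   changes = autodetector.changes(graph=loader.graph)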
def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None):
"""
Main entry point to produce a list of applicable changes.
Take a graph to base names on and an optional set of apps
to try and restrict to (restriction is not guaranteed)
"""
changes = self._detect_changes(convert_apps, graph)
changes = self.arrange_for_graph(changes, graph, migration_name)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
def deep_deconstruct(self, obj):
"""
Recursive deconstruction for a field and its arguments.
Used for full comparison for rename/alter; sometimes a single-level
deconstruction will not compare correctly.
"""
if isinstance(obj, list):
return [self.deep_deconstruct(value) for value in obj]
elif isinstance(obj, tuple):
return tuple(self.deep_deconstruct(value) for value in obj)
elif isinstance(obj, dict):
return {key: self.deep_deconstruct(value) for key, value in obj.items()}
elif isinstance(obj, functools.partial):
return (
obj.func,
self.deep_deconstruct(obj.args),
self.deep_deconstruct(obj.keywords),
)
elif isinstance(obj, COMPILED_REGEX_TYPE):
return RegexObject(obj)
elif isinstance(obj, type):
# If this is a type that implements 'deconstruct' as an instance method,
# avoid treating this as being deconstructible itself - see #22951
return obj
elif hasattr(obj, "deconstruct"):
deconstructed = obj.deconstruct()
if isinstance(obj, models.Field):
# we have a field which also returns a name
deconstructed = deconstructed[1:]
path, args, kwargs = deconstructed
return (
path,
[self.deep_deconstruct(value) for value in args],
{key: self.deep_deconstruct(value) for key, value in kwargs.items()},
)
else:
return obj
def only_relation_agnostic_fields(self, fields):
"""
Return a definition of the fields that ignores field names and
what related fields actually relate to. Used for detecting renames (as
the related fields change during renames).
"""
fields_def = []
for name, field in sorted(fields.items()):
deconstruction = self.deep_deconstruct(field)
if field.remote_field and field.remote_field.model:
deconstruction[2].pop("to", None)
fields_def.append(deconstruction)
return fields_def
def _detect_changes(self, convert_apps=None, graph=None):
"""
Return a dict of migration plans which will achieve the
change from from_state to to_state. The dict has app labels
as keys and a list of migrations as values.
The resulting migrations aren't specially named, but the names
do matter for dependencies inside the set.
convert_apps is the list of apps to convert to use migrations
(i.e. to make initial migrations for, in the usual case)
graph is an optional argument that, if provided, can help improve
dependency generation and avoid potential circular dependencies.
"""
# The first phase is generating all the operations for each app
# and gathering them into a big per-app list.
# Then go through that list, order it, and split into migrations to
# resolve dependencies caused by M2Ms and FKs.
self.generated_operations = {}
self.altered_indexes = {}
self.altered_constraints = {}
# Prepare some old/new state and model lists, separating
# proxy models and ignoring unmigrated apps.
self.old_model_keys = set()
self.old_proxy_keys = set()
self.old_unmanaged_keys = set()
self.new_model_keys = set()
self.new_proxy_keys = set()
self.new_unmanaged_keys = set()
for (app_label, model_name), model_state in self.from_state.models.items():
if not model_state.options.get("managed", True):
self.old_unmanaged_keys.add((app_label, model_name))
elif app_label not in self.from_state.real_apps:
if model_state.options.get("proxy"):
self.old_proxy_keys.add((app_label, model_name))
else:
self.old_model_keys.add((app_label, model_name))
for (app_label, model_name), model_state in self.to_state.models.items():
if not model_state.options.get("managed", True):
self.new_unmanaged_keys.add((app_label, model_name))
elif app_label not in self.from_state.real_apps or (
convert_apps and app_label in convert_apps
):
if model_state.options.get("proxy"):
self.new_proxy_keys.add((app_label, model_name))
else:
self.new_model_keys.add((app_label, model_name))
self.from_state.resolve_fields_and_relations()
self.to_state.resolve_fields_and_relations()
# Renames have to come first
self.generate_renamed_models()
# Prepare lists of fields and generate through model map
self._prepare_field_lists()
self._generate_through_model_map()
# Generate non-rename model operations
self.generate_deleted_models()
self.generate_created_models()
self.generate_deleted_proxies()
self.generate_created_proxies()
self.generate_altered_options()
self.generate_altered_managers()
# Create the altered indexes and store them in self.altered_indexes.
# This avoids the same computation in generate_removed_indexes()
# and generate_added_indexes().
self.create_altered_indexes()
self.create_altered_constraints()
# Generate index removal operations before field is removed
self.generate_removed_constraints()
self.generate_removed_indexes()
# Generate field renaming operations.
self.generate_renamed_fields()
# Generate removal of foo together.
self.generate_removed_altered_unique_together()
self.generate_removed_altered_index_together()
# Generate field operations.
self.generate_removed_fields()
self.generate_added_fields()
self.generate_altered_fields()
self.generate_altered_order_with_respect_to()
self.generate_altered_unique_together()
self.generate_altered_index_together()
self.generate_added_indexes()
self.generate_added_constraints()
self.generate_altered_db_table()
self._sort_migrations()
self._build_migration_list(graph)
self._optimize_migrations()
return self.migrations
def _prepare_field_lists(self):
"""
Prepare field lists and a list of the fields that used through models
in the old state so dependencies can be made from the through model
deletion to the field that uses it.
"""
self.kept_model_keys = self.old_model_keys & self.new_model_keys
self.kept_proxy_keys = self.old_proxy_keys & self.new_proxy_keys
self.kept_unmanaged_keys = self.old_unmanaged_keys & self.new_unmanaged_keys
self.through_users = {}
self.old_field_keys = {
(app_label, model_name, field_name)
for app_label, model_name in self.kept_model_keys
for field_name in self.from_state.models[
app_label, self.renamed_models.get((app_label, model_name), model_name)
].fields
}
self.new_field_keys = {
(app_label, model_name, field_name)
for app_label, model_name in self.kept_model_keys
for field_name in self.to_state.models[app_label, model_name].fields
}
def _generate_through_model_map(self):
"""Through model map generation."""
for app_label, model_name in sorted(self.old_model_keys):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
for field_name, field in old_model_state.fields.items():
if hasattr(field, "remote_field") and getattr(
field.remote_field, "through", None
):
through_key = resolve_relation(
field.remote_field.through, app_label, model_name
)
self.through_users[through_key] = (
app_label,
old_model_name,
field_name,
)
@staticmethod
def _resolve_dependency(dependency):
"""
Return the resolved dependency and a boolean denoting whether or not
it was swappable.
"""
if dependency[0] != "__setting__":
return dependency, False
resolved_app_label, resolved_object_name = getattr(
settings, dependency[1]
).split(".")
return (resolved_app_label, resolved_object_name.lower()) + dependency[2:], True
def _build_migration_list(self, graph=None):
"""
Chop the lists of operations up into migrations with dependencies on
each other. Do this by going through an app's list of operations until
one is found that has an outgoing dependency that isn't in another
app's migration yet (hasn't been chopped off its list). Then chop off
the operations before it into a migration and move onto the next app.
        If the loop completes without doing anything, there's a circular
dependency (which _should_ be impossible as the operations are
all split at this point so they can't depend and be depended on).
"""
self.migrations = {}
num_ops = sum(len(x) for x in self.generated_operations.values())
chop_mode = False
while num_ops:
# On every iteration, we step through all the apps and see if there
# is a completed set of operations.
# If we find that a subset of the operations are complete we can
# try to chop it off from the rest and continue, but we only
# do this if we've already been through the list once before
# without any chopping and nothing has changed.
for app_label in sorted(self.generated_operations):
chopped = []
dependencies = set()
for operation in list(self.generated_operations[app_label]):
deps_satisfied = True
operation_dependencies = set()
for dep in operation._auto_deps:
# Temporarily resolve the swappable dependency to
# prevent circular references. While keeping the
# dependency checks on the resolved model, add the
# swappable dependencies.
original_dep = dep
dep, is_swappable_dep = self._resolve_dependency(dep)
if dep[0] != app_label:
# External app dependency. See if it's not yet
# satisfied.
for other_operation in self.generated_operations.get(
dep[0], []
):
if self.check_dependency(other_operation, dep):
deps_satisfied = False
break
if not deps_satisfied:
break
else:
if is_swappable_dep:
operation_dependencies.add(
(original_dep[0], original_dep[1])
)
elif dep[0] in self.migrations:
operation_dependencies.add(
(dep[0], self.migrations[dep[0]][-1].name)
)
else:
# If we can't find the other app, we add a
# first/last dependency, but only if we've
# already been through once and checked
# everything.
if chop_mode:
# If the app already exists, we add a
# dependency on the last migration, as
# we don't know which migration
# contains the target field. If it's
# not yet migrated or has no
# migrations, we use __first__.
if graph and graph.leaf_nodes(dep[0]):
operation_dependencies.add(
graph.leaf_nodes(dep[0])[0]
)
else:
operation_dependencies.add(
(dep[0], "__first__")
)
else:
deps_satisfied = False
if deps_satisfied:
chopped.append(operation)
dependencies.update(operation_dependencies)
del self.generated_operations[app_label][0]
else:
break
# Make a migration! Well, only if there's stuff to put in it
if dependencies or chopped:
if not self.generated_operations[app_label] or chop_mode:
subclass = type(
"Migration",
(Migration,),
{"operations": [], "dependencies": []},
)
instance = subclass(
"auto_%i" % (len(self.migrations.get(app_label, [])) + 1),
app_label,
)
instance.dependencies = list(dependencies)
instance.operations = chopped
instance.initial = app_label not in self.existing_apps
self.migrations.setdefault(app_label, []).append(instance)
chop_mode = False
else:
self.generated_operations[app_label] = (
chopped + self.generated_operations[app_label]
)
new_num_ops = sum(len(x) for x in self.generated_operations.values())
if new_num_ops == num_ops:
if not chop_mode:
chop_mode = True
else:
raise ValueError(
"Cannot resolve operation dependencies: %r"
% self.generated_operations
)
num_ops = new_num_ops
def _sort_migrations(self):
"""
Reorder to make things possible. Reordering may be needed so FKs work
nicely inside the same app.
"""
for app_label, ops in sorted(self.generated_operations.items()):
# construct a dependency graph for intra-app dependencies
dependency_graph = {op: set() for op in ops}
for op in ops:
for dep in op._auto_deps:
# Resolve intra-app dependencies to handle circular
# references involving a swappable model.
dep = self._resolve_dependency(dep)[0]
if dep[0] == app_label:
for op2 in ops:
if self.check_dependency(op2, dep):
dependency_graph[op].add(op2)
# we use a stable sort for deterministic tests & general behavior
self.generated_operations[app_label] = stable_topological_sort(
ops, dependency_graph
)
def _optimize_migrations(self):
# Add in internal dependencies among the migrations
for app_label, migrations in self.migrations.items():
for m1, m2 in zip(migrations, migrations[1:]):
m2.dependencies.append((app_label, m1.name))
# De-dupe dependencies
for migrations in self.migrations.values():
for migration in migrations:
migration.dependencies = list(set(migration.dependencies))
# Optimize migrations
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.operations = MigrationOptimizer().optimize(
migration.operations, app_label
)
def check_dependency(self, operation, dependency):
"""
Return True if the given operation depends on the given dependency,
False otherwise.
"""
# Created model
if dependency[2] is None and dependency[3] is True:
return (
isinstance(operation, operations.CreateModel)
and operation.name_lower == dependency[1].lower()
)
# Created field
elif dependency[2] is not None and dependency[3] is True:
return (
isinstance(operation, operations.CreateModel)
and operation.name_lower == dependency[1].lower()
and any(dependency[2] == x for x, y in operation.fields)
) or (
isinstance(operation, operations.AddField)
and operation.model_name_lower == dependency[1].lower()
and operation.name_lower == dependency[2].lower()
)
# Removed field
elif dependency[2] is not None and dependency[3] is False:
return (
isinstance(operation, operations.RemoveField)
and operation.model_name_lower == dependency[1].lower()
and operation.name_lower == dependency[2].lower()
)
# Removed model
elif dependency[2] is None and dependency[3] is False:
return (
isinstance(operation, operations.DeleteModel)
and operation.name_lower == dependency[1].lower()
)
# Field being altered
elif dependency[2] is not None and dependency[3] == "alter":
return (
isinstance(operation, operations.AlterField)
and operation.model_name_lower == dependency[1].lower()
and operation.name_lower == dependency[2].lower()
)
# order_with_respect_to being unset for a field
elif dependency[2] is not None and dependency[3] == "order_wrt_unset":
return (
isinstance(operation, operations.AlterOrderWithRespectTo)
and operation.name_lower == dependency[1].lower()
and (operation.order_with_respect_to or "").lower()
!= dependency[2].lower()
)
# Field is removed and part of an index/unique_together
elif dependency[2] is not None and dependency[3] == "foo_together_change":
return (
isinstance(
operation,
(operations.AlterUniqueTogether, operations.AlterIndexTogether),
)
and operation.name_lower == dependency[1].lower()
)
# Unknown dependency. Raise an error.
else:
raise ValueError("Can't handle dependency %r" % (dependency,))
def add_operation(self, app_label, operation, dependencies=None, beginning=False):
# Dependencies are
# (app_label, model_name, field_name, create/delete as True/False)
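        # For illustration (hypothetical app/model names): ("blog", "post", None, True)
        # waits for the creation of blog.Post, while ("blog", "post", "author", False)
        # waits for the removal of its "author" field.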
operation._auto_deps = dependencies or []
if beginning:
self.generated_operations.setdefault(app_label, []).insert(0, operation)
else:
self.generated_operations.setdefault(app_label, []).append(operation)
def swappable_first_key(self, item):
"""
Place potential swappable models first in lists of created models (only
real way to solve #22783).
"""
try:
model_state = self.to_state.models[item]
base_names = {
base if isinstance(base, str) else base.__name__
for base in model_state.bases
}
string_version = "%s.%s" % (item[0], item[1])
if (
model_state.options.get("swappable")
or "AbstractUser" in base_names
or "AbstractBaseUser" in base_names
or settings.AUTH_USER_MODEL.lower() == string_version.lower()
):
return ("___" + item[0], "___" + item[1])
except LookupError:
pass
return item
def generate_renamed_models(self):
"""
Find any renamed models, generate the operations for them, and remove
the old entry from the model lists. Must be run before other
model-level generation.
"""
self.renamed_models = {}
self.renamed_models_rel = {}
added_models = self.new_model_keys - self.old_model_keys
for app_label, model_name in sorted(added_models):
model_state = self.to_state.models[app_label, model_name]
model_fields_def = self.only_relation_agnostic_fields(model_state.fields)
removed_models = self.old_model_keys - self.new_model_keys
for rem_app_label, rem_model_name in removed_models:
if rem_app_label == app_label:
rem_model_state = self.from_state.models[
rem_app_label, rem_model_name
]
rem_model_fields_def = self.only_relation_agnostic_fields(
rem_model_state.fields
)
if model_fields_def == rem_model_fields_def:
if self.questioner.ask_rename_model(
rem_model_state, model_state
):
dependencies = []
fields = list(model_state.fields.values()) + [
field.remote_field
for relations in self.to_state.relations[
app_label, model_name
].values()
for field in relations.values()
]
for field in fields:
if field.is_relation:
dependencies.extend(
self._get_dependencies_for_foreign_key(
app_label,
model_name,
field,
self.to_state,
)
)
self.add_operation(
app_label,
operations.RenameModel(
old_name=rem_model_state.name,
new_name=model_state.name,
),
dependencies=dependencies,
)
self.renamed_models[app_label, model_name] = rem_model_name
renamed_models_rel_key = "%s.%s" % (
rem_model_state.app_label,
rem_model_state.name_lower,
)
self.renamed_models_rel[
renamed_models_rel_key
] = "%s.%s" % (
model_state.app_label,
model_state.name_lower,
)
self.old_model_keys.remove((rem_app_label, rem_model_name))
self.old_model_keys.add((app_label, model_name))
break
def generate_created_models(self):
"""
Find all new models (both managed and unmanaged) and make create
operations for them as well as separate operations to create any
foreign key or M2M relationships (these are optimized later, if
possible).
Defer any model options that refer to collections of fields that might
be deferred (e.g. unique_together, index_together).
"""
old_keys = self.old_model_keys | self.old_unmanaged_keys
added_models = self.new_model_keys - old_keys
added_unmanaged_models = self.new_unmanaged_keys - old_keys
all_added_models = chain(
sorted(added_models, key=self.swappable_first_key, reverse=True),
sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True),
)
for app_label, model_name in all_added_models:
model_state = self.to_state.models[app_label, model_name]
# Gather related fields
related_fields = {}
primary_key_rel = None
for field_name, field in model_state.fields.items():
if field.remote_field:
if field.remote_field.model:
if field.primary_key:
primary_key_rel = field.remote_field.model
elif not field.remote_field.parent_link:
related_fields[field_name] = field
if getattr(field.remote_field, "through", None):
related_fields[field_name] = field
# Are there indexes/unique|index_together to defer?
indexes = model_state.options.pop("indexes")
constraints = model_state.options.pop("constraints")
unique_together = model_state.options.pop("unique_together", None)
index_together = model_state.options.pop("index_together", None)
order_with_respect_to = model_state.options.pop(
"order_with_respect_to", None
)
# Depend on the deletion of any possible proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, str) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Depend on the removal of base fields if the new model has
# a field with the same name.
old_base_model_state = self.from_state.models.get(
(base_app_label, base_name)
)
new_base_model_state = self.to_state.models.get(
(base_app_label, base_name)
)
if old_base_model_state and new_base_model_state:
removed_base_fields = (
set(old_base_model_state.fields)
.difference(
new_base_model_state.fields,
)
.intersection(model_state.fields)
)
for removed_base_field in removed_base_fields:
dependencies.append(
(base_app_label, base_name, removed_base_field, False)
)
# Depend on the other end of the primary key if it's a relation
if primary_key_rel:
dependencies.append(
resolve_relation(
primary_key_rel,
app_label,
model_name,
)
+ (None, True)
)
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[
d
for d in model_state.fields.items()
if d[0] not in related_fields
],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
dependencies=dependencies,
beginning=True,
)
# Don't add operations which modify the database for unmanaged models
if not model_state.options.get("managed", True):
continue
# Generate operations for each related field
for name, field in sorted(related_fields.items()):
dependencies = self._get_dependencies_for_foreign_key(
app_label,
model_name,
field,
self.to_state,
)
# Depend on our own model being created
dependencies.append((app_label, model_name, None, True))
# Make operation
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=name,
field=field,
),
dependencies=list(set(dependencies)),
)
            # Generate other operations
if order_with_respect_to:
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=order_with_respect_to,
),
dependencies=[
(app_label, model_name, order_with_respect_to, True),
(app_label, model_name, None, True),
],
)
related_dependencies = [
(app_label, model_name, name, True) for name in sorted(related_fields)
]
related_dependencies.append((app_label, model_name, None, True))
for index in indexes:
self.add_operation(
app_label,
operations.AddIndex(
model_name=model_name,
index=index,
),
dependencies=related_dependencies,
)
for constraint in constraints:
self.add_operation(
app_label,
operations.AddConstraint(
model_name=model_name,
constraint=constraint,
),
dependencies=related_dependencies,
)
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=unique_together,
),
dependencies=related_dependencies,
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=index_together,
),
dependencies=related_dependencies,
)
# Fix relationships if the model changed from a proxy model to a
# concrete model.
relations = self.to_state.relations
if (app_label, model_name) in self.old_proxy_keys:
for related_model_key, related_fields in relations[
app_label, model_name
].items():
related_model_state = self.to_state.models[related_model_key]
for related_field_name, related_field in related_fields.items():
self.add_operation(
related_model_state.app_label,
operations.AlterField(
model_name=related_model_state.name,
name=related_field_name,
field=related_field,
),
dependencies=[(app_label, model_name, None, True)],
)
def generate_created_proxies(self):
"""
        Make CreateModel statements for proxy models. Use the same statements
        as for concrete models so there's less code duplication, but for proxy
        models it's safe to skip all the field handling and emit a bare operation.
"""
added = self.new_proxy_keys - self.old_proxy_keys
for app_label, model_name in sorted(added):
model_state = self.to_state.models[app_label, model_name]
assert model_state.options.get("proxy")
# Depend on the deletion of any possible non-proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, str) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
# Depend on the deletion of any possible non-proxy version of us
dependencies=dependencies,
)
def generate_deleted_models(self):
"""
Find all deleted models (managed and unmanaged) and make delete
operations for them as well as separate operations to delete any
foreign key or M2M relationships (these are optimized later, if
possible).
Also bring forward removal of any model options that refer to
collections of fields - the inverse of generate_created_models().
"""
new_keys = self.new_model_keys | self.new_unmanaged_keys
deleted_models = self.old_model_keys - new_keys
deleted_unmanaged_models = self.old_unmanaged_keys - new_keys
all_deleted_models = chain(
sorted(deleted_models), sorted(deleted_unmanaged_models)
)
for app_label, model_name in all_deleted_models:
model_state = self.from_state.models[app_label, model_name]
# Gather related fields
related_fields = {}
for field_name, field in model_state.fields.items():
if field.remote_field:
if field.remote_field.model:
related_fields[field_name] = field
if getattr(field.remote_field, "through", None):
related_fields[field_name] = field
# Generate option removal first
unique_together = model_state.options.pop("unique_together", None)
index_together = model_state.options.pop("index_together", None)
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=None,
),
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=None,
),
)
# Then remove each related field
for name in sorted(related_fields):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=name,
),
)
# Finally, remove the model.
# This depends on both the removal/alteration of all incoming fields
# and the removal of all its own related fields, and if it's
# a through model the field that references it.
dependencies = []
relations = self.from_state.relations
for (
related_object_app_label,
object_name,
), relation_related_fields in relations[app_label, model_name].items():
for field_name, field in relation_related_fields.items():
dependencies.append(
(related_object_app_label, object_name, field_name, False),
)
if not field.many_to_many:
dependencies.append(
(
related_object_app_label,
object_name,
field_name,
"alter",
),
)
for name in sorted(related_fields):
dependencies.append((app_label, model_name, name, False))
# We're referenced in another field's through=
through_user = self.through_users.get((app_label, model_state.name_lower))
if through_user:
dependencies.append(
(through_user[0], through_user[1], through_user[2], False)
)
# Finally, make the operation, deduping any dependencies
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
dependencies=list(set(dependencies)),
)
def generate_deleted_proxies(self):
"""Make DeleteModel options for proxy models."""
deleted = self.old_proxy_keys - self.new_proxy_keys
for app_label, model_name in sorted(deleted):
model_state = self.from_state.models[app_label, model_name]
assert model_state.options.get("proxy")
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
)
def generate_renamed_fields(self):
"""Work out renamed fields."""
self.renamed_fields = {}
for app_label, model_name, field_name in sorted(
self.new_field_keys - self.old_field_keys
):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
field = new_model_state.get_field(field_name)
# Scan to see if this is actually a rename!
field_dec = self.deep_deconstruct(field)
for rem_app_label, rem_model_name, rem_field_name in sorted(
self.old_field_keys - self.new_field_keys
):
if rem_app_label == app_label and rem_model_name == model_name:
old_field = old_model_state.get_field(rem_field_name)
old_field_dec = self.deep_deconstruct(old_field)
if (
field.remote_field
and field.remote_field.model
and "to" in old_field_dec[2]
):
old_rel_to = old_field_dec[2]["to"]
if old_rel_to in self.renamed_models_rel:
old_field_dec[2]["to"] = self.renamed_models_rel[old_rel_to]
old_field.set_attributes_from_name(rem_field_name)
old_db_column = old_field.get_attname_column()[1]
if old_field_dec == field_dec or (
# Was the field renamed and db_column equal to the
# old field's column added?
old_field_dec[0:2] == field_dec[0:2]
and dict(old_field_dec[2], db_column=old_db_column)
== field_dec[2]
):
if self.questioner.ask_rename(
model_name, rem_field_name, field_name, field
):
# A db_column mismatch requires a prior noop
# AlterField for the subsequent RenameField to be a
# noop on attempts at preserving the old name.
if old_field.db_column != field.db_column:
altered_field = field.clone()
altered_field.name = rem_field_name
self.add_operation(
app_label,
operations.AlterField(
model_name=model_name,
name=rem_field_name,
field=altered_field,
),
)
self.add_operation(
app_label,
operations.RenameField(
model_name=model_name,
old_name=rem_field_name,
new_name=field_name,
),
)
self.old_field_keys.remove(
(rem_app_label, rem_model_name, rem_field_name)
)
self.old_field_keys.add((app_label, model_name, field_name))
self.renamed_fields[
app_label, model_name, field_name
] = rem_field_name
break
def generate_added_fields(self):
"""Make AddField operations."""
for app_label, model_name, field_name in sorted(
self.new_field_keys - self.old_field_keys
):
self._generate_added_field(app_label, model_name, field_name)
def _generate_added_field(self, app_label, model_name, field_name):
field = self.to_state.models[app_label, model_name].get_field(field_name)
# Fields that are foreignkeys/m2ms depend on stuff
dependencies = []
if field.remote_field and field.remote_field.model:
dependencies.extend(
self._get_dependencies_for_foreign_key(
app_label,
model_name,
field,
self.to_state,
)
)
# You can't just add NOT NULL fields with no default or fields
# which don't allow empty strings as default.
time_fields = (models.DateField, models.DateTimeField, models.TimeField)
preserve_default = (
field.null
or field.has_default()
or field.many_to_many
or (field.blank and field.empty_strings_allowed)
or (isinstance(field, time_fields) and field.auto_now)
)
if not preserve_default:
field = field.clone()
if isinstance(field, time_fields) and field.auto_now_add:
field.default = self.questioner.ask_auto_now_add_addition(
field_name, model_name
)
else:
field.default = self.questioner.ask_not_null_addition(
field_name, model_name
)
if (
field.unique
and field.default is not models.NOT_PROVIDED
and callable(field.default)
):
self.questioner.ask_unique_callable_default_addition(field_name, model_name)
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
),
dependencies=dependencies,
)
def generate_removed_fields(self):
"""Make RemoveField operations."""
for app_label, model_name, field_name in sorted(
self.old_field_keys - self.new_field_keys
):
self._generate_removed_field(app_label, model_name, field_name)
def _generate_removed_field(self, app_label, model_name, field_name):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=field_name,
),
# We might need to depend on the removal of an
# order_with_respect_to or index/unique_together operation;
# this is safely ignored if there isn't one
dependencies=[
(app_label, model_name, field_name, "order_wrt_unset"),
(app_label, model_name, field_name, "foo_together_change"),
],
)
def generate_altered_fields(self):
"""
Make AlterField operations, or possibly RemovedField/AddField if alter
isn't possible.
"""
for app_label, model_name, field_name in sorted(
self.old_field_keys & self.new_field_keys
):
# Did the field change?
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_field_name = self.renamed_fields.get(
(app_label, model_name, field_name), field_name
)
old_field = self.from_state.models[app_label, old_model_name].get_field(
old_field_name
)
new_field = self.to_state.models[app_label, model_name].get_field(
field_name
)
dependencies = []
# Implement any model renames on relations; these are handled by RenameModel
# so we need to exclude them from the comparison
if hasattr(new_field, "remote_field") and getattr(
new_field.remote_field, "model", None
):
rename_key = resolve_relation(
new_field.remote_field.model, app_label, model_name
)
if rename_key in self.renamed_models:
new_field.remote_field.model = old_field.remote_field.model
# Handle ForeignKey which can only have a single to_field.
remote_field_name = getattr(new_field.remote_field, "field_name", None)
if remote_field_name:
to_field_rename_key = rename_key + (remote_field_name,)
if to_field_rename_key in self.renamed_fields:
# Repoint both model and field name because to_field
# inclusion in ForeignKey.deconstruct() is based on
# both.
new_field.remote_field.model = old_field.remote_field.model
new_field.remote_field.field_name = (
old_field.remote_field.field_name
)
# Handle ForeignObjects which can have multiple from_fields/to_fields.
from_fields = getattr(new_field, "from_fields", None)
if from_fields:
from_rename_key = (app_label, model_name)
new_field.from_fields = tuple(
[
self.renamed_fields.get(
from_rename_key + (from_field,), from_field
)
for from_field in from_fields
]
)
new_field.to_fields = tuple(
[
self.renamed_fields.get(rename_key + (to_field,), to_field)
for to_field in new_field.to_fields
]
)
dependencies.extend(
self._get_dependencies_for_foreign_key(
app_label,
model_name,
new_field,
self.to_state,
)
)
if hasattr(new_field, "remote_field") and getattr(
new_field.remote_field, "through", None
):
rename_key = resolve_relation(
new_field.remote_field.through, app_label, model_name
)
if rename_key in self.renamed_models:
new_field.remote_field.through = old_field.remote_field.through
old_field_dec = self.deep_deconstruct(old_field)
new_field_dec = self.deep_deconstruct(new_field)
# If the field was confirmed to be renamed it means that only
# db_column was allowed to change which generate_renamed_fields()
# already accounts for by adding an AlterField operation.
if old_field_dec != new_field_dec and old_field_name == field_name:
both_m2m = old_field.many_to_many and new_field.many_to_many
neither_m2m = not old_field.many_to_many and not new_field.many_to_many
if both_m2m or neither_m2m:
# Either both fields are m2m or neither is
preserve_default = True
if (
old_field.null
and not new_field.null
and not new_field.has_default()
and not new_field.many_to_many
):
field = new_field.clone()
new_default = self.questioner.ask_not_null_alteration(
field_name, model_name
)
if new_default is not models.NOT_PROVIDED:
field.default = new_default
preserve_default = False
else:
field = new_field
self.add_operation(
app_label,
operations.AlterField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
),
dependencies=dependencies,
)
else:
# We cannot alter between m2m and concrete fields
self._generate_removed_field(app_label, model_name, field_name)
self._generate_added_field(app_label, model_name, field_name)
def create_altered_indexes(self):
option_name = operations.AddIndex.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_indexes = old_model_state.options[option_name]
new_indexes = new_model_state.options[option_name]
add_idx = [idx for idx in new_indexes if idx not in old_indexes]
rem_idx = [idx for idx in old_indexes if idx not in new_indexes]
self.altered_indexes.update(
{
(app_label, model_name): {
"added_indexes": add_idx,
"removed_indexes": rem_idx,
}
}
)
def generate_added_indexes(self):
for (app_label, model_name), alt_indexes in self.altered_indexes.items():
for index in alt_indexes["added_indexes"]:
self.add_operation(
app_label,
operations.AddIndex(
model_name=model_name,
index=index,
),
)
def generate_removed_indexes(self):
for (app_label, model_name), alt_indexes in self.altered_indexes.items():
for index in alt_indexes["removed_indexes"]:
self.add_operation(
app_label,
operations.RemoveIndex(
model_name=model_name,
name=index.name,
),
)
def create_altered_constraints(self):
option_name = operations.AddConstraint.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_constraints = old_model_state.options[option_name]
new_constraints = new_model_state.options[option_name]
add_constraints = [c for c in new_constraints if c not in old_constraints]
rem_constraints = [c for c in old_constraints if c not in new_constraints]
self.altered_constraints.update(
{
(app_label, model_name): {
"added_constraints": add_constraints,
"removed_constraints": rem_constraints,
}
}
)
def generate_added_constraints(self):
for (
app_label,
model_name,
), alt_constraints in self.altered_constraints.items():
for constraint in alt_constraints["added_constraints"]:
self.add_operation(
app_label,
operations.AddConstraint(
model_name=model_name,
constraint=constraint,
),
)
def generate_removed_constraints(self):
for (
app_label,
model_name,
), alt_constraints in self.altered_constraints.items():
for constraint in alt_constraints["removed_constraints"]:
self.add_operation(
app_label,
operations.RemoveConstraint(
model_name=model_name,
name=constraint.name,
),
)
@staticmethod
def _get_dependencies_for_foreign_key(app_label, model_name, field, project_state):
remote_field_model = None
if hasattr(field.remote_field, "model"):
remote_field_model = field.remote_field.model
else:
relations = project_state.relations[app_label, model_name]
for (remote_app_label, remote_model_name), fields in relations.items():
if any(
field == related_field.remote_field
for related_field in fields.values()
):
remote_field_model = f"{remote_app_label}.{remote_model_name}"
break
# Account for FKs to swappable models
swappable_setting = getattr(field, "swappable_setting", None)
if swappable_setting is not None:
dep_app_label = "__setting__"
dep_object_name = swappable_setting
else:
dep_app_label, dep_object_name = resolve_relation(
remote_field_model,
app_label,
model_name,
)
dependencies = [(dep_app_label, dep_object_name, None, True)]
if getattr(field.remote_field, "through", None):
through_app_label, through_object_name = resolve_relation(
remote_field_model,
app_label,
model_name,
)
dependencies.append((through_app_label, through_object_name, None, True))
return dependencies
def _get_altered_foo_together_operations(self, option_name):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
# We run the old version through the field renames to account for those
old_value = old_model_state.options.get(option_name)
old_value = (
{
tuple(
self.renamed_fields.get((app_label, model_name, n), n)
for n in unique
)
for unique in old_value
}
if old_value
else set()
)
new_value = new_model_state.options.get(option_name)
new_value = set(new_value) if new_value else set()
if old_value != new_value:
dependencies = []
for foo_togethers in new_value:
for field_name in foo_togethers:
field = new_model_state.get_field(field_name)
if field.remote_field and field.remote_field.model:
dependencies.extend(
self._get_dependencies_for_foreign_key(
app_label,
model_name,
field,
self.to_state,
)
)
yield (
old_value,
new_value,
app_label,
model_name,
dependencies,
)
def _generate_removed_altered_foo_together(self, operation):
for (
old_value,
new_value,
app_label,
model_name,
dependencies,
) in self._get_altered_foo_together_operations(operation.option_name):
removal_value = new_value.intersection(old_value)
if removal_value or old_value:
self.add_operation(
app_label,
operation(
name=model_name, **{operation.option_name: removal_value}
),
dependencies=dependencies,
)
def generate_removed_altered_unique_together(self):
self._generate_removed_altered_foo_together(operations.AlterUniqueTogether)
def generate_removed_altered_index_together(self):
self._generate_removed_altered_foo_together(operations.AlterIndexTogether)
def _generate_altered_foo_together(self, operation):
for (
old_value,
new_value,
app_label,
model_name,
dependencies,
) in self._get_altered_foo_together_operations(operation.option_name):
removal_value = new_value.intersection(old_value)
if new_value != removal_value:
self.add_operation(
app_label,
operation(name=model_name, **{operation.option_name: new_value}),
dependencies=dependencies,
)
def generate_altered_unique_together(self):
self._generate_altered_foo_together(operations.AlterUniqueTogether)
def generate_altered_index_together(self):
self._generate_altered_foo_together(operations.AlterIndexTogether)
def generate_altered_db_table(self):
models_to_check = self.kept_model_keys.union(
self.kept_proxy_keys, self.kept_unmanaged_keys
)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_db_table_name = old_model_state.options.get("db_table")
new_db_table_name = new_model_state.options.get("db_table")
if old_db_table_name != new_db_table_name:
self.add_operation(
app_label,
operations.AlterModelTable(
name=model_name,
table=new_db_table_name,
),
)
def generate_altered_options(self):
"""
Work out if any non-schema-affecting options have changed and make an
operation to represent them in state changes (in case Python code in
migrations needs them).
"""
models_to_check = self.kept_model_keys.union(
self.kept_proxy_keys,
self.kept_unmanaged_keys,
# unmanaged converted to managed
self.old_unmanaged_keys & self.new_model_keys,
# managed converted to unmanaged
self.old_model_keys & self.new_unmanaged_keys,
)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_options = {
key: value
for key, value in old_model_state.options.items()
if key in AlterModelOptions.ALTER_OPTION_KEYS
}
new_options = {
key: value
for key, value in new_model_state.options.items()
if key in AlterModelOptions.ALTER_OPTION_KEYS
}
if old_options != new_options:
self.add_operation(
app_label,
operations.AlterModelOptions(
name=model_name,
options=new_options,
),
)
def generate_altered_order_with_respect_to(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if old_model_state.options.get(
"order_with_respect_to"
) != new_model_state.options.get("order_with_respect_to"):
# Make sure it comes second if we're adding
# (removal dependency is part of RemoveField)
dependencies = []
if new_model_state.options.get("order_with_respect_to"):
dependencies.append(
(
app_label,
model_name,
new_model_state.options["order_with_respect_to"],
True,
)
)
# Actually generate the operation
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=new_model_state.options.get(
"order_with_respect_to"
),
),
dependencies=dependencies,
)
def generate_altered_managers(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if old_model_state.managers != new_model_state.managers:
self.add_operation(
app_label,
operations.AlterModelManagers(
name=model_name,
managers=new_model_state.managers,
),
)
def arrange_for_graph(self, changes, graph, migration_name=None):
"""
Take a result from changes() and a MigrationGraph, and fix the names
and dependencies of the changes so they extend the graph from the leaf
nodes for each app.
"""
leaves = graph.leaf_nodes()
name_map = {}
for app_label, migrations in list(changes.items()):
if not migrations:
continue
# Find the app label's current leaf node
app_leaf = None
for leaf in leaves:
if leaf[0] == app_label:
app_leaf = leaf
break
# Do they want an initial migration for this app?
if app_leaf is None and not self.questioner.ask_initial(app_label):
# They don't.
for migration in migrations:
name_map[(app_label, migration.name)] = (app_label, "__first__")
del changes[app_label]
continue
# Work out the next number in the sequence
if app_leaf is None:
next_number = 1
else:
next_number = (self.parse_number(app_leaf[1]) or 0) + 1
# Name each migration
for i, migration in enumerate(migrations):
if i == 0 and app_leaf:
migration.dependencies.append(app_leaf)
new_name_parts = ["%04i" % next_number]
if migration_name:
new_name_parts.append(migration_name)
elif i == 0 and not app_leaf:
new_name_parts.append("initial")
else:
new_name_parts.append(migration.suggest_name()[:100])
new_name = "_".join(new_name_parts)
name_map[(app_label, migration.name)] = (app_label, new_name)
next_number += 1
migration.name = new_name
# Now fix dependencies
for migrations in changes.values():
for migration in migrations:
migration.dependencies = [
name_map.get(d, d) for d in migration.dependencies
]
return changes
def _trim_to_apps(self, changes, app_labels):
"""
Take changes from arrange_for_graph() and set of app labels, and return
a modified set of changes which trims out as many migrations that are
not in app_labels as possible. Note that some other migrations may
still be present as they may be required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
required_apps.update(
*[app_dependencies.get(app_label, ()) for app_label in required_apps]
)
# Remove all migrations that aren't needed
for app_label in list(changes):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def parse_number(cls, name):
"""
Given a migration name, try to extract a number from the beginning of
it. For a squashed migration such as '0001_squashed_0004…', return the
second number. If no number is found, return None.
"""
if squashed_match := re.search(r".*_squashed_(\d+)", name):
return int(squashed_match[1])
match = re.match(r"^\d+", name)
if match:
return int(match[0])
return None
|
the-stack_106_28971 | # coding: utf-8
"""
Copyright 2018 OSIsoft, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
<http://www.apache.org/licenses/LICENSE-2.0>
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
class PITimedValues(object):
swagger_types = {
'items': 'list[PITimedValue]',
'units_abbreviation': 'str',
'web_exception': 'PIWebException',
}
attribute_map = {
'items': 'Items',
'units_abbreviation': 'UnitsAbbreviation',
'web_exception': 'WebException',
}
def __init__(self, items=None, units_abbreviation=None, web_exception=None):
self._items = None
self._units_abbreviation = None
self._web_exception = None
if items is not None:
self.items = items
if units_abbreviation is not None:
self.units_abbreviation = units_abbreviation
if web_exception is not None:
self.web_exception = web_exception
@property
def items(self):
return self._items
@items.setter
def items(self, items):
self._items = items
@property
def units_abbreviation(self):
return self._units_abbreviation
@units_abbreviation.setter
def units_abbreviation(self, units_abbreviation):
self._units_abbreviation = units_abbreviation
@property
def web_exception(self):
return self._web_exception
@web_exception.setter
def web_exception(self, web_exception):
self._web_exception = web_exception
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __ne__(self, other):
return not self == other
def __eq__(self, other):
if not isinstance(other, PITimedValues):
return False
return self.__dict__ == other.__dict__
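# Illustrative usage (values are made up): to_dict() keys follow the Python attribute
# names, not the wire names in attribute_map.
#   values = PITimedValues(items=[], units_abbreviation="m3/s")
#   values.to_dict()
#   # -> {'items': [], 'units_abbreviation': 'm3/s', 'web_exception': None}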
|
the-stack_106_28972 | import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import sys
sys.path.append("..")
import tensorflow as tf
import tensorflow_probability as tfp
import tensorflow.python.keras.backend as K
import numpy as np
import sympy as sp
import time
import math
import argparse
import MLGeometry as mlg
from models import *
z0, z1, z2, z3, z4 = sp.symbols('z0, z1, z2, z3, z4')
Z = [z0,z1,z2,z3,z4]
parser = argparse.ArgumentParser()
# Data generation
parser.add_argument('--seed', type=int)
parser.add_argument('--n_pairs', type=int)
parser.add_argument('--batch_size', type=int)
parser.add_argument('--function')
parser.add_argument('--psi', type=float)
parser.add_argument('--phi', type=float)
parser.add_argument('--alpha', type=float)
# Network
parser.add_argument('--OuterProductNN_k', type=int)
parser.add_argument('--layers')
parser.add_argument('--k2_as_first_layer', action='store_true')
parser.add_argument('--k4_as_first_layer', action='store_true')
parser.add_argument('--load_model')
parser.add_argument('--save_dir')
parser.add_argument('--save_name')
# Training
parser.add_argument('--max_epochs', type=int)
parser.add_argument('--loss_func')
parser.add_argument('--clip_threshold', type=float)
parser.add_argument('--optimizer', default='Adam')
parser.add_argument('--learning_rate', type=float, default=0.001)
parser.add_argument('--decay_rate', type=float, default=1.0)
parser.add_argument('--num_correction_pairs', type=int, default=10)
args = parser.parse_args()
print("Processing model: " + args.save_name)
# Data generation
seed = args.seed
n_pairs = args.n_pairs
batch_size = args.batch_size
psi = args.psi
f = z0**5 + z1**5 + z2**5 + z3**5 + z4**5 + psi*z0*z1*z2*z3*z4
if args.function == 'f1':
phi = args.phi
f = f + phi*(z3*z4**4 + z3**2*z4**3 + z3**3*z4**2 + z3**4*z4)
elif args.function == 'f2':
alpha = args.alpha
f = f + alpha*(z2*z0**4 + z0*z4*z1**3 + z0*z2*z3*z4**2 + z3**2*z1**3 + z4*z1**2*z2**2 + z0*z1*z2*z3**2 +
z2*z4*z3**3 + z0*z1**4 + z0*z4**2*z2**2 + z4**3*z1**2 + z0*z2*z3**3 + z3*z4*z0**3 + z1**3*z4**2 +
z0*z2*z4*z1**2 + z1**2*z3**3 + z1*z4**4 + z1*z2*z0**3 + z2**2*z4**3 + z4*z2**4 + z1*z3**4)
np.random.seed(seed)
tf.random.set_seed(seed)
HS = mlg.hypersurface.Hypersurface(Z, f, n_pairs)
HS_test = mlg.hypersurface.Hypersurface(Z, f, n_pairs)
train_set = mlg.tf_dataset.generate_dataset(HS)
test_set = mlg.tf_dataset.generate_dataset(HS_test)
#if batch_size is None or args.optimizer.lower() == 'lbfgs':
if batch_size is None:
batch_size = HS.n_points
train_set = train_set.shuffle(HS.n_points).batch(batch_size)
test_set = test_set.shuffle(HS_test.n_points).batch(batch_size)
# Network
if args.OuterProductNN_k is not None:
k = args.OuterProductNN_k
else:
layers = args.layers
n_units = layers.split('_')
for i in range(0, len(n_units)):
n_units[i] = int(n_units[i])
n_hidden = len(n_units) - 1
if args.k2_as_first_layer is True:
k = 2**(n_hidden+1)
else:
k = 2**n_hidden
model_list_OuterProductNN = [OuterProductNN_k2, OuterProductNN_k3, OuterProductNN_k4]
model_list_k2_as_first_layer = [k2_twolayers, k2_threelayers]
model_list_k4_as_first_layer = [k4_onelayer, k4_twolayers]
model_list = [zerolayer, onelayer, twolayers, threelayers, fourlayers, fivelayers]
load_path = args.load_model
if load_path is not None:
model = tf.keras.models.load_model(load_path, compile=False)
elif args.OuterProductNN_k is not None:
try:
model = model_list_OuterProductNN[k-2]()
except IndexError:
print("Error: Only k = 2,3,4 are supported now")
elif args.k2_as_first_layer:
try:
model = model_list_k2_as_first_layer[n_hidden-2](n_units)
except IndexError:
print("Error: Only two and three layers are supported")
elif args.k4_as_first_layer:
try:
model = model_list_k4_as_first_layer[n_hidden-1](n_units)
except IndexError:
print("Error: Only one and two layers is supported")
else:
try:
model = model_list[n_hidden](n_units)
except IndexError:
print("Error: Only k <= 32 is supported")
max_epochs = args.max_epochs
func_dict = {"weighted_MAPE": mlg.loss.weighted_MAPE, "weighted_MSE": mlg.loss.weighted_MSE, "max_error":mlg.loss.max_error,
"MAPE_plus_max_error": mlg.loss.MAPE_plus_max_error}
loss_func = func_dict[args.loss_func]
#early_stopping = False
clip_threshold = args.clip_threshold
save_dir = args.save_dir
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_name = args.save_name
@tf.function
def volume_form(x, Omega_Omegabar, mass, restriction):
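    # Restrict the ambient Kahler metric to the hypersurface via the restriction
    # matrix, take the determinant of the pulled-back metric, and rescale by a
    # Monte Carlo factor so that the mass-weighted average of
    # volume_form / Omega_Omegabar equals one.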
kahler_metric = mlg.complex_math.complex_hessian(tf.math.real(model(x)), x)
volume_form = tf.math.real(tf.linalg.det(tf.matmul(restriction, tf.matmul(kahler_metric, restriction, adjoint_b=True))))
weights = mass / tf.reduce_sum(mass)
factor = tf.reduce_sum(weights * volume_form / Omega_Omegabar)
#factor = tf.constant(35.1774, dtype=tf.complex64)
return volume_form / factor
def cal_total_loss(dataset, loss_function):
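    # Mass-weighted aggregate of loss_function over the whole dataset, accumulated
    # batch by batch so the result does not depend on the batch size.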
total_loss = tf.constant(0, dtype=tf.float32)
total_mass= tf.constant(0, dtype=tf.float32)
for step, (points, Omega_Omegabar, mass, restriction) in enumerate(dataset):
det_omega = volume_form(points, Omega_Omegabar, mass, restriction)
mass_sum = tf.reduce_sum(mass)
total_loss += loss_function(Omega_Omegabar, det_omega, mass) * mass_sum
total_mass += mass_sum
total_loss = total_loss / total_mass
return total_loss.numpy()
def cal_max_error(dataset):
'''
    Find max |eta - 1| over the whole dataset: compute the error on each batch, then keep the largest.
'''
max_error_tmp = 0
for step, (points, Omega_Omegabar, mass, restriction) in enumerate(dataset):
det_omega = volume_form(points, Omega_Omegabar, mass, restriction)
error = mlg.loss.max_error(Omega_Omegabar, det_omega, mass).numpy()
if error > max_error_tmp:
max_error_tmp = error
return max_error_tmp
# Training
start_time = time.time()
if args.optimizer.lower() == 'lbfgs':
    # The iteration counter advances every time f is invoked, and f is also invoked when
    # computing the Hessian approximation, etc., so the effective number of epochs would be
    # about 3 times the requested value; divide by 3 to compensate.
max_epochs = int(max_epochs/3)
train_func = mlg.lbfgs.function_factory(model, loss_func, train_set)
init_params = tf.dynamic_stitch(train_func.idx, model.trainable_variables)
results = tfp.optimizer.lbfgs_minimize(value_and_gradients_function=train_func,
initial_position=init_params,
max_iterations=max_epochs,
num_correction_pairs=args.num_correction_pairs)
train_func.assign_new_model_parameters(results.position)
else:
if args.optimizer.lower() == 'sgd':
optimizer = tf.keras.optimizers.SGD(args.learning_rate)
else:
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=args.learning_rate,
decay_steps = HS.n_points/batch_size,
decay_rate = args.decay_rate)
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
#optimizer = tf.keras.optimizers.Adam(learning_rate=args.learning_rate)
train_log_dir = save_dir + '/logs/' + save_name + '/train'
test_log_dir = save_dir + '/logs/' + save_name + '/test'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
test_summary_writer = tf.summary.create_file_writer(test_log_dir)
stop = False
loss_old = 100000
epoch = 0
while epoch < max_epochs and stop is False:
epoch = epoch + 1
for step, (points, Omega_Omegabar, mass, restriction) in enumerate(train_set):
with tf.GradientTape() as tape:
det_omega = volume_form(points, Omega_Omegabar, mass, restriction)
loss = loss_func(Omega_Omegabar, det_omega, mass)
grads = tape.gradient(loss, model.trainable_weights)
if clip_threshold is not None:
grads = [tf.clip_by_value(grad, -clip_threshold, clip_threshold) for grad in grads]
optimizer.apply_gradients(zip(grads, model.trainable_weights))
            #tf.print(model.trainable_weights)
#if step % 500 == 0:
# print("step %d: loss = %.4f" % (step, loss))
if epoch % 10 == 0:
sigma_max_train = cal_max_error(train_set)
sigma_max_test = cal_max_error(test_set)
E_train = cal_total_loss(train_set, mlg.loss.weighted_MSE)
E_test = cal_total_loss(test_set, mlg.loss.weighted_MSE)
sigma_train = cal_total_loss(train_set, mlg.loss.weighted_MAPE)
sigma_test = cal_total_loss(test_set, mlg.loss.weighted_MAPE)
def delta_sigma_square_train(y_true, y_pred, mass):
weights = mass / K.sum(mass)
return K.sum((K.abs(y_true - y_pred) / y_true - sigma_train)**2 * weights)
def delta_sigma_square_test(y_true, y_pred, mass):
weights = mass / K.sum(mass)
return K.sum((K.abs(y_true - y_pred) / y_true - sigma_test)**2 * weights)
delta_sigma_train = math.sqrt(cal_total_loss(train_set, delta_sigma_square_train) / HS.n_points)
delta_sigma_test = math.sqrt(cal_total_loss(test_set, delta_sigma_square_test) / HS.n_points)
print("train_loss:", loss.numpy())
print("test_loss:", cal_total_loss(test_set, loss_func))
with train_summary_writer.as_default():
tf.summary.scalar('max_error', sigma_max_train, step=epoch)
tf.summary.scalar('delta_sigma', delta_sigma_train, step=epoch)
tf.summary.scalar('E', E_train, step=epoch)
tf.summary.scalar('sigma', sigma_train , step=epoch)
with test_summary_writer.as_default():
tf.summary.scalar('max_error', sigma_max_test, step=epoch)
tf.summary.scalar('delta_sigma', delta_sigma_test, step=epoch)
tf.summary.scalar('E', E_test, step=epoch)
                tf.summary.scalar('sigma', sigma_test, step=epoch)
        # Early stopping
# if early_stopping is True and epoch > 800:
# if epoch % 5 == 0:
# if train_loss > loss_old:
# stop = True
# loss_old = train_loss
train_time = time.time() - start_time
model.save(save_dir + '/' + save_name)
sigma_train = cal_total_loss(train_set, mlg.loss.weighted_MAPE)
sigma_test = cal_total_loss(test_set, mlg.loss.weighted_MAPE)
E_train = cal_total_loss(train_set, mlg.loss.weighted_MSE)
E_test = cal_total_loss(test_set, mlg.loss.weighted_MSE)
sigma_max_train = cal_max_error(train_set)
sigma_max_test = cal_max_error(test_set)
#######################################################################
# Calculate delta_sigma
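# Roughly, delta_sigma = sqrt( sum_i w_i * (|eta_i - 1| - sigma)^2 / n_points ) with
# w_i the normalized mass weights and eta_i = det_omega_i / Omega_Omegabar_i, i.e. a
# standard-error estimate for sigma; delta_E below is the analogue for the MSE measure.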
def delta_sigma_square_train(y_true, y_pred, mass):
weights = mass / K.sum(mass)
return K.sum((K.abs(y_true - y_pred) / y_true - sigma_train)**2 * weights)
def delta_sigma_square_test(y_true, y_pred, mass):
weights = mass / K.sum(mass)
return K.sum((K.abs(y_true - y_pred) / y_true - sigma_test)**2 * weights)
def delta_E_square_train(y_true, y_pred, mass):
weights = mass / K.sum(mass)
return K.sum(((y_pred / y_true - 1)**2 - E_train)**2 * weights)
def delta_E_square_test(y_true, y_pred, mass):
weights = mass / K.sum(mass)
return K.sum(((y_pred / y_true - 1)**2 - E_test)**2 * weights)
delta_sigma_train = math.sqrt(cal_total_loss(train_set, delta_sigma_square_train) / HS.n_points)
delta_sigma_test = math.sqrt(cal_total_loss(test_set, delta_sigma_square_test) / HS.n_points)
delta_E_train = math.sqrt(cal_total_loss(train_set, delta_E_square_train) / HS.n_points)
delta_E_test = math.sqrt(cal_total_loss(test_set, delta_E_square_test) / HS.n_points)
#print(delta_sigma_train)
#print(delta_sigma_test)
#####################################################################
# Write to file
with open(save_dir + save_name + ".txt", "w") as f:
f.write('[Results] \n')
f.write('model_name = {} \n'.format(save_name))
f.write('seed = {} \n'.format(seed))
f.write('n_pairs = {} \n'.format(n_pairs))
f.write('n_points = {} \n'.format(HS.n_points))
f.write('batch_size = {} \n'.format(batch_size))
f.write('function = {} \n'.format(args.function))
f.write('psi = {} \n'.format(psi))
if args.function == 'f1':
f.write('phi = {} \n'.format(phi))
elif args.function == 'f2':
f.write('alpha = {} \n'.format(alpha))
f.write('k = {} \n'.format(k))
f.write('n_parameters = {} \n'.format(model.count_params()))
f.write('loss function = {} \n'.format(loss_func.__name__))
if clip_threshold is not None:
f.write('clip_threshold = {} \n'.format(clip_threshold))
f.write('\n')
f.write('n_epochs = {} \n'.format(max_epochs))
f.write('train_time = {:.6g} \n'.format(train_time))
f.write('sigma_train = {:.6g} \n'.format(sigma_train))
f.write('sigma_test = {:.6g} \n'.format(sigma_test))
f.write('delta_sigma_train = {:.6g} \n'.format(delta_sigma_train))
f.write('delta_sigma_test = {:.6g} \n'.format(delta_sigma_test))
f.write('E_train = {:.6g} \n'.format(E_train))
f.write('E_test = {:.6g} \n'.format(E_test))
f.write('delta_E_train = {:.6g} \n'.format(delta_E_train))
f.write('delta_E_test = {:.6g} \n'.format(delta_E_test))
f.write('sigma_max_train = {:.6g} \n'.format(sigma_max_train))
f.write('sigma_max_test = {:.6g} \n'.format(sigma_max_test))
with open(save_dir + "summary.txt", "a") as f:
if args.function == 'f0':
f.write('{} {} {} {:.6g} {:.6g} {:.6g} {:.6g} {:.6g} {:.6g} {:.6g}\n'.format(save_name, args.function, psi, train_time, sigma_train, sigma_test, E_train, E_test, sigma_max_train, sigma_max_test))
elif args.function == 'f1':
f.write('{} {} {} {} {:.6g} {:.6g} {:.6g} {:.6g} {:.6g} {:.6g} {:.6g}\n'.format(save_name, args.function, psi, phi, train_time, sigma_train, sigma_test, E_train, E_test, sigma_max_train, sigma_max_test))
elif args.function == 'f2':
f.write('{} {} {} {} {:.6g} {:.6g} {:.6g} {:.6g} {:.6g} {:.6g} {:.6g}\n'.format(save_name, args.function, psi, alpha, train_time, sigma_train, sigma_test, E_train, E_test, sigma_max_train, sigma_max_test))
|
the-stack_106_28973 | # type: ignore
import cflearn
from cflearn.misc.toolkit import check_is_ci
is_ci = check_is_ci()
num_classes = 10
data = cflearn.cv.MNISTData(batch_size=4 if is_ci else 64, transform="for_generation")
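# A vanilla VAE on 28x28 grayscale MNIST using the "for_generation" transform;
# num_classes is passed through model_config so the model can condition on the digit
# label, and the small batch size / debug flag keep CI runs fast.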
m = cflearn.api.vanilla_vae_gray(
28,
model_config={"num_classes": num_classes},
debug=is_ci,
)
m.fit(data, cuda=None if is_ci else 1)
|