Columns:
repo: stringclasses (856 values)
pull_number: int64 (3 to 127k)
instance_id: stringlengths (12 to 58)
issue_numbers: sequencelengths (1 to 5)
base_commit: stringlengths (40 to 40)
patch: stringlengths (67 to 1.54M)
test_patch: stringlengths (0 to 107M)
problem_statement: stringlengths (3 to 307k)
hints_text: stringlengths (0 to 908k)
created_at: timestamp[s]
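The rows below follow exactly this column order. As a quick orientation, here is a minimal sketch of how a dataset with this layout could be loaded and inspected with the `datasets` library; the dataset id "org/evap-swe-instances" is a placeholder assumption, not the real repository name.

```python
# Hedged sketch: iterate a few rows using the column layout listed above.
from datasets import load_dataset

# "org/evap-swe-instances" is a hypothetical id used only for illustration.
ds = load_dataset("org/evap-swe-instances", split="train")

for row in ds.select(range(3)):
    # Each row is a dict keyed by the columns listed above.
    print(row["instance_id"], row["repo"], row["created_at"])
    print(row["problem_statement"][:120])
    print(len(row["patch"]), "characters in the gold patch")
```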
repo: e-valuation/EvaP
pull_number: 1371
instance_id: e-valuation__EvaP-1371
issue_numbers: ["1362"]
base_commit: 8412cdbcf78194019300912a5e82a23258254b25
diff --git a/evap/contributor/forms.py b/evap/contributor/forms.py --- a/evap/contributor/forms.py +++ b/evap/contributor/forms.py @@ -70,6 +70,7 @@ def __init__(self, *args, **kwargs): self.fields['questionnaires'].queryset = Questionnaire.objects.contributor_questionnaires().filter( Q(visibility=Questionnaire.EDITORS) | Q(contributions__evaluation=self.evaluation)).distinct() + self.fields['contributor'].queryset = UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True) class DelegatesForm(forms.ModelForm): diff --git a/evap/staff/forms.py b/evap/staff/forms.py --- a/evap/staff/forms.py +++ b/evap/staff/forms.py @@ -368,7 +368,7 @@ def save(self, *args, **kw): class ContributionForm(forms.ModelForm): - contributor = UserModelChoiceField(queryset=UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True)) + contributor = UserModelChoiceField(queryset=UserProfile.objects.exclude(is_active=False)) responsibility = forms.ChoiceField(widget=forms.RadioSelect(), choices=Contribution.RESPONSIBILITY_CHOICES) evaluation = forms.ModelChoiceField(Evaluation.objects.all(), disabled=True, required=False, widget=forms.HiddenInput()) questionnaires = forms.ModelMultipleChoiceField(
diff --git a/evap/staff/tests/test_forms.py b/evap/staff/tests/test_forms.py --- a/evap/staff/tests/test_forms.py +++ b/evap/staff/tests/test_forms.py @@ -389,6 +389,12 @@ def test_hidden_and_managers_only(self): self.assertEqual(expected, set(formset.forms[0].fields['questionnaires'].queryset.all())) self.assertEqual(expected, set(formset.forms[1].fields['questionnaires'].queryset.all())) + def test_staff_can_select_proxy_user(self): + proxy_user = mommy.make(UserProfile, is_proxy_user=True) + course = mommy.make(Course, semester=mommy.make(Semester)) + form = CourseForm(instance=course) + self.assertIn(proxy_user, form.fields['responsibles'].queryset) + class ContributionFormset775RegressionTests(TestCase): """ @@ -519,6 +525,12 @@ def test_uniqueness_constraint_error_shown(self): self.assertIn('name_de', form.errors) self.assertEqual(form.errors['name_de'], ['Course with this Semester and Name (german) already exists.']) + def test_that_proxy_user_can_be_responsible(self): + course = mommy.make(Course, semester=mommy.make(Semester), degrees=[mommy.make(Degree)]) + proxy = mommy.make(UserProfile, is_proxy_user=True, is_active=True) + form = CourseForm(instance=course) + self.assertIn(proxy, form.fields['responsibles'].queryset) + class EvaluationFormTests(TestCase): def test_evaluation_form_same_name(self):
problem_statement: Add proxy users to selection lists
#1304 specified that staff users still need to be able to select proxy users from selection lists. This wasn't implemented in #1336, so it should be changed: staff users should be able to select proxy users in all user selection fields.
created_at: 2019-11-04T19:44:46
repo: e-valuation/EvaP
pull_number: 1376
instance_id: e-valuation__EvaP-1376
issue_numbers: ["1361"]
base_commit: c1955c7a6d7b3295622173facbb6d9ea0f85edcd
diff --git a/evap/staff/views.py b/evap/staff/views.py --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -795,7 +795,12 @@ def evaluation_edit(request, semester_id, evaluation_id): @manager_required def helper_evaluation_edit(request, semester, evaluation): - @receiver(RewardPointGranting.granted_by_removal) + # Show a message when reward points are granted during the lifetime of the calling view. + # The @receiver will only live as long as the request is processed + # as the callback is captured by a weak reference in the Django Framework + # and no other strong references are being kept. + # See https://github.com/fsr-de/EvaP/issues/1361 for more information and discussion. + @receiver(RewardPointGranting.granted_by_removal, weak=True) def notify_reward_points(grantings, **_kwargs): for granting in grantings: messages.info(request, @@ -1460,7 +1465,8 @@ def user_import(request): @manager_required def user_edit(request, user_id): - @receiver(RewardPointGranting.granted_by_removal) + # See comment in helper_evaluation_edit + @receiver(RewardPointGranting.granted_by_removal, weak=True) def notify_reward_points(grantings, **_kwargs): assert len(grantings) == 1
problem_statement: Make weak reference in signal dispatcher clearer
In lines 796 to 808 here: https://github.com/fsr-de/EvaP/blob/b3715158306c27734b29253598c0a644c840b9b0/evap/staff/views.py#L798 (and there's a similar case nearby) we register a receiver inside a view. To my understanding, every time the view is accessed, a new receiver function is added to the signal. And because we reference the view's request inside the receiver, it is captured in the [closure](https://en.wikipedia.org/wiki/Closure_(computer_programming)) used to store the receiver. That means we keep references and objects in memory until application exit, which is bad, right?
hints_text: The `@receiver` decorator passes its arguments on to `Signal.connect`, which defaults to `weak=True`. That way, the callback is stored only as a weak reference in the signal dispatcher. As the view function keeps no other reference to the callback, it should get destroyed when the view function's scope ends. So much for the theory; I tried a little good ol' print debugging:

```diff
 @receiver(RewardPointGranting.granted_by_removal)
 def notify_reward_points(grantings, **_kwargs):
+    print('user_edit')
     messages.info(request, …)
```

… prints exactly once when points are granted.

```diff
-@receiver(RewardPointGranting.granted_by_removal)
+@receiver(RewardPointGranting.granted_by_removal, weak=False)
 def notify_reward_points(grantings, **_kwargs):
+    print('user_edit')
     messages.info(request, …)
```

… prints multiple times when points are granted, as past instances are kept. That's only a tiny example, no real proof. The inner function seems to be destroyed because it only has weak references, and according to the [docs](https://docs.python.org/3/library/weakref.html?highlight=garbage+collection) the gc is _free to destroy the object_. Since this is a rather simple case, the reference-counting approach alone would suffice to destroy it right after the view returns. Even if the gc does not do it immediately, it can do so later, so a memory leak should not be possible. For testing purposes, these two courses (in the test db) grant points when all participants are removed: [`1545`](http://localhost:8000/staff/semester/21/evaluation/1545/edit) [`1499`](http://localhost:8000/staff/semester/21/evaluation/1499/edit)

So `notify_reward_points` is registered as a receiver while the response for the POST request is being worked on in the view function, which is also where the signal is sent. So this actually does what we want, although it's a little unconventional. I'd still like to have some kind of comment there explaining that it uses a weak reference and thus does not cause a memory leak, maybe linking to this explanation.

Agreed, it caused enough confusion for one day. If it is sufficient in your opinion, I'd explicitly set `weak=True` to replace a comment or to augment a comment stating, "This callback will only live for the lifetime of one request" (or similar).

The whole signal thing was used here because we thought it might be nice to separate the rewards app from everything else as much as possible. Some years later I now don't really see a good reason for handling this app differently from everything else. Putting `weak` there for some explanation is fine for me, and I would also be happy to discuss not using signals at all if that would be the preferred solution.
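To make the lifetime argument above concrete, here is a minimal, self-contained sketch (not EvaP code) of a receiver registered with `weak=True` inside a function scope. The signal name `points_granted`, the `fake_view` function, and the payload values are made up for illustration; the point is only that the weakly-referenced callback stops firing once the enclosing scope is gone.

```python
import gc

from django.dispatch import Signal, receiver

# Hypothetical stand-in for RewardPointGranting.granted_by_removal.
points_granted = Signal()

calls = []


def fake_view():
    # Registered inside the "view", mirroring staff/views.py. With weak=True
    # (also the default) the dispatcher keeps only a weak reference to the
    # callback, so it lives no longer than this function's scope.
    @receiver(points_granted, weak=True)
    def notify(sender, **kwargs):
        calls.append(kwargs["grantings"])

    # While the view is still running, the callback is alive and fires.
    points_granted.send(sender=None, grantings=["granting-1"])


fake_view()
gc.collect()  # the weakly-referenced callback is collected once the scope is gone

# After the view has returned, sending the signal reaches no receiver.
points_granted.send(sender=None, grantings=["granting-2"])
print(calls)  # expected: [['granting-1']]
```

This mirrors the print-debugging result quoted above: one registration per request, gone once the request has been handled.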
created_at: 2019-11-18T17:46:58
repo: e-valuation/EvaP
pull_number: 1377
instance_id: e-valuation__EvaP-1377
issue_numbers: ["1081"]
base_commit: 8c2e813513d602e3525dad6da4a54cfb3ca74e0a
diff --git a/evap/staff/views.py b/evap/staff/views.py --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -576,8 +576,17 @@ def semester_preparation_reminder(request, semester_id): responsible_list = [(responsible, [evaluation for evaluation in evaluations if responsible in evaluation.course.responsibles.all()], responsible.delegates.all()) for responsible in responsibles] - template_data = dict(semester=semester, responsible_list=responsible_list) - return render(request, "staff_semester_preparation_reminder.html", template_data) + if request.method == "POST": + template = EmailTemplate.objects.get(name=EmailTemplate.EDITOR_REVIEW_REMINDER) + subject_params = {} + for responsible, evaluations, __ in responsible_list: + body_params = {"user": responsible, "evaluations": evaluations} + EmailTemplate.send_to_user(responsible, template, subject_params, body_params, use_cc=True, request=request) + messages.success(request, _("Successfully sent reminders to everyone.")) + return HttpResponse() + else: + template_data = dict(semester=semester, responsible_list=responsible_list) + return render(request, "staff_semester_preparation_reminder.html", template_data) @manager_required
diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -1,6 +1,7 @@ import datetime import os import glob +from unittest.mock import patch from django.conf import settings from django.contrib.auth.models import Group @@ -512,6 +513,7 @@ def test_assign_questionnaires(self): class TestSemesterPreparationReminderView(WebTestWith200Check): url = '/staff/semester/1/preparation_reminder' test_users = ['manager'] + csrf_checks = False @classmethod def setUpTestData(cls): @@ -527,6 +529,22 @@ def test_preparation_reminder(self): self.assertContains(response, 'user_to_find') self.assertContains(response, 'name_to_find') + @patch("evap.staff.views.EmailTemplate.send_to_user") + def test_remind_all(self, send_to_user_mock): + user = mommy.make(UserProfile) + evaluation = mommy.make(Evaluation, course=mommy.make(Course, semester=self.semester, responsibles=[user]), state='prepared') + + response = self.app.post(self.url, user='manager') + self.assertEqual(response.status_code, 200) + + template = EmailTemplate.objects.get(name=EmailTemplate.EDITOR_REVIEW_REMINDER) + subject_params = {} + body_params = {"user": user, "evaluations": [evaluation]} + expected = (user, template, subject_params, body_params) + + send_to_user_mock.assert_called_once() + self.assertEqual(send_to_user_mock.call_args_list[0][0][:4], expected) + class TestSendReminderView(WebTest): url = '/staff/semester/1/responsible/3/send_reminder'
problem_statement: Remind all
On the Todo list, a `Remind all` button should be added that allows sending reminders to all responsibles at once. This button opens a modal with a list of the responsibles, asking for confirmation. Confirming then sends all emails that would be sent by clicking the `Send reminder` button for each responsible individually.
created_at: 2019-11-18T19:30:53
repo: e-valuation/EvaP
pull_number: 1385
instance_id: e-valuation__EvaP-1385
issue_numbers: ["1372"]
base_commit: c10bd2ec457c9ed82d378aacd738ce171127d695
diff --git a/evap/evaluation/migrations/0106_add_short_long_question.py b/evap/evaluation/migrations/0106_add_short_long_question.py new file mode 100644 --- /dev/null +++ b/evap/evaluation/migrations/0106_add_short_long_question.py @@ -0,0 +1,37 @@ +# Generated by Django 2.2.7 on 2019-11-18 20:20 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('evaluation', '0105_uuids_for_ratinganswercounter'), + ] + + operations = [ + migrations.AlterField( + model_name='question', + name='type', + field=models.PositiveSmallIntegerField( + choices=[ + ('Text', ((0, 'Text question'),)), + ('Unipolar Likert', ((1, 'Agreement question'),)), + ('Grade', ((2, 'Grade question'),)), + ('Bipolar Likert', ( + (6, 'Easy-difficult question'), + (7, 'Few-many question'), + (8, 'Little-much question'), + (9, 'Small-large question'), + (10, 'Slow-fast question'), + (11, 'Short-long question') + )), + ('Yes-no', ( + (3, 'Positive yes-no question'), + (4, 'Negative yes-no question') + )), + ('Layout', ((5, 'Heading'),)) + ], + verbose_name='question type'), + ), + ] diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -780,6 +780,7 @@ class Question(models.Model): LITTLE_MUCH = 8 SMALL_LARGE = 9 SLOW_FAST = 10 + SHORT_LONG = 11 POSITIVE_YES_NO = 3 NEGATIVE_YES_NO = 4 HEADING = 5 @@ -799,6 +800,7 @@ class Question(models.Model): (LITTLE_MUCH, _("Little-much question")), (SMALL_LARGE, _("Small-large question")), (SLOW_FAST, _("Slow-fast question")), + (SHORT_LONG, _("Short-long question")), )), (_("Yes-no"), ( (POSITIVE_YES_NO, _("Positive yes-no question")), @@ -837,7 +839,7 @@ def is_likert_question(self): @property def is_bipolar_likert_question(self): - return self.type in (self.EASY_DIFFICULT, self.FEW_MANY, self.LITTLE_MUCH, self.SLOW_FAST, self.SMALL_LARGE) + return self.type in (self.EASY_DIFFICULT, self.FEW_MANY, self.LITTLE_MUCH, self.SLOW_FAST, self.SMALL_LARGE, self.SHORT_LONG) @property def is_text_question(self): @@ -995,6 +997,21 @@ def is_heading_question(self): ], **BASE_BIPOLAR_CHOICES ), + Question.SHORT_LONG: BipolarChoices( + minus_name=_("Short"), + plus_name=_("Long"), + names=[ + _("Way too\nshort"), + _("Too\nshort"), + _("Slightly too\nshort"), + _("Ideal"), + _("Slightly too\nlong"), + _("Too\nlong"), + _("Way too\nlong"), + _("no answer") + ], + **BASE_BIPOLAR_CHOICES + ), Question.POSITIVE_YES_NO: Choices( names=[ _("Yes"),
problem_statement: New bipolar label "Short/Long"
For the Erstifahrt Evaluation, we wanted to evaluate the length of the trip. For that we would have liked to have a short <-> long bipolar scale.
hints_text: While it would also have been possible to use the existing answer options in this case by formulating the question in a slightly different way, we decided to add this sixth option for ease of use.
created_at: 2019-12-02T16:43:18
repo: e-valuation/EvaP
pull_number: 1395
instance_id: e-valuation__EvaP-1395
issue_numbers: ["1393"]
base_commit: ff840bc04ba9ef3ab4bc1630f57e82c037c5a9d1
diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py --- a/evap/evaluation/views.py +++ b/evap/evaluation/views.py @@ -100,6 +100,12 @@ def login_key_authentication(request, key): return redirect('evaluation:index') if user and user.login_key_valid_until >= date.today(): + if request.method != "POST": + template_data = { + 'username': user.full_name + } + return render(request, "external_user_confirm_login.html", template_data) + # User is valid. Set request.user and persist user in the session by logging the user in. request.user = user auth.login(request, user)
diff --git a/evap/evaluation/tests/test_auth.py b/evap/evaluation/tests/test_auth.py --- a/evap/evaluation/tests/test_auth.py +++ b/evap/evaluation/tests/test_auth.py @@ -33,22 +33,36 @@ def test_login_url_works(self): self.assertRedirects(self.app.get(reverse("contributor:index")), "/?next=/contributor/") url_with_key = reverse("evaluation:login_key_authentication", args=[self.external_user.login_key]) - page = self.app.get(url_with_key).follow().follow() + old_login_key = self.external_user.login_key + old_login_key_valid_until = self.external_user.login_key_valid_until + page = self.app.get(url_with_key) + self.external_user.refresh_from_db() + self.assertEqual(old_login_key, self.external_user.login_key) + self.assertEqual(old_login_key_valid_until, self.external_user.login_key_valid_until) + self.assertContains(page, 'Login') + self.assertContains(page, self.external_user.full_name) + + page = self.app.post(url_with_key).follow().follow() self.assertContains(page, 'Logout') + self.assertContains(page, self.external_user.full_name) def test_login_key_valid_only_once(self): - page = self.app.get(reverse("evaluation:login_key_authentication", args=[self.external_user.login_key])).follow().follow() + page = self.app.get(reverse("evaluation:login_key_authentication", args=[self.external_user.login_key])) self.assertContains(page, self.external_user.full_name) + url_with_key = reverse("evaluation:login_key_authentication", args=[self.external_user.login_key]) + page = self.app.post(url_with_key).follow().follow() + self.assertContains(page, 'Logout') + page = self.app.get(reverse("django-auth-logout")).follow() self.assertNotContains(page, 'Logout') - page = self.app.get(reverse("evaluation:login_key_authentication", args=[self.external_user.login_key])).follow() + page = self.app.get(url_with_key).follow() self.assertContains(page, 'The login URL is not valid anymore.') self.assertEqual(len(mail.outbox), 1) # a new login key was sent new_key = UserProfile.objects.get(id=self.external_user.id).login_key - page = self.app.get(reverse("evaluation:login_key_authentication", args=[new_key])).follow().follow() + page = self.app.post(reverse("evaluation:login_key_authentication", args=[new_key])).follow().follow() self.assertContains(page, self.external_user.full_name) def test_inactive_external_users_can_not_login(self):
problem_statement: Add confirmation for login with login key
External users can log in by clicking on the login URL sent to them via email. Before users are actually authenticated on the platform after clicking this link, they should have to confirm the login by, e.g., clicking a confirmation button in a modal or on a designated page. This prevents cases in which the URL is already requested, e.g., for a preview of the page, which invalidates the login key and results in a new link being sent when users actively click on the link (which is then already the second GET request).
hints_text: Why not invalidate the key when navigating away from the page the invite link leads to? That way, there is no additional step.

Then we'd have to add authentication logic on some random page, which doesn't seem optimal. I think one click on "I want to log in as John Doe" is perfectly fine.

FWIW, https://stackoverflow.com/questions/41699071/bingpreview-invalidates-one-time-links-in-email/42525744. I suspect that's the reason why this is happening, and personally I would prefer a targeted solution rather than degrading the experience of everyone because some email client is misbehaving.
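The patch above implements exactly this split: a plain GET only renders a confirmation page, and only an explicit POST consumes the one-time key. A rough sketch of that shape is shown below; the template name `external_user_confirm_login.html` and the redirect target come from the patch, while `perform_login` is a placeholder for the actual `request.user` / `auth.login()` handling.

```python
# Sketch of the GET-confirm / POST-login pattern from the patch above.
# perform_login() is a placeholder, not the real EvaP helper.
from django.shortcuts import redirect, render


def login_key_authentication(request, user):
    if request.method != "POST":
        # A GET (e.g. a link preview fetched by an email client) must not
        # consume the one-time key; it only shows a confirmation page whose
        # button POSTs back to this same URL.
        return render(request, "external_user_confirm_login.html",
                      {"username": user.full_name})

    # Only the explicit POST logs the user in, after which the key is no
    # longer usable.
    perform_login(request, user)  # placeholder for auth.login() etc.
    return redirect("evaluation:index")
```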
created_at: 2019-12-16T19:42:47
repo: e-valuation/EvaP
pull_number: 1398
instance_id: e-valuation__EvaP-1398
issue_numbers: ["1392"]
base_commit: 76da56811c1c34b82a5efc4d5a5267b04237fa7e
diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -323,6 +323,10 @@ def midterm_grade_documents(self): def responsibles_names(self): return ", ".join([responsible.full_name for responsible in self.responsibles.all().order_by("last_name")]) + @property + def has_external_responsible(self): + return any(responsible.is_external for responsible in self.responsibles.all()) + @property def all_evaluations_finished(self): return not self.evaluations.exclude(state__in=['evaluated', 'reviewed', 'published']).exists()
diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -376,6 +376,19 @@ def test_access_to_semester_with_archived_results(self): # managers can access the page self.app.get('/staff/semester/2', user='manager', status=200) + @override_settings(INSTITUTION_EMAIL_DOMAINS=["institution.com"]) + def test_badge_for_external_responsibles(self): + responsible = mommy.make(UserProfile, email='[email protected]') + course = mommy.make(Course, semester=self.semester, responsibles=[responsible]) + mommy.make(Evaluation, course=course) + response = self.app.get(self.url, user='manager') + self.assertNotContains(response, 'External responsible') + + responsible.email = '[email protected]' + responsible.save() + response = self.app.get(self.url, user='manager') + self.assertContains(response, 'External responsible') + class TestGetEvaluationsWithPrefetchedData(TestCase): def test_get_evaluations_with_prefetched_data(self):
problem_statement: Badge for external responsible
Evaluations whose Course has (at least one) external responsible should get an additional `badge-warning` (in `evaluation_badges.html`), visible to managers and displaying `"External responsible"`.
created_at: 2019-12-16T21:19:14
repo: e-valuation/EvaP
pull_number: 1402
instance_id: e-valuation__EvaP-1402
issue_numbers: ["1381"]
base_commit: 7e614076e48b68ce7f959018f680fb6d4ae6ec3f
diff --git a/evap/settings.py b/evap/settings.py --- a/evap/settings.py +++ b/evap/settings.py @@ -419,8 +419,8 @@ }, } # give random char field values a reasonable length - from model_mommy import random_gen - MOMMY_CUSTOM_FIELDS_GEN = {'django.db.models.CharField': lambda: random_gen.gen_string(20)} + from model_bakery import random_gen + BAKER_CUSTOM_FIELDS_GEN = {'django.db.models.CharField': lambda: random_gen.gen_string(20)} # Django debug toolbar settings
diff --git a/evap/contributor/tests/test_forms.py b/evap/contributor/tests/test_forms.py --- a/evap/contributor/tests/test_forms.py +++ b/evap/contributor/tests/test_forms.py @@ -4,7 +4,7 @@ from evap.evaluation.models import Contribution, Evaluation, Questionnaire, UserProfile from evap.evaluation.tests.tools import WebTest, get_form_data_from_instance from evap.staff.forms import ContributionFormSet -from model_mommy import mommy +from model_bakery import baker class UserFormTests(TestCase): @@ -13,8 +13,8 @@ def test_settings_form(self): """ Tests whether the settings form can be submitted without errors """ - user = mommy.make(UserProfile, username="testuser") - delegate = mommy.make(UserProfile, username="delegate") + user = baker.make(UserProfile, username="testuser") + delegate = baker.make(UserProfile, username="delegate") self.assertFalse(user.delegates.filter(username="delegate").exists()) @@ -37,14 +37,14 @@ def test_managers_only(self): contribution of the Evaluation. Regression test for #593. """ - evaluation = mommy.make(Evaluation) - questionnaire = mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR, visibility=Questionnaire.EDITORS) - questionnaire_managers_only = mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR, visibility=Questionnaire.MANAGERS) + evaluation = baker.make(Evaluation) + questionnaire = baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR, visibility=Questionnaire.EDITORS) + questionnaire_managers_only = baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR, visibility=Questionnaire.MANAGERS) # one hidden questionnaire that should never be shown - mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR, visibility=Questionnaire.HIDDEN) + baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR, visibility=Questionnaire.HIDDEN) # just the normal questionnaire should be shown. - contribution1 = mommy.make(Contribution, evaluation=evaluation, contributor=mommy.make(UserProfile), questionnaires=[]) + contribution1 = baker.make(Contribution, evaluation=evaluation, contributor=baker.make(UserProfile), questionnaires=[]) InlineContributionFormset = inlineformset_factory(Evaluation, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=1) formset = InlineContributionFormset(instance=evaluation, form_kwargs={'evaluation': evaluation}) @@ -74,12 +74,12 @@ def test_form_ordering(self): when the user submits the form with errors. Regression test for #456. 
""" - evaluation = mommy.make(Evaluation, pk=1, state="prepared") - user1 = mommy.make(UserProfile) - user2 = mommy.make(UserProfile) - questionnaire = mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) - contribution1 = mommy.make(Contribution, evaluation=evaluation, contributor=user1, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS, questionnaires=[questionnaire], order=1) - contribution2 = mommy.make(Contribution, evaluation=evaluation, contributor=user2, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS, questionnaires=[questionnaire], order=2) + evaluation = baker.make(Evaluation, pk=1, state="prepared") + user1 = baker.make(UserProfile) + user2 = baker.make(UserProfile) + questionnaire = baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) + contribution1 = baker.make(Contribution, evaluation=evaluation, contributor=user1, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS, questionnaires=[questionnaire], order=1) + contribution2 = baker.make(Contribution, evaluation=evaluation, contributor=user2, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS, questionnaires=[questionnaire], order=2) # almost everything is missing in this set of data, # so we're guaranteed to have some errors diff --git a/evap/contributor/tests/test_views.py b/evap/contributor/tests/test_views.py --- a/evap/contributor/tests/test_views.py +++ b/evap/contributor/tests/test_views.py @@ -1,7 +1,7 @@ from django.core import mail from django_webtest import WebTest -from model_mommy import mommy +from model_bakery import baker from evap.evaluation.models import Evaluation, UserProfile, Contribution from evap.evaluation.tests.tools import WebTestWith200Check, create_evaluation_with_responsible_and_editor @@ -14,11 +14,11 @@ class TestContributorDirectDelegationView(WebTest): @classmethod def setUpTestData(cls): - cls.evaluation = mommy.make(Evaluation, state='prepared') + cls.evaluation = baker.make(Evaluation, state='prepared') - cls.editor = mommy.make(UserProfile) - cls.non_editor = mommy.make(UserProfile, email="[email protected]") - mommy.make(Contribution, evaluation=cls.evaluation, contributor=cls.editor, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) + cls.editor = baker.make(UserProfile) + cls.non_editor = baker.make(UserProfile, email="[email protected]") + baker.make(Contribution, evaluation=cls.evaluation, contributor=cls.editor, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) def test_direct_delegation_request(self): data = {"delegate_to": self.non_editor.id} @@ -35,7 +35,7 @@ def test_direct_delegation_request(self): self.assertEqual(len(mail.outbox), 1) def test_direct_delegation_request_with_existing_contribution(self): - contribution = mommy.make(Contribution, evaluation=self.evaluation, contributor=self.non_editor, can_edit=False) + contribution = baker.make(Contribution, evaluation=self.evaluation, contributor=self.non_editor, can_edit=False) old_contribution_count = Contribution.objects.count() data = {"delegate_to": self.non_editor.id} @@ -71,7 +71,7 @@ def setUpTestData(cls): create_evaluation_with_responsible_and_editor() def test_save_settings(self): - user = mommy.make(UserProfile) + user = baker.make(UserProfile) page = self.app.get(self.url, user="responsible", status=200) form = page.forms["settings-form"] form["delegates"] = [user.pk] diff --git a/evap/evaluation/tests/test_auth.py b/evap/evaluation/tests/test_auth.py --- 
a/evap/evaluation/tests/test_auth.py +++ b/evap/evaluation/tests/test_auth.py @@ -2,7 +2,7 @@ from django.core import mail from django.test import override_settings -from model_mommy import mommy +from model_bakery import baker from evap.evaluation.models import Contribution, Evaluation, UserProfile from evap.evaluation.tests.tools import WebTest @@ -13,13 +13,13 @@ class LoginTests(WebTest): @classmethod def setUpTestData(cls): - cls.external_user = mommy.make(UserProfile, email="[email protected]") + cls.external_user = baker.make(UserProfile, email="[email protected]") cls.external_user.ensure_valid_login_key() - cls.inactive_external_user = mommy.make(UserProfile, email="[email protected]", is_active=False) + cls.inactive_external_user = baker.make(UserProfile, email="[email protected]", is_active=False) cls.inactive_external_user.ensure_valid_login_key() - evaluation = mommy.make(Evaluation, state='published') - mommy.make(Contribution, evaluation=evaluation, contributor=cls.external_user, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) - mommy.make(Contribution, evaluation=evaluation, contributor=cls.inactive_external_user, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) + evaluation = baker.make(Evaluation, state='published') + baker.make(Contribution, evaluation=evaluation, contributor=cls.external_user, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) + baker.make(Contribution, evaluation=evaluation, contributor=cls.inactive_external_user, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) @override_settings(PAGE_URL='https://example.com') def test_login_url_generation(self): diff --git a/evap/evaluation/tests/test_commands.py b/evap/evaluation/tests/test_commands.py --- a/evap/evaluation/tests/test_commands.py +++ b/evap/evaluation/tests/test_commands.py @@ -12,7 +12,7 @@ from django.test import TestCase from django.test.utils import override_settings -from model_mommy import mommy +from model_bakery import baker from evap.evaluation.models import (CHOICES, Contribution, Course, Evaluation, EmailTemplate, NO_ANSWER, Question, Questionnaire, RatingAnswerCounter, Semester, UserProfile) @@ -21,8 +21,8 @@ class TestAnonymizeCommand(TestCase): @classmethod def setUpTestData(cls): - mommy.make(EmailTemplate, name="name", subject="Subject", body="Body.") - mommy.make(UserProfile, + baker.make(EmailTemplate, name="name", subject="Subject", body="Body.") + baker.make(UserProfile, username="secret.username", email="[email protected]", title="Prof.", @@ -30,45 +30,45 @@ def setUpTestData(cls): last_name="User", login_key=1234567890, login_key_valid_until=date.today()) - semester1 = mommy.make(Semester, name_de="S1", name_en="S1") - mommy.make(Semester, name_de="S2", name_en="S2") - cls.course = mommy.make( + semester1 = baker.make(Semester, name_de="S1", name_en="S1") + baker.make(Semester, name_de="S2", name_en="S2") + cls.course = baker.make( Course, semester=semester1, name_de="Eine private Veranstaltung", name_en="A private course", is_private=True, ) - course2 = mommy.make( + course2 = baker.make( Course, semester=semester1, name_de="Veranstaltungsexperimente", name_en="Course experiments", ) - cls.evaluation = mommy.make( + cls.evaluation = baker.make( Evaluation, course=cls.course, name_de="Wie man Software testet", name_en="Testing your software", ) - mommy.make( + baker.make( Evaluation, course=course2, name_de="Die Entstehung von Unicode 😄", name_en="History of Unicode 😄", ) - 
cls.contributor_questionnaire = mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) - cls.general_questionnaire = mommy.make(Questionnaire, type=Questionnaire.TOP) + cls.contributor_questionnaire = baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) + cls.general_questionnaire = baker.make(Questionnaire, type=Questionnaire.TOP) - cls.contributor_questions = mommy.make(Question, _quantity=10, + cls.contributor_questions = baker.make(Question, _quantity=10, questionnaire=cls.contributor_questionnaire, type=cycle(iter(CHOICES.keys()))) - cls.general_questions = mommy.make(Question, _quantity=10, + cls.general_questions = baker.make(Question, _quantity=10, questionnaire=cls.contributor_questionnaire, type=cycle(iter(CHOICES.keys()))) - cls.contributor = mommy.make(UserProfile) + cls.contributor = baker.make(UserProfile) - cls.contribution = mommy.make(Contribution, contributor=cls.contributor, evaluation=cls.evaluation, + cls.contribution = baker.make(Contribution, contributor=cls.contributor, evaluation=cls.evaluation, questionnaires=[cls.contributor_questionnaire, cls.contributor_questionnaire]) cls.general_contribution = cls.evaluation.general_contribution @@ -85,7 +85,7 @@ def test_no_empty_rating_answer_counters_left(self): for question in chain(self.contributor_questions, self.general_questions): choices = [choice for choice in CHOICES[question.type].values if choice != NO_ANSWER] for answer in choices: - mommy.make(RatingAnswerCounter, question=question, contribution=self.contribution, count=1, answer=answer) + baker.make(RatingAnswerCounter, question=question, contribution=self.contribution, count=1, answer=answer) old_count = RatingAnswerCounter.objects.count() @@ -109,7 +109,7 @@ def test_answer_count_unchanged(self): choices = [choice for choice in CHOICES[question.type].values if choice != NO_ANSWER] for answer in choices: count = random.randint(10, 100) - mommy.make(RatingAnswerCounter, question=question, contribution=self.contribution, count=count, answer=answer) + baker.make(RatingAnswerCounter, question=question, contribution=self.contribution, count=count, answer=answer) answers_per_question[question] += count management.call_command('anonymize', stdout=StringIO()) @@ -120,7 +120,7 @@ def test_answer_count_unchanged(self): def test_single_result_anonymization(self): questionnaire = Questionnaire.single_result_questionnaire() - single_result = mommy.make(Evaluation, is_single_result=True, course=self.course) + single_result = baker.make(Evaluation, is_single_result=True, course=self.course) single_result.general_contribution.questionnaires.set([questionnaire]) question = Question.objects.get(questionnaire=questionnaire) @@ -129,7 +129,7 @@ def test_single_result_anonymization(self): random.seed(0) for answer in choices: count = random.randint(50, 100) - mommy.make(RatingAnswerCounter, question=question, contribution=single_result.general_contribution, count=count, answer=answer) + baker.make(RatingAnswerCounter, question=question, contribution=single_result.general_contribution, count=count, answer=answer) answer_count_before += count management.call_command('anonymize', stdout=StringIO()) @@ -177,7 +177,7 @@ def test_executes_key_commands(self, mock_call_command, mock_input): class TestRefreshResultsCacheCommand(TestCase): def test_calls_collect_results(self): - mommy.make(Evaluation) + baker.make(Evaluation) with patch('evap.evaluation.management.commands.refresh_results_cache.collect_results') as mock: management.call_command('refresh_results_cache', 
stdout=StringIO()) @@ -205,8 +205,8 @@ def test_dumpdata_called(self): @override_settings(REMIND_X_DAYS_AHEAD_OF_END_DATE=[0, 2]) class TestSendRemindersCommand(TestCase): def test_remind_user_about_one_evaluation(self): - user_to_remind = mommy.make(UserProfile) - evaluation = mommy.make( + user_to_remind = baker.make(UserProfile) + evaluation = baker.make( Evaluation, state='in_evaluation', vote_start_datetime=datetime.now() - timedelta(days=1), @@ -220,14 +220,14 @@ def test_remind_user_about_one_evaluation(self): mock.assert_called_once_with(user_to_remind, first_due_in_days=2, due_evaluations=[(evaluation, 2)]) def test_remind_user_once_about_two_evaluations(self): - user_to_remind = mommy.make(UserProfile) - evaluation1 = mommy.make( + user_to_remind = baker.make(UserProfile) + evaluation1 = baker.make( Evaluation, state='in_evaluation', vote_start_datetime=datetime.now() - timedelta(days=1), vote_end_date=date.today() + timedelta(days=0), participants=[user_to_remind]) - evaluation2 = mommy.make( + evaluation2 = baker.make( Evaluation, state='in_evaluation', vote_start_datetime=datetime.now() - timedelta(days=1), @@ -241,8 +241,8 @@ def test_remind_user_once_about_two_evaluations(self): mock.assert_called_once_with(user_to_remind, first_due_in_days=0, due_evaluations=[(evaluation1, 0), (evaluation2, 2)]) def test_dont_remind_already_voted(self): - user_no_remind = mommy.make(UserProfile) - mommy.make( + user_no_remind = baker.make(UserProfile) + baker.make( Evaluation, state='in_evaluation', vote_start_datetime=datetime.now() - timedelta(days=1), diff --git a/evap/evaluation/tests/test_forms.py b/evap/evaluation/tests/test_forms.py --- a/evap/evaluation/tests/test_forms.py +++ b/evap/evaluation/tests/test_forms.py @@ -1,6 +1,6 @@ from django.test import TestCase -from model_mommy import mommy +from model_bakery import baker from evap.evaluation.models import UserProfile from evap.evaluation.forms import NewKeyForm @@ -9,7 +9,7 @@ class TestNewKeyForm(TestCase): @classmethod def setUpTestData(cls): - cls.inactive_external_user = mommy.make(UserProfile, email="[email protected]", is_active=False) + cls.inactive_external_user = baker.make(UserProfile, email="[email protected]", is_active=False) def test_inactive_external_users_can_not_request_login_key(self): data = { diff --git a/evap/evaluation/tests/test_misc.py b/evap/evaluation/tests/test_misc.py --- a/evap/evaluation/tests/test_misc.py +++ b/evap/evaluation/tests/test_misc.py @@ -7,7 +7,7 @@ from django.test import TestCase from django.test.utils import override_settings -from model_mommy import mommy +from model_bakery import baker from evap.evaluation.models import Semester, UserProfile, CourseType from evap.evaluation.tests.tools import WebTest @@ -20,10 +20,10 @@ class SampleXlsTests(WebTest): @classmethod def setUpTestData(cls): - cls.semester = mommy.make(Semester) - mommy.make(UserProfile, username="user", groups=[Group.objects.get(name="Manager")]) - mommy.make(CourseType, name_de="Vorlesung", name_en="Vorlesung") - mommy.make(CourseType, name_de="Seminar", name_en="Seminar") + cls.semester = baker.make(Semester) + baker.make(UserProfile, username="user", groups=[Group.objects.get(name="Manager")]) + baker.make(CourseType, name_de="Vorlesung", name_en="Vorlesung") + baker.make(CourseType, name_de="Seminar", name_en="Seminar") def test_sample_xls(self): page = self.app.get(reverse("staff:semester_import", args=[self.semester.pk]), user='user') diff --git a/evap/evaluation/tests/test_models.py 
b/evap/evaluation/tests/test_models.py --- a/evap/evaluation/tests/test_models.py +++ b/evap/evaluation/tests/test_models.py @@ -7,7 +7,7 @@ from django.core import mail from django_webtest import WebTest -from model_mommy import mommy +from model_bakery import baker from evap.evaluation.models import (Contribution, Course, CourseType, EmailTemplate, Evaluation, NotArchiveable, Question, Questionnaire, RatingAnswerCounter, Semester, TextAnswer, UserProfile) @@ -19,7 +19,7 @@ @override_settings(EVALUATION_END_OFFSET_HOURS=0) class TestEvaluations(WebTest): def test_approved_to_in_evaluation(self): - evaluation = mommy.make(Evaluation, state='approved', vote_start_datetime=datetime.now()) + evaluation = baker.make(Evaluation, state='approved', vote_start_datetime=datetime.now()) with patch('evap.evaluation.models.EmailTemplate.send_to_users_in_evaluations') as mock: Evaluation.update_evaluations() @@ -32,7 +32,7 @@ def test_approved_to_in_evaluation(self): self.assertEqual(evaluation.state, 'in_evaluation') def test_in_evaluation_to_evaluated(self): - evaluation = mommy.make(Evaluation, state='in_evaluation', vote_start_datetime=datetime.now() - timedelta(days=2), + evaluation = baker.make(Evaluation, state='in_evaluation', vote_start_datetime=datetime.now() - timedelta(days=2), vote_end_date=date.today() - timedelta(days=1)) with patch('evap.evaluation.models.Evaluation.is_fully_reviewed') as mock: @@ -44,7 +44,7 @@ def test_in_evaluation_to_evaluated(self): def test_in_evaluation_to_reviewed(self): # Evaluation is "fully reviewed" as no open text answers are present by default. - evaluation = mommy.make(Evaluation, state='in_evaluation', vote_start_datetime=datetime.now() - timedelta(days=2), + evaluation = baker.make(Evaluation, state='in_evaluation', vote_start_datetime=datetime.now() - timedelta(days=2), vote_end_date=date.today() - timedelta(days=1)) Evaluation.update_evaluations() @@ -54,8 +54,8 @@ def test_in_evaluation_to_reviewed(self): def test_in_evaluation_to_published(self): # Evaluation is "fully reviewed" and not graded, thus gets published immediately. 
- course = mommy.make(Course, is_graded=False) - evaluation = mommy.make(Evaluation, course=course, state='in_evaluation', vote_start_datetime=datetime.now() - timedelta(days=2), + course = baker.make(Course, is_graded=False) + evaluation = baker.make(Evaluation, course=course, state='in_evaluation', vote_start_datetime=datetime.now() - timedelta(days=2), vote_end_date=date.today() - timedelta(days=1)) with patch('evap.evaluation.tools.send_publish_notifications') as mock: @@ -68,7 +68,7 @@ def test_in_evaluation_to_published(self): @override_settings(EVALUATION_END_WARNING_PERIOD=24) def test_evaluation_ends_soon(self): - evaluation = mommy.make(Evaluation, vote_start_datetime=datetime.now() - timedelta(days=2), + evaluation = baker.make(Evaluation, vote_start_datetime=datetime.now() - timedelta(days=2), vote_end_date=date.today() + timedelta(hours=24)) self.assertFalse(evaluation.evaluation_ends_soon()) @@ -81,7 +81,7 @@ def test_evaluation_ends_soon(self): @override_settings(EVALUATION_END_WARNING_PERIOD=24, EVALUATION_END_OFFSET_HOURS=24) def test_evaluation_ends_soon_with_offset(self): - evaluation = mommy.make(Evaluation, vote_start_datetime=datetime.now() - timedelta(days=2), + evaluation = baker.make(Evaluation, vote_start_datetime=datetime.now() - timedelta(days=2), vote_end_date=date.today()) self.assertFalse(evaluation.evaluation_ends_soon()) @@ -94,12 +94,12 @@ def test_evaluation_ends_soon_with_offset(self): def test_evaluation_ended(self): # Evaluation is out of evaluation period. - course_1 = mommy.make(Course, is_graded=False) - course_2 = mommy.make(Course, is_graded=False) - mommy.make(Evaluation, course=course_1, state='in_evaluation', vote_start_datetime=datetime.now() - timedelta(days=2), + course_1 = baker.make(Course, is_graded=False) + course_2 = baker.make(Course, is_graded=False) + baker.make(Evaluation, course=course_1, state='in_evaluation', vote_start_datetime=datetime.now() - timedelta(days=2), vote_end_date=date.today() - timedelta(days=1)) # This evaluation is not. 
- mommy.make(Evaluation, course=course_2, state='in_evaluation', vote_start_datetime=datetime.now() - timedelta(days=2), + baker.make(Evaluation, course=course_2, state='in_evaluation', vote_start_datetime=datetime.now() - timedelta(days=2), vote_end_date=date.today()) with patch('evap.evaluation.models.Evaluation.evaluation_end') as mock: @@ -109,8 +109,8 @@ def test_evaluation_ended(self): def test_approved_to_in_evaluation_sends_emails(self): """ Regression test for #945 """ - participant = mommy.make(UserProfile, email='[email protected]') - evaluation = mommy.make(Evaluation, state='approved', vote_start_datetime=datetime.now(), participants=[participant]) + participant = baker.make(UserProfile, email='[email protected]') + evaluation = baker.make(Evaluation, state='approved', vote_start_datetime=datetime.now(), participants=[participant]) Evaluation.update_evaluations() @@ -120,27 +120,27 @@ def test_approved_to_in_evaluation_sends_emails(self): def test_has_enough_questionnaires(self): # manually circumvent Evaluation's save() method to have a Evaluation without a general contribution - # the semester must be specified because of https://github.com/vandersonmota/model_mommy/issues/258 - course = mommy.make(Course, semester=mommy.make(Semester), type=mommy.make(CourseType)) - Evaluation.objects.bulk_create([mommy.prepare(Evaluation, course=course)]) + # the semester must be specified because of https://github.com/vandersonmota/model_bakery/issues/258 + course = baker.make(Course, semester=baker.make(Semester), type=baker.make(CourseType)) + Evaluation.objects.bulk_create([baker.prepare(Evaluation, course=course)]) evaluation = Evaluation.objects.get() self.assertEqual(evaluation.contributions.count(), 0) self.assertFalse(evaluation.general_contribution_has_questionnaires) self.assertFalse(evaluation.all_contributions_have_questionnaires) - editor_contribution = mommy.make( - Contribution, evaluation=evaluation, contributor=mommy.make(UserProfile), + editor_contribution = baker.make( + Contribution, evaluation=evaluation, contributor=baker.make(UserProfile), can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) evaluation = Evaluation.objects.get() self.assertFalse(evaluation.general_contribution_has_questionnaires) self.assertFalse(evaluation.all_contributions_have_questionnaires) - general_contribution = mommy.make(Contribution, evaluation=evaluation, contributor=None) + general_contribution = baker.make(Contribution, evaluation=evaluation, contributor=None) evaluation = Evaluation.objects.get() self.assertFalse(evaluation.general_contribution_has_questionnaires) self.assertFalse(evaluation.all_contributions_have_questionnaires) - questionnaire = mommy.make(Questionnaire) + questionnaire = baker.make(Questionnaire) general_contribution.questionnaires.add(questionnaire) self.assertTrue(evaluation.general_contribution_has_questionnaires) self.assertFalse(evaluation.all_contributions_have_questionnaires) @@ -150,19 +150,19 @@ def test_has_enough_questionnaires(self): self.assertTrue(evaluation.all_contributions_have_questionnaires) def test_deleting_last_modified_user_does_not_delete_evaluation(self): - user = mommy.make(UserProfile) - evaluation = mommy.make(Evaluation, last_modified_user=user) + user = baker.make(UserProfile) + evaluation = baker.make(Evaluation, last_modified_user=user) user.delete() self.assertTrue(Evaluation.objects.filter(pk=evaluation.pk).exists()) def test_single_result_can_be_deleted_only_in_reviewed(self): - responsible = 
mommy.make(UserProfile) - evaluation = mommy.make(Evaluation, is_single_result=True) - contribution = mommy.make(Contribution, + responsible = baker.make(UserProfile) + evaluation = baker.make(Evaluation, is_single_result=True) + contribution = baker.make(Contribution, evaluation=evaluation, contributor=responsible, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS, questionnaires=[Questionnaire.single_result_questionnaire()] ) - mommy.make(RatingAnswerCounter, answer=1, count=1, question=Questionnaire.single_result_questionnaire().questions.first(), contribution=contribution) + baker.make(RatingAnswerCounter, answer=1, count=1, question=Questionnaire.single_result_questionnaire().questions.first(), contribution=contribution) evaluation.single_result_created() evaluation.publish() evaluation.save() @@ -179,24 +179,24 @@ def test_single_result_can_be_deleted_only_in_reviewed(self): def test_single_result_can_be_published(self): """ Regression test for #1238 """ - responsible = mommy.make(UserProfile) - single_result = mommy.make(Evaluation, is_single_result=True, _participant_count=5, _voter_count=5) - contribution = mommy.make(Contribution, + responsible = baker.make(UserProfile) + single_result = baker.make(Evaluation, is_single_result=True, _participant_count=5, _voter_count=5) + contribution = baker.make(Contribution, evaluation=single_result, contributor=responsible, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS, questionnaires=[Questionnaire.single_result_questionnaire()] ) - mommy.make(RatingAnswerCounter, answer=1, count=1, question=Questionnaire.single_result_questionnaire().questions.first(), contribution=contribution) + baker.make(RatingAnswerCounter, answer=1, count=1, question=Questionnaire.single_result_questionnaire().questions.first(), contribution=contribution) single_result.single_result_created() single_result.publish() # used to crash def test_adding_second_voter_sets_can_publish_text_results_to_true(self): - student1 = mommy.make(UserProfile) - student2 = mommy.make(UserProfile) - evaluation = mommy.make(Evaluation, participants=[student1, student2], voters=[student1], state="in_evaluation") + student1 = baker.make(UserProfile) + student2 = baker.make(UserProfile) + evaluation = baker.make(Evaluation, participants=[student1, student2], voters=[student1], state="in_evaluation") evaluation.save() - top_general_questionnaire = mommy.make(Questionnaire, type=Questionnaire.TOP) - mommy.make(Question, questionnaire=top_general_questionnaire, type=Question.LIKERT) + top_general_questionnaire = baker.make(Questionnaire, type=Questionnaire.TOP) + baker.make(Question, questionnaire=top_general_questionnaire, type=Question.LIKERT) evaluation.general_contribution.questionnaires.set([top_general_questionnaire]) self.assertFalse(evaluation.can_publish_text_results) @@ -207,40 +207,40 @@ def test_adding_second_voter_sets_can_publish_text_results_to_true(self): self.assertTrue(evaluation.can_publish_text_results) def test_textanswers_get_deleted_if_they_cannot_be_published(self): - student = mommy.make(UserProfile) - evaluation = mommy.make(Evaluation, state='reviewed', participants=[student], voters=[student], can_publish_text_results=False) - questionnaire = mommy.make(Questionnaire, type=Questionnaire.TOP) - question = mommy.make(Question, type=Question.TEXT, questionnaire=questionnaire) + student = baker.make(UserProfile) + evaluation = baker.make(Evaluation, state='reviewed', participants=[student], voters=[student], 
can_publish_text_results=False) + questionnaire = baker.make(Questionnaire, type=Questionnaire.TOP) + question = baker.make(Question, type=Question.TEXT, questionnaire=questionnaire) evaluation.general_contribution.questionnaires.set([questionnaire]) - mommy.make(TextAnswer, question=question, contribution=evaluation.general_contribution) + baker.make(TextAnswer, question=question, contribution=evaluation.general_contribution) self.assertEqual(evaluation.textanswer_set.count(), 1) evaluation.publish() self.assertEqual(evaluation.textanswer_set.count(), 0) def test_textanswers_do_not_get_deleted_if_they_can_be_published(self): - student = mommy.make(UserProfile) - student2 = mommy.make(UserProfile) - evaluation = mommy.make(Evaluation, state='reviewed', participants=[student, student2], voters=[student, student2], can_publish_text_results=True) - questionnaire = mommy.make(Questionnaire, type=Questionnaire.TOP) - question = mommy.make(Question, type=Question.TEXT, questionnaire=questionnaire) + student = baker.make(UserProfile) + student2 = baker.make(UserProfile) + evaluation = baker.make(Evaluation, state='reviewed', participants=[student, student2], voters=[student, student2], can_publish_text_results=True) + questionnaire = baker.make(Questionnaire, type=Questionnaire.TOP) + question = baker.make(Question, type=Question.TEXT, questionnaire=questionnaire) evaluation.general_contribution.questionnaires.set([questionnaire]) - mommy.make(TextAnswer, question=question, contribution=evaluation.general_contribution) + baker.make(TextAnswer, question=question, contribution=evaluation.general_contribution) self.assertEqual(evaluation.textanswer_set.count(), 1) evaluation.publish() self.assertEqual(evaluation.textanswer_set.count(), 1) def test_hidden_textanswers_get_deleted_on_publish(self): - student = mommy.make(UserProfile) - student2 = mommy.make(UserProfile) - evaluation = mommy.make(Evaluation, state='reviewed', participants=[student, student2], voters=[student, student2], can_publish_text_results=True) - questionnaire = mommy.make(Questionnaire, type=Questionnaire.TOP) - question = mommy.make(Question, type=Question.TEXT, questionnaire=questionnaire) + student = baker.make(UserProfile) + student2 = baker.make(UserProfile) + evaluation = baker.make(Evaluation, state='reviewed', participants=[student, student2], voters=[student, student2], can_publish_text_results=True) + questionnaire = baker.make(Questionnaire, type=Questionnaire.TOP) + question = baker.make(Question, type=Question.TEXT, questionnaire=questionnaire) evaluation.general_contribution.questionnaires.set([questionnaire]) - mommy.make(TextAnswer, question=question, contribution=evaluation.general_contribution, answer="hidden", state=TextAnswer.HIDDEN) - mommy.make(TextAnswer, question=question, contribution=evaluation.general_contribution, answer="published", state=TextAnswer.PUBLISHED) - mommy.make(TextAnswer, question=question, contribution=evaluation.general_contribution, answer="private", state=TextAnswer.PRIVATE) + baker.make(TextAnswer, question=question, contribution=evaluation.general_contribution, answer="hidden", state=TextAnswer.HIDDEN) + baker.make(TextAnswer, question=question, contribution=evaluation.general_contribution, answer="published", state=TextAnswer.PUBLISHED) + baker.make(TextAnswer, question=question, contribution=evaluation.general_contribution, answer="private", state=TextAnswer.PRIVATE) self.assertEqual(evaluation.textanswer_set.count(), 3) evaluation.publish() @@ -248,13 +248,13 @@ def 
test_hidden_textanswers_get_deleted_on_publish(self): self.assertFalse(TextAnswer.objects.filter(answer="hidden").exists()) def test_original_textanswers_get_deleted_on_publish(self): - student = mommy.make(UserProfile) - student2 = mommy.make(UserProfile) - evaluation = mommy.make(Evaluation, state='reviewed', participants=[student, student2], voters=[student, student2], can_publish_text_results=True) - questionnaire = mommy.make(Questionnaire, type=Questionnaire.TOP) - question = mommy.make(Question, type=Question.TEXT, questionnaire=questionnaire) + student = baker.make(UserProfile) + student2 = baker.make(UserProfile) + evaluation = baker.make(Evaluation, state='reviewed', participants=[student, student2], voters=[student, student2], can_publish_text_results=True) + questionnaire = baker.make(Questionnaire, type=Questionnaire.TOP) + question = baker.make(Question, type=Question.TEXT, questionnaire=questionnaire) evaluation.general_contribution.questionnaires.set([questionnaire]) - mommy.make(TextAnswer, question=question, contribution=evaluation.general_contribution, answer="published answer", original_answer="original answer", state=TextAnswer.PUBLISHED) + baker.make(TextAnswer, question=question, contribution=evaluation.general_contribution, answer="published answer", original_answer="original answer", state=TextAnswer.PUBLISHED) self.assertEqual(evaluation.textanswer_set.count(), 1) self.assertFalse(TextAnswer.objects.get().original_answer is None) @@ -263,8 +263,8 @@ def test_original_textanswers_get_deleted_on_publish(self): self.assertTrue(TextAnswer.objects.get().original_answer is None) def test_publishing_and_unpublishing_effect_on_template_cache(self): - student = mommy.make(UserProfile) - evaluation = mommy.make(Evaluation, state='reviewed', participants=[student], voters=[student], can_publish_text_results=True) + student = baker.make(UserProfile) + evaluation = baker.make(Evaluation, state='reviewed', participants=[student], voters=[student], can_publish_text_results=True) self.assertIsNone(caches['results'].get(get_evaluation_result_template_fragment_cache_key(evaluation.id, "en", True))) self.assertIsNone(caches['results'].get(get_evaluation_result_template_fragment_cache_key(evaluation.id, "en", False))) @@ -290,8 +290,8 @@ def test_publishing_and_unpublishing_effect_on_template_cache(self): class TestCourse(TestCase): def test_can_be_deleted_by_manager(self): - course = mommy.make(Course) - evaluation = mommy.make(Evaluation, course=course) + course = baker.make(Course) + evaluation = baker.make(Evaluation, course=course) self.assertFalse(course.can_be_deleted_by_manager) evaluation.delete() @@ -299,31 +299,31 @@ def test_can_be_deleted_by_manager(self): def test_responsibles_names(self): # last names required for sorting - user1 = mommy.make(UserProfile, last_name="Doe") - user2 = mommy.make(UserProfile, last_name="Meyer") - course = mommy.make(Course, responsibles=[user1, user2]) + user1 = baker.make(UserProfile, last_name="Doe") + user2 = baker.make(UserProfile, last_name="Meyer") + course = baker.make(Course, responsibles=[user1, user2]) self.assertEqual(course.responsibles_names, ("{}, {}").format(user1.full_name, user2.full_name)) class TestUserProfile(TestCase): def test_is_student(self): - some_user = mommy.make(UserProfile) + some_user = baker.make(UserProfile) self.assertFalse(some_user.is_student) - student = mommy.make(UserProfile, evaluations_participating_in=[mommy.make(Evaluation)]) + student = baker.make(UserProfile, 
evaluations_participating_in=[baker.make(Evaluation)]) self.assertTrue(student.is_student) - contributor = mommy.make(UserProfile, contributions=[mommy.make(Contribution)]) + contributor = baker.make(UserProfile, contributions=[baker.make(Contribution)]) self.assertFalse(contributor.is_student) - semester_contributed_to = mommy.make(Semester, created_at=date.today()) - semester_participated_in = mommy.make(Semester, created_at=date.today()) - course_contributed_to = mommy.make(Course, semester=semester_contributed_to) - course_participated_in = mommy.make(Course, semester=semester_participated_in) - evaluation_contributed_to = mommy.make(Evaluation, course=course_contributed_to) - evaluation_participated_in = mommy.make(Evaluation, course=course_participated_in) - contribution = mommy.make(Contribution, evaluation=evaluation_contributed_to) - user = mommy.make(UserProfile, contributions=[contribution], evaluations_participating_in=[evaluation_participated_in]) + semester_contributed_to = baker.make(Semester, created_at=date.today()) + semester_participated_in = baker.make(Semester, created_at=date.today()) + course_contributed_to = baker.make(Course, semester=semester_contributed_to) + course_participated_in = baker.make(Course, semester=semester_participated_in) + evaluation_contributed_to = baker.make(Evaluation, course=course_contributed_to) + evaluation_participated_in = baker.make(Evaluation, course=course_participated_in) + contribution = baker.make(Contribution, evaluation=evaluation_contributed_to) + user = baker.make(UserProfile, contributions=[contribution], evaluations_participating_in=[evaluation_participated_in]) self.assertTrue(user.is_student) @@ -338,73 +338,73 @@ def test_is_student(self): self.assertFalse(user.is_student) def test_can_be_deleted_by_manager(self): - user = mommy.make(UserProfile) - mommy.make(Evaluation, participants=[user], state="new") + user = baker.make(UserProfile) + baker.make(Evaluation, participants=[user], state="new") self.assertFalse(user.can_be_deleted_by_manager) - user2 = mommy.make(UserProfile) - mommy.make(Evaluation, participants=[user2], state="in_evaluation") + user2 = baker.make(UserProfile) + baker.make(Evaluation, participants=[user2], state="in_evaluation") self.assertFalse(user2.can_be_deleted_by_manager) - contributor = mommy.make(UserProfile) - mommy.make(Contribution, contributor=contributor) + contributor = baker.make(UserProfile) + baker.make(Contribution, contributor=contributor) self.assertFalse(contributor.can_be_deleted_by_manager) - proxy_user = mommy.make(UserProfile, is_proxy_user=True) + proxy_user = baker.make(UserProfile, is_proxy_user=True) self.assertFalse(proxy_user.can_be_deleted_by_manager) def test_inactive_users_hidden(self): - active_user = mommy.make(UserProfile) - mommy.make(UserProfile, is_active=False) + active_user = baker.make(UserProfile) + baker.make(UserProfile, is_active=False) self.assertEqual(list(UserProfile.objects.exclude(is_active=False)), [active_user]) def test_inactive_users_shown(self): - active_user = mommy.make(UserProfile) - inactive_user = mommy.make(UserProfile, is_active=False) + active_user = baker.make(UserProfile) + inactive_user = baker.make(UserProfile, is_active=False) user_list = list(UserProfile.objects.all()) self.assertIn(active_user, user_list) self.assertIn(inactive_user, user_list) def test_can_be_marked_inactive_by_manager(self): - user = mommy.make(UserProfile) - evaluation = mommy.make(Evaluation, state="new") + user = baker.make(UserProfile) + evaluation = 
baker.make(Evaluation, state="new") self.assertTrue(user.can_be_marked_inactive_by_manager) evaluation.participants.set([user]) evaluation.save() self.assertFalse(user.can_be_marked_inactive_by_manager) - contributor = mommy.make(UserProfile) - mommy.make(Contribution, contributor=contributor) + contributor = baker.make(UserProfile) + baker.make(Contribution, contributor=contributor) self.assertFalse(contributor.can_be_marked_inactive_by_manager) - reviewer = mommy.make(UserProfile, groups=[Group.objects.get(name="Reviewer")]) + reviewer = baker.make(UserProfile, groups=[Group.objects.get(name="Reviewer")]) self.assertFalse(reviewer.can_be_marked_inactive_by_manager) - grade_publisher = mommy.make(UserProfile, groups=[Group.objects.get(name="Grade publisher")]) + grade_publisher = baker.make(UserProfile, groups=[Group.objects.get(name="Grade publisher")]) self.assertFalse(grade_publisher.can_be_marked_inactive_by_manager) - super_user = mommy.make(UserProfile, is_superuser=True) + super_user = baker.make(UserProfile, is_superuser=True) self.assertFalse(super_user.can_be_marked_inactive_by_manager) - proxy_user = mommy.make(UserProfile, is_proxy_user=True) + proxy_user = baker.make(UserProfile, is_proxy_user=True) self.assertFalse(proxy_user.can_be_marked_inactive_by_manager) @override_settings(INSTITUTION_EMAIL_REPLACEMENTS=[("example.com","institution.com")]) def test_email_domain_replacement(self): - user = mommy.make(UserProfile, email="[email protected]") + user = baker.make(UserProfile, email="[email protected]") self.assertEqual(user.email, "[email protected]") class ParticipationArchivingTests(TestCase): @classmethod def setUpTestData(cls): - cls.semester = mommy.make(Semester) - cls.evaluation = mommy.make(Evaluation, state="published", course=mommy.make(Course, semester=cls.semester)) - cls.evaluation.general_contribution.questionnaires.set([mommy.make(Questionnaire)]) + cls.semester = baker.make(Semester) + cls.evaluation = baker.make(Evaluation, state="published", course=baker.make(Course, semester=cls.semester)) + cls.evaluation.general_contribution.questionnaires.set([baker.make(Questionnaire)]) - users = mommy.make(UserProfile, _quantity=3) + users = baker.make(UserProfile, _quantity=3) cls.evaluation.participants.set(users) cls.evaluation.voters.set(users[:2]) @@ -468,14 +468,14 @@ def test_archiving_participations_twice_raises_exception(self): self.semester.courses.first().evaluations.first()._archive_participations() def test_evaluation_participations_are_not_archived_if_participant_count_is_set(self): - evaluation = mommy.make(Evaluation, state="published", _participant_count=1, _voter_count=1) + evaluation = baker.make(Evaluation, state="published", _participant_count=1, _voter_count=1) self.assertFalse(evaluation.participations_are_archived) self.assertTrue(evaluation.participations_can_be_archived) def test_archiving_participations_doesnt_change_single_results_participant_count(self): - responsible = mommy.make(UserProfile) - evaluation = mommy.make(Evaluation, state="published", is_single_result=True, _participant_count=5, _voter_count=5) - contribution = mommy.make(Contribution, evaluation=evaluation, contributor=responsible, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) + responsible = baker.make(UserProfile) + evaluation = baker.make(Evaluation, state="published", is_single_result=True, _participant_count=5, _voter_count=5) + contribution = baker.make(Contribution, evaluation=evaluation, contributor=responsible, can_edit=True, 
textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) contribution.questionnaires.add(Questionnaire.single_result_questionnaire()) evaluation.course.semester.archive_participations() @@ -487,14 +487,14 @@ def test_archiving_participations_doesnt_change_single_results_participant_count class TestLoginUrlEmail(TestCase): @classmethod def setUpTestData(cls): - cls.other_user = mommy.make(UserProfile, email="[email protected]") - cls.user = mommy.make(UserProfile, email="[email protected]") + cls.other_user = baker.make(UserProfile, email="[email protected]") + cls.user = baker.make(UserProfile, email="[email protected]") cls.user.ensure_valid_login_key() - cls.evaluation = mommy.make(Evaluation) - mommy.make(Contribution, evaluation=cls.evaluation, contributor=cls.user, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) + cls.evaluation = baker.make(Evaluation) + baker.make(Contribution, evaluation=cls.evaluation, contributor=cls.user, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) - cls.template = mommy.make(EmailTemplate, body="{{ login_url }}") + cls.template = baker.make(EmailTemplate, body="{{ login_url }}") EmailTemplate.objects.filter(name="Login Key Created").update(body="{{ user.login_url }}") @@ -537,23 +537,23 @@ def test_missing_email_address(self): Tests that __send_to_user behaves when the user has no email address. Regression test to https://github.com/fsr-de/EvaP/issues/825 """ - user = mommy.make(UserProfile, email=None) + user = baker.make(UserProfile, email=None) template = EmailTemplate.objects.get(name=EmailTemplate.STUDENT_REMINDER) EmailTemplate.send_to_user(user, template, {}, {}, False, None) class TestEmailRecipientList(TestCase): def test_recipient_list(self): - evaluation = mommy.make(Evaluation) - responsible = mommy.make(UserProfile) - editor = mommy.make(UserProfile) - contributor = mommy.make(UserProfile) + evaluation = baker.make(Evaluation) + responsible = baker.make(UserProfile) + editor = baker.make(UserProfile) + contributor = baker.make(UserProfile) evaluation.course.responsibles.set([responsible]) - mommy.make(Contribution, evaluation=evaluation, contributor=editor, can_edit=True) - mommy.make(Contribution, evaluation=evaluation, contributor=contributor) + baker.make(Contribution, evaluation=evaluation, contributor=editor, can_edit=True) + baker.make(Contribution, evaluation=evaluation, contributor=contributor) - participant1 = mommy.make(UserProfile, evaluations_participating_in=[evaluation]) - participant2 = mommy.make(UserProfile, evaluations_participating_in=[evaluation]) + participant1 = baker.make(UserProfile, evaluations_participating_in=[evaluation]) + participant2 = baker.make(UserProfile, evaluations_participating_in=[evaluation]) evaluation.voters.set([participant1]) recipient_list = EmailTemplate.recipient_list_for_evaluation(evaluation, [], filter_users_in_cc=False) @@ -575,13 +575,13 @@ def test_recipient_list(self): self.assertCountEqual(recipient_list, [participant2]) def test_recipient_list_filtering(self): - evaluation = mommy.make(Evaluation) + evaluation = baker.make(Evaluation) - contributor1 = mommy.make(UserProfile) - contributor2 = mommy.make(UserProfile, delegates=[contributor1]) + contributor1 = baker.make(UserProfile) + contributor2 = baker.make(UserProfile, delegates=[contributor1]) - mommy.make(Contribution, evaluation=evaluation, contributor=contributor1) - mommy.make(Contribution, evaluation=evaluation, contributor=contributor2) + baker.make(Contribution, evaluation=evaluation, 
contributor=contributor1) + baker.make(Contribution, evaluation=evaluation, contributor=contributor2) # no-one should get filtered. recipient_list = EmailTemplate.recipient_list_for_evaluation(evaluation, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=False) @@ -591,8 +591,8 @@ def test_recipient_list_filtering(self): recipient_list = EmailTemplate.recipient_list_for_evaluation(evaluation, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=True) self.assertCountEqual(recipient_list, [contributor2]) - contributor3 = mommy.make(UserProfile, delegates=[contributor2]) - mommy.make(Contribution, evaluation=evaluation, contributor=contributor3) + contributor3 = baker.make(UserProfile, delegates=[contributor2]) + baker.make(Contribution, evaluation=evaluation, contributor=contributor3) # again, no-one should get filtered. recipient_list = EmailTemplate.recipient_list_for_evaluation(evaluation, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=False) diff --git a/evap/evaluation/tests/test_tools.py b/evap/evaluation/tests/test_tools.py --- a/evap/evaluation/tests/test_tools.py +++ b/evap/evaluation/tests/test_tools.py @@ -6,7 +6,7 @@ from django.test.testcases import TestCase from django.utils import translation -from model_mommy import mommy +from model_bakery import baker from evap.evaluation.tests.tools import WebTest from evap.evaluation.models import UserProfile @@ -17,7 +17,7 @@ def test_signal_sets_language_if_none(self): """ Check that a user gets the default language set if they have none """ - user = mommy.make(UserProfile, language=None, email="[email protected]") + user = baker.make(UserProfile, language=None, email="[email protected]") user.ensure_valid_login_key() self.app.get("/", user=user) @@ -30,7 +30,7 @@ def test_signal_doesnt_set_language(self): Activate 'en' as langauge and check, that user does not get this langauge as he has one. """ translation.activate('en') - user = mommy.make(UserProfile, language='de', email="[email protected]") + user = baker.make(UserProfile, language='de', email="[email protected]") user.ensure_valid_login_key() self.app.get(reverse("evaluation:login_key_authentication", args=[user.login_key])) diff --git a/evap/evaluation/tests/test_views.py b/evap/evaluation/tests/test_views.py --- a/evap/evaluation/tests/test_views.py +++ b/evap/evaluation/tests/test_views.py @@ -2,7 +2,7 @@ from django.contrib.auth.hashers import make_password from django_webtest import WebTest -from model_mommy import mommy +from model_bakery import baker from evap.evaluation.models import UserProfile from evap.evaluation.tests.tools import WebTestWith200Check @@ -13,7 +13,7 @@ class TestIndexView(WebTest): def test_passworduser_login(self): """ Tests whether a user can login with an incorrect and a correct password. """ - mommy.make(UserProfile, username='password.user', password=make_password('evap')) + baker.make(UserProfile, username='password.user', password=make_password('evap')) response = self.app.get(self.url) password_form = response.forms[0] password_form['username'] = 'password.user' @@ -26,7 +26,7 @@ def test_send_new_login_key(self): """ Tests whether requesting a new login key is only possible for existing users, shows the expected success message and sends only one email to the requesting user without people in cc even if the user has delegates and cc users. 
""" - mommy.make(UserProfile, email='[email protected]') + baker.make(UserProfile, email='[email protected]') response = self.app.get(self.url) email_form = response.forms[1] email_form['email'] = "[email protected]" @@ -53,7 +53,7 @@ class TestContactEmail(WebTest): csrf_checks = False def test_sends_mail(self): - user = mommy.make(UserProfile) + user = baker.make(UserProfile) self.app.post('/contact', params={'message': 'feedback message', 'title': 'some title', 'sender_email': '[email protected]'}, user=user.username) self.assertEqual(len(mail.outbox), 1) @@ -63,7 +63,7 @@ class TestChangeLanguageView(WebTest): csrf_checks = False def test_changes_language(self): - user = mommy.make(UserProfile, username='tester', language='de') + user = baker.make(UserProfile, username='tester', language='de') self.app.post(self.url, params={'language': 'en'}, user='tester') diff --git a/evap/evaluation/tests/tools.py b/evap/evaluation/tests/tools.py --- a/evap/evaluation/tests/tools.py +++ b/evap/evaluation/tests/tools.py @@ -4,7 +4,7 @@ from django.utils import timezone from django_webtest import WebTest -from model_mommy import mommy +from model_bakery import baker from evap.evaluation.models import Contribution, Course, Degree, Evaluation, Questionnaire, UserProfile from evap.student.tools import question_id @@ -62,14 +62,14 @@ def get_form_data_from_instance(FormClass, instance): def create_evaluation_with_responsible_and_editor(evaluation_id=None): - responsible = mommy.make(UserProfile, username='responsible') - editor = mommy.make(UserProfile, username='editor') + responsible = baker.make(UserProfile, username='responsible') + editor = baker.make(UserProfile, username='editor') in_one_hour = (timezone.now() + timedelta(hours=1)).replace(second=0, microsecond=0) tomorrow = (timezone.now() + timedelta(days=1)).date evaluation_params = dict( state='prepared', - course=mommy.make(Course, degrees=[mommy.make(Degree)], responsibles=[responsible]), + course=baker.make(Course, degrees=[baker.make(Degree)], responsibles=[responsible]), vote_start_datetime=in_one_hour, vote_end_date=tomorrow ) @@ -77,8 +77,8 @@ def create_evaluation_with_responsible_and_editor(evaluation_id=None): if evaluation_id: evaluation_params['id'] = evaluation_id - evaluation = mommy.make(Evaluation, **evaluation_params) - mommy.make(Contribution, evaluation=evaluation, contributor=editor, can_edit=True, questionnaires=[mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR)]) - evaluation.general_contribution.questionnaires.set([mommy.make(Questionnaire, type=Questionnaire.TOP)]) + evaluation = baker.make(Evaluation, **evaluation_params) + baker.make(Contribution, evaluation=evaluation, contributor=editor, can_edit=True, questionnaires=[baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR)]) + evaluation.general_contribution.questionnaires.set([baker.make(Questionnaire, type=Questionnaire.TOP)]) return evaluation diff --git a/evap/grades/tests.py b/evap/grades/tests.py --- a/evap/grades/tests.py +++ b/evap/grades/tests.py @@ -4,7 +4,7 @@ from django.contrib.auth.models import Group from django_webtest import WebTest -from model_mommy import mommy +from model_bakery import baker from evap.evaluation.models import Contribution, Course, Evaluation, Questionnaire, Semester, UserProfile @@ -14,15 +14,15 @@ class GradeUploadTest(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username="grade_publisher", groups=[Group.objects.get(name="Grade publisher")]) - cls.student = mommy.make(UserProfile, 
username="student", email="[email protected]") - cls.student2 = mommy.make(UserProfile, username="student2", email="[email protected]") - cls.student3 = mommy.make(UserProfile, username="student3", email="[email protected]") - editor = mommy.make(UserProfile, username="editor", email="[email protected]") - - cls.semester = mommy.make(Semester, grade_documents_are_deleted=False) - cls.course = mommy.make(Course, semester=cls.semester) - cls.evaluation = mommy.make( + baker.make(UserProfile, username="grade_publisher", groups=[Group.objects.get(name="Grade publisher")]) + cls.student = baker.make(UserProfile, username="student", email="[email protected]") + cls.student2 = baker.make(UserProfile, username="student2", email="[email protected]") + cls.student3 = baker.make(UserProfile, username="student3", email="[email protected]") + editor = baker.make(UserProfile, username="editor", email="[email protected]") + + cls.semester = baker.make(Semester, grade_documents_are_deleted=False) + cls.course = baker.make(Course, semester=cls.semester) + cls.evaluation = baker.make( Evaluation, course=cls.course, vote_start_datetime=datetime.now() - timedelta(days=10), @@ -31,11 +31,11 @@ def setUpTestData(cls): voters=[cls.student, cls.student2], ) - contribution = mommy.make(Contribution, evaluation=cls.evaluation, contributor=editor, can_edit=True, + contribution = baker.make(Contribution, evaluation=cls.evaluation, contributor=editor, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) - contribution.questionnaires.set([mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR)]) + contribution.questionnaires.set([baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR)]) - cls.evaluation.general_contribution.questionnaires.set([mommy.make(Questionnaire)]) + cls.evaluation.general_contribution.questionnaires.set([baker.make(Questionnaire)]) def setUp(self): self.evaluation = Evaluation.objects.get(pk=self.evaluation.pk) @@ -126,7 +126,7 @@ def test_upload_final_grades(self): self.helper_check_final_grade_upload(course, 0) def test_toggle_no_grades(self): - evaluation = mommy.make( + evaluation = baker.make( Evaluation, name_en="Toggle", vote_start_datetime=datetime.now(), @@ -137,9 +137,9 @@ def test_toggle_no_grades(self): contribution = Contribution(evaluation=evaluation, contributor=UserProfile.objects.get(username="editor"), can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) contribution.save() - contribution.questionnaires.set([mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR)]) + contribution.questionnaires.set([baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR)]) - evaluation.general_contribution.questionnaires.set([mommy.make(Questionnaire)]) + evaluation.general_contribution.questionnaires.set([baker.make(Questionnaire)]) self.assertFalse(evaluation.course.gets_no_grade_documents) @@ -173,9 +173,9 @@ class GradeDocumentIndexTest(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username="grade_publisher", groups=[Group.objects.get(name="Grade publisher")]) - cls.semester = mommy.make(Semester, grade_documents_are_deleted=False) - cls.archived_semester = mommy.make(Semester, grade_documents_are_deleted=True) + baker.make(UserProfile, username="grade_publisher", groups=[Group.objects.get(name="Grade publisher")]) + cls.semester = baker.make(Semester, grade_documents_are_deleted=False) + cls.archived_semester = baker.make(Semester, grade_documents_are_deleted=True) def test_visible_semesters(self): page = 
self.app.get(self.url, user="grade_publisher", status=200) @@ -188,17 +188,17 @@ class GradeSemesterViewTest(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username="grade_publisher", groups=[Group.objects.get(name="Grade publisher")]) + baker.make(UserProfile, username="grade_publisher", groups=[Group.objects.get(name="Grade publisher")]) def test_does_not_crash(self): - semester = mommy.make(Semester, pk=1, grade_documents_are_deleted=False) - course = mommy.make(Course, semester=semester) - mommy.make(Evaluation, course=course, state="prepared") + semester = baker.make(Semester, pk=1, grade_documents_are_deleted=False) + course = baker.make(Course, semester=semester) + baker.make(Evaluation, course=course, state="prepared") page = self.app.get(self.url, user="grade_publisher", status=200) self.assertIn(course.name, page) def test_403_on_deleted(self): - mommy.make(Semester, pk=1, grade_documents_are_deleted=True) + baker.make(Semester, pk=1, grade_documents_are_deleted=True) self.app.get('/grades/semester/1', user="grade_publisher", status=403) @@ -207,14 +207,14 @@ class GradeCourseViewTest(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username="grade_publisher", groups=[Group.objects.get(name="Grade publisher")]) + baker.make(UserProfile, username="grade_publisher", groups=[Group.objects.get(name="Grade publisher")]) def test_does_not_crash(self): - semester = mommy.make(Semester, pk=1, grade_documents_are_deleted=False) - mommy.make(Evaluation, course=mommy.make(Course, pk=1, semester=semester), state="prepared") + semester = baker.make(Semester, pk=1, grade_documents_are_deleted=False) + baker.make(Evaluation, course=baker.make(Course, pk=1, semester=semester), state="prepared") self.app.get('/grades/semester/1/course/1', user="grade_publisher", status=200) def test_403_on_archived_semester(self): - archived_semester = mommy.make(Semester, pk=1, grade_documents_are_deleted=True) - mommy.make(Evaluation, course=mommy.make(Course, pk=1, semester=archived_semester), state="prepared") + archived_semester = baker.make(Semester, pk=1, grade_documents_are_deleted=True) + baker.make(Evaluation, course=baker.make(Course, pk=1, semester=archived_semester), state="prepared") self.app.get('/grades/semester/1/course/1', user="grade_publisher", status=403) diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py --- a/evap/results/tests/test_exporters.py +++ b/evap/results/tests/test_exporters.py @@ -1,6 +1,6 @@ import xlrd from io import BytesIO -from model_mommy import mommy +from model_bakery import baker from django.test import TestCase from django.utils import translation @@ -26,31 +26,31 @@ def test_grade_color_calculation(self): self.assertEqual(exporter.normalize_number(2.8), 2.8) def test_questionnaire_ordering(self): - degree = mommy.make(Degree) - evaluation = mommy.make( + degree = baker.make(Degree) + evaluation = baker.make( Evaluation, - course=mommy.make(Course, degrees=[degree]), + course=baker.make(Course, degrees=[degree]), state='published', _participant_count=2, _voter_count=2 ) - questionnaire_1 = mommy.make(Questionnaire, order=1, type=Questionnaire.TOP) - questionnaire_2 = mommy.make(Questionnaire, order=4, type=Questionnaire.TOP) - questionnaire_3 = mommy.make(Questionnaire, order=1, type=Questionnaire.BOTTOM) - questionnaire_4 = mommy.make(Questionnaire, order=4, type=Questionnaire.BOTTOM) + questionnaire_1 = baker.make(Questionnaire, order=1, type=Questionnaire.TOP) + questionnaire_2 = 
baker.make(Questionnaire, order=4, type=Questionnaire.TOP) + questionnaire_3 = baker.make(Questionnaire, order=1, type=Questionnaire.BOTTOM) + questionnaire_4 = baker.make(Questionnaire, order=4, type=Questionnaire.BOTTOM) - question_1 = mommy.make(Question, type=Question.LIKERT, questionnaire=questionnaire_1) - question_2 = mommy.make(Question, type=Question.LIKERT, questionnaire=questionnaire_2) - question_3 = mommy.make(Question, type=Question.LIKERT, questionnaire=questionnaire_3) - question_4 = mommy.make(Question, type=Question.LIKERT, questionnaire=questionnaire_4) + question_1 = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire_1) + question_2 = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire_2) + question_3 = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire_3) + question_4 = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire_4) evaluation.general_contribution.questionnaires.set([questionnaire_1, questionnaire_2, questionnaire_3, questionnaire_4]) - mommy.make(RatingAnswerCounter, question=question_1, contribution=evaluation.general_contribution, answer=3, count=100) - mommy.make(RatingAnswerCounter, question=question_2, contribution=evaluation.general_contribution, answer=3, count=100) - mommy.make(RatingAnswerCounter, question=question_3, contribution=evaluation.general_contribution, answer=3, count=100) - mommy.make(RatingAnswerCounter, question=question_4, contribution=evaluation.general_contribution, answer=3, count=100) + baker.make(RatingAnswerCounter, question=question_1, contribution=evaluation.general_contribution, answer=3, count=100) + baker.make(RatingAnswerCounter, question=question_2, contribution=evaluation.general_contribution, answer=3, count=100) + baker.make(RatingAnswerCounter, question=question_3, contribution=evaluation.general_contribution, answer=3, count=100) + baker.make(RatingAnswerCounter, question=question_4, contribution=evaluation.general_contribution, answer=3, count=100) binary_content = BytesIO() ExcelExporter(evaluation.course.semester).export( @@ -75,25 +75,25 @@ def test_questionnaire_ordering(self): self.assertEqual(workbook.sheets()[0].row_values(14)[0], question_4.text) def test_heading_question_filtering(self): - degree = mommy.make(Degree) - evaluation = mommy.make( + degree = baker.make(Degree) + evaluation = baker.make( Evaluation, - course=mommy.make(Course, degrees=[degree]), + course=baker.make(Course, degrees=[degree]), state='published', _participant_count=2, _voter_count=2 ) - contributor = mommy.make(UserProfile) - evaluation.general_contribution.questionnaires.set([mommy.make(Questionnaire)]) + contributor = baker.make(UserProfile) + evaluation.general_contribution.questionnaires.set([baker.make(Questionnaire)]) - questionnaire = mommy.make(Questionnaire) - mommy.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=0) - heading_question = mommy.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=1) - likert_question = mommy.make(Question, type=Question.LIKERT, questionnaire=questionnaire, order=2) - mommy.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=3) + questionnaire = baker.make(Questionnaire) + baker.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=0) + heading_question = baker.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=1) + likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire, order=2) 
+ baker.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=3) - contribution = mommy.make(Contribution, evaluation=evaluation, questionnaires=[questionnaire], contributor=contributor) - mommy.make(RatingAnswerCounter, question=likert_question, contribution=contribution, answer=3, count=100) + contribution = baker.make(Contribution, evaluation=evaluation, questionnaires=[questionnaire], contributor=contributor) + baker.make(RatingAnswerCounter, question=likert_question, contribution=contribution, answer=3, count=100) binary_content = BytesIO() ExcelExporter(evaluation.course.semester).export( @@ -111,20 +111,20 @@ def test_heading_question_filtering(self): self.assertEqual(workbook.sheets()[0].row_values(7)[0], "") def test_view_excel_file_sorted(self): - semester = mommy.make(Semester) - course_type = mommy.make(CourseType) - degree = mommy.make(Degree) - mommy.make( + semester = baker.make(Semester) + course_type = baker.make(CourseType) + degree = baker.make(Degree) + baker.make( Evaluation, state='published', - course=mommy.make(Course, degrees=[degree], type=course_type, semester=semester, name_de="A", name_en="B"), + course=baker.make(Course, degrees=[degree], type=course_type, semester=semester, name_de="A", name_en="B"), name_de='Evaluation1', name_en='Evaluation1' ) - mommy.make( + baker.make( Evaluation, state='published', - course=mommy.make(Course, degrees=[degree], type=course_type, semester=semester, name_de="B", name_en="A"), + course=baker.make(Course, degrees=[degree], type=course_type, semester=semester, name_de="B", name_en="A"), name_de='Evaluation2', name_en='Evaluation2' ) @@ -150,31 +150,31 @@ def test_view_excel_file_sorted(self): self.assertEqual(workbook.sheets()[0].row_values(0)[2], "B – Evaluation1") def test_course_type_ordering(self): - degree = mommy.make(Degree) - course_type_1 = mommy.make(CourseType, order=1) - course_type_2 = mommy.make(CourseType, order=2) - semester = mommy.make(Semester) - evaluation_1 = mommy.make(Evaluation, - course=mommy.make(Course, semester=semester, degrees=[degree], type=course_type_1), + degree = baker.make(Degree) + course_type_1 = baker.make(CourseType, order=1) + course_type_2 = baker.make(CourseType, order=2) + semester = baker.make(Semester) + evaluation_1 = baker.make(Evaluation, + course=baker.make(Course, semester=semester, degrees=[degree], type=course_type_1), state='published', _participant_count=2, _voter_count=2 ) - evaluation_2 = mommy.make(Evaluation, - course=mommy.make(Course, semester=semester, degrees=[degree], type=course_type_2), + evaluation_2 = baker.make(Evaluation, + course=baker.make(Course, semester=semester, degrees=[degree], type=course_type_2), state='published', _participant_count=2, _voter_count=2 ) - questionnaire = mommy.make(Questionnaire) - question = mommy.make(Question, type=Question.LIKERT, questionnaire=questionnaire) + questionnaire = baker.make(Questionnaire) + question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire) evaluation_1.general_contribution.questionnaires.set([questionnaire]) - mommy.make(RatingAnswerCounter, question=question, contribution=evaluation_1.general_contribution, answer=3, count=2) + baker.make(RatingAnswerCounter, question=question, contribution=evaluation_1.general_contribution, answer=3, count=2) evaluation_2.general_contribution.questionnaires.set([questionnaire]) - mommy.make(RatingAnswerCounter, question=question, contribution=evaluation_2.general_contribution, answer=3, count=2) + 
baker.make(RatingAnswerCounter, question=question, contribution=evaluation_2.general_contribution, answer=3, count=2) binary_content = BytesIO() ExcelExporter(semester).export(binary_content, [([degree.id], [course_type_1.id, course_type_2.id])], True, True) diff --git a/evap/results/tests/test_tools.py b/evap/results/tests/test_tools.py --- a/evap/results/tests/test_tools.py +++ b/evap/results/tests/test_tools.py @@ -3,7 +3,7 @@ from django.core.cache import caches from django.test import override_settings from django.test.testcases import TestCase -from model_mommy import mommy +from model_bakery import baker from evap.evaluation.models import (Contribution, Course, Evaluation, Question, Questionnaire, RatingAnswerCounter, TextAnswer, UserProfile) @@ -16,7 +16,7 @@ class TestCalculateResults(TestCase): def test_caches_published_evaluation(self): - evaluation = mommy.make(Evaluation, state='published') + evaluation = baker.make(Evaluation, state='published') self.assertIsNone(caches['results'].get(get_collect_results_cache_key(evaluation))) @@ -25,7 +25,7 @@ def test_caches_published_evaluation(self): self.assertIsNotNone(caches['results'].get(get_collect_results_cache_key(evaluation))) def test_cache_unpublished_evaluation(self): - evaluation = mommy.make(Evaluation, state='published', _voter_count=0, _participant_count=0) + evaluation = baker.make(Evaluation, state='published', _voter_count=0, _participant_count=0) collect_results(evaluation) evaluation.unpublish() evaluation.save() @@ -33,19 +33,19 @@ def test_cache_unpublished_evaluation(self): self.assertIsNone(caches['results'].get(get_collect_results_cache_key(evaluation))) def test_calculation_unipolar_results(self): - contributor1 = mommy.make(UserProfile) - student = mommy.make(UserProfile) + contributor1 = baker.make(UserProfile) + student = baker.make(UserProfile) - evaluation = mommy.make(Evaluation, state='published', participants=[student, contributor1], voters=[student, contributor1]) - questionnaire = mommy.make(Questionnaire) - question = mommy.make(Question, questionnaire=questionnaire, type=Question.GRADE) - contribution1 = mommy.make(Contribution, contributor=contributor1, evaluation=evaluation, questionnaires=[questionnaire]) + evaluation = baker.make(Evaluation, state='published', participants=[student, contributor1], voters=[student, contributor1]) + questionnaire = baker.make(Questionnaire) + question = baker.make(Question, questionnaire=questionnaire, type=Question.GRADE) + contribution1 = baker.make(Contribution, contributor=contributor1, evaluation=evaluation, questionnaires=[questionnaire]) - mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=1, count=5) - mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=2, count=15) - mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=3, count=40) - mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=4, count=60) - mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=5, count=30) + baker.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=1, count=5) + baker.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=2, count=15) + baker.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=3, count=40) + baker.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=4, count=60) + 
baker.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=5, count=30) evaluation_results = collect_results(evaluation) @@ -59,21 +59,21 @@ def test_calculation_unipolar_results(self): self.assertEqual(question_result.counts, (5, 15, 40, 60, 30)) def test_calculation_bipolar_results(self): - contributor1 = mommy.make(UserProfile) - student = mommy.make(UserProfile) - - evaluation = mommy.make(Evaluation, state='published', participants=[student, contributor1], voters=[student, contributor1]) - questionnaire = mommy.make(Questionnaire) - question = mommy.make(Question, questionnaire=questionnaire, type=Question.EASY_DIFFICULT) - contribution1 = mommy.make(Contribution, contributor=contributor1, evaluation=evaluation, questionnaires=[questionnaire]) - - mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=-3, count=5) - mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=-2, count=5) - mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=-1, count=15) - mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=0, count=30) - mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=1, count=25) - mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=2, count=15) - mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=3, count=10) + contributor1 = baker.make(UserProfile) + student = baker.make(UserProfile) + + evaluation = baker.make(Evaluation, state='published', participants=[student, contributor1], voters=[student, contributor1]) + questionnaire = baker.make(Questionnaire) + question = baker.make(Question, questionnaire=questionnaire, type=Question.EASY_DIFFICULT) + contribution1 = baker.make(Contribution, contributor=contributor1, evaluation=evaluation, questionnaires=[questionnaire]) + + baker.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=-3, count=5) + baker.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=-2, count=5) + baker.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=-1, count=15) + baker.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=0, count=30) + baker.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=1, count=25) + baker.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=2, count=15) + baker.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=3, count=10) evaluation_results = collect_results(evaluation) @@ -97,14 +97,14 @@ def test_calculation_bipolar_results(self): def test_collect_results_after_user_merge(self): """ Asserts that merge_users leaves the results cache in a consistent state. 
Regression test for #907 """ - contributor = mommy.make(UserProfile) - main_user = mommy.make(UserProfile) - student = mommy.make(UserProfile) + contributor = baker.make(UserProfile) + main_user = baker.make(UserProfile) + student = baker.make(UserProfile) - evaluation = mommy.make(Evaluation, state='published', participants=[student]) - questionnaire = mommy.make(Questionnaire) - mommy.make(Question, questionnaire=questionnaire, type=Question.GRADE) - mommy.make(Contribution, contributor=contributor, evaluation=evaluation, questionnaires=[questionnaire]) + evaluation = baker.make(Evaluation, state='published', participants=[student]) + questionnaire = baker.make(Questionnaire) + baker.make(Question, questionnaire=questionnaire, type=Question.GRADE) + baker.make(Contribution, contributor=contributor, evaluation=evaluation, questionnaires=[questionnaire]) collect_results(evaluation) @@ -119,33 +119,33 @@ def test_collect_results_after_user_merge(self): class TestCalculateAverageDistribution(TestCase): @classmethod def setUpTestData(cls): - cls.student1 = mommy.make(UserProfile) - cls.student2 = mommy.make(UserProfile) - - cls.evaluation = mommy.make(Evaluation, state='published', participants=[cls.student1, cls.student2], voters=[cls.student1, cls.student2]) - cls.questionnaire = mommy.make(Questionnaire) - cls.question_grade = mommy.make(Question, questionnaire=cls.questionnaire, type=Question.GRADE) - cls.question_likert = mommy.make(Question, questionnaire=cls.questionnaire, type=Question.LIKERT) - cls.question_likert_2 = mommy.make(Question, questionnaire=cls.questionnaire, type=Question.LIKERT) - cls.question_bipolar = mommy.make(Question, questionnaire=cls.questionnaire, type=Question.FEW_MANY) - cls.question_bipolar_2 = mommy.make(Question, questionnaire=cls.questionnaire, type=Question.LITTLE_MUCH) + cls.student1 = baker.make(UserProfile) + cls.student2 = baker.make(UserProfile) + + cls.evaluation = baker.make(Evaluation, state='published', participants=[cls.student1, cls.student2], voters=[cls.student1, cls.student2]) + cls.questionnaire = baker.make(Questionnaire) + cls.question_grade = baker.make(Question, questionnaire=cls.questionnaire, type=Question.GRADE) + cls.question_likert = baker.make(Question, questionnaire=cls.questionnaire, type=Question.LIKERT) + cls.question_likert_2 = baker.make(Question, questionnaire=cls.questionnaire, type=Question.LIKERT) + cls.question_bipolar = baker.make(Question, questionnaire=cls.questionnaire, type=Question.FEW_MANY) + cls.question_bipolar_2 = baker.make(Question, questionnaire=cls.questionnaire, type=Question.LITTLE_MUCH) cls.general_contribution = cls.evaluation.general_contribution cls.general_contribution.questionnaires.set([cls.questionnaire]) - cls.contribution1 = mommy.make(Contribution, contributor=mommy.make(UserProfile), evaluation=cls.evaluation, questionnaires=[cls.questionnaire]) - cls.contribution2 = mommy.make(Contribution, contributor=mommy.make(UserProfile), evaluation=cls.evaluation, questionnaires=[cls.questionnaire]) + cls.contribution1 = baker.make(Contribution, contributor=baker.make(UserProfile), evaluation=cls.evaluation, questionnaires=[cls.questionnaire]) + cls.contribution2 = baker.make(Contribution, contributor=baker.make(UserProfile), evaluation=cls.evaluation, questionnaires=[cls.questionnaire]) @override_settings(CONTRIBUTOR_GRADE_QUESTIONS_WEIGHT=4, CONTRIBUTOR_NON_GRADE_RATING_QUESTIONS_WEIGHT=6, CONTRIBUTIONS_WEIGHT=3, GENERAL_GRADE_QUESTIONS_WEIGHT=2, GENERAL_NON_GRADE_QUESTIONS_WEIGHT=5) def 
test_average_grade(self): - question_grade2 = mommy.make(Question, questionnaire=self.questionnaire, type=Question.GRADE) + question_grade2 = baker.make(Question, questionnaire=self.questionnaire, type=Question.GRADE) - mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=2, count=1) - mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution2, answer=4, count=2) - mommy.make(RatingAnswerCounter, question=question_grade2, contribution=self.contribution1, answer=1, count=1) - mommy.make(RatingAnswerCounter, question=self.question_likert, contribution=self.contribution1, answer=3, count=4) - mommy.make(RatingAnswerCounter, question=self.question_likert, contribution=self.general_contribution, answer=5, count=5) - mommy.make(RatingAnswerCounter, question=self.question_likert_2, contribution=self.general_contribution, answer=3, count=3) - mommy.make(RatingAnswerCounter, question=self.question_bipolar, contribution=self.general_contribution, answer=3, count=2) - mommy.make(RatingAnswerCounter, question=self.question_bipolar_2, contribution=self.general_contribution, answer=-1, count=4) + baker.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=2, count=1) + baker.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution2, answer=4, count=2) + baker.make(RatingAnswerCounter, question=question_grade2, contribution=self.contribution1, answer=1, count=1) + baker.make(RatingAnswerCounter, question=self.question_likert, contribution=self.contribution1, answer=3, count=4) + baker.make(RatingAnswerCounter, question=self.question_likert, contribution=self.general_contribution, answer=5, count=5) + baker.make(RatingAnswerCounter, question=self.question_likert_2, contribution=self.general_contribution, answer=3, count=3) + baker.make(RatingAnswerCounter, question=self.question_bipolar, contribution=self.general_contribution, answer=3, count=2) + baker.make(RatingAnswerCounter, question=self.question_bipolar_2, contribution=self.general_contribution, answer=-1, count=4) contributor_weights_sum = settings.CONTRIBUTOR_GRADE_QUESTIONS_WEIGHT + settings.CONTRIBUTOR_NON_GRADE_RATING_QUESTIONS_WEIGHT contributor1_average = ((settings.CONTRIBUTOR_GRADE_QUESTIONS_WEIGHT * ((2 * 1) + (1 * 1)) / (1 + 1)) + (settings.CONTRIBUTOR_NON_GRADE_RATING_QUESTIONS_WEIGHT * 3)) / contributor_weights_sum # 2.4 @@ -165,14 +165,14 @@ def test_average_grade(self): @override_settings(CONTRIBUTOR_GRADE_QUESTIONS_WEIGHT=4, CONTRIBUTOR_NON_GRADE_RATING_QUESTIONS_WEIGHT=6, CONTRIBUTIONS_WEIGHT=3, GENERAL_GRADE_QUESTIONS_WEIGHT=2, GENERAL_NON_GRADE_QUESTIONS_WEIGHT=5) def test_distribution_without_general_grade_question(self): - mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=1, count=1) - mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=3, count=1) - mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution2, answer=4, count=1) - mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution2, answer=2, count=1) - mommy.make(RatingAnswerCounter, question=self.question_likert, contribution=self.contribution1, answer=3, count=3) - mommy.make(RatingAnswerCounter, question=self.question_likert, contribution=self.contribution1, answer=5, count=3) - mommy.make(RatingAnswerCounter, question=self.question_likert, 
contribution=self.general_contribution, answer=5, count=5) - mommy.make(RatingAnswerCounter, question=self.question_likert_2, contribution=self.general_contribution, answer=3, count=3) + baker.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=1, count=1) + baker.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=3, count=1) + baker.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution2, answer=4, count=1) + baker.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution2, answer=2, count=1) + baker.make(RatingAnswerCounter, question=self.question_likert, contribution=self.contribution1, answer=3, count=3) + baker.make(RatingAnswerCounter, question=self.question_likert, contribution=self.contribution1, answer=5, count=3) + baker.make(RatingAnswerCounter, question=self.question_likert, contribution=self.general_contribution, answer=5, count=5) + baker.make(RatingAnswerCounter, question=self.question_likert_2, contribution=self.general_contribution, answer=3, count=3) # contribution1: 0.4 * (0.5, 0, 0.5, 0, 0) + 0.6 * (0, 0, 0.5, 0, 0.5) = (0.2, 0, 0.5, 0, 0.3) # contribution2: (0, 0.5, 0, 0.5, 0) @@ -191,15 +191,15 @@ def test_distribution_without_general_grade_question(self): @override_settings(CONTRIBUTOR_GRADE_QUESTIONS_WEIGHT=4, CONTRIBUTOR_NON_GRADE_RATING_QUESTIONS_WEIGHT=6, CONTRIBUTIONS_WEIGHT=3, GENERAL_GRADE_QUESTIONS_WEIGHT=2, GENERAL_NON_GRADE_QUESTIONS_WEIGHT=5) def test_distribution_with_general_grade_question(self): - mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=1, count=1) - mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=3, count=1) - mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution2, answer=4, count=1) - mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution2, answer=2, count=1) - mommy.make(RatingAnswerCounter, question=self.question_likert, contribution=self.contribution1, answer=3, count=3) - mommy.make(RatingAnswerCounter, question=self.question_likert, contribution=self.contribution1, answer=5, count=3) - mommy.make(RatingAnswerCounter, question=self.question_likert, contribution=self.general_contribution, answer=5, count=5) - mommy.make(RatingAnswerCounter, question=self.question_likert_2, contribution=self.general_contribution, answer=3, count=3) - mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.general_contribution, answer=2, count=10) + baker.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=1, count=1) + baker.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=3, count=1) + baker.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution2, answer=4, count=1) + baker.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution2, answer=2, count=1) + baker.make(RatingAnswerCounter, question=self.question_likert, contribution=self.contribution1, answer=3, count=3) + baker.make(RatingAnswerCounter, question=self.question_likert, contribution=self.contribution1, answer=5, count=3) + baker.make(RatingAnswerCounter, question=self.question_likert, contribution=self.general_contribution, answer=5, count=5) + baker.make(RatingAnswerCounter, 
question=self.question_likert_2, contribution=self.general_contribution, answer=3, count=3) + baker.make(RatingAnswerCounter, question=self.question_grade, contribution=self.general_contribution, answer=2, count=10) # contributions and general_non_grade are as above # general_grade: (0, 1, 0, 0, 0) @@ -214,24 +214,24 @@ def test_distribution_with_general_grade_question(self): self.assertAlmostEqual(distribution[4], 0.38) def test_get_single_result_rating_result(self): - single_result_evaluation = mommy.make(Evaluation, state='published', is_single_result=True) + single_result_evaluation = baker.make(Evaluation, state='published', is_single_result=True) questionnaire = Questionnaire.objects.get(name_en=Questionnaire.SINGLE_RESULT_QUESTIONNAIRE_NAME) - contribution = mommy.make(Contribution, contributor=mommy.make(UserProfile), evaluation=single_result_evaluation, questionnaires=[questionnaire], can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) - mommy.make(RatingAnswerCounter, question=questionnaire.questions.first(), contribution=contribution, answer=1, count=1) - mommy.make(RatingAnswerCounter, question=questionnaire.questions.first(), contribution=contribution, answer=4, count=1) + contribution = baker.make(Contribution, contributor=baker.make(UserProfile), evaluation=single_result_evaluation, questionnaires=[questionnaire], can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) + baker.make(RatingAnswerCounter, question=questionnaire.questions.first(), contribution=contribution, answer=1, count=1) + baker.make(RatingAnswerCounter, question=questionnaire.questions.first(), contribution=contribution, answer=4, count=1) distribution = calculate_average_distribution(single_result_evaluation) self.assertEqual(distribution, (0.5, 0, 0, 0.5, 0)) rating_result = get_single_result_rating_result(single_result_evaluation) self.assertEqual(rating_result.counts, (1, 0, 0, 1, 0)) def test_result_calculation_with_no_contributor_rating_question_does_not_fail(self): - evaluation = mommy.make(Evaluation, state='published', participants=[self.student1, self.student2], voters=[self.student1, self.student2]) - questionnaire_text = mommy.make(Questionnaire) - mommy.make(Question, questionnaire=questionnaire_text, type=Question.TEXT) - mommy.make(Contribution, contributor=mommy.make(UserProfile), evaluation=evaluation, questionnaires=[questionnaire_text]) + evaluation = baker.make(Evaluation, state='published', participants=[self.student1, self.student2], voters=[self.student1, self.student2]) + questionnaire_text = baker.make(Questionnaire) + baker.make(Question, questionnaire=questionnaire_text, type=Question.TEXT) + baker.make(Contribution, contributor=baker.make(UserProfile), evaluation=evaluation, questionnaires=[questionnaire_text]) evaluation.general_contribution.questionnaires.set([self.questionnaire]) - mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=evaluation.general_contribution, answer=1, count=1) + baker.make(RatingAnswerCounter, question=self.question_grade, contribution=evaluation.general_contribution, answer=1, count=1) distribution = calculate_average_distribution(evaluation) self.assertEqual(distribution[0], 1) @@ -240,7 +240,7 @@ def test_unipolarized_unipolar(self): counts = (5, 3, 1, 1, 0) answer_counters = [ - mommy.make(RatingAnswerCounter, question=self.question_likert, contribution=self.general_contribution, answer=answer, count=count) + baker.make(RatingAnswerCounter, question=self.question_likert, 
contribution=self.general_contribution, answer=answer, count=count) for answer, count in enumerate(counts, start=1) ] @@ -256,7 +256,7 @@ def test_unipolarized_bipolar(self): counts = (0, 1, 4, 8, 2, 2, 3) answer_counters = [ - mommy.make(RatingAnswerCounter, question=self.question_bipolar, contribution=self.general_contribution, answer=answer, count=count) + baker.make(RatingAnswerCounter, question=self.question_bipolar, contribution=self.general_contribution, answer=answer, count=count) for answer, count in enumerate(counts, start=-3) ] @@ -270,10 +270,10 @@ def test_unipolarized_bipolar(self): def test_unipolarized_yesno(self): counts = (57, 43) - question_yesno = mommy.make(Question, questionnaire=self.questionnaire, type=Question.POSITIVE_YES_NO) + question_yesno = baker.make(Question, questionnaire=self.questionnaire, type=Question.POSITIVE_YES_NO) answer_counters = [ - mommy.make(RatingAnswerCounter, question=question_yesno, contribution=self.general_contribution, answer=1, count=counts[0]), - mommy.make(RatingAnswerCounter, question=question_yesno, contribution=self.general_contribution, answer=5, count=counts[1]) + baker.make(RatingAnswerCounter, question=question_yesno, contribution=self.general_contribution, answer=1, count=counts[0]), + baker.make(RatingAnswerCounter, question=question_yesno, contribution=self.general_contribution, answer=5, count=counts[1]) ] result = RatingResult(question_yesno, answer_counters) @@ -285,10 +285,10 @@ def test_unipolarized_yesno(self): self.assertAlmostEqual(distribution[4], 0.43) def test_calculate_average_course_distribution(self): - mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=1, count=2) + baker.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=1, count=2) course = self.evaluation.course - single_result = mommy.make( + single_result = baker.make( Evaluation, name_de="Single Result", name_en="Single Result", @@ -302,9 +302,9 @@ def test_calculate_average_course_distribution(self): single_result_questionnaire = Questionnaire.single_result_questionnaire() single_result_question = single_result_questionnaire.questions.first() - contribution = mommy.make(Contribution, evaluation=single_result, contributor=None, questionnaires=[single_result_questionnaire]) - mommy.make(RatingAnswerCounter, question=single_result_question, contribution=contribution, answer=2, count=1) - mommy.make(RatingAnswerCounter, question=single_result_question, contribution=contribution, answer=3, count=1) + contribution = baker.make(Contribution, evaluation=single_result, contributor=None, questionnaires=[single_result_questionnaire]) + baker.make(RatingAnswerCounter, question=single_result_question, contribution=contribution, answer=2, count=1) + baker.make(RatingAnswerCounter, question=single_result_question, contribution=contribution, answer=3, count=1) distribution = calculate_average_course_distribution(course) self.assertEqual(distribution[0], 0.25) @@ -317,33 +317,33 @@ def test_calculate_average_course_distribution(self): class TestTextAnswerVisibilityInfo(TestCase): @classmethod def setUpTestData(cls): - cls.delegate1 = mommy.make(UserProfile, username="delegate1") - cls.delegate2 = mommy.make(UserProfile, username="delegate2") - cls.contributor_own = mommy.make(UserProfile, username="contributor_own", delegates=[cls.delegate1]) - cls.contributor_general = mommy.make(UserProfile, username="contributor_general", delegates=[cls.delegate2]) - cls.responsible1 = 
mommy.make(UserProfile, username="responsible1", delegates=[cls.delegate1, cls.contributor_general]) - cls.responsible2 = mommy.make(UserProfile, username="responsible2") - cls.responsible_without_contribution = mommy.make(UserProfile, username="responsible_without_contribution") - cls.other_user = mommy.make(UserProfile, username="other_user") - - cls.evaluation = mommy.make(Evaluation, course=mommy.make(Course, responsibles=[cls.responsible1, cls.responsible2, cls.responsible_without_contribution]), state='published', can_publish_text_results=True) - cls.questionnaire = mommy.make(Questionnaire) - cls.question = mommy.make(Question, questionnaire=cls.questionnaire, type=Question.TEXT) + cls.delegate1 = baker.make(UserProfile, username="delegate1") + cls.delegate2 = baker.make(UserProfile, username="delegate2") + cls.contributor_own = baker.make(UserProfile, username="contributor_own", delegates=[cls.delegate1]) + cls.contributor_general = baker.make(UserProfile, username="contributor_general", delegates=[cls.delegate2]) + cls.responsible1 = baker.make(UserProfile, username="responsible1", delegates=[cls.delegate1, cls.contributor_general]) + cls.responsible2 = baker.make(UserProfile, username="responsible2") + cls.responsible_without_contribution = baker.make(UserProfile, username="responsible_without_contribution") + cls.other_user = baker.make(UserProfile, username="other_user") + + cls.evaluation = baker.make(Evaluation, course=baker.make(Course, responsibles=[cls.responsible1, cls.responsible2, cls.responsible_without_contribution]), state='published', can_publish_text_results=True) + cls.questionnaire = baker.make(Questionnaire) + cls.question = baker.make(Question, questionnaire=cls.questionnaire, type=Question.TEXT) cls.general_contribution = cls.evaluation.general_contribution cls.general_contribution.questionnaires.set([cls.questionnaire]) - cls.responsible1_contribution = mommy.make(Contribution, contributor=cls.responsible1, evaluation=cls.evaluation, + cls.responsible1_contribution = baker.make(Contribution, contributor=cls.responsible1, evaluation=cls.evaluation, questionnaires=[cls.questionnaire], can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) - cls.responsible2_contribution = mommy.make(Contribution, contributor=cls.responsible2, evaluation=cls.evaluation, + cls.responsible2_contribution = baker.make(Contribution, contributor=cls.responsible2, evaluation=cls.evaluation, questionnaires=[cls.questionnaire], can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) - cls.contributor_own_contribution = mommy.make(Contribution, contributor=cls.contributor_own, evaluation=cls.evaluation, + cls.contributor_own_contribution = baker.make(Contribution, contributor=cls.contributor_own, evaluation=cls.evaluation, questionnaires=[cls.questionnaire], textanswer_visibility=Contribution.OWN_TEXTANSWERS) - cls.contributor_general_contribution = mommy.make(Contribution, contributor=cls.contributor_general, evaluation=cls.evaluation, + cls.contributor_general_contribution = baker.make(Contribution, contributor=cls.contributor_general, evaluation=cls.evaluation, questionnaires=[cls.questionnaire], textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) - cls.general_contribution_textanswer = mommy.make(TextAnswer, question=cls.question, contribution=cls.general_contribution, state=TextAnswer.PUBLISHED) - cls.responsible1_textanswer = mommy.make(TextAnswer, question=cls.question, contribution=cls.responsible1_contribution, state=TextAnswer.PUBLISHED) - 
cls.responsible2_textanswer = mommy.make(TextAnswer, question=cls.question, contribution=cls.responsible2_contribution, state=TextAnswer.PUBLISHED) - cls.contributor_own_textanswer = mommy.make(TextAnswer, question=cls.question, contribution=cls.contributor_own_contribution, state=TextAnswer.PUBLISHED) - cls.contributor_general_textanswer = mommy.make(TextAnswer, question=cls.question, contribution=cls.contributor_general_contribution, state=TextAnswer.PUBLISHED) + cls.general_contribution_textanswer = baker.make(TextAnswer, question=cls.question, contribution=cls.general_contribution, state=TextAnswer.PUBLISHED) + cls.responsible1_textanswer = baker.make(TextAnswer, question=cls.question, contribution=cls.responsible1_contribution, state=TextAnswer.PUBLISHED) + cls.responsible2_textanswer = baker.make(TextAnswer, question=cls.question, contribution=cls.responsible2_contribution, state=TextAnswer.PUBLISHED) + cls.contributor_own_textanswer = baker.make(TextAnswer, question=cls.question, contribution=cls.contributor_own_contribution, state=TextAnswer.PUBLISHED) + cls.contributor_general_textanswer = baker.make(TextAnswer, question=cls.question, contribution=cls.contributor_general_contribution, state=TextAnswer.PUBLISHED) def test_text_answer_visible_to_non_contributing_responsible(self): self.assertIn(self.responsible_without_contribution, textanswers_visible_to(self.general_contribution_textanswer.contribution)[0]) diff --git a/evap/results/tests/test_views.py b/evap/results/tests/test_views.py --- a/evap/results/tests/test_views.py +++ b/evap/results/tests/test_views.py @@ -10,7 +10,7 @@ from django.test.utils import CaptureQueriesContext from django_webtest import WebTest -from model_mommy import mommy +from model_bakery import baker from evap.evaluation.models import (Contribution, Course, Degree, Evaluation, Question, Questionnaire, RatingAnswerCounter, Semester, UserProfile) @@ -25,22 +25,22 @@ class TestResultsView(WebTest): @patch('evap.evaluation.models.Evaluation.can_be_seen_by', new=(lambda self, user: True)) def test_multiple_evaluations_per_course(self): - mommy.make(UserProfile, username='student', email="[email protected]") + baker.make(UserProfile, username='student', email="[email protected]") # course with no evaluations does not show up - course = mommy.make(Course) + course = baker.make(Course) page = self.app.get(self.url, user="student") self.assertNotContains(page, course.name) caches['results'].clear() # course with one evaluation is a single line with the evaluation's full_name - evaluation = mommy.make(Evaluation, course=course, name_en='unique_evaluation_name1', name_de="foo", state='published') + evaluation = baker.make(Evaluation, course=course, name_en='unique_evaluation_name1', name_de="foo", state='published') page = self.app.get(self.url, user="student") self.assertContains(page, evaluation.full_name) caches['results'].clear() # course with two evaluations is three lines without using the full names - evaluation2 = mommy.make(Evaluation, course=course, name_en='unique_evaluation_name2', name_de="bar", state='published') + evaluation2 = baker.make(Evaluation, course=course, name_en='unique_evaluation_name2', name_de="bar", state='published') page = self.app.get(self.url, user="student") self.assertContains(page, course.name) self.assertContains(page, evaluation.name_en) @@ -61,15 +61,15 @@ def test_num_queries_is_constant(self): ensures that the number of queries in the user list is constant and not linear to the number of courses/evaluations """ - 
mommy.make(UserProfile, username='student', email="[email protected]") + baker.make(UserProfile, username='student', email="[email protected]") # warm up some caches self.app.get(self.url, user="student") def make_course_with_evaluations(unique_suffix): - course = mommy.make(Course) - mommy.make(Evaluation, course=course, name_en='foo' + unique_suffix, name_de='foo' + unique_suffix, state='published', _voter_count=0) - mommy.make(Evaluation, course=course, name_en='bar' + unique_suffix, name_de='bar' + unique_suffix, state='published', _voter_count=0) + course = baker.make(Course) + baker.make(Evaluation, course=course, name_en='foo' + unique_suffix, name_de='foo' + unique_suffix, state='published', _voter_count=0) + baker.make(Evaluation, course=course, name_en='bar' + unique_suffix, name_de='bar' + unique_suffix, state='published', _voter_count=0) # first measure the number of queries with two courses make_course_with_evaluations('frob') @@ -97,8 +97,8 @@ def make_course_with_evaluations(unique_suffix): class TestGetEvaluationsWithPrefetchedData(TestCase): def test_returns_correct_participant_count(self): """ Regression test for #1248 """ - participants = mommy.make(UserProfile, _quantity=2) - evaluation = mommy.make(Evaluation, + participants = baker.make(UserProfile, _quantity=2) + evaluation = baker.make(Evaluation, state='published', _participant_count=2, _voter_count=2, participants=participants, voters=participants ) @@ -116,22 +116,22 @@ def test_returns_correct_participant_count(self): class TestResultsViewContributionWarning(WebTest): @classmethod def setUpTestData(cls): - cls.semester = mommy.make(Semester, id=3) - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - contributor = mommy.make(UserProfile) + cls.semester = baker.make(Semester, id=3) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + contributor = baker.make(UserProfile) # Set up an evaluation with one question but no answers - student1 = mommy.make(UserProfile) - student2 = mommy.make(UserProfile) - cls.evaluation = mommy.make(Evaluation, id=21, state='published', course=mommy.make(Course, semester=cls.semester), participants=[student1, student2], voters=[student1, student2]) - questionnaire = mommy.make(Questionnaire) + student1 = baker.make(UserProfile) + student2 = baker.make(UserProfile) + cls.evaluation = baker.make(Evaluation, id=21, state='published', course=baker.make(Course, semester=cls.semester), participants=[student1, student2], voters=[student1, student2]) + questionnaire = baker.make(Questionnaire) cls.evaluation.general_contribution.questionnaires.set([questionnaire]) - cls.contribution = mommy.make(Contribution, evaluation=cls.evaluation, questionnaires=[questionnaire], contributor=contributor) - cls.likert_question = mommy.make(Question, type=Question.LIKERT, questionnaire=questionnaire, order=2) + cls.contribution = baker.make(Contribution, evaluation=cls.evaluation, questionnaires=[questionnaire], contributor=contributor) + cls.likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire, order=2) cls.url = '/results/semester/%s/evaluation/%s' % (cls.semester.id, cls.evaluation.id) def test_many_answers_evaluation_no_warning(self): - mommy.make(RatingAnswerCounter, question=self.likert_question, contribution=self.contribution, answer=3, count=10) + baker.make(RatingAnswerCounter, question=self.likert_question, contribution=self.contribution, answer=3, count=10) page = 
self.app.get(self.url, user='manager', status=200) self.assertNotIn("Only a few participants answered these questions.", page) @@ -140,7 +140,7 @@ def test_zero_answers_evaluation_no_warning(self): self.assertNotIn("Only a few participants answered these questions.", page) def test_few_answers_evaluation_show_warning(self): - mommy.make(RatingAnswerCounter, question=self.likert_question, contribution=self.contribution, answer=3, count=3) + baker.make(RatingAnswerCounter, question=self.likert_question, contribution=self.contribution, answer=3, count=3) page = self.app.get(self.url, user='manager', status=200) self.assertIn("Only a few participants answered these questions.", page) @@ -151,37 +151,37 @@ class TestResultsSemesterEvaluationDetailView(WebTestWith200Check): @classmethod def setUpTestData(cls): - cls.semester = mommy.make(Semester, id=2) + cls.semester = baker.make(Semester, id=2) - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')], email="[email protected]") - contributor = mommy.make(UserProfile, username='contributor') - responsible = mommy.make(UserProfile, username='responsible') + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')], email="[email protected]") + contributor = baker.make(UserProfile, username='contributor') + responsible = baker.make(UserProfile, username='responsible') # Normal evaluation with responsible and contributor. - cls.evaluation = mommy.make(Evaluation, id=21, state='published', course=mommy.make(Course, semester=cls.semester)) + cls.evaluation = baker.make(Evaluation, id=21, state='published', course=baker.make(Course, semester=cls.semester)) - mommy.make(Contribution, evaluation=cls.evaluation, contributor=responsible, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) - cls.contribution = mommy.make(Contribution, evaluation=cls.evaluation, contributor=contributor, can_edit=True) + baker.make(Contribution, evaluation=cls.evaluation, contributor=responsible, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) + cls.contribution = baker.make(Contribution, evaluation=cls.evaluation, contributor=contributor, can_edit=True) def test_questionnaire_ordering(self): - top_questionnaire = mommy.make(Questionnaire, type=Questionnaire.TOP) - contributor_questionnaire = mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) - bottom_questionnaire = mommy.make(Questionnaire, type=Questionnaire.BOTTOM) + top_questionnaire = baker.make(Questionnaire, type=Questionnaire.TOP) + contributor_questionnaire = baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) + bottom_questionnaire = baker.make(Questionnaire, type=Questionnaire.BOTTOM) - top_heading_question = mommy.make(Question, type=Question.HEADING, questionnaire=top_questionnaire, order=0) - top_likert_question = mommy.make(Question, type=Question.LIKERT, questionnaire=top_questionnaire, order=1) + top_heading_question = baker.make(Question, type=Question.HEADING, questionnaire=top_questionnaire, order=0) + top_likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=top_questionnaire, order=1) - contributor_likert_question = mommy.make(Question, type=Question.LIKERT, questionnaire=contributor_questionnaire) + contributor_likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=contributor_questionnaire) - bottom_heading_question = mommy.make(Question, type=Question.HEADING, questionnaire=bottom_questionnaire, order=0) - bottom_likert_question = 
mommy.make(Question, type=Question.LIKERT, questionnaire=bottom_questionnaire, order=1) + bottom_heading_question = baker.make(Question, type=Question.HEADING, questionnaire=bottom_questionnaire, order=0) + bottom_likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=bottom_questionnaire, order=1) self.evaluation.general_contribution.questionnaires.set([top_questionnaire, bottom_questionnaire]) self.contribution.questionnaires.set([contributor_questionnaire]) - mommy.make(RatingAnswerCounter, question=top_likert_question, contribution=self.evaluation.general_contribution, answer=2, count=100) - mommy.make(RatingAnswerCounter, question=contributor_likert_question, contribution=self.contribution, answer=1, count=100) - mommy.make(RatingAnswerCounter, question=bottom_likert_question, contribution=self.evaluation.general_contribution, answer=3, count=100) + baker.make(RatingAnswerCounter, question=top_likert_question, contribution=self.evaluation.general_contribution, answer=2, count=100) + baker.make(RatingAnswerCounter, question=contributor_likert_question, contribution=self.contribution, answer=1, count=100) + baker.make(RatingAnswerCounter, question=bottom_likert_question, contribution=self.evaluation.general_contribution, answer=3, count=100) content = self.app.get(self.url, user='manager').body.decode() @@ -194,16 +194,16 @@ def test_questionnaire_ordering(self): self.assertTrue(top_heading_index < top_likert_index < contributor_likert_index < bottom_heading_index < bottom_likert_index) def test_heading_question_filtering(self): - contributor = mommy.make(UserProfile) - questionnaire = mommy.make(Questionnaire) + contributor = baker.make(UserProfile) + questionnaire = baker.make(Questionnaire) - heading_question_0 = mommy.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=0) - heading_question_1 = mommy.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=1) - likert_question = mommy.make(Question, type=Question.LIKERT, questionnaire=questionnaire, order=2) - heading_question_2 = mommy.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=3) + heading_question_0 = baker.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=0) + heading_question_1 = baker.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=1) + likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire, order=2) + heading_question_2 = baker.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=3) - contribution = mommy.make(Contribution, evaluation=self.evaluation, questionnaires=[questionnaire], contributor=contributor) - mommy.make(RatingAnswerCounter, question=likert_question, contribution=contribution, answer=3, count=100) + contribution = baker.make(Contribution, evaluation=self.evaluation, questionnaires=[questionnaire], contributor=contributor) + baker.make(RatingAnswerCounter, question=likert_question, contribution=contribution, answer=3, count=100) page = self.app.get(self.url, user='manager') @@ -223,7 +223,7 @@ def test_default_view_is_public(self): self.assertEqual(page_without_get_parameter.body, page_with_random_get_parameter.body) def test_wrong_state(self): - evaluation = mommy.make(Evaluation, state='reviewed', course=mommy.make(Course, semester=self.semester)) + evaluation = baker.make(Evaluation, state='reviewed', course=baker.make(Course, semester=self.semester)) url = '/results/semester/%s/evaluation/%s' % (self.semester.id, 
evaluation.id) self.app.get(url, user='student', status=403) @@ -231,20 +231,20 @@ def test_wrong_state(self): class TestResultsSemesterEvaluationDetailViewFewVoters(WebTest): @classmethod def setUpTestData(cls): - cls.semester = mommy.make(Semester, id=2) - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')], email="[email protected]") - responsible = mommy.make(UserProfile, username='responsible') - cls.student1 = mommy.make(UserProfile, username='student') - cls.student2 = mommy.make(UserProfile) - students = mommy.make(UserProfile, _quantity=10) + cls.semester = baker.make(Semester, id=2) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')], email="[email protected]") + responsible = baker.make(UserProfile, username='responsible') + cls.student1 = baker.make(UserProfile, username='student') + cls.student2 = baker.make(UserProfile) + students = baker.make(UserProfile, _quantity=10) students.extend([cls.student1, cls.student2]) - cls.evaluation = mommy.make(Evaluation, id=22, state='in_evaluation', course=mommy.make(Course, semester=cls.semester), participants=students) - questionnaire = mommy.make(Questionnaire) - cls.question_grade = mommy.make(Question, questionnaire=questionnaire, type=Question.GRADE) - mommy.make(Question, questionnaire=questionnaire, type=Question.LIKERT) + cls.evaluation = baker.make(Evaluation, id=22, state='in_evaluation', course=baker.make(Course, semester=cls.semester), participants=students) + questionnaire = baker.make(Questionnaire) + cls.question_grade = baker.make(Question, questionnaire=questionnaire, type=Question.GRADE) + baker.make(Question, questionnaire=questionnaire, type=Question.LIKERT) cls.evaluation.general_contribution.questionnaires.set([questionnaire]) - cls.responsible_contribution = mommy.make(Contribution, contributor=responsible, evaluation=cls.evaluation, questionnaires=[questionnaire]) + cls.responsible_contribution = baker.make(Contribution, contributor=responsible, evaluation=cls.evaluation, questionnaires=[questionnaire]) def setUp(self): self.evaluation = Evaluation.objects.get(pk=self.evaluation.pk) @@ -300,22 +300,22 @@ def test_answer_visibility_two_voters(self): class TestResultsSemesterEvaluationDetailViewPrivateEvaluation(WebTest): @patch('evap.results.templatetags.results_templatetags.get_grade_color', new=lambda x: (0, 0, 0)) def test_private_evaluation(self): - semester = mommy.make(Semester) - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')], email="[email protected]") - student = mommy.make(UserProfile, username="student", email="[email protected]") - student_external = mommy.make(UserProfile, username="student_external") - contributor = mommy.make(UserProfile, username="contributor", email="[email protected]") - responsible = mommy.make(UserProfile, username="responsible", email="[email protected]") - responsible_contributor = mommy.make(UserProfile, username="responsible_contributor", email="[email protected]") - test1 = mommy.make(UserProfile, username="test1") - test2 = mommy.make(UserProfile, username="test2") - mommy.make(UserProfile, username="random", email="[email protected]") - degree = mommy.make(Degree) - course = mommy.make(Course, semester=semester, degrees=[degree], is_private=True, responsibles=[responsible, responsible_contributor]) - private_evaluation = mommy.make(Evaluation, course=course, state='published', participants=[student, student_external, test1, test2], voters=[test1, test2]) - 
private_evaluation.general_contribution.questionnaires.set([mommy.make(Questionnaire)]) - mommy.make(Contribution, evaluation=private_evaluation, contributor=responsible_contributor, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) - mommy.make(Contribution, evaluation=private_evaluation, contributor=contributor, can_edit=True) + semester = baker.make(Semester) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')], email="[email protected]") + student = baker.make(UserProfile, username="student", email="[email protected]") + student_external = baker.make(UserProfile, username="student_external") + contributor = baker.make(UserProfile, username="contributor", email="[email protected]") + responsible = baker.make(UserProfile, username="responsible", email="[email protected]") + responsible_contributor = baker.make(UserProfile, username="responsible_contributor", email="[email protected]") + test1 = baker.make(UserProfile, username="test1") + test2 = baker.make(UserProfile, username="test2") + baker.make(UserProfile, username="random", email="[email protected]") + degree = baker.make(Degree) + course = baker.make(Course, semester=semester, degrees=[degree], is_private=True, responsibles=[responsible, responsible_contributor]) + private_evaluation = baker.make(Evaluation, course=course, state='published', participants=[student, student_external, test1, test2], voters=[test1, test2]) + private_evaluation.general_contribution.questionnaires.set([baker.make(Questionnaire)]) + baker.make(Contribution, evaluation=private_evaluation, contributor=responsible_contributor, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) + baker.make(Contribution, evaluation=private_evaluation, contributor=contributor, can_edit=True) url = '/results/' self.assertNotIn(private_evaluation.full_name, self.app.get(url, user='random')) @@ -342,7 +342,7 @@ class TestResultsTextanswerVisibilityForManager(WebTest): @classmethod def setUpTestData(cls): manager_group = Group.objects.get(name="Manager") - mommy.make(UserProfile, username="manager", groups=[manager_group]) + baker.make(UserProfile, username="manager", groups=[manager_group]) def test_textanswer_visibility_for_manager_before_publish(self): evaluation = Evaluation.objects.get(id=1) @@ -505,19 +505,19 @@ def test_textanswer_visibility_info_for_proxy_user(self): class TestResultsOtherContributorsListOnExportView(WebTest): @classmethod def setUpTestData(cls): - cls.semester = mommy.make(Semester, id=2) - responsible = mommy.make(UserProfile, username='responsible') - cls.evaluation = mommy.make(Evaluation, id=21, state='published', course=mommy.make(Course, semester=cls.semester, responsibles=[responsible])) + cls.semester = baker.make(Semester, id=2) + responsible = baker.make(UserProfile, username='responsible') + cls.evaluation = baker.make(Evaluation, id=21, state='published', course=baker.make(Course, semester=cls.semester, responsibles=[responsible])) - questionnaire = mommy.make(Questionnaire) - mommy.make(Question, questionnaire=questionnaire, type=Question.LIKERT) + questionnaire = baker.make(Questionnaire) + baker.make(Question, questionnaire=questionnaire, type=Question.LIKERT) cls.evaluation.general_contribution.questionnaires.set([questionnaire]) - mommy.make(Contribution, evaluation=cls.evaluation, contributor=responsible, questionnaires=[questionnaire], can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) - cls.other_contributor_1 = mommy.make(UserProfile, 
username='other contributor 1') - mommy.make(Contribution, evaluation=cls.evaluation, contributor=cls.other_contributor_1, questionnaires=[questionnaire], textanswer_visibility=Contribution.OWN_TEXTANSWERS) - cls.other_contributor_2 = mommy.make(UserProfile, username='other contributor 2') - mommy.make(Contribution, evaluation=cls.evaluation, contributor=cls.other_contributor_2, questionnaires=[questionnaire], textanswer_visibility=Contribution.OWN_TEXTANSWERS) + baker.make(Contribution, evaluation=cls.evaluation, contributor=responsible, questionnaires=[questionnaire], can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) + cls.other_contributor_1 = baker.make(UserProfile, username='other contributor 1') + baker.make(Contribution, evaluation=cls.evaluation, contributor=cls.other_contributor_1, questionnaires=[questionnaire], textanswer_visibility=Contribution.OWN_TEXTANSWERS) + cls.other_contributor_2 = baker.make(UserProfile, username='other contributor 2') + baker.make(Contribution, evaluation=cls.evaluation, contributor=cls.other_contributor_2, questionnaires=[questionnaire], textanswer_visibility=Contribution.OWN_TEXTANSWERS) def test_contributor_list(self): url = '/results/semester/{}/evaluation/{}?view=export'.format(self.semester.id, self.evaluation.id) @@ -532,7 +532,7 @@ class TestResultsTextanswerVisibilityForExportView(WebTest): @classmethod def setUpTestData(cls): manager_group = Group.objects.get(name="Manager") - cls.manager = mommy.make(UserProfile, username="manager", groups=[manager_group]) + cls.manager = baker.make(UserProfile, username="manager", groups=[manager_group]) def test_textanswer_visibility_for_responsible(self): page = self.app.get("/results/semester/1/evaluation/1?view=export", user='responsible') @@ -654,19 +654,19 @@ def test_textanswer_visibility_for_manager_contributor(self): class TestArchivedResults(WebTest): @classmethod def setUpTestData(cls): - cls.semester = mommy.make(Semester) - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')], email="[email protected]") - mommy.make(UserProfile, username='reviewer', groups=[Group.objects.get(name='Reviewer')], email="[email protected]") - student = mommy.make(UserProfile, username="student", email="[email protected]") - student_external = mommy.make(UserProfile, username="student_external") - contributor = mommy.make(UserProfile, username="contributor", email="[email protected]") - responsible = mommy.make(UserProfile, username="responsible", email="[email protected]") - - course = mommy.make(Course, semester=cls.semester, degrees=[mommy.make(Degree)], responsibles=[responsible]) - cls.evaluation = mommy.make(Evaluation, course=course, state='published', participants=[student, student_external], voters=[student, student_external]) - cls.evaluation.general_contribution.questionnaires.set([mommy.make(Questionnaire)]) - cls.contribution = mommy.make(Contribution, evaluation=cls.evaluation, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS, contributor=responsible) - cls.contribution = mommy.make(Contribution, evaluation=cls.evaluation, contributor=contributor) + cls.semester = baker.make(Semester) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')], email="[email protected]") + baker.make(UserProfile, username='reviewer', groups=[Group.objects.get(name='Reviewer')], email="[email protected]") + student = baker.make(UserProfile, username="student", email="[email protected]") + student_external = 
baker.make(UserProfile, username="student_external") + contributor = baker.make(UserProfile, username="contributor", email="[email protected]") + responsible = baker.make(UserProfile, username="responsible", email="[email protected]") + + course = baker.make(Course, semester=cls.semester, degrees=[baker.make(Degree)], responsibles=[responsible]) + cls.evaluation = baker.make(Evaluation, course=course, state='published', participants=[student, student_external], voters=[student, student_external]) + cls.evaluation.general_contribution.questionnaires.set([baker.make(Questionnaire)]) + cls.contribution = baker.make(Contribution, evaluation=cls.evaluation, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS, contributor=responsible) + cls.contribution = baker.make(Contribution, evaluation=cls.evaluation, contributor=contributor) @patch('evap.results.templatetags.results_templatetags.get_grade_color', new=lambda x: (0, 0, 0)) def test_unarchived_results(self): diff --git a/evap/rewards/tests/test_tools.py b/evap/rewards/tests/test_tools.py --- a/evap/rewards/tests/test_tools.py +++ b/evap/rewards/tests/test_tools.py @@ -1,7 +1,7 @@ from django.test import TestCase, override_settings from django.urls import reverse -from model_mommy import mommy +from model_bakery import baker from evap.evaluation.models import Course, Evaluation, NO_ANSWER, Question, Questionnaire, UserProfile from evap.evaluation.tests.tools import WebTest @@ -19,11 +19,11 @@ class TestGrantRewardPoints(WebTest): @classmethod def setUpTestData(cls): - cls.student = mommy.make(UserProfile, username='student', email='[email protected]') - cls.evaluation = mommy.make(Evaluation, state='in_evaluation', participants=[cls.student]) + cls.student = baker.make(UserProfile, username='student', email='[email protected]') + cls.evaluation = baker.make(Evaluation, state='in_evaluation', participants=[cls.student]) - questionnaire = mommy.make(Questionnaire) - mommy.make(Question, questionnaire=questionnaire, type=Question.GRADE) + questionnaire = baker.make(Questionnaire) + baker.make(Question, questionnaire=questionnaire, type=Question.GRADE) cls.evaluation.general_contribution.questionnaires.set([questionnaire]) def setUp(self): @@ -45,20 +45,20 @@ def test_everything_works(self): def test_semester_activated_not_all_evaluations(self): SemesterActivation.objects.create(semester=self.evaluation.course.semester, is_active=True) - mommy.make(Evaluation, course=mommy.make(Course, semester=self.evaluation.course.semester), participants=[self.student]) + baker.make(Evaluation, course=baker.make(Course, semester=self.evaluation.course.semester), participants=[self.student]) self.form.submit() self.assertEqual(1, reward_points_of_user(self.student)) def test_already_got_grant_objects_but_points_missing(self): SemesterActivation.objects.create(semester=self.evaluation.course.semester, is_active=True) - mommy.make(RewardPointGranting, user_profile=self.student, value=0, semester=self.evaluation.course.semester) + baker.make(RewardPointGranting, user_profile=self.student, value=0, semester=self.evaluation.course.semester) self.form.submit() self.assertEqual(3, reward_points_of_user(self.student)) self.assertEqual(2, RewardPointGranting.objects.filter(user_profile=self.student, semester=self.evaluation.course.semester).count()) def test_already_got_enough_points(self): SemesterActivation.objects.create(semester=self.evaluation.course.semester, is_active=True) - mommy.make(RewardPointGranting, user_profile=self.student, value=3, 
semester=self.evaluation.course.semester) + baker.make(RewardPointGranting, user_profile=self.student, value=3, semester=self.evaluation.course.semester) self.form.submit() self.assertEqual(3, reward_points_of_user(self.student)) self.assertEqual(1, RewardPointGranting.objects.filter(user_profile=self.student, semester=self.evaluation.course.semester).count()) @@ -72,10 +72,10 @@ def test_already_got_enough_points(self): class TestGrantRewardPointsParticipationChange(TestCase): @classmethod def setUpTestData(cls): - cls.evaluation = mommy.make(Evaluation) - already_evaluated = mommy.make(Evaluation, course=mommy.make(Course, semester=cls.evaluation.course.semester)) + cls.evaluation = baker.make(Evaluation) + already_evaluated = baker.make(Evaluation, course=baker.make(Course, semester=cls.evaluation.course.semester)) SemesterActivation.objects.create(semester=cls.evaluation.course.semester, is_active=True) - cls.student = mommy.make(UserProfile, username="student", email="[email protected]", + cls.student = baker.make(UserProfile, username="student", email="[email protected]", evaluations_participating_in=[cls.evaluation, already_evaluated], evaluations_voted_for=[already_evaluated]) def test_participant_removed_from_evaluation(self): diff --git a/evap/rewards/tests/test_views.py b/evap/rewards/tests/test_views.py --- a/evap/rewards/tests/test_views.py +++ b/evap/rewards/tests/test_views.py @@ -4,7 +4,7 @@ from django.urls import reverse from django_webtest import WebTest -from model_mommy import mommy +from model_bakery import baker from evap.evaluation.models import UserProfile, Evaluation, Semester from evap.evaluation.tests.tools import WebTestWith200Check @@ -18,18 +18,18 @@ class TestEventDeleteView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) def test_deletion_success(self): - event = mommy.make(RewardPointRedemptionEvent) + event = baker.make(RewardPointRedemptionEvent) response = self.app.post(self.url, params={'event_id': event.pk}, user='manager') self.assertEqual(response.status_code, 200) self.assertFalse(RewardPointRedemptionEvent.objects.filter(pk=event.pk).exists()) def test_deletion_failure(self): """ try to delete event that can not be deleted, because people already redeemed points """ - event = mommy.make(RewardPointRedemptionEvent) - mommy.make(RewardPointRedemption, value=1, event=event) + event = baker.make(RewardPointRedemptionEvent) + baker.make(RewardPointRedemption, value=1, event=event) response = self.app.post(self.url, params={'event_id': event.pk}, user='manager', expect_errors=True) self.assertEqual(response.status_code, 400) @@ -42,11 +42,11 @@ class TestIndexView(WebTest): @classmethod def setUpTestData(cls): - cls.student = mommy.make(UserProfile, username='student', email='[email protected]') - mommy.make(Evaluation, participants=[cls.student]) - mommy.make(RewardPointGranting, user_profile=cls.student, value=5) - mommy.make(RewardPointRedemptionEvent, pk=1, redeem_end_date=date.today() + timedelta(days=1)) - mommy.make(RewardPointRedemptionEvent, pk=2, redeem_end_date=date.today() + timedelta(days=1)) + cls.student = baker.make(UserProfile, username='student', email='[email protected]') + baker.make(Evaluation, participants=[cls.student]) + baker.make(RewardPointGranting, user_profile=cls.student, value=5) + baker.make(RewardPointRedemptionEvent, pk=1, 
redeem_end_date=date.today() + timedelta(days=1)) + baker.make(RewardPointRedemptionEvent, pk=2, redeem_end_date=date.today() + timedelta(days=1)) def test_redeem_all_points(self): response = self.app.get(self.url, user='student') @@ -84,9 +84,9 @@ class TestEventsView(WebTestWith200Check): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - mommy.make(RewardPointRedemptionEvent, redeem_end_date=date.today() + timedelta(days=1)) - mommy.make(RewardPointRedemptionEvent, redeem_end_date=date.today() + timedelta(days=1)) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + baker.make(RewardPointRedemptionEvent, redeem_end_date=date.today() + timedelta(days=1)) + baker.make(RewardPointRedemptionEvent, redeem_end_date=date.today() + timedelta(days=1)) class TestEventCreateView(WebTest): @@ -95,7 +95,7 @@ class TestEventCreateView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) def test_create_redemption_event(self): """ submits a newly created redemption event and checks that the event has been created """ @@ -118,8 +118,8 @@ class TestEventEditView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - cls.event = mommy.make(RewardPointRedemptionEvent, pk=1, name='old name') + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + cls.event = baker.make(RewardPointRedemptionEvent, pk=1, name='old name') def test_edit_redemption_event(self): """ submits a newly created redemption event and checks that the event has been created """ @@ -139,9 +139,9 @@ class TestExportView(WebTestWith200Check): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - event = mommy.make(RewardPointRedemptionEvent, pk=1, redeem_end_date=date.today() + timedelta(days=1)) - mommy.make(RewardPointRedemption, value=1, event=event) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + event = baker.make(RewardPointRedemptionEvent, pk=1, redeem_end_date=date.today() + timedelta(days=1)) + baker.make(RewardPointRedemption, value=1, event=event) class TestSemesterActivationView(WebTest): @@ -150,15 +150,15 @@ class TestSemesterActivationView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - cls.semester = mommy.make(Semester, pk=1) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + cls.semester = baker.make(Semester, pk=1) def test_activate(self): - mommy.make(SemesterActivation, semester=self.semester, is_active=False) + baker.make(SemesterActivation, semester=self.semester, is_active=False) self.app.post(self.url + 'on', user='manager') self.assertTrue(is_semester_activated(self.semester)) def test_deactivate(self): - mommy.make(SemesterActivation, semester=self.semester, is_active=True) + baker.make(SemesterActivation, semester=self.semester, is_active=True) self.app.post(self.url + 'off', user='manager') self.assertFalse(is_semester_activated(self.semester)) diff --git a/evap/staff/tests/test_forms.py b/evap/staff/tests/test_forms.py --- a/evap/staff/tests/test_forms.py +++ 
b/evap/staff/tests/test_forms.py @@ -1,7 +1,7 @@ from unittest.mock import patch from django.forms.models import inlineformset_factory from django.test import TestCase -from model_mommy import mommy +from model_bakery import baker from evap.evaluation.models import (Contribution, Course, Degree, EmailTemplate, Evaluation, Question, Questionnaire, Semester, UserProfile) @@ -14,9 +14,9 @@ class QuestionnaireFormTest(TestCase): def test_force_highest_order(self): - mommy.make(Questionnaire, order=45, type=Questionnaire.TOP) + baker.make(Questionnaire, order=45, type=Questionnaire.TOP) - question = mommy.make(Question) + question = baker.make(Question) data = { 'description_de': 'English description', @@ -37,10 +37,10 @@ def test_force_highest_order(self): self.assertEqual(questionnaire.order, 46) def test_automatic_order_correction_on_type_change(self): - mommy.make(Questionnaire, order=72, type=Questionnaire.BOTTOM) + baker.make(Questionnaire, order=72, type=Questionnaire.BOTTOM) - questionnaire = mommy.make(Questionnaire, order=7, type=Questionnaire.TOP) - question = mommy.make(Question) + questionnaire = baker.make(Questionnaire, order=7, type=Questionnaire.TOP) + question = baker.make(Question) data = { 'description_de': questionnaire.description_de, @@ -82,8 +82,8 @@ def test_user_form(self): """ Tests the UserForm with one valid and one invalid input dataset. """ - user = mommy.make(UserProfile) - another_user = mommy.make(UserProfile) + user = baker.make(UserProfile) + another_user = baker.make(UserProfile) data = {"username": "mklqoep50x2", "email": "[email protected]"} form = UserForm(instance=user, data=data) self.assertTrue(form.is_valid()) @@ -98,7 +98,7 @@ def test_user_with_same_email(self): that already exist in the database Regression test for #590 """ - user = mommy.make(UserProfile, email="[email protected]") + user = baker.make(UserProfile, email="[email protected]") data = {"username": "uiae", "email": user.email} form = UserForm(data=data) @@ -117,7 +117,7 @@ def test_user_with_same_username(self): Tests whether the user form correctly handles usernames that already exist in the database """ - user = mommy.make(UserProfile) + user = baker.make(UserProfile) data = {"username": user.username} form = UserForm(data=data) @@ -132,8 +132,8 @@ def test_user_with_same_username(self): self.assertTrue(form.is_valid()) def test_user_cannot_be_removed_from_evaluation_already_voted_for(self): - student = mommy.make(UserProfile) - mommy.make(Evaluation, participants=[student], voters=[student]) + student = baker.make(UserProfile) + baker.make(Evaluation, participants=[student], voters=[student]) form_data = get_form_data_from_instance(UserForm, student) form_data["evaluations_participating_in"] = [] @@ -146,7 +146,7 @@ def test_user_cannot_be_removed_from_evaluation_already_voted_for(self): class SingleResultFormTests(TestCase): def test_single_result_form_saves_participant_and_voter_count(self): - course = mommy.make(Course) + course = baker.make(Course) evaluation = Evaluation(course=course, is_single_result=True) form_data = { "name_de": "qwertz", @@ -163,7 +163,7 @@ def test_single_result_form_saves_participant_and_voter_count(self): form = SingleResultForm(form_data, instance=evaluation, semester=evaluation.course.semester) self.assertTrue(form.is_valid()) - form.save(user=mommy.make(UserProfile)) + form.save(user=baker.make(UserProfile)) evaluation = Evaluation.objects.get() self.assertEqual(evaluation.num_participants, 10) @@ -175,11 +175,11 @@ def 
test_contribution_form_set(self): """ Tests the ContributionFormset with various input data sets. """ - evaluation = mommy.make(Evaluation) - user1 = mommy.make(UserProfile) - user2 = mommy.make(UserProfile) - mommy.make(UserProfile) - questionnaire = mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) + evaluation = baker.make(Evaluation) + user1 = baker.make(UserProfile) + user2 = baker.make(UserProfile) + baker.make(UserProfile) + questionnaire = baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) ContributionFormset = inlineformset_factory(Evaluation, Contribution, formset=ContributionFormSet, form=ContributionForm, extra=0) @@ -216,11 +216,11 @@ def test_dont_validate_deleted_contributions(self): Tests whether contributions marked for deletion are validated. Regression test for #415 and #244 """ - evaluation = mommy.make(Evaluation) - user1 = mommy.make(UserProfile) - user2 = mommy.make(UserProfile) - mommy.make(UserProfile) - questionnaire = mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) + evaluation = baker.make(Evaluation) + user1 = baker.make(UserProfile) + user2 = baker.make(UserProfile) + baker.make(UserProfile) + questionnaire = baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) contribution_formset = inlineformset_factory(Evaluation, Contribution, formset=ContributionFormSet, form=ContributionForm, extra=0) @@ -267,9 +267,9 @@ def test_deleted_empty_contribution_does_not_crash(self): Similarly, when removing the contribution formset of an existing contributor, and entering some data in the extra formset, it should not crash. Regression test for #1057 """ - evaluation = mommy.make(Evaluation) - user1 = mommy.make(UserProfile) - questionnaire = mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) + evaluation = baker.make(Evaluation) + user1 = baker.make(UserProfile) + questionnaire = baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) contribution_formset = inlineformset_factory(Evaluation, Contribution, formset=ContributionFormSet, form=ContributionForm, extra=0) @@ -309,10 +309,10 @@ def test_take_deleted_contributions_into_account(self): when the same contributor got added again in the same formset. Regression test for #415 """ - evaluation = mommy.make(Evaluation) - user1 = mommy.make(UserProfile) - questionnaire = mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) - contribution1 = mommy.make(Contribution, evaluation=evaluation, contributor=user1, can_edit=True, + evaluation = baker.make(Evaluation) + user1 = baker.make(UserProfile) + questionnaire = baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) + contribution1 = baker.make(Contribution, evaluation=evaluation, contributor=user1, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS, questionnaires=[questionnaire]) contribution_formset = inlineformset_factory(Evaluation, Contribution, formset=ContributionFormSet, form=ContributionForm, extra=0) @@ -346,7 +346,7 @@ def test_there_can_be_no_contributions(self): Tests that there can also be no contribution Regression test for #1347 """ - evaluation = mommy.make(Evaluation) + evaluation = baker.make(Evaluation) contribution_formset = inlineformset_factory(Evaluation, Contribution, formset=ContributionFormSet, form=ContributionForm, extra=0) data = to_querydict({ @@ -364,13 +364,13 @@ def test_hidden_and_managers_only(self): contribution of the Evaluation, and that manager only questionnaires are always shown. Regression test for #593. 
""" - evaluation = mommy.make(Evaluation) - questionnaire = mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR, visibility=Questionnaire.EDITORS) - questionnaire_hidden = mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR, visibility=Questionnaire.HIDDEN) - questionnaire_managers_only = mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR, visibility=Questionnaire.MANAGERS) + evaluation = baker.make(Evaluation) + questionnaire = baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR, visibility=Questionnaire.EDITORS) + questionnaire_hidden = baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR, visibility=Questionnaire.HIDDEN) + questionnaire_managers_only = baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR, visibility=Questionnaire.MANAGERS) # The normal and managers_only questionnaire should be shown. - contribution1 = mommy.make(Contribution, evaluation=evaluation, contributor=mommy.make(UserProfile), questionnaires=[]) + contribution1 = baker.make(Contribution, evaluation=evaluation, contributor=baker.make(UserProfile), questionnaires=[]) inline_contribution_formset = inlineformset_factory(Evaluation, Contribution, formset=ContributionFormSet, form=ContributionForm, extra=1) formset = inline_contribution_formset(instance=evaluation, form_kwargs={'evaluation': evaluation}) @@ -390,8 +390,8 @@ def test_hidden_and_managers_only(self): self.assertEqual(expected, set(formset.forms[1].fields['questionnaires'].queryset.all())) def test_staff_can_select_proxy_user(self): - proxy_user = mommy.make(UserProfile, is_proxy_user=True) - course = mommy.make(Course, semester=mommy.make(Semester)) + proxy_user = baker.make(UserProfile, is_proxy_user=True) + course = baker.make(Course, semester=baker.make(Semester)) form = CourseForm(instance=course) self.assertIn(proxy_user, form.fields['responsibles'].queryset) @@ -402,13 +402,13 @@ class ContributionFormset775RegressionTests(TestCase): """ @classmethod def setUpTestData(cls): - cls.evaluation = mommy.make(Evaluation, name_en="some evaluation") - cls.user1 = mommy.make(UserProfile) - cls.user2 = mommy.make(UserProfile) - mommy.make(UserProfile) - cls.questionnaire = mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) - cls.contribution1 = mommy.make(Contribution, contributor=cls.user1, evaluation=cls.evaluation, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) - cls.contribution2 = mommy.make(Contribution, contributor=cls.user2, evaluation=cls.evaluation) + cls.evaluation = baker.make(Evaluation, name_en="some evaluation") + cls.user1 = baker.make(UserProfile) + cls.user2 = baker.make(UserProfile) + baker.make(UserProfile) + cls.questionnaire = baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) + cls.contribution1 = baker.make(Contribution, contributor=cls.user1, evaluation=cls.evaluation, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) + cls.contribution2 = baker.make(Contribution, contributor=cls.user2, evaluation=cls.evaluation) cls.contribution_formset = inlineformset_factory(Evaluation, Contribution, formset=ContributionFormSet, form=ContributionForm, extra=0) @@ -489,7 +489,7 @@ def test_handle_multivaluedicts(self): self.data['contributions-0-contributor'] = self.user2.pk self.data['contributions-1-contributor'] = self.user1.pk - questionnaire = mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) + questionnaire = baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) self.data.appendlist('contributions-0-questionnaires', questionnaire.pk) formset = 
self.contribution_formset(instance=self.evaluation, form_kwargs={'evaluation': self.evaluation}, data=self.data) formset.save() @@ -502,7 +502,7 @@ def test_course_form_same_name(self): Test whether giving a course the same name as another course in the same semester in the course edit form is invalid. """ - courses = mommy.make(Course, semester=mommy.make(Semester), responsibles=[mommy.make(UserProfile)], degrees=[mommy.make(Degree)], _quantity=2) + courses = baker.make(Course, semester=baker.make(Semester), responsibles=[baker.make(UserProfile)], degrees=[baker.make(Degree)], _quantity=2) form_data = get_form_data_from_instance(CourseForm, courses[0]) form = CourseForm(form_data, instance=courses[0]) @@ -515,7 +515,7 @@ def test_uniqueness_constraint_error_shown(self): """ Tests whether errors being caused by a uniqueness constraint are shown in the form """ - courses = mommy.make(Course, semester=mommy.make(Semester), responsibles=[mommy.make(UserProfile)], degrees=[mommy.make(Degree)], _quantity=2) + courses = baker.make(Course, semester=baker.make(Semester), responsibles=[baker.make(UserProfile)], degrees=[baker.make(Degree)], _quantity=2) form_data = get_form_data_from_instance(CourseForm, courses[1]) form_data["name_de"] = courses[0].name_de @@ -526,8 +526,8 @@ def test_uniqueness_constraint_error_shown(self): self.assertEqual(form.errors['name_de'], ['Course with this Semester and Name (german) already exists.']) def test_that_proxy_user_can_be_responsible(self): - course = mommy.make(Course, semester=mommy.make(Semester), degrees=[mommy.make(Degree)]) - proxy = mommy.make(UserProfile, is_proxy_user=True, is_active=True) + course = baker.make(Course, semester=baker.make(Semester), degrees=[baker.make(Degree)]) + proxy = baker.make(UserProfile, is_proxy_user=True, is_active=True) form = CourseForm(instance=course) self.assertIn(proxy, form.fields['responsibles'].queryset) @@ -538,11 +538,11 @@ def test_evaluation_form_same_name(self): Test whether giving an evaluation the same name as another evaluation in the same course in the evaluation edit form is invalid. """ - course = mommy.make(Course, degrees=[mommy.make(Degree)]) - evaluation1 = mommy.make(Evaluation, course=course, name_de="Evaluierung 1", name_en="Evaluation 1") - evaluation2 = mommy.make(Evaluation, course=course, name_de="Evaluierung 2", name_en="Evaluation 2") - evaluation1.general_contribution.questionnaires.set([mommy.make(Questionnaire)]) - evaluation2.general_contribution.questionnaires.set([mommy.make(Questionnaire)]) + course = baker.make(Course, degrees=[baker.make(Degree)]) + evaluation1 = baker.make(Evaluation, course=course, name_de="Evaluierung 1", name_en="Evaluation 1") + evaluation2 = baker.make(Evaluation, course=course, name_de="Evaluierung 2", name_en="Evaluation 2") + evaluation1.general_contribution.questionnaires.set([baker.make(Questionnaire)]) + evaluation2.general_contribution.questionnaires.set([baker.make(Questionnaire)]) form_data = get_form_data_from_instance(EvaluationForm, evaluation1) form_data["vote_start_datetime"] = "2098-01-01" # needed to fix the form @@ -572,8 +572,8 @@ def test_contributor_evaluation_form_date_validation(self): Tests validity of various start/end date combinations in the two evaluation edit forms. 
""" - evaluation = mommy.make(Evaluation) - evaluation.general_contribution.questionnaires.set([mommy.make(Questionnaire)]) + evaluation = baker.make(Evaluation) + evaluation.general_contribution.questionnaires.set([baker.make(Questionnaire)]) # contributors: start date does not have to be in the future self.helper_date_validation(ContributorEvaluationForm, "1999-01-01", "2099-01-01", True) @@ -600,9 +600,9 @@ def test_uniqueness_constraint_error_shown(self): """ Tests whether errors being caused by a uniqueness constraint are shown in the form """ - course = mommy.make(Course) - evaluation1 = mommy.make(Evaluation, course=course, name_de="Evaluierung 1", name_en="Evaluation 1") - evaluation2 = mommy.make(Evaluation, course=course, name_de="Evaluierung 2", name_en="Evaluation 2") + course = baker.make(Course) + evaluation1 = baker.make(Evaluation, course=course, name_de="Evaluierung 1", name_en="Evaluation 1") + evaluation2 = baker.make(Evaluation, course=course, name_de="Evaluierung 2", name_en="Evaluation 2") form_data = get_form_data_from_instance(EvaluationForm, evaluation2) form_data["name_de"] = evaluation1.name_de @@ -613,9 +613,9 @@ def test_uniqueness_constraint_error_shown(self): self.assertEqual(form.errors['name_de'], ['Evaluation with this Course and Name (german) already exists.']) def test_voter_cannot_be_removed_from_evaluation(self): - student = mommy.make(UserProfile) - evaluation = mommy.make(Evaluation, course=mommy.make(Course, degrees=[mommy.make(Degree)]), participants=[student], voters=[student]) - evaluation.general_contribution.questionnaires.set([mommy.make(Questionnaire)]) + student = baker.make(UserProfile) + evaluation = baker.make(Evaluation, course=baker.make(Course, degrees=[baker.make(Degree)]), participants=[student], voters=[student]) + evaluation.general_contribution.questionnaires.set([baker.make(Questionnaire)]) form_data = get_form_data_from_instance(EvaluationForm, evaluation) form_data["participants"] = [] @@ -626,11 +626,11 @@ def test_voter_cannot_be_removed_from_evaluation(self): self.assertIn("Participants who already voted for the evaluation can't be removed", form.errors['participants'][0]) def test_course_change_updates_cache(self): - semester = mommy.make(Semester) - course1 = mommy.make(Course, semester=semester) - course2 = mommy.make(Course, semester=semester) - evaluation = mommy.make(Evaluation, course=course1) - evaluation.general_contribution.questionnaires.set([mommy.make(Questionnaire)]) + semester = baker.make(Semester) + course1 = baker.make(Course, semester=semester) + course2 = baker.make(Course, semester=semester) + evaluation = baker.make(Evaluation, course=course1) + evaluation.general_contribution.questionnaires.set([baker.make(Questionnaire)]) form_data = get_form_data_from_instance(EvaluationForm, evaluation) form = EvaluationForm(form_data, instance=evaluation, semester=semester) diff --git a/evap/staff/tests/test_importers.py b/evap/staff/tests/test_importers.py --- a/evap/staff/tests/test_importers.py +++ b/evap/staff/tests/test_importers.py @@ -3,7 +3,7 @@ from datetime import date, datetime from django.test import TestCase, override_settings from django.conf import settings -from model_mommy import mommy +from model_bakery import baker from evap.evaluation.models import Course, Degree, UserProfile, Semester, Evaluation, Contribution, CourseType from evap.staff.importers import UserImporter, EnrollmentImporter, ExcelImporter, PersonImporter @@ -60,7 +60,7 @@ def test_created_users(self): 
self.assertTrue(UserProfile.objects.filter(email="[email protected]").exists()) def test_duplicate_warning(self): - mommy.make(UserProfile, first_name='Lucilia', last_name="Manilium", email="[email protected]") + baker.make(UserProfile, first_name='Lucilia', last_name="Manilium", email="[email protected]") __, __, warnings_test, __ = UserImporter.process(self.valid_excel_content, test_run=True) __, __, warnings_no_test, __ = UserImporter.process(self.valid_excel_content, test_run=False) @@ -94,7 +94,7 @@ def test_invalid_file_error(self): self.assertEqual(UserProfile.objects.count(), original_user_count) def test_import_makes_inactive_user_active(self): - mommy.make(UserProfile, email="[email protected]", is_active=False) + baker.make(UserProfile, email="[email protected]", is_active=False) __, __, warnings_test, __ = UserImporter.process(self.valid_excel_content, test_run=True) self.assertIn("The following user is currently marked inactive and will be marked active upon importing: " @@ -117,11 +117,11 @@ class TestEnrollmentImporter(TestCase): @classmethod def setUpTestData(cls): - cls.semester = mommy.make(Semester) + cls.semester = baker.make(Semester) cls.vote_start_datetime = datetime(2017, 1, 10) cls.vote_end_date = date(2017, 3, 10) - mommy.make(CourseType, name_de="Seminar") - mommy.make(CourseType, name_de="Vorlesung") + baker.make(CourseType, name_de="Seminar") + baker.make(CourseType, name_de="Vorlesung") def test_valid_file_import(self): with open(self.filename_valid, "rb") as excel_file: @@ -224,15 +224,15 @@ def test_invalid_file_error(self): class TestPersonImporter(TestCase): @classmethod def setUpTestData(cls): - cls.participant1 = mommy.make(UserProfile, email="[email protected]") - cls.evaluation1 = mommy.make(Evaluation, participants=[cls.participant1]) - cls.contributor1 = mommy.make(UserProfile) - cls.contribution1 = mommy.make(Contribution, contributor=cls.contributor1, evaluation=cls.evaluation1) - - cls.participant2 = mommy.make(UserProfile, email="[email protected]") - cls.evaluation2 = mommy.make(Evaluation, participants=[cls.participant2]) - cls.contributor2 = mommy.make(UserProfile) - cls.contribution2 = mommy.make(Contribution, contributor=cls.contributor2, evaluation=cls.evaluation2) + cls.participant1 = baker.make(UserProfile, email="[email protected]") + cls.evaluation1 = baker.make(Evaluation, participants=[cls.participant1]) + cls.contributor1 = baker.make(UserProfile) + cls.contribution1 = baker.make(Contribution, contributor=cls.contributor1, evaluation=cls.evaluation1) + + cls.participant2 = baker.make(UserProfile, email="[email protected]") + cls.evaluation2 = baker.make(Evaluation, participants=[cls.participant2]) + cls.contributor2 = baker.make(UserProfile) + cls.contribution2 = baker.make(Contribution, contributor=cls.contributor2, evaluation=cls.evaluation2) def test_import_existing_contributor(self): self.assertEqual(self.evaluation1.contributions.count(), 2) diff --git a/evap/staff/tests/test_tools.py b/evap/staff/tests/test_tools.py --- a/evap/staff/tests/test_tools.py +++ b/evap/staff/tests/test_tools.py @@ -3,7 +3,7 @@ from django.core.cache import cache from django.core.cache.utils import make_template_fragment_key -from model_mommy import mommy +from model_bakery import baker from evap.evaluation.tests.tools import WebTest from evap.evaluation.models import Contribution, Course, Evaluation, UserProfile @@ -13,8 +13,8 @@ class NavbarCacheTest(WebTest): def test_navbar_cache_deletion_for_users(self): - user1 = mommy.make(UserProfile, 
username='user1', email="[email protected]") - user2 = mommy.make(UserProfile, username='user2', email="[email protected]") + user1 = baker.make(UserProfile, username='user1', email="[email protected]") + user2 = baker.make(UserProfile, username='user2', email="[email protected]") # create navbar caches for anonymous user, user1 and user2 self.app.get("/") @@ -39,12 +39,12 @@ def test_navbar_cache_deletion_for_users(self): class MergeUsersTest(TestCase): @classmethod def setUpTestData(cls): - cls.user1 = mommy.make(UserProfile, username="test1") - cls.user2 = mommy.make(UserProfile, username="test2") - cls.user3 = mommy.make(UserProfile, username="test3") - cls.group1 = mommy.make(Group, pk=4) - cls.group2 = mommy.make(Group, pk=5) - cls.main_user = mommy.make(UserProfile, + cls.user1 = baker.make(UserProfile, username="test1") + cls.user2 = baker.make(UserProfile, username="test2") + cls.user3 = baker.make(UserProfile, username="test3") + cls.group1 = baker.make(Group, pk=4) + cls.group2 = baker.make(Group, pk=5) + cls.main_user = baker.make(UserProfile, username="main_user", title="Dr.", first_name="Main", @@ -56,7 +56,7 @@ def setUpTestData(cls): cc_users=[cls.user1], ccing_users=[] ) - cls.other_user = mommy.make(UserProfile, + cls.other_user = baker.make(UserProfile, username="other_user", title="", first_name="Other", @@ -69,23 +69,23 @@ def setUpTestData(cls): ccing_users=[cls.user1, cls.user2], is_superuser=True ) - cls.course1 = mommy.make(Course, responsibles=[cls.main_user]) - cls.course2 = mommy.make(Course, responsibles=[cls.main_user]) - cls.course3 = mommy.make(Course, responsibles=[cls.other_user]) - cls.evaluation1 = mommy.make(Evaluation, course=cls.course1, name_de="evaluation1", participants=[cls.main_user, cls.other_user]) # this should make the merge fail - cls.evaluation2 = mommy.make(Evaluation, course=cls.course2, name_de="evaluation2", participants=[cls.main_user], voters=[cls.main_user]) - cls.evaluation3 = mommy.make(Evaluation, course=cls.course3, name_de="evaluation3", participants=[cls.other_user], voters=[cls.other_user]) - cls.contribution1 = mommy.make(Contribution, contributor=cls.main_user, evaluation=cls.evaluation1) - cls.contribution2 = mommy.make(Contribution, contributor=cls.other_user, evaluation=cls.evaluation1) # this should make the merge fail - cls.contribution3 = mommy.make(Contribution, contributor=cls.other_user, evaluation=cls.evaluation2) - cls.rewardpointgranting_main = mommy.make(RewardPointGranting, user_profile=cls.main_user) - cls.rewardpointgranting_other = mommy.make(RewardPointGranting, user_profile=cls.other_user) - cls.rewardpointredemption_main = mommy.make(RewardPointRedemption, user_profile=cls.main_user) - cls.rewardpointredemption_other = mommy.make(RewardPointRedemption, user_profile=cls.other_user) + cls.course1 = baker.make(Course, responsibles=[cls.main_user]) + cls.course2 = baker.make(Course, responsibles=[cls.main_user]) + cls.course3 = baker.make(Course, responsibles=[cls.other_user]) + cls.evaluation1 = baker.make(Evaluation, course=cls.course1, name_de="evaluation1", participants=[cls.main_user, cls.other_user]) # this should make the merge fail + cls.evaluation2 = baker.make(Evaluation, course=cls.course2, name_de="evaluation2", participants=[cls.main_user], voters=[cls.main_user]) + cls.evaluation3 = baker.make(Evaluation, course=cls.course3, name_de="evaluation3", participants=[cls.other_user], voters=[cls.other_user]) + cls.contribution1 = baker.make(Contribution, contributor=cls.main_user, 
evaluation=cls.evaluation1) + cls.contribution2 = baker.make(Contribution, contributor=cls.other_user, evaluation=cls.evaluation1) # this should make the merge fail + cls.contribution3 = baker.make(Contribution, contributor=cls.other_user, evaluation=cls.evaluation2) + cls.rewardpointgranting_main = baker.make(RewardPointGranting, user_profile=cls.main_user) + cls.rewardpointgranting_other = baker.make(RewardPointGranting, user_profile=cls.other_user) + cls.rewardpointredemption_main = baker.make(RewardPointRedemption, user_profile=cls.main_user) + cls.rewardpointredemption_other = baker.make(RewardPointRedemption, user_profile=cls.other_user) def test_merge_handles_all_attributes(self): - user1 = mommy.make(UserProfile) - user2 = mommy.make(UserProfile) + user1 = baker.make(UserProfile) + user2 = baker.make(UserProfile) all_attrs = list(field.name for field in UserProfile._meta.get_fields(include_hidden=True)) diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -11,7 +11,7 @@ from django.test.testcases import TestCase from django_webtest import WebTest -from model_mommy import mommy +from model_bakery import baker import xlrd from evap.evaluation.models import (Contribution, Course, CourseType, Degree, EmailTemplate, Evaluation, FaqSection, @@ -35,7 +35,7 @@ class TestDownloadSampleXlsView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) def test_sample_file_correctness(self): page = self.app.get(self.url, user='manager') @@ -59,7 +59,7 @@ class TestStaffIndexView(WebTestWith200Check): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) class TestStaffFAQView(WebTestWith200Check): @@ -68,7 +68,7 @@ class TestStaffFAQView(WebTestWith200Check): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) class TestStaffFAQEditView(WebTestWith200Check): @@ -77,9 +77,9 @@ class TestStaffFAQEditView(WebTestWith200Check): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - section = mommy.make(FaqSection, pk=1) - mommy.make(FaqQuestion, section=section) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + section = baker.make(FaqSection, pk=1) + baker.make(FaqQuestion, section=section) class TestUserIndexView(WebTest): @@ -87,7 +87,7 @@ class TestUserIndexView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) def test_num_queries_is_constant(self): """ @@ -95,11 +95,11 @@ def test_num_queries_is_constant(self): and not linear to the number of users """ num_users = 50 - semester = mommy.make(Semester, participations_are_archived=True) + semester = baker.make(Semester, participations_are_archived=True) # this triggers more checks in UserProfile.can_be_deleted_by_manager - evaluation = mommy.make(Evaluation, 
state="published", course=mommy.make(Course, semester=semester), _participant_count=1, _voter_count=1) - mommy.make(UserProfile, _quantity=num_users, evaluations_participating_in=[evaluation]) + evaluation = baker.make(Evaluation, state="published", course=baker.make(Course, semester=semester), _participant_count=1, _voter_count=1) + baker.make(UserProfile, _quantity=num_users, evaluations_participating_in=[evaluation]) with self.assertNumQueries(FuzzyInt(0, num_users - 1)): self.app.get(self.url, user="manager") @@ -110,7 +110,7 @@ class TestUserCreateView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) def test_user_is_created(self): page = self.app.get(self.url, user="manager", status=200) @@ -135,8 +135,8 @@ class TestUserEditView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - mommy.make(UserProfile, pk=3) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + baker.make(UserProfile, pk=3) def test_questionnaire_edit(self): page = self.app.get(self.url, user="manager", status=200) @@ -146,10 +146,10 @@ def test_questionnaire_edit(self): self.assertTrue(UserProfile.objects.filter(username='lfo9e7bmxp1xi').exists()) def test_reward_points_granting_message(self): - evaluation = mommy.make(Evaluation) - already_evaluated = mommy.make(Evaluation, course=mommy.make(Course, semester=evaluation.course.semester)) + evaluation = baker.make(Evaluation) + already_evaluated = baker.make(Evaluation, course=baker.make(Course, semester=evaluation.course.semester)) SemesterActivation.objects.create(semester=evaluation.course.semester, is_active=True) - student = mommy.make(UserProfile, email="[email protected]", + student = baker.make(UserProfile, email="[email protected]", evaluations_participating_in=[evaluation, already_evaluated], evaluations_voted_for=[already_evaluated]) page = self.app.get(reverse('staff:user_edit', args=[student.pk]), user='manager', status=200) @@ -170,8 +170,8 @@ class TestUserMergeSelectionView(WebTestWith200Check): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - mommy.make(UserProfile) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + baker.make(UserProfile) class TestUserMergeView(WebTestWith200Check): @@ -180,9 +180,9 @@ class TestUserMergeView(WebTestWith200Check): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - mommy.make(UserProfile, pk=3) - mommy.make(UserProfile, pk=4) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + baker.make(UserProfile, pk=3) + baker.make(UserProfile, pk=4) class TestUserBulkDeleteView(WebTest): @@ -191,7 +191,7 @@ class TestUserBulkDeleteView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) def test_testrun_deletes_no_users(self): page = self.app.get(self.url, user='manager') @@ -199,7 +199,7 @@ def test_testrun_deletes_no_users(self): form['username_file'] = (self.filename,) - mommy.make(UserProfile, is_active=False) + 
baker.make(UserProfile, is_active=False) users_before = UserProfile.objects.count() reply = form.submit(name='operation', value='test') @@ -210,14 +210,14 @@ def test_testrun_deletes_no_users(self): self.assertEqual(users_before, UserProfile.objects.count()) def test_deletes_users(self): - mommy.make(UserProfile, username='testuser1') - mommy.make(UserProfile, username='testuser2') - contribution1 = mommy.make(Contribution) - semester = mommy.make(Semester, participations_are_archived=True) - evaluation = mommy.make(Evaluation, course=mommy.make(Course, semester=semester), _participant_count=0, _voter_count=0) - contribution2 = mommy.make(Contribution, evaluation=evaluation) - mommy.make(UserProfile, username='contributor1', contributions=[contribution1]) - mommy.make(UserProfile, username='contributor2', contributions=[contribution2]) + baker.make(UserProfile, username='testuser1') + baker.make(UserProfile, username='testuser2') + contribution1 = baker.make(Contribution) + semester = baker.make(Semester, participations_are_archived=True) + evaluation = baker.make(Evaluation, course=baker.make(Course, semester=semester), _participant_count=0, _voter_count=0) + contribution2 = baker.make(Contribution, evaluation=evaluation) + baker.make(UserProfile, username='contributor1', contributions=[contribution1]) + baker.make(UserProfile, username='contributor2', contributions=[contribution2]) page = self.app.get(self.url, user='manager') form = page.forms["user-bulk-delete-form"] @@ -253,7 +253,7 @@ class TestUserImportView(WebTest): @classmethod def setUpTestData(cls): - cls.user = mommy.make(UserProfile, username="manager", groups=[Group.objects.get(name="Manager")]) + cls.user = baker.make(UserProfile, username="manager", groups=[Group.objects.get(name="Manager")]) def test_success_handling(self): """ @@ -296,7 +296,7 @@ def test_warning_handling(self): """ Tests whether warnings given from the importer are displayed """ - mommy.make(UserProfile, email="[email protected]") + baker.make(UserProfile, email="[email protected]") page = self.app.get(self.url, user='manager') @@ -345,12 +345,12 @@ class TestSemesterView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - cls.semester = mommy.make(Semester, pk=1) - cls.evaluation1 = mommy.make(Evaluation, name_de="Evaluation 1", name_en="Evaluation 1", - course=mommy.make(Course, name_de="A", name_en="B", semester=cls.semester)) - cls.evaluation2 = mommy.make(Evaluation, name_de="Evaluation 2", name_en="Evaluation 2", - course=mommy.make(Course, name_de="B", name_en="A", semester=cls.semester)) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + cls.semester = baker.make(Semester, pk=1) + cls.evaluation1 = baker.make(Evaluation, name_de="Evaluation 1", name_en="Evaluation 1", + course=baker.make(Course, name_de="A", name_en="B", semester=cls.semester)) + cls.evaluation2 = baker.make(Evaluation, name_de="Evaluation 2", name_en="Evaluation 2", + course=baker.make(Course, name_de="B", name_en="A", semester=cls.semester)) def test_view_list_sorting(self): UserProfile.objects.filter(username='manager').update(language='en') @@ -367,8 +367,8 @@ def test_view_list_sorting(self): self.assertLess(position_evaluation1, position_evaluation2) def test_access_to_semester_with_archived_results(self): - mommy.make(UserProfile, username='reviewer', groups=[Group.objects.get(name='Reviewer')]) - mommy.make(Semester, pk=2, 
results_are_archived=True) + baker.make(UserProfile, username='reviewer', groups=[Group.objects.get(name='Reviewer')]) + baker.make(Semester, pk=2, results_are_archived=True) # reviewers shouldn't be allowed to access the semester page self.app.get('/staff/semester/2', user='reviewer', status=403) @@ -378,9 +378,9 @@ def test_access_to_semester_with_archived_results(self): @override_settings(INSTITUTION_EMAIL_DOMAINS=["institution.com"]) def test_badge_for_external_responsibles(self): - responsible = mommy.make(UserProfile, email='[email protected]') - course = mommy.make(Course, semester=self.semester, responsibles=[responsible]) - mommy.make(Evaluation, course=course) + responsible = baker.make(UserProfile, email='[email protected]') + course = baker.make(Course, semester=self.semester, responsibles=[responsible]) + baker.make(Evaluation, course=course) response = self.app.get(self.url, user='manager') self.assertNotContains(response, 'External responsible') @@ -392,7 +392,7 @@ def test_badge_for_external_responsibles(self): class TestGetEvaluationsWithPrefetchedData(TestCase): def test_get_evaluations_with_prefetched_data(self): - evaluation = mommy.make(Evaluation, is_single_result=True) + evaluation = baker.make(Evaluation, is_single_result=True) get_evaluations_with_prefetched_data(evaluation.course.semester) @@ -401,7 +401,7 @@ class TestSemesterCreateView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) def test_create(self): name_de = 'name_de' @@ -425,8 +425,8 @@ class TestSemesterEditView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - cls.semester = mommy.make(Semester, pk=1, name_de='old_name', name_en='old_name') + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + cls.semester = baker.make(Semester, pk=1, name_de='old_name', name_en='old_name') def test_name_change(self): new_name_de = 'new_name_de' @@ -451,31 +451,31 @@ class TestSemesterDeleteView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) def test_failure(self): - semester = mommy.make(Semester) - mommy.make(Evaluation, course=mommy.make(Course, semester=semester), state='in_evaluation', voters=[mommy.make(UserProfile)]) + semester = baker.make(Semester) + baker.make(Evaluation, course=baker.make(Course, semester=semester), state='in_evaluation', voters=[baker.make(UserProfile)]) self.assertFalse(semester.can_be_deleted_by_manager) response = self.app.post(self.url, params={'semester_id': semester.pk}, user='manager', expect_errors=True) self.assertEqual(response.status_code, 400) self.assertTrue(Semester.objects.filter(pk=semester.pk).exists()) def test_success_if_no_courses(self): - semester = mommy.make(Semester) + semester = baker.make(Semester) self.assertTrue(semester.can_be_deleted_by_manager) response = self.app.post(self.url, params={'semester_id': semester.pk}, user='manager') self.assertEqual(response.status_code, 302) self.assertFalse(Semester.objects.filter(pk=semester.pk).exists()) def test_success_if_archived(self): - semester = mommy.make(Semester) - course = mommy.make(Course, semester=semester) - evaluation = 
mommy.make(Evaluation, course=course, state='published') + semester = baker.make(Semester) + course = baker.make(Course, semester=semester) + evaluation = baker.make(Evaluation, course=course, state='published') general_contribution = evaluation.general_contribution - responsible_contribution = mommy.make(Contribution, evaluation=evaluation, contributor=mommy.make(UserProfile)) - textanswer = mommy.make(TextAnswer, contribution=general_contribution, state='PU') - ratinganswercounter = mommy.make(RatingAnswerCounter, contribution=responsible_contribution) + responsible_contribution = baker.make(Contribution, evaluation=evaluation, contributor=baker.make(UserProfile)) + textanswer = baker.make(TextAnswer, contribution=general_contribution, state='PU') + ratinganswercounter = baker.make(RatingAnswerCounter, contribution=responsible_contribution) self.assertFalse(semester.can_be_deleted_by_manager) @@ -499,16 +499,16 @@ class TestSemesterAssignView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - cls.semester = mommy.make(Semester, pk=1) - lecture_type = mommy.make(CourseType, name_de="Vorlesung", name_en="Lecture") - seminar_type = mommy.make(CourseType, name_de="Seminar", name_en="Seminar") - cls.questionnaire = mommy.make(Questionnaire, type=Questionnaire.TOP) - evaluation1 = mommy.make(Evaluation, course=mommy.make(Course, semester=cls.semester, type=seminar_type)) - mommy.make(Contribution, contributor=mommy.make(UserProfile), evaluation=evaluation1, + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + cls.semester = baker.make(Semester, pk=1) + lecture_type = baker.make(CourseType, name_de="Vorlesung", name_en="Lecture") + seminar_type = baker.make(CourseType, name_de="Seminar", name_en="Seminar") + cls.questionnaire = baker.make(Questionnaire, type=Questionnaire.TOP) + evaluation1 = baker.make(Evaluation, course=baker.make(Course, semester=cls.semester, type=seminar_type)) + baker.make(Contribution, contributor=baker.make(UserProfile), evaluation=evaluation1, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) - evaluation2 = mommy.make(Evaluation, course=mommy.make(Course, semester=cls.semester, type=lecture_type)) - mommy.make(Contribution, contributor=mommy.make(UserProfile), evaluation=evaluation2, + evaluation2 = baker.make(Evaluation, course=baker.make(Course, semester=cls.semester, type=lecture_type)) + baker.make(Contribution, contributor=baker.make(UserProfile), evaluation=evaluation2, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) def test_assign_questionnaires(self): @@ -530,13 +530,13 @@ class TestSemesterPreparationReminderView(WebTestWith200Check): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - cls.semester = mommy.make(Semester, pk=1) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + cls.semester = baker.make(Semester, pk=1) def test_preparation_reminder(self): - user = mommy.make(UserProfile, username='user_to_find') - evaluation = mommy.make(Evaluation, course=mommy.make(Course, semester=self.semester, responsibles=[user]), state='prepared', name_en='name_to_find', name_de='name_to_find') - mommy.make(Contribution, evaluation=evaluation, contributor=user, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) + user = baker.make(UserProfile, 
username='user_to_find') + evaluation = baker.make(Evaluation, course=baker.make(Course, semester=self.semester, responsibles=[user]), state='prepared', name_en='name_to_find', name_de='name_to_find') + baker.make(Contribution, evaluation=evaluation, contributor=user, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) response = self.app.get(self.url, user='manager') self.assertContains(response, 'user_to_find') @@ -544,8 +544,8 @@ def test_preparation_reminder(self): @patch("evap.staff.views.EmailTemplate.send_to_user") def test_remind_all(self, send_to_user_mock): - user = mommy.make(UserProfile) - evaluation = mommy.make(Evaluation, course=mommy.make(Course, semester=self.semester, responsibles=[user]), state='prepared') + user = baker.make(UserProfile) + evaluation = baker.make(Evaluation, course=baker.make(Course, semester=self.semester, responsibles=[user]), state='prepared') response = self.app.post(self.url, user='manager') self.assertEqual(response.status_code, 200) @@ -564,11 +564,11 @@ class TestSendReminderView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - cls.semester = mommy.make(Semester, pk=1) - responsible = mommy.make(UserProfile, pk=3, email='[email protected]') - evaluation = mommy.make(Evaluation, course=mommy.make(Course, semester=cls.semester, responsibles=[responsible]), state='prepared') - mommy.make(Contribution, evaluation=evaluation, contributor=responsible, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + cls.semester = baker.make(Semester, pk=1) + responsible = baker.make(UserProfile, pk=3, email='[email protected]') + evaluation = baker.make(Evaluation, course=baker.make(Course, semester=cls.semester, responsibles=[responsible]), state='prepared') + baker.make(Contribution, evaluation=evaluation, contributor=responsible, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) def test_form(self): page = self.app.get(self.url, user='manager') @@ -589,12 +589,12 @@ class TestSemesterImportView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(Semester, pk=1) - mommy.make(UserProfile, username="manager", groups=[Group.objects.get(name="Manager")]) + baker.make(Semester, pk=1) + baker.make(UserProfile, username="manager", groups=[Group.objects.get(name="Manager")]) def test_import_valid_file(self): - mommy.make(CourseType, name_de="Vorlesung", name_en="Vorlesung") - mommy.make(CourseType, name_de="Seminar", name_en="Seminar") + baker.make(CourseType, name_de="Vorlesung", name_en="Vorlesung") + baker.make(CourseType, name_de="Seminar", name_en="Seminar") original_user_count = UserProfile.objects.count() @@ -666,7 +666,7 @@ def test_warning_handling(self): """ Tests whether warnings given from the importer are displayed """ - mommy.make(UserProfile, email="[email protected]") + baker.make(UserProfile, email="[email protected]") page = self.app.get(self.url, user='manager') @@ -708,8 +708,8 @@ def test_invalid_import_operation(self): self.assertEqual(reply.status_code, 400) def test_missing_evaluation_period(self): - mommy.make(CourseType, name_de="Vorlesung", name_en="Vorlesung") - mommy.make(CourseType, name_de="Seminar", name_en="Seminar") + baker.make(CourseType, name_de="Vorlesung", name_en="Vorlesung") + baker.make(CourseType, name_de="Seminar", name_en="Seminar") page = self.app.get(self.url, user='manager') @@ 
-729,13 +729,13 @@ class TestSemesterExportView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - cls.semester = mommy.make(Semester, pk=1) - cls.degree = mommy.make(Degree) - cls.course_type = mommy.make(CourseType) - cls.evaluation = mommy.make( + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + cls.semester = baker.make(Semester, pk=1) + cls.degree = baker.make(Degree) + cls.course_type = baker.make(CourseType) + cls.evaluation = baker.make( Evaluation, - course=mommy.make(Course, degrees=[cls.degree], type=cls.course_type, semester=cls.semester) + course=baker.make(Course, degrees=[cls.degree], type=cls.course_type, semester=cls.semester) ) def test_view_downloads_excel_file(self): @@ -762,15 +762,15 @@ class TestSemesterRawDataExportView(WebTestWith200Check): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - cls.semester = mommy.make(Semester, pk=1) - cls.course_type = mommy.make(CourseType, name_en="Type") + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + cls.semester = baker.make(Semester, pk=1) + cls.course_type = baker.make(CourseType, name_en="Type") def test_view_downloads_csv_file(self): - student_user = mommy.make(UserProfile, username='student') - mommy.make(Evaluation, course=mommy.make(Course, type=self.course_type, semester=self.semester, name_de="1", + student_user = baker.make(UserProfile, username='student') + baker.make(Evaluation, course=baker.make(Course, type=self.course_type, semester=self.semester, name_de="1", name_en="Course 1"), participants=[student_user], voters=[student_user], name_de="E1", name_en="E1") - mommy.make(Evaluation, course=mommy.make(Course, type=self.course_type, semester=self.semester, name_de="2", + baker.make(Evaluation, course=baker.make(Course, type=self.course_type, semester=self.semester, name_de="2", name_en="Course 2"), participants=[student_user]) response = self.app.get(self.url, user='manager') @@ -782,7 +782,7 @@ def test_view_downloads_csv_file(self): self.assertEqual(response.content, expected_content.encode("utf-8")) def test_single_result(self): - mommy.make(Evaluation, course=mommy.make(Course, type=self.course_type, semester=self.semester, name_de="3", + baker.make(Evaluation, course=baker.make(Course, type=self.course_type, semester=self.semester, name_de="3", name_en="Single Result"), _participant_count=5, _voter_count=5, is_single_result=True) response = self.app.get(self.url, user='manager') @@ -798,19 +798,19 @@ class TestSemesterParticipationDataExportView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - cls.student_user = mommy.make(UserProfile, username='student') - cls.student_user2 = mommy.make(UserProfile, username='student2') - cls.semester = mommy.make(Semester, pk=1) - cls.course_type = mommy.make(CourseType, name_en="Type") - cls.evaluation1 = mommy.make(Evaluation, course=mommy.make(Course, type=cls.course_type, semester=cls.semester), participants=[cls.student_user], + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + cls.student_user = baker.make(UserProfile, username='student') + cls.student_user2 = baker.make(UserProfile, username='student2') + cls.semester = baker.make(Semester, pk=1) + cls.course_type = baker.make(CourseType, 
name_en="Type") + cls.evaluation1 = baker.make(Evaluation, course=baker.make(Course, type=cls.course_type, semester=cls.semester), participants=[cls.student_user], voters=[cls.student_user], name_de="Veranstaltung 1", name_en="Evaluation 1", is_rewarded=True) - cls.evaluation2 = mommy.make(Evaluation, course=mommy.make(Course, type=cls.course_type, semester=cls.semester), participants=[cls.student_user, cls.student_user2], + cls.evaluation2 = baker.make(Evaluation, course=baker.make(Course, type=cls.course_type, semester=cls.semester), participants=[cls.student_user, cls.student_user2], name_de="Veranstaltung 2", name_en="Evaluation 2", is_rewarded=False) - mommy.make(Contribution, evaluation=cls.evaluation1, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) - mommy.make(Contribution, evaluation=cls.evaluation2, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) - mommy.make(RewardPointGranting, semester=cls.semester, user_profile=cls.student_user, value=23) - mommy.make(RewardPointGranting, semester=cls.semester, user_profile=cls.student_user, value=42) + baker.make(Contribution, evaluation=cls.evaluation1, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) + baker.make(Contribution, evaluation=cls.evaluation2, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) + baker.make(RewardPointGranting, semester=cls.semester, user_profile=cls.student_user, value=23) + baker.make(RewardPointGranting, semester=cls.semester, user_profile=cls.student_user, value=42) def test_view_downloads_csv_file(self): response = self.app.get(self.url, user='manager') @@ -827,13 +827,13 @@ class TestLoginKeyExportView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - cls.external_user = mommy.make(UserProfile, email="[email protected]") - cls.internal_user = mommy.make(UserProfile, email="[email protected]") + cls.external_user = baker.make(UserProfile, email="[email protected]") + cls.internal_user = baker.make(UserProfile, email="[email protected]") - semester = mommy.make(Semester, pk=1) - mommy.make(Evaluation, pk=1, course=mommy.make(Course, semester=semester), participants=[cls.external_user, cls.internal_user], voters=[cls.external_user, cls.internal_user]) + semester = baker.make(Semester, pk=1) + baker.make(Evaluation, pk=1, course=baker.make(Course, semester=semester), participants=[cls.external_user, cls.internal_user], voters=[cls.external_user, cls.internal_user]) def test_login_key_export_works_as_expected(self): self.assertEqual(self.external_user.login_key, None) @@ -854,8 +854,8 @@ class TestEvaluationOperationView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - cls.semester = mommy.make(Semester, pk=1) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + cls.semester = baker.make(Semester, pk=1) def helper_publish_evaluation_with_publish_notifications_for(self, evaluation, contributors=True, participants=True): page = self.app.get("/staff/semester/1", user="manager") @@ -873,14 +873,14 @@ def helper_publish_evaluation_with_publish_notifications_for(self, evaluation, c evaluation.save() def test_publish_notifications(self): - participant1 = mommy.make(UserProfile, email="[email protected]") - participant2 = 
mommy.make(UserProfile, email="[email protected]") - contributor1 = mommy.make(UserProfile, email="[email protected]") + participant1 = baker.make(UserProfile, email="[email protected]") + participant2 = baker.make(UserProfile, email="[email protected]") + contributor1 = baker.make(UserProfile, email="[email protected]") - course = mommy.make(Course, semester=self.semester) - evaluation = mommy.make(Evaluation, course=course, state='reviewed', + course = baker.make(Course, semester=self.semester) + evaluation = baker.make(Evaluation, course=course, state='reviewed', participants=[participant1, participant2], voters=[participant1, participant2]) - mommy.make(Contribution, contributor=contributor1, evaluation=evaluation) + baker.make(Contribution, contributor=contributor1, evaluation=evaluation) self.helper_publish_evaluation_with_publish_notifications_for(evaluation, contributors=False, participants=False) self.assertEqual(len(mail.outbox), 0) @@ -920,37 +920,37 @@ def helper_semester_state_views(self, evaluation, old_state, new_state): The following tests make sure the evaluation state transitions are triggerable via the UI. """ def test_semester_publish(self): - participant1 = mommy.make(UserProfile, email="[email protected]") - participant2 = mommy.make(UserProfile, email="[email protected]") - course = mommy.make(Course, semester=self.semester) - evaluation = mommy.make(Evaluation, course=course, state='reviewed', + participant1 = baker.make(UserProfile, email="[email protected]") + participant2 = baker.make(UserProfile, email="[email protected]") + course = baker.make(Course, semester=self.semester) + evaluation = baker.make(Evaluation, course=course, state='reviewed', participants=[participant1, participant2], voters=[participant1, participant2]) self.helper_semester_state_views(evaluation, "reviewed", "published") self.assertEqual(len(mail.outbox), 2) def test_semester_reset_1(self): - evaluation = mommy.make(Evaluation, course=mommy.make(Course, semester=self.semester), state='prepared') + evaluation = baker.make(Evaluation, course=baker.make(Course, semester=self.semester), state='prepared') self.helper_semester_state_views(evaluation, "prepared", "new") def test_semester_reset_2(self): - evaluation = mommy.make(Evaluation, course=mommy.make(Course, semester=self.semester), state='approved') + evaluation = baker.make(Evaluation, course=baker.make(Course, semester=self.semester), state='approved') self.helper_semester_state_views(evaluation, "approved", "new") def test_semester_contributor_ready_1(self): - evaluation = mommy.make(Evaluation, course=mommy.make(Course, semester=self.semester), state='new') + evaluation = baker.make(Evaluation, course=baker.make(Course, semester=self.semester), state='new') self.helper_semester_state_views(evaluation, "new", "prepared") def test_semester_contributor_ready_2(self): - evaluation = mommy.make(Evaluation, course=mommy.make(Course, semester=self.semester), state='editor_approved') + evaluation = baker.make(Evaluation, course=baker.make(Course, semester=self.semester), state='editor_approved') self.helper_semester_state_views(evaluation, "editor_approved", "prepared") def test_semester_unpublish(self): - evaluation = mommy.make(Evaluation, course=mommy.make(Course, semester=self.semester), state='published', _participant_count=0, _voter_count=0) + evaluation = baker.make(Evaluation, course=baker.make(Course, semester=self.semester), state='published', _participant_count=0, _voter_count=0) self.helper_semester_state_views(evaluation, 
"published", "reviewed") def test_operation_start_evaluation(self): - evaluation = mommy.make(Evaluation, state='approved', course=mommy.make(Course, semester=self.semester)) + evaluation = baker.make(Evaluation, state='approved', course=baker.make(Course, semester=self.semester)) urloptions = '?evaluation={}&target_state=in_evaluation'.format(evaluation.pk) response = self.app.get(self.url + urloptions, user='manager') @@ -963,7 +963,7 @@ def test_operation_start_evaluation(self): self.assertEqual(evaluation.state, 'in_evaluation') def test_operation_prepare(self): - evaluation = mommy.make(Evaluation, state='new', course=mommy.make(Course, semester=self.semester)) + evaluation = baker.make(Evaluation, state='new', course=baker.make(Course, semester=self.semester)) urloptions = '?evaluation={}&target_state=prepared'.format(evaluation.pk) response = self.app.get(self.url + urloptions, user='manager') @@ -981,11 +981,11 @@ class TestCourseCreateView(WebTest): @classmethod def setUpTestData(cls): - cls.manager_user = mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - cls.semester = mommy.make(Semester, pk=1) - cls.course_type = mommy.make(CourseType) - cls.degree = mommy.make(Degree) - cls.responsible = mommy.make(UserProfile) + cls.manager_user = baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + cls.semester = baker.make(Semester, pk=1) + cls.course_type = baker.make(CourseType) + cls.degree = baker.make(Degree) + cls.responsible = baker.make(UserProfile) def test_course_create(self): """ @@ -1017,8 +1017,8 @@ class TestSingleResultCreateView(WebTest): @classmethod def setUpTestData(cls): - cls.manager_user = mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - cls.course = mommy.make(Course, semester=mommy.make(Semester, pk=1)) + cls.manager_user = baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + cls.course = baker.make(Course, semester=baker.make(Semester, pk=1)) def test_single_result_create(self): """ @@ -1047,10 +1047,10 @@ class TestEvaluationCreateView(WebTest): @classmethod def setUpTestData(cls): - cls.manager_user = mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - cls.course = mommy.make(Course, semester=mommy.make(Semester, pk=1)) - cls.q1 = mommy.make(Questionnaire, type=Questionnaire.TOP) - cls.q2 = mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) + cls.manager_user = baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + cls.course = baker.make(Course, semester=baker.make(Semester, pk=1)) + cls.q1 = baker.make(Questionnaire, type=Questionnaire.TOP) + cls.q2 = baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) def test_evaluation_create(self): """ @@ -1090,11 +1090,11 @@ class TestCourseEditView(WebTest): @classmethod def setUpTestData(cls): - cls.user = mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - semester = mommy.make(Semester, pk=1) - degree = mommy.make(Degree) - responsible = mommy.make(UserProfile) - cls.course = mommy.make(Course, name_en="Some name", semester=semester, degrees=[degree], + cls.user = baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + semester = baker.make(Semester, pk=1) + degree = baker.make(Degree) + responsible = baker.make(UserProfile) + cls.course = baker.make(Course, name_en="Some name", semester=semester, 
degrees=[degree], responsibles=[responsible], pk=1, last_modified_user=cls.user, last_modified_time=datetime.datetime(2000, 1, 1, 0, 0)) def setUp(self): @@ -1113,7 +1113,7 @@ def test_last_modified_user(self): """ Tests whether saving only changes the last_modified_user if changes were made. """ - test_user = mommy.make(UserProfile, username='test_user', groups=[Group.objects.get(name='Manager')]) + test_user = baker.make(UserProfile, username='test_user', groups=[Group.objects.get(name='Manager')]) old_name_en = self.course.name_en old_last_modified_user = self.course.last_modified_user @@ -1154,17 +1154,17 @@ class TestEvaluationEditView(WebTest): @classmethod def setUpTestData(cls): - cls.user = mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - semester = mommy.make(Semester, pk=1) - degree = mommy.make(Degree) - responsible = mommy.make(UserProfile) - cls.editor = mommy.make(UserProfile) - cls.evaluation = mommy.make(Evaluation, course=mommy.make(Course, semester=semester, degrees=[degree], responsibles=[responsible]), pk=1, last_modified_user=cls.user, + cls.user = baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + semester = baker.make(Semester, pk=1) + degree = baker.make(Degree) + responsible = baker.make(UserProfile) + cls.editor = baker.make(UserProfile) + cls.evaluation = baker.make(Evaluation, course=baker.make(Course, semester=semester, degrees=[degree], responsibles=[responsible]), pk=1, last_modified_user=cls.user, vote_start_datetime=datetime.datetime(2099, 1, 1, 0, 0), vote_end_date=datetime.date(2099, 12, 31)) - mommy.make(Questionnaire, questions=[mommy.make(Question)]) - cls.evaluation.general_contribution.questionnaires.set([mommy.make(Questionnaire)]) - mommy.make(Contribution, evaluation=cls.evaluation, contributor=responsible, order=0, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) - mommy.make(Contribution, evaluation=cls.evaluation, contributor=cls.editor, order=1, can_edit=True) + baker.make(Questionnaire, questions=[baker.make(Question)]) + cls.evaluation.general_contribution.questionnaires.set([baker.make(Questionnaire)]) + baker.make(Contribution, evaluation=cls.evaluation, contributor=responsible, order=0, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) + baker.make(Contribution, evaluation=cls.evaluation, contributor=cls.editor, order=1, can_edit=True) def setUp(self): self.evaluation = Evaluation.objects.get(pk=self.evaluation.pk) @@ -1179,10 +1179,10 @@ def test_edit_evaluation(self): self.assertFalse(self.evaluation.contributions.get(contributor=self.editor).can_edit) def test_participant_removal_reward_point_granting_message(self): - already_evaluated = mommy.make(Evaluation, pk=2, course=mommy.make(Course, semester=self.evaluation.course.semester)) + already_evaluated = baker.make(Evaluation, pk=2, course=baker.make(Course, semester=self.evaluation.course.semester)) SemesterActivation.objects.create(semester=self.evaluation.course.semester, is_active=True) - other = mommy.make(UserProfile, evaluations_participating_in=[self.evaluation]) - student = mommy.make(UserProfile, email="[email protected]", + other = baker.make(UserProfile, evaluations_participating_in=[self.evaluation]) + student = baker.make(UserProfile, email="[email protected]", evaluations_participating_in=[self.evaluation, already_evaluated], evaluations_voted_for=[already_evaluated]) page = self.app.get(self.url, user='manager') @@ -1195,12 +1195,12 @@ def 
test_participant_removal_reward_point_granting_message(self): self.assertIn("The removal as participant has granted the user &quot;{}&quot; 3 reward points for the semester.".format(student.username), page) def test_remove_participants(self): - already_evaluated = mommy.make(Evaluation, pk=2, course=mommy.make(Course, semester=self.evaluation.course.semester)) + already_evaluated = baker.make(Evaluation, pk=2, course=baker.make(Course, semester=self.evaluation.course.semester)) SemesterActivation.objects.create(semester=self.evaluation.course.semester, is_active=True) - student = mommy.make(UserProfile, evaluations_participating_in=[self.evaluation]) + student = baker.make(UserProfile, evaluations_participating_in=[self.evaluation]) for name in ["a", "b", "c", "d", "e"]: - mommy.make(UserProfile, username=name, email="{}@institution.example.com".format(name), + baker.make(UserProfile, username=name, email="{}@institution.example.com".format(name), evaluations_participating_in=[self.evaluation, already_evaluated], evaluations_voted_for=[already_evaluated]) page = self.app.get(self.url, user='manager') @@ -1214,12 +1214,12 @@ def test_remove_participants(self): self.assertIn("The removal as participant has granted the user &quot;{}&quot; 3 reward points for the semester.".format(name), page) def test_remove_participants_proportional_reward_points(self): - already_evaluated = mommy.make(Evaluation, pk=2, course=mommy.make(Course, semester=self.evaluation.course.semester)) + already_evaluated = baker.make(Evaluation, pk=2, course=baker.make(Course, semester=self.evaluation.course.semester)) SemesterActivation.objects.create(semester=self.evaluation.course.semester, is_active=True) - student = mommy.make(UserProfile, evaluations_participating_in=[self.evaluation]) + student = baker.make(UserProfile, evaluations_participating_in=[self.evaluation]) for name, points_granted in [("a", 0), ("b", 1), ("c", 2), ("d", 3)]: - user = mommy.make(UserProfile, username=name, email="{}@institution.example.com".format(name), + user = baker.make(UserProfile, username=name, email="{}@institution.example.com".format(name), evaluations_participating_in=[self.evaluation, already_evaluated], evaluations_voted_for=[already_evaluated]) RewardPointGranting.objects.create(user_profile=user, semester=self.evaluation.course.semester, value=points_granted) @@ -1240,7 +1240,7 @@ def test_last_modified_user(self): Tests whether the button "Save and approve" does only change the last_modified_user if changes were made. 
""" - test_user = mommy.make(UserProfile, username='approve_test_user', groups=[Group.objects.get(name='Manager')]) + test_user = baker.make(UserProfile, username='approve_test_user', groups=[Group.objects.get(name='Manager')]) old_name_de = self.evaluation.name_de old_vote_start_datetime = self.evaluation.vote_start_datetime @@ -1287,7 +1287,7 @@ def test_last_modified_on_formset_change(self): self.assertEqual(self.evaluation.last_modified_user, self.user) last_modified_time_before = self.evaluation.last_modified_time - test_user = mommy.make( + test_user = baker.make( UserProfile, username='approve_test_user', groups=[Group.objects.get(name='Manager')] @@ -1311,7 +1311,7 @@ def test_last_modified_unchanged(self): last_modified_user_before = self.evaluation.last_modified_user last_modified_time_before = self.evaluation.last_modified_time - test_user = mommy.make( + test_user = baker.make( UserProfile, username='approve_test_user', groups=[Group.objects.get(name='Manager')] @@ -1333,20 +1333,20 @@ class TestSingleResultEditView(WebTestWith200Check): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - semester = mommy.make(Semester, pk=1) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + semester = baker.make(Semester, pk=1) - responsible = mommy.make(UserProfile) - evaluation = mommy.make(Evaluation, course=mommy.make(Course, semester=semester, responsibles=[responsible]), pk=1) - contribution = mommy.make(Contribution, evaluation=evaluation, contributor=responsible, can_edit=True, + responsible = baker.make(UserProfile) + evaluation = baker.make(Evaluation, course=baker.make(Course, semester=semester, responsibles=[responsible]), pk=1) + contribution = baker.make(Contribution, evaluation=evaluation, contributor=responsible, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS, questionnaires=[Questionnaire.single_result_questionnaire()]) question = Questionnaire.single_result_questionnaire().questions.get() - mommy.make(RatingAnswerCounter, question=question, contribution=contribution, answer=1, count=5) - mommy.make(RatingAnswerCounter, question=question, contribution=contribution, answer=2, count=15) - mommy.make(RatingAnswerCounter, question=question, contribution=contribution, answer=3, count=40) - mommy.make(RatingAnswerCounter, question=question, contribution=contribution, answer=4, count=60) - mommy.make(RatingAnswerCounter, question=question, contribution=contribution, answer=5, count=30) + baker.make(RatingAnswerCounter, question=question, contribution=contribution, answer=1, count=5) + baker.make(RatingAnswerCounter, question=question, contribution=contribution, answer=2, count=15) + baker.make(RatingAnswerCounter, question=question, contribution=contribution, answer=3, count=40) + baker.make(RatingAnswerCounter, question=question, contribution=contribution, answer=4, count=60) + baker.make(RatingAnswerCounter, question=question, contribution=contribution, answer=5, count=30) class TestEvaluationPreviewView(WebTestWith200Check): @@ -1355,10 +1355,10 @@ class TestEvaluationPreviewView(WebTestWith200Check): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - semester = mommy.make(Semester, pk=1) - evaluation = mommy.make(Evaluation, course=mommy.make(Course, semester=semester), pk=1) - evaluation.general_contribution.questionnaires.set([mommy.make(Questionnaire)]) + 
baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + semester = baker.make(Semester, pk=1) + evaluation = baker.make(Evaluation, course=baker.make(Course, semester=semester), pk=1) + evaluation.general_contribution.questionnaires.set([baker.make(Questionnaire)]) class TestEvaluationImportPersonsView(WebTest): @@ -1369,11 +1369,11 @@ class TestEvaluationImportPersonsView(WebTest): @classmethod def setUpTestData(cls): - semester = mommy.make(Semester, pk=1) - cls.manager_user = mommy.make(UserProfile, username="manager", groups=[Group.objects.get(name="Manager")]) - cls.evaluation = mommy.make(Evaluation, pk=1, course=mommy.make(Course, semester=semester)) - profiles = mommy.make(UserProfile, _quantity=42) - cls.evaluation2 = mommy.make(Evaluation, pk=2, course=mommy.make(Course, semester=semester), participants=profiles) + semester = baker.make(Semester, pk=1) + cls.manager_user = baker.make(UserProfile, username="manager", groups=[Group.objects.get(name="Manager")]) + cls.evaluation = baker.make(Evaluation, pk=1, course=baker.make(Course, semester=semester)) + profiles = baker.make(UserProfile, _quantity=42) + cls.evaluation2 = baker.make(Evaluation, pk=2, course=baker.make(Course, semester=semester), participants=profiles) @classmethod def tearDown(cls): @@ -1460,7 +1460,7 @@ def test_import_participants_warning_handling(self): """ Tests whether warnings given from the importer are displayed """ - mommy.make(UserProfile, email="[email protected]") + baker.make(UserProfile, email="[email protected]") page = self.app.get(self.url, user='manager') @@ -1491,7 +1491,7 @@ def test_import_contributors_warning_handling(self): """ Tests whether warnings given from the importer are displayed """ - mommy.make(UserProfile, email="[email protected]") + baker.make(UserProfile, email="[email protected]") page = self.app.get(self.url, user='manager') @@ -1556,11 +1556,11 @@ class TestEvaluationEmailView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - semester = mommy.make(Semester, pk=1) - participant1 = mommy.make(UserProfile, email="[email protected]") - participant2 = mommy.make(UserProfile, email="[email protected]") - mommy.make(Evaluation, pk=1, course=mommy.make(Course, semester=semester), participants=[participant1, participant2]) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + semester = baker.make(Semester, pk=1) + participant1 = baker.make(UserProfile, email="[email protected]") + participant2 = baker.make(UserProfile, email="[email protected]") + baker.make(Evaluation, pk=1, course=baker.make(Course, semester=semester), participants=[participant1, participant2]) def test_emails_are_sent(self): page = self.app.get(self.url, user="manager", status=200) @@ -1578,19 +1578,19 @@ class TestEvaluationTextAnswerView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - semester = mommy.make(Semester, pk=1) - student1 = mommy.make(UserProfile) - cls.student2 = mommy.make(UserProfile) - cls.evaluation = mommy.make(Evaluation, pk=1, course=mommy.make(Course, semester=semester), participants=[student1, cls.student2], voters=[student1], state="in_evaluation") - top_general_questionnaire = mommy.make(Questionnaire, type=Questionnaire.TOP) - mommy.make(Question, questionnaire=top_general_questionnaire, type=Question.LIKERT) + baker.make(UserProfile, 
username='manager', groups=[Group.objects.get(name='Manager')]) + semester = baker.make(Semester, pk=1) + student1 = baker.make(UserProfile) + cls.student2 = baker.make(UserProfile) + cls.evaluation = baker.make(Evaluation, pk=1, course=baker.make(Course, semester=semester), participants=[student1, cls.student2], voters=[student1], state="in_evaluation") + top_general_questionnaire = baker.make(Questionnaire, type=Questionnaire.TOP) + baker.make(Question, questionnaire=top_general_questionnaire, type=Question.LIKERT) cls.evaluation.general_contribution.questionnaires.set([top_general_questionnaire]) - questionnaire = mommy.make(Questionnaire) - question = mommy.make(Question, questionnaire=questionnaire, type=Question.TEXT) - contribution = mommy.make(Contribution, evaluation=cls.evaluation, contributor=mommy.make(UserProfile), questionnaires=[questionnaire]) + questionnaire = baker.make(Questionnaire) + question = baker.make(Question, questionnaire=questionnaire, type=Question.TEXT) + contribution = baker.make(Contribution, evaluation=cls.evaluation, contributor=baker.make(UserProfile), questionnaires=[questionnaire]) cls.answer = 'should show up' - mommy.make(TextAnswer, contribution=contribution, question=question, answer=cls.answer) + baker.make(TextAnswer, contribution=contribution, question=question, answer=cls.answer) def test_textanswers_showing_up(self): # in an evaluation with only one voter the view should not be available @@ -1618,18 +1618,18 @@ class TestEvaluationTextAnswerEditView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - semester = mommy.make(Semester, pk=1) - student1 = mommy.make(UserProfile) - cls.student2 = mommy.make(UserProfile) - cls.evaluation = mommy.make(Evaluation, pk=1, course=mommy.make(Course, semester=semester), participants=[student1, cls.student2], voters=[student1], state="in_evaluation") - top_general_questionnaire = mommy.make(Questionnaire, type=Questionnaire.TOP) - mommy.make(Question, questionnaire=top_general_questionnaire, type=Question.LIKERT) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + semester = baker.make(Semester, pk=1) + student1 = baker.make(UserProfile) + cls.student2 = baker.make(UserProfile) + cls.evaluation = baker.make(Evaluation, pk=1, course=baker.make(Course, semester=semester), participants=[student1, cls.student2], voters=[student1], state="in_evaluation") + top_general_questionnaire = baker.make(Questionnaire, type=Questionnaire.TOP) + baker.make(Question, questionnaire=top_general_questionnaire, type=Question.LIKERT) cls.evaluation.general_contribution.questionnaires.set([top_general_questionnaire]) - questionnaire = mommy.make(Questionnaire) - question = mommy.make(Question, questionnaire=questionnaire, type=Question.TEXT) - contribution = mommy.make(Contribution, evaluation=cls.evaluation, contributor=mommy.make(UserProfile), questionnaires=[questionnaire]) - mommy.make(TextAnswer, contribution=contribution, question=question, answer='test answer text', pk='00000000-0000-0000-0000-000000000001') + questionnaire = baker.make(Questionnaire) + question = baker.make(Question, questionnaire=questionnaire, type=Question.TEXT) + contribution = baker.make(Contribution, evaluation=cls.evaluation, contributor=baker.make(UserProfile), questionnaires=[questionnaire]) + baker.make(TextAnswer, contribution=contribution, question=question, answer='test answer text', 
pk='00000000-0000-0000-0000-000000000001') def test_textanswers_showing_up(self): # in an evaluation with only one voter the view should not be available @@ -1657,9 +1657,9 @@ class TestQuestionnaireNewVersionView(WebTest): def setUpTestData(cls): cls.name_de_orig = 'kurzer name' cls.name_en_orig = 'short name' - questionnaire = mommy.make(Questionnaire, id=2, name_de=cls.name_de_orig, name_en=cls.name_en_orig) - mommy.make(Question, questionnaire=questionnaire) - mommy.make(UserProfile, username="manager", groups=[Group.objects.get(name="Manager")]) + questionnaire = baker.make(Questionnaire, id=2, name_de=cls.name_de_orig, name_en=cls.name_en_orig) + baker.make(Question, questionnaire=questionnaire) + baker.make(UserProfile, username="manager", groups=[Group.objects.get(name="Manager")]) def test_changes_old_title(self): page = self.app.get(url=self.url, user='manager') @@ -1694,7 +1694,7 @@ class TestQuestionnaireCreateView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) def test_create_questionnaire(self): page = self.app.get(self.url, user="manager") @@ -1736,10 +1736,10 @@ class TestQuestionnaireIndexView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - cls.contributor_questionnaire = mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) - cls.top_questionnaire = mommy.make(Questionnaire, type=Questionnaire.TOP) - cls.bottom_questionnaire = mommy.make(Questionnaire, type=Questionnaire.BOTTOM) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + cls.contributor_questionnaire = baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) + cls.top_questionnaire = baker.make(Questionnaire, type=Questionnaire.TOP) + cls.bottom_questionnaire = baker.make(Questionnaire, type=Questionnaire.BOTTOM) def test_ordering(self): content = self.app.get(self.url, user="manager").body.decode() @@ -1756,12 +1756,12 @@ class TestQuestionnaireEditView(WebTestWith200Check): @classmethod def setUpTestData(cls): - evaluation = mommy.make(Evaluation, state='in_evaluation') - cls.questionnaire = mommy.make(Questionnaire, id=2) - mommy.make(Contribution, questionnaires=[cls.questionnaire], evaluation=evaluation) + evaluation = baker.make(Evaluation, state='in_evaluation') + cls.questionnaire = baker.make(Questionnaire, id=2) + baker.make(Contribution, questionnaires=[cls.questionnaire], evaluation=evaluation) - mommy.make(Question, questionnaire=cls.questionnaire) - mommy.make(UserProfile, username="manager", groups=[Group.objects.get(name="Manager")]) + baker.make(Question, questionnaire=cls.questionnaire) + baker.make(UserProfile, username="manager", groups=[Group.objects.get(name="Manager")]) def test_allowed_type_changes_on_used_questionnaire(self): # top to bottom @@ -1795,11 +1795,11 @@ class TestQuestionnaireViewView(WebTestWith200Check): @classmethod def setUpTestData(cls): - questionnaire = mommy.make(Questionnaire, id=2) - mommy.make(Question, questionnaire=questionnaire, type=Question.TEXT) - mommy.make(Question, questionnaire=questionnaire, type=Question.GRADE) - mommy.make(Question, questionnaire=questionnaire, type=Question.LIKERT) - mommy.make(UserProfile, username="manager", groups=[Group.objects.get(name="Manager")]) + questionnaire = baker.make(Questionnaire, id=2) + baker.make(Question, 
questionnaire=questionnaire, type=Question.TEXT) + baker.make(Question, questionnaire=questionnaire, type=Question.GRADE) + baker.make(Question, questionnaire=questionnaire, type=Question.LIKERT) + baker.make(UserProfile, username="manager", groups=[Group.objects.get(name="Manager")]) class TestQuestionnaireCopyView(WebTest): @@ -1807,9 +1807,9 @@ class TestQuestionnaireCopyView(WebTest): @classmethod def setUpTestData(cls): - questionnaire = mommy.make(Questionnaire, id=2) - mommy.make(Question, questionnaire=questionnaire) - mommy.make(UserProfile, username="manager", groups=[Group.objects.get(name="Manager")]) + questionnaire = baker.make(Questionnaire, id=2) + baker.make(Question, questionnaire=questionnaire) + baker.make(UserProfile, username="manager", groups=[Group.objects.get(name="Manager")]) def test_not_changing_name_fails(self): response = self.app.get(self.url, user="manager", status=200) @@ -1836,10 +1836,10 @@ class TestQuestionnaireDeletionView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - cls.q1 = mommy.make(Questionnaire) - cls.q2 = mommy.make(Questionnaire) - mommy.make(Contribution, questionnaires=[cls.q1]) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + cls.q1 = baker.make(Questionnaire) + cls.q2 = baker.make(Questionnaire) + baker.make(Contribution, questionnaires=[cls.q1]) def test_questionnaire_deletion(self): """ @@ -1862,7 +1862,7 @@ class TestCourseTypeView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) def test_page_displays_something(self): CourseType.objects.create(name_de='uZJcsl0rNc', name_en='uZJcsl0rNc') @@ -1889,9 +1889,9 @@ class TestCourseTypeMergeSelectionView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - cls.main_type = mommy.make(CourseType, name_en="A course type") - cls.other_type = mommy.make(CourseType, name_en="Obsolete course type") + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + cls.main_type = baker.make(CourseType, name_en="A course type") + cls.other_type = baker.make(CourseType, name_en="Obsolete course type") def test_same_evaluation_fails(self): page = self.app.get(self.url, user="manager", status=200) @@ -1907,11 +1907,11 @@ class TestCourseTypeMergeView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - cls.main_type = mommy.make(CourseType, pk=1, name_en="A course type") - cls.other_type = mommy.make(CourseType, pk=2, name_en="Obsolete course type") - mommy.make(Course, type=cls.main_type) - mommy.make(Course, type=cls.other_type) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + cls.main_type = baker.make(CourseType, pk=1, name_en="A course type") + cls.other_type = baker.make(CourseType, pk=2, name_en="Obsolete course type") + baker.make(Course, type=cls.main_type) + baker.make(Course, type=cls.other_type) def test_merge_works(self): page = self.app.get(self.url, user="manager", status=200) @@ -1931,16 +1931,16 @@ class TestEvaluationTextAnswersUpdatePublishView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username="manager.user", 
groups=[Group.objects.get(name="Manager")]) - cls.student1 = mommy.make(UserProfile) - cls.student2 = mommy.make(UserProfile) - cls.evaluation = mommy.make(Evaluation, participants=[cls.student1, cls.student2], voters=[cls.student1], state="in_evaluation") - top_general_questionnaire = mommy.make(Questionnaire, type=Questionnaire.TOP) - mommy.make(Question, questionnaire=top_general_questionnaire, type=Question.LIKERT) + baker.make(UserProfile, username="manager.user", groups=[Group.objects.get(name="Manager")]) + cls.student1 = baker.make(UserProfile) + cls.student2 = baker.make(UserProfile) + cls.evaluation = baker.make(Evaluation, participants=[cls.student1, cls.student2], voters=[cls.student1], state="in_evaluation") + top_general_questionnaire = baker.make(Questionnaire, type=Questionnaire.TOP) + baker.make(Question, questionnaire=top_general_questionnaire, type=Question.LIKERT) cls.evaluation.general_contribution.questionnaires.set([top_general_questionnaire]) def helper(self, old_state, expected_new_state, action, expect_errors=False): - textanswer = mommy.make(TextAnswer, state=old_state) + textanswer = baker.make(TextAnswer, state=old_state) response = self.app.post(self.url, params={"id": textanswer.id, "action": action, "evaluation_id": self.evaluation.pk}, user="manager.user", expect_errors=expect_errors) if expect_errors: self.assertEqual(response.status_code, 403) @@ -1965,13 +1965,13 @@ def test_review_actions(self): class ParticipationArchivingTests(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username="manager", groups=[Group.objects.get(name="Manager")]) + baker.make(UserProfile, username="manager", groups=[Group.objects.get(name="Manager")]) def test_raise_403(self): """ Tests whether inaccessible views on semesters/evaluations with archived participations correctly raise a 403. 
""" - semester = mommy.make(Semester, participations_are_archived=True) + semester = baker.make(Semester, participations_are_archived=True) semester_url = "/staff/semester/{}/".format(semester.pk) @@ -1986,7 +1986,7 @@ class TestTemplateEditView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) def test_emailtemplate(self): """ @@ -2010,7 +2010,7 @@ class TestDegreeView(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) def test_degree_form(self): """ @@ -2032,18 +2032,18 @@ class TestSemesterQuestionnaireAssignment(WebTest): @classmethod def setUpTestData(cls): - mommy.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) - cls.semester = mommy.make(Semester, id=1) - cls.course_type_1 = mommy.make(CourseType) - cls.course_type_2 = mommy.make(CourseType) - cls.responsible = mommy.make(UserProfile) - cls.questionnaire_1 = mommy.make(Questionnaire, type=Questionnaire.TOP) - cls.questionnaire_2 = mommy.make(Questionnaire, type=Questionnaire.TOP) - cls.questionnaire_responsible = mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) - cls.evaluation_1 = mommy.make(Evaluation, course=mommy.make(Course, semester=cls.semester, type=cls.course_type_1, responsibles=[cls.responsible])) - cls.evaluation_2 = mommy.make(Evaluation, course=mommy.make(Course, semester=cls.semester, type=cls.course_type_2, responsibles=[cls.responsible])) - mommy.make(Contribution, contributor=cls.responsible, evaluation=cls.evaluation_1, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) - mommy.make(Contribution, contributor=cls.responsible, evaluation=cls.evaluation_2, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) + baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) + cls.semester = baker.make(Semester, id=1) + cls.course_type_1 = baker.make(CourseType) + cls.course_type_2 = baker.make(CourseType) + cls.responsible = baker.make(UserProfile) + cls.questionnaire_1 = baker.make(Questionnaire, type=Questionnaire.TOP) + cls.questionnaire_2 = baker.make(Questionnaire, type=Questionnaire.TOP) + cls.questionnaire_responsible = baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) + cls.evaluation_1 = baker.make(Evaluation, course=baker.make(Course, semester=cls.semester, type=cls.course_type_1, responsibles=[cls.responsible])) + cls.evaluation_2 = baker.make(Evaluation, course=baker.make(Course, semester=cls.semester, type=cls.course_type_2, responsibles=[cls.responsible])) + baker.make(Contribution, contributor=cls.responsible, evaluation=cls.evaluation_1, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) + baker.make(Contribution, contributor=cls.responsible, evaluation=cls.evaluation_2, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS) def test_questionnaire_assignment(self): page = self.app.get(self.url, user="manager", status=200) diff --git a/evap/student/tests/test_views.py b/evap/student/tests/test_views.py --- a/evap/student/tests/test_views.py +++ b/evap/student/tests/test_views.py @@ -2,7 +2,7 @@ from django.urls import reverse from django_webtest import WebTest -from model_mommy import mommy +from model_bakery 
import baker from evap.evaluation.models import UserProfile, Evaluation, Questionnaire, Question, Contribution, TextAnswer, RatingAnswerCounter from evap.evaluation.tests.tools import WebTestWith200Check @@ -16,8 +16,8 @@ class TestStudentIndexView(WebTestWith200Check): def setUp(self): # View is only visible to users participating in at least one evaluation. - user = mommy.make(UserProfile, username='student') - mommy.make(Evaluation, participants=[user]) + user = baker.make(UserProfile, username='student') + baker.make(Evaluation, participants=[user]) @override_settings(INSTITUTION_EMAIL_DOMAINS=["example.com"]) @@ -26,34 +26,34 @@ class TestVoteView(WebTest): @classmethod def setUpTestData(cls): - cls.voting_user1 = mommy.make(UserProfile) - cls.voting_user2 = mommy.make(UserProfile) - cls.contributor1 = mommy.make(UserProfile) - cls.contributor2 = mommy.make(UserProfile) + cls.voting_user1 = baker.make(UserProfile) + cls.voting_user2 = baker.make(UserProfile) + cls.contributor1 = baker.make(UserProfile) + cls.contributor2 = baker.make(UserProfile) - cls.evaluation = mommy.make(Evaluation, pk=1, participants=[cls.voting_user1, cls.voting_user2, cls.contributor1], state="in_evaluation") + cls.evaluation = baker.make(Evaluation, pk=1, participants=[cls.voting_user1, cls.voting_user2, cls.contributor1], state="in_evaluation") - cls.top_general_questionnaire = mommy.make(Questionnaire, type=Questionnaire.TOP) - cls.bottom_general_questionnaire = mommy.make(Questionnaire, type=Questionnaire.BOTTOM) - cls.contributor_questionnaire = mommy.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) + cls.top_general_questionnaire = baker.make(Questionnaire, type=Questionnaire.TOP) + cls.bottom_general_questionnaire = baker.make(Questionnaire, type=Questionnaire.BOTTOM) + cls.contributor_questionnaire = baker.make(Questionnaire, type=Questionnaire.CONTRIBUTOR) - cls.contributor_heading_question = mommy.make(Question, questionnaire=cls.contributor_questionnaire, order=0, type=Question.HEADING) - cls.contributor_text_question = mommy.make(Question, questionnaire=cls.contributor_questionnaire, order=1, type=Question.TEXT) - cls.contributor_likert_question = mommy.make(Question, questionnaire=cls.contributor_questionnaire, order=2, type=Question.LIKERT) + cls.contributor_heading_question = baker.make(Question, questionnaire=cls.contributor_questionnaire, order=0, type=Question.HEADING) + cls.contributor_text_question = baker.make(Question, questionnaire=cls.contributor_questionnaire, order=1, type=Question.TEXT) + cls.contributor_likert_question = baker.make(Question, questionnaire=cls.contributor_questionnaire, order=2, type=Question.LIKERT) - cls.top_heading_question = mommy.make(Question, questionnaire=cls.top_general_questionnaire, order=0, type=Question.HEADING) - cls.top_text_question = mommy.make(Question, questionnaire=cls.top_general_questionnaire, order=1, type=Question.TEXT) - cls.top_likert_question = mommy.make(Question, questionnaire=cls.top_general_questionnaire, order=2, type=Question.LIKERT) - cls.top_grade_question = mommy.make(Question, questionnaire=cls.top_general_questionnaire, order=3, type=Question.GRADE) + cls.top_heading_question = baker.make(Question, questionnaire=cls.top_general_questionnaire, order=0, type=Question.HEADING) + cls.top_text_question = baker.make(Question, questionnaire=cls.top_general_questionnaire, order=1, type=Question.TEXT) + cls.top_likert_question = baker.make(Question, questionnaire=cls.top_general_questionnaire, order=2, type=Question.LIKERT) + 
cls.top_grade_question = baker.make(Question, questionnaire=cls.top_general_questionnaire, order=3, type=Question.GRADE) - cls.bottom_heading_question = mommy.make(Question, questionnaire=cls.bottom_general_questionnaire, order=0, type=Question.HEADING) - cls.bottom_text_question = mommy.make(Question, questionnaire=cls.bottom_general_questionnaire, order=1, type=Question.TEXT) - cls.bottom_likert_question = mommy.make(Question, questionnaire=cls.bottom_general_questionnaire, order=2, type=Question.LIKERT) - cls.bottom_grade_question = mommy.make(Question, questionnaire=cls.bottom_general_questionnaire, order=3, type=Question.GRADE) + cls.bottom_heading_question = baker.make(Question, questionnaire=cls.bottom_general_questionnaire, order=0, type=Question.HEADING) + cls.bottom_text_question = baker.make(Question, questionnaire=cls.bottom_general_questionnaire, order=1, type=Question.TEXT) + cls.bottom_likert_question = baker.make(Question, questionnaire=cls.bottom_general_questionnaire, order=2, type=Question.LIKERT) + cls.bottom_grade_question = baker.make(Question, questionnaire=cls.bottom_general_questionnaire, order=3, type=Question.GRADE) - cls.contribution1 = mommy.make(Contribution, contributor=cls.contributor1, questionnaires=[cls.contributor_questionnaire], + cls.contribution1 = baker.make(Contribution, contributor=cls.contributor1, questionnaires=[cls.contributor_questionnaire], evaluation=cls.evaluation) - cls.contribution2 = mommy.make(Contribution, contributor=cls.contributor2, questionnaires=[cls.contributor_questionnaire], + cls.contribution2 = baker.make(Contribution, contributor=cls.contributor2, questionnaires=[cls.contributor_questionnaire], evaluation=cls.evaluation) cls.evaluation.general_contribution.questionnaires.set([cls.top_general_questionnaire, cls.bottom_general_questionnaire])
Migrate from Model Mommy to Model Bakery

Model Mommy is no longer maintained and was replaced by Model Bakery. We should [migrate to Model Bakery](https://model-bakery.readthedocs.io/en/latest/migrating_from_mommy.html).
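The rename is mechanical: the `model_mommy` package and its `mommy` factory become `model_bakery` and `baker`, while the `make`/`prepare` call signatures stay the same, as the test diffs above show throughout. A minimal before/after sketch (the `UserProfile` example mirrors the tests above and assumes the EvaP Django environment is set up):

```python
# Before the migration (model_mommy):
#   from model_mommy import mommy
#   user = mommy.make(UserProfile, username="student")

# After the migration (model_bakery) -- only the import and the factory object change:
from model_bakery import baker

from evap.evaluation.models import UserProfile

user = baker.make(UserProfile, username="student")
```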
2020-01-06T18:38:21
e-valuation/EvaP
1403
e-valuation__EvaP-1403
[ "1390" ]
a14ec3e6867c0d08a23890f94d548bfc7dd3b0fa
diff --git a/evap/staff/importers.py b/evap/staff/importers.py --- a/evap/staff/importers.py +++ b/evap/staff/importers.py @@ -278,9 +278,10 @@ def consolidate_enrollment_data(self): def check_evaluation_data_correctness(self, semester): for evaluation_data in self.evaluations.values(): - already_exists = Evaluation.objects.filter(course__semester=semester, name_de=evaluation_data.name_de).exists() - if already_exists: + if Course.objects.filter(semester=semester, name_en=evaluation_data.name_en).exists(): self.errors.append(_("Course {} does already exist in this semester.").format(evaluation_data.name_en)) + if Course.objects.filter(semester=semester, name_de=evaluation_data.name_de).exists(): + self.errors.append(_("Course {} does already exist in this semester.").format(evaluation_data.name_de)) degree_names = set() for evaluation_data in self.evaluations.values():
diff --git a/evap/staff/tests/test_importers.py b/evap/staff/tests/test_importers.py --- a/evap/staff/tests/test_importers.py +++ b/evap/staff/tests/test_importers.py @@ -220,6 +220,19 @@ def test_invalid_file_error(self): self.assertIn('Errors occurred while parsing the input data. No data was imported.', errors_test) self.assertEqual(UserProfile.objects.count(), original_user_count) + def test_duplicate_course_error(self): + with open(self.filename_valid, "rb") as excel_file: + excel_content = excel_file.read() + + semester = baker.make(Semester) + baker.make(Course, name_de="Stehlen", name_en="Stehlen", semester=semester) + baker.make(Course, name_de="Shine", name_en="Shine", semester=semester) + + __, __, errors = EnrollmentImporter.process(excel_content, semester, None, None, test_run=False) + + self.assertIn("Course Stehlen does already exist in this semester.", errors) + self.assertIn("Course Shine does already exist in this semester.", errors) + class TestPersonImporter(TestCase): @classmethod
Error message if course already exists on import

When importing enrollment data including at least one course with a name that already exists for this semester in the database, the importer fails silently. This shouldn't be the case. The test import should display an error message if a course with the same (German or English) name already exists in the current semester. There should also be a test for that.
In EnrollmentImporter.check_evaluation_data_correctness(), there is this line:

```python
already_exists = Evaluation.objects.filter(course__semester=semester, name_de=evaluation_data.name_de).exists()
```

It only checks for German names, maybe this has something to do with it.
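The patch above fixes exactly this by checking the `Course` table for both language variants of the name. A condensed sketch of the corrected check (the translation import is an assumption added for self-containedness; the real method performs further checks afterwards):

```python
from django.utils.translation import gettext as _

from evap.evaluation.models import Course


def check_evaluation_data_correctness(self, semester):
    for evaluation_data in self.evaluations.values():
        # Report a duplicate for either the English or the German course name.
        if Course.objects.filter(semester=semester, name_en=evaluation_data.name_en).exists():
            self.errors.append(_("Course {} does already exist in this semester.").format(evaluation_data.name_en))
        if Course.objects.filter(semester=semester, name_de=evaluation_data.name_de).exists():
            self.errors.append(_("Course {} does already exist in this semester.").format(evaluation_data.name_de))
```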
2020-01-06T19:18:58
e-valuation/EvaP
1416
e-valuation__EvaP-1416
[ "1338" ]
4862017e110166c1e49b6420ceec241c8bf90d90
diff --git a/evap/contributor/views.py b/evap/contributor/views.py --- a/evap/contributor/views.py +++ b/evap/contributor/views.py @@ -221,7 +221,7 @@ def evaluation_direct_delegation(request, evaluation_id): # we don't provide the request here since send_to_user only uses it to display a warning message in case the user does not have # an email address. In this special case, we don't want that warning. Instead, we want a mail to the admins. - template.send_to_user(delegate_user, subject_params, body_params, use_cc=True, additional_cc_user=request.user) + template.send_to_user(delegate_user, subject_params, body_params, use_cc=True, additional_cc_users=[request.user]) messages.add_message( request, diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -1503,7 +1503,7 @@ def send_to_users_in_evaluations(self, evaluations, recipient_groups, use_cc, re body_params = {'user': user, 'evaluations': user_evaluations, 'due_evaluations': user.get_sorted_due_evaluations()} self.send_to_user(user, subject_params, body_params, use_cc=use_cc, request=request) - def send_to_user(self, user, subject_params, body_params, use_cc, additional_cc_user=None, request=None): + def send_to_user(self, user, subject_params, body_params, use_cc, additional_cc_users=(), request=None): if not user.email: warning_message = "{} has no email address defined. Could not send email.".format(user.username) # If this method is triggered by a cronjob changing evaluation states, the request is None. @@ -1516,16 +1516,11 @@ def send_to_user(self, user, subject_params, body_params, use_cc, additional_cc_ logger.error(warning_message) return - cc_users = set() - - if additional_cc_user: - cc_users.add(additional_cc_user) + cc_users = set(additional_cc_users) if use_cc: - cc_users |= set(user.delegates.all() | user.cc_users.all()) - - if additional_cc_user: - cc_users |= set(additional_cc_user.delegates.all() | additional_cc_user.cc_users.all()) + users = {user, *additional_cc_users} + cc_users |= set(UserProfile.objects.filter(Q(represented_users__in=users) | Q(ccing_users__in=users))) cc_addresses = [p.email for p in cc_users if p.email] diff --git a/evap/staff/views.py b/evap/staff/views.py --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -220,7 +220,18 @@ def apply(request, evaluations, email_template=None, email_template_contributor= messages.success(request, ungettext("Successfully enabled {} evaluation for editor review.", "Successfully enabled {} evaluations for editor review.", len(evaluations)).format(len(evaluations))) if email_template: - email_template.send_to_users_in_evaluations(evaluations, [EmailTemplate.EDITORS], use_cc=True, request=request) + evaluations_by_responsible = {} + for evaluation in evaluations: + for responsible in evaluation.course.responsibles.all(): + evaluations_by_responsible.setdefault(responsible, []).append(evaluation) + + for responsible, responsible_evaluations in evaluations_by_responsible.items(): + body_params = {'user': responsible, 'evaluations': responsible_evaluations} + editors = UserProfile.objects \ + .filter(contributions__evaluation__in=responsible_evaluations, contributions__can_edit=True) \ + .exclude(pk=responsible.pk) + email_template.send_to_user(responsible, subject_params={}, body_params=body_params, + use_cc=True, additional_cc_users=editors, request=request) class StartEvaluationOperation(EvaluationOperation):
diff --git a/evap/evaluation/tests/test_models.py b/evap/evaluation/tests/test_models.py --- a/evap/evaluation/tests/test_models.py +++ b/evap/evaluation/tests/test_models.py @@ -538,7 +538,14 @@ def test_login_url_when_use_cc_is_false(self): self.assertEqual(mail.outbox[0].body, self.user.login_url) # message does contain the login url +@override_settings(INSTITUTION_EMAIL_DOMAINS=["example.com"]) class TestEmailTemplate(TestCase): + @classmethod + def setUpTestData(cls): + cls.user = baker.make(UserProfile, email='[email protected]') + cls.additional_cc = baker.make(UserProfile, email='[email protected]') + cls.template = EmailTemplate.objects.get(name=EmailTemplate.EDITOR_REVIEW_NOTICE) + @staticmethod def test_missing_email_address(): """ @@ -549,6 +556,68 @@ def test_missing_email_address(): template = EmailTemplate.objects.get(name=EmailTemplate.STUDENT_REMINDER) template.send_to_user(user, {}, {}, False, None) + def test_put_delegates_in_cc(self): + delegate_a = baker.make(UserProfile, email='[email protected]') + delegate_b = baker.make(UserProfile, email='[email protected]') + self.user.delegates.add(delegate_a, delegate_b) + self.template.send_to_user(self.user, {}, {}, use_cc=True) + + self.assertEqual(len(mail.outbox), 1) + self.assertEqual(set(mail.outbox[0].cc), {delegate_a.email, delegate_b.email}) + + def test_put_cc_users_in_cc(self): + cc_a = baker.make(UserProfile, email='[email protected]') + cc_b = baker.make(UserProfile, email='[email protected]') + self.user.cc_users.add(cc_a, cc_b) + self.template.send_to_user(self.user, {}, {}, use_cc=True) + + self.assertEqual(len(mail.outbox), 1) + self.assertEqual(set(mail.outbox[0].cc), {cc_a.email, cc_b.email}) + + def test_put_additional_cc_users_in_cc(self): + additional_cc_b = baker.make(UserProfile, email='[email protected]') + self.template.send_to_user(self.user, {}, {}, use_cc=True, + additional_cc_users=[self.additional_cc, additional_cc_b]) + + self.assertEqual(len(mail.outbox), 1) + self.assertEqual(set(mail.outbox[0].cc), {self.additional_cc.email, additional_cc_b.email}) + + def test_put_delegates_of_additional_cc_user_in_cc(self): + additional_delegate_a = baker.make(UserProfile, email='[email protected]') + additional_delegate_b = baker.make(UserProfile, email='[email protected]') + self.additional_cc.delegates.add(additional_delegate_a, additional_delegate_b) + self.template.send_to_user(self.user, {}, {}, use_cc=True, additional_cc_users=[self.additional_cc]) + + self.assertEqual(len(mail.outbox), 1) + self.assertEqual(set(mail.outbox[0].cc), + {self.additional_cc.email, additional_delegate_a.email, additional_delegate_b.email}) + + def test_cc_does_not_contain_duplicates(self): + user_a = baker.make(UserProfile, email='[email protected]') + user_b = baker.make(UserProfile, email='[email protected]') + user_c = baker.make(UserProfile, email='[email protected]') + self.user.delegates.add(user_a) + self.user.cc_users.add(self.additional_cc, user_b) + self.additional_cc.delegates.add(user_b, user_c) + self.additional_cc.cc_users.add(user_a, user_c) + self.template.send_to_user(self.user, {}, {}, use_cc=True, additional_cc_users=[self.additional_cc]) + + self.assertEqual(len(mail.outbox), 1) + self.assertEqual(len(mail.outbox[0].cc), 4) + self.assertEqual(set(mail.outbox[0].cc), {self.additional_cc.email, user_a.email, user_b.email, user_c.email}) + + def test_disable_cc(self): + delegate = baker.make(UserProfile, email='[email protected]') + cc_user = baker.make(UserProfile, email='[email protected]') + 
self.user.delegates.add(delegate) + self.user.cc_users.add(cc_user) + self.additional_cc.delegates.add(delegate) + self.additional_cc.cc_users.add(cc_user) + self.template.send_to_user(self.user, {}, {}, use_cc=False, additional_cc_users=[self.additional_cc]) + + self.assertEqual(len(mail.outbox), 1) + self.assertEqual(set(mail.outbox[0].cc), {self.additional_cc.email}) + class TestEmailRecipientList(TestCase): def test_recipient_list(self): diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -860,6 +860,8 @@ class TestEvaluationOperationView(WebTest): def setUpTestData(cls): baker.make(UserProfile, username='manager', groups=[Group.objects.get(name='Manager')]) cls.semester = baker.make(Semester, pk=1) + cls.responsible = baker.make(UserProfile, email='[email protected]') + cls.course = baker.make(Course, semester=cls.semester, responsibles=[cls.responsible]) def helper_publish_evaluation_with_publish_notifications_for(self, evaluation, contributors=True, participants=True): page = self.app.get("/staff/semester/1", user="manager") @@ -881,8 +883,7 @@ def test_publish_notifications(self): participant2 = baker.make(UserProfile, email="[email protected]") contributor1 = baker.make(UserProfile, email="[email protected]") - course = baker.make(Course, semester=self.semester) - evaluation = baker.make(Evaluation, course=course, state='reviewed', + evaluation = baker.make(Evaluation, course=self.course, state='reviewed', participants=[participant1, participant2], voters=[participant1, participant2]) baker.make(Contribution, contributor=contributor1, evaluation=evaluation) @@ -926,35 +927,34 @@ def helper_semester_state_views(self, evaluation, old_state, new_state): def test_semester_publish(self): participant1 = baker.make(UserProfile, email="[email protected]") participant2 = baker.make(UserProfile, email="[email protected]") - course = baker.make(Course, semester=self.semester) - evaluation = baker.make(Evaluation, course=course, state='reviewed', + evaluation = baker.make(Evaluation, course=self.course, state='reviewed', participants=[participant1, participant2], voters=[participant1, participant2]) self.helper_semester_state_views(evaluation, "reviewed", "published") self.assertEqual(len(mail.outbox), 2) def test_semester_reset_1(self): - evaluation = baker.make(Evaluation, course=baker.make(Course, semester=self.semester), state='prepared') + evaluation = baker.make(Evaluation, course=self.course, state='prepared') self.helper_semester_state_views(evaluation, "prepared", "new") def test_semester_reset_2(self): - evaluation = baker.make(Evaluation, course=baker.make(Course, semester=self.semester), state='approved') + evaluation = baker.make(Evaluation, course=self.course, state='approved') self.helper_semester_state_views(evaluation, "approved", "new") def test_semester_contributor_ready_1(self): - evaluation = baker.make(Evaluation, course=baker.make(Course, semester=self.semester), state='new') + evaluation = baker.make(Evaluation, course=self.course, state='new') self.helper_semester_state_views(evaluation, "new", "prepared") def test_semester_contributor_ready_2(self): - evaluation = baker.make(Evaluation, course=baker.make(Course, semester=self.semester), state='editor_approved') + evaluation = baker.make(Evaluation, course=self.course, state='editor_approved') self.helper_semester_state_views(evaluation, "editor_approved", "prepared") def test_semester_unpublish(self): - evaluation = 
baker.make(Evaluation, course=baker.make(Course, semester=self.semester), state='published', _participant_count=0, _voter_count=0) + evaluation = baker.make(Evaluation, course=self.course, state='published', _participant_count=0, _voter_count=0) self.helper_semester_state_views(evaluation, "published", "reviewed") def test_operation_start_evaluation(self): - evaluation = baker.make(Evaluation, state='approved', course=baker.make(Course, semester=self.semester)) + evaluation = baker.make(Evaluation, state='approved', course=self.course) urloptions = '?evaluation={}&target_state=in_evaluation'.format(evaluation.pk) response = self.app.get(self.url + urloptions, user='manager') @@ -967,18 +967,119 @@ def test_operation_start_evaluation(self): self.assertEqual(evaluation.state, 'in_evaluation') def test_operation_prepare(self): - evaluation = baker.make(Evaluation, state='new', course=baker.make(Course, semester=self.semester)) + evaluation = baker.make(Evaluation, state='new', course=self.course) urloptions = '?evaluation={}&target_state=prepared'.format(evaluation.pk) response = self.app.get(self.url + urloptions, user='manager') self.assertEqual(response.status_code, 200, 'url "{}" failed with user "manager"'.format(self.url)) - form = response.forms['evaluation-operation-form'] form.submit() evaluation = Evaluation.objects.get(pk=evaluation.pk) self.assertEqual(evaluation.state, 'prepared') + def submit_operation_prepare_form(self, url_options): + actual_emails = [] + + def mock(email_template, user, subject_params, body_params, use_cc, additional_cc_users=None, request=None): + actual_emails.append({ + 'user': user, + 'subject': email_template.subject, + 'subject_params': subject_params, + 'body': email_template.body, + 'body_params': body_params, + 'use_cc': use_cc, + 'additional_cc_users': set(additional_cc_users), + }) + + response = self.app.get(self.url + url_options, user='manager') + form = response.forms['evaluation-operation-form'] + form['send_email'] = True + form['email_subject'] = 'New evaluations ready for review' + form['email_body'] = 'There are evaluations that need your approval.' 
+ + with patch.object(EmailTemplate, 'send_to_user', mock): + form.submit() + + return actual_emails + + def test_operation_prepare_sends_email_to_responsible(self): + evaluation = baker.make(Evaluation, state='new', course=self.course) + url_options = '?evaluation={}&target_state=prepared'.format(evaluation.pk) + actual_emails = self.submit_operation_prepare_form(url_options) + + self.assertEqual(actual_emails, [{ + 'user': self.responsible, + 'subject': 'New evaluations ready for review', + 'subject_params': {}, + 'body': 'There are evaluations that need your approval.', + 'body_params': {'user': self.responsible, 'evaluations': [evaluation]}, + 'use_cc': True, + 'additional_cc_users': set(), + }]) + + def test_operation_prepare_sends_one_email_to_each_responsible(self): + other_responsible = baker.make(UserProfile, email='[email protected]') + self.course.responsibles.add(other_responsible) + evaluation = baker.make(Evaluation, state='new', course=self.course) + url_options = '?evaluation={}&target_state=prepared'.format(evaluation.pk) + actual_emails = self.submit_operation_prepare_form(url_options) + + self.assertEqual(len(actual_emails), 2) + + email_to_responsible = next(email for email in actual_emails if email['user'] == self.responsible) + self.assertEqual(email_to_responsible['body_params'], {'user': self.responsible, 'evaluations': [evaluation]}) + + email_to_other_responsible = next(email for email in actual_emails if email['user'] == other_responsible) + self.assertEqual(email_to_other_responsible['body_params'], {'user': other_responsible, 'evaluations': [evaluation]}) + + def test_operation_prepare_with_multiple_evaluations(self): + responsible_b = baker.make(UserProfile, email='[email protected]') + course_b = baker.make(Course, semester=self.semester, responsibles=[responsible_b]) + evaluation_a = baker.make(Evaluation, state='new', course=self.course) + evaluation_b = baker.make(Evaluation, state='new', course=course_b) + url_options = '?evaluation={}&evaluation={}&target_state=prepared'.format(evaluation_a.pk, evaluation_b.pk) + actual_emails = self.submit_operation_prepare_form(url_options) + + self.assertEqual(len(actual_emails), 2) + + email_to_responsible = next(email for email in actual_emails if email['user'] == self.responsible) + self.assertEqual(email_to_responsible['body_params'], {'user': self.responsible, 'evaluations': [evaluation_a]}) + + email_to_responsible_b = next(email for email in actual_emails if email['user'] == responsible_b) + self.assertEqual(email_to_responsible_b['body_params'], {'user': responsible_b, 'evaluations': [evaluation_b]}) + + def test_operation_prepare_sends_email_with_editors_in_cc(self): + editor_a = baker.make(UserProfile, email='[email protected]') + editor_b = baker.make(UserProfile, email='[email protected]') + evaluation = baker.make(Evaluation, state='new', course=self.course) + baker.make(Contribution, evaluation=evaluation, contributor=editor_a, can_edit=True) + baker.make(Contribution, evaluation=evaluation, contributor=editor_b, can_edit=True) + url_options = '?evaluation={}&target_state=prepared'.format(evaluation.pk) + actual_emails = self.submit_operation_prepare_form(url_options) + + self.assertEqual(len(actual_emails), 1) + self.assertEqual(actual_emails[0]['additional_cc_users'], {editor_a, editor_b}) + + def test_operation_prepare_does_not_put_responsible_into_cc(self): + evaluation = baker.make(Evaluation, state='new', course=self.course) + baker.make(Contribution, evaluation=evaluation, 
contributor=self.responsible, can_edit=True) + url_options = '?evaluation={}&target_state=prepared'.format(evaluation.pk) + actual_emails = self.submit_operation_prepare_form(url_options) + + self.assertEqual(len(actual_emails), 1) + self.assertEqual(actual_emails[0]['additional_cc_users'], set()) + + def test_operation_prepare_does_not_send_email_to_contributors(self): + contributor = baker.make(UserProfile, email='[email protected]') + evaluation = baker.make(Evaluation, state='new', course=self.course) + baker.make(Contribution, evaluation=evaluation, contributor=contributor, can_edit=False) + url_options = '?evaluation={}&target_state=prepared'.format(evaluation.pk) + actual_emails = self.submit_operation_prepare_form(url_options) + + self.assertEqual(len(actual_emails), 1) + self.assertEqual(actual_emails[0]['additional_cc_users'], set()) + class TestCourseCreateView(WebTest): url = '/staff/semester/1/course/create'
Send editor review notice to responsible with others in CC

When sending an editor review notice email to all editors of an evaluation, this message should be addressed to the responsible(s) and CC'ed to all other editors. Currently the message is sent to each editor individually.
In theory, splitting the notifications up per evaluation could send too many emails when evaluations have different editors. In practice this won't happen: after import, evaluations have no editors other than the responsible, so the situation only occurs for manually edited evaluations, which shouldn't be many.
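The patch above implements this by grouping the selected evaluations by responsible and sending a single notice per responsible, with all other editors of those evaluations in CC. A condensed sketch of that logic (the wrapper function `notify_responsibles` is only an illustrative name; the body mirrors the diff):

```python
from evap.evaluation.models import UserProfile


def notify_responsibles(evaluations, email_template, request=None):
    """Send one editor-review notice per responsible, CCing the other editors of their evaluations."""
    evaluations_by_responsible = {}
    for evaluation in evaluations:
        for responsible in evaluation.course.responsibles.all():
            evaluations_by_responsible.setdefault(responsible, []).append(evaluation)

    for responsible, responsible_evaluations in evaluations_by_responsible.items():
        editors = (UserProfile.objects
                   .filter(contributions__evaluation__in=responsible_evaluations, contributions__can_edit=True)
                   .exclude(pk=responsible.pk))
        email_template.send_to_user(responsible, subject_params={},
                                    body_params={'user': responsible, 'evaluations': responsible_evaluations},
                                    use_cc=True, additional_cc_users=editors, request=request)
```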
2020-01-31T15:16:43
e-valuation/EvaP
1420
e-valuation__EvaP-1420
[ "1391" ]
bce782f875cf936739f7a3f0d1dd70c29aac7d67
diff --git a/evap/evaluation/templatetags/evaluation_filters.py b/evap/evaluation/templatetags/evaluation_filters.py --- a/evap/evaluation/templatetags/evaluation_filters.py +++ b/evap/evaluation/templatetags/evaluation_filters.py @@ -187,3 +187,13 @@ def hours_and_minutes(time_left_for_evaluation): @register.filter def has_nonresponsible_editor(evaluation): return evaluation.contributions.filter(can_edit=True).exclude(contributor__in=evaluation.course.responsibles.all()).exists() + + [email protected] +def order_by(iterable, attribute): + return sorted(iterable, key=lambda item: getattr(item, attribute)) + + [email protected] +def order_due_evaluations_by(due_evaluations, attribute): + return sorted(due_evaluations, key=lambda due_evaluation: getattr(due_evaluation[1], attribute))
diff --git a/evap/evaluation/fixtures/test_data.json b/evap/evaluation/fixtures/test_data.json --- a/evap/evaluation/fixtures/test_data.json +++ b/evap/evaluation/fixtures/test_data.json @@ -117886,7 +117886,7 @@ "model": "evaluation.userprofile", "fields": { "password": "pbkdf2_sha256$150000$VhbGuFyU0NsF$LaOk+e0jHdSnobNBx3Zv9+/jeVxWIJuz2IuLVJVgtNk=", - "last_login": "2019-10-28T18:35:38.084", + "last_login": "2020-02-18T13:51:45.323", "is_superuser": true, "username": "evap", "email": "[email protected]", @@ -129028,8 +129028,8 @@ "pk": 1, "fields": { "name": "Editor Review Notice", - "subject": "[EvaP] Neue Lehrveranstaltungen stehen zur \u00dcberpr\u00fcfung bereit / New evaluations ready for approval", - "body": "(English version below)\r\n\r\n\r\nSehr geehrte Dozentin, sehr geehrter Dozent,\r\n\r\nvielen Dank, dass Sie in diesem Semester Veranstaltungen anbieten. Um die Evaluierung dieser Veranstaltungen auf unserer Plattform EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %} durchf\u00fchren zu k\u00f6nnen, ben\u00f6tigen wir Ihre Mithilfe. \r\n\r\nSie k\u00f6nnen die folgenden Aufgaben auch an Ihre Mitarbeiter delegieren. Unter \u201cEinstellungen\u201d k\u00f6nnen Sie Stellvertreter hinzuf\u00fcgen, die damit Bearbeitungsrechte f\u00fcr alle Ihre Lehrveranstaltungen erhalten. Beim Bearbeiten einzelner Lehrveranstaltungen k\u00f6nnen sie ebenfalls Bearbeitungsrechte vergeben, die sich auf diese Veranstaltung beschr\u00e4nken.\r\n\r\n{% if user.needs_login_key and login_url %}Mit diesem Link k\u00f6nnen Sie sich einmalig bei der Platform anmelden: {{ login_url }}{% elif user.needs_login_key %}Ein Link zum Anmelden wird Ihnen per E-Mail zugesendet.{% else %}Zum Anmelden verwenden Sie bitte Ihre Zugangsdaten.{% endif %}\r\n\r\nWir m\u00f6chten Sie bitten, f\u00fcr Ihre Lehrveranstaltungen innerhalb der n\u00e4chsten Woche Folgendes zu \u00fcberpr\u00fcfen:\r\n - Ist der Evaluierungszeitraum angemessen? Bitte legen Sie das Ende der Evaluierung vor die finale Pr\u00fcfungsleistung (Klausur, Pr\u00fcfung, Ausarbeitung etc.).\r\n - Wurden die f\u00fcr die Veranstaltung geeigneten Frageb\u00f6gen ausgew\u00e4hlt? Bitte passen Sie die Auswahl gegebenenfalls an.\r\n - Werden alle beteiligten Dozenten, \u00dcbungsleiter, Projektleiter, Seminarbetreuer etc. evaluiert? F\u00fcgen Sie bitte alle weiteren Personen mit den passenden Frageb\u00f6gen hinzu.\r\n\r\nFolgende Veranstaltungen ben\u00f6tigen Ihre Freigabe:\r\n{% for evaluation in evaluations %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nVielen Dank im Voraus f\u00fcr Ihre M\u00fche!\r\nBei Fragen und R\u00fcckmeldungen k\u00f6nnen Sie sich jederzeit gerne an das Evaluierungsteam wenden ([email protected]).\r\n\r\nFreundliche Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear lecturer,\r\n\r\nThank you very much for teaching during this semester. We need your help so we can evaluate all evaluations on our platform EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %}.\r\n\r\nYou can delegate the following tasks to your staff. Under \"Settings\" you can assign your delegates, which thereby will gain editing rights for all your evaluations. 
On the details page of a single evaluation you can also add persons and assign edit rights for this evaluation to them.\r\n\r\n{% if user.needs_login_key and login_url %}With the following one-time URL you can login to the evaluation platform: {{ login_url }}{% elif user.needs_login_key %}We will send you a one-time login URL in a separate email.{% else %}Please use your credentials to login.{% endif %}\r\n\r\nTo prepare your evaluations we would like to ask you for the following within a week:\r\n - Is the evaluation period appropriate? Please let the evaluation end before the final exam (written or oral examination, final assignment, etc.) of your evaluation.\r\n - Are the selected questionnaires adequate for the evaluation? Please adapt the selection if necessary.\r\n - Are all contributors (lecturers, teaching assistants, etc.) included in the evaluation? Please add all additional persons with their appropriate questionnaires.\r\n\r\nThese evaluations need your approval:\r\n{% for evaluation in evaluations %} - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\nThank you very much in advance for your efforts!\r\nIf you have any questions or feedback, please contact the evaluation team ([email protected]).\r\n\r\nKind regards,\r\nthe evaluation team\r\n\r\n(This is an automated message.)" + "subject": "[EvaP] Neue Lehrveranstaltungen stehen zur \u00dcberpr\u00fcfung bereit / New courses ready for approval", + "body": "{% load evaluation_filters %}\r\n(English version below)\r\n\r\n\r\nSehr geehrte Dozentin, sehr geehrter Dozent,\r\n\r\nvielen Dank, dass Sie in diesem Semester Veranstaltungen anbieten. Um die Evaluierung dieser Veranstaltungen auf unserer Plattform EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %} durchf\u00fchren zu k\u00f6nnen, ben\u00f6tigen wir Ihre Mithilfe. \r\n\r\nSie k\u00f6nnen die folgenden Aufgaben auch an Ihre Mitarbeiter delegieren. Unter \u201cEinstellungen\u201d k\u00f6nnen Sie Stellvertreter hinzuf\u00fcgen, die damit Bearbeitungsrechte f\u00fcr alle Ihre Lehrveranstaltungen erhalten. Beim Bearbeiten einzelner Lehrveranstaltungen k\u00f6nnen sie ebenfalls Bearbeitungsrechte vergeben, die sich auf diese Veranstaltung beschr\u00e4nken.\r\n\r\n{% if user.needs_login_key and login_url %}Mit diesem Link k\u00f6nnen Sie sich einmalig bei der Platform anmelden: {{ login_url }}{% elif user.needs_login_key %}Ein Link zum Anmelden wird Ihnen per E-Mail zugesendet.{% else %}Zum Anmelden verwenden Sie bitte Ihre Zugangsdaten.{% endif %}\r\n\r\nWir m\u00f6chten Sie bitten, f\u00fcr Ihre Lehrveranstaltungen innerhalb der n\u00e4chsten Woche Folgendes zu \u00fcberpr\u00fcfen:\r\n - Ist der Evaluierungszeitraum angemessen? Bitte legen Sie das Ende der Evaluierung vor die finale Pr\u00fcfungsleistung (Klausur, Pr\u00fcfung, Ausarbeitung etc.).\r\n - Wurden die f\u00fcr die Veranstaltung geeigneten Frageb\u00f6gen ausgew\u00e4hlt? Bitte passen Sie die Auswahl gegebenenfalls an.\r\n - Werden alle beteiligten Dozenten, \u00dcbungsleiter, Projektleiter, Seminarbetreuer etc. evaluiert? 
F\u00fcgen Sie bitte alle weiteren Personen mit den passenden Frageb\u00f6gen hinzu.\r\n\r\nFolgende Veranstaltungen ben\u00f6tigen Ihre Freigabe:\r\n{% for evaluation in evaluations|order_by:\"full_name_de\" %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nVielen Dank im Voraus f\u00fcr Ihre M\u00fche!\r\nBei Fragen und R\u00fcckmeldungen k\u00f6nnen Sie sich jederzeit gerne an das Evaluierungsteam wenden ([email protected]).\r\n\r\nFreundliche Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear lecturer,\r\n\r\nThank you very much for teaching during this semester. We need your help so we can evaluate all courses on our platform EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %}.\r\n\r\nYou can delegate the following tasks to your staff. Under \"Settings\" you can assign your delegates, which thereby will gain editing rights for all your courses. On the details page of a single course you can also add persons and assign edit rights for this course to them.\r\n\r\n{% if user.needs_login_key and login_url %}With the following one-time URL you can login to the evaluation platform: {{ login_url }}{% elif user.needs_login_key %}We will send you a one-time login URL in a separate email.{% else %}Please use your credentials to login.{% endif %}\r\n\r\nTo prepare your courses we would like to ask you for the following within a week:\r\n - Is the evaluation period appropriate? Please let the evaluation end before the final exam (written or oral examination, final assignment, etc.) of your course.\r\n - Are the selected questionnaires adequate for the course? Please adapt the selection if necessary.\r\n - Are all contributors (lecturers, teaching assistants, etc.) included in the evaluation? 
Please add all additional persons with their appropriate questionnaires.\r\n\r\nThese courses need your approval:\r\n{% for evaluation in evaluations|order_by:\"full_name_en\" %} - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\nThank you very much in advance for your efforts!\r\nIf you have any questions or feedback, please contact the evaluation team ([email protected]).\r\n\r\nKind regards,\r\nthe evaluation team\r\n\r\n(This is an automated message.)" } }, { @@ -129038,7 +129038,7 @@ "fields": { "name": "Student Reminder", "subject": "[EvaP] Die Evaluierung endet {% if first_due_in_days == 0 %}heute{% elif first_due_in_days == 1 %}morgen{% else %}in {{ first_due_in_days }} Tagen{% endif %} / The evaluation is about to end {% if first_due_in_days == 0 %}today{% elif first_due_in_days == 1 %}tomorrow{% else %}in {{ first_due_in_days }} days{% endif %}", - "body": "(English version below)\r\n\r\n\r\nHallo {{ user.first_name }},\r\n\r\nf\u00fcr eine deiner Evaluierungen endet {% if first_due_in_days == 0 %}heute{% elif first_due_in_days == 1 %}morgen{% else %}in {{ first_due_in_days }} Tagen{% endif %} die Evaluierungsfrist.\r\n\r\nAn folgenden Evaluierungen hast du noch nicht teilgenommen:\r\n{% for evaluation, due_in_days in due_evaluations %} - {{ evaluation.full_name_de }} (endet {% if due_in_days == 0 %}heute{% elif due_in_days == 1 %}morgen{% else %}in {{ due_in_days }} Tagen{% endif %})\r\n{% endfor %}\r\nDu kannst dein Feedback auf EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %} abgeben, wir w\u00fcrden uns \u00fcber deine Stimme freuen :)\r\n\r\n{% if user.needs_login_key %}Klicke hier, um dich anzumelden: {{ login_url }}\r\n{% endif%}\r\nVielen Dank f\u00fcr deine M\u00fche und viele Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\nP.S.: Bei Fragen und R\u00fcckmeldungen kannst du dich jederzeit an uns wenden ([email protected]).\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear {{ user.first_name }},\r\n\r\nThe evaluation period for one of your evaluations will end {% if first_due_in_days == 0 %}today{% elif first_due_in_days == 1 %}tomorrow{% else %}in {{ first_due_in_days }} days{% endif %}.\r\n\r\nYou did not yet participate in the following evaluations:\r\n{% for evaluation, due_in_days in due_evaluations %} - {{ evaluation.full_name_en }} (ends {% if due_in_days == 0 %}today{% elif due_in_days == 1 %}tomorrow{% else %}in {{ due_in_days }} days{% endif %})\r\n{% endfor %}\r\nYou can evaluate them on EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %}. 
We\u2019re looking forward to receive your feedback :)\r\n\r\n{% if user.needs_login_key %}Click here to login: {{ login_url }}\r\n{% endif%}\r\nThank you very much for your efforts and kind regards,\r\nthe evaluation team\r\n\r\nPS: If you have any questions or feedback, please let us know ([email protected]).\r\n\r\n(This is an automated message.)" + "body": "{% load evaluation_filters %}\r\n(English version below)\r\n\r\n\r\nHallo {{ user.first_name }},\r\n\r\nf\u00fcr eine deiner Evaluierungen endet {% if first_due_in_days == 0 %}heute{% elif first_due_in_days == 1 %}morgen{% else %}in {{ first_due_in_days }} Tagen{% endif %} die Evaluierungsfrist.\r\n\r\nAn folgenden Evaluierungen hast du noch nicht teilgenommen:\r\n{% for evaluation, due_in_days in due_evaluations|order_due_evaluations_by:\"full_name_de\" %} - {{ evaluation.full_name_de }} (endet {% if due_in_days == 0 %}heute{% elif due_in_days == 1 %}morgen{% else %}in {{ due_in_days }} Tagen{% endif %})\r\n{% endfor %}\r\nDu kannst dein Feedback auf EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %} abgeben, wir w\u00fcrden uns \u00fcber deine Stimme freuen :)\r\n\r\n{% if user.needs_login_key %}Klicke hier, um dich anzumelden: {{ login_url }}\r\n{% endif%}\r\nVielen Dank f\u00fcr deine M\u00fche und viele Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\nP.S.: Bei Fragen und R\u00fcckmeldungen kannst du dich jederzeit an uns wenden ([email protected]).\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear {{ user.first_name }},\r\n\r\nThe evaluation period for one of your evaluations will end {% if first_due_in_days == 0 %}today{% elif first_due_in_days == 1 %}tomorrow{% else %}in {{ first_due_in_days }} days{% endif %}.\r\n\r\nYou did not yet participate in the following evaluations:\r\n{% for evaluation, due_in_days in due_evaluations|order_due_evaluations_by:\"full_name_en\" %} - {{ evaluation.full_name_en }} (ends {% if due_in_days == 0 %}today{% elif due_in_days == 1 %}tomorrow{% else %}in {{ due_in_days }} days{% endif %})\r\n{% endfor %}\r\nYou can evaluate them on EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %}. 
We\u2019re looking forward to receive your feedback :)\r\n\r\n{% if user.needs_login_key %}Click here to login: {{ login_url }}\r\n{% endif%}\r\nThank you very much for your efforts and kind regards,\r\nthe evaluation team\r\n\r\nPS: If you have any questions or feedback, please let us know ([email protected]).\r\n\r\n(This is an automated message.)" } }, { @@ -129047,7 +129047,7 @@ "fields": { "name": "Publishing Notice Contributor", "subject": "[EvaP] Evaluierungsergebnisse ver\u00f6ffentlicht / Evaluation results published", - "body": "(English version below)\r\n\r\n\r\nSehr geehrte Dozentin, sehr geehrter Dozent,\r\n\r\ndie folgenden Evaluierungsergebnisse wurden soeben ver\u00f6ffentlicht:\r\n{% for evaluation in evaluations %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nDie Ergebnisse k\u00f6nnen auf EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %} eingesehen werden.{% if user.needs_login_key and login_url %} Hier klicken zum Anmelden: {{ login_url }}{% elif user.needs_login_key %} Ein Link zum Anmelden wird per E-Mail zugesendet.{% endif %}\r\n\r\nBei Fragen und R\u00fcckmeldungen stehen wir gerne zur Verf\u00fcgung ([email protected]).\r\n\r\nFreundliche Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear users of the evaluation platform,\r\n\r\nthe results of the following evaluations have just been published:\r\n{% for evaluation in evaluations %} - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\nYou can view the results on EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %}.{% if user.needs_login_key and login_url %} Click here to login: {{ login_url }}{% elif user.needs_login_key %} We will send you a one-time login URL in a separate email.{% endif %}\r\n\r\nIf you have any questions or feedback, please let us know ([email protected]).\r\n\r\nKind regards,\r\nthe Evaluation Team\r\n\r\n(This is an automated message.)" + "body": "{% load evaluation_filters %}\r\n(English version below)\r\n\r\n\r\nSehr geehrte Dozentin, sehr geehrter Dozent,\r\n\r\ndie folgenden Evaluierungsergebnisse wurden soeben ver\u00f6ffentlicht:\r\n{% for evaluation in evaluations|order_by:\"full_name_de\" %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nDie Ergebnisse k\u00f6nnen auf EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %} eingesehen werden.{% if user.needs_login_key and login_url %} Hier klicken zum Anmelden: {{ login_url }}{% elif user.needs_login_key %} Ein Link zum Anmelden wird per E-Mail zugesendet.{% endif %}\r\n\r\nBei Fragen und R\u00fcckmeldungen stehen wir gerne zur Verf\u00fcgung ([email protected]).\r\n\r\nFreundliche Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear users of the evaluation platform,\r\n\r\nthe results of the following evaluations have just been published:\r\n{% for evaluation in evaluations|order_by:\"full_name_en\" %} - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\nYou can view the results on EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %}.{% if user.needs_login_key and login_url %} Click here to login: {{ login_url }}{% elif user.needs_login_key %} We will send you a one-time login URL in a separate email.{% endif %}\r\n\r\nIf you have any questions or feedback, please let us know ([email protected]).\r\n\r\nKind regards,\r\nthe Evaluation Team\r\n\r\n(This is an automated message.)" } }, { @@ -129065,7 +129065,7 @@ 
"fields": { "name": "Evaluation Started", "subject": "[EvaP] Evaluierung hat begonnen / Evaluation started", - "body": "(English version below)\r\n\r\n\r\nHallo {{ user.first_name }},\r\n\r\nf\u00fcr die folgenden Evaluierungen hat die Evaluierungsphase begonnen:\r\n{% for evaluation in evaluations %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nDu kannst dein Feedback auf EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %} abgeben, die Dozenten und wir freuen uns \u00fcber deine Bewertung.\r\n\r\n{% if user.needs_login_key %}Klicke hier, um dich anzumelden: {{ login_url }}\r\n{% endif %}{% if due_evaluations|length > 1%}Diese Evaluierungen warten auf deine Bewertung:\r\n{% for evaluation, due_in_days in due_evaluations %} - {{ evaluation.full_name_de }} (endet {% if due_in_days == 0 %}heute{% elif due_in_days == 1 %}morgen{% else %}in {{ due_in_days }} Tagen{% endif %})\r\n{% endfor %}{% endif %}\r\nVielen Dank f\u00fcr deine M\u00fche und viele Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\nP.S.: Bei Fragen und R\u00fcckmeldungen kannst du dich jederzeit an uns wenden ([email protected]).\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear {{ user.first_name }},\r\n\r\nThe evaluation period for the following evaluations just started:\r\n{% for evaluation in evaluations %} - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\nYou can evaluate them on EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %}. The lecturers and we are looking forward to receive your feedback.\r\n\r\n{% if user.needs_login_key %}Click here to login: {{ login_url }}\r\n{% endif %}{% if due_evaluations|length > 1%}These evaluations are waiting for your feedback:\r\n{% for evaluation, due_in_days in due_evaluations %} - {{ evaluation.full_name_en }} (ends {% if due_in_days == 0 %}today{% elif due_in_days == 1 %}tomorrow{% else %}in {{ due_in_days }} days{% endif %})\r\n{% endfor %}{% endif %}\r\nThank you very much for your efforts and kind regards,\r\nthe evaluation team\r\n\r\nPS: If you have any questions or feedback, please let us know ([email protected]).\r\n\r\n(This is an automated message.)" + "body": "{% load evaluation_filters %}\r\n(English version below)\r\n\r\n\r\nHallo {{ user.first_name }},\r\n\r\nf\u00fcr die folgenden Evaluierungen hat die Evaluierungsphase begonnen:\r\n{% for evaluation in evaluations|order_by:\"full_name_de\" %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nDu kannst dein Feedback auf EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %} abgeben, die Dozenten und wir freuen uns \u00fcber deine Bewertung.\r\n\r\n{% if user.needs_login_key %}Klicke hier, um dich anzumelden: {{ login_url }}\r\n{% endif %}{% if due_evaluations|length > 1%}Diese Evaluierungen warten auf deine Bewertung:\r\n{% for evaluation, due_in_days in due_evaluations|order_due_evaluations_by:\"full_name_de\" %} - {{ evaluation.full_name_de }} (endet {% if due_in_days == 0 %}heute{% elif due_in_days == 1 %}morgen{% else %}in {{ due_in_days }} Tagen{% endif %})\r\n{% endfor %}{% endif %}\r\nVielen Dank f\u00fcr deine M\u00fche und viele Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\nP.S.: Bei Fragen und R\u00fcckmeldungen kannst du dich jederzeit an uns wenden ([email protected]).\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear {{ user.first_name }},\r\n\r\nThe evaluation period for the following courses just started:\r\n{% for evaluation in evaluations|order_by:\"full_name_en\" %} - {{ 
evaluation.full_name_en }}\r\n{% endfor %}\r\nYou can evaluate them on EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %}. The lecturers and we are looking forward to receive your feedback.\r\n\r\n{% if user.needs_login_key %}Click here to login: {{ login_url }}\r\n{% endif %}{% if due_evaluations|length > 1%}These evaluations are waiting for your feedback:\r\n{% for evaluation, due_in_days in due_evaluations|order_due_evaluations_by:\"full_name_en\" %} - {{ evaluation.full_name_en }} (ends {% if due_in_days == 0 %}today{% elif due_in_days == 1 %}tomorrow{% else %}in {{ due_in_days }} days{% endif %})\r\n{% endfor %}{% endif %}\r\nThank you very much for your efforts and kind regards,\r\nthe evaluation team\r\n\r\nPS: If you have any questions or feedback, please let us know ([email protected]).\r\n\r\n(This is an automated message.)" } }, { @@ -129074,7 +129074,7 @@ "fields": { "name": "Editor Review Reminder", "subject": "[EvaP] Reminder: Neue Lehrveranstaltungen stehen zur \u00dcberpr\u00fcfung bereit / New Evaluation ready for approval", - "body": "(English version below)\r\n\r\n\r\nSehr geehrte Dozentin, sehr geehrter Dozent,\r\n\r\num die Evaluierung Ihrer Veranstaltungen auf unserer Plattform EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %} durchf\u00fchren zu k\u00f6nnen, ben\u00f6tigen wir Ihre Mithilfe. \r\n\r\nSie k\u00f6nnen die folgenden Aufgaben auch an Ihre Mitarbeiter delegieren. Unter \"Einstellungen\" k\u00f6nnen Sie Stellvertreter hinzuf\u00fcgen, die damit Bearbeitungsrechte f\u00fcr alle Ihre Lehrveranstaltungen erhalten. Beim Bearbeiten einzelner Lehrveranstaltungen k\u00f6nnen sie ebenfalls Bearbeitungsrechte vergeben, die sich auf diese Veranstaltung beschr\u00e4nken.\r\n\r\n{% if user.needs_login_key and login_url %}Mit diesem Link k\u00f6nnen Sie sich einmalig bei der Platform anmelden: {{ login_url }}{% elif user.needs_login_key %}Ein Link zum Anmelden wird Ihnen per E-Mail zugesendet.{% else %}Zum Anmelden verwenden Sie bitte Ihre Zugangsdaten.{% endif %}\r\n\r\nWir m\u00f6chten Sie bitten, f\u00fcr Ihre Lehrveranstaltungen sobald wie m\u00f6glich Folgendes zu \u00fcberpr\u00fcfen:\r\n- Ist der Evaluierungszeitraum angemessen? Bitte legen Sie das Ende der Evaluierung vor die finale Pr\u00fcfungsleistung (Klausur, Pr\u00fcfung, Ausarbeitung etc.).\r\n- Wurden die f\u00fcr die Veranstaltung geeigneten Frageb\u00f6gen ausgew\u00e4hlt? Bitte passen Sie die Auswahl gegebenenfalls an.\r\n- Werden alle beteiligten Dozenten, \u00dcbungsleiter, Projektleiter, Seminarbetreuer etc. evaluiert? F\u00fcgen Sie bitte alle weiteren Personen mit den passenden Frageb\u00f6gen hinzu.\r\n\r\nFolgende Veranstaltungen ben\u00f6tigen Ihre Freigabe:\r\n{% for evaluation in evaluations %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nVielen Dank im Voraus f\u00fcr Ihre M\u00fche!\r\nBei Fragen und R\u00fcckmeldungen k\u00f6nnen Sie sich jederzeit gerne an das Evaluierungsteam wenden ([email protected]).\r\n\r\nFreundliche Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear lecturer,\r\n\r\nwe need your help so we can evaluate all evaluations on our platform EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %}.\r\n\r\nYou can delegate the following tasks to your staff. Under \"Settings\" you can assign your delegates, which thereby will gain editing rights for all your evaluations. 
On the details page of a single evaluation you can also add persons and assign edit rights for this evaluation to them.\r\n\r\n{% if user.needs_login_key and login_url %}With the following one-time URL you can login to the evaluation platform: {{ login_url }}{% elif user.needs_login_key %}We will send you a one-time login URL in a separate email.{% else %}Please use your credentials to login.{% endif %}\r\n\r\nTo prepare your evaluations we would like to ask you for the following:\r\n- Is the evaluation period appropriate? Please let the evaluation end before the final exam (written or oral examination, final assignment, etc.) of your evaluation.\r\n- Are the selected questionnaires adequate for the evaluation? Please adapt the selection if necessary.\r\n- Are all contributors (lecturers, teaching assistants, etc.) included in the evaluation? Please add all additional persons with their appropriate questionnaires.\r\n\r\nThese evaluations need your approval:\r\n{% for evaluation in evaluations %} - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\nThank you very much in advance for your efforts!\r\nIf you have any questions or feedback, please contact the evaluation team ([email protected]).\r\n\r\nKind regards,\r\nthe evaluation team\r\n\r\n(This is an automated message.)" + "body": "{% load evaluation_filters %}\r\n(English version below)\r\n\r\n\r\nSehr geehrte Dozentin, sehr geehrter Dozent,\r\n\r\num die Evaluierung Ihrer Veranstaltungen auf unserer Plattform EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %} durchf\u00fchren zu k\u00f6nnen, ben\u00f6tigen wir Ihre Mithilfe. \r\n\r\nSie k\u00f6nnen die folgenden Aufgaben auch an Ihre Mitarbeiter delegieren. Unter \"Einstellungen\" k\u00f6nnen Sie Stellvertreter hinzuf\u00fcgen, die damit Bearbeitungsrechte f\u00fcr alle Ihre Lehrveranstaltungen erhalten. Beim Bearbeiten einzelner Lehrveranstaltungen k\u00f6nnen sie ebenfalls Bearbeitungsrechte vergeben, die sich auf diese Veranstaltung beschr\u00e4nken.\r\n\r\n{% if user.needs_login_key and login_url %}Mit diesem Link k\u00f6nnen Sie sich einmalig bei der Platform anmelden: {{ login_url }}{% elif user.needs_login_key %}Ein Link zum Anmelden wird Ihnen per E-Mail zugesendet.{% else %}Zum Anmelden verwenden Sie bitte Ihre Zugangsdaten.{% endif %}\r\n\r\nWir m\u00f6chten Sie bitten, f\u00fcr Ihre Lehrveranstaltungen sobald wie m\u00f6glich Folgendes zu \u00fcberpr\u00fcfen:\r\n- Ist der Evaluierungszeitraum angemessen? Bitte legen Sie das Ende der Evaluierung vor die finale Pr\u00fcfungsleistung (Klausur, Pr\u00fcfung, Ausarbeitung etc.).\r\n- Wurden die f\u00fcr die Veranstaltung geeigneten Frageb\u00f6gen ausgew\u00e4hlt? Bitte passen Sie die Auswahl gegebenenfalls an.\r\n- Werden alle beteiligten Dozenten, \u00dcbungsleiter, Projektleiter, Seminarbetreuer etc. evaluiert? 
F\u00fcgen Sie bitte alle weiteren Personen mit den passenden Frageb\u00f6gen hinzu.\r\n\r\nFolgende Veranstaltungen ben\u00f6tigen Ihre Freigabe:\r\n{% for evaluation in evaluations|order_by:\"full_name_de\" %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nVielen Dank im Voraus f\u00fcr Ihre M\u00fche!\r\nBei Fragen und R\u00fcckmeldungen k\u00f6nnen Sie sich jederzeit gerne an das Evaluierungsteam wenden ([email protected]).\r\n\r\nFreundliche Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear lecturer,\r\n\r\nwe need your help so we can evaluate all courses on our platform EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %}.\r\n\r\nYou can delegate the following tasks to your staff. Under \"Settings\" you can assign your delegates, which thereby will gain editing rights for all your evaluations. On the details page of a single evaluation you can also add persons and assign edit rights for this evaluation to them.\r\n\r\n{% if user.needs_login_key and login_url %}With the following one-time URL you can login to the evaluation platform: {{ login_url }}{% elif user.needs_login_key %}We will send you a one-time login URL in a separate email.{% else %}Please use your credentials to login.{% endif %}\r\n\r\nTo prepare your evaluations we would like to ask you for the following:\r\n- Is the evaluation period appropriate? Please let the evaluation end before the final exam (written or oral examination, final assignment, etc.) of your evaluation.\r\n- Are the selected questionnaires adequate for the evaluation? Please adapt the selection if necessary.\r\n- Are all contributors (lecturers, teaching assistants, etc.) included in the evaluation? Please add all additional persons with their appropriate questionnaires.\r\n\r\nThese evaluations need your approval:\r\n{% for evaluation in evaluations|order_by:\"full_name_en\" %} - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\nThank you very much in advance for your efforts!\r\nIf you have any questions or feedback, please contact the evaluation team ([email protected]).\r\n\r\nKind regards,\r\nthe evaluation team\r\n\r\n(This is an automated message.)" } }, { @@ -129092,7 +129092,7 @@ "fields": { "name": "Publishing Notice Participant", "subject": "[EvaP] Evaluierungsergebnisse ver\u00f6ffentlicht / Evaluation results published", - "body": "(English version below)\r\n\r\n\r\nHallo {{ user.first_name }},\r\n\r\ndie folgenden Evaluierungsergebnisse wurden soeben ver\u00f6ffentlicht:\r\n{% for evaluation in evaluations %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nDie Ergebnisse k\u00f6nnen auf EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %} eingesehen werden.{% if user.needs_login_key and login_url %} Hier klicken zum Anmelden: {{ login_url }}{% elif user.needs_login_key %} Ein Link zum Anmelden wird per E-Mail zugesendet.{% endif %}\r\n\r\nBei Fragen und R\u00fcckmeldungen stehen wir gerne zur Verf\u00fcgung ([email protected]).\r\n\r\nFreundliche Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear users of the evaluation platform,\r\n\r\nthe results of the following evaluations have just been published:\r\n{% for evaluation in evaluations %} - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\nYou can view the results on EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %}.{% if user.needs_login_key and login_url %} Click here to 
login: {{ login_url }}{% elif user.needs_login_key %} We will send you a one-time login URL in a separate email.{% endif %}\r\n\r\nIf you have any questions or feedback, please let us know ([email protected]).\r\n\r\nKind regards,\r\nthe Evaluation Team\r\n\r\n(This is an automated message.)" + "body": "{% load evaluation_filters %}\r\n(English version below)\r\n\r\n\r\nHallo {{ user.first_name }},\r\n\r\ndie folgenden Evaluierungsergebnisse wurden soeben ver\u00f6ffentlicht:\r\n{% for evaluation in evaluations|order_by:\"full_name_de\" %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nDie Ergebnisse k\u00f6nnen auf EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %} eingesehen werden.{% if user.needs_login_key and login_url %} Hier klicken zum Anmelden: {{ login_url }}{% elif user.needs_login_key %} Ein Link zum Anmelden wird per E-Mail zugesendet.{% endif %}\r\n\r\nBei Fragen und R\u00fcckmeldungen stehen wir gerne zur Verf\u00fcgung ([email protected]).\r\n\r\nFreundliche Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear users of the evaluation platform,\r\n\r\nthe results of the following evaluations have just been published:\r\n{% for evaluation in evaluations|order_by:\"full_name_en\" %} - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\nYou can view the results on EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %}.{% if user.needs_login_key and login_url %} Click here to login: {{ login_url }}{% elif user.needs_login_key %} We will send you a one-time login URL in a separate email.{% endif %}\r\n\r\nIf you have any questions or feedback, please let us know ([email protected]).\r\n\r\nKind regards,\r\nthe Evaluation Team\r\n\r\n(This is an automated message.)" } }, {
Sort evaluations in email lists by name When sending emails which include lists of evaluations (when asking for preparation, reminding for preparation, publishing results), these lists should be sorted alphabetically by the name of the evaluation.
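The fixture changes above rely on an `order_by` template filter loaded from `evaluation_filters`. The filter's implementation is not part of this record; as a rough sketch of what such an attribute-based filter could look like (the function body is an assumption, not code taken from the repository):

```
# Hypothetical sketch of an attribute-based order_by template filter; the real filter
# lives in evap's evaluation_filters template tag library and may differ.
from django import template

register = template.Library()


@register.filter
def order_by(iterable, attribute):
    # used in templates as {{ evaluations|order_by:"full_name_en" }}
    return sorted(iterable, key=lambda item: getattr(item, attribute))
```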
2020-02-03T17:10:36
e-valuation/EvaP
1,428
e-valuation__EvaP-1428
[ "1414" ]
3245da4e8044adef4d87b7a5301440742ab9f8d8
diff --git a/evap/contributor/forms.py b/evap/contributor/forms.py --- a/evap/contributor/forms.py +++ b/evap/contributor/forms.py @@ -68,9 +68,13 @@ class EditorContributionForm(ContributionForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) + existing_contributor_pk = self.instance.contributor.pk if self.instance.contributor else None + self.fields['questionnaires'].queryset = Questionnaire.objects.contributor_questionnaires().filter( Q(visibility=Questionnaire.EDITORS) | Q(contributions__evaluation=self.evaluation)).distinct() - self.fields['contributor'].queryset = UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True) + self.fields['contributor'].queryset = UserProfile.objects.filter( + (Q(is_active=True) & Q(is_proxy_user=False)) | Q(pk=existing_contributor_pk) + ) class DelegatesForm(forms.ModelForm):
diff --git a/evap/contributor/tests/test_forms.py b/evap/contributor/tests/test_forms.py --- a/evap/contributor/tests/test_forms.py +++ b/evap/contributor/tests/test_forms.py @@ -65,6 +65,30 @@ def test_managers_only(self): self.assertEqual(expected, set(formset.forms[0].fields['questionnaires'].queryset)) self.assertEqual(expected, set(formset.forms[1].fields['questionnaires'].queryset)) + def test_existing_contributors_are_in_queryset(self): + """ + Asserts that users that should normally not be in the contributor queryset are in it when they are already set. + Regression test for #1414. + """ + evaluation = baker.make(Evaluation) + non_proxy_user = baker.make(UserProfile) + proxy_user = baker.make(UserProfile, is_proxy_user=True) + contribution1 = baker.make(Contribution, evaluation=evaluation, contributor=non_proxy_user, questionnaires=[]) + + InlineContributionFormset = inlineformset_factory(Evaluation, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=1) + formset = InlineContributionFormset(instance=evaluation, form_kwargs={'evaluation': evaluation}) + + self.assertEqual({non_proxy_user}, set(formset.forms[0].fields['contributor'].queryset)) + + # now a manager adds the proxy user as a contributor. + contribution1.contributor = proxy_user + contribution1.save() + + InlineContributionFormset = inlineformset_factory(Evaluation, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=1) + formset = InlineContributionFormset(instance=evaluation, form_kwargs={'evaluation': evaluation}) + + self.assertEqual({proxy_user, non_proxy_user}, set(formset.forms[0].fields['contributor'].queryset)) + class ContributionFormsetWebTests(WebTest): csrf_checks = False
Contributors missing in editor evaluation edit form When editing an evaluation as an editor, contributors who have already been added (by managers) and who are marked as inactive or proxy users are not shown in the form anymore because they are not included in the contributor queryset. This leads to errors when saving the form. The queryset should always include those people already added as contributors for this evaluation.
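For illustration, a minimal sketch (not the actual EvaP form code) of why the restricted queryset breaks saving: Django's `ModelChoiceField` only accepts values from its queryset, so a contribution that already points at an inactive or proxy user can no longer be re-submitted unchanged.

```
# Minimal illustration, assuming the is_active/is_proxy_user fields shown in the patch above.
from django import forms

from evap.evaluation.models import UserProfile

contributor_field = forms.ModelChoiceField(
    queryset=UserProfile.objects.filter(is_active=True, is_proxy_user=False)
)
# contributor_field.clean(existing_proxy_contributor.pk)
# -> ValidationError: "Select a valid choice. That choice is not one of the available choices."
```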
2020-02-10T20:06:29
e-valuation/EvaP
1,438
e-valuation__EvaP-1438
[ "1243" ]
546ad1421e750b9532f96620914cf4ffa9928230
diff --git a/evap/evaluation/migrations/0108_remove_default_name_de_ordering.py b/evap/evaluation/migrations/0108_remove_default_name_de_ordering.py new file mode 100644 --- /dev/null +++ b/evap/evaluation/migrations/0108_remove_default_name_de_ordering.py @@ -0,0 +1,25 @@ +# Generated by Django 2.2.10 on 2020-03-15 20:55 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('evaluation', '0107_remove_default_evaluation_ordering'), + ] + + operations = [ + migrations.AlterModelOptions( + name='course', + options={'verbose_name': 'course', 'verbose_name_plural': 'courses'}, + ), + migrations.AlterModelOptions( + name='questionnaire', + options={'ordering': ('type', 'order', 'pk'), 'verbose_name': 'questionnaire', 'verbose_name_plural': 'questionnaires'}, + ), + migrations.AlterModelOptions( + name='semester', + options={'ordering': ('-created_at', 'pk'), 'verbose_name': 'semester', 'verbose_name_plural': 'semesters'}, + ), + ] diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -50,7 +50,7 @@ class Semester(models.Model): created_at = models.DateField(verbose_name=_("created at"), auto_now_add=True) class Meta: - ordering = ('-created_at', 'name_de') + ordering = ('-created_at', 'pk') verbose_name = _("semester") verbose_name_plural = _("semesters") @@ -173,7 +173,7 @@ class Questionnaire(models.Model): objects = QuestionnaireManager() class Meta: - ordering = ('type', 'order', 'name_de') + ordering = ('type', 'order', 'pk') verbose_name = _("questionnaire") verbose_name_plural = _("questionnaires") @@ -181,10 +181,10 @@ def __str__(self): return self.name def __lt__(self, other): - return (self.type, self.order, self.name_de) < (other.type, other.order, self.name_de) + return (self.type, self.order, self.pk) < (other.type, other.order, other.pk) def __gt__(self, other): - return (self.type, self.order, self.name_de) > (other.type, other.order, self.name_de) + return (self.type, self.order, self.pk) > (other.type, other.order, other.pk) @property def is_above_contributors(self): @@ -288,7 +288,6 @@ class Course(models.Model): last_modified_user = models.ForeignKey(settings.AUTH_USER_MODEL, models.SET_NULL, null=True, blank=True, related_name="courses_last_modified+") class Meta: - ordering = ('name_de',) unique_together = ( ('semester', 'name_de'), ('semester', 'name_en'), diff --git a/evap/staff/views.py b/evap/staff/views.py --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -600,6 +600,7 @@ def semester_grade_reminder(request, semester_id): courses = semester.courses.filter(evaluations__state__in=['evaluated', 'reviewed', 'published'], is_graded=True, gets_no_grade_documents=False).distinct() courses = [course for course in courses if not course.final_grade_documents.exists()] + courses.sort(key=lambda course: course.name) responsibles = list(set(responsible for course in courses for responsible in course.responsibles.all())) responsibles.sort(key=lambda responsible: (responsible.last_name.lower(), responsible.first_name.lower()))
Don't sort by name_de by default Several models have `name_de` as their default `ordering`. That's almost always the wrong choice, since either we want to sort by `name` (the localized name, either `name_de` or `name_en`, which must be sorted in the views), or we want to sort by `pk`, `created_at` or something similar. We should probably remove that default. Maybe do some spot checks (search for e.g. `Course.objects`, `.courses` and `.course_set`) to see whether there are any places that actually need sorting but don't explicitly specify any.
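A short sketch of the split described above, sorting by the localized name in the view instead of relying on a `name_de` model default (the helper function is illustrative; the `Course` model and its localized `name` property are taken from the issue and the patch):

```
from evap.evaluation.models import Course


def courses_sorted_by_localized_name(semester):
    # the model's Meta keeps a stable, locale-independent default such as ('-created_at', 'pk');
    # views that need name ordering sort explicitly by the localized name property
    courses = list(Course.objects.filter(semester=semester))
    courses.sort(key=lambda course: course.name)
    return courses
```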
2020-03-15T20:11:16
e-valuation/EvaP
1,440
e-valuation__EvaP-1440
[ "1439" ]
a660e144f5c9e18b99382a674e6f1a8c0845e080
diff --git a/evap/results/views.py b/evap/results/views.py --- a/evap/results/views.py +++ b/evap/results/views.py @@ -216,7 +216,7 @@ def evaluation_detail(request, semester_id, evaluation_id): # if the evaluation is not published, the rendered results are not cached, so we need to attach distribution # information for rendering the distribution bar if evaluation.state != 'published': - evaluation = get_evaluations_with_prefetched_data([evaluation])[0] + evaluation = get_evaluations_with_course_result_attributes(get_evaluations_with_prefetched_data([evaluation]))[0] template_data = dict( evaluation=evaluation,
diff --git a/evap/results/tests/test_views.py b/evap/results/tests/test_views.py --- a/evap/results/tests/test_views.py +++ b/evap/results/tests/test_views.py @@ -230,6 +230,24 @@ def test_wrong_state(self): url = '/results/semester/%s/evaluation/%s' % (self.semester.id, evaluation.id) self.app.get(url, user='student', status=403) + def test_preview_without_rating_answers(self): + evaluation = baker.make(Evaluation, state='evaluated', course=baker.make(Course, semester=self.semester)) + url = f'/results/semester/{self.semester.id}/evaluation/{evaluation.id}' + self.app.get(url, user='manager') + + def test_preview_with_rating_answers(self): + evaluation = baker.make(Evaluation, state='evaluated', course=baker.make(Course, semester=self.semester)) + questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) + likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire, order=1) + evaluation.general_contribution.questionnaires.set([questionnaire]) + participants = baker.make(UserProfile, _quantity=20) + evaluation.participants.set(participants) + evaluation.voters.set(participants) + baker.make(RatingAnswerCounter, question=likert_question, contribution=evaluation.general_contribution, answer=1, count=20) + + url = f'/results/semester/{self.semester.id}/evaluation/{evaluation.id}' + self.app.get(url, user='manager') + class TestResultsSemesterEvaluationDetailViewFewVoters(WebTest): @classmethod
Crash when previewing results Steps to reproduce: 1. Log in as evap 2. Select `Preview results` on "Subject Specific English 2" (breaks in German too) Interestingly, I don't get any errors when viewing "Beautiful Data", which is also a preview, or "3d Computer Graphic - Software Architecture", which is public. I am on `25401c018f8f304001d67dcd22f52861d01ddf3b` and reloaded the test data right before testing. ``` AttributeError at /results/semester/21/evaluation/347 'Course' object has no attribute 'evaluation_weight_sum' ``` Can anyone confirm this?
Can confirm, refreshing the results cache doesn't change anything (makes sense because the evaluation is not published yet). @pixunil can you please look into it? We should also add a test which opens a result preview page.
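A condensed sketch of the regression test suggested here (simplified; the tests actually added in evap/results/tests/test_views.py also cover previews with and without rating answers):

```
from django.contrib.auth.models import Group
from django_webtest import WebTest
from model_bakery import baker

from evap.evaluation.models import Course, Evaluation, Semester, UserProfile


class TestResultsPreview(WebTest):
    def test_preview_page_opens(self):
        manager = baker.make(UserProfile, groups=[Group.objects.get(name="Manager")])
        semester = baker.make(Semester)
        evaluation = baker.make(Evaluation, state="evaluated",
                                course=baker.make(Course, semester=semester))
        # opening the preview as a manager should simply return the page without crashing
        self.app.get(f"/results/semester/{semester.id}/evaluation/{evaluation.id}", user=manager)
```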
2020-03-24T11:39:00
e-valuation/EvaP
1,455
e-valuation__EvaP-1455
[ "1411" ]
dceafb532394d1809eac9b59afa0067aecd3bcdf
diff --git a/evap/staff/forms.py b/evap/staff/forms.py --- a/evap/staff/forms.py +++ b/evap/staff/forms.py @@ -254,8 +254,10 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['course'].queryset = Course.objects.filter(semester=semester) - self.fields['general_questionnaires'].queryset = Questionnaire.objects.general_questionnaires().filter( - Q(visibility=Questionnaire.Visibility.MANAGERS) | Q(visibility=Questionnaire.Visibility.EDITORS) | Q(contributions__evaluation=self.instance)).distinct() + visible_questionnaires = Q(visibility__in=(Questionnaire.Visibility.MANAGERS, Questionnaire.Visibility.EDITORS)) + if self.instance.pk is not None: + visible_questionnaires |= Q(contributions__evaluation=self.instance) + self.fields['general_questionnaires'].queryset = Questionnaire.objects.general_questionnaires().filter(visible_questionnaires).distinct() self.fields['participants'].queryset = UserProfile.objects.exclude(is_active=False)
diff --git a/evap/staff/tests/test_forms.py b/evap/staff/tests/test_forms.py --- a/evap/staff/tests/test_forms.py +++ b/evap/staff/tests/test_forms.py @@ -707,3 +707,15 @@ def test_locked_questionnaire(self): # Assert form is valid and locked questionnaire is removed form.save() self.assertEqual({questionnaire}, set(evaluation.general_contribution.questionnaires.all())) + + def test_unused_questionnaire_visibility(self): + evaluation = baker.make(Evaluation) + questionnaire = baker.make(Questionnaire, visibility=Questionnaire.Visibility.HIDDEN, type=Questionnaire.Type.TOP) + + form = EvaluationForm(instance=evaluation, semester=evaluation.course.semester) + self.assertNotIn(questionnaire, form.fields["general_questionnaires"].queryset) + + evaluation.general_contribution.questionnaires.add(questionnaire) + + form = EvaluationForm(instance=evaluation, semester=evaluation.course.semester) + self.assertIn(questionnaire, form.fields["general_questionnaires"].queryset)
Questionnaire filtering on Evaluation creation broken When creating a new Evaluation in the staff evaluation creation form, the general questionnaire list includes questionnaires which are set to be hidden but have never been used. They should be hidden instead.
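As far as one can tell from the fix above, the cause is that `Q(contributions__evaluation=self.instance)` with a not-yet-saved evaluation (pk `None`) behaves like an is-null lookup over the outer join and so also matches questionnaires that have no contributions at all. A generic sketch of the guarded pattern from the patch (the helper function name is assumed):

```
from django.db.models import Q

from evap.evaluation.models import Questionnaire


def visible_general_questionnaires(evaluation):
    condition = Q(visibility__in=(Questionnaire.Visibility.MANAGERS, Questionnaire.Visibility.EDITORS))
    if evaluation.pk is not None:
        # only widen the queryset with already-used questionnaires for saved evaluations
        condition |= Q(contributions__evaluation=evaluation)
    return Questionnaire.objects.general_questionnaires().filter(condition).distinct()
```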
2020-04-27T20:02:10
e-valuation/EvaP
1,457
e-valuation__EvaP-1457
[ "1456" ]
943ba64a24c31ee10bb32c9bdda1a7fb47faeb0e
diff --git a/evap/results/views.py b/evap/results/views.py --- a/evap/results/views.py +++ b/evap/results/views.py @@ -107,7 +107,7 @@ def get_evaluations_with_prefetched_data(evaluations): "course__degrees", "course__semester", "course__responsibles", - ) + ).order_by('pk') ) for evaluation, participant_count, voter_count, course_evaluations_count in zip(evaluations, participant_counts, voter_counts, course_evaluations_counts): if evaluation._participant_count is None:
Participant count wrong for unpublished results On the results index page the number of voters and participants for unpublished evaluations is not shown correctly. Staff users can see these evaluations before they are published, but the numbers differ from the actual values (which are shown correctly on results detail pages).
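A simplified sketch (not the actual `get_evaluations_with_prefetched_data` helper) of the failure mode: the counts are computed in separate annotated queries and matched to the evaluations positionally with `zip()`, so every involved queryset needs the same explicit ordering, which is what the added `order_by('pk')` ensures.

```
from django.db.models import Count

from evap.evaluation.models import Evaluation


def attach_voter_counts(evaluation_pks):
    evaluations = list(Evaluation.objects.filter(pk__in=evaluation_pks).order_by("pk"))
    voter_counts = (
        Evaluation.objects.filter(pk__in=evaluation_pks)
        .annotate(voter_count=Count("voters"))
        .order_by("pk")  # must match the ordering of the list above
        .values_list("voter_count", flat=True)
    )
    for evaluation, voter_count in zip(evaluations, voter_counts):
        evaluation._voter_count = voter_count
    return evaluations
```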
2020-04-29T18:53:42
e-valuation/EvaP
1,459
e-valuation__EvaP-1459
[ "1422" ]
c40b4b038806919b24c6aeeb42855a3c63b39721
diff --git a/evap/staff/forms.py b/evap/staff/forms.py --- a/evap/staff/forms.py +++ b/evap/staff/forms.py @@ -15,6 +15,7 @@ FaqSection, Question, Questionnaire, RatingAnswerCounter, Semester, TextAnswer, UserProfile) from evap.evaluation.tools import date_to_datetime +from evap.results.tools import collect_results from evap.results.views import (update_template_cache, update_template_cache_of_published_evaluations_in_course) @@ -758,6 +759,11 @@ def save(self, *args, **kw): self.instance.is_active = not self.cleaned_data.get('is_inactive') + # refresh results cache + for evaluation in Evaluation.objects.filter(contributions__contributor=self.instance).distinct(): + if any(attribute in self.changed_data for attribute in ["first_name", "last_name", "title"]): + collect_results(evaluation, force_recalculation=True) + self.instance.save()
diff --git a/evap/staff/tests/test_forms.py b/evap/staff/tests/test_forms.py --- a/evap/staff/tests/test_forms.py +++ b/evap/staff/tests/test_forms.py @@ -9,6 +9,7 @@ to_querydict) from evap.staff.forms import (ContributionForm, ContributionFormSet, CourseForm, EvaluationEmailForm, EvaluationForm, QuestionnaireForm, SingleResultForm, UserForm) +from evap.results.tools import collect_results from evap.contributor.forms import EvaluationForm as ContributorEvaluationForm @@ -143,6 +144,31 @@ def test_user_cannot_be_removed_from_evaluation_already_voted_for(self): self.assertIn('evaluations_participating_in', form.errors) self.assertIn("Evaluations for which the user already voted can't be removed", form.errors['evaluations_participating_in'][0]) + def test_results_cache_refreshed(self): + contributor = baker.make(UserProfile, first_name="Peter") + evaluation = baker.make(Evaluation, state="published") + baker.make(Contribution, contributor=contributor, + evaluation=evaluation) + + results_before = collect_results(evaluation) + + form_data = get_form_data_from_instance(UserForm, contributor) + form_data["first_name"] = "Patrick" + form = UserForm(form_data, instance=contributor) + form.save() + + results_after = collect_results(evaluation) + + self.assertEqual( + results_before.contribution_results[0].contributor.first_name, + "Peter" + ) + + self.assertEqual( + results_after.contribution_results[0].contributor.first_name, + "Patrick" + ) + class SingleResultFormTests(TestCase): def test_single_result_form_saves_participant_and_voter_count(self):
Changing a username won’t refresh results cache When the name of a contributor is changed, the new name won’t be reflected on the results pages.
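Condensed from the patch above, the approach is roughly: when a name-related field changes on save, recompute the cached results of every evaluation the user contributed to (the wrapper function here is only illustrative; the real code lives in the staff `UserForm.save`).

```
from evap.evaluation.models import Evaluation
from evap.results.tools import collect_results


def refresh_results_caches(user_form):
    # user_form is the staff UserForm; changed_data lists the fields modified in this save
    if {"first_name", "last_name", "title"} & set(user_form.changed_data):
        for evaluation in Evaluation.objects.filter(contributions__contributor=user_form.instance).distinct():
            collect_results(evaluation, force_recalculation=True)
```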
2020-05-04T16:28:05
e-valuation/EvaP
1,463
e-valuation__EvaP-1463
[ "999" ]
4cc07147079e5e838c6efd76c97934c3c544c843
diff --git a/evap/results/exporters.py b/evap/results/exporters.py --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -85,6 +85,10 @@ def export(self, response, semesters, selection_list, include_not_enough_voters= # always be tightly coupled based on the layout of the sheet. We thus think that one big method # containing the business logic is okay here # pylint: disable=too-many-locals, too-many-nested-blocks, too-many-branches, too-many-statements + + # We want to throw early here, since workbook.save() will throw an IndexError otherwise. + assert len(selection_list) > 0 + workbook = xlwt.Workbook() self.init_styles(workbook) counter = 1 @@ -114,7 +118,8 @@ def export(self, response, semesters, selection_list, include_not_enough_voters= results = OrderedDict() for contribution_result in collect_results(evaluation).contribution_results: for questionnaire_result in contribution_result.questionnaire_results: - if all(not question_result.question.is_rating_question or question_result.counts is None for question_result in questionnaire_result.question_results): + # RatingQuestion.counts is a tuple of integers or None, if this tuple is all zero, we want to exclude it + if all(not question_result.question.is_rating_question or question_result.counts is None or sum(question_result.counts) == 0 for question_result in questionnaire_result.question_results): continue if not contributor or contribution_result.contributor is None or contribution_result.contributor == contributor: results.setdefault(questionnaire_result.questionnaire.id, []).extend(questionnaire_result.question_results)
diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py --- a/evap/results/tests/test_exporters.py +++ b/evap/results/tests/test_exporters.py @@ -202,6 +202,161 @@ def test_course_type_ordering(self): self.assertEqual(workbook.sheets()[0].row_values(0)[1], evaluation_2.full_name + "\n") self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_1.full_name + "\n") + def test_multiple_sheets(self): + binary_content = BytesIO() + semester = baker.make(Semester) + ExcelExporter().export(binary_content, [semester], [([], []), ([], [])]) + + binary_content.seek(0) + workbook = xlrd.open_workbook(file_contents=binary_content.read()) + + self.assertEqual(len(workbook.sheets()), 2) + + @staticmethod + def get_export_sheet(semester, degree, course_types, include_unpublished=True, include_not_enough_voters=True): + binary_content = BytesIO() + ExcelExporter().export( + binary_content, + [semester], + [([degree.id], course_types)], + include_unpublished=include_unpublished, + include_not_enough_voters=include_not_enough_voters, + ) + binary_content.seek(0) + workbook = xlrd.open_workbook(file_contents=binary_content.read()) + return workbook.sheets()[0] + + def test_include_unpublished(self): + semester = baker.make(Semester) + degree = baker.make(Degree) + published_evaluation = baker.make(Evaluation, state="published", course__semester=semester, course__degrees=[degree], course__type__order=1) + unpublished_evaluation = baker.make(Evaluation, state="reviewed", course__semester=semester, course__degrees=[degree], course__type__order=2) + course_types = [published_evaluation.course.type.id, unpublished_evaluation.course.type.id] + + # First, make sure that the unpublished does not appear + sheet = self.get_export_sheet(include_unpublished=False, semester=semester, degree=degree, course_types=course_types) + self.assertEqual(len(sheet.row_values(0)), 2) + self.assertEqual( + sheet.row_values(0)[1][:-1], + published_evaluation.full_name + ) + + # Now, make sure that it appears when wanted + sheet = self.get_export_sheet(include_unpublished=True, semester=semester, degree=degree, course_types=course_types) + self.assertEqual(len(sheet.row_values(0)), 3) + # These two should be ordered according to evaluation.course.type.order + self.assertEqual(sheet.row_values(0)[1][:-1], published_evaluation.full_name) + self.assertEqual(sheet.row_values(0)[2][:-1], unpublished_evaluation.full_name) + + def test_include_not_enough_voters(self): + semester = baker.make(Semester) + degree = baker.make(Degree) + enough_voters_evaluation = baker.make( + Evaluation, + state="published", + course__semester=semester, + course__degrees=[degree], + _voter_count=1000, + _participant_count=1000, + ) + not_enough_voters_evaluation = baker.make( + Evaluation, + state="published", + course__semester=semester, + course__degrees=[degree], + _voter_count=1, + _participant_count=1000, + ) + + course_types = [enough_voters_evaluation.course.type.id, not_enough_voters_evaluation.course.type.id] + + # First, make sure that the one with only a single voter does not appear + sheet = self.get_export_sheet(semester, degree, course_types, include_not_enough_voters=False) + self.assertEqual(len(sheet.row_values(0)), 2) + self.assertEqual( + sheet.row_values(0)[1][:-1], + enough_voters_evaluation.full_name + ) + + # Now, check with the option enabled + sheet = self.get_export_sheet(semester, degree, course_types, include_not_enough_voters=True) + self.assertEqual(len(sheet.row_values(0)), 3) + 
self.assertEqual( + {enough_voters_evaluation.full_name, not_enough_voters_evaluation.full_name}, + {sheet.row_values(0)[1][:-1], sheet.row_values(0)[2][:-1]} + ) + + def test_no_degree_or_course_type(self): + evaluation = baker.make(Evaluation) + with self.assertRaises(AssertionError): + ExcelExporter().export(BytesIO(), [evaluation.course.semester], []) + + def test_exclude_single_result(self): + degree = baker.make(Degree) + evaluation = baker.make(Evaluation, is_single_result=True, state="published", course__degrees=[degree]) + sheet = self.get_export_sheet(evaluation.course.semester, degree, [evaluation.course.type.id]) + self.assertEqual(len(sheet.row_values(0)), 1, "There should be no column for the evaluation, only the row description") + + def test_exclude_used_but_unanswered_questionnaires(self): + degree = baker.make(Degree) + evaluation = baker.make(Evaluation, _voter_count=10, _participant_count=10, state="published", course__degrees=[degree]) + used_questionnaire = baker.make(Questionnaire) + used_question = baker.make(Question, type=Question.LIKERT, questionnaire=used_questionnaire) + unused_questionnaire = baker.make(Questionnaire) + unused_question = baker.make(Question, type=Question.LIKERT, questionnaire=unused_questionnaire) + baker.make(RatingAnswerCounter, question=used_question, contribution=evaluation.general_contribution, answer=3, count=10) + evaluation.general_contribution.questionnaires.set([used_questionnaire, unused_questionnaire]) + + sheet = self.get_export_sheet(evaluation.course.semester, degree, [evaluation.course.type.id]) + self.assertEqual(sheet.row_values(4)[0], used_questionnaire.name) + self.assertEqual(sheet.row_values(5)[0], used_question.text) + self.assertNotIn(unused_questionnaire.name, sheet.col_values(0)) + self.assertNotIn(unused_question.text, sheet.col_values(0)) + + def test_degree_course_type_name(self): + degree = baker.make(Degree, name_en="Celsius") + course_type = baker.make(CourseType, name_en="LetsPlay") + evaluation = baker.make(Evaluation, course__degrees=[degree], course__type=course_type, state="published") + + sheet = self.get_export_sheet(evaluation.course.semester, degree, [course_type.id]) + self.assertEqual(sheet.col_values(1)[1:3], [degree.name, course_type.name]) + + def test_multiple_evaluations(self): + semester = baker.make(Semester) + degree = baker.make(Degree) + evaluation1 = baker.make(Evaluation, course__semester=semester, course__degrees=[degree], state="published") + evaluation2 = baker.make(Evaluation, course__semester=semester, course__degrees=[degree], state="published") + + sheet = self.get_export_sheet(semester, degree, [evaluation1.course.type.id, evaluation2.course.type.id]) + + self.assertEqual( + set(sheet.row_values(0)[1:]), + set((evaluation1.full_name + "\n", evaluation2.full_name + "\n")) + ) + + def test_correct_grades_and_bottom_numbers(self): + degree = baker.make(Degree) + evaluation = baker.make(Evaluation, _voter_count=5, _participant_count=10, course__degrees=[degree], state="published") + questionnaire1 = baker.make(Questionnaire, order=1) + questionnaire2 = baker.make(Questionnaire, order=2) + question1 = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire1) + question2 = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire2) + baker.make(RatingAnswerCounter, answer=1, count=1, question=question1, contribution=evaluation.general_contribution) + baker.make(RatingAnswerCounter, answer=3, count=1, question=question1, 
contribution=evaluation.general_contribution) + baker.make(RatingAnswerCounter, answer=2, count=1, question=question2, contribution=evaluation.general_contribution) + baker.make(RatingAnswerCounter, answer=4, count=1, question=question2, contribution=evaluation.general_contribution) + + evaluation.general_contribution.questionnaires.set([questionnaire1, questionnaire2]) + + sheet = self.get_export_sheet(evaluation.course.semester, degree, [evaluation.course.type.id]) + + self.assertEqual(sheet.row_values(5)[1], 2.0) # question 1 average + self.assertEqual(sheet.row_values(8)[1], 3.0) # question 2 average + self.assertEqual(sheet.row_values(10)[1], 2.5) # Average grade + self.assertEqual(sheet.row_values(11)[1], "5/10") # Voters / Participants + self.assertEqual(sheet.row_values(12)[1], "50%") # Voter percentage + + def test_contributor_result_export(self): degree = baker.make(Degree) contributor = baker.make(UserProfile)
Result exporter is not tested There are some tests in staff/tests/test_views that exercise the exporter, but it seems they do not create any questionnaires. Large portions of the results exporter are therefore not covered.
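A minimal sketch of the kind of coverage the issue asks for, creating a questionnaire with a rating question and real answers before exporting (condensed; the tests added in evap/results/tests/test_exporters.py go much further):

```
from io import BytesIO

import xlrd
from model_bakery import baker

from evap.evaluation.models import Degree, Evaluation, Question, Questionnaire, RatingAnswerCounter
from evap.results.exporters import ExcelExporter


def export_single_published_evaluation():
    degree = baker.make(Degree)
    evaluation = baker.make(Evaluation, state="published", course__degrees=[degree],
                            _voter_count=10, _participant_count=10)
    questionnaire = baker.make(Questionnaire)
    question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire)
    evaluation.general_contribution.questionnaires.set([questionnaire])
    baker.make(RatingAnswerCounter, question=question, contribution=evaluation.general_contribution,
               answer=2, count=5)

    content = BytesIO()
    ExcelExporter().export(content, [evaluation.course.semester],
                           [([degree.id], [evaluation.course.type.id])],
                           include_not_enough_voters=True, include_unpublished=True)
    content.seek(0)
    return xlrd.open_workbook(file_contents=content.read())
```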
2020-05-11T17:34:49
e-valuation/EvaP
1,467
e-valuation__EvaP-1467
[ "1466" ]
ca579b29417ac9f8cd375401d53a195d0b4869e2
diff --git a/evap/middleware.py b/evap/middleware.py --- a/evap/middleware.py +++ b/evap/middleware.py @@ -17,6 +17,9 @@ def process_view(request, view_func, _view_args, _view_kwargs): if "no_login_required" in view_func.__dict__ and view_func.no_login_required: return None + if view_func.__name__ in ["OIDCAuthenticationRequestView", "OIDCAuthenticationCallbackView"]: + return None + return redirect_to_login(request.get_full_path())
OpenID login button is broken in latest Firefox Behavior: When clicking the "login" button at `https://evap.hpi.de` a request is sent to `https://evap.hpi.de/oidc/authenticate/`, which returns a 302 and redirects to `https://evap.hpi.de/?next=/oidc/authenticate/`, which looks no different from `https://evap.hpi.de`. After clicking the login button again I'm redirected to `https://evap.hpi.de/?next=/oidc/authenticate/%3Fnext%3D/oidc/authenticate/`. Expected behavior: Display the OpenID page and allow login. Steps to reproduce: Go to https://evap.hpi.de and click the "login" button. Browser is an up-to-date Firefox on Linux.
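The fix (see the patch above) whitelists the two OIDC views in EvaP's login-required middleware so they are no longer redirected back to the login page. A condensed sketch of that `process_view` hook; the `is_authenticated` early return is assumed, the rest mirrors the patch:

```
from django.contrib.auth.views import redirect_to_login


def process_view(request, view_func, _view_args, _view_kwargs):
    if request.user.is_authenticated:
        return None
    if view_func.__dict__.get("no_login_required"):
        return None
    # let the OIDC authentication views (class names from mozilla-django-oidc) pass through,
    # otherwise clicking "login" just redirects back to the start page forever
    if view_func.__name__ in ("OIDCAuthenticationRequestView", "OIDCAuthenticationCallbackView"):
        return None
    return redirect_to_login(request.get_full_path())
```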
2020-05-18T08:48:43
e-valuation/EvaP
1,472
e-valuation__EvaP-1472
[ "1405" ]
b37e240670b536fb58b10ca2074fecd0c9c54339
diff --git a/evap/staff/forms.py b/evap/staff/forms.py --- a/evap/staff/forms.py +++ b/evap/staff/forms.py @@ -1,4 +1,5 @@ from collections.abc import Iterable +from datetime import datetime import logging from django import forms @@ -326,6 +327,15 @@ def save(self, *args, **kw): return evaluation +class EvaluationCopyForm(EvaluationForm): + def __init__(self, data=None, instance=None): + opts = self._meta + initial = forms.models.model_to_dict(instance, opts.fields, opts.exclude) + initial['last_modified_time'] = datetime.now() + initial['general_questionnaires'] = instance.general_contribution.questionnaires.all() + super().__init__(data=data, initial=initial, semester=instance.course.semester) + + class SingleResultForm(forms.ModelForm): last_modified_time_2 = forms.DateTimeField(label=_("Last modified"), required=False, localize=True, disabled=True) last_modified_user_2 = forms.CharField(label=_("Last modified by"), required=False, disabled=True) @@ -458,6 +468,18 @@ def clean(self): self.add_error('does_not_contribute', _("Select either this option or at least one questionnaire!")) +class ContributionCopyForm(ContributionForm): + def __init__(self, data=None, instance=None, evaluation=None, **kwargs): + initial = None + copied_instance = Contribution(evaluation=evaluation) + if instance: + opts = self._meta + initial = forms.models.model_to_dict(instance, opts.fields, opts.exclude) + del initial['evaluation'] + initial['does_not_contribute'] = not instance.questionnaires.exists() + super().__init__(data, initial=initial, instance=copied_instance, evaluation=evaluation, **kwargs) + + class EvaluationEmailForm(forms.Form): recipients = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=EmailTemplate.Recipients.choices, label=_("Send email to")) subject = forms.CharField(label=_("Subject")) @@ -636,6 +658,21 @@ def clean(self): found_contributor.add(contributor) +class ContributionCopyFormSet(ContributionFormSet): + def __init__(self, data, instance, new_instance): + # First, pass the old evaluation instance to create a ContributionCopyForm for each contribution + super().__init__(data, instance=instance, form_kwargs={'evaluation': new_instance}) + # Then, use the new evaluation instance as target for validation and saving purposes + self.instance = new_instance + + def save(self, commit=True): + # As the contained ContributionCopyForm have not-yet-saved instances, + # they’d be skipped when saving the formset. + # To circumvent this, explicitly note that all forms should be saved as new instance. 
+ self.save_as_new = True + super().save(commit) + + class QuestionForm(forms.ModelForm): class Meta: model = Question diff --git a/evap/staff/urls.py b/evap/staff/urls.py --- a/evap/staff/urls.py +++ b/evap/staff/urls.py @@ -25,6 +25,7 @@ path("semester/<int:semester_id>/evaluation/create", views.evaluation_create, name="evaluation_create"), path("semester/<int:semester_id>/evaluation/create/<int:course_id>", views.evaluation_create, name="evaluation_create"), path("semester/<int:semester_id>/evaluation/<int:evaluation_id>/edit", views.evaluation_edit, name="evaluation_edit"), + path("semester/<int:semester_id>/evaluation/<int:evaluation_id>/copy", views.evaluation_copy, name="evaluation_copy"), path("semester/<int:semester_id>/evaluation/<int:evaluation_id>/email", views.evaluation_email, name="evaluation_email"), path("semester/<int:semester_id>/evaluation/<int:evaluation_id>/preview", views.evaluation_preview, name="evaluation_preview"), path("semester/<int:semester_id>/evaluation/<int:evaluation_id>/person_management", views.evaluation_person_management, name="evaluation_person_management"), diff --git a/evap/staff/views.py b/evap/staff/views.py --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -31,9 +31,11 @@ from evap.results.views import update_template_cache_of_published_evaluations_in_course from evap.rewards.models import RewardPointGranting from evap.rewards.tools import can_reward_points_be_used_by, is_semester_activated -from evap.staff.forms import (AtLeastOneFormSet, ContributionForm, ContributionFormSet, CourseForm, CourseTypeForm, +from evap.staff.forms import (AtLeastOneFormSet, ContributionForm, ContributionCopyForm, ContributionFormSet, + ContributionCopyFormSet, CourseForm, CourseTypeForm, CourseTypeMergeSelectionForm, DegreeForm, EmailTemplateForm, EvaluationEmailForm, - EvaluationForm, EvaluationParticipantCopyForm, ExportSheetForm, FaqQuestionForm, + EvaluationForm, EvaluationCopyForm, EvaluationParticipantCopyForm, ExportSheetForm, + FaqQuestionForm, FaqSectionForm, ModelWithImportNamesFormSet, ImportForm, QuestionForm, QuestionnaireForm, QuestionnairesAssignForm, RemindResponsibleForm, SemesterForm, SingleResultForm, TextAnswerForm, UserBulkUpdateForm, UserForm, UserImportForm, UserMergeSelectionForm) @@ -806,6 +808,37 @@ def evaluation_create(request, semester_id, course_id=None): )) +@manager_required +def evaluation_copy(request, semester_id, evaluation_id): + semester = get_object_or_404(Semester, id=semester_id) + evaluation = get_object_or_404(Evaluation, id=evaluation_id, course__semester=semester) + + form = EvaluationCopyForm(request.POST or None, evaluation) + + InlineContributionFormset = inlineformset_factory(Evaluation, Contribution, formset=ContributionCopyFormSet, + form=ContributionCopyForm, extra=1) + formset = InlineContributionFormset(request.POST or None, instance=evaluation, new_instance=form.instance) + + if form.is_valid() and formset.is_valid(): + copied_evaluation = form.save() + copied_evaluation.set_last_modified(request.user) + copied_evaluation.save() + formset.save() + update_template_cache_of_published_evaluations_in_course(copied_evaluation.course) + + messages.success(request, _("Successfully created evaluation.")) + return redirect('staff:semester_view', semester_id) + + return render(request, "staff_evaluation_form.html", dict( + semester=semester, + evaluation_form=form, + formset=formset, + manager=True, + editable=True, + state="", + )) + + @manager_required def single_result_create(request, semester_id, course_id=None): 
semester = get_object_or_404(Semester, id=semester_id)
diff --git a/evap/staff/tests/test_forms.py b/evap/staff/tests/test_forms.py --- a/evap/staff/tests/test_forms.py +++ b/evap/staff/tests/test_forms.py @@ -1,4 +1,5 @@ from unittest.mock import patch +from datetime import datetime from django.forms.models import inlineformset_factory from django.test import TestCase from model_bakery import baker @@ -7,7 +8,8 @@ Questionnaire, Semester, UserProfile) from evap.evaluation.tests.tools import (create_evaluation_with_responsible_and_editor, get_form_data_from_instance, to_querydict) -from evap.staff.forms import (ContributionForm, ContributionFormSet, CourseForm, EvaluationEmailForm, EvaluationForm, +from evap.staff.forms import (ContributionForm, ContributionCopyForm, ContributionFormSet, CourseForm, + EvaluationEmailForm, EvaluationForm, EvaluationCopyForm, QuestionnaireForm, SingleResultForm, UserForm) from evap.results.tools import collect_results from evap.contributor.forms import EvaluationForm as ContributorEvaluationForm @@ -177,6 +179,52 @@ def test_single_result_form_saves_participant_and_voter_count(self): self.assertEqual(evaluation.num_voters, 10) +class ContributionCopyFormTests(TestCase): + @classmethod + def setUpTestData(cls): + cls.evaluation = baker.make(Evaluation) + cls.contributor = baker.make(UserProfile) + cls.contribution = baker.make( + Contribution, + evaluation=cls.evaluation, + contributor=cls.contributor, + order=2, + role=Contribution.Role.EDITOR, + textanswer_visibility=Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS, + label='Teacher', + ) + cls.questionnaires = baker.make(Questionnaire, type=Questionnaire.Type.CONTRIBUTOR, _quantity=2) + cls.contribution.questionnaires.set(cls.questionnaires) + + def test_initial_from_original(self): + evaluation = Evaluation() + form = ContributionCopyForm(None, instance=self.contribution, evaluation=evaluation) + self.assertEqual(form['evaluation'].initial, None) + self.assertEqual(form['contributor'].initial, self.contributor.pk) + self.assertCountEqual(form['questionnaires'].initial, self.questionnaires) + self.assertEqual(form['order'].initial, 2) + self.assertEqual(form['role'].initial, Contribution.Role.EDITOR) + self.assertEqual(form['textanswer_visibility'].initial, Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS) + self.assertEqual(form['label'].initial, 'Teacher') + self.assertEqual(form.evaluation, evaluation) + + def test_no_original_given(self): + new_evaluation = Evaluation() + form = ContributionCopyForm(None, instance=None, evaluation=new_evaluation) + self.assertEqual(form.evaluation, new_evaluation) + + def test_copy_contribution(self): + # To simulate the life-cycle of the form, first give the form an unsaved evaluation. + new_evaluation = baker.prepare(Evaluation, _save_related=True) + form_data = get_form_data_from_instance(ContributionCopyForm, self.contribution, evaluation=new_evaluation) + # Just before saving the form, save the evaluation instance. 
+ new_evaluation.save() + form = ContributionCopyForm(form_data, instance=self.contribution, evaluation=new_evaluation) + self.assertTrue(form.is_valid()) + copied_contribution = form.save() + self.assertEqual(copied_contribution.evaluation, new_evaluation) + + class ContributionFormsetTests(TestCase): def test_contribution_form_set(self): """ @@ -706,3 +754,51 @@ def test_unused_questionnaire_visibility(self): form = EvaluationForm(instance=evaluation, semester=evaluation.course.semester) self.assertIn(questionnaire, form.fields["general_questionnaires"].queryset) + + +class EvaluationCopyFormTests(TestCase): + @classmethod + def setUpTestData(cls): + cls.semester = baker.make(Semester) + cls.course = baker.make(Course, semester=cls.semester) + cls.participants = baker.make(UserProfile, _quantity=8) + cls.evaluation = baker.make( + Evaluation, + course=cls.course, + name_de="Das Original", + name_en="The Original", + last_modified_time=datetime(2020, 1, 1), + last_modified_user=baker.make(UserProfile), + participants=cls.participants, + voters=cls.participants[:6], + ) + cls.general_questionnaires = baker.make(Questionnaire, _quantity=5) + cls.evaluation.general_contribution.questionnaires.set(cls.general_questionnaires) + + def test_initial_from_original(self): + form = EvaluationCopyForm(None, self.evaluation) + self.assertEqual(form['course'].initial, self.course.pk) + self.assertCountEqual(form.fields['course'].queryset, self.semester.courses.all()) + self.assertEqual(form['name_de'].initial, "Das Original") + self.assertEqual(form['name_en'].initial, "The Original") + self.assertCountEqual(form['participants'].initial, self.participants) + self.assertGreater(form['last_modified_time'].initial, self.evaluation.last_modified_time) + self.assertEqual(form['last_modified_user_name'].initial, None) + self.assertCountEqual(form['general_questionnaires'].initial, self.general_questionnaires) + + def test_not_changing_name_fails(self): + form_data = EvaluationCopyForm(None, self.evaluation).initial + form = EvaluationCopyForm(form_data, self.evaluation) + self.assertFalse(form.is_valid()) + self.assertEqual(form.errors['name_de'], ["Evaluation with this Course and Name (german) already exists."]) + self.assertEqual(form.errors['name_en'], ["Evaluation with this Course and Name (english) already exists."]) + + def test_save_makes_a_copy(self): + form_data = get_form_data_from_instance(EvaluationCopyForm, self.evaluation) + form_data['name_de'] = "Eine Kopie" + form_data['name_en'] = "A Copy" + form = EvaluationCopyForm(form_data, self.evaluation) + self.assertTrue(form.is_valid()) + copied_evaluation = form.save() + self.assertNotEqual(copied_evaluation, self.evaluation) + self.assertEqual(Evaluation.objects.count(), 2) diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -19,6 +19,7 @@ from evap.evaluation.tests.tools import FuzzyInt, let_user_vote_for_evaluation, WebTestWith200Check from evap.rewards.models import SemesterActivation, RewardPointGranting from evap.staff.tools import generate_import_filename, ImportType +from evap.staff.forms import ContributionCopyForm, ContributionCopyFormSet, EvaluationCopyForm from evap.staff.views import get_evaluations_with_prefetched_data @@ -1282,6 +1283,11 @@ def setUpTestData(cls): cls.manager_user = baker.make(UserProfile, email='[email protected]', groups=[Group.objects.get(name='Manager')]) cls.course = baker.make(Course, 
semester=baker.make(Semester, pk=1)) + def test_course_is_prefilled(self): + response = self.app.get(f'{self.url}/{self.course.pk}', user=self.manager_user, status=200) + form = response.context['form'] + self.assertEqual(form['course'].initial, self.course.pk) + def test_single_result_create(self): """ Tests the single result creation view with one valid and one invalid input dataset. @@ -1314,6 +1320,11 @@ def setUpTestData(cls): cls.q1 = baker.make(Questionnaire, type=Questionnaire.Type.TOP) cls.q2 = baker.make(Questionnaire, type=Questionnaire.Type.CONTRIBUTOR) + def test_course_is_prefilled(self): + response = self.app.get(f'{self.url}/{self.course.pk}', user=self.manager_user, status=200) + form = response.context['evaluation_form'] + self.assertEqual(form['course'].initial, self.course.pk) + def test_evaluation_create(self): """ Tests the evaluation creation view with one valid and one invalid input dataset. @@ -1348,6 +1359,50 @@ def test_evaluation_create(self): self.assertEqual(Evaluation.objects.get().name_de, "lfo9e7bmxp1xi") +class TestEvaluationCopyView(WebTest): + url = '/staff/semester/1/evaluation/1/copy' + + @classmethod + def setUpTestData(cls): + cls.manager = baker.make(UserProfile, email='[email protected]', groups=[Group.objects.get(name='Manager')]) + cls.semester = baker.make(Semester, pk=1) + cls.course = baker.make(Course, semester=cls.semester) + cls.evaluation = baker.make( + Evaluation, + pk=1, + course=cls.course, + name_de="Das Original", + name_en="The Original", + ) + cls.general_questionnaires = baker.make(Questionnaire, _quantity=5) + cls.evaluation.general_contribution.questionnaires.set(cls.general_questionnaires) + for __ in range(3): + baker.make( + Contribution, + evaluation=cls.evaluation, + contributor=baker.make(UserProfile), + ) + + def test_copy_forms_are_used(self): + response = self.app.get(self.url, user=self.manager, status=200) + self.assertIsInstance(response.context['evaluation_form'], EvaluationCopyForm) + self.assertIsInstance(response.context['formset'], ContributionCopyFormSet) + self.assertTrue(issubclass(response.context['formset'].form, ContributionCopyForm)) + + def test_evaluation_copy(self): + response = self.app.get(self.url, user=self.manager, status=200) + form = response.forms['evaluation-form'] + form['name_de'] = "Eine Kopie" + form['name_en'] = "A Copy" + form.submit() + + # As we checked previously that the respective copy forms were used, + # we don’t have to check for individual attributes, as those are checked in the respective form tests + self.assertEqual(Evaluation.objects.count(), 2) + copied_evaluation = Evaluation.objects.exclude(pk=self.evaluation.pk).get() + self.assertEqual(copied_evaluation.contributions.count(), 4) + + class TestCourseEditView(WebTest): url = '/staff/semester/1/course/1/edit'
Course and Evaluation creation redesign The creation of Courses and Evaluations should get a different user flow. - In the staff semester view, the current dropdown button for creating a Course, Evaluation or Single Result and for importing data should be replaced with a single (light) button just for the import. - On the "Courses" tab a new (dark) button for creating a Course should be placed on the upper left with the same functionality as before. - In each row of the table a column with one (dark) button for adding an Evaluation and one (dark) button for adding a Single Result to this Course should be added on the right. - When clicking on these buttons, the Course field in the Evaluation form/Single Result form should be pre-filled with the respective Course (see the sketch below). - Additionally, each Evaluation in the "Evaluations" tab should get a "Copy" button, which, similar to the questionnaire copying, will open an Evaluation creation form with all fields (including participants and contributors) pre-filled like in the original Evaluation.
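A rough sketch (view and form wiring assumed, not taken verbatim from evap/staff/views.py) of how the per-course buttons could pre-fill the Course field mentioned above:

```
from django.shortcuts import get_object_or_404

from evap.evaluation.models import Course, Semester


def evaluation_create_initial(semester_id, course_id=None):
    # called by the create view; when the button in a course row was used, course_id is set
    semester = get_object_or_404(Semester, id=semester_id)
    initial = {}
    if course_id is not None:
        initial["course"] = get_object_or_404(Course, id=course_id, semester=semester)
    return semester, initial

# the view would then build its form roughly as
#   EvaluationForm(request.POST or None, semester=semester, initial=initial)
```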
2020-05-19T20:04:36
e-valuation/EvaP
1,476
e-valuation__EvaP-1476
[ "1465" ]
42a5662c70bfcb55c3ad658d920ecefb1092651b
diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -1,5 +1,6 @@ from collections import namedtuple, defaultdict from datetime import datetime, date, timedelta +from enum import Enum, auto import logging import secrets import uuid @@ -393,6 +394,13 @@ class Evaluation(models.Model): # whether to wait for grade uploading before publishing results wait_for_grade_upload_before_publishing = models.BooleanField(verbose_name=_("wait for grade upload before publishing"), default=True) + class TextAnswerReviewState(Enum): + do_not_call_in_templates = True + NO_TEXTANSWERS = auto() + REVIEW_NEEDED = auto() + REVIEW_URGENT = auto() + REVIEWED = auto() + class Meta: unique_together = ( ('course', 'name_de'), @@ -700,6 +708,24 @@ def reviewed_textanswer_set(self): def num_reviewed_textanswers(self): return self.reviewed_textanswer_set.count() + @property + def textanswer_review_state(self): + if self.num_textanswers == 0: + return self.TextAnswerReviewState.NO_TEXTANSWERS + + if self.num_textanswers == self.num_reviewed_textanswers: + return self.TextAnswerReviewState.REVIEWED + + if self.state != "evaluated": + return self.TextAnswerReviewState.REVIEW_NEEDED + + if (self.course.final_grade_documents + or self.course.gets_no_grade_documents + or not self.wait_for_grade_upload_before_publishing): + return self.TextAnswerReviewState.REVIEW_URGENT + + return self.TextAnswerReviewState.REVIEW_NEEDED + @property def ratinganswer_counters(self): return RatingAnswerCounter.objects.filter(contribution__evaluation=self)
diff --git a/evap/evaluation/tests/test_models.py b/evap/evaluation/tests/test_models.py --- a/evap/evaluation/tests/test_models.py +++ b/evap/evaluation/tests/test_models.py @@ -12,6 +12,7 @@ from evap.evaluation.models import (Contribution, Course, CourseType, EmailTemplate, Evaluation, NotArchiveable, Question, Questionnaire, RatingAnswerCounter, Semester, TextAnswer, UserProfile) +from evap.grades.models import GradeDocument from evap.evaluation.tests.tools import let_user_vote_for_evaluation from evap.results.tools import calculate_average_distribution from evap.results.views import get_evaluation_result_template_fragment_cache_key @@ -307,6 +308,81 @@ def test_publishing_and_unpublishing_effect_on_template_cache(self): self.assertIsNone(caches['results'].get(get_evaluation_result_template_fragment_cache_key(evaluation.id, "de", True))) self.assertIsNone(caches['results'].get(get_evaluation_result_template_fragment_cache_key(evaluation.id, "de", False))) + # pylint: disable=invalid-name + def assert_textanswer_review_state( + self, + evaluation, + expected_default_value, + expected_value_with_gets_no_grade_documents, + expected_value_with_wait_for_grade_upload_before_publishing, + expected_value_after_grade_upload): + + self.assertEqual(evaluation.textanswer_review_state, expected_default_value) + + evaluation.course.gets_no_grade_documents = True + self.assertEqual(evaluation.textanswer_review_state, expected_value_with_gets_no_grade_documents) + evaluation.course.gets_no_grade_documents = False + + evaluation.wait_for_grade_upload_before_publishing = True + self.assertEqual(evaluation.textanswer_review_state, expected_value_with_wait_for_grade_upload_before_publishing) + + grade_document = baker.make(GradeDocument, type=GradeDocument.Type.FINAL_GRADES, course=evaluation.course) + self.assertEqual(evaluation.textanswer_review_state, expected_value_after_grade_upload) + grade_document.delete() + + evaluation.wait_for_grade_upload_before_publishing = False + + def test_textanswer_review_state(self): + evaluation = baker.make( + Evaluation, + state="in_evaluation", + can_publish_text_results=True, + wait_for_grade_upload_before_publishing=False + ) + + self.assert_textanswer_review_state( + evaluation, + evaluation.TextAnswerReviewState.NO_TEXTANSWERS, + evaluation.TextAnswerReviewState.NO_TEXTANSWERS, + evaluation.TextAnswerReviewState.NO_TEXTANSWERS, + evaluation.TextAnswerReviewState.NO_TEXTANSWERS, + ) + + textanswer = baker.make(TextAnswer, contribution=evaluation.general_contribution) + del evaluation.num_textanswers # reset cached_property cache + + # text_answer_review_state should be REVIEW_NEEDED as long as we are still in_evaluation + self.assert_textanswer_review_state( + evaluation, + evaluation.TextAnswerReviewState.REVIEW_NEEDED, + evaluation.TextAnswerReviewState.REVIEW_NEEDED, + evaluation.TextAnswerReviewState.REVIEW_NEEDED, + evaluation.TextAnswerReviewState.REVIEW_NEEDED, + ) + + evaluation.evaluation_end() + evaluation.save() + + self.assert_textanswer_review_state( + evaluation, + evaluation.TextAnswerReviewState.REVIEW_URGENT, + evaluation.TextAnswerReviewState.REVIEW_URGENT, # course has `gets_no_grade_documents` + evaluation.TextAnswerReviewState.REVIEW_NEEDED, # still waiting for grades + evaluation.TextAnswerReviewState.REVIEW_URGENT, # grades were uploaded + ) + + textanswer.state = TextAnswer.State.PUBLISHED + textanswer.save() + del evaluation.num_reviewed_textanswers # reset cached_property cache + + self.assert_textanswer_review_state( + evaluation, + 
evaluation.TextAnswerReviewState.REVIEWED, + evaluation.TextAnswerReviewState.REVIEWED, + evaluation.TextAnswerReviewState.REVIEWED, + evaluation.TextAnswerReviewState.REVIEWED, + ) + class TestCourse(TestCase): def test_can_be_deleted_by_manager(self): diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -1,6 +1,6 @@ import datetime import os -from unittest.mock import patch +from unittest.mock import patch, PropertyMock from django.conf import settings from django.contrib.auth.models import Group @@ -421,7 +421,7 @@ class TestSemesterView(WebTest): @classmethod def setUpTestData(cls): - baker.make(UserProfile, email='[email protected]', groups=[Group.objects.get(name='Manager')]) + cls.user = baker.make(UserProfile, email='[email protected]', groups=[Group.objects.get(name='Manager')]) cls.semester = baker.make(Semester, pk=1) cls.evaluation1 = baker.make(Evaluation, name_de="Evaluation 1", name_en="Evaluation 1", course=baker.make(Course, name_de="A", name_en="B", semester=cls.semester)) @@ -465,6 +465,38 @@ def test_badge_for_external_responsibles(self): response = self.app.get(self.url, user="[email protected]") self.assertContains(response, 'External responsible') + @patch("evap.evaluation.models.Evaluation.textanswer_review_state", new_callable=PropertyMock) + def test_textanswer_review_state_tags(self, textanswer_review_state_mock): + """ Regression test for #1465 """ + + evaluation = baker.make( + Evaluation, + state="in_evaluation", + can_publish_text_results=True, + course__semester=self.semester, + ) + baker.make(TextAnswer, contribution=evaluation.general_contribution) + + textanswer_review_state_mock.return_value = Evaluation.TextAnswerReviewState.NO_TEXTANSWERS + page = self.app.get(f'/staff/semester/{evaluation.course.semester.id}', user=self.user) + expected_count = page.body.decode().count('no_textanswers') + + textanswer_review_state_mock.return_value = Evaluation.TextAnswerReviewState.REVIEW_NEEDED + page = self.app.get(f'/staff/semester/{evaluation.course.semester.id}', user=self.user) + # + 1 because the buttons at the top of the page contain it two times (once for _urgent) + self.assertEqual(page.body.decode().count('unreviewed_textanswers'), expected_count + 1) + self.assertEqual(page.body.decode().count('no_textanswers'), 1) + + textanswer_review_state_mock.return_value = Evaluation.TextAnswerReviewState.REVIEW_URGENT + page = self.app.get(f'/staff/semester/{evaluation.course.semester.id}', user=self.user) + self.assertEqual(page.body.decode().count('unreviewed_textanswers_urgent'), expected_count) + self.assertEqual(page.body.decode().count('no_textanswers'), 1) + + textanswer_review_state_mock.return_value = Evaluation.TextAnswerReviewState.REVIEWED + page = self.app.get(f'/staff/semester/{evaluation.course.semester.id}', user=self.user) + self.assertEqual(page.body.decode().count('textanswers_reviewed'), expected_count) + self.assertEqual(page.body.decode().count('no_textanswers'), 1) + class TestGetEvaluationsWithPrefetchedData(TestCase): @staticmethod
Urgent review only for closed evaluations Text answer review should be marked as "urgent" on the staff semester page only for evaluations where the evaluation period already ended (state `evaluated`).
2020-05-25T17:09:35
e-valuation/EvaP
1,484
e-valuation__EvaP-1484
[ "1479" ]
4595866ed29b3209cfeb66994aa347ec9a66e1e0
diff --git a/evap/contributor/forms.py b/evap/contributor/forms.py --- a/evap/contributor/forms.py +++ b/evap/contributor/forms.py @@ -15,7 +15,7 @@ class EvaluationForm(forms.ModelForm): - general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, widget=CheckboxSelectMultiple, label=_("General questionnaires")) + general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, required=False, widget=CheckboxSelectMultiple, label=_("General questionnaires")) course = forms.ModelChoiceField(Course.objects.all(), disabled=True, required=False, widget=forms.HiddenInput()) name_de_field = forms.CharField(label=_("Name (German)"), disabled=True, required=False) name_en_field = forms.CharField(label=_("Name (English)"), disabled=True, required=False) @@ -64,10 +64,14 @@ def clean_vote_end_date(self): def clean_general_questionnaires(self): # Ensure all locked questionnaires still have the same status (included or not) - locked_qs = self.fields['general_questionnaires'].queryset.filter(is_locked=True) + not_locked = [] + if self.cleaned_data.get('general_questionnaires'): + not_locked = list(self.cleaned_data.get('general_questionnaires').filter(is_locked=False)) - not_locked = [q for q in self.cleaned_data.get('general_questionnaires') if q not in locked_qs] - locked = [q.pk for q in self.instance.general_contribution.questionnaires.filter(is_locked=True)] + locked = list(self.instance.general_contribution.questionnaires.filter(is_locked=True)) + + if not not_locked + locked: + self.add_error("general_questionnaires", _("At least one questionnaire must be selected.")) return not_locked + locked
diff --git a/evap/contributor/tests/test_views.py b/evap/contributor/tests/test_views.py --- a/evap/contributor/tests/test_views.py +++ b/evap/contributor/tests/test_views.py @@ -3,10 +3,10 @@ from django_webtest import WebTest from model_bakery import baker -from evap.evaluation.models import Evaluation, UserProfile, Contribution +from evap.evaluation.models import Evaluation, UserProfile, Contribution, Questionnaire, Course from evap.evaluation.tests.tools import WebTestWith200Check, create_evaluation_with_responsible_and_editor -TESTING_COURSE_ID = 2 +TESTING_EVALUATION_ID = 2 class TestContributorDirectDelegationView(WebTest): @@ -92,15 +92,15 @@ def test_save_settings(self): class TestContributorEvaluationView(WebTestWith200Check): - url = '/contributor/evaluation/%s' % TESTING_COURSE_ID + url = '/contributor/evaluation/%s' % TESTING_EVALUATION_ID test_users = ['[email protected]', '[email protected]'] @classmethod def setUpTestData(cls): - create_evaluation_with_responsible_and_editor(evaluation_id=TESTING_COURSE_ID) + create_evaluation_with_responsible_and_editor(evaluation_id=TESTING_EVALUATION_ID) def setUp(self): - self.evaluation = Evaluation.objects.get(pk=TESTING_COURSE_ID) + self.evaluation = Evaluation.objects.get(pk=TESTING_EVALUATION_ID) def test_wrong_state(self): self.evaluation.revert_to_new() @@ -117,15 +117,15 @@ def test_information_message(self): class TestContributorEvaluationPreviewView(WebTestWith200Check): - url = '/contributor/evaluation/%s/preview' % TESTING_COURSE_ID + url = '/contributor/evaluation/%s/preview' % TESTING_EVALUATION_ID test_users = ['[email protected]', '[email protected]'] @classmethod def setUpTestData(cls): - create_evaluation_with_responsible_and_editor(evaluation_id=TESTING_COURSE_ID) + create_evaluation_with_responsible_and_editor(evaluation_id=TESTING_EVALUATION_ID) def setUp(self): - self.evaluation = Evaluation.objects.get(pk=TESTING_COURSE_ID) + self.evaluation = Evaluation.objects.get(pk=TESTING_EVALUATION_ID) def test_wrong_state(self): self.evaluation.revert_to_new() @@ -134,21 +134,21 @@ def test_wrong_state(self): class TestContributorEvaluationEditView(WebTest): - url = '/contributor/evaluation/%s/edit' % TESTING_COURSE_ID + url = '/contributor/evaluation/%s/edit' % TESTING_EVALUATION_ID @classmethod def setUpTestData(cls): - create_evaluation_with_responsible_and_editor(evaluation_id=TESTING_COURSE_ID) + create_evaluation_with_responsible_and_editor(evaluation_id=TESTING_EVALUATION_ID) def setUp(self): - self.evaluation = Evaluation.objects.get(pk=TESTING_COURSE_ID) + self.evaluation = Evaluation.objects.get(pk=TESTING_EVALUATION_ID) def test_not_authenticated(self): """ Asserts that an unauthorized user gets redirected to the login page. 
""" response = self.app.get(self.url) - self.assertRedirects(response, '/?next=/contributor/evaluation/%s/edit' % TESTING_COURSE_ID) + self.assertRedirects(response, '/?next=/contributor/evaluation/%s/edit' % TESTING_EVALUATION_ID) def test_wrong_usergroup(self): """ @@ -190,6 +190,33 @@ def test_contributor_evaluation_edit(self): response = form.submit(expect_errors=True) self.assertEqual(response.status_code, 403) + def test_single_locked_questionnaire(self): + locked_questionnaire = baker.make( + Questionnaire, + type=Questionnaire.Type.TOP, + is_locked=True, + visibility=Questionnaire.Visibility.EDITORS, + ) + responsible = UserProfile.objects.get(email='[email protected]') + evaluation = baker.make( + Evaluation, + course=baker.make(Course, responsibles=[responsible]), + state='prepared', + pk=TESTING_EVALUATION_ID+1 + ) + evaluation.general_contribution.questionnaires.set([locked_questionnaire]) + + page = self.app.get(f'/contributor/evaluation/{evaluation.pk}/edit', user=responsible, status=200) + form = page.forms['evaluation-form'] + + # see https://github.com/Pylons/webtest/issues/138 + for name_field_tuple in form.field_order[:]: + if 'disabled' in name_field_tuple[1].attrs: + form.field_order.remove(name_field_tuple) + + response = form.submit(name='operation', value='save') + self.assertIn("Successfully updated evaluation", response.follow()) + def test_contributor_evaluation_edit_preview(self): """ Asserts that the preview button either renders a preview or shows an error.
Locked questionnaires failing in editor form #1445 introduced locked questionnaires. However, they are not dealt with correctly in the evaluation editor form. When initially opening the form, the locked questionnaires are correctly selected but are not handled correctly when saving the form. Steps to reproduce: 1. As manager, assign a locked questionnaire as the only general questionnaire for an evaluation. 2. Enable the evaluation for editor review. 3. As editor, open the evaluation form and try to save it. Saving will fail with an error for the field "General questionnaires" ("This field is required."). The locked questionnaire should count as a selected questionnaire and the form should be saved. A test should be added for this use case.
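To make the fix above easier to follow, here is a condensed sketch of the cleaning logic it introduces in `EvaluationForm` (trimmed from the diff; it relies on the surrounding module's `forms` and `_` imports): locked questionnaires already attached to the evaluation always stay selected, and the "at least one questionnaire" rule is checked against the combined set, so a single locked questionnaire no longer triggers "This field is required.".

```python
def clean_general_questionnaires(self):
    # Questionnaires the editor can actually change; the form field itself is no longer required.
    selected = self.cleaned_data.get('general_questionnaires')
    not_locked = list(selected.filter(is_locked=False)) if selected else []
    # Locked questionnaires keep the selection state the manager gave them.
    locked = list(self.instance.general_contribution.questionnaires.filter(is_locked=True))
    if not not_locked + locked:
        self.add_error("general_questionnaires", _("At least one questionnaire must be selected."))
    return not_locked + locked
```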
2020-06-19T13:25:08
e-valuation/EvaP
1,492
e-valuation__EvaP-1492
[ "1483" ]
6cc5dc07ac4e083a9fdd6ee038f7653cca0b2577
diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -1599,6 +1599,15 @@ def send_contributor_publish_notifications(cls, evaluations, template=None): evaluations_per_contributor = defaultdict(set) for evaluation in evaluations: + # an average grade is published or a general text answer exists + relevant_information_published_for_responsibles = ( + evaluation.can_publish_average_grade + or evaluation.textanswer_set.filter(contribution=evaluation.general_contribution).exists() + ) + if relevant_information_published_for_responsibles: + for responsible in evaluation.course.responsibles.all(): + evaluations_per_contributor[responsible].add(evaluation) + # for evaluations with published averaged grade, all contributors get a notification # we don't send a notification if the significance threshold isn't met if evaluation.can_publish_average_grade: @@ -1612,9 +1621,6 @@ def send_contributor_publish_notifications(cls, evaluations, template=None): if textanswer.contribution.contributor: evaluations_per_contributor[textanswer.contribution.contributor].add(evaluation) - for contributor in evaluation.course.responsibles.all(): - evaluations_per_contributor[contributor].add(evaluation) - for contributor, evaluation_set in evaluations_per_contributor.items(): body_params = {'user': contributor, 'evaluations': list(evaluation_set)} template.send_to_user(contributor, {}, body_params, use_cc=True)
diff --git a/evap/evaluation/tests/test_models.py b/evap/evaluation/tests/test_models.py --- a/evap/evaluation/tests/test_models.py +++ b/evap/evaluation/tests/test_models.py @@ -1,5 +1,5 @@ from datetime import datetime, timedelta, date -from unittest.mock import patch, Mock +from unittest.mock import patch, Mock, call from django.contrib.auth.models import Group from django.core.exceptions import ValidationError @@ -13,7 +13,7 @@ from evap.evaluation.models import (Contribution, Course, CourseType, EmailTemplate, Evaluation, NotArchiveable, Question, Questionnaire, RatingAnswerCounter, Semester, TextAnswer, UserProfile) from evap.grades.models import GradeDocument -from evap.evaluation.tests.tools import let_user_vote_for_evaluation +from evap.evaluation.tests.tools import let_user_vote_for_evaluation, make_contributor, make_editor from evap.results.tools import calculate_average_distribution from evap.results.views import get_evaluation_result_template_fragment_cache_key @@ -719,6 +719,58 @@ def test_disable_cc(self): self.assertEqual(len(mail.outbox), 1) self.assertEqual(set(mail.outbox[0].cc), {self.additional_cc.email}) + @staticmethod + def test_send_contributor_publish_notifications(): + responsible1 = baker.make(UserProfile) + responsible2 = baker.make(UserProfile) + # use is_single_result to get can_publish_average_grade to become true + evaluation1 = baker.make(Evaluation, course__responsibles=[responsible1], is_single_result=True) + evaluation2 = baker.make(Evaluation, course__responsibles=[responsible2]) + + editor1 = baker.make(UserProfile) + contributor1 = baker.make(UserProfile) + + contributor2 = baker.make(UserProfile) + editor2 = baker.make(UserProfile) + contributor_both = baker.make(UserProfile) + + # Contributions for evaluation1 + make_contributor(responsible1, evaluation1) + make_contributor(contributor1, evaluation1) + make_contributor(contributor_both, evaluation1) + make_editor(editor1, evaluation1) + + # Contributions for evaluation2 + make_editor(editor2, evaluation2) + contributor_both_contribution = make_contributor(contributor_both, evaluation2) + contributor2_contribution = make_contributor(contributor2, evaluation2) + + baker.make(TextAnswer, contribution=contributor_both_contribution) + baker.make(TextAnswer, contribution=contributor2_contribution) + + expected_calls = [ + # these 4 are included since they are contributors for evaluation1 which can publish the average grade + call(responsible1, {}, {'user': responsible1, 'evaluations': [evaluation1]}, use_cc=True), + call(editor1, {}, {'user': editor1, 'evaluations': [evaluation1]}, use_cc=True), + call(contributor1, {}, {'user': contributor1, 'evaluations': [evaluation1]}, use_cc=True), + call(contributor_both, {}, {'user': contributor_both, 'evaluations': [evaluation1, evaluation2]}, use_cc=True), + # contributor2 has textanswers, so they are notified + call(contributor2, {}, {'user': contributor2, 'evaluations': [evaluation2]}, use_cc=True), + ] + + with patch('evap.evaluation.models.EmailTemplate.send_to_user') as send_to_user_mock: + EmailTemplate.send_contributor_publish_notifications([evaluation1, evaluation2]) + # Assert that all expected publish notifications are sent to contributors. 
+ send_to_user_mock.assert_has_calls(expected_calls, any_order=True) + + # if general textanswers for an evaluation exist, all responsibles should also be notified + baker.make(TextAnswer, contribution=evaluation2.general_contribution) + expected_calls.append(call(responsible2, {}, {'user': responsible2, 'evaluations': [evaluation2]}, use_cc=True)) + + with patch('evap.evaluation.models.EmailTemplate.send_to_user') as send_to_user_mock: + EmailTemplate.send_contributor_publish_notifications([evaluation1, evaluation2]) + send_to_user_mock.assert_has_calls(expected_calls, any_order=True) + class TestEmailRecipientList(TestCase): def test_recipient_list(self): diff --git a/evap/evaluation/tests/tools.py b/evap/evaluation/tests/tools.py --- a/evap/evaluation/tests/tools.py +++ b/evap/evaluation/tests/tools.py @@ -101,3 +101,23 @@ def make_manager(): email='[email protected]', groups=[Group.objects.get(name='Manager')], ) + + +def make_contributor(user, evaluation): + """ Make user a contributor of evaluation. """ + return baker.make( + Contribution, + evaluation=evaluation, + contributor=user, + role=Contribution.Role.CONTRIBUTOR + ) + + +def make_editor(user, evaluation): + """ Make user an editor of evaluation. """ + return baker.make( + Contribution, + evaluation=evaluation, + contributor=user, + role=Contribution.Role.EDITOR, + ) diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -1118,21 +1118,21 @@ def test_publish_notifications(self): mail.outbox = [] self.helper_publish_evaluation_with_publish_notifications_for(evaluation, contributors=True, participants=False) - self.assertEqual(len(mail.outbox), 1) - self.assertEqual(mail.outbox[0].to, [contributor1.email]) + self.assertEqual(len(mail.outbox), 2) + self.assertCountEqual([[contributor1.email], [self.responsible.email]], [mail.outbox[0].to, mail.outbox[1].to]) mail.outbox = [] self.helper_publish_evaluation_with_publish_notifications_for(evaluation, contributors=False, participants=True) self.assertEqual(len(mail.outbox), 2) - self.assertIn([participant1.email], [mail.outbox[0].to, mail.outbox[1].to]) - self.assertIn([participant2.email], [mail.outbox[0].to, mail.outbox[1].to]) + self.assertCountEqual([[participant1.email], [participant2.email]], [mail.outbox[0].to, mail.outbox[1].to]) mail.outbox = [] self.helper_publish_evaluation_with_publish_notifications_for(evaluation, contributors=True, participants=True) - self.assertEqual(len(mail.outbox), 3) - self.assertIn([participant1.email], [mail.outbox[0].to, mail.outbox[1].to, mail.outbox[2].to]) - self.assertIn([participant2.email], [mail.outbox[0].to, mail.outbox[1].to, mail.outbox[2].to]) - self.assertIn([contributor1.email], [mail.outbox[0].to, mail.outbox[1].to, mail.outbox[2].to]) + self.assertEqual(len(mail.outbox), 4) + self.assertCountEqual( + [[participant1.email], [participant2.email], [contributor1.email], [self.responsible.email]], + [outbox_entry.to for outbox_entry in mail.outbox] + ) mail.outbox = [] def helper_semester_state_views(self, evaluation, old_state, new_state): @@ -1153,11 +1153,20 @@ def helper_semester_state_views(self, evaluation, old_state, new_state): def test_semester_publish(self): participant1 = baker.make(UserProfile, email="[email protected]") participant2 = baker.make(UserProfile, email="[email protected]") - evaluation = baker.make(Evaluation, course=self.course, state='reviewed', - participants=[participant1, participant2], voters=[participant1, 
participant2]) + evaluation = baker.make( + Evaluation, + course=self.course, + state='reviewed', + participants=[participant1, participant2], + voters=[participant1, participant2] + ) self.helper_semester_state_views(evaluation, "reviewed", "published") - self.assertEqual(len(mail.outbox), 2) + self.assertEqual(len(mail.outbox), 3) + self.assertCountEqual( + [[participant1.email], [participant2.email], [self.responsible.email]], + [outbox_entry.to for outbox_entry in mail.outbox], + ) def test_semester_reset_1(self): evaluation = baker.make(Evaluation, course=self.course, state='prepared')
Send publish notification to responsible When sending publish notifications in `send_contributor_publish_notifications` they are currently not sent to the person responsible for the course if the average grade was published (only in the `elif` part the responsible is included). The responsible person should get a notification there as well.
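For clarity, a condensed sketch of the recipient rule the patch above adds (the attribute names are those of the `Evaluation` model as used throughout the diffs): responsibles are notified whenever something relevant to them is published, either an average grade or a general text answer for the course.

```python
def responsibles_should_be_notified(evaluation):
    # An average grade is published, or at least one general (course-level) text answer exists.
    return (
        evaluation.can_publish_average_grade
        or evaluation.textanswer_set.filter(contribution=evaluation.general_contribution).exists()
    )
```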
2020-07-06T18:00:02
e-valuation/EvaP
1,499
e-valuation__EvaP-1499
[ "1493" ]
c83bef11c808a6f117ad4a1630e579aff4ce1f27
diff --git a/evap/staff/importers.py b/evap/staff/importers.py --- a/evap/staff/importers.py +++ b/evap/staff/importers.py @@ -540,7 +540,7 @@ def save_users_to_db(self): except Exception as error: self.errors[ImporterError.GENERAL].append( _("A problem occured while writing the entries to the database." - " The error message has been: '{}'").format(error=error)) + " The error message has been: '{}'").format(error)) raise msg = format_html(_("Successfully created {} users:"), len(created_users)) @@ -603,6 +603,7 @@ def process(cls, excel_content, test_run): if settings.DEBUG: # re-raise error for further introspection if in debug mode raise + return [], importer.success_messages, importer.warnings, importer.errors class PersonImporter:
diff --git a/evap/staff/tests/test_importers.py b/evap/staff/tests/test_importers.py --- a/evap/staff/tests/test_importers.py +++ b/evap/staff/tests/test_importers.py @@ -1,5 +1,7 @@ import os from datetime import date, datetime +from unittest.mock import patch + from django.test import TestCase, override_settings from django.conf import settings from model_bakery import baker @@ -120,6 +122,17 @@ def test_import_makes_inactive_user_active(self): self.assertEqual(UserProfile.objects.count(), 2) + @override_settings(DEBUG=False) + @patch("evap.evaluation.models.UserProfile.objects.update_or_create") + def test_unhandled_exception(self, mocked_db_access): + mocked_db_access.side_effect = Exception("Contact your database admin right now!") + result, __, __, errors = UserImporter.process(self.valid_excel_content, test_run=False) + self.assertEqual(result, []) + self.assertIn( + "Import finally aborted after exception: 'Contact your database admin right now!'", + errors[ImporterError.GENERAL], + ) + class TestEnrollmentImporter(TestCase): filename_valid = os.path.join(settings.BASE_DIR, "staff/fixtures/test_enrollment_data.xls")
Handle multiple sheets in import files correctly When importing an excel file with multiple sheets (e.g. when using the person importer on the person management page for a certain course) and at least one of the sheets is not correctly formatted, processing the import fails with a `TypeError`: ``` cannot unpack non-iterable NoneType object File "/opt/evap/evap/staff/importers.py", line 660, in process_file_content user_list, importer.success_messages, importer.warnings, importer.errors = UserImporter.process(file_content, test_run) ``` The importer should show an error message instead. The same should be done for other importers as well.
The `None` is coming from a missing return statement, is this intended? ```python @classmethod def process(cls, excel_content, test_run): """ Entry point for the view. """ try: importer = cls() # ... return importer.save_users_to_db(), importer.success_messages, importer.warnings, importer.errors except Exception as e: # pylint: disable=broad-except importer.errors[ImporterError.GENERAL].append(_("Import finally aborted after exception: '%s'" % e)) if settings.DEBUG: # re-raise error for further introspection if in debug mode raise # No return, so function returns None? ``` I mean, there still is a warning, but when errors occur in other places, there is something like ```python return [], importer.success_messages, importer.warnings, importer.errors ``` this is actually a mistake and the messages should be returned here as well
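A minimal, self-contained sketch of the "always return the 4-tuple" pattern the fix settles on, so callers that unpack the result never receive `None`; the names below are illustrative only and not taken from the importer.

```python
def process(run_import):
    success_messages, warnings, errors = [], [], []
    try:
        return run_import(), success_messages, warnings, errors
    except Exception as error:  # broad on purpose, mirroring the importer's catch-all
        errors.append(f"Import finally aborted after exception: '{error}'")
        return [], success_messages, warnings, errors

def failing_import():
    raise ValueError("boom")

imported, _messages, _warnings, problems = process(failing_import)
assert imported == [] and problems  # unpacking still works even though the import blew up
```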
2020-08-24T18:02:46
e-valuation/EvaP
1,502
e-valuation__EvaP-1502
[ "1489" ]
17a85c76f7cbd7a1d8a48ecb0e6d295c4e698032
diff --git a/evap/staff/importers.py b/evap/staff/importers.py --- a/evap/staff/importers.py +++ b/evap/staff/importers.py @@ -244,7 +244,8 @@ def for_each_row_in_excel_file_do(self, row_function): for sheet in self.book.sheets(): try: for row in range(self.skip_first_n_rows, sheet.nrows): - row_function(sheet.row_values(row), sheet, row) + # see https://stackoverflow.com/questions/2077897/substitute-multiple-whitespace-with-single-whitespace-in-python + row_function([' '.join(cell.split()) for cell in sheet.row_values(row)], sheet, row) self.success_messages.append(_("Successfully read sheet '%s'.") % sheet.name) except Exception: self.warnings[ImporterWarning.GENERAL].append(
diff --git a/evap/staff/fixtures/test_enrollment_data_consecutive_and_trailing_spaces.xls b/evap/staff/fixtures/test_enrollment_data_consecutive_and_trailing_spaces.xls new file mode 100644 Binary files /dev/null and b/evap/staff/fixtures/test_enrollment_data_consecutive_and_trailing_spaces.xls differ diff --git a/evap/staff/tests/test_importers.py b/evap/staff/tests/test_importers.py --- a/evap/staff/tests/test_importers.py +++ b/evap/staff/tests/test_importers.py @@ -125,6 +125,7 @@ class TestEnrollmentImporter(TestCase): filename_valid = os.path.join(settings.BASE_DIR, "staff/fixtures/test_enrollment_data.xls") filename_valid_degree_merge = os.path.join(settings.BASE_DIR, "staff/fixtures/test_enrollment_data_degree_merge.xls") filename_valid_import_names = os.path.join(settings.BASE_DIR, "staff/fixtures/test_enrollment_data_import_names.xls") + filename_valid_consecutive_and_trailing_spaces = os.path.join(settings.BASE_DIR, "staff/fixtures/test_enrollment_data_consecutive_and_trailing_spaces.xls") filename_invalid = os.path.join(settings.BASE_DIR, "staff/fixtures/invalid_enrollment_data.xls") filename_random = os.path.join(settings.BASE_DIR, "staff/fixtures/random.random") @@ -278,6 +279,12 @@ def test_duplicate_course_error(self): "Course Stehlen does already exist in this semester.", "Course Shine does already exist in this semester."}) + def test_replace_consecutive_and_trailing_spaces(self): + with open(self.filename_valid_consecutive_and_trailing_spaces, "rb") as excel_file: + excel_content = excel_file.read() + success_messages, __, __ = EnrollmentImporter.process(excel_content, self.semester, None, None, test_run=True) + self.assertIn("The import run will create 1 courses/evaluations and 3 users", "".join(success_messages)) + class TestPersonImporter(TestCase): @classmethod
Importer string cleaning All importers should replace multiple consecutive spaces in cell values by single spaces. There should be a test in which an exemplary enrollment file is imported, which includes two courses with a name differing only in the amount of spaces. The test should assert that only one course is created by the import.
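A minimal sketch of the per-cell normalisation the patch above applies in `for_each_row_in_excel_file_do`: splitting on whitespace and re-joining collapses consecutive spaces and also drops leading and trailing ones, so two course names differing only in spacing become identical.

```python
def normalize_row(cells):
    """Collapse runs of whitespace in every cell to single spaces and strip the ends."""
    return [' '.join(cell.split()) for cell in cells]

assert normalize_row(["Operating   Systems ", "  Bachelor"]) == ["Operating Systems", "Bachelor"]
```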
2020-08-29T10:29:43
e-valuation/EvaP
1,506
e-valuation__EvaP-1506
[ "1419" ]
c83bef11c808a6f117ad4a1630e579aff4ce1f27
diff --git a/evap/contributor/views.py b/evap/contributor/views.py --- a/evap/contributor/views.py +++ b/evap/contributor/views.py @@ -24,11 +24,14 @@ def index(request): user = request.user show_delegated = get_parameter_from_url_or_session(request, "show_delegated", True) + represented_proxy_users = user.represented_users.filter(is_proxy_user=True) contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published'] own_courses = Course.objects.filter( Q(evaluations__state__in=contributor_visible_states) & ( Q(responsibles=user) | - Q(evaluations__contributions__contributor=user) + Q(evaluations__contributions__contributor=user) | + Q(evaluations__contributions__contributor__in=represented_proxy_users) | + Q(responsibles__in=represented_proxy_users) ) ) own_evaluations = [evaluation for course in own_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user)] @@ -37,7 +40,7 @@ def index(request): displayed_evaluations = set(own_evaluations) if show_delegated: - represented_users = user.represented_users.all() + represented_users = user.represented_users.exclude(is_proxy_user=True) delegated_courses = Course.objects.filter( Q(evaluations__state__in=contributor_visible_states) & ( Q(responsibles__in=represented_users) |
Proxied evaluations shouldn't show as delegated Evaluations where a proxy user is responsible shouldn't be shown as "delegated courses" for the delegates of this proxy user. The delegates should see this course in the list of their own courses like those they are directly responsible for and they shouldn't be hidden when the option to hide delegated courses is active.
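A condensed sketch of the ownership query after the change above, wrapped into a helper for readability; the model fields and the state list are exactly those visible in the diff, only the function around them is illustrative.

```python
from django.db.models import Q

from evap.evaluation.models import Course

def own_courses_for(user, contributor_visible_states):
    # Courses of proxy users the user represents count as the user's own courses, not as
    # delegated ones, so they stay visible even when delegated courses are hidden.
    represented_proxy_users = user.represented_users.filter(is_proxy_user=True)
    return Course.objects.filter(
        Q(evaluations__state__in=contributor_visible_states) & (
            Q(responsibles=user)
            | Q(evaluations__contributions__contributor=user)
            | Q(evaluations__contributions__contributor__in=represented_proxy_users)
            | Q(responsibles__in=represented_proxy_users)
        )
    )
```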
2020-09-07T17:55:38
e-valuation/EvaP
1,525
e-valuation__EvaP-1525
[ "1522" ]
c312fec3f1f9fba57dc4b7d293d1651d53c87e15
diff --git a/evap/staff/importers.py b/evap/staff/importers.py --- a/evap/staff/importers.py +++ b/evap/staff/importers.py @@ -330,6 +330,18 @@ def check_user_data_sanity(self, test_run): if len(users_same_name) > 0: self._create_user_name_collision_warning(user_data, users_same_name) + def check_data_type_correctness(self): + """ + Checks that all cells after the skipped rows contain string values (not floats or integers). + """ + for sheet in self.book.sheets(): + for row in range(self.skip_first_n_rows, sheet.nrows): + if not all(isinstance(cell, str) for cell in sheet.row_values(row)): + self.errors[ImporterError.SCHEMA].append( + _("Wrong data type in sheet '{}' in row {}." + " Please make sure all cells are string types, not numerical.").format(sheet.name, row + 1) + ) + class EnrollmentImporter(ExcelImporter): def __init__(self): @@ -579,6 +591,7 @@ def process(cls, excel_content, test_run): return [], importer.success_messages, importer.warnings, importer.errors importer.check_column_count(4) + importer.check_data_type_correctness() if importer.errors: importer.errors[ImporterError.GENERAL].append(_("The input data is malformed. No data was imported.")) return [], importer.success_messages, importer.warnings, importer.errors
diff --git a/evap/staff/tests/test_importers.py b/evap/staff/tests/test_importers.py --- a/evap/staff/tests/test_importers.py +++ b/evap/staff/tests/test_importers.py @@ -16,6 +16,7 @@ class TestUserImporter(TestCase): filename_duplicate = os.path.join(settings.BASE_DIR, "staff/fixtures/duplicate_user_import.xls") filename_invalid = os.path.join(settings.BASE_DIR, "staff/fixtures/invalid_user_import.xls") filename_random = os.path.join(settings.BASE_DIR, "staff/fixtures/random.random") + filename_numerics = os.path.join(settings.BASE_DIR, "staff/fixtures/numerical_data_in_user_data.xls") # valid user import tested in tests/test_views.py, TestUserImportView @@ -29,6 +30,8 @@ def setUpTestData(cls): cls.random_excel_content = excel_file.read() with open(cls.filename_duplicate, "rb") as excel_file: cls.duplicate_excel_content = excel_file.read() + with open(cls.filename_numerics, "rb") as excel_file: + cls.numerical_excel_content = excel_file.read() def test_test_run_does_not_change_database(self): original_users = list(UserProfile.objects.all()) @@ -133,6 +136,22 @@ def test_unhandled_exception(self, mocked_db_access): errors[ImporterError.GENERAL], ) + def test_disallow_non_string_types(self): + imported_users, __, __, errors = UserImporter.process(self.numerical_excel_content, test_run=False) + self.assertEqual(len(imported_users), 0) + self.assertIn( + "The input data is malformed. No data was imported.", + errors[ImporterError.GENERAL] + ) + # The sheet has a float in row 3 and an int row 4. All others rows only contain strings. + self.assertSetEqual( + { + "Wrong data type in sheet 'Users' in row 3. Please make sure all cells are string types, not numerical.", + "Wrong data type in sheet 'Users' in row 4. Please make sure all cells are string types, not numerical." + }, + set(errors[ImporterError.SCHEMA]) + ) + class TestEnrollmentImporter(TestCase): filename_valid = os.path.join(settings.BASE_DIR, "staff/fixtures/test_enrollment_data.xls")
Excel importer: Data types of cells. Float cells might cause import errors Successor of #1493. With the current code, we had occurrences of
```
AttributeError 'float' object has no attribute 'strip'
/evap/evap/staff/importers.py in __init__, line 40
```
which we think is caused by the excel file cell being set to type "float", so xlrd gives us a float value instead of a string value, but we expect it to be a string. @niklasmohrin had some proposals on what to do in #1499. I think our latest consensus is given in https://github.com/e-valuation/EvaP/pull/1499#issuecomment-688454822 by @janno42:
> we just use strings, so every cell could be parsed as a string value, regardless of the excel data type.
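A condensed sketch of the type check the fix adds before any row is processed (xlrd returns `float` values for numeric cells, which is what produced the `'float' object has no attribute 'strip'` error above); `sheet.nrows`, `sheet.row_values()` and `skip_first_n_rows` are used exactly as in the importer.

```python
def find_rows_with_non_string_cells(sheet, skip_first_n_rows):
    """Return the 1-based numbers of rows containing numeric (non-string) cells, for the error message."""
    return [
        row + 1
        for row in range(skip_first_n_rows, sheet.nrows)
        if not all(isinstance(cell, str) for cell in sheet.row_values(row))
    ]
```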
2020-10-19T18:24:53
e-valuation/EvaP
1,531
e-valuation__EvaP-1531
[ "1523" ]
82f52104271768bacf7dba5ce3625d80bf00413c
diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py --- a/evap/evaluation/views.py +++ b/evap/evaluation/views.py @@ -74,6 +74,7 @@ def index(request): # clean up our test cookie if request.session.test_cookie_worked(): request.session.delete_test_cookie() + return redirect('evaluation:index') # if not logged in by now, render form if not request.user.is_authenticated:
Access denied on manager login Currently, after logging in, a manager is redirected to /staff/, but staff mode will not be active, so they will get a 403 access denied. @janno42 what behavior do we want here? Redirect as if they weren't a manager or enable staff mode?
They should be redirected as if they weren't manager (or reviewer).
2020-10-27T18:36:17
e-valuation/EvaP
1,538
e-valuation__EvaP-1538
[ "1524" ]
5b88d1cb86e073ca85d10cdd8f55f801d5864215
diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -376,6 +376,7 @@ class Evaluation(LoggedModel): # when the evaluation takes place vote_start_datetime = models.DateTimeField(verbose_name=_("start of evaluation")) + # Usually the property vote_end_datetime should be used instead of this field vote_end_date = models.DateField(verbose_name=_("last day of evaluation")) # Disable to prevent editors from changing evaluation data
Add evaluation period check when rendering "Evaluate" button **Edit: See latest comment for up-to-date description of what this issue is about** --- With the current testdata, you can do the following: reload testdata, log in as evap, go to the student index page, click on "Evaluate" for "Game Programming". You will get access denied, triggered by these two lines in evap/student/views.py:138:
```python
if not evaluation.can_be_voted_for_by(request.user):
    raise PermissionDenied
```
I think we can agree that in general, the evaluation should not be shown on the student index page, or the student should be able to access it. What happened here is that the cron job that was supposed to move the evaluation from state `in_evaluation` to `evaluated` didn't run (in the dev vm). We rely on it to run in more places than just here, and there might be a lot of other stuff that can break if the cron jobs stop running. However, I think there are not that many places that are in direct contact with students, who are the majority of our users. Still, I think we should make sure that one of the following holds:
- If the cronjob should have run but didn't run, notify someone (`assert evaluation.can_be_voted_for_by(request.user)` for each evaluation in the student index page would do that)
- Filter out evaluations whose evaluation period has ended, even if their state is still `in_evaluation`, on the student index view. This would allow the platform to continue running even if cron fails for some reason.

@janno42, @karyon, @pixunil, @felixrindt what do you think?
an assertion is perfectly fine for me. if we would do state checks on the student index page i would say we should do them everywhere. but i think the assert is sufficient and avoids too much code. Hmm, we should keep in mind that adding an assertion will actually make the error worse when it occurs. Right now, users will see the evaluation that has already ended, but as long as they don't click on it, everything is fine. If we add the assertion, the student index page will 500 instead of giving the semi-correct version (which in this case would still be better). @janno42 and I agreed that the student index page with the "Evaluate" button is probably the only place where we have two different sources of state (system time and cron job execution state) that could collide (if cron job execution is delayed). Thus, adding another check on system time, just as the vote view does, on the student index page, and not showing that "Evaluate" button if the time has passed should be an okay solution.
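The patch shown for this record only documents that the `vote_end_datetime` property should normally be used instead of the raw `vote_end_date` field; the rendering-side check discussed in this thread would look roughly like the sketch below (illustrative only, reusing the `in_evaluation` state name and the `vote_end_datetime` property from the model code shown in the diffs).

```python
from datetime import datetime

def show_evaluate_button(evaluation):
    # Combine the workflow state (advanced by the cron job) with a wall-clock check, so an
    # evaluation whose period has ended is not offered for voting even if the cron job is late.
    return evaluation.state == "in_evaluation" and datetime.now() < evaluation.vote_end_datetime
```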
2020-11-16T17:02:53
e-valuation/EvaP
1,539
e-valuation__EvaP-1539
[ "1536" ]
984b6e3fd7b61481990d8453648562c971dcebc9
diff --git a/evap/settings.py b/evap/settings.py --- a/evap/settings.py +++ b/evap/settings.py @@ -262,8 +262,8 @@ WSGI_APPLICATION = 'evap.wsgi.application' -# Redirect url after login LOGIN_REDIRECT_URL = '/' +LOGOUT_REDIRECT_URL = '/' LOGIN_URL = "/" @@ -371,7 +371,6 @@ OIDC_RP_SIGN_ALGO = 'RS256' OIDC_USERNAME_ALGO = '' OIDC_RP_SCOPES = 'openid email profile' -LOGOUT_REDIRECT_URL = '/' OIDC_RP_CLIENT_ID = 'evap' OIDC_RP_CLIENT_SECRET = 'evap-secret' diff --git a/evap/staff/views.py b/evap/staff/views.py --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -1771,7 +1771,7 @@ def export_contributor_results_view(request, contributor_id): def enter_staff_mode(request): staff_mode.enter_staff_mode(request) messages.success(request, _("Successfully entered staff mode.")) - return redirect('/') + return redirect('evaluation:index') @require_POST @@ -1779,4 +1779,4 @@ def enter_staff_mode(request): def exit_staff_mode(request): staff_mode.exit_staff_mode(request) messages.success(request, _("Successfully exited staff mode.")) - return redirect('/') + return redirect('evaluation:index')
diff --git a/evap/evaluation/tests/test_views.py b/evap/evaluation/tests/test_views.py --- a/evap/evaluation/tests/test_views.py +++ b/evap/evaluation/tests/test_views.py @@ -1,5 +1,6 @@ from django.core import mail from django.contrib.auth.hashers import make_password +from django.contrib.auth.models import Group from django_webtest import WebTest from model_bakery import baker @@ -22,6 +23,19 @@ def test_passworduser_login(self): password_form['password'] = 'evap' self.assertEqual(password_form.submit().status_code, 302) + def test_login_for_staff_users_correctly_redirects(self): + """ Regression test for #1523: Access denied on manager login """ + internal_email = '[email protected]' # external users don't necessarily have a proper redirect page + baker.make(UserProfile, email=internal_email, password=make_password('evap'), groups=[Group.objects.get(name='Manager')]) + + response = self.app.get(self.url) + password_form = response.forms[0] + password_form['email'] = internal_email + password_form['password'] = 'evap' + response = password_form.submit() + self.assertRedirects(response, self.url, fetch_redirect_response=False) + self.assertRedirects(response.follow(), '/results/') + def test_send_new_login_key(self): """ Tests whether requesting a new login key is only possible for existing users, shows the expected success message and sends only one email to the requesting
Write test for the login workflow https://github.com/e-valuation/EvaP/pull/1531 should've been caught by a test. While at it, verify there's a test for logging in via a login URL.
2020-11-16T18:51:01
e-valuation/EvaP
1,541
e-valuation__EvaP-1541
[ "1082" ]
0aa5d29a206959ea80492df1c1a3a60c2596af11
diff --git a/evap/staff/forms.py b/evap/staff/forms.py --- a/evap/staff/forms.py +++ b/evap/staff/forms.py @@ -618,9 +618,6 @@ def handle_moved_contributors(data, **kwargs): def clean(self): self.handle_deleted_and_added_contributions() - - super().clean() - found_contributor = set() for form in self.forms: if not form.cleaned_data or form.cleaned_data.get('DELETE'): @@ -629,9 +626,10 @@ def clean(self): if contributor is None: raise forms.ValidationError(_('Please select the name of each added contributor. Remove empty rows if necessary.')) if contributor and contributor in found_contributor: - raise forms.ValidationError(_('Duplicate contributor found. Each contributor should only be used once.')) + raise forms.ValidationError(_('Duplicate contributor ({}) found. Each contributor should only be used once.').format(contributor.full_name)) if contributor: found_contributor.add(contributor) + super().clean() class ContributionCopyFormSet(ContributionFormSet):
diff --git a/evap/staff/tests/test_forms.py b/evap/staff/tests/test_forms.py --- a/evap/staff/tests/test_forms.py +++ b/evap/staff/tests/test_forms.py @@ -231,7 +231,7 @@ def test_contribution_form_set(self): Tests the ContributionFormset with various input data sets. """ evaluation = baker.make(Evaluation) - user1 = baker.make(UserProfile) + user1 = baker.make(UserProfile, _fill_optional=['first_name', 'last_name']) user2 = baker.make(UserProfile) baker.make(UserProfile) questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.CONTRIBUTOR) @@ -257,13 +257,22 @@ def test_contribution_form_set(self): data['contributions-TOTAL_FORMS'] = 2 data['contributions-1-contributor'] = user1.pk data['contributions-1-evaluation'] = evaluation.pk - data['contributions-1-questionnaires'] = questionnaire.pk data['contributions-1-order'] = 1 data['contributions-1-textanswer_visibility'] = Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS - self.assertFalse(ContributionFormset(instance=evaluation, form_kwargs={'evaluation': evaluation}, data=data).is_valid()) + data['contributions-1-role'] = Contribution.Role.EDITOR + formset = ContributionFormset(instance=evaluation, form_kwargs={'evaluation': evaluation}, data=data) + self.assertFalse(formset.is_valid()) + # regression for https://github.com/e-valuation/EvaP/issues/1082 + # assert same error message with and without questionnaire + self.assertEqual(formset.non_form_errors(), [('Duplicate contributor ({}) found. Each contributor should only be used once.').format(user1.full_name)]) + + data['contributions-1-questionnaires'] = questionnaire.pk + formset = ContributionFormset(instance=evaluation, form_kwargs={'evaluation': evaluation}, data=data) + self.assertFalse(formset.is_valid()) + self.assertEqual(formset.non_form_errors(), [('Duplicate contributor ({}) found. Each contributor should only be used once.').format(user1.full_name)]) + # two contributors data['contributions-1-contributor'] = user2.pk - data['contributions-1-role'] = Contribution.Role.EDITOR self.assertTrue(ContributionFormset(instance=evaluation, form_kwargs={'evaluation': evaluation}, data=data).is_valid()) def test_dont_validate_deleted_contributions(self):
Duplicate contributor error message When trying to add the same contributor to a course twice, an error message is shown. This message differs based on whether a questionnaire was assigned or not. The error message should in both cases be the one currently shown when no questionnaire was selected. **Current messages:** Questionnaire selected: `Please correct the duplicate data for contributor. Please correct the duplicate values below.` No questionnaire selected: `Duplicate contributor found. Each contributor should only be used once.`
Identified Cause: The `Please correct the duplicate data for contributor. Please correct the duplicate values below.` message originates from Django internal unique checks. The `Duplicate contributor found. Each contributor should only be used once.` message originates from our own checks. Django internal unique checks only apply to all valid Forms of a FormSet. Hence, any invalid ContributionForm is not included in the unique check. A newly added contributor is invalid (it is set to contribute to the evaluation but has no questionnaires assigned yet). To resolve this, either we have to change the unique message of Django's internal unique checks or somehow exclude this unique check.

@Pottiman do you still want to work on this?

Django code line generating the error message: https://github.com/django/django/blob/ba42569d5c01b58b2999e393f097b530e538ec41/django/forms/models.py#L722 Django's unique validation there is kind of weird. Django does not do unique checks on the formset unless every single form is valid (I guess that's why we added our manual check?). If it does, the error message listed above in the issue is generated by two separate parts, see my linked code part. One of these parts could be changed by overriding a Django method, the other part would have to be overridden in a different method, so the message @janno42 listed above is actually made up of two separate messages. I'm also confused about one of the errors being raised as a `ValidationError` while the other is just put in `form._errors` as a non field error. Maybe @karyon knows more about this handling / has done anything with it before?
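A condensed view of the resolution the patch above takes: the formset's own duplicate check runs before `super().clean()`, so the project's message is raised consistently whether or not a questionnaire was selected, and Django's generic unique-check wording never appears first. The body below is lightly adapted from `ContributionFormSet.clean` in the diff and relies on the module's existing `forms` and `_` imports.

```python
def clean(self):
    self.handle_deleted_and_added_contributions()
    found_contributors = set()
    for form in self.forms:
        if not form.cleaned_data or form.cleaned_data.get('DELETE'):
            continue
        contributor = form.cleaned_data.get('contributor')
        if contributor is None:
            raise forms.ValidationError(_('Please select the name of each added contributor. Remove empty rows if necessary.'))
        if contributor in found_contributors:
            raise forms.ValidationError(_('Duplicate contributor ({}) found. Each contributor should only be used once.').format(contributor.full_name))
        found_contributors.add(contributor)
    # Django's built-in unique checks (the source of the generic message) run last.
    super().clean()
```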
2020-11-30T18:28:06
e-valuation/EvaP
1,543
e-valuation__EvaP-1543
[ "1331" ]
b031c2852916f4ede1765695ef3e261db3af4de9
diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -605,7 +605,7 @@ def revert_to_new(self): pass @transition(field=state, source='approved', target='in_evaluation', conditions=[lambda self: self.is_in_evaluation_period]) - def evaluation_begin(self): + def begin_evaluation(self): pass @transition(field=state, source=['evaluated', 'reviewed'], target='in_evaluation', conditions=[lambda self: self.is_in_evaluation_period]) @@ -613,15 +613,15 @@ def reopen_evaluation(self): pass @transition(field=state, source='in_evaluation', target='evaluated') - def evaluation_end(self): + def end_evaluation(self): pass @transition(field=state, source='evaluated', target='reviewed', conditions=[lambda self: self.is_fully_reviewed]) - def review_finished(self): + def end_review(self): pass @transition(field=state, source=['new', 'reviewed'], target='reviewed', conditions=[lambda self: self.is_single_result]) - def single_result_created(self): + def skip_review_single_result(self): pass @transition(field=state, source='reviewed', target='evaluated', conditions=[lambda self: not self.is_fully_reviewed]) @@ -776,13 +776,13 @@ def update_evaluations(cls): for evaluation in cls.objects.all(): try: if evaluation.state == "approved" and evaluation.vote_start_datetime <= datetime.now(): - evaluation.evaluation_begin() + evaluation.begin_evaluation() evaluation.save() evaluations_new_in_evaluation.append(evaluation) elif evaluation.state == "in_evaluation" and datetime.now() >= evaluation.vote_end_datetime: - evaluation.evaluation_end() + evaluation.end_evaluation() if evaluation.is_fully_reviewed: - evaluation.review_finished() + evaluation.end_review() if evaluation.grading_process_is_finished: evaluation.publish() evaluation_results_evaluations.append(evaluation) diff --git a/evap/staff/forms.py b/evap/staff/forms.py --- a/evap/staff/forms.py +++ b/evap/staff/forms.py @@ -388,7 +388,7 @@ def save(self, *args, **kw): # change state to "reviewed" # works only for single_results so the evaluation and its contribution must be saved first - evaluation.single_result_created() + evaluation.skip_review_single_result() evaluation.save() if hasattr(self.instance, 'old_course'): diff --git a/evap/staff/views.py b/evap/staff/views.py --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -189,7 +189,7 @@ def apply(request, evaluations, email_template=None, email_template_contributor= "Successfully reverted {} evaluations to in preparation.", len(evaluations)).format(len(evaluations))) -class MoveToPreparedOperation(EvaluationOperation): +class ReadyForEditorsOperation(EvaluationOperation): email_template_name = EmailTemplate.EDITOR_REVIEW_NOTICE confirmation_message = gettext_lazy("Do you want to send the following evaluations to editor review?") @@ -230,7 +230,7 @@ def apply(request, evaluations, email_template=None, email_template_contributor= use_cc=True, additional_cc_users=editors, request=request) -class StartEvaluationOperation(EvaluationOperation): +class BeginEvaluationOperation(EvaluationOperation): email_template_name = EmailTemplate.EVALUATION_STARTED confirmation_message = gettext_lazy("Do you want to immediately start the following evaluations?") @@ -250,7 +250,7 @@ def apply(request, evaluations, email_template=None, email_template_contributor= for evaluation in evaluations: evaluation.vote_start_datetime = datetime.now() - evaluation.evaluation_begin() + evaluation.begin_evaluation() evaluation.save() messages.success(request, 
ngettext("Successfully started {} evaluation.", "Successfully started {} evaluations.", len(evaluations)).format(len(evaluations))) @@ -258,7 +258,7 @@ def apply(request, evaluations, email_template=None, email_template_contributor= email_template.send_to_users_in_evaluations(evaluations, [EmailTemplate.Recipients.ALL_PARTICIPANTS], use_cc=False, request=request) -class RevertToReviewedOperation(EvaluationOperation): +class UnpublishOperation(EvaluationOperation): confirmation_message = gettext_lazy("Do you want to unpublish the following evaluations?") @staticmethod @@ -314,9 +314,9 @@ def apply(request, evaluations, email_template=None, email_template_contributor= EVALUATION_OPERATIONS = { 'new': RevertToNewOperation, - 'prepared': MoveToPreparedOperation, - 'in_evaluation': StartEvaluationOperation, - 'reviewed': RevertToReviewedOperation, + 'prepared': ReadyForEditorsOperation, + 'in_evaluation': BeginEvaluationOperation, + 'reviewed': UnpublishOperation, 'published': PublishOperation, } @@ -1149,7 +1149,7 @@ def evaluation_textanswers_update_publish(request): answer.save() if evaluation.state == "evaluated" and evaluation.is_fully_reviewed: - evaluation.review_finished() + evaluation.end_review() evaluation.save() if evaluation.state == "reviewed" and not evaluation.is_fully_reviewed: evaluation.reopen_review()
diff --git a/evap/evaluation/tests/test_models.py b/evap/evaluation/tests/test_models.py --- a/evap/evaluation/tests/test_models.py +++ b/evap/evaluation/tests/test_models.py @@ -110,7 +110,7 @@ def test_evaluation_ended(self): baker.make(Evaluation, course=course_2, state='in_evaluation', vote_start_datetime=datetime.now() - timedelta(days=2), vote_end_date=date.today(), wait_for_grade_upload_before_publishing=False) - with patch('evap.evaluation.models.Evaluation.evaluation_end') as mock: + with patch('evap.evaluation.models.Evaluation.end_evaluation') as mock: Evaluation.update_evaluations() self.assertEqual(mock.call_count, 1) @@ -173,7 +173,7 @@ def test_single_result_can_be_deleted_only_in_reviewed(self): textanswer_visibility=Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS, ) baker.make(RatingAnswerCounter, answer=1, count=1, question=Questionnaire.single_result_questionnaire().questions.first(), contribution=contribution) - evaluation.single_result_created() + evaluation.skip_review_single_result() evaluation.publish() evaluation.save() @@ -202,7 +202,7 @@ def test_single_result_can_be_published(): ) baker.make(RatingAnswerCounter, answer=1, count=1, question=Questionnaire.single_result_questionnaire().questions.first(), contribution=contribution) - single_result.single_result_created() + single_result.skip_review_single_result() single_result.publish() # used to crash def test_second_vote_sets_can_publish_text_results_to_true(self): @@ -354,7 +354,7 @@ def test_textanswer_review_state(self): evaluation.TextAnswerReviewState.REVIEW_NEEDED, ) - evaluation.evaluation_end() + evaluation.end_evaluation() evaluation.save() self.assert_textanswer_review_state( diff --git a/evap/grades/tests.py b/evap/grades/tests.py --- a/evap/grades/tests.py +++ b/evap/grades/tests.py @@ -114,17 +114,17 @@ def test_upload_final_grades(self): self.helper_check_final_grade_upload(course, 0) # state: in_evaluation - evaluation.evaluation_begin() + evaluation.begin_evaluation() evaluation.save() self.helper_check_final_grade_upload(course, 0) # state: evaluated - evaluation.evaluation_end() + evaluation.end_evaluation() evaluation.save() self.helper_check_final_grade_upload(course, 0) # state: reviewed - evaluation.review_finished() + evaluation.end_review() evaluation.save() self.helper_check_final_grade_upload( course, evaluation.num_participants + evaluation.contributions.exclude(contributor=None).count()) @@ -137,9 +137,9 @@ def test_upload_final_grades(self): def test_toggle_no_grades(self): evaluation = self.evaluation evaluation.manager_approve() - evaluation.evaluation_begin() - evaluation.evaluation_end() - evaluation.review_finished() + evaluation.begin_evaluation() + evaluation.end_evaluation() + evaluation.end_review() evaluation.save() self.assertFalse(evaluation.course.gets_no_grade_documents) diff --git a/evap/results/tests/test_tools.py b/evap/results/tests/test_tools.py --- a/evap/results/tests/test_tools.py +++ b/evap/results/tests/test_tools.py @@ -30,7 +30,7 @@ def test_caching_lifecycle(self): self.assertIsNone(caches['results'].get(get_results_cache_key(evaluation))) - evaluation.evaluation_end() + evaluation.end_evaluation() evaluation.save() self.assertIsNotNone(caches['results'].get(get_results_cache_key(evaluation))) @@ -45,8 +45,8 @@ def test_caching_works_after_multiple_transitions(self): self.assertIsNone(caches['results'].get(get_results_cache_key(evaluation))) - evaluation.evaluation_end() - evaluation.review_finished() + evaluation.end_evaluation() + 
evaluation.end_review() evaluation.publish() evaluation.save() diff --git a/evap/results/tests/test_views.py b/evap/results/tests/test_views.py --- a/evap/results/tests/test_views.py +++ b/evap/results/tests/test_views.py @@ -335,8 +335,8 @@ def helper_test_answer_visibility_two_voters(self, user_email): def test_answer_visibility_one_voter(self): let_user_vote_for_evaluation(self.app, self.student1, self.evaluation) - self.evaluation.evaluation_end() - self.evaluation.review_finished() + self.evaluation.end_evaluation() + self.evaluation.end_review() self.evaluation.publish() self.evaluation.save() self.assertEqual(self.evaluation.voters.count(), 1) @@ -349,8 +349,8 @@ def test_answer_visibility_one_voter(self): def test_answer_visibility_two_voters(self): let_user_vote_for_evaluation(self.app, self.student1, self.evaluation) let_user_vote_for_evaluation(self.app, self.student2, self.evaluation) - self.evaluation.evaluation_end() - self.evaluation.review_finished() + self.evaluation.end_evaluation() + self.evaluation.end_review() self.evaluation.publish() self.evaluation.save() self.assertEqual(self.evaluation.voters.count(), 2) diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -2366,7 +2366,7 @@ def test_review_actions(self): def test_finishing_review_updates_results(self): let_user_vote_for_evaluation(self.app, self.student2, self.evaluation) - self.evaluation.evaluation_end() + self.evaluation.end_evaluation() self.evaluation.can_publish_text_results = True self.evaluation.save() results = get_results(self.evaluation) @@ -2376,7 +2376,7 @@ def test_finishing_review_updates_results(self): textanswer = self.evaluation.unreviewed_textanswer_set[0] textanswer.state = TextAnswer.State.PUBLISHED textanswer.save() - self.evaluation.review_finished() + self.evaluation.end_review() self.evaluation.save() results = get_results(self.evaluation)
Refactor Evaluation state transition names As a follow-up to #1310, we became aware that the evaluation state transition functions' names aren't quite consistent with each other. Also, naming could be improved throughout the whole project. For example, from #1310 there is a name mismatch between
* `MoveToPreparedOperation` and `evaluation.ready_for_editors`
* `StartEvaluationOperation` and `evaluation.evaluation_begin`
* `RevertToReviewedOperation` and `evaluation.unpublish`
I would also propose `evaluation_begin` ->`begin_evaluation` (or `start_evaluation`), also `evaluation_end` -> `end_evaluation`. I also thought about `review_finished`. While it accurately describes what has happened for the transition to occur, I think it should rather describe the transition itself. so `finish_review` might be better, or actually I don't see any reason to not call it `end_review` to be in line with `end_evaluation`.
2020-12-14T17:45:54
e-valuation/EvaP
1,547
e-valuation__EvaP-1547
[ "1069" ]
c34e53435d69c320e60e0c7a9d456566c68375a0
diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -1621,6 +1621,9 @@ def send_to_user(self, user, subject_params, body_params, use_cc, additional_cc_ else: send_separate_login_url = True + body_params['page_url'] = settings.PAGE_URL + body_params['contact_email'] = settings.CONTACT_EMAIL + subject = self.render_string(self.subject, subject_params) body = self.render_string(self.body, body_params) @@ -1652,7 +1655,7 @@ def send_reminder_to_user(cls, user, first_due_in_days, due_evaluations): def send_login_url_to_user(cls, user): template = cls.objects.get(name=cls.LOGIN_KEY_CREATED) subject_params = {} - body_params = {'user': user, 'login_url': user.login_url} + body_params = {'user': user} template.send_to_user(user, subject_params, body_params, use_cc=False) logger.info(('Sent login url to {}.').format(user.email))
diff --git a/evap/evaluation/fixtures/test_data.json b/evap/evaluation/fixtures/test_data.json --- a/evap/evaluation/fixtures/test_data.json +++ b/evap/evaluation/fixtures/test_data.json @@ -128101,7 +128101,7 @@ "fields": { "name": "Editor Review Notice", "subject": "[EvaP] Neue Lehrveranstaltungen stehen zur \u00dcberpr\u00fcfung bereit / New courses ready for approval", - "body": "{% load evaluation_filters %}(English version below)\r\n\r\n\r\nSehr geehrte Dozentin, sehr geehrter Dozent,\r\n\r\nvielen Dank, dass Sie in diesem Semester Veranstaltungen anbieten. Um die Evaluierung dieser Veranstaltungen auf unserer Plattform EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %} durchf\u00fchren zu k\u00f6nnen, ben\u00f6tigen wir Ihre Mithilfe.\r\n\r\nSie k\u00f6nnen die folgenden Aufgaben auch an Ihre Mitarbeitenden delegieren. Unter \"Einstellungen\" k\u00f6nnen Sie Stellvertretende hinzuf\u00fcgen, die damit Bearbeitungsrechte f\u00fcr alle Ihre Veranstaltungen erhalten. Beim Bearbeiten einzelner Evaluierungen k\u00f6nnen Sie ebenfalls Bearbeitungsrechte vergeben, die sich auf diese Evaluierung beschr\u00e4nken.\r\n\r\n{% if user.needs_login_key and login_url %}Mit diesem Link k\u00f6nnen Sie sich einmalig bei der Platform anmelden: {{ login_url }}{% elif user.needs_login_key %}Ein Link zum Anmelden wird Ihnen per E-Mail zugesendet.{% endif %}\r\n\r\nWir m\u00f6chten Sie bitten, f\u00fcr Ihre Evaluierungen innerhalb der n\u00e4chsten Woche Folgendes zu \u00fcberpr\u00fcfen:\r\n - Ist der Evaluierungszeitraum angemessen? Bitte legen Sie das Ende der Evaluierung vor die finale Pr\u00fcfungsleistung (Klausur, Pr\u00fcfung, Ausarbeitung etc.).\r\n - Wurden die f\u00fcr die Evaluierung geeigneten Frageb\u00f6gen ausgew\u00e4hlt? Bitte passen Sie die Auswahl gegebenenfalls an.\r\n - Werden alle beteiligten Dozenten, \u00dcbungsleiter, Projektleiter, Seminarbetreuer etc. evaluiert? F\u00fcgen Sie bitte alle weiteren Personen mit den passenden Frageb\u00f6gen hinzu.\r\n\r\nFolgende Evaluierungen ben\u00f6tigen Ihre Freigabe:\r\n{% for evaluation in evaluations|order_by:\"full_name_de\" %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nVielen Dank im Voraus f\u00fcr Ihre M\u00fche!\r\nBei Fragen und R\u00fcckmeldungen k\u00f6nnen Sie sich jederzeit gerne an das Evaluierungsteam wenden ([email protected]).\r\n\r\nFreundliche Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear lecturer,\r\n\r\nThank you very much for teaching during this semester. We need your help so we can evaluate all courses on our platform EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %}.\r\n\r\nYou can delegate the following tasks to your staff. Under \"Settings\" you can assign your delegates, which thereby will gain editing rights for all your courses. On the details page of a single evaluation you can also add persons and assign edit rights for this evaluation to them.\r\n\r\n{% if user.needs_login_key and login_url %}With the following one-time URL you can login to the evaluation platform: {{ login_url }}{% elif user.needs_login_key %}We will send you a one-time login URL in a separate email.{% endif %}\r\n\r\nTo prepare your evaluations we would like to ask you for the following within a week:\r\n - Is the evaluation period appropriate? Please let the evaluation end before the final exam (written or oral examination, final assignment, etc.) 
of your course.\r\n - Are the selected questionnaires adequate for the evaluation? Please adapt the selection if necessary.\r\n - Are all contributors (lecturers, teaching assistants, etc.) included in the evaluation? Please add all additional persons with their appropriate questionnaires.\r\n\r\nThese evaluations need your approval:\r\n{% for evaluation in evaluations|order_by:\"full_name_en\" %} - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\nThank you very much in advance for your efforts!\r\nIf you have any questions or feedback, please contact the evaluation team ([email protected]).\r\n\r\nKind regards,\r\nthe Evaluation Team\r\n\r\n(This is an automated message.)" + "body": "{% load evaluation_filters %}(English version below)\r\n\r\n\r\nSehr geehrte Dozentin, sehr geehrter Dozent,\r\n\r\nvielen Dank, dass Sie in diesem Semester Veranstaltungen anbieten. Um die Evaluierung dieser Veranstaltungen auf unserer Plattform EvaP{% if not user.needs_login_key %} ({{ page_url }}){% endif %} durchf\u00fchren zu k\u00f6nnen, ben\u00f6tigen wir Ihre Mithilfe.\r\n\r\nSie k\u00f6nnen die folgenden Aufgaben auch an Ihre Mitarbeitenden delegieren. Unter \"Einstellungen\" k\u00f6nnen Sie Stellvertretende hinzuf\u00fcgen, die damit Bearbeitungsrechte f\u00fcr alle Ihre Veranstaltungen erhalten. Beim Bearbeiten einzelner Evaluierungen k\u00f6nnen Sie ebenfalls Bearbeitungsrechte vergeben, die sich auf diese Evaluierung beschr\u00e4nken.\r\n\r\n{% if user.needs_login_key and login_url %}Mit diesem Link k\u00f6nnen Sie sich einmalig bei der Platform anmelden: {{ login_url }}{% elif user.needs_login_key %}Ein Link zum Anmelden wird Ihnen per E-Mail zugesendet.{% endif %}\r\n\r\nWir m\u00f6chten Sie bitten, f\u00fcr Ihre Evaluierungen innerhalb der n\u00e4chsten Woche Folgendes zu \u00fcberpr\u00fcfen:\r\n - Ist der Evaluierungszeitraum angemessen? Bitte legen Sie das Ende der Evaluierung vor die finale Pr\u00fcfungsleistung (Klausur, Pr\u00fcfung, Ausarbeitung etc.).\r\n - Wurden die f\u00fcr die Evaluierung geeigneten Frageb\u00f6gen ausgew\u00e4hlt? Bitte passen Sie die Auswahl gegebenenfalls an.\r\n - Werden alle beteiligten Dozenten, \u00dcbungsleiter, Projektleiter, Seminarbetreuer etc. evaluiert? F\u00fcgen Sie bitte alle weiteren Personen mit den passenden Frageb\u00f6gen hinzu.\r\n\r\nFolgende Evaluierungen ben\u00f6tigen Ihre Freigabe:\r\n{% for evaluation in evaluations|order_by:\"full_name_de\" %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nVielen Dank im Voraus f\u00fcr Ihre M\u00fche!\r\nBei Fragen und R\u00fcckmeldungen k\u00f6nnen Sie sich jederzeit gerne an das Evaluierungsteam wenden ({{ contact_email }}).\r\n\r\nFreundliche Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear lecturer,\r\n\r\nThank you very much for teaching during this semester. We need your help so we can evaluate all courses on our platform EvaP{% if not user.needs_login_key %} ({{ page_url }}){% endif %}.\r\n\r\nYou can delegate the following tasks to your staff. Under \"Settings\" you can assign your delegates, which thereby will gain editing rights for all your courses. 
On the details page of a single evaluation you can also add persons and assign edit rights for this evaluation to them.\r\n\r\n{% if user.needs_login_key and login_url %}With the following one-time URL you can login to the evaluation platform: {{ login_url }}{% elif user.needs_login_key %}We will send you a one-time login URL in a separate email.{% endif %}\r\n\r\nTo prepare your evaluations we would like to ask you for the following within a week:\r\n - Is the evaluation period appropriate? Please let the evaluation end before the final exam (written or oral examination, final assignment, etc.) of your course.\r\n - Are the selected questionnaires adequate for the evaluation? Please adapt the selection if necessary.\r\n - Are all contributors (lecturers, teaching assistants, etc.) included in the evaluation? Please add all additional persons with their appropriate questionnaires.\r\n\r\nThese evaluations need your approval:\r\n{% for evaluation in evaluations|order_by:\"full_name_en\" %} - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\nThank you very much in advance for your efforts!\r\nIf you have any questions or feedback, please contact the evaluation team ({{ contact_email }}).\r\n\r\nKind regards,\r\nthe Evaluation Team\r\n\r\n(This is an automated message.)" } }, { @@ -128110,7 +128110,7 @@ "fields": { "name": "Student Reminder", "subject": "[EvaP] Die Evaluierung endet {% if first_due_in_days == 0 %}heute{% elif first_due_in_days == 1 %}morgen{% else %}in {{ first_due_in_days }} Tagen{% endif %} / The evaluation is about to end {% if first_due_in_days == 0 %}today{% elif first_due_in_days == 1 %}tomorrow{% else %}in {{ first_due_in_days }} days{% endif %}", - "body": "(English version below)\r\n\r\n\r\nHallo {{ user.first_name }},\r\n\r\nf\u00fcr eine deiner Evaluierungen endet {% if first_due_in_days == 0 %}heute{% elif first_due_in_days == 1 %}morgen{% else %}in {{ first_due_in_days }} Tagen{% endif %} die Evaluierungsfrist.\r\n\r\nAn folgenden Evaluierungen hast du noch nicht teilgenommen:\r\n{% for evaluation, due_in_days in due_evaluations %} - {{ evaluation.full_name_de }} (endet {% if due_in_days == 0 %}heute{% elif due_in_days == 1 %}morgen{% else %}in {{ due_in_days }} Tagen{% endif %})\r\n{% endfor %}\r\nDu kannst dein Feedback auf EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %} abgeben, wir w\u00fcrden uns \u00fcber deine Stimme freuen :)\r\nBei Fragen und R\u00fcckmeldungen kannst du dich jederzeit an uns wenden ([email protected]).\r\n\r\n{% if user.needs_login_key %}Klicke hier, um dich anzumelden: {{ login_url }}\r\n{% endif%}\r\nVielen Dank f\u00fcr deine M\u00fche und viele Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear {{ user.first_name }},\r\n\r\nThe evaluation period for one of your evaluations will end {% if first_due_in_days == 0 %}today{% elif first_due_in_days == 1 %}tomorrow{% else %}in {{ first_due_in_days }} days{% endif %}.\r\n\r\nYou did not yet participate in the following evaluations:\r\n{% for evaluation, due_in_days in due_evaluations %} - {{ evaluation.full_name_en }} (ends {% if due_in_days == 0 %}today{% elif due_in_days == 1 %}tomorrow{% else %}in {{ due_in_days }} days{% endif %})\r\n{% endfor %}\r\nYou can give your opinion on EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %}. 
We\u2019re looking forward to receive your feedback :)\r\nIf you have any questions or feedback, please let us know ([email protected]).\r\n\r\n{% if user.needs_login_key %}Click here to login: {{ login_url }}\r\n{% endif%}\r\nThank you very much for your efforts and kind regards,\r\nthe Evaluation Team\r\n\r\n(This is an automated message.)" + "body": "(English version below)\r\n\r\n\r\nHallo {{ user.first_name }},\r\n\r\nf\u00fcr eine deiner Evaluierungen endet {% if first_due_in_days == 0 %}heute{% elif first_due_in_days == 1 %}morgen{% else %}in {{ first_due_in_days }} Tagen{% endif %} die Evaluierungsfrist.\r\n\r\nAn folgenden Evaluierungen hast du noch nicht teilgenommen:\r\n{% for evaluation, due_in_days in due_evaluations %} - {{ evaluation.full_name_de }} (endet {% if due_in_days == 0 %}heute{% elif due_in_days == 1 %}morgen{% else %}in {{ due_in_days }} Tagen{% endif %})\r\n{% endfor %}\r\nDu kannst dein Feedback auf EvaP{% if not user.needs_login_key %} ({{ page_url }}){% endif %} abgeben, wir w\u00fcrden uns \u00fcber deine Stimme freuen :)\r\nBei Fragen und R\u00fcckmeldungen kannst du dich jederzeit an uns wenden ({{ contact_email }}).\r\n\r\n{% if user.needs_login_key %}Klicke hier, um dich anzumelden: {{ login_url }}\r\n{% endif%}\r\nVielen Dank f\u00fcr deine M\u00fche und viele Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear {{ user.first_name }},\r\n\r\nThe evaluation period for one of your evaluations will end {% if first_due_in_days == 0 %}today{% elif first_due_in_days == 1 %}tomorrow{% else %}in {{ first_due_in_days }} days{% endif %}.\r\n\r\nYou did not yet participate in the following evaluations:\r\n{% for evaluation, due_in_days in due_evaluations %} - {{ evaluation.full_name_en }} (ends {% if due_in_days == 0 %}today{% elif due_in_days == 1 %}tomorrow{% else %}in {{ due_in_days }} days{% endif %})\r\n{% endfor %}\r\nYou can give your opinion on EvaP{% if not user.needs_login_key %} ({{ page_url }}){% endif %}. 
We\u2019re looking forward to receive your feedback :)\r\nIf you have any questions or feedback, please let us know ({{ contact_email }}).\r\n\r\n{% if user.needs_login_key %}Click here to login: {{ login_url }}\r\n{% endif%}\r\nThank you very much for your efforts and kind regards,\r\nthe Evaluation Team\r\n\r\n(This is an automated message.)" } }, { @@ -128119,7 +128119,7 @@ "fields": { "name": "Publishing Notice Contributor", "subject": "[EvaP] Evaluierungsergebnisse ver\u00f6ffentlicht / Evaluation results published", - "body": "{% load evaluation_filters %}(English version below)\r\n\r\n\r\nSehr geehrte Dozentin, sehr geehrter Dozent,\r\n\r\ndie folgenden Evaluierungsergebnisse wurden soeben ver\u00f6ffentlicht:\r\n{% for evaluation in evaluations|order_by:\"full_name_de\" %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nDie Ergebnisse k\u00f6nnen auf EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %} eingesehen werden.{% if user.needs_login_key and login_url %} Hier klicken zum Anmelden: {{ login_url }}{% elif user.needs_login_key %} Ein Link zum Anmelden wird per E-Mail zugesendet.{% endif %}\r\n\r\nBei Fragen und R\u00fcckmeldungen stehen wir gerne zur Verf\u00fcgung ([email protected]).\r\n\r\nFreundliche Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear lecturer,\r\n\r\nthe results of the following evaluations have just been published:\r\n{% for evaluation in evaluations|order_by:\"full_name_en\" %} - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\nYou can view the results on EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %}.{% if user.needs_login_key and login_url %} Click here to login: {{ login_url }}{% elif user.needs_login_key %} We will send you a one-time login URL in a separate email.{% endif %}\r\n\r\nIf you have any questions or feedback, please let us know ([email protected]).\r\n\r\nKind regards,\r\nthe Evaluation Team\r\n\r\n(This is an automated message.)" + "body": "{% load evaluation_filters %}(English version below)\r\n\r\n\r\nSehr geehrte Dozentin, sehr geehrter Dozent,\r\n\r\ndie folgenden Evaluierungsergebnisse wurden soeben ver\u00f6ffentlicht:\r\n{% for evaluation in evaluations|order_by:\"full_name_de\" %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nDie Ergebnisse k\u00f6nnen auf EvaP{% if not user.needs_login_key %} ({{ page_url }}){% endif %} eingesehen werden.{% if user.needs_login_key and login_url %} Hier klicken zum Anmelden: {{ login_url }}{% elif user.needs_login_key %} Ein Link zum Anmelden wird per E-Mail zugesendet.{% endif %}\r\n\r\nBei Fragen und R\u00fcckmeldungen stehen wir gerne zur Verf\u00fcgung ({{ contact_email }}).\r\n\r\nFreundliche Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear lecturer,\r\n\r\nthe results of the following evaluations have just been published:\r\n{% for evaluation in evaluations|order_by:\"full_name_en\" %} - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\nYou can view the results on EvaP{% if not user.needs_login_key %} ({{ page_url }}){% endif %}.{% if user.needs_login_key and login_url %} Click here to login: {{ login_url }}{% elif user.needs_login_key %} We will send you a one-time login URL in a separate email.{% endif %}\r\n\r\nIf you have any questions or feedback, please let us know ({{ contact_email }}).\r\n\r\nKind regards,\r\nthe Evaluation Team\r\n\r\n(This is an automated message.)" } }, { @@ 
-128128,7 +128128,7 @@ "fields": { "name": "Login Key Created", "subject": "[EvaP] Ihr Anmeldelink / Your login URL", - "body": "BITTE NICHT WEITERLEITEN / PLEASE DO NOT FORWARD\r\n\r\nMit dem folgenden Link k\u00f6nnen Sie sich einmalig als externer Nutzer bei der Evaluierungsplattform anmelden:\r\nWith the following one-time URL you can login to the evaluation platform as an external user:\r\n\r\n{{ login_url }}\r\n\r\nBei Fragen und R\u00fcckmeldungen k\u00f6nnen Sie sich jederzeit gerne an das Evaluierungsteam wenden ([email protected]).\r\nIf you have any questions or feedback, please contact the Evaluation Team ([email protected])." + "body": "BITTE NICHT WEITERLEITEN / PLEASE DO NOT FORWARD\r\n\r\nMit dem folgenden Link k\u00f6nnen Sie sich einmalig als externer Nutzer bei der Evaluierungsplattform anmelden:\r\nWith the following one-time URL you can login to the evaluation platform as an external user:\r\n\r\n{{ login_url }}\r\n\r\nBei Fragen und R\u00fcckmeldungen k\u00f6nnen Sie sich jederzeit gerne an das Evaluierungsteam wenden ({{ contact_email }}).\r\nIf you have any questions or feedback, please contact the Evaluation Team ({{ contact_email }})." } }, { @@ -128137,7 +128137,7 @@ "fields": { "name": "Evaluation Started", "subject": "[EvaP] Evaluierung hat begonnen / Evaluation started", - "body": "{% load evaluation_filters %}(English version below)\r\n\r\n\r\nHallo {{ user.first_name }},\r\n\r\nf\u00fcr die folgenden Evaluierungen hat die Evaluierungsphase begonnen:\r\n{% for evaluation in evaluations|order_by:\"full_name_de\" %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nDu kannst dein Feedback auf EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %} abgeben, die Dozenten und wir freuen uns \u00fcber deine Bewertung. Bei Fragen und R\u00fcckmeldungen kannst du dich jederzeit an uns wenden ([email protected]).\r\n\r\n{% if user.needs_login_key %}Klicke hier, um dich anzumelden: {{ login_url }}\r\n{% endif %}{% if due_evaluations|length > 1%}Diese Evaluierungen warten auf deine Bewertung:\r\n{% for evaluation, due_in_days in due_evaluations %} - {{ evaluation.full_name_de }} (endet {% if due_in_days == 0 %}heute{% elif due_in_days == 1 %}morgen{% else %}in {{ due_in_days }} Tagen{% endif %})\r\n{% endfor %}{% endif %}\r\nVielen Dank f\u00fcr deine M\u00fche und viele Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear {{ user.first_name }},\r\n\r\nThe evaluation period for the following evaluations just started:\r\n{% for evaluation in evaluations|order_by:\"full_name_en\" %} - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\nYou can evaluate them on EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %}. The lecturers and we are looking forward to receive your feedback. 
If you have any questions or feedback, please let us know ([email protected]).\r\n\r\n{% if user.needs_login_key %}Click here to login: {{ login_url }}\r\n{% endif %}{% if due_evaluations|length > 1%}These evaluations are waiting for your feedback:\r\n{% for evaluation, due_in_days in due_evaluations %} - {{ evaluation.full_name_en }} (ends {% if due_in_days == 0 %}today{% elif due_in_days == 1 %}tomorrow{% else %}in {{ due_in_days }} days{% endif %})\r\n{% endfor %}{% endif %}\r\nThank you very much for your efforts and kind regards,\r\nthe Evaluation Team\r\n\r\n(This is an automated message.)" + "body": "{% load evaluation_filters %}(English version below)\r\n\r\n\r\nHallo {{ user.first_name }},\r\n\r\nf\u00fcr die folgenden Evaluierungen hat die Evaluierungsphase begonnen:\r\n{% for evaluation in evaluations|order_by:\"full_name_de\" %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nDu kannst dein Feedback auf EvaP{% if not user.needs_login_key %} ({{ page_url }}){% endif %} abgeben, die Dozenten und wir freuen uns \u00fcber deine Bewertung. Bei Fragen und R\u00fcckmeldungen kannst du dich jederzeit an uns wenden ({{ contact_email }}).\r\n\r\n{% if user.needs_login_key %}Klicke hier, um dich anzumelden: {{ login_url }}\r\n{% endif %}{% if due_evaluations|length > 1%}Diese Evaluierungen warten auf deine Bewertung:\r\n{% for evaluation, due_in_days in due_evaluations %} - {{ evaluation.full_name_de }} (endet {% if due_in_days == 0 %}heute{% elif due_in_days == 1 %}morgen{% else %}in {{ due_in_days }} Tagen{% endif %})\r\n{% endfor %}{% endif %}\r\nVielen Dank f\u00fcr deine M\u00fche und viele Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear {{ user.first_name }},\r\n\r\nThe evaluation period for the following evaluations just started:\r\n{% for evaluation in evaluations|order_by:\"full_name_en\" %} - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\nYou can evaluate them on EvaP{% if not user.needs_login_key %} ({{ page_url }}){% endif %}. The lecturers and we are looking forward to receive your feedback. If you have any questions or feedback, please let us know ({{ contact_email }}).\r\n\r\n{% if user.needs_login_key %}Click here to login: {{ login_url }}\r\n{% endif %}{% if due_evaluations|length > 1%}These evaluations are waiting for your feedback:\r\n{% for evaluation, due_in_days in due_evaluations %} - {{ evaluation.full_name_en }} (ends {% if due_in_days == 0 %}today{% elif due_in_days == 1 %}tomorrow{% else %}in {{ due_in_days }} days{% endif %})\r\n{% endfor %}{% endif %}\r\nThank you very much for your efforts and kind regards,\r\nthe Evaluation Team\r\n\r\n(This is an automated message.)" } }, { @@ -128146,7 +128146,7 @@ "fields": { "name": "Editor Review Reminder", "subject": "[EvaP] Reminder: Neue Lehrveranstaltungen stehen zur \u00dcberpr\u00fcfung bereit / New Evaluation ready for approval", - "body": "{% load evaluation_filters %}(English version below)\r\n\r\n\r\nSehr geehrte Dozentin, sehr geehrter Dozent,\r\n\r\num die Evaluierung Ihrer Veranstaltungen auf unserer Plattform EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %} durchf\u00fchren zu k\u00f6nnen, ben\u00f6tigen wir Ihre Mithilfe.\r\n\r\nSie k\u00f6nnen die folgenden Aufgaben auch an Ihre Mitarbeitenden delegieren. Unter \"Einstellungen\" k\u00f6nnen Sie Stellvertretende hinzuf\u00fcgen, die damit Bearbeitungsrechte f\u00fcr alle Ihre Veranstaltungen erhalten. 
Beim Bearbeiten einzelner Evaluierungen k\u00f6nnen Sie ebenfalls Bearbeitungsrechte vergeben, die sich auf diese Evaluierung beschr\u00e4nken.\r\n\r\n{% if user.needs_login_key and login_url %}Mit diesem Link k\u00f6nnen Sie sich einmalig bei der Platform anmelden: {{ login_url }}{% elif user.needs_login_key %}Ein Link zum Anmelden wird Ihnen per E-Mail zugesendet.{% endif %}\r\n\r\nWir m\u00f6chten Sie bitten, f\u00fcr Ihre Evaluierungen sobald wie m\u00f6glich Folgendes zu \u00fcberpr\u00fcfen:\r\n - Ist der Evaluierungszeitraum angemessen? Bitte legen Sie das Ende der Evaluierung vor die finale Pr\u00fcfungsleistung (Klausur, Pr\u00fcfung, Ausarbeitung etc.).\r\n - Wurden die f\u00fcr die Evaluierung geeigneten Frageb\u00f6gen ausgew\u00e4hlt? Bitte passen Sie die Auswahl gegebenenfalls an.\r\n - Werden alle beteiligten Dozenten, \u00dcbungsleiter, Projektleiter, Seminarbetreuer etc. evaluiert? F\u00fcgen Sie bitte alle weiteren Personen mit den passenden Frageb\u00f6gen hinzu.\r\n\r\nFolgende Evaluierungen ben\u00f6tigen Ihre Freigabe:\r\n{% for evaluation in evaluations|order_by:\"full_name_de\" %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nVielen Dank im Voraus f\u00fcr Ihre M\u00fche!\r\nBei Fragen und R\u00fcckmeldungen k\u00f6nnen Sie sich jederzeit gerne an das Evaluierungsteam wenden ([email protected]).\r\n\r\nFreundliche Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear lecturer,\r\n\r\nwe need your help so we can evaluate all courses on our platform EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %}.\r\n\r\nYou can delegate the following tasks to your staff. Under \"Settings\" you can assign your delegates, which thereby will gain editing rights for all your courses. On the details page of a single evaluation you can also add persons and assign edit rights for this evaluation to them.\r\n\r\n{% if user.needs_login_key and login_url %}With the following one-time URL you can login to the evaluation platform: {{ login_url }}{% elif user.needs_login_key %}We will send you a one-time login URL in a separate email.{% endif %}\r\n\r\nTo prepare your evaluations we would like to ask you for the following as soon as possible:\r\n - Is the evaluation period appropriate? Please let the evaluation end before the final exam (written or oral examination, final assignment, etc.) of your course.\r\n - Are the selected questionnaires adequate for the evaluation? Please adapt the selection if necessary.\r\n - Are all contributors (lecturers, teaching assistants, etc.) included in the evaluation? Please add all additional persons with their appropriate questionnaires.\r\n\r\nThese evaluations need your approval:\r\n{% for evaluation in evaluations|order_by:\"full_name_en\" %} - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\nThank you very much in advance for your efforts!\r\nIf you have any questions or feedback, please contact the evaluation team ([email protected]).\r\n\r\nKind regards,\r\nthe Evaluation Team\r\n\r\n(This is an automated message.)" + "body": "{% load evaluation_filters %}(English version below)\r\n\r\n\r\nSehr geehrte Dozentin, sehr geehrter Dozent,\r\n\r\num die Evaluierung Ihrer Veranstaltungen auf unserer Plattform EvaP{% if not user.needs_login_key %} ({{ page_url }}){% endif %} durchf\u00fchren zu k\u00f6nnen, ben\u00f6tigen wir Ihre Mithilfe.\r\n\r\nSie k\u00f6nnen die folgenden Aufgaben auch an Ihre Mitarbeitenden delegieren. 
Unter \"Einstellungen\" k\u00f6nnen Sie Stellvertretende hinzuf\u00fcgen, die damit Bearbeitungsrechte f\u00fcr alle Ihre Veranstaltungen erhalten. Beim Bearbeiten einzelner Evaluierungen k\u00f6nnen Sie ebenfalls Bearbeitungsrechte vergeben, die sich auf diese Evaluierung beschr\u00e4nken.\r\n\r\n{% if user.needs_login_key and login_url %}Mit diesem Link k\u00f6nnen Sie sich einmalig bei der Platform anmelden: {{ login_url }}{% elif user.needs_login_key %}Ein Link zum Anmelden wird Ihnen per E-Mail zugesendet.{% endif %}\r\n\r\nWir m\u00f6chten Sie bitten, f\u00fcr Ihre Evaluierungen sobald wie m\u00f6glich Folgendes zu \u00fcberpr\u00fcfen:\r\n - Ist der Evaluierungszeitraum angemessen? Bitte legen Sie das Ende der Evaluierung vor die finale Pr\u00fcfungsleistung (Klausur, Pr\u00fcfung, Ausarbeitung etc.).\r\n - Wurden die f\u00fcr die Evaluierung geeigneten Frageb\u00f6gen ausgew\u00e4hlt? Bitte passen Sie die Auswahl gegebenenfalls an.\r\n - Werden alle beteiligten Dozenten, \u00dcbungsleiter, Projektleiter, Seminarbetreuer etc. evaluiert? F\u00fcgen Sie bitte alle weiteren Personen mit den passenden Frageb\u00f6gen hinzu.\r\n\r\nFolgende Evaluierungen ben\u00f6tigen Ihre Freigabe:\r\n{% for evaluation in evaluations|order_by:\"full_name_de\" %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nVielen Dank im Voraus f\u00fcr Ihre M\u00fche!\r\nBei Fragen und R\u00fcckmeldungen k\u00f6nnen Sie sich jederzeit gerne an das Evaluierungsteam wenden ({{ contact_email }}).\r\n\r\nFreundliche Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear lecturer,\r\n\r\nwe need your help so we can evaluate all courses on our platform EvaP{% if not user.needs_login_key %} ({{ page_url }}){% endif %}.\r\n\r\nYou can delegate the following tasks to your staff. Under \"Settings\" you can assign your delegates, which thereby will gain editing rights for all your courses. On the details page of a single evaluation you can also add persons and assign edit rights for this evaluation to them.\r\n\r\n{% if user.needs_login_key and login_url %}With the following one-time URL you can login to the evaluation platform: {{ login_url }}{% elif user.needs_login_key %}We will send you a one-time login URL in a separate email.{% endif %}\r\n\r\nTo prepare your evaluations we would like to ask you for the following as soon as possible:\r\n - Is the evaluation period appropriate? Please let the evaluation end before the final exam (written or oral examination, final assignment, etc.) of your course.\r\n - Are the selected questionnaires adequate for the evaluation? Please adapt the selection if necessary.\r\n - Are all contributors (lecturers, teaching assistants, etc.) included in the evaluation? 
Please add all additional persons with their appropriate questionnaires.\r\n\r\nThese evaluations need your approval:\r\n{% for evaluation in evaluations|order_by:\"full_name_en\" %} - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\nThank you very much in advance for your efforts!\r\nIf you have any questions or feedback, please contact the evaluation team ({{ contact_email }}).\r\n\r\nKind regards,\r\nthe Evaluation Team\r\n\r\n(This is an automated message.)" } }, { @@ -128155,7 +128155,7 @@ "fields": { "name": "Direct Delegation", "subject": "[EvaP] Bitte Evaluierung vorbereiten / Please prepare evaluation", - "body": "(English version below)\r\n\r\n\r\nLiebe/r {{ delegate_user.full_name }},\r\n\r\nSie werden von {{ user.full_name }} darum gebeten, die Evaluierung f\u00fcr \"{{ evaluation.full_name_de }}\" auf der Plattform EvaP (https://evap.hpi.de) vorzubereiten.\r\n\r\nBitte \u00fcberpr\u00fcfen Sie m\u00f6glichst bald die folgenden Dinge:\r\n - Ist der Evaluierungszeitraum angemessen? Bitte legen Sie das Ende der Evaluierung vor die finale Pr\u00fcfungsleistung (Klausur, Pr\u00fcfung, Ausarbeitung etc.).\r\n - Wurden die f\u00fcr die Veranstaltung geeigneten Frageb\u00f6gen ausgew\u00e4hlt? Bitte passen Sie die Auswahl gegebenenfalls an.\r\n - Werden alle beteiligten Dozenten, \u00dcbungsleiter, Projektleiter, Seminarbetreuer etc. evaluiert? F\u00fcgen Sie bitte alle weiteren Personen mit den passenden Frageb\u00f6gen hinzu.\r\n\r\nVielen Dank im Voraus f\u00fcr Ihre M\u00fche!\r\nBei Fragen und R\u00fcckmeldungen k\u00f6nnen Sie sich jederzeit gerne an das Evaluierungsteam wenden ([email protected]).\r\n\r\nFreundliche Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear {{ delegate_user.full_name }},\r\n\r\n{{ user.full_name }} asks you to prepare the evaluation for \"{{ evaluation.full_name_en }}\" on the platform EvaP (https://evap.hpi.de).\r\n\r\nPlease check the following as soon as possible:\r\n - Is the evaluation period appropriate? Please let the evaluation end before the final exam (written or oral examination, final assignment, etc.) of your course.\r\n - Are the selected questionnaires adequate for the course? Please adapt the selection if necessary.\r\n - Are all contributors (lecturers, teaching assistants, etc.) included in the evaluation? Please add all additional persons with their appropriate questionnaires.\r\n\r\nThank you very much in advance for your efforts!\r\nIf you have any questions or feedback, please contact the evaluation team ([email protected]).\r\n\r\nKind regards,\r\nthe Evaluation Team\r\n\r\n(This is an automated message.)" + "body": "(English version below)\r\n\r\n\r\nLiebe/r {{ delegate_user.full_name }},\r\n\r\nSie werden von {{ user.full_name }} darum gebeten, die Evaluierung f\u00fcr \"{{ evaluation.full_name_de }}\" auf der Plattform EvaP ({{ page_url }}) vorzubereiten.\r\n\r\nBitte \u00fcberpr\u00fcfen Sie m\u00f6glichst bald die folgenden Dinge:\r\n - Ist der Evaluierungszeitraum angemessen? Bitte legen Sie das Ende der Evaluierung vor die finale Pr\u00fcfungsleistung (Klausur, Pr\u00fcfung, Ausarbeitung etc.).\r\n - Wurden die f\u00fcr die Veranstaltung geeigneten Frageb\u00f6gen ausgew\u00e4hlt? Bitte passen Sie die Auswahl gegebenenfalls an.\r\n - Werden alle beteiligten Dozenten, \u00dcbungsleiter, Projektleiter, Seminarbetreuer etc. evaluiert? 
F\u00fcgen Sie bitte alle weiteren Personen mit den passenden Frageb\u00f6gen hinzu.\r\n\r\nVielen Dank im Voraus f\u00fcr Ihre M\u00fche!\r\nBei Fragen und R\u00fcckmeldungen k\u00f6nnen Sie sich jederzeit gerne an das Evaluierungsteam wenden ({{ contact_email }}).\r\n\r\nFreundliche Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear {{ delegate_user.full_name }},\r\n\r\n{{ user.full_name }} asks you to prepare the evaluation for \"{{ evaluation.full_name_en }}\" on the platform EvaP ({{ page_url }}).\r\n\r\nPlease check the following as soon as possible:\r\n - Is the evaluation period appropriate? Please let the evaluation end before the final exam (written or oral examination, final assignment, etc.) of your course.\r\n - Are the selected questionnaires adequate for the course? Please adapt the selection if necessary.\r\n - Are all contributors (lecturers, teaching assistants, etc.) included in the evaluation? Please add all additional persons with their appropriate questionnaires.\r\n\r\nThank you very much in advance for your efforts!\r\nIf you have any questions or feedback, please contact the evaluation team ({{ contact_email }}).\r\n\r\nKind regards,\r\nthe Evaluation Team\r\n\r\n(This is an automated message.)" } }, { @@ -128164,7 +128164,7 @@ "fields": { "name": "Publishing Notice Participant", "subject": "[EvaP] Evaluierungsergebnisse ver\u00f6ffentlicht / Evaluation results published", - "body": "{% load evaluation_filters %}(English version below)\r\n\r\n\r\nHallo {{ user.first_name }},\r\n\r\ndie folgenden Evaluierungsergebnisse wurden soeben ver\u00f6ffentlicht:\r\n{% for evaluation in evaluations|order_by:\"full_name_de\" %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nDie Ergebnisse k\u00f6nnen auf EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %} eingesehen werden.{% if user.needs_login_key and login_url %} Hier klicken zum Anmelden: {{ login_url }}{% elif user.needs_login_key %} Ein Link zum Anmelden wird per E-Mail zugesendet.{% endif %}\r\n\r\nBei Fragen und R\u00fcckmeldungen stehen wir gerne zur Verf\u00fcgung ([email protected]).\r\n\r\nFreundliche Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear {{ user.first_name }},\r\n\r\nthe results of the following evaluations have just been published:\r\n{% for evaluation in evaluations|order_by:\"full_name_en\" %} - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\nYou can view the results on EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %}.{% if user.needs_login_key and login_url %} Click here to login: {{ login_url }}{% elif user.needs_login_key %} We will send you a one-time login URL in a separate email.{% endif %}\r\n\r\nIf you have any questions or feedback, please let us know ([email protected]).\r\n\r\nKind regards,\r\nthe Evaluation Team\r\n\r\n(This is an automated message.)" + "body": "{% load evaluation_filters %}(English version below)\r\n\r\n\r\nHallo {{ user.first_name }},\r\n\r\ndie folgenden Evaluierungsergebnisse wurden soeben ver\u00f6ffentlicht:\r\n{% for evaluation in evaluations|order_by:\"full_name_de\" %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nDie Ergebnisse k\u00f6nnen auf EvaP{% if not user.needs_login_key %} ({{ page_url }}){% endif %} eingesehen werden.{% if user.needs_login_key and login_url %} Hier klicken zum Anmelden: {{ login_url }}{% elif user.needs_login_key %} Ein Link zum Anmelden 
wird per E-Mail zugesendet.{% endif %}\r\n\r\nBei Fragen und R\u00fcckmeldungen stehen wir gerne zur Verf\u00fcgung ({{ contact_email }}).\r\n\r\nFreundliche Gr\u00fc\u00dfe,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear {{ user.first_name }},\r\n\r\nthe results of the following evaluations have just been published:\r\n{% for evaluation in evaluations|order_by:\"full_name_en\" %} - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\nYou can view the results on EvaP{% if not user.needs_login_key %} ({{ page_url }}){% endif %}.{% if user.needs_login_key and login_url %} Click here to login: {{ login_url }}{% elif user.needs_login_key %} We will send you a one-time login URL in a separate email.{% endif %}\r\n\r\nIf you have any questions or feedback, please let us know ({{ contact_email }}).\r\n\r\nKind regards,\r\nthe Evaluation Team\r\n\r\n(This is an automated message.)" } }, {
Generalize email template and FAQ test data The email templates and FAQ questions/answers in the test data should be more generalized and abstracted from the current HPI-specific phrasing.
Email addresses and URLs in the templates should be filled from the settings file. Could you detail how you imagine that working? We can't access the settings from the JSON file. The templates can have a variable that is filled in during rendering when the emails are sent (just like the login URL).
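A minimal sketch of the approach hinted at above, assuming the template body is rendered as a Django template and that the generic values come from settings (the setting names and the helper itself are illustrative, not taken from EvaP's code):

```python
from django.conf import settings
from django.template import Context, Template


def render_email_body(body_template: str, **extra_context) -> str:
    """Render an email body, injecting generic platform values.

    PAGE_URL and CONTACT_EMAIL are assumed setting names here; the real
    project may name them differently or pass the values in another way.
    """
    context = {
        "page_url": settings.PAGE_URL,
        "contact_email": settings.CONTACT_EMAIL,
        **extra_context,
    }
    return Template(body_template).render(Context(context))
```

This way the JSON fixture only contains {{ page_url }} and {{ contact_email }} placeholders, and the concrete values stay in the settings file.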
2021-01-04T18:46:31
e-valuation/EvaP
1,559
e-valuation__EvaP-1559
[ "1068" ]
a1cafc3b49cf6087392b2a3763be52a64a480b20
diff --git a/evap/staff/views.py b/evap/staff/views.py --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -1706,7 +1706,26 @@ def template_edit(request, template_id): messages.success(request, _("Successfully updated template.")) return redirect('staff:index') - return render(request, "staff_template_form.html", dict(form=form, template=template)) + available_variables = [ + "contact_email", + "page_url", + "login_url", # only if they need it + "user", + ] + + if template.name == EmailTemplate.STUDENT_REMINDER: + available_variables += ["first_due_in_days", "due_evaluations"] + elif template.name in [EmailTemplate.PUBLISHING_NOTICE_CONTRIBUTOR, EmailTemplate.PUBLISHING_NOTICE_PARTICIPANT, EmailTemplate.EDITOR_REVIEW_NOTICE, EmailTemplate.EDITOR_REVIEW_REMINDER]: + available_variables += ["evaluations"] + elif template.name == EmailTemplate.EVALUATION_STARTED: + available_variables += ["evaluations", "due_evaluations"] + elif template.name == EmailTemplate.DIRECT_DELEGATION: + available_variables += ["evaluation", "delegate_user"] + + available_variables = ["{{ " + variable + " }}" for variable in available_variables] + available_variables.sort() + + return render(request, "staff_template_form.html", dict(form=form, template=template, available_variables=available_variables)) @manager_required
Show available variables for email templates Staff users should see a list of available variables that can be used when editing the email templates.
Yeah, definitely. I would make them more copy-pasteable by including the {% %} tags, or maybe even adding a "copy to clipboard" button alongside the variables that adds those tags. Maybe we could also give the user a snippet for the loop through the courses this way.
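For illustration, a rough sketch of the idea (mirroring the patch above, but with a simplified, hypothetical mapping rather than EvaP's exact lists): collect the variables each template supports and wrap them in template tags so they can be copied directly into the editor:

```python
# Illustrative mapping only; the actual per-template variables live in the view.
EXTRA_VARIABLES = {
    "Student Reminder": ["first_due_in_days", "due_evaluations"],
    "Evaluation Started": ["evaluations", "due_evaluations"],
    "Direct Delegation": ["evaluation", "delegate_user"],
}


def available_variables_for(template_name: str) -> list[str]:
    """Return copy-pasteable snippets like "{{ user }}" for one email template."""
    variables = ["contact_email", "page_url", "login_url", "user"]
    variables += EXTRA_VARIABLES.get(template_name, [])
    return sorted("{{ " + variable + " }}" for variable in variables)
```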
2021-02-15T16:23:34
e-valuation/EvaP
1,562
e-valuation__EvaP-1562
[ "1530" ]
81feb88c824cfbee467b1249c13a875144e077a7
diff --git a/evap/evaluation/auth.py b/evap/evaluation/auth.py --- a/evap/evaluation/auth.py +++ b/evap/evaluation/auth.py @@ -7,6 +7,7 @@ from evap.evaluation.models import UserProfile from evap.evaluation.tools import clean_email from evap.rewards.tools import can_reward_points_be_used_by +from evap.staff.tools import delete_navbar_cache_for_users class RequestAuthUserBackend(ModelBackend): @@ -24,7 +25,9 @@ def authenticate(self, request, key): # pylint: disable=arguments-differ return None try: - return UserProfile.objects.get(login_key=key) + user = UserProfile.objects.get(login_key=key) + after_login_function(request, user, None) + return user except UserProfile.DoesNotExist: return None @@ -38,10 +41,15 @@ def authenticate(self, request, email=None, password=None): # pylint: disable=a return None else: if user.check_password(password): + after_login_function(request, user, None) return user return None +def after_login_function(request, user, _client): + delete_navbar_cache_for_users([user]) + + def user_passes_test(test_func): """ Decorator for views that checks whether a user passes a given test diff --git a/evap/settings.py b/evap/settings.py --- a/evap/settings.py +++ b/evap/settings.py @@ -367,6 +367,7 @@ ### OpenID Login # replace 'example.com', OIDC_RP_CLIENT_ID and OIDC_RP_CLIENT_SECRET with real values in localsettings when activating ACTIVATE_OPEN_ID_LOGIN = False +OIDC_AFTER_USERLOGIN_HOOK = 'evap.evaluation.auth.after_login_function' OIDC_RENEW_ID_TOKEN_EXPIRY_SECONDS = 60 * 60 * 24 * 7 # one week OIDC_RP_SIGN_ALGO = 'RS256' OIDC_USERNAME_ALGO = ''
diff --git a/evap/evaluation/tests/test_auth.py b/evap/evaluation/tests/test_auth.py --- a/evap/evaluation/tests/test_auth.py +++ b/evap/evaluation/tests/test_auth.py @@ -1,10 +1,11 @@ from unittest.mock import patch import urllib -from django.urls import reverse -from django.core import mail from django.conf import settings +from django.contrib.auth.models import Group +from django.core import mail from django.test import override_settings +from django.urls import reverse from model_bakery import baker @@ -135,3 +136,49 @@ def test_oidc_login(self): # user should see the Logout button then. self.assertIn('Logout', page.body.decode()) + + +class LoginTestsWithCSRF(WebTest): + @classmethod + def setUpTestData(cls): + cls.staff_user = baker.make( + UserProfile, + email='[email protected]', + groups=[Group.objects.get(name='Manager')] + ) + cls.staff_user_password = 'staff' + cls.staff_user.set_password(cls.staff_user_password) + cls.staff_user.save() + + def test_entering_staff_mode_after_logout_and_login(self): + """ + Asserts that managers can enter the staff mode after logging out and logging in again. + Regression test for #1530. + """ + page = self.app.get(reverse('evaluation:index')) + form = page.forms['email-login-form'] + form['email'] = self.staff_user.email + form['password'] = self.staff_user_password + form.submit() + + # staff user should now be logged in and see the logout button + page = self.app.get(reverse('results:index')) + self.assertContains(page, 'Logout') + + # log out user + page = self.app.get(reverse('django-auth-logout')).follow() + self.assertNotContains(page, 'Logout') + + # log user in again + page = self.app.get(reverse('evaluation:index')) + form = page.forms['email-login-form'] + form['email'] = self.staff_user.email + form['password'] = self.staff_user_password + form.submit() + + # enter staff mode + page = self.app.get(reverse('results:index')) + page.forms['enter-staff-mode-form'].submit() + page = self.app.get(reverse('results:index')) + self.assertTrue('staff_mode_start_time' in self.app.session) + self.assertContains(page, 'Exit Staff Mode')
Navbar CSRF caching broken for entering staff mode Steps to reproduce (on master, after reloading test data): 1. Log in as evap 2. Enter and leave staff mode 3. Log out and log in again 4. Try to enter staff mode This will throw a 403 CSRF error; people in Discord concluded that it is because the navbar is cached across logins. Clearing this cache on logout was proposed. This is reset when reloading the test data again.
We're currently considering dropping the navbar cache altogether, since it has caused quite a few bugs over time and requires a lot of cache invalidation all around the codebase. Thus, it would be nice to know how much performance we actually gain from the cache (originally added in https://github.com/e-valuation/EvaP/pull/598). We suspect that one of the main impacts might be that for n permission checks, we currently do n queries (check out the properties `is_manager`, `is_reviewer` and `is_grade_publisher` for examples). We should be able to prevent this by prefetching the groups of a user and using the prefetched values. Or, we could override the cached_properties (although I consider this more likely to cause bugs in the future, as it is hidden behaviour). Prefetching wouldn't work directly, as filter queries are used, but using the newly added `is_prefetched` would work (i.e. prefetching all the user's groups and then iterating through the list if it is prefetched, instead of doing the filter query). Seems like we had the session ID in the cache key at some point but removed it: https://github.com/e-valuation/EvaP/issues/1208 I'll also add that a CSRF error is thrown when I'm logged in in a different browser: 1. Log in as evap (in browser A) 2. Enter staff mode 3. Log in as evap (in browser B) 4. Try to leave staff mode Due to the similarity with this issue, I guess that this error case is also covered by the defect.
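A rough sketch of the prefetching idea mentioned above (a hypothetical helper, not EvaP's actual code): when `prefetch_related("groups")` was applied to the queryset a user came from, iterating over `groups.all()` reuses the cached rows, so several permission checks no longer cost one query each, whereas `groups.filter(...)` always hits the database:

```python
def user_is_in_group(user, group_name: str) -> bool:
    """Check group membership by iterating instead of filtering.

    Iterating over groups.all() reuses the prefetch cache if it exists;
    a .filter(name=...) call would trigger a new query every time.
    """
    return any(group.name == group_name for group in user.groups.all())


# Usage sketch: prefetch once, then run as many permission checks as needed.
# for user in UserProfile.objects.prefetch_related("groups"):
#     if user_is_in_group(user, "Manager"):
#         ...
```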
2021-02-17T18:52:24
e-valuation/EvaP
1,579
e-valuation__EvaP-1579
[ "1303" ]
03e05bc277aba1cf7ea4b68ec5b88d12a141c3f3
diff --git a/evap/staff/views.py b/evap/staff/views.py --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -16,6 +16,7 @@ from django.http import HttpResponse, HttpResponseRedirect from django.shortcuts import get_object_or_404, redirect, render from django.urls import reverse +from django.utils.html import format_html from django.utils.translation import gettext as _, gettext_lazy from django.utils.translation import get_language, ngettext from django.views.decorators.http import require_POST @@ -974,11 +975,24 @@ def evaluation_email(request, semester_id, evaluation_id): return render(request, "staff_evaluation_email.html", dict(semester=semester, evaluation=evaluation, form=form)) +def helper_delete_users_from_evaluation(evaluation, operation): + if 'participants' in operation: + deleted_person_count = evaluation.participants.count() + deletion_message = _("{} participants were deleted from evaluation {}") + evaluation.participants.clear() + elif 'contributors' in operation: + deleted_person_count = evaluation.contributions.exclude(contributor=None).count() + deletion_message = _("{} contributors were deleted from evaluation {}") + evaluation.contributions.exclude(contributor=None).delete() + + return deleted_person_count, deletion_message + @manager_required [email protected] def evaluation_person_management(request, semester_id, evaluation_id): # This view indeed handles 4 tasks. However, they are tightly coupled, splitting them up # would lead to more code duplication. Thus, we decided to leave it as is for now - # pylint: disable=too-many-locals + # pylint: disable=too-many-locals, too-many-branches semester = get_object_or_404(Semester, id=semester_id) evaluation = get_object_or_404(Evaluation, id=evaluation_id, course__semester=semester) if evaluation.participations_are_archived: @@ -996,8 +1010,8 @@ def evaluation_person_management(request, semester_id, evaluation_id): if request.method == "POST": operation = request.POST.get('operation') - if operation not in ('test-participants', 'import-participants', 'copy-participants', - 'test-contributors', 'import-contributors', 'copy-contributors'): + if operation not in ('test-participants', 'import-participants', 'copy-participants', 'import-replace-participants', 'copy-replace-participants', + 'test-contributors', 'import-contributors', 'copy-contributors', 'import-replace-contributors', 'copy-replace-contributors'): raise SuspiciousOperation("Invalid POST operation") import_type = ImportType.PARTICIPANT if 'participants' in operation else ImportType.CONTRIBUTOR @@ -1014,21 +1028,28 @@ def evaluation_person_management(request, semester_id, evaluation_id): if not errors: save_import_file(excel_file, request.user.id, import_type) - elif 'import' in operation: - file_content = get_import_file_content_or_raise(request.user.id, import_type) - success_messages, warnings, __ = PersonImporter.process_file_content(import_type, evaluation, test_run=False, file_content=file_content) - delete_import_file(request.user.id, import_type) + else: + additional_messages = [] + if 'replace' in operation: + deleted_person_count, deletion_message = helper_delete_users_from_evaluation(evaluation, operation) + additional_messages = format_html(deletion_message, deleted_person_count, evaluation.full_name) + + if 'import' in operation: + file_content = get_import_file_content_or_raise(request.user.id, import_type) + success_messages, warnings, __ = PersonImporter.process_file_content(import_type, evaluation, test_run=False, file_content=file_content) + 
delete_import_file(request.user.id, import_type) + + elif 'copy' in operation: + copy_form.evaluation_selection_required = True + if copy_form.is_valid(): + import_evaluation = copy_form.cleaned_data['evaluation'] + success_messages, warnings, errors = PersonImporter.process_source_evaluation(import_type, evaluation, test_run=False, source_evaluation=import_evaluation) + + success_messages.insert(0, additional_messages) + forward_messages(request, success_messages, warnings) return redirect('staff:semester_view', semester_id) - elif 'copy' in operation: - copy_form.evaluation_selection_required = True - if copy_form.is_valid(): - import_evaluation = copy_form.cleaned_data['evaluation'] - success_messages, warnings, errors = PersonImporter.process_source_evaluation(import_type, evaluation, test_run=False, source_evaluation=import_evaluation) - forward_messages(request, success_messages, warnings) - return redirect('staff:semester_view', semester_id) - participant_test_passed = import_file_exists(request.user.id, ImportType.PARTICIPANT) contributor_test_passed = import_file_exists(request.user.id, ImportType.CONTRIBUTOR) # casting warnings to a normal dict is necessary for the template to iterate over it.
diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -1459,16 +1459,15 @@ def test_evaluation_create(self): class TestEvaluationCopyView(WebTestStaffMode): - url = '/staff/semester/1/evaluation/1/copy' + @classmethod def setUpTestData(cls): cls.manager = make_manager() - cls.semester = baker.make(Semester, pk=1) + cls.semester = baker.make(Semester) cls.course = baker.make(Course, semester=cls.semester) cls.evaluation = baker.make( Evaluation, - pk=1, course=cls.course, name_de="Das Original", name_en="The Original", @@ -1481,6 +1480,7 @@ def setUpTestData(cls): evaluation=cls.evaluation, contributor=baker.make(UserProfile), ) + cls.url = f'/staff/semester/{cls.semester.id}/evaluation/{cls.evaluation.id}/copy' def test_copy_forms_are_used(self): response = self.app.get(self.url, user=self.manager, status=200) @@ -1686,6 +1686,7 @@ def setUpTestData(cls): class TestEvaluationImportPersonsView(WebTestStaffMode): url = "/staff/semester/1/evaluation/1/person_management" + url2 = "/staff/semester/1/evaluation/2/person_management" filename_valid = os.path.join(settings.BASE_DIR, "staff/fixtures/valid_user_import.xls") filename_invalid = os.path.join(settings.BASE_DIR, "staff/fixtures/invalid_user_import.xls") filename_random = os.path.join(settings.BASE_DIR, "staff/fixtures/random.random") @@ -1694,9 +1695,11 @@ class TestEvaluationImportPersonsView(WebTestStaffMode): def setUpTestData(cls): semester = baker.make(Semester, pk=1) cls.manager = make_manager() - cls.evaluation = baker.make(Evaluation, pk=1, course=baker.make(Course, semester=semester)) - profiles = baker.make(UserProfile, _quantity=42) - cls.evaluation2 = baker.make(Evaluation, pk=2, course=baker.make(Course, semester=semester), participants=profiles) + profiles1 = baker.make(UserProfile, _quantity=31) + cls.evaluation = baker.make(Evaluation, pk=1, course=baker.make(Course, semester=semester), participants=profiles1) + profiles2 = baker.make(UserProfile, _quantity=42) + cls.evaluation2 = baker.make(Evaluation, pk=2, course=baker.make(Course, semester=semester), participants=profiles2) + cls.contribution2 = baker.make(Contribution, evaluation=cls.evaluation2, contributor=baker.make(UserProfile)) @classmethod def tearDown(cls): @@ -1722,6 +1725,22 @@ def test_import_valid_participants_file(self): page = self.app.get(self.url, user=self.manager) self.assertNotContains(page, 'Import previously uploaded file') + def test_replace_valid_participants_file(self): + page = self.app.get(self.url2, user=self.manager) + + form = page.forms["participant-import-form"] + form["excel_file"] = (self.filename_valid,) + page = form.submit(name="operation", value="test-participants") + + self.assertNotEqual(self.evaluation2.participants.count(), 2) + + form = page.forms["participant-import-form"] + form.submit(name="operation", value="import-replace-participants") + self.assertEqual(self.evaluation2.participants.count(), 2) + + page = self.app.get(self.url2, user=self.manager) + self.assertNotContains(page, 'Import previously uploaded file') + def test_copy_participants(self): page = self.app.get(self.url, user=self.manager) @@ -1733,6 +1752,17 @@ def test_copy_participants(self): self.assertEqual(self.evaluation.participants.count(), original_participant_count + self.evaluation2.participants.count()) + def test_replace_copy_participants(self): + page = self.app.get(self.url, user=self.manager) + + 
self.assertNotEqual(self.evaluation.participants.count(), self.evaluation2.participants.count()) + + form = page.forms["participant-copy-form"] + form["evaluation"] = str(self.evaluation2.pk) + page = form.submit(name="operation", value="copy-replace-participants") + + self.assertEqual(self.evaluation.participants.count(), self.evaluation2.participants.count()) + def test_import_valid_contributors_file(self): page = self.app.get(self.url, user=self.manager) @@ -1752,6 +1782,22 @@ def test_import_valid_contributors_file(self): page = self.app.get(self.url, user=self.manager) self.assertNotContains(page, 'Import previously uploaded file') + def test_replace_valid_contributors_file(self): + page = self.app.get(self.url2, user=self.manager) + + form = page.forms["contributor-import-form"] + form["excel_file"] = (self.filename_valid,) + page = form.submit(name="operation", value="test-contributors") + + self.assertNotEqual(UserProfile.objects.filter(contributions__evaluation=self.evaluation2).count(), 2) + + form = page.forms["contributor-import-form"] + form.submit(name="operation", value="import-replace-contributors") + self.assertEqual(UserProfile.objects.filter(contributions__evaluation=self.evaluation2).count(), 2) + + page = self.app.get(self.url, user=self.manager) + self.assertNotContains(page, 'Import previously uploaded file') + def test_copy_contributors(self): page = self.app.get(self.url, user=self.manager) @@ -1764,6 +1810,19 @@ def test_copy_contributors(self): new_contributor_count = UserProfile.objects.filter(contributions__evaluation=self.evaluation).count() self.assertEqual(new_contributor_count, original_contributor_count + UserProfile.objects.filter(contributions__evaluation=self.evaluation2).count()) + def test_copy_replace_contributors(self): + page = self.app.get(self.url, user=self.manager) + + old_contributor_count = UserProfile.objects.filter(contributions__evaluation=self.evaluation).count() + self.assertNotEqual(old_contributor_count, UserProfile.objects.filter(contributions__evaluation=self.evaluation2).count()) + + form = page.forms["contributor-copy-form"] + form["evaluation"] = str(self.evaluation2.pk) + page = form.submit(name="operation", value="copy-replace-contributors") + + new_contributor_count = UserProfile.objects.filter(contributions__evaluation=self.evaluation).count() + self.assertEqual(new_contributor_count, UserProfile.objects.filter(contributions__evaluation=self.evaluation2).count()) + def test_import_participants_error_handling(self): """ Tests whether errors given from the importer are displayed
Replace option for participant adding When copying or importing participants, there should be an option to replace the current participants instead of just adding the new ones.
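A minimal sketch of the requested "replace" semantics, assuming an evaluation with a `participants` many-to-many field (the helper names are illustrative, not the view code that later implemented this):

```python
def replace_participants(evaluation, new_participants):
    """Replace the current participants instead of just adding the new ones."""
    # .set() removes everyone not in new_participants and adds the missing ones;
    # it is equivalent to .clear() followed by .add(*new_participants).
    evaluation.participants.set(new_participants)


def add_participants(evaluation, new_participants):
    """The existing behaviour: keep the current participants and add the new ones."""
    evaluation.participants.add(*new_participants)
```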
2021-03-29T18:30:32
e-valuation/EvaP
1,583
e-valuation__EvaP-1583
[ "1571", "1571" ]
793d935d9ecb2635a0c1c373691b6a4280c739a9
diff --git a/evap/staff/forms.py b/evap/staff/forms.py --- a/evap/staff/forms.py +++ b/evap/staff/forms.py @@ -4,6 +4,7 @@ from django import forms from django.contrib.auth.models import Group from django.core.exceptions import SuspiciousOperation, ValidationError +from django.db import transaction from django.db.models import Max, Q from django.forms.models import BaseInlineFormSet from django.forms.widgets import CheckboxSelectMultiple @@ -147,8 +148,8 @@ class Meta: model = Semester fields = ("name_de", "name_en", "short_name_de", "short_name_en") - def save(self, *args, **kwargs): - semester = super().save(*args, **kwargs) + def save(self, commit=True): + semester = super().save(commit) if "short_name_en" in self.changed_data or "short_name_de" in self.changed_data: update_template_cache(semester.evaluations.filter(state__in=STATES_WITH_RESULT_TEMPLATE_CACHING)) return semester @@ -214,9 +215,7 @@ def clean(self): raise ValidationError(_("You must select two different course types.")) -class CourseForm(forms.ModelForm): - semester = forms.ModelChoiceField(Semester.objects.all(), disabled=True, required=False, widget=forms.HiddenInput()) - +class CourseFormMixin: class Meta: model = Course fields = ( @@ -232,17 +231,11 @@ class Meta: "responsibles": UserModelMultipleChoiceField, } - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self.fields["responsibles"].queryset = UserProfile.objects.exclude(is_active=False) - if self.instance.pk: - self.fields["responsibles"].queryset |= UserProfile.objects.filter( - pk__in=[user.pk for user in self.instance.responsibles.all()] - ) - - if not self.instance.can_be_edited_by_manager: - disable_all_fields(self) + def _set_responsibles_queryset(self, existing_course=None): + queryset = UserProfile.objects.exclude(is_active=False) + if existing_course: + queryset = (queryset | existing_course.responsibles.all()).distinct() + self.fields["responsibles"].queryset = queryset def validate_unique(self): super().validate_unique() @@ -257,6 +250,98 @@ def validate_unique(self): self.add_error(name_field, e) +class CourseForm(CourseFormMixin, forms.ModelForm): + semester = forms.ModelChoiceField(Semester.objects.all(), disabled=True, required=False, widget=forms.HiddenInput()) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._set_responsibles_queryset(self.instance if self.instance.pk else None) + if not self.instance.can_be_edited_by_manager: + disable_all_fields(self) + + +class CourseCopyForm(CourseFormMixin, forms.ModelForm): + semester = forms.ModelChoiceField(Semester.objects.all()) + vote_start_datetime = forms.DateTimeField(label=_("Start of evaluations"), localize=True) + vote_end_date = forms.DateField(label=_("Last day of evaluations"), localize=True) + + field_order = ["semester"] + + def __init__(self, data=None, instance: Course = None): + self.old_course = instance + opts = self._meta + initial = forms.models.model_to_dict(instance, opts.fields, opts.exclude) + super().__init__(data=data, initial=initial) + self._set_responsibles_queryset(instance) + + # To ensure we don't forget about copying a relevant field, we explicitly list copied and ignored fields and test against that + EVALUATION_COPIED_FIELDS = { + "name_de", + "name_en", + "weight", + "is_single_result", + "is_rewarded", + "is_midterm_evaluation", + "allow_editors_to_edit", + "wait_for_grade_upload_before_publishing", + } + + EVALUATION_EXCLUDED_FIELDS = { + "id", + "course", + "vote_start_datetime", + "vote_end_date", + 
"participants", + "contributions", + "state", + "can_publish_text_results", + "_participant_count", + "_voter_count", + "voters", + } + + CONTRIBUTION_COPIED_FIELDS = { + "contributor", + "role", + "textanswer_visibility", + "label", + "order", + } + + CONTRIBUTION_EXCLUDED_FIELDS = { + "id", + "evaluation", + "ratinganswercounter_set", + "textanswer_set", + "questionnaires", + } + + @transaction.atomic() + def save(self, commit=True): + new_course: Course = super().save() + # we need to create copies of evaluations and their participation as well + for old_evaluation in self.old_course.evaluations.exclude(is_single_result=True): + new_evaluation = Evaluation( + **{field: getattr(old_evaluation, field) for field in self.EVALUATION_COPIED_FIELDS}, + can_publish_text_results=False, + course=new_course, + vote_start_datetime=self.cleaned_data["vote_start_datetime"], + vote_end_date=self.cleaned_data["vote_end_date"], + ) + new_evaluation.save() + + new_evaluation.contributions.all().delete() # delete default general contribution + for old_contribution in old_evaluation.contributions.all(): + new_contribution = Contribution( + **{field: getattr(old_contribution, field) for field in self.CONTRIBUTION_COPIED_FIELDS}, + evaluation=new_evaluation, + ) + new_contribution.save() + new_contribution.questionnaires.set(old_contribution.questionnaires.all()) + + return new_course + + class EvaluationForm(forms.ModelForm): general_questionnaires = forms.ModelMultipleChoiceField( Questionnaire.objects.general_questionnaires().exclude(visibility=Questionnaire.Visibility.HIDDEN), @@ -581,7 +666,6 @@ def send(self, request): class RemindResponsibleForm(forms.Form): - to = UserModelChoiceField(None, required=False, disabled=True, label=_("To")) cc = UserModelMultipleChoiceField(None, required=False, disabled=True, label=_("CC")) subject = forms.CharField(label=_("Subject")) diff --git a/evap/staff/urls.py b/evap/staff/urls.py --- a/evap/staff/urls.py +++ b/evap/staff/urls.py @@ -21,6 +21,7 @@ path("semester/<int:semester_id>/grade_reminder", views.semester_grade_reminder, name="semester_grade_reminder"), path("semester/<int:semester_id>/course/create", views.course_create, name="course_create"), path("semester/<int:semester_id>/course/<int:course_id>/edit", views.course_edit, name="course_edit"), + path("semester/<int:semester_id>/course/<int:course_id>/copy", views.course_copy, name="course_copy"), path("semester/<int:semester_id>/evaluation/create", views.evaluation_create, name="evaluation_create"), path("semester/<int:semester_id>/evaluation/create/<int:course_id>", views.evaluation_create, name="evaluation_create"), path("semester/<int:semester_id>/evaluation/<int:evaluation_id>/edit", views.evaluation_edit, name="evaluation_edit"), diff --git a/evap/staff/views.py b/evap/staff/views.py --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -56,6 +56,7 @@ ContributionCopyFormSet, ContributionForm, ContributionFormSet, + CourseCopyForm, CourseForm, CourseTypeForm, CourseTypeMergeSelectionForm, @@ -950,6 +951,45 @@ def course_create(request, semester_id): return render(request, "staff_course_form.html", dict(semester=semester, course_form=course_form, editable=True)) +@manager_required +def course_copy(request, semester_id, course_id): + semester = get_object_or_404(Semester, id=semester_id) + course = get_object_or_404(Course, id=course_id, semester=semester) + course_form = CourseCopyForm(request.POST or None, instance=course) + + if course_form.is_valid(): + copied_course = course_form.save() + 
messages.success(request, _("Successfully copied course.")) + + inactive_users = UserProfile.objects.filter( + Q(contributions__evaluation__course=copied_course, is_active=False) + | Q(courses_responsible_for=copied_course, is_active=False) + ).distinct() + if inactive_users: + messages.warning( + request, + _("The accounts of the following contributors were reactivated:") + + " {accounts}".format(accounts=", ".join(user.full_name for user in inactive_users)), + ) + inactive_users.update(is_active=True) + + return redirect("staff:semester_view", copied_course.semester_id) + + evaluations = sorted(course.evaluations.exclude(is_single_result=True), key=lambda cr: cr.full_name) + return render( + request, + "staff_course_copyform.html", + dict( + course=course, + evaluations=evaluations, + semester=semester, + course_form=course_form, + editable=True, + disable_breadcrumb_course=True, + ), + ) + + @manager_required def course_edit(request, semester_id, course_id): semester = get_object_or_404(Semester, id=semester_id)
diff --git a/evap/staff/tests/test_forms.py b/evap/staff/tests/test_forms.py --- a/evap/staff/tests/test_forms.py +++ b/evap/staff/tests/test_forms.py @@ -29,6 +29,7 @@ ContributionCopyForm, ContributionForm, ContributionFormSet, + CourseCopyForm, CourseForm, EvaluationCopyForm, EvaluationEmailForm, @@ -711,6 +712,22 @@ def test_handle_multivaluedicts(self): self.assertEqual(Questionnaire.objects.filter(contributions=self.contribution2).count(), 2) +class CourseCopyFormTests(TestCase): + @staticmethod + def test_all_evaluation_attributes_covered(): + for field in Evaluation._meta.get_fields(): + assert field.name in ( + CourseCopyForm.EVALUATION_COPIED_FIELDS | CourseCopyForm.EVALUATION_EXCLUDED_FIELDS + ), "evaluation field {} is not considered by CourseCopyForm".format(field.name) + + @staticmethod + def test_all_contribution_attributes_covered(): + for field in Contribution._meta.get_fields(): + assert field.name in ( + CourseCopyForm.CONTRIBUTION_COPIED_FIELDS | CourseCopyForm.CONTRIBUTION_EXCLUDED_FIELDS + ), "contribution field {} is not considered by CourseCopyForm".format(field.name) + + class CourseFormTests(TestCase): def test_course_form_same_name(self): """ diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -39,7 +39,7 @@ ) from evap.results.tools import cache_results, get_results from evap.rewards.models import RewardPointGranting, SemesterActivation -from evap.staff.forms import ContributionCopyForm, ContributionCopyFormSet, EvaluationCopyForm +from evap.staff.forms import ContributionCopyForm, ContributionCopyFormSet, CourseCopyForm, EvaluationCopyForm from evap.staff.tests.utils import ( WebTestStaffMode, WebTestStaffModeWith200Check, @@ -1653,6 +1653,70 @@ def test_evaluation_copy(self): self.assertEqual(copied_evaluation.contributions.count(), 4) +class TestCourseCopyView(WebTestStaffMode): + @classmethod + def setUpTestData(cls): + cls.manager = make_manager() + cls.semester = baker.make(Semester) + cls.other_semester = baker.make(Semester) + degree = baker.make(Degree) + cls.responsibles = [ + baker.make(UserProfile, last_name="Muller"), + baker.make(UserProfile, is_active=False, last_name="Wolf"), + ] + cls.course = baker.make( + Course, + name_en="Some name", + semester=cls.semester, + degrees=[degree], + responsibles=cls.responsibles, + ) + cls.evaluation = baker.make( + Evaluation, + course=cls.course, + name_de="Das Original", + name_en="The Original", + ) + cls.general_questionnaires = baker.make(Questionnaire, _quantity=5) + cls.evaluation.general_contribution.questionnaires.set(cls.general_questionnaires) + baker.make( + Contribution, + evaluation=cls.evaluation, + _quantity=3, + _fill_optional=["contributor"], + ) + cls.url = f"/staff/semester/{cls.semester.id}/course/{cls.course.id}/copy" + + def test_copy_forms_are_used(self): + response = self.app.get(self.url, user=self.manager, status=200) + self.assertIsInstance(response.context["course_form"], CourseCopyForm) + + def test_course_copy(self): + response = self.app.get(self.url, user=self.manager, status=200) + form = response.forms["course-form"] + form["semester"] = self.other_semester.pk + form["vote_start_datetime"] = datetime.datetime(2099, 1, 1, 0, 0) + form["vote_end_date"] = datetime.date(2099, 12, 31) + + # check that the user activation is mentioned + self.assertFalse(self.responsibles[1].is_active) + response = form.submit().follow() + self.assertIn(self.responsibles[1].full_name, response) + + 
self.assertEqual(Course.objects.count(), 2) + copied_course = Course.objects.exclude(pk=self.course.pk).get() + self.assertEqual(copied_course.evaluations.count(), 1) + self.assertEqual(set(copied_course.responsibles.all()), set(self.responsibles)) + + copied_evaluation = copied_course.evaluations.get() + self.assertEqual(copied_evaluation.weight, self.evaluation.weight) + self.assertEqual( + set(copied_evaluation.general_contribution.questionnaires.all()), + set(self.evaluation.general_contribution.questionnaires.all()), + ) + self.assertFalse(copied_course.responsibles.filter(is_active=False).exists()) + + class TestCourseEditView(WebTestStaffMode): url = "/staff/semester/1/course/1/edit"
Allow copying evaluation to other semester When copying an evaluation, the copy will currently be created in the same semester as the original. This should be changed so that the semester can be selected in a field at the top of the form, defaulting to the current semester.
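As a simplified illustration of the requested change, the sketch below exposes the target semester on a Django ModelForm, defaulting to the semester of the original course. The form name is hypothetical and much narrower than the CourseCopyForm in the patch above, which additionally copies evaluations and contributions.

from django import forms

from evap.evaluation.models import Course, Semester


class CourseCopyTargetForm(forms.ModelForm):
    # hypothetical, simplified form; the project's real CourseCopyForm does more
    semester = forms.ModelChoiceField(queryset=Semester.objects.all())

    class Meta:
        model = Course
        fields = ["semester"]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.instance.pk is not None:
            # default the target semester to the semester of the original course
            self.fields["semester"].initial = self.instance.semester_id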
2021-04-12T20:12:21
e-valuation/EvaP
1,586
e-valuation__EvaP-1586
[ "1572" ]
4bb3d4ee74b07a398d7a7a3181d9bfa920a86b26
diff --git a/evap/staff/tools.py b/evap/staff/tools.py --- a/evap/staff/tools.py +++ b/evap/staff/tools.py @@ -1,4 +1,5 @@ import os +from datetime import date, datetime, timedelta from enum import Enum from django.contrib import messages @@ -294,8 +295,14 @@ def merge_users(main_user, other_user, preview=False): def find_unreviewed_evaluations(semester, excluded): + # as evaluations are open for an offset of hours after vote_end_datetime, the evaluations ending yesterday are also excluded during offset + exclude_date = date.today() + if datetime.now().hour < settings.EVALUATION_END_OFFSET_HOURS: + exclude_date -= timedelta(days=1) + return semester.evaluations.exclude(pk__in=excluded) \ .exclude(state='published') \ + .exclude(vote_end_date__gte=exclude_date) \ .exclude(can_publish_text_results=False) \ .filter(contributions__textanswer_set__state=TextAnswer.State.NOT_REVIEWED) \ .annotate(num_unreviewed_textanswers=Count("contributions__textanswer_set")) \
diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -1909,7 +1909,7 @@ def setUpTestData(cls): cls.evaluation = baker.make( Evaluation, pk=1, - course=baker.make(Course, semester=semester), + course__semester=semester, participants=[student1, cls.student2], voters=[student1], state="in_evaluation" @@ -1929,6 +1929,29 @@ def setUpTestData(cls): cls.answer = 'should show up' baker.make(TextAnswer, contribution=contribution, question=question, answer=cls.answer) + cls.evaluation2 = baker.make( + Evaluation, + course__semester=semester, + participants=[student1], + voters=[student1, cls.student2], + vote_start_datetime=datetime.datetime.now() - datetime.timedelta(days=5), + vote_end_date=datetime.date.today() - datetime.timedelta(days=4), + can_publish_text_results=True + ) + + contribution2 = baker.make( + Contribution, + evaluation=cls.evaluation2, + contributor=baker.make(UserProfile), + questionnaires=[questionnaire], + ) + cls.text_answer = baker.make( + TextAnswer, + contribution=contribution2, + question=question, + answer='test answer text', + ) + def test_textanswers_showing_up(self): # in an evaluation with only one voter the view should not be available with run_in_staff_mode(self): @@ -1953,6 +1976,22 @@ def test_textanswers_full_view(self): page = self.app.get(self.url + '?view=full', user=self.manager, status=200) self.assertContains(page, self.answer) + # use offset of more than 25 hours to make sure the test doesn't fail even on combined time zone change and leap second + @override_settings(EVALUATION_END_OFFSET_HOURS=26) + def test_exclude_unfinished_evaluations(self): + let_user_vote_for_evaluation(self.app, self.student2, self.evaluation) + with run_in_staff_mode(self): + page = self.app.get(self.url, user=self.manager, status=200) + # evaluation2 is finished and should show up + self.assertContains(page, self.evaluation2.full_name) + + self.evaluation2.vote_end_date = datetime.date.today() - datetime.timedelta(days=1) + self.evaluation2.save() + with run_in_staff_mode(self): + page = self.app.get(self.url, user=self.manager, status=200) + # unfinished because still in EVALUATION_END_OFFSET_HOURS + self.assertNotContains(page, self.evaluation2.full_name) + class TestEvaluationTextAnswerEditView(WebTest): @classmethod
Filter evaluations on fast review When finishing the review of an evaluation's text answers in fast mode, only evaluations whose evaluation period has already ended should be selected for continuing the review. Currently, evaluations that are still in their evaluation period are also shown.
why? :) Running evaluations are usually not reviewed, to ensure that the review of all text answers in an evaluation is done as a whole. This gives the evaluation team a complete overview of the course, which can be summarized for reports and feedback discussions. If staff wants to review a specific evaluation after the change anyway, they can still open the review for this evaluation.
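A minimal, self-contained sketch of the cutoff logic from the patch above: because evaluations stay open for a configured number of hours past their end date, evaluations that ended yesterday must also be excluded from fast review while that offset has not elapsed. The constant value below is an assumption; EvaP reads EVALUATION_END_OFFSET_HOURS from settings.

from datetime import date, datetime, timedelta

EVALUATION_END_OFFSET_HOURS = 3  # assumed value for the example


def textanswer_review_cutoff(now: datetime) -> date:
    # evaluations with vote_end_date on or after this date count as still running
    cutoff = now.date()
    if now.hour < EVALUATION_END_OFFSET_HOURS:
        # within the grace period after midnight, yesterday's evaluations are still open
        cutoff -= timedelta(days=1)
    return cutoff


assert textanswer_review_cutoff(datetime(2021, 5, 1, 1, 30)) == date(2021, 4, 30)
assert textanswer_review_cutoff(datetime(2021, 5, 1, 12, 0)) == date(2021, 5, 1)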
2021-04-19T16:19:54
e-valuation/EvaP
1,593
e-valuation__EvaP-1593
[ "1570" ]
e20a1b4c553938af23d95c073717966a7c6eb49f
diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py --- a/evap/evaluation/views.py +++ b/evap/evaluation/views.py @@ -163,7 +163,8 @@ def contact(request): mail = EmailMessage( subject=subject, body="{}\n{}\n\n{}".format(title, request.user.email, message), - to=[settings.CONTACT_EMAIL]) + to=[settings.CONTACT_EMAIL], + reply_to=[request.user.email]) try: mail.send() logger.info('Sent contact email: \n{}\n'.format(mail.message()))
diff --git a/evap/evaluation/tests/test_views.py b/evap/evaluation/tests/test_views.py --- a/evap/evaluation/tests/test_views.py +++ b/evap/evaluation/tests/test_views.py @@ -74,7 +74,7 @@ def test_sends_mail(self): user=user, ) self.assertEqual(len(mail.outbox), 1) - + self.assertTrue(mail.outbox[0].reply_to == ["[email protected]"]) class TestChangeLanguageView(WebTest): url = '/set_lang'
Set reply-to header For all emails sent via one of the contact modals, the reply-to header should be set to the address of the person sending the request.
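Setting a Reply-To header is supported directly by Django's EmailMessage; the snippet below only illustrates the mechanism, and the addresses are placeholders rather than project values.

from django.core.mail import EmailMessage

mail = EmailMessage(
    subject="[EvaP] Contact request",
    body="message text",
    to=["[email protected]"],           # the team inbox receiving the request
    reply_to=["[email protected]"],  # the person who submitted the request
)
mail.send()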
2021-06-21T17:01:49
e-valuation/EvaP
1,602
e-valuation__EvaP-1602
[ "1595" ]
7fd6a9aac102f2dc040271f8c2cf3cc05f0c6ec8
diff --git a/evap/staff/importers.py b/evap/staff/importers.py --- a/evap/staff/importers.py +++ b/evap/staff/importers.py @@ -12,7 +12,7 @@ from evap.evaluation.models import Contribution, Course, CourseType, Degree, Evaluation, UserProfile from evap.evaluation.tools import clean_email -from evap.staff.tools import create_user_list_html_string_for_message, ImportType +from evap.staff.tools import create_user_list_html_string_for_message, ImportType, merge_dictionaries_of_sets def sorted_messages(messages): @@ -405,6 +405,9 @@ def process_evaluation(self, evaluation_data, sheet, row): ).format(sheet, row + 1, evaluation_data.name_en) ) self.evaluations[evaluation_id].degrees |= evaluation_data.degrees + self.evaluations[evaluation_id].errors = merge_dictionaries_of_sets( + self.evaluations[evaluation_id].errors, evaluation_data.errors + ) elif evaluation_data != self.evaluations[evaluation_id]: self.errors[ImporterError.COURSE].append( _('Sheet "{}", row {}: The course\'s "{}" data differs from it\'s data in a previous row.').format( @@ -420,8 +423,8 @@ def consolidate_enrollment_data(self): self.enrollments.append((evaluation_data, student_data)) def check_evaluation_data_correctness(self, semester): - degree_names = set() - course_type_names = set() + missing_degree_names = set() + missing_course_type_names = set() for evaluation_data in self.evaluations.values(): if Course.objects.filter(semester=semester, name_en=evaluation_data.name_en).exists(): self.errors[ImporterError.COURSE].append( @@ -432,9 +435,9 @@ def check_evaluation_data_correctness(self, semester): _("Course {} does already exist in this semester.").format(evaluation_data.name_de) ) if "degrees" in evaluation_data.errors: - degree_names |= evaluation_data.errors["degrees"] + missing_degree_names |= evaluation_data.errors["degrees"] if "course_type" in evaluation_data.errors: - course_type_names.add(evaluation_data.errors["course_type"]) + missing_course_type_names.add(evaluation_data.errors["course_type"]) if "is_graded" in evaluation_data.errors: self.errors[ImporterError.IS_GRADED].append( _('"is_graded" of course {} is {}, but must be {} or {}').format( @@ -445,13 +448,13 @@ def check_evaluation_data_correctness(self, semester): ) ) - for degree_name in degree_names: + for degree_name in missing_degree_names: self.errors[ImporterError.DEGREE_MISSING].append( _('Error: No degree is associated with the import name "{}". Please manually create it first.').format( degree_name ) ) - for course_type_name in course_type_names: + for course_type_name in missing_course_type_names: self.errors[ImporterError.COURSE_TYPE_MISSING].append( _( 'Error: No course type is associated with the import name "{}". Please manually create it first.' diff --git a/evap/staff/tools.py b/evap/staff/tools.py --- a/evap/staff/tools.py +++ b/evap/staff/tools.py @@ -1,5 +1,6 @@ import os from datetime import date, datetime, timedelta +from typing import Dict, Set, Any from enum import Enum from django.contrib import messages @@ -368,3 +369,11 @@ def remove_user_from_represented_and_ccing_users(user, ignored_users=None, test_ cc_user.cc_users.remove(user) remove_messages.append(_("Removed {} from the CC users of {}.").format(user.full_name, cc_user.full_name)) return remove_messages + + +def merge_dictionaries_of_sets(a: Dict[Any, Set], b: Dict[Any, Set]) -> Dict[Any, Set]: + return { + **a, + **b, + **({key: (a[key] | b[key]) for key in a if key in b}), + }
diff --git a/evap/staff/fixtures/excel_files_test_data.py b/evap/staff/fixtures/excel_files_test_data.py --- a/evap/staff/fixtures/excel_files_test_data.py +++ b/evap/staff/fixtures/excel_files_test_data.py @@ -132,6 +132,14 @@ ] } +test_unknown_degree_error_filedata = { + 'Sheet 1': [ + ['Degree', 'Student last name', 'Student first name', 'Student email address', 'Course kind', 'Course is graded', 'Course name (de)', 'Course name (en)', 'Responsible title', 'Responsible last name', 'Responsible first name', 'Responsible email address'], + ['bachelor', 'Doe', 'John', '[email protected]', 'Vorlesung', 'yes', 'Neovim kompilieren', 'compiling Neovim', 'Prof. Dr.', 'Prorsus', 'Christoph', '[email protected]'], + ['beginner', 'Roe', 'Jane', '[email protected]', 'Vorlesung', 'yes', 'Neovim kompilieren', 'compiling Neovim', 'Prof. Dr.', 'Prorsus', 'Christoph', '[email protected]'], + ], +} + valid_user_courses_import_filedata = { 'MA Belegungen': [ ['Degree', 'Student last name', 'Student first name', 'Student email address', 'Course kind', 'Course is graded', 'Course name (de)', 'Course name (en)', 'Responsible title', 'Responsible last name', 'Responsible first name', 'Responsible email address'], diff --git a/evap/staff/tests/test_importers.py b/evap/staff/tests/test_importers.py --- a/evap/staff/tests/test_importers.py +++ b/evap/staff/tests/test_importers.py @@ -359,6 +359,13 @@ def test_duplicate_course_error(self): }, ) + def test_unknown_degree_error(self): + excel_content = excel_data.create_memory_excel_file(excel_data.test_unknown_degree_error_filedata) + __, __, errors = EnrollmentImporter.process(excel_content, baker.make(Semester), None, None, test_run=False) + missing_degree_errors = errors[ImporterError.DEGREE_MISSING] + self.assertEqual(len(missing_degree_errors), 1) + self.assertIn("manually create it first", missing_degree_errors[0]) + def test_replace_consecutive_and_trailing_spaces(self): excel_content = excel_data.create_memory_excel_file( excel_data.test_enrollment_data_consecutive_and_trailing_spaces_filedata diff --git a/evap/staff/tests/test_tools.py b/evap/staff/tests/test_tools.py --- a/evap/staff/tests/test_tools.py +++ b/evap/staff/tests/test_tools.py @@ -8,7 +8,12 @@ from evap.evaluation.tests.tools import WebTest from evap.evaluation.models import Contribution, Course, Evaluation, UserProfile from evap.rewards.models import RewardPointGranting, RewardPointRedemption -from evap.staff.tools import merge_users, delete_navbar_cache_for_users, remove_user_from_represented_and_ccing_users +from evap.staff.tools import ( + merge_users, + delete_navbar_cache_for_users, + remove_user_from_represented_and_ccing_users, + merge_dictionaries_of_sets, +) class NavbarCacheTest(WebTest): @@ -258,3 +263,9 @@ def test_do_nothing_if_test_run(self): self.assertEqual([set(user1.delegates.all()), set(user1.cc_users.all())], [{delete_user}, {delete_user}]) self.assertEqual([set(user2.delegates.all()), set(user2.cc_users.all())], [{delete_user}, {delete_user}]) self.assertEqual(len(messages), 4) + + +class MiscellaneousToolsTest(TestCase): + def test_merge_dictionaries_of_sets(self): + self.assertEqual(merge_dictionaries_of_sets({"a": set([1])}, {"b": set([2])}), {"a": set([1]), "b": set([2])}) + self.assertEqual(merge_dictionaries_of_sets({"a": set([1])}, {"a": set([2])}), {"a": set([1, 2])})
Missing degree check in enrollment import When importing enrollment data, the importer must check whether the degrees used in the uploaded file already exist (and show an error message if a degree doesn't yet exist and must be manually created). However, the error message for non-existent degrees currently only works correctly if the missing degree appears in the first occurrence of an evaluation. If the first occurrence has a valid degree and any of the following degrees does not exist, no error will be shown and the import will fail silently. Instead, an error message should be shown if any entry in the imported file uses a non-existent degree.
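The core of the fix is merging per-row error sets instead of keeping only the first row's errors. The helper below has the same semantics as the merge_dictionaries_of_sets added in the patch; the example keys and values are made up for illustration.

def merge_dictionaries_of_sets(a, b):
    # values of keys present in both dictionaries are unioned, all other keys are kept as-is
    return {**a, **b, **{key: (a[key] | b[key]) for key in a if key in b}}


errors_row_1 = {"degrees": {"beginner"}}
errors_row_2 = {"degrees": {"expert"}, "course_type": {"Seminar"}}
assert merge_dictionaries_of_sets(errors_row_1, errors_row_2) == {
    "degrees": {"beginner", "expert"},
    "course_type": {"Seminar"},
}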
2021-07-26T17:49:26
e-valuation/EvaP
1,614
e-valuation__EvaP-1614
[ "1594" ]
867a88c08f2af6cf9856e1d9b6e536e9dba18239
diff --git a/evap/evaluation/models_logging.py b/evap/evaluation/models_logging.py --- a/evap/evaluation/models_logging.py +++ b/evap/evaluation/models_logging.py @@ -131,6 +131,7 @@ class Meta: def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._logentry = None + self._m2m_changes = defaultdict(lambda: defaultdict(list)) def _as_dict(self): """ @@ -182,8 +183,12 @@ def _get_change_data(self, action_type: InstanceActionType): return changes - def log_m2m_change(self, changes): - self._update_log(changes, InstanceActionType.CHANGE) + def log_m2m_change(self, field_name, action_type: FieldActionType, change_list): + # This might be called multiple times with cumulating changes + # But this is fine, since the old changes will be included in the latest log update + # See https://github.com/e-valuation/EvaP/issues/1594 + self._m2m_changes[field_name][action_type] += change_list + self._update_log(self._m2m_changes, InstanceActionType.CHANGE) def log_instance_create(self): changes = self._get_change_data(InstanceActionType.CREATE) @@ -304,13 +309,9 @@ def _m2m_changed(sender, instance, action, reverse, model, pk_set, **kwargs): # if field_name in instance.unlogged_fields: return - m2m_changes = defaultdict(lambda: defaultdict(list)) if action == "pre_remove": - m2m_changes[field_name][FieldActionType.M2M_REMOVE] += list(pk_set) + instance.log_m2m_change(field_name, FieldActionType.M2M_REMOVE, list(pk_set)) elif action == "pre_add": - m2m_changes[field_name][FieldActionType.M2M_ADD] += list(pk_set) + instance.log_m2m_change(field_name, FieldActionType.M2M_ADD, list(pk_set)) elif action == "pre_clear": - m2m_changes[field_name][FieldActionType.M2M_CLEAR] = [] - - if m2m_changes: - instance.log_m2m_change(m2m_changes) + instance.log_m2m_change(field_name, FieldActionType.M2M_CLEAR, [])
diff --git a/evap/evaluation/tests/test_models_logging.py b/evap/evaluation/tests/test_models_logging.py --- a/evap/evaluation/tests/test_models_logging.py +++ b/evap/evaluation/tests/test_models_logging.py @@ -4,7 +4,7 @@ from django.utils.formats import localize from model_bakery import baker -from evap.evaluation.models import Evaluation, Course, Contribution, Questionnaire +from evap.evaluation.models import Evaluation, Course, Contribution, Questionnaire, UserProfile from evap.evaluation.models_logging import FieldAction @@ -82,3 +82,18 @@ def test_none_value_not_included(self): baker.make(Contribution, evaluation=self.evaluation, label=None) self.assertNotIn("label", self.evaluation.related_logentries().order_by("id").last().data) + + def test_simultaneous_add_and_remove(self): + # Regression test for https://github.com/e-valuation/EvaP/issues/1594 + participant1 = baker.make(UserProfile) + participant2 = baker.make(UserProfile) + self.evaluation.participants.add(participant1) + # Refresh reference to evaluation, to force new log entry + self.evaluation = Evaluation.objects.get(pk=self.evaluation.pk) + + self.evaluation.participants.remove(participant1) + self.evaluation.participants.add(participant2) + self.assertEqual( + self.evaluation.related_logentries().order_by("id").last().data, + {"participants": {"add": [participant2.id], "remove": [participant1.id]}}, + )
Log M2M Clear With #1579 participants and contributors of an evaluation can be replaced. In the code, the participants/contributors are cleared first and then the new ones are added. The log only lists the newly added participants/contributors and does not show an entry for the deleted ones. The deleted participants/contributors should also be logged.
Also, when removing and adding participants in the evaluation form at the same time, removed participants are not shown in the log.
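The fix accumulates m2m changes per request instead of passing each signal's changes to the log update in isolation, so a remove and an add on the same field both end up in one log entry. A toy accumulator showing the resulting shape (class and method signature are illustrative, not EvaP's LoggedModel API):

from collections import defaultdict


class M2MChangeLog:
    def __init__(self):
        self._changes = defaultdict(lambda: defaultdict(list))

    def log_m2m_change(self, field_name, action, pks):
        # accumulate instead of overwrite, so every action of the request is kept
        self._changes[field_name][action] += list(pks)
        return {field: dict(actions) for field, actions in self._changes.items()}


log = M2MChangeLog()
log.log_m2m_change("participants", "remove", [1])
assert log.log_m2m_change("participants", "add", [2]) == {
    "participants": {"remove": [1], "add": [2]}
}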
2021-08-09T16:46:40
e-valuation/EvaP
1,627
e-valuation__EvaP-1627
[ "1617" ]
adcc893fed0f5a505da178ce78ad4b0b07832430
diff --git a/evap/staff/forms.py b/evap/staff/forms.py --- a/evap/staff/forms.py +++ b/evap/staff/forms.py @@ -535,8 +535,8 @@ class EvaluationEmailForm(forms.Form): widget=forms.CheckboxSelectMultiple(), choices=EmailTemplate.Recipients.choices, label=_("Send email to") ) subject = forms.CharField(label=_("Subject")) - plain_content = forms.CharField(widget=forms.Textarea(), label=_("Plain Message")) - html_content = forms.CharField(widget=forms.Textarea(), label=_("HTML Message")) + plain_content = forms.CharField(widget=forms.Textarea(), label=_("Plain Text")) + html_content = forms.CharField(widget=forms.Textarea(), label=_("HTML")) def __init__(self, *args, evaluation, export=False, **kwargs): super().__init__(*args, **kwargs) @@ -575,8 +575,8 @@ class RemindResponsibleForm(forms.Form): to = UserModelChoiceField(None, required=False, disabled=True, label=_("To")) cc = UserModelMultipleChoiceField(None, required=False, disabled=True, label=_("CC")) subject = forms.CharField(label=_("Subject")) - plain_content = forms.CharField(widget=forms.Textarea(), label=_("Plain Message")) - html_content = forms.CharField(widget=forms.Textarea(), label=_("HTML Message")) + plain_content = forms.CharField(widget=forms.Textarea(), label=_("Plain Text")) + html_content = forms.CharField(widget=forms.Textarea(), label=_("HTML")) def __init__(self, *args, responsible, **kwargs): super().__init__(*args, **kwargs)
diff --git a/evap/static/ts/tests/frontend/student-vote.ts b/evap/static/ts/tests/frontend/student-vote.ts --- a/evap/static/ts/tests/frontend/student-vote.ts +++ b/evap/static/ts/tests/frontend/student-vote.ts @@ -54,7 +54,7 @@ test("checking bottom confirm checkbox check top but keeps bottom visible", page test("resolving submit errors clears warning", pageHandler( "student/vote/1/submit_errors.html", async page => { - const checkbox = (await page.$(".choice-error input[type=radio][value='3']"))!; + const checkbox = (await page.$(".choice-error + input[type=radio][value='3']"))!; await checkbox.click(); const row = await queryClosest(checkbox, ".row"); expect(await row.$$(".choice-error")).toHaveLength(0); @@ -64,16 +64,14 @@ test("resolving submit errors clears warning", pageHandler( test("skip contributor", pageHandler( "student/vote/1/normal.html", async page => { - const voteArea = (await page.$(".card .collapse"))!; - const button = (await queryClosest(voteArea, ".card").then(card => card.$("button")))!; + const button = (await page.$("[data-mark-no-answers-for]"))!; + const voteArea = (await queryClosest(button, ".card").then(card => card.$(".collapse")))! await button.click(); for (const checkbox of await voteArea.$$("input[type=radio]:not([value='6'])")) { await expect(checkbox).not.toBeChecked(); - await expect(await queryParent(checkbox)).not.toHaveClass("active"); } for (const checkbox of await voteArea.$$("input[type=radio][value='6']")) { await expect(checkbox).toBeChecked(); - await expect(await queryParent(checkbox)).toHaveClass("active"); } await expect(voteArea).toHaveClass("collapsing"); }, @@ -82,8 +80,8 @@ test("skip contributor", pageHandler( test("skipping contributor clears warning", pageHandler( "student/vote/1/submit_errors.html", async page => { - const voteArea = (await page.$(".card .collapse"))!; - const button = (await queryClosest(voteArea, ".card").then(card => card.$("button")))!; + const button = (await page.$("[data-mark-no-answers-for]"))!; + const voteArea = (await queryClosest(button, ".card").then(card => card.$(".collapse")))!; await button.click(); await expect(await voteArea.$$(".choice-error")).toHaveLength(0); }, diff --git a/evap/static/ts/tests/utils/page.ts b/evap/static/ts/tests/utils/page.ts --- a/evap/static/ts/tests/utils/page.ts +++ b/evap/static/ts/tests/utils/page.ts @@ -44,6 +44,7 @@ export function pageHandler(fileName: string, fn: (page: Page) => void): (done?: await page.evaluate(() => { localStorage.clear(); }); + await page.close(); done!(reason); } }
Bootstrap update Update Bootstrap to version 5
2021-09-19T14:52:58
e-valuation/EvaP
1,632
e-valuation__EvaP-1632
[ "1628" ]
2e985c6cee7cbccc354b13993200530077971a88
diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -1807,9 +1807,13 @@ def send_to_users_in_evaluations(self, evaluations, recipient_groups, use_cc, re for user, user_evaluations in user_evaluation_map.items(): subject_params = {} + evaluations_with_date = dict() + for evaluation in user_evaluations: + evaluations_with_date[evaluation] = (evaluation.vote_end_date - date.today()).days + evaluations_with_date = sorted(evaluations_with_date.items(), key=lambda tup: tup[0].full_name) body_params = { "user": user, - "evaluations": user_evaluations, + "evaluations": evaluations_with_date, "due_evaluations": user.get_sorted_due_evaluations(), } self.send_to_user(user, subject_params, body_params, use_cc=use_cc, request=request) @@ -1934,7 +1938,7 @@ def send_contributor_publish_notifications(cls, evaluations, template=None): evaluations_per_contributor[textanswer.contribution.contributor].add(evaluation) for contributor, evaluation_set in evaluations_per_contributor.items(): - body_params = {"user": contributor, "evaluations": list(evaluation_set)} + body_params = {"user": contributor, "evaluations": evaluation_set} template.send_to_user(contributor, {}, body_params, use_cc=True) @classmethod
diff --git a/evap/development/fixtures/test_data.json b/evap/development/fixtures/test_data.json --- a/evap/development/fixtures/test_data.json +++ b/evap/development/fixtures/test_data.json @@ -128349,8 +128349,8 @@ "fields": { "name": "Evaluation Started", "subject": "[EvaP] Evaluierung hat begonnen / Evaluation started", - "plain_content": "{% load evaluation_filters %}(English version below)\r\n\r\n\r\nHallo {{ user.first_name }},\r\n\r\nfür die folgenden Evaluierungen hat die Evaluierungsphase begonnen:\r\n{% for evaluation in evaluations|order_by:\"full_name_de\" %} - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\nDu kannst dein Feedback auf EvaP{% if not user.needs_login_key %} ({{ page_url }}){% endif %} abgeben, die Dozenten und wir freuen uns über deine Bewertung. Bei Fragen und Rückmeldungen kannst du dich jederzeit an uns wenden ({{ contact_email }}).\r\n{% if user.needs_login_key %}\r\nKlicke hier, um dich anzumelden: {{ login_url }}\r\n{% endif %}\r\n{% if due_evaluations|length > 1%}Diese Evaluierungen warten auf deine Bewertung:\r\n{% for evaluation, due_in_days in due_evaluations %} - {{ evaluation.full_name_de }} (endet {% if due_in_days == 0 %}heute{% elif due_in_days == 1 %}morgen{% else %}in {{ due_in_days }} Tagen{% endif %})\r\n{% endfor %}{% endif %}\r\nVielen Dank für deine Mühe und viele Grüße,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear {{ user.first_name }},\r\n\r\nThe evaluation period for the following evaluations just started:\r\n{% for evaluation in evaluations|order_by:\"full_name_en\" %} - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\nYou can evaluate them on EvaP{% if not user.needs_login_key %} ({{ page_url }}){% endif %}. The lecturers and we are looking forward to receive your feedback. If you have any questions or feedback, please let us know ({{ contact_email }}).\r\n{% if user.needs_login_key %}\r\nClick here to login: {{ login_url }}\r\n{% endif %}\r\n{% if due_evaluations|length > 1%}These evaluations are waiting for your feedback:\r\n{% for evaluation, due_in_days in due_evaluations %} - {{ evaluation.full_name_en }} (ends {% if due_in_days == 0 %}today{% elif due_in_days == 1 %}tomorrow{% else %}in {{ due_in_days }} days{% endif %})\r\n{% endfor %}{% endif %}\r\nThank you very much for your efforts and kind regards,\r\nthe Evaluation Team\r\n\r\n(This is an automated message.)", - "html_content": "{% load evaluation_filters %}\r\n<i>(English version below)</i><br /><br /><br />\r\n\r\n\r\nHallo {{ user.first_name }},<br /><br />\r\n\r\nfür die folgenden Evaluierungen hat die Evaluierungsphase begonnen:\r\n<ul>{% for evaluation in evaluations|order_by:\"full_name_de\" %}\r\n<li>{{ evaluation.full_name_de }}</li>\r\n{% endfor %}</ul><br />\r\n\r\nDu kannst dein Feedback auf EvaP{% if not user.needs_login_key %} (<a href=\"{{ page_url }}\">{{ page_url }}</a>){% endif %} abgeben, die Dozenten und wir freuen uns über deine Bewertung. 
Bei Fragen und Rückmeldungen kannst du dich jederzeit an uns wenden (<a href=\"mailto:{{ contact_email }}\">{{ contact_email }}</a>).<br /><br />\r\n\r\n{% if user.needs_login_key %}\r\nKlicke hier, um dich anzumelden: <a href=\"{{ login_url }}\">{{ login_url }}</a><br /><br />\r\n{% endif %}\r\n\r\n{% if due_evaluations|length > 1%}\r\nDiese Evaluierungen warten auf deine Bewertung:\r\n<ul>{% for evaluation, due_in_days in due_evaluations %}\r\n<li>{{ evaluation.full_name_de }} (endet {% if due_in_days == 0 %}heute{% elif due_in_days == 1 %}morgen{% else %}in {{ due_in_days }} Tagen{% endif %})</li>\r\n{% endfor %}</ul><br />\r\n{% endif %}\r\n\r\nVielen Dank für deine Mühe und viele Grüße,<br />\r\ndas Evaluierungsteam<br /><br />\r\n\r\n<i>(Dies ist eine automatisch versendete E-Mail.)</i><br /><br />\r\n\r\n<hr><br /><br />\r\n\r\nDear {{ user.first_name }},<br /><br />\r\n\r\nThe evaluation period for the following evaluations just started:\r\n<ul>{% for evaluation in evaluations|order_by:\"full_name_en\" %}\r\n<li>{{ evaluation.full_name_en }}</li>\r\n{% endfor %}</ul><br />\r\n\r\nYou can evaluate them on EvaP{% if not user.needs_login_key %} (<a href=\"{{ page_url }}\">{{ page_url }}</a>){% endif %}. The lecturers and we are looking forward to receive your feedback. If you have any questions or feedback, please let us know (<a href=\"mailto:{{ contact_email }}\">{{ contact_email }}</a>).<br /><br />\r\n\r\n{% if user.needs_login_key %}\r\nClick here to login: <a href=\"{{ login_url }}\">{{ login_url }}</a><br /><br />\r\n{% endif %}\r\n{% if due_evaluations|length > 1%}\r\nThese evaluations are waiting for your feedback:\r\n<ul>{% for evaluation, due_in_days in due_evaluations %}\r\n<li>{{ evaluation.full_name_en }} (ends {% if due_in_days == 0 %}today{% elif due_in_days == 1 %}tomorrow{% else %}in {{ due_in_days }} days{% endif %})</li>\r\n{% endfor %}</ul><br />\r\n{% endif %}\r\n\r\nThank you very much for your efforts and kind regards,<br />\r\nthe Evaluation Team<br /><br />\r\n\r\n<i>(This is an automated message.)</i>" + "plain_content": "{% load evaluation_filters %}(English version below)\r\n\r\n\r\nHallo {{ user.first_name }},\r\n\r\nfür die folgenden Evaluierungen hat die Evaluierungsphase begonnen:\r\n{% for evaluation, due_in_days in evaluations %} - {{ evaluation.full_name_de }} (endet {% if due_in_days == 0 %}heute{% elif due_in_days == 1 %}morgen{% else %}in {{ due_in_days }} Tagen{% endif %})\r\n{% endfor %}\r\nDu kannst dein Feedback auf EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %} abgeben, die Dozenten und wir freuen uns über deine Bewertung. 
Bei Fragen und Rückmeldungen kannst du dich jederzeit an uns wenden ([email protected]).\r\n\r\n{% if user.needs_login_key %}Klicke hier, um dich anzumelden: {{ login_url }}\r\n{% endif %}{% if due_evaluations|length > 1%}Diese Evaluierungen warten auf deine Bewertung:\r\n{% for evaluation, due_in_days in due_evaluations %} - {{ evaluation.full_name_de }} (endet {% if due_in_days == 0 %}heute{% elif due_in_days == 1 %}morgen{% else %}in {{ due_in_days }} Tagen{% endif %})\r\n{% endfor %}{% endif %}\r\nVielen Dank für deine Mühe und viele Grüße,\r\ndas Evaluierungsteam\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\nDear {{ user.first_name }},\r\n\r\nThe evaluation period for the following evaluations just started:\r\n{% for evaluation, due_in_days in evaluations%} - {{ evaluation.full_name_en }} (ends {% if due_in_days == 0 %}today{% elif due_in_days == 1 %}tomorrow{% else %}in {{ due_in_days }} days{% endif %})\r\n{% endfor %}\r\nYou can evaluate them on EvaP{% if not user.needs_login_key %} (https://evap.hpi.de){% endif %}. The lecturers and we are looking forward to receive your feedback. If you have any questions or feedback, please let us know ([email protected]).\r\n\r\n{% if user.needs_login_key %}Click here to login: {{ login_url }}\r\n{% endif %}{% if due_evaluations|length > 1%}These evaluations are waiting for your feedback:\r\n{% for evaluation, due_in_days in due_evaluations %} - {{ evaluation.full_name_en }} (ends {% if due_in_days == 0 %}today{% elif due_in_days == 1 %}tomorrow{% else %}in {{ due_in_days }} days{% endif %})\r\n{% endfor %}{% endif %}\r\nThank you very much for your efforts and kind regards,\r\nthe Evaluation Team\r\n\r\n(This is an automated message.)", + "html_content": "{% load evaluation_filters %}\r\n<i>(English version below)</i><br /><br /><br />\r\n\r\n\r\nHallo {{ user.first_name }},<br /><br />\r\n\r\nfür die folgenden Evaluierungen hat die Evaluierungsphase begonnen:\r\n<ul>{% for evaluation, due_in_days in evaluations %} \r\n<li>{{ evaluation.full_name_de }}</li>\r\n(endet {% if due_in_days == 0 %}heute{% elif due_in_days == 1 %}morgen{% else %}in {{ due_in_days }} Tagen{% endif %})\r\n{% endfor %}</ul><br />\r\n\r\nDu kannst dein Feedback auf EvaP{% if not user.needs_login_key %} (<a href=\"{{ page_url }}\">{{ page_url }}</a>){% endif %} abgeben, die Dozenten und wir freuen uns über deine Bewertung. 
Bei Fragen und Rückmeldungen kannst du dich jederzeit an uns wenden (<a href=\"mailto:{{ contact_email }}\">{{ contact_email }}</a>).<br /><br />\r\n\r\n{% if user.needs_login_key %}\r\nKlicke hier, um dich anzumelden: <a href=\"{{ login_url }}\">{{ login_url }}</a><br /><br />\r\n{% endif %}\r\n\r\n{% if due_evaluations|length > 1%}\r\nDiese Evaluierungen warten auf deine Bewertung:\r\n<ul>{% for evaluation, due_in_days in due_evaluations %}\r\n<li>{{ evaluation.full_name_de }} (endet {% if due_in_days == 0 %}heute{% elif due_in_days == 1 %}morgen{% else %}in {{ due_in_days }} Tagen{% endif %})</li>\r\n{% endfor %}</ul><br />\r\n{% endif %}\r\n\r\nVielen Dank für deine Mühe und viele Grüße,<br />\r\ndas Evaluierungsteam<br /><br />\r\n\r\n<i>(Dies ist eine automatisch versendete E-Mail.)</i><br /><br />\r\n\r\n<hr><br /><br />\r\n\r\nDear {{ user.first_name }},<br /><br />\r\n\r\nThe evaluation period for the following evaluations just started:\r\n<ul>{{% for evaluation, due_in_days in evaluations%}\r\n<li>{{ evaluation.full_name_en }}</li>\r\n(ends {% if due_in_days == 0 %}today{% elif due_in_days == 1 %}tomorrow{% else %}in {{ due_in_days }} days{% endif %})\r\n{% endfor %}</ul><br />\r\n\r\nYou can evaluate them on EvaP{% if not user.needs_login_key %} (<a href=\"{{ page_url }}\">{{ page_url }}</a>){% endif %}. The lecturers and we are looking forward to receive your feedback. If you have any questions or feedback, please let us know (<a href=\"mailto:{{ contact_email }}\">{{ contact_email }}</a>).<br /><br />\r\n\r\n{% if user.needs_login_key %}\r\nClick here to login: <a href=\"{{ login_url }}\">{{ login_url }}</a><br /><br />\r\n{% endif %}\r\n{% if due_evaluations|length > 1%}\r\nThese evaluations are waiting for your feedback:\r\n<ul>{% for evaluation, due_in_days in due_evaluations %}\r\n<li>{{ evaluation.full_name_en }} (ends {% if due_in_days == 0 %}today{% elif due_in_days == 1 %}tomorrow{% else %}in {{ due_in_days }} days{% endif %})</li>\r\n{% endfor %}</ul><br />\r\n{% endif %}\r\n\r\nThank you very much for your efforts and kind regards,<br />\r\nthe Evaluation Team<br /><br />\r\n\r\n<i>(This is an automated message.)</i>" } }, { diff --git a/evap/evaluation/tests/test_models.py b/evap/evaluation/tests/test_models.py --- a/evap/evaluation/tests/test_models.py +++ b/evap/evaluation/tests/test_models.py @@ -927,27 +927,27 @@ def test_send_contributor_publish_notifications(): expected_calls = [ # these 4 are included since they are contributors for evaluation1 which can publish the average grade - call(responsible1, {}, {"user": responsible1, "evaluations": [evaluation1]}, use_cc=True), - call(editor1, {}, {"user": editor1, "evaluations": [evaluation1]}, use_cc=True), - call(contributor1, {}, {"user": contributor1, "evaluations": [evaluation1]}, use_cc=True), + call(responsible1, {}, {"user": responsible1, "evaluations": {evaluation1}}, use_cc=True), + call(editor1, {}, {"user": editor1, "evaluations": {evaluation1}}, use_cc=True), + call(contributor1, {}, {"user": contributor1, "evaluations": {evaluation1}}, use_cc=True), call( - contributor_both, {}, {"user": contributor_both, "evaluations": [evaluation1, evaluation2]}, use_cc=True + contributor_both, {}, {"user": contributor_both, "evaluations": {evaluation1, evaluation2}}, use_cc=True ), # contributor2 has textanswers, so they are notified - call(contributor2, {}, {"user": contributor2, "evaluations": [evaluation2]}, use_cc=True), + call(contributor2, {}, {"user": contributor2, "evaluations": {evaluation2}}, 
use_cc=True), ] with patch("evap.evaluation.models.EmailTemplate.send_to_user") as send_to_user_mock: - EmailTemplate.send_contributor_publish_notifications([evaluation1, evaluation2]) + EmailTemplate.send_contributor_publish_notifications({evaluation1, evaluation2}) # Assert that all expected publish notifications are sent to contributors. send_to_user_mock.assert_has_calls(expected_calls, any_order=True) # if general textanswers for an evaluation exist, all responsibles should also be notified baker.make(TextAnswer, contribution=evaluation2.general_contribution) - expected_calls.append(call(responsible2, {}, {"user": responsible2, "evaluations": [evaluation2]}, use_cc=True)) + expected_calls.append(call(responsible2, {}, {"user": responsible2, "evaluations": {evaluation2}}, use_cc=True)) with patch("evap.evaluation.models.EmailTemplate.send_to_user") as send_to_user_mock: - EmailTemplate.send_contributor_publish_notifications([evaluation1, evaluation2]) + EmailTemplate.send_contributor_publish_notifications({evaluation1, evaluation2}) send_to_user_mock.assert_has_calls(expected_calls, any_order=True)
Add due date in evaluation started email The "Evaluation Started" email for participants should include the due date for the evaluation that just started. Currently, only the other evaluations that are still due are listed below the new evaluation with their respective due dates.
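The patch builds the email context by pairing each newly started evaluation with the number of days until its end date, mirroring the structure already used for the "still due" list. A condensed sketch of that step, assuming the same attribute names as the patch above:

from datetime import date


def evaluations_with_days_until_due(evaluations, today=None):
    today = today or date.today()
    # pair each evaluation with the days remaining until its end date,
    # sorted by name so the email lists them deterministically
    pairs = {evaluation: (evaluation.vote_end_date - today).days for evaluation in evaluations}
    return sorted(pairs.items(), key=lambda item: item[0].full_name)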
2021-10-04T17:49:19
e-valuation/EvaP
1,633
e-valuation__EvaP-1633
[ "1631" ]
2fbe315cf0169bd5152a4d3d901b67e4464f3916
diff --git a/evap/evaluation/tools.py b/evap/evaluation/tools.py --- a/evap/evaluation/tools.py +++ b/evap/evaluation/tools.py @@ -4,11 +4,8 @@ import xlwt from django.conf import settings -from django.contrib.auth import user_logged_in -from django.dispatch import receiver from django.http import HttpResponse -from django.utils import translation -from django.utils.translation import LANGUAGE_SESSION_KEY, get_language +from django.utils.translation import get_language def is_prefetched(instance, attribute_name): @@ -38,16 +35,6 @@ def vote_end_datetime(vote_end_date): return date_to_datetime(vote_end_date) + datetime.timedelta(hours=24 + settings.EVALUATION_END_OFFSET_HOURS) -@receiver(user_logged_in) -def set_or_get_language(user, request, **_kwargs): - if user.language: - translation.activate(user.language) - else: - user.language = get_language() - user.save() - request.session[LANGUAGE_SESSION_KEY] = user.language - - def get_parameter_from_url_or_session(request, parameter, default=False): result = request.GET.get(parameter, None) if result is None: # if no parameter is given take session value diff --git a/evap/middleware.py b/evap/middleware.py --- a/evap/middleware.py +++ b/evap/middleware.py @@ -1,4 +1,6 @@ +from django.conf import settings from django.contrib.auth.views import redirect_to_login +from django.utils import translation class RequireLoginMiddleware: @@ -26,3 +28,23 @@ def process_view(request, view_func, _view_args, _view_kwargs): def no_login_required(func): func.no_login_required = True return func + + +def user_language_middleware(get_response): + def middleware(request): + if not (request.user and request.user.is_authenticated): + return get_response(request) + if request.user.language == translation.get_language(): + return get_response(request) + + if request.user.language: + translation.activate(request.user.language) + else: + request.user.language = translation.get_language() + request.user.save() + lang = request.user.language + response = get_response(request) + response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang) + return response + + return middleware diff --git a/evap/settings.py b/evap/settings.py --- a/evap/settings.py +++ b/evap/settings.py @@ -218,7 +218,6 @@ "django.middleware.security.SecurityMiddleware", "django.contrib.sessions.middleware.SessionMiddleware", # LocaleMiddleware should be here according to https://docs.djangoproject.com/en/2.2/topics/i18n/translation/#how-django-discovers-language-preference - # Furthermore, set_or_get_language (happens on login) uses the active language, so LocaleMiddleware should be before AuthenticationMiddleware "django.middleware.locale.LocaleMiddleware", "django.middleware.common.CommonMiddleware", "django.middleware.csrf.CsrfViewMiddleware", @@ -227,6 +226,7 @@ "django.middleware.clickjacking.XFrameOptionsMiddleware", "mozilla_django_oidc.middleware.SessionRefresh", "evap.middleware.RequireLoginMiddleware", + "evap.middleware.user_language_middleware", "evap.staff.staff_mode.staff_mode_middleware", "evap.evaluation.middleware.LoggingRequestMiddleware", ]
diff --git a/evap/results/tests/test_views.py b/evap/results/tests/test_views.py --- a/evap/results/tests/test_views.py +++ b/evap/results/tests/test_views.py @@ -93,7 +93,9 @@ def test_order(self): page = self.app.get(self.url, user=student).body.decode() self.assertLess(page.index(evaluation1.name_en), page.index(evaluation2.name_en)) - page = self.app.get(self.url, user=student, extra_environ={"HTTP_ACCEPT_LANGUAGE": "de"}).body.decode() + student.language = "de" + student.save() + page = self.app.get(self.url, user=student).body.decode() self.assertGreater(page.index(evaluation1.name_de), page.index(evaluation2.name_de)) # using LocMemCache so the cache queries don't show up in the query count that's measured here
Lost in translation: Language preference dropped when browser is closed The language preference is lost after I restart my browser. This happens on both Firefox and Chromium. 1. log in 2. set language to German (my system language is English) 3. close and reopen browser 4. go to evap, now the site is English again @He3lixxx reproduced and said that the django cookie for language is discarded after the session ends. It gets restored if you have the "restore session when opening firefox" setting enabled (edit: when coming back to the site (still logged in) with a new session, the site is served in English although the database knows the language for the user is German) We have `set_or_get_language` that runs on `@receiver(user_logged_in)`, but when I open the log in site, the language is set to English anyways and stays that way after I logged in.
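A condensed sketch of the approach taken in the patch above: instead of relying on a session-scoped value written once at login, a middleware re-activates the language stored on the user profile and re-issues the language cookie on every authenticated request. Back-filling an empty profile language, as the patch does, is omitted here.

from django.conf import settings
from django.utils import translation


def user_language_middleware(get_response):
    def middleware(request):
        user = getattr(request, "user", None)
        if not (user and user.is_authenticated) or not user.language:
            return get_response(request)
        if user.language == translation.get_language():
            return get_response(request)
        translation.activate(user.language)
        response = get_response(request)
        response.set_cookie(settings.LANGUAGE_COOKIE_NAME, user.language)
        return response

    return middleware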
2021-10-11T19:20:19
e-valuation/EvaP
1,637
e-valuation__EvaP-1637
[ "1623" ]
f71213a870fe19fca4bd94ab44fbec58f94d0ae5
diff --git a/evap/staff/views.py b/evap/staff/views.py --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -1421,10 +1421,12 @@ def get_evaluation_and_contributor_textanswer_sections(evaluation, filter_textan @reviewer_required def evaluation_textanswers(request, semester_id, evaluation_id): semester = get_object_or_404(Semester, id=semester_id) - if semester.results_are_archived and not request.user.is_manager: + if semester.results_are_archived: raise PermissionDenied evaluation = get_object_or_404(Evaluation, id=evaluation_id, course__semester=semester) + if evaluation.state == Evaluation.State.PUBLISHED: + raise PermissionDenied if not evaluation.can_publish_text_results: raise PermissionDenied @@ -1476,7 +1478,9 @@ def evaluation_textanswers_update_publish(request): evaluation_id = request.POST["evaluation_id"] evaluation = Evaluation.objects.get(pk=evaluation_id) - if evaluation.course.semester.results_are_archived and not request.user.is_manager: + if evaluation.state == Evaluation.State.PUBLISHED: + raise PermissionDenied + if evaluation.course.semester.results_are_archived: raise PermissionDenied if not evaluation.can_publish_text_results: raise PermissionDenied @@ -1508,10 +1512,12 @@ def evaluation_textanswers_update_publish(request): @reviewer_required def evaluation_textanswer_edit(request, semester_id, evaluation_id, textanswer_id): semester = get_object_or_404(Semester, id=semester_id) - if semester.results_are_archived and not request.user.is_manager: + if semester.results_are_archived: raise PermissionDenied evaluation = get_object_or_404(Evaluation, id=evaluation_id, course__semester=semester) + if evaluation.state == Evaluation.State.PUBLISHED: + raise PermissionDenied if not evaluation.can_publish_text_results: raise PermissionDenied
diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -2262,6 +2262,22 @@ def test_num_queries_is_constant(self): with self.assertNumQueries(FuzzyInt(0, self.num_questions)): self.app.get(self.url, user=self.manager) + def test_published(self): + let_user_vote_for_evaluation(self.app, self.student2, self.evaluation) + with run_in_staff_mode(self): + self.app.get(self.url, user=self.manager, status=200) + Evaluation.objects.filter(id=self.evaluation.id).update(state=Evaluation.State.PUBLISHED) + with run_in_staff_mode(self): + self.app.get(self.url, user=self.manager, status=403) + + def test_archived(self): + let_user_vote_for_evaluation(self.app, self.student2, self.evaluation) + with run_in_staff_mode(self): + self.app.get(self.url, user=self.manager, status=200) + Semester.objects.filter(id=self.evaluation.course.semester.id).update(results_are_archived=True) + with run_in_staff_mode(self): + self.app.get(self.url, user=self.manager, status=403) + class TestEvaluationTextAnswerEditView(WebTest): @classmethod @@ -2320,6 +2336,21 @@ def test_textanswers_showing_up(self): self.text_answer.refresh_from_db() self.assertEqual(self.text_answer.answer, "edited answer text") + # archive and it shouldn't work anymore + with run_in_staff_mode(self): + self.app.get(self.url, user=self.manager, status=200) + Semester.objects.filter(id=self.evaluation.course.semester.id).update(results_are_archived=True) + with run_in_staff_mode(self): + self.app.get(self.url, user=self.manager, status=403) + Semester.objects.filter(id=self.evaluation.course.semester.id).update(results_are_archived=False) + + # publish and it shouldn't work anymore + with run_in_staff_mode(self): + self.app.get(self.url, user=self.manager, status=200) + Evaluation.objects.filter(id=self.evaluation.id).update(state=Evaluation.State.PUBLISHED) + with run_in_staff_mode(self): + self.app.get(self.url, user=self.manager, status=403) + class TestQuestionnaireNewVersionView(WebTestStaffMode): url = "/staff/questionnaire/2/new_version" @@ -2699,6 +2730,18 @@ def test_finishing_review_updates_results(self): self.assertEqual(len(results.questionnaire_results[0].question_results[1].answers), 1) + def test_published(self): + let_user_vote_for_evaluation(self.app, self.student2, self.evaluation) + self.helper(TextAnswer.State.NOT_REVIEWED, TextAnswer.State.PUBLISHED, "publish") + Evaluation.objects.filter(id=self.evaluation.id).update(state=Evaluation.State.PUBLISHED) + self.helper(TextAnswer.State.NOT_REVIEWED, TextAnswer.State.PUBLISHED, "publish", expect_errors=True) + + def test_archived(self): + let_user_vote_for_evaluation(self.app, self.student2, self.evaluation) + self.helper(TextAnswer.State.NOT_REVIEWED, TextAnswer.State.PUBLISHED, "publish") + Semester.objects.filter(id=self.evaluation.course.semester.id).update(results_are_archived=True) + self.helper(TextAnswer.State.NOT_REVIEWED, TextAnswer.State.PUBLISHED, "publish", expect_errors=True) + class TestEvaluationTextAnswersSkip(WebTestStaffMode): csrf_checks = False
Prevent text answer updates after publishing evaluation After an evaluation was published, changes to text answers should not be possible anymore. The staff text answer page of an evaluation (`staff.views.evaluation_textanswers`) should then not be accessible (the UI doesn't link to it but manually entering the URL is currently possible) and `evaluation_textanswers_update_publish` as well as `evaluation_textanswer_edit` should also raise `PermissionDenied`.
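The checks added by the patch amount to one rule, bundled below for readability. The helper name is hypothetical; the actual patch inlines these checks in each affected view.

from django.core.exceptions import PermissionDenied

from evap.evaluation.models import Evaluation


def assert_textanswers_editable(evaluation):
    if evaluation.state == Evaluation.State.PUBLISHED:
        raise PermissionDenied  # published results must stay immutable
    if evaluation.course.semester.results_are_archived:
        raise PermissionDenied  # archived semesters are read-only
    if not evaluation.can_publish_text_results:
        raise PermissionDenied  # this evaluation may not publish text answers at all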
2021-10-18T15:36:37
e-valuation/EvaP
1,639
e-valuation__EvaP-1639
[ "1569" ]
f6c5e252bf1b80fb19154c10c90a347beb52cfa8
diff --git a/evap/contributor/forms.py b/evap/contributor/forms.py --- a/evap/contributor/forms.py +++ b/evap/contributor/forms.py @@ -5,7 +5,7 @@ from django.forms.widgets import CheckboxSelectMultiple from django.utils.translation import gettext_lazy as _ -from evap.evaluation.forms import UserModelChoiceField +from evap.evaluation.forms import UserModelChoiceField, UserModelMultipleChoiceField from evap.evaluation.models import Course, Evaluation, Questionnaire, UserProfile from evap.evaluation.tools import vote_end_datetime from evap.staff.forms import ContributionForm @@ -26,9 +26,13 @@ class Meta: "name_en_field", "vote_start_datetime", "vote_end_date", + "participants", "general_questionnaires", "course", ) + field_classes = { + "participants": UserModelMultipleChoiceField, + } def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @@ -45,6 +49,11 @@ def __init__(self, *args, **kwargs): self.fields["vote_start_datetime"].localize = True self.fields["vote_end_date"].localize = True + queryset = UserProfile.objects.exclude(is_active=False) + if self.instance.pk is not None: + queryset = (queryset | self.instance.participants.all()).distinct() + self.fields["participants"].queryset = queryset + if self.instance.general_contribution: self.fields["general_questionnaires"].initial = [ q.pk for q in self.instance.general_contribution.questionnaires.all() diff --git a/evap/evaluation/forms.py b/evap/evaluation/forms.py --- a/evap/evaluation/forms.py +++ b/evap/evaluation/forms.py @@ -97,6 +97,8 @@ def label_from_instance(self, obj): class UserModelMultipleChoiceField(forms.ModelMultipleChoiceField): + widget = forms.SelectMultiple(attrs={"data-selection-css-class": "user-multi-select"}) + def label_from_instance(self, obj): return obj.full_name_with_additional_info diff --git a/evap/staff/forms.py b/evap/staff/forms.py --- a/evap/staff/forms.py +++ b/evap/staff/forms.py @@ -386,7 +386,10 @@ def __init__(self, *args, **kwargs): Questionnaire.objects.general_questionnaires().filter(visible_questionnaires).distinct() ) - self.fields["participants"].queryset = UserProfile.objects.exclude(is_active=False) + queryset = UserProfile.objects.exclude(is_active=False) + if self.instance.pk is not None: + queryset = (queryset | self.instance.participants.all()).distinct() + self.fields["participants"].queryset = queryset if self.instance.general_contribution: self.fields["general_questionnaires"].initial = [
diff --git a/evap/contributor/tests/test_forms.py b/evap/contributor/tests/test_forms.py --- a/evap/contributor/tests/test_forms.py +++ b/evap/contributor/tests/test_forms.py @@ -3,7 +3,7 @@ from model_bakery import baker from evap.contributor.forms import EditorContributionForm, EvaluationForm -from evap.evaluation.models import Contribution, Evaluation, Questionnaire, UserProfile +from evap.evaluation.models import Contribution, Degree, Evaluation, Questionnaire, UserProfile from evap.evaluation.tests.tools import WebTest, get_form_data_from_instance from evap.staff.forms import ContributionFormSet @@ -21,6 +21,24 @@ def test_fields_disabled_when_editors_disallowed_to_edit(self): form = EvaluationForm(instance=evaluation) self.assertTrue(all(form.fields[field].disabled for field in form.fields)) + def test_edit_participants(self): + student = baker.make(UserProfile) + evaluation = baker.make(Evaluation, course__degrees=[baker.make(Degree)], participants=[student]) + evaluation.general_contribution.questionnaires.set([baker.make(Questionnaire)]) + + form_data = get_form_data_from_instance(EvaluationForm, evaluation) + form = EvaluationForm(form_data, instance=evaluation) + self.assertEqual(len(form["participants"].initial), 1) + + form_data["participants"].remove(student.pk) + EvaluationForm(form_data, instance=evaluation).save() + self.assertEqual(evaluation.num_participants, 0) + + form_data["participants"].append(student.pk) + EvaluationForm(form_data, instance=evaluation).save() + del evaluation.num_participants # discard cached property + self.assertEqual(evaluation.num_participants, 1) + class ContributionFormsetTests(TestCase): def test_managers_only(self): diff --git a/evap/contributor/tests/test_views.py b/evap/contributor/tests/test_views.py --- a/evap/contributor/tests/test_views.py +++ b/evap/contributor/tests/test_views.py @@ -244,7 +244,7 @@ def test_contact_modal_escape(self): self.evaluation.save() page = self.app.get(self.url, user=self.responsible, status=200) - self.assertIn("changeParticipantRequestModalLabel", page) + self.assertIn("changeEvaluationRequestModalLabel", page) self.assertNotIn("Adam &amp;amp; Eve", page) self.assertIn("Adam &amp; Eve", page) @@ -258,3 +258,16 @@ def test_information_message(self): "Please review the evaluation's details below, add all contributors and select suitable questionnaires. 
" "Once everything is okay, please approve the evaluation on the bottom of the page.", ) + + def test_display_request_buttons(self): + self.evaluation.allow_editors_to_edit = False + self.evaluation.save() + page = self.app.get(self.url, user=self.responsible) + self.assertEqual(page.body.decode().count("Request changes"), 1) + self.assertEqual(page.body.decode().count("Request creation of new account"), 1) + + self.evaluation.allow_editors_to_edit = True + self.evaluation.save() + page = self.app.get(self.url, user=self.responsible) + self.assertEqual(page.body.decode().count("Request changes"), 0) + self.assertEqual(page.body.decode().count("Request creation of new account"), 2) diff --git a/evap/staff/tests/test_forms.py b/evap/staff/tests/test_forms.py --- a/evap/staff/tests/test_forms.py +++ b/evap/staff/tests/test_forms.py @@ -954,6 +954,29 @@ def test_unused_questionnaire_visibility(self): form = EvaluationForm(instance=evaluation, semester=evaluation.course.semester) self.assertIn(questionnaire, form.fields["general_questionnaires"].queryset) + def test_inactive_participants_remain(self): + student = baker.make(UserProfile, is_active=False) + evaluation = baker.make(Evaluation, course__degrees=[baker.make(Degree)], participants=[student]) + + form_data = get_form_data_from_instance(EvaluationForm, evaluation) + form = EvaluationForm(form_data, instance=evaluation) + self.assertEqual(len(form["participants"]), 1) + + def test_inactive_participants_not_in_queryset(self): + evaluation = baker.make(Evaluation, course__degrees=[baker.make(Degree)]) + + form_data = get_form_data_from_instance(EvaluationForm, evaluation) + form = EvaluationForm(form_data, instance=evaluation) + self.assertEqual(form.fields["participants"].queryset.count(), 0) + + baker.make(UserProfile, is_active=True) + form = EvaluationForm(form_data, instance=evaluation) + self.assertEqual(form.fields["participants"].queryset.count(), 1) + + baker.make(UserProfile, is_active=False) + form = EvaluationForm(form_data, instance=evaluation) + self.assertEqual(form.fields["participants"].queryset.count(), 1) + class EvaluationCopyFormTests(TestCase): @classmethod
Make participants editable by editors Editors are currently not able to change participants of an evaluation and have to send a message asking for changes instead. This results in a lot of work for the evaluation team. Since recently, all changes are logged, so editors can be allowed to change the participants themselves, and staff users can see what changes have been made. This should be done as follows: - The list of participants should become an editable form field just like in the staff form and should be placed at the same position as in the staff form instead of at the end of the page. - The field (both for editors and staff) should be changed to contain exactly one person per line (by changing the select2 elements' CSS). - If the log contains participant changes done by users who are not staff and the evaluation was not yet approved by a staff user, the participant field in the staff form should show a warning (e.g., by having a yellow border).
That's pretty cool. And a little obvious in hindsight again :D It does feel a little weird to me to use the logging component for business logic. An alternative could be a boolean flag on the evaluation, like "participations_modified". This might even be simpler and more robust in the end than searching those JSONs for participation changes. The flag could also be cleared easily if need be (perhaps when sending the evaluation back to editors?), that would be a lot harder with logging entries. Or maybe display a filtered log with all the entries that should be reviewed somewhere above the save buttons? If not, I suppose marking (e.g. coloring) log entries that should be reviewed in the log below the save buttons could help so staff users have to search less if the log gets long. Or maybe move the entire log above the buttons, but by default only show entries that should be reviewed, and provide a "Show full log" button? :) I'm fine with both using the flag and coloring log entries. For the time being, we could also omit the warning part, because another larger issue with logging/highlighting refactoring for the evaluation form will be added soon. Making the participants editable is especially important and can be implemented even if the other requirements have not yet been fully specified.
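To make the requirement above concrete, here is a minimal sketch, assuming the project's existing `Evaluation.participants` many-to-many relation and `UserProfile` model; it is an illustration only, not the actual diff, and the exact field list and widget styling (one person per line via CSS) are left open.

```python
# Illustrative sketch only (not the actual implementation): expose the
# participant list as an editable field on the editor-facing evaluation form.
from django import forms

from evap.evaluation.models import Evaluation, UserProfile


class EvaluationForm(forms.ModelForm):
    # inactive users are excluded, mirroring how other user selection fields behave
    participants = forms.ModelMultipleChoiceField(
        queryset=UserProfile.objects.exclude(is_active=False),
        required=False,
    )

    class Meta:
        model = Evaluation
        fields = ("participants",)  # the real form would list the other editable fields as well
```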
2021-10-18T16:50:35
e-valuation/EvaP
1,651
e-valuation__EvaP-1651
[ "1558" ]
27b28b5bbe710cc7151e6a8e015f2bc78d60ef52
diff --git a/evap/staff/forms.py b/evap/staff/forms.py --- a/evap/staff/forms.py +++ b/evap/staff/forms.py @@ -517,6 +517,16 @@ def clean(self): if not self.cleaned_data.get("does_not_contribute") and not self.cleaned_data.get("questionnaires"): self.add_error("does_not_contribute", _("Select either this option or at least one questionnaire!")) + @property + def show_delete_button(self): + if self.instance.pk is None: + return True # not stored in the DB. Required so temporary instances in the formset can be deleted. + + if not self.instance.ratinganswercounter_set.exists() and not self.instance.textanswer_set.exists(): + return True + + return False + class ContributionCopyForm(ContributionForm): def __init__(self, data=None, instance=None, evaluation=None, **kwargs):
diff --git a/evap/staff/tests/test_forms.py b/evap/staff/tests/test_forms.py --- a/evap/staff/tests/test_forms.py +++ b/evap/staff/tests/test_forms.py @@ -6,6 +6,7 @@ from evap.contributor.forms import EvaluationForm as ContributorEvaluationForm from evap.evaluation.models import ( + Answer, Contribution, Course, Degree, @@ -13,7 +14,9 @@ Evaluation, Question, Questionnaire, + RatingAnswerCounter, Semester, + TextAnswer, UserProfile, ) from evap.evaluation.tests.tools import ( @@ -554,6 +557,36 @@ def test_staff_can_select_proxy_user(self): form = CourseForm(instance=course) self.assertIn(proxy_user, form.fields["responsibles"].queryset) + def test_prevent_contribution_deletion_with_answers(self): + """ + When answers for a contribution already exist, it should not be possible to remove that contribution. + """ + self.assertEqual( + set(Answer.__subclasses__()), + {RatingAnswerCounter, TextAnswer}, + "This requires an update if a new answer type is added", + ) + evaluation = baker.make(Evaluation) + contribution = baker.make( + Contribution, + evaluation=evaluation, + role=Contribution.Role.EDITOR, + textanswer_visibility=Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS, + _fill_optional=["contributor"], + ) + + contribution_formset = inlineformset_factory( + Evaluation, Contribution, formset=ContributionFormSet, form=ContributionForm, extra=1 + ) + formset = contribution_formset(instance=evaluation, form_kwargs={"evaluation": evaluation}) + self.assertTrue(formset.forms[0].show_delete_button) + self.assertTrue(formset.forms[1].show_delete_button) + + baker.make(RatingAnswerCounter, contribution=contribution) + + self.assertFalse(formset.forms[0].show_delete_button) + self.assertTrue(formset.forms[1].show_delete_button) + class ContributionFormset775RegressionTests(TestCase): """
Prevent deleting contributors voted for in the form Staff users can delete contributors in the evaluation form even if answers to questions of their contribution have already been given. However, this correctly results in a `ProtectedError` when saving the form: ``` Cannot delete some instances of model 'Contribution' because they are referenced through a protected foreign key ``` Deleting these contributors should not be possible in the form.
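The patch above solves this with a `show_delete_button` property; a condensed, hedged sketch of the guard:

```python
# Condensed from the patch above; treat this as an illustration rather than
# the authoritative implementation.
def show_delete_button(contribution) -> bool:
    if contribution.pk is None:
        return True  # unsaved formset rows can always be removed
    # answers reference contributions through a protected foreign key, so
    # deleting a contribution that has answers would raise ProtectedError
    return (
        not contribution.ratinganswercounter_set.exists()
        and not contribution.textanswer_set.exists()
    )
```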
2021-10-25T16:47:25
e-valuation/EvaP
1,661
e-valuation__EvaP-1661
[ "1640" ]
37d4f5e7fcdd9a74819c91bf93c649029cca80a1
diff --git a/evap/grades/views.py b/evap/grades/views.py --- a/evap/grades/views.py +++ b/evap/grades/views.py @@ -165,6 +165,10 @@ def edit_grades(request, semester_id, course_id, grade_document_id): form = GradeDocumentForm(request.POST or None, request.FILES or None, instance=grade_document) + final_grades = ( + grade_document.type == GradeDocument.Type.FINAL_GRADES + ) # if parameter is not given, assume midterm grades + if form.is_valid(): form.save(modifying_user=request.user) messages.success(request, _("Successfully updated grades.")) @@ -175,6 +179,7 @@ def edit_grades(request, semester_id, course_id, grade_document_id): course=course, form=form, show_automated_publishing_info=False, + final_grades=final_grades, ) return render(request, "grades_upload_form.html", template_data)
diff --git a/evap/grades/tests.py b/evap/grades/tests.py --- a/evap/grades/tests.py +++ b/evap/grades/tests.py @@ -6,6 +6,7 @@ from model_bakery import baker from evap.evaluation.models import Contribution, Course, Evaluation, Questionnaire, Semester, UserProfile +from evap.grades.models import GradeDocument class GradeUploadTest(WebTest): @@ -238,3 +239,32 @@ def test_403_on_archived_semester(self): Evaluation, course=baker.make(Course, pk=1, semester=archived_semester), state=Evaluation.State.PREPARED ) self.app.get("/grades/semester/1/course/1", user=self.grade_publisher, status=403) + + +class GradeEditTest(WebTest): + def test_grades_headlines(self): + + grade_publisher = baker.make( + UserProfile, + email="[email protected]", + groups=[Group.objects.get(name="Grade publisher")], + ) + grade_document = baker.make(GradeDocument) + + url = f"/grades/semester/{grade_document.course.semester.pk}/course/{grade_document.course.pk}/edit/{grade_document.pk}" + + response = self.app.get( + url, + user=grade_publisher, + ) + self.assertContains(response, "Upload midterm grades") + self.assertNotContains(response, "Upload final grades") + + grade_document.type = GradeDocument.Type.FINAL_GRADES + grade_document.save() + response = self.app.get( + url, + user=grade_publisher, + ) + self.assertContains(response, "Upload final grades") + self.assertNotContains(response, "Upload midterm grades")
Wrong grade document edit form title When editing a grade document that holds final grades, the title of the form wrongly shows "Upload midterm grades" (should be "Upload final grades" instead) because the parameter `final_grades` is not correctly set for the template. This can for example be seen at the course "Operating Systems I (Summer term 2014)" in the test data.
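The fix in the patch above boils down to deriving the flag from the document type inside the edit view and handing it to the template; shown here in isolation (the surrounding view variables are assumed):

```python
# Condensed from the patch above: the template picks the "midterm" or "final"
# headline based on this flag.
final_grades = grade_document.type == GradeDocument.Type.FINAL_GRADES

template_data = dict(
    semester=semester,
    course=course,
    form=form,
    show_automated_publishing_info=False,
    final_grades=final_grades,
)
```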
2021-11-04T15:02:07
e-valuation/EvaP
1,666
e-valuation__EvaP-1666
[ "1652" ]
27b28b5bbe710cc7151e6a8e015f2bc78d60ef52
diff --git a/evap/evaluation/management/commands/format.py b/evap/evaluation/management/commands/format.py --- a/evap/evaluation/management/commands/format.py +++ b/evap/evaluation/management/commands/format.py @@ -11,3 +11,4 @@ class Command(BaseCommand): def handle(self, *args, **options): subprocess.run(["black", "evap"], check=False) # nosec subprocess.run(["isort", "."], check=False) # nosec + subprocess.run(["npx", "prettier", "--write", "evap/static/ts/src"], check=False) # nosec
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -69,22 +69,29 @@ jobs: formatter: runs-on: ubuntu-18.04 - container: - image: python:3.7 - name: Formatting steps: - name: Check out repository code uses: actions/checkout@v2 - - name: Install dependencies + - uses: actions/setup-python@v2 + with: + python-version: 3.7 + - name: Install Python dependencies run: pip install -r requirements-dev.txt + - name: Setup Node + uses: actions/setup-node@v2 + - name: Install Node dependencies + run: npm ci - name: Add localsettings run: cp evap/settings_test.py evap/localsettings.py - name: Check code formatting run: black --check evap - name: Check imports formatting run: isort . --check --diff + - run: ls -laR evap/static/ts + - name: Check TypeScript formatting + run: npx prettier --list-different --loglevel debug --config evap/static/ts/.prettierrc.json evap/static/ts/src backup-process: diff --git a/evap/evaluation/tests/test_commands.py b/evap/evaluation/tests/test_commands.py --- a/evap/evaluation/tests/test_commands.py +++ b/evap/evaluation/tests/test_commands.py @@ -352,11 +352,12 @@ class TestFormatCommand(TestCase): @patch("subprocess.run") def test_formatters_called(self, mock_subprocess_run): management.call_command("format") - self.assertEqual(len(mock_subprocess_run.mock_calls), 2) + self.assertEqual(len(mock_subprocess_run.mock_calls), 3) mock_subprocess_run.assert_has_calls( [ call(["black", "evap"], check=False), call(["isort", "."], check=False), + call(["npx", "prettier", "--write", "evap/static/ts/src"], check=False), ] )
Make Typescript code Prettier We should add automated formatting for our TypeScript files. I think https://prettier.io/ is pretty good, but the choice is open for discussion. The formatting should be done in `manage.py format` and be checked in CI.
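As the patch above shows, the server-side part is one additional subprocess call in the `format` management command; in isolation:

```python
# Taken from the patch above: prettier runs over the TypeScript sources
# alongside black and isort when `manage.py format` is invoked.
import subprocess

subprocess.run(["black", "evap"], check=False)
subprocess.run(["isort", "."], check=False)
subprocess.run(["npx", "prettier", "--write", "evap/static/ts/src"], check=False)
```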
2021-11-15T20:10:42
e-valuation/EvaP
1,667
e-valuation__EvaP-1667
[ "1333" ]
c10f0c962363fa439a6a75bf924b35321301b13b
diff --git a/evap/staff/forms.py b/evap/staff/forms.py --- a/evap/staff/forms.py +++ b/evap/staff/forms.py @@ -1015,6 +1015,10 @@ class UserMergeSelectionForm(forms.Form): other_user = UserModelChoiceField(UserProfile.objects.all()) +class UserEditSelectionForm(forms.Form): + user = UserModelChoiceField(UserProfile.objects.all()) + + class EmailTemplateForm(forms.ModelForm): class Meta: model = EmailTemplate diff --git a/evap/staff/urls.py b/evap/staff/urls.py --- a/evap/staff/urls.py +++ b/evap/staff/urls.py @@ -69,6 +69,7 @@ path("user/create", views.user_create, name="user_create"), path("user/import", views.user_import, name="user_import"), path("user/<int:user_id>/edit", views.user_edit, name="user_edit"), + path("user/list", views.user_list, name="user_list"), path("user/delete", views.user_delete, name="user_delete"), path("user/bulk_update", views.user_bulk_update, name="user_bulk_update"), diff --git a/evap/staff/views.py b/evap/staff/views.py --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -84,6 +84,7 @@ TextAnswerForm, TextAnswerWarningForm, UserBulkUpdateForm, + UserEditSelectionForm, UserForm, UserImportForm, UserMergeSelectionForm, @@ -1944,6 +1945,17 @@ def text_answer_warnings_index(request): @manager_required def user_index(request): + form = UserEditSelectionForm(request.POST or None) + + if form.is_valid(): + user = form.cleaned_data["user"] + return redirect("staff:user_edit", user.id) + + return render(request, "staff_user_index.html", dict(form=form)) + + +@manager_required +def user_list(request): filter_users = get_parameter_from_url_or_session(request, "filter_users") users = UserProfile.objects.all() @@ -1976,7 +1988,7 @@ def user_index(request): .order_by("last_name", "first_name", "email") ) - return render(request, "staff_user_index.html", dict(users=users, filter_users=filter_users)) + return render(request, "staff_user_list.html", dict(users=users, filter_users=filter_users)) @manager_required @@ -2083,6 +2095,7 @@ def user_delete(request): if not user.can_be_deleted_by_manager: raise SuspiciousOperation("Deleting user not allowed") user.delete() + messages.success(request, _("Successfully deleted user.")) return HttpResponse() # 200 OK
diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -144,6 +144,22 @@ def setUpTestData(cls): class TestUserIndexView(WebTestStaffMode): url = "/staff/user/" + @classmethod + def setUpTestData(cls): + cls.manager = make_manager() + cls.some_user = baker.make(UserProfile) + + def test_redirect(self): + page = self.app.get(self.url, user=self.manager, status=200) + form = page.forms["user-edit-form"] + form["user"] = self.some_user.pk + response = form.submit(status=302) + self.assertEqual(response.location, f"/staff/user/{self.some_user.pk}/edit") + + +class TestUserListView(WebTestStaffMode): + url = "/staff/user/list" + @classmethod def setUpTestData(cls): cls.manager = make_manager()
Redesign user index The user index page loads quite slowly. This could probably be improved by removing badges that are not really needed ("Contributor", "Responsible", "External", "No email") and by postponing the `can_be_deleted_by_manager` check so it only runs after clicking the button, returning an error message if deletion is not possible. It should be investigated whether further improvements to the loading time are possible.
In an offline discussion, @janno42 realized that the delete button could simply be moved to the user edit form. (edited, see discussion below) In most cases we don't need a complete list of all users at all. So the user index page should be changed to a new overview page like the staff index giving access to: - user import - user adding - user merge - user bulk update - the complete user list as it is now ~but extended by filters for proxy users, managers, grade publishers, and reviewers~ - a new search field where all users (including inactive and proxy users) can be found via a user selection field which then allows to open the respective user's edit form for a specific user And in addition: - the user edit form needs to get a delete button as stated above to avoid any confusion, you want to *keep* the full list but extend it with more filters, and add buttons to import/merge etc at the top? and your last bullet point I didn't understand. What does it do differently than the current search field? the full list with additional filters is for the rare cases where you might want to see all reviewers when the student representative group changes or something like that. but it needs to be an extra page instead of the index, so that for all other use cases the loading time of the list doesn't matter anymore. the search field should be placed on the (new) user index page and could work just like the contributor selection fields but with a button next to it opening the selected profile. ooooh i see. So it's a completely new page with a button leading to the old one. i would suggest leaving out the additional filters to the old page for the first PR to make this issue more accessible. maybe add the delete button to the user edit form instead, so the new page can cover more use cases. maybe the old page really becomes irrelevant then (or irrelevant enough that typing 'reviewer' into the search field is sufficient for your "see all reviewers" use case) ah right, i had searching like that in mind and somehow forgot it on the way writing the requirement above. you are right: additional filters are not required. i'll update the list above and add the delete button thing. @lill28 do you still want to work on this issue?
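A condensed sketch of the "search field plus edit button" flow discussed above, mirroring the patch: a one-field form and a view that redirects to the selected user's edit page (a plain `ModelChoiceField` stands in here for the project's `UserModelChoiceField`).

```python
# Condensed from the patch above; illustration only.
from django import forms
from django.shortcuts import redirect, render

from evap.evaluation.models import UserProfile


class UserEditSelectionForm(forms.Form):
    user = forms.ModelChoiceField(UserProfile.objects.all())


def user_index(request):
    form = UserEditSelectionForm(request.POST or None)
    if form.is_valid():
        return redirect("staff:user_edit", form.cleaned_data["user"].id)
    return render(request, "staff_user_index.html", dict(form=form))
```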
2021-11-15T20:55:39
e-valuation/EvaP
1,668
e-valuation__EvaP-1668
[ "1663" ]
16012cefc09833c30eeea6e6d146b4a3d46e6555
diff --git a/evap/staff/tools.py b/evap/staff/tools.py --- a/evap/staff/tools.py +++ b/evap/staff/tools.py @@ -334,15 +334,17 @@ def find_unreviewed_evaluations(semester, excluded): if datetime.now().hour < settings.EVALUATION_END_OFFSET_HOURS: exclude_date -= timedelta(days=1) - return ( - semester.evaluations.exclude(pk__in=excluded) - .exclude(state=Evaluation.State.PUBLISHED) - .exclude(vote_end_date__gte=exclude_date) - .exclude(can_publish_text_results=False) - .filter(contributions__textanswer_set__state=TextAnswer.State.NOT_REVIEWED) - .annotate(num_unreviewed_textanswers=Count("contributions__textanswer_set")) - .order_by("vote_end_date", "-num_unreviewed_textanswers") - .all() + # Evaluations where the grading process is finished should be shown first, need to be sorted in Python + return sorted( + ( + semester.evaluations.exclude(pk__in=excluded) + .exclude(state=Evaluation.State.PUBLISHED) + .exclude(vote_end_date__gte=exclude_date) + .exclude(can_publish_text_results=False) + .filter(contributions__textanswer_set__state=TextAnswer.State.NOT_REVIEWED) + .annotate(num_unreviewed_textanswers=Count("contributions__textanswer_set")) + ), + key=lambda e: (-e.grading_process_is_finished, e.vote_end_date, -e.num_unreviewed_textanswers), )
diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -2187,16 +2187,16 @@ class TestEvaluationTextAnswerView(WebTest): @classmethod def setUpTestData(cls): cls.manager = make_manager() - semester = baker.make(Semester, pk=1) - student1 = baker.make(UserProfile, email="[email protected]") + cls.semester = baker.make(Semester, pk=1) + cls.student1 = baker.make(UserProfile, email="[email protected]") cls.student2 = baker.make(UserProfile, email="[email protected]") cls.evaluation = baker.make( Evaluation, pk=1, - course__semester=semester, - participants=[student1, cls.student2], - voters=[student1], + course__semester=cls.semester, + participants=[cls.student1, cls.student2], + voters=[cls.student1], state=Evaluation.State.IN_EVALUATION, ) top_general_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) @@ -2216,9 +2216,9 @@ def setUpTestData(cls): cls.evaluation2 = baker.make( Evaluation, - course__semester=semester, - participants=[student1], - voters=[student1, cls.student2], + course__semester=cls.semester, + participants=[cls.student1], + voters=[cls.student1, cls.student2], vote_start_datetime=datetime.datetime.now() - datetime.timedelta(days=5), vote_end_date=datetime.date.today() - datetime.timedelta(days=4), can_publish_text_results=True, @@ -2280,6 +2280,51 @@ def test_exclude_unfinished_evaluations(self): # unfinished because still in EVALUATION_END_OFFSET_HOURS self.assertNotContains(page, self.evaluation2.full_name) + def test_suggested_evaluation_ordering(self): + evaluations = baker.make( + Evaluation, + course__semester=self.semester, + participants=[self.student1, self.student2], + voters=[self.student1, self.student2], + state=Evaluation.State.IN_EVALUATION, + vote_start_datetime=datetime.datetime.now() - datetime.timedelta(days=42), + vote_end_date=datetime.date.today() - datetime.timedelta(days=2), + can_publish_text_results=True, + _quantity=2, + ) + + for evaluation, answer_count in zip(evaluations, [1, 2]): + contribution = baker.make(Contribution, evaluation=evaluation, _fill_optional=["contributor"]) + baker.make(TextAnswer, contribution=contribution, question__type=Question.TEXT, _quantity=answer_count) + + url = f"/staff/semester/{self.semester.pk}/evaluation/{self.evaluation2.pk}/textanswers" + + with run_in_staff_mode(self): + # Since Evaluation 1 has an extra text answer, it should be first + page = self.app.get(url, user=self.manager) + self.assertIn( + f'data-evaluation="{evaluations[1].pk}"', + str(page.html.select_one("span[data-next-evaluation-index]")), + ) + + # Since Evaluation 0 has an earlier end date, it should now be first + evaluations[0].vote_end_date = datetime.date.today() - datetime.timedelta(days=4) + evaluations[0].save() + page = self.app.get(url, user=self.manager) + self.assertIn( + f'data-evaluation="{evaluations[0].pk}"', + str(page.html.select_one("span[data-next-evaluation-index]")), + ) + + # Since the grading process for Evaluation 1 is finished, it should be first + evaluations[1].wait_for_grade_upload_before_publishing = False + evaluations[1].save() + page = self.app.get(url, user=self.manager) + self.assertIn( + f'data-evaluation="{evaluations[1].pk}"', + str(page.html.select_one("span[data-next-evaluation-index]")), + ) + def test_num_queries_is_constant(self): let_user_vote_for_evaluation(self.student2, self.evaluation) with run_in_staff_mode(self):
Prioritize evaluations with finished grading for review In quick text answer review, evaluations where the grading process is finished should be shown first in the list of suggested evaluations for review. The list is created in `evap.staff.tools.find_unreviewed_evaluations`.
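The change amounts to a compound sort key evaluated in Python (the grading flag is a model property, so the database cannot order on it); condensed from the patch above, with `candidate_evaluations` standing in for the filtered queryset:

```python
# Condensed from the patch above: finished grading first, then earlier end
# dates, then more unreviewed text answers.
suggested = sorted(
    candidate_evaluations,  # annotated with num_unreviewed_textanswers
    key=lambda e: (-e.grading_process_is_finished, e.vote_end_date, -e.num_unreviewed_textanswers),
)
```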
2021-11-15T21:20:39
e-valuation/EvaP
1,674
e-valuation__EvaP-1674
[ "1612" ]
f8119b440103dfe93001edacd16758d02cee0814
diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -1,5 +1,4 @@ import logging -import operator import secrets import uuid from collections import defaultdict, namedtuple @@ -1711,15 +1710,13 @@ def get_sorted_evaluations_voted_for(self): return self.evaluations_voted_for.order_by("course__semester__created_at", "name_de") def get_sorted_due_evaluations(self): - due_evaluations = dict() - for evaluation in Evaluation.objects.filter(participants=self, state=Evaluation.State.IN_EVALUATION).exclude( - voters=self - ): - due_evaluations[evaluation] = (evaluation.vote_end_date - date.today()).days - - # Sort evaluations by number of days left for evaluation and bring them to following format: - # [(evaluation, due_in_days), ...] - return sorted(due_evaluations.items(), key=operator.itemgetter(1)) + evaluations_and_days_left = ( + (evaluation, evaluation.days_left_for_evaluation) + for evaluation in Evaluation.objects.filter( + participants=self, state=Evaluation.State.IN_EVALUATION + ).exclude(voters=self) + ) + return sorted(evaluations_and_days_left, key=lambda tup: (tup[1], tup[0].full_name)) def validate_template(value):
diff --git a/evap/evaluation/tests/test_models.py b/evap/evaluation/tests/test_models.py --- a/evap/evaluation/tests/test_models.py +++ b/evap/evaluation/tests/test_models.py @@ -627,6 +627,39 @@ def test_email_domain_replacement(self): user = baker.make(UserProfile, email="[email protected]") self.assertEqual(user.email, "[email protected]") + def test_get_sorted_due_evaluations(self): + student = baker.make(UserProfile, email="[email protected]") + course = baker.make(Course) + evaluation1 = baker.make( + Evaluation, + course=course, + name_en="C", + name_de="C", + vote_end_date=date.today(), + state=Evaluation.State.IN_EVALUATION, + participants=[student], + ) + evaluation2 = baker.make( + Evaluation, + course=course, + name_en="B", + name_de="B", + vote_end_date=date.today(), + state=Evaluation.State.IN_EVALUATION, + participants=[student], + ) + evaluation3 = baker.make( + Evaluation, + course=course, + name_en="A", + name_de="A", + vote_end_date=date.today() + timedelta(days=1), + state=Evaluation.State.IN_EVALUATION, + participants=[student], + ) + sorted_evaluations = student.get_sorted_due_evaluations() + self.assertEqual(sorted_evaluations, [(evaluation2, 0), (evaluation1, 0), (evaluation3, 1)]) + class ParticipationArchivingTests(TestCase): @classmethod
Due evaluations ordering The reminder message about due evaluations (management command `send_reminders`) sorts them by the number of remaining days. A second ordering level should be added, ordering evaluations with the same number of remaining days by their full name.
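Condensed from the patch above, the two-level ordering looks like this (`due_evaluations` stands in for the filtered queryset):

```python
# Sort by days left first, then by the evaluation's full name as tie-breaker.
evaluations_and_days_left = (
    (evaluation, evaluation.days_left_for_evaluation) for evaluation in due_evaluations
)
ordered = sorted(evaluations_and_days_left, key=lambda tup: (tup[1], tup[0].full_name))
```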
2021-11-23T11:28:50
e-valuation/EvaP
1,686
e-valuation__EvaP-1686
[ "1678" ]
1e936a7654345b658e3febd31e9c06a5f995ccde
diff --git a/evap/evaluation/migrations/0123_evaluation_state_fsm_int.py b/evap/evaluation/migrations/0123_evaluation_state_fsm_int.py --- a/evap/evaluation/migrations/0123_evaluation_state_fsm_int.py +++ b/evap/evaluation/migrations/0123_evaluation_state_fsm_int.py @@ -1,5 +1,4 @@ from django.db import migrations -import django_fsm # as defined in the Evaluation model @@ -24,28 +23,52 @@ class State: "reviewed": State.REVIEWED, "published": State.PUBLISHED, } +REV_CONVERSION = {val: key for key, val in CONVERSION.items()} -def str_to_int(apps, _schema_editor): +def model_str_to_int(apps, _schema_editor): Evaluation = apps.get_model("evaluation", "Evaluation") for string_type, int_type in CONVERSION.items(): Evaluation.objects.filter(state=string_type).update(int_state=int_type) -def int_to_str(apps, _schema_editor): +def model_int_to_str(apps, _schema_editor): Evaluation = apps.get_model("evaluation", "Evaluation") for string_type, int_type in CONVERSION.items(): Evaluation.objects.filter(int_state=int_type).update(state=string_type) +def logentries_str_to_int(apps, _schema_editor): + LogEntry = apps.get_model("evaluation", "LogEntry") + for entry in LogEntry.objects.filter(content_type__app_label="evaluation", content_type__model="evaluation"): + if "state" in entry.data: + for key in entry.data["state"]: + entry.data["state"][key] = [ + CONVERSION[val] if val in CONVERSION else val for val in entry.data["state"][key] + ] + entry.save() + + +def logentries_int_to_str(apps, _schema_editor): + LogEntry = apps.get_model("evaluation", "LogEntry") + for entry in LogEntry.objects.filter(content_type__app_label="evaluation", content_type__model="evaluation"): + if "state" in entry.data: + for key in entry.data["state"]: + entry.data["state"][key] = [ + REV_CONVERSION[val] if val in REV_CONVERSION else val for val in entry.data["state"][key] + ] + entry.save() + + class Migration(migrations.Migration): dependencies = [ - ('evaluation', '0122_prepare_evaluation_state_fsm_int'), + ("evaluation", "0122_prepare_evaluation_state_fsm_int"), ] operations = [ - migrations.RunPython(str_to_int, int_to_str), + migrations.RunPython(model_str_to_int, model_int_to_str), + migrations.RunPython(logentries_str_to_int, logentries_int_to_str), migrations.RemoveField( model_name="evaluation", name="state", diff --git a/evap/evaluation/migrations/0127_fix_logentry_types.py b/evap/evaluation/migrations/0127_fix_logentry_types.py new file mode 100644 --- /dev/null +++ b/evap/evaluation/migrations/0127_fix_logentry_types.py @@ -0,0 +1,19 @@ +import importlib +from django.db import migrations + +original_migration_module = importlib.import_module("evap.evaluation.migrations.0123_evaluation_state_fsm_int") + + +class Migration(migrations.Migration): + """ + Initially, we forgot to migrate logentries in 0123, so this migration cleans this up. + Note that 0123 is now modified to also take care of the logentries in the first place. + """ + + dependencies = [ + ("evaluation", "0126_add_textanswer_review_email_template"), + ] + + operations = [ + migrations.RunPython(original_migration_module.logentries_str_to_int, migrations.RunPython.noop), + ]
Migrate Evaluation.state in LogEntries In 37508eb2c426, the `Evaluation.state` was turned into an `FSMIntegerField`. However, the migration didn't update the evaluations' log entries. This leads to errors in `evaluation.models.transform_log_action` when rendering log messages because old entries hold strings (e.g. `"in_evaluation"`) which results in a `KeyError` for `STATE_STR_CONVERSION` that expects a number (e.g. `State.IN_EVALUATION`).
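The cleanup is a data migration over the evaluation log entries that maps the stored state strings to their integer values; condensed from the patch above (`CONVERSION` is the string-to-integer mapping defined there):

```python
# Condensed from the patch above: rewrite state values stored in evaluation
# log entries from the old string representation to the new integers.
def logentries_str_to_int(apps, _schema_editor):
    LogEntry = apps.get_model("evaluation", "LogEntry")
    entries = LogEntry.objects.filter(content_type__app_label="evaluation", content_type__model="evaluation")
    for entry in entries:
        if "state" in entry.data:
            for key in entry.data["state"]:
                entry.data["state"][key] = [CONVERSION.get(value, value) for value in entry.data["state"][key]]
            entry.save()
```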
2021-12-13T22:11:41
e-valuation/EvaP
1,690
e-valuation__EvaP-1690
[ "1621" ]
1427e47d4eddec45d443bc441d61d4298219dbd8
diff --git a/evap/results/views.py b/evap/results/views.py --- a/evap/results/views.py +++ b/evap/results/views.py @@ -21,12 +21,9 @@ RatingResult, TextResult, annotate_distributions_and_grades, - calculate_average_distribution, can_textanswer_be_seen_by, - distribution_to_grade, get_evaluations_with_course_result_attributes, get_results, - get_single_result_rating_result, ) @@ -337,16 +334,9 @@ def get_evaluations_of_course(course, request): course_evaluations += course.evaluations.filter( state__in=[Evaluation.State.IN_EVALUATION, Evaluation.State.EVALUATED, Evaluation.State.REVIEWED] ) - + annotate_distributions_and_grades(course_evaluations) course_evaluations = get_evaluations_with_course_result_attributes(course_evaluations) - for course_evaluation in course_evaluations: - if course_evaluation.is_single_result: - course_evaluation.single_result_rating_result = get_single_result_rating_result(course_evaluation) - else: - course_evaluation.distribution = calculate_average_distribution(course_evaluation) - course_evaluation.avg_grade = distribution_to_grade(course_evaluation.distribution) - return course_evaluations
diff --git a/evap/grades/tests.py b/evap/grades/tests.py --- a/evap/grades/tests.py +++ b/evap/grades/tests.py @@ -214,7 +214,7 @@ def test_does_not_crash(self): def test_403_on_deleted(self): baker.make(Semester, pk=1, grade_documents_are_deleted=True) - self.app.get("/grades/semester/1", user=self.grade_publisher, status=403) + self.app.get(self.url, user=self.grade_publisher, status=403) class GradeCourseViewTest(WebTest): @@ -231,14 +231,14 @@ def setUpTestData(cls): def test_does_not_crash(self): semester = baker.make(Semester, pk=1, grade_documents_are_deleted=False) baker.make(Evaluation, course=baker.make(Course, pk=1, semester=semester), state=Evaluation.State.PREPARED) - self.app.get("/grades/semester/1/course/1", user=self.grade_publisher, status=200) + self.app.get(self.url, user=self.grade_publisher, status=200) def test_403_on_archived_semester(self): archived_semester = baker.make(Semester, pk=1, grade_documents_are_deleted=True) baker.make( Evaluation, course=baker.make(Course, pk=1, semester=archived_semester), state=Evaluation.State.PREPARED ) - self.app.get("/grades/semester/1/course/1", user=self.grade_publisher, status=403) + self.app.get(self.url, user=self.grade_publisher, status=403) class GradeEditTest(WebTest): diff --git a/evap/results/tests/test_tools.py b/evap/results/tests/test_tools.py --- a/evap/results/tests/test_tools.py +++ b/evap/results/tests/test_tools.py @@ -297,7 +297,7 @@ def test_distribution_with_general_grade_question(self): def test_get_single_result_rating_result(self): single_result_evaluation = baker.make(Evaluation, state=Evaluation.State.PUBLISHED, is_single_result=True) - questionnaire = Questionnaire.objects.get(name_en=Questionnaire.SINGLE_RESULT_QUESTIONNAIRE_NAME) + questionnaire = Questionnaire.single_result_questionnaire() contribution = baker.make( Contribution, contributor=baker.make(UserProfile), diff --git a/evap/results/tests/test_views.py b/evap/results/tests/test_views.py --- a/evap/results/tests/test_views.py +++ b/evap/results/tests/test_views.py @@ -293,7 +293,7 @@ class TestResultsViewContributionWarning(WebTest): @classmethod def setUpTestData(cls): cls.manager = make_manager() - cls.semester = baker.make(Semester, id=3) + cls.semester = baker.make(Semester) contributor = baker.make(UserProfile) # Set up an evaluation with one question but no answers @@ -301,7 +301,6 @@ def setUpTestData(cls): student2 = baker.make(UserProfile) cls.evaluation = baker.make( Evaluation, - id=21, state=Evaluation.State.PUBLISHED, course=baker.make(Course, semester=cls.semester), participants=[student1, student2], @@ -337,12 +336,10 @@ def test_few_answers_evaluation_show_warning(self): class TestResultsSemesterEvaluationDetailView(WebTestStaffMode): - url = "/results/semester/2/evaluation/21" - @classmethod def setUpTestData(cls): cls.manager = make_manager() - cls.semester = baker.make(Semester, id=2) + cls.semester = baker.make(Semester) contributor = baker.make(UserProfile, email="[email protected]") responsible = baker.make(UserProfile, email="[email protected]") @@ -351,7 +348,7 @@ def setUpTestData(cls): # Normal evaluation with responsible and contributor. 
cls.evaluation = baker.make( - Evaluation, id=21, state=Evaluation.State.PUBLISHED, course=baker.make(Course, semester=cls.semester) + Evaluation, state=Evaluation.State.PUBLISHED, course=baker.make(Course, semester=cls.semester) ) baker.make( @@ -368,6 +365,8 @@ def setUpTestData(cls): role=Contribution.Role.EDITOR, ) + cls.url = f"/results/semester/{cls.semester.id}/evaluation/{cls.evaluation.id}" + def test_questionnaire_ordering(self): top_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) contributor_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.CONTRIBUTOR) @@ -473,6 +472,48 @@ def test_preview_with_rating_answers(self): url = f"/results/semester/{self.semester.id}/evaluation/{evaluation.id}" self.app.get(url, user=self.manager) + def test_unpublished_single_results_show_results(self): + """Regression test for #1621""" + # make regular evaluation with some answers + participants = baker.make(UserProfile, _bulk_create=True, _quantity=20) + evaluation = baker.make( + Evaluation, + state=Evaluation.State.REVIEWED, + course=baker.make(Course, semester=self.semester), + participants=participants, + voters=participants, + ) + questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) + likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire, order=1) + evaluation.general_contribution.questionnaires.set([questionnaire]) + make_rating_answer_counters(likert_question, evaluation.general_contribution) + + # make single result + evaluation2: Evaluation = baker.make( + Evaluation, + state=Evaluation.State.REVIEWED, + course=evaluation.course, + is_single_result=True, + name_de="foo", + name_en="foo", + participants=participants, + voters=participants, + ) + evaluation2.general_contribution.questionnaires.set([questionnaire]) + make_rating_answer_counters(likert_question, evaluation2.general_contribution) + + cache_results(evaluation) + + url = f"/results/semester/{self.semester.id}/evaluation/{evaluation.id}" + response = self.app.get(url, user=self.manager) + + # this one is the course result. The two evaluations shouldn't use this + self.assertTemplateUsed(response, "distribution_with_grade_disabled.html", count=1) + # Both evaluations should use this + self.assertTemplateUsed(response, "evaluation_result_widget.html", count=2) + # Both evaluations should use this, plus one for the questionnaire + self.assertTemplateUsed(response, "distribution_with_grade.html", count=3) + class TestResultsSemesterEvaluationDetailViewFewVoters(WebTest): @classmethod @@ -823,7 +864,6 @@ def setUpTestData(cls): responsible = baker.make(UserProfile, email="[email protected]") cls.evaluation = baker.make( Evaluation, - id=21, state=Evaluation.State.PUBLISHED, course=baker.make(Course, semester=cls.semester, responsibles=[responsible]), )
Single results' results are hidden in course detail page if not published If a single result is shown on a course detail page of another evaluation of the same course, its results are hidden if it's not yet published. Non-published regular evaluations have their results shown, just as all published evaluations do. The fix is simple, but I wanted to make sure this behavior is not expected...? It seemed inconsistent to me. See screenshot. Note how Bar's results are not shown, but "Software Architecture" and "Bar2" have their results shown. ![image](https://user-images.githubusercontent.com/1891915/131266243-2494d449-db7c-4146-ab35-e77d77f25bf7.png)
Nice, this indeed is an inconsistency that should be fixed. On another matter: The weighting percentages in the screenshot seem broken, shouldn't they add up to 100%? @karyon do you want to continue working on this? yes!
2022-01-01T14:24:09
e-valuation/EvaP
1,695
e-valuation__EvaP-1695
[ "1691", "1691" ]
482aa81de88b9cb83f76d54885be720b98517b25
diff --git a/evap/results/tools.py b/evap/results/tools.py --- a/evap/results/tools.py +++ b/evap/results/tools.py @@ -298,8 +298,8 @@ def get_evaluations_with_course_result_attributes(evaluations): ) course_id_evaluation_weight_sum_pairs = ( - Course.objects.filter(evaluations__in=evaluations) - .annotate(Sum("evaluations__weight")) + Course.objects.annotate(Sum("evaluations__weight")) + .filter(pk__in=Course.objects.filter(evaluations__in=evaluations)) # is needed, see #1691 .values_list("id", "evaluations__weight__sum") )
diff --git a/evap/results/tests/test_views.py b/evap/results/tests/test_views.py --- a/evap/results/tests/test_views.py +++ b/evap/results/tests/test_views.py @@ -20,6 +20,7 @@ Evaluation, Question, Questionnaire, + RatingAnswerCounter, Semester, UserProfile, ) @@ -31,7 +32,7 @@ ) from evap.results.exporters import TextAnswerExporter from evap.results.tools import cache_results -from evap.results.views import get_evaluations_with_prefetched_data +from evap.results.views import get_evaluations_with_prefetched_data, warm_up_template_cache from evap.staff.tests.utils import WebTestStaffMode, helper_exit_staff_mode, run_in_staff_mode @@ -264,6 +265,32 @@ def make_course_with_evaluations(unique_suffix): caches["sessions"].clear() caches["results"].clear() + def test_evaluation_weight_sums(self): + """Regression test for #1691""" + student = baker.make(UserProfile, email="[email protected]") + course = baker.make(Course) + + published = baker.make( + Evaluation, + course=course, + name_en=iter(["ev1", "ev2", "ev3"]), + name_de=iter(["ev1", "ev2", "ev3"]), + state=iter([Evaluation.State.NEW, Evaluation.State.PUBLISHED, Evaluation.State.PUBLISHED]), + weight=iter([8, 3, 4]), + is_single_result=True, + _quantity=3, + )[1:] + + contributions = [e.general_contribution for e in published] + baker.make(RatingAnswerCounter, contribution=iter(contributions), answer=2, count=2, _quantity=len(published)) + warm_up_template_cache(published) + + page = self.app.get(self.url, user=student) + decoded = page.body.decode() + + self.assertTrue(decoded.index("ev2") < decoded.index(" 20% ") < decoded.index("ev3") < decoded.index(" 26% ")) + self.assertNotContains(page, " 53% ") + class TestGetEvaluationsWithPrefetchedData(TestCase): def test_returns_correct_participant_count(self):
Evaluation weight doesn't add up to 100% Split out from #1621. See screenshot: ![image](https://user-images.githubusercontent.com/1891915/147852750-2dd6c5c2-a500-42a1-b997-ec2e74039da8.png)
@janno42 if there's an evaluation with some non-zero weight that's not shown (evaluation hasn't started yet for staff users, evaluation is not published for all other users), should the shown weights still add up to 100% or to 100% minus the percentage of the hidden evaluation? what if the evaluation is shown here but the grade (and thus the percentage) is not shown? The percentage values should reflect the total weighting (from a global permission-independent view) even if single evaluations are not shown. So the values may add up to less than 100%. If an evaluation is shown but the grade is not yet displayed, we should optimally already show the percentage. The problem is that [this annotate](https://github.com/e-valuation/EvaP/blob/1427e47d4eddec45d443bc441d61d4298219dbd8/evap/results/tools.py#L301) operates on the filtered evaluations. Swapping the filter and the annotate should work [according to the docs](https://docs.djangoproject.com/en/4.0/topics/db/aggregation/#order-of-annotate-and-filter-clauses), but it gives me the sum multiplied by the number of evaluations per course. This might be a variant of [this documented bug](https://docs.djangoproject.com/en/2.2/topics/db/aggregation/#combining-multiple-aggregations). Fetching the courses separately like below should work. ```python course_id_evaluation_weight_sum_pairs = ( Course.objects .filter(pk__in=Course.objects.filter(evaluations__in=evaluations)) .annotate(Sum("evaluations__weight")) .values_list("id", "evaluations__weight__sum") ) ``` An additional weirdness of this view is that first the correct value is annotated [here](https://github.com/e-valuation/EvaP/blob/1427e47d4eddec45d443bc441d61d4298219dbd8/evap/results/views.py#L195) (it is correct because the list of evaluations contains all evaluations of the course), but then overwritten with an incorrect annotation [here](https://github.com/e-valuation/EvaP/blob/1427e47d4eddec45d443bc441d61d4298219dbd8/evap/results/views.py#L210). Generally, the filter-then-annotate approach in results/views.py:210 is wrong, so I think fixing that method in itself is an okay solution for this issue. Analyzing why the annotation happens twice is maybe something for another issue.
2022-01-10T19:42:56
e-valuation/EvaP
1,714
e-valuation__EvaP-1714
[ "1711" ]
ee611b71ceffe5575f5d1807867a0a20c5b9f269
diff --git a/evap/staff/importers.py b/evap/staff/importers.py --- a/evap/staff/importers.py +++ b/evap/staff/importers.py @@ -14,7 +14,7 @@ from evap.evaluation.models import Contribution, Course, CourseType, Degree, Evaluation, UserProfile from evap.evaluation.tools import clean_email -from evap.staff.tools import ImportType, create_user_list_html_string_for_message, merge_dictionaries_of_sets +from evap.staff.tools import ImportType, create_user_list_html_string_for_message def sorted_messages(messages): @@ -402,18 +402,25 @@ def process_evaluation(self, evaluation_data, sheetname, row): self.evaluations[evaluation_id] = evaluation_data self.names_de.add(evaluation_data.name_de) else: - if evaluation_data.equals_except_for_degrees(self.evaluations[evaluation_id]): + known_data = self.evaluations[evaluation_id] + if evaluation_data.equals_except_for_degrees(known_data): self.warnings[ImporterWarning.DEGREE].append( _( 'Sheet "{}", row {}: The course\'s "{}" degree differs from it\'s degree in a previous row.' " Both degrees have been set for the course." ).format(sheetname, row + 1, evaluation_data.name_en) ) - self.evaluations[evaluation_id].degrees |= evaluation_data.degrees - self.evaluations[evaluation_id].errors = merge_dictionaries_of_sets( - self.evaluations[evaluation_id].errors, evaluation_data.errors - ) - elif evaluation_data != self.evaluations[evaluation_id]: + + known_data.degrees |= evaluation_data.degrees + + assert evaluation_data.errors.keys() <= {"degrees", "course_type", "is_graded"} + assert known_data.errors.get("course_type") == evaluation_data.errors.get("course_type") + assert known_data.errors.get("is_graded") == evaluation_data.errors.get("is_graded") + + degree_errors = known_data.errors.get("degrees", set()) | evaluation_data.errors.get("degrees", set()) + if len(degree_errors) > 0: + known_data.errors["degrees"] = degree_errors + elif evaluation_data != known_data: self.errors[ImporterError.COURSE].append( _('Sheet "{}", row {}: The course\'s "{}" data differs from it\'s data in a previous row.').format( sheetname, row + 1, evaluation_data.name_en @@ -439,6 +446,9 @@ def check_evaluation_data_correctness(self, semester): self.errors[ImporterError.COURSE].append( _("Course {} does already exist in this semester.").format(evaluation_data.name_de) ) + + assert evaluation_data.errors.keys() <= {"degrees", "course_type", "is_graded"} + if "degrees" in evaluation_data.errors: missing_degree_names |= evaluation_data.errors["degrees"] if "course_type" in evaluation_data.errors: diff --git a/evap/staff/tools.py b/evap/staff/tools.py --- a/evap/staff/tools.py +++ b/evap/staff/tools.py @@ -1,7 +1,6 @@ import os from datetime import date, datetime, timedelta from enum import Enum -from typing import Any, Dict, Set from django.conf import settings from django.contrib import messages @@ -369,11 +368,3 @@ def remove_user_from_represented_and_ccing_users(user, ignored_users=None, test_ cc_user.cc_users.remove(user) remove_messages.append(_("Removed {} from the CC users of {}.").format(user.full_name, cc_user.full_name)) return remove_messages - - -def merge_dictionaries_of_sets(a: Dict[Any, Set], b: Dict[Any, Set]) -> Dict[Any, Set]: - return { - **a, - **b, - **({key: (a[key] | b[key]) for key in a if key in b}), - }
diff --git a/evap/staff/fixtures/excel_files_test_data.py b/evap/staff/fixtures/excel_files_test_data.py --- a/evap/staff/fixtures/excel_files_test_data.py +++ b/evap/staff/fixtures/excel_files_test_data.py @@ -122,6 +122,14 @@ ] } +test_enrollment_data_error_merge_filedata = { + 'MA Belegungen': [ + ['Degree', 'Student last name', 'Student first name', 'Student email address', 'Course kind', 'Course is graded', 'Course name (de)', 'Course name (en)', 'Responsible title', 'Responsible last name', 'Responsible first name', 'Responsible email address'], + ['Grandmaster', 'Quid', 'Bastius', '[email protected]', 'jaminar', 'probably not', 'Bauen', 'Build', '', 'Sed', 'Diam', '[email protected]'], + ['Beginner,Bachelor', 'Lorem', 'Ipsum', '[email protected]', 'jaminar', 'probably not', 'Bauen', 'Build', '', 'Sed', 'Diam', '[email protected]'], + ], +} + test_enrollment_data_import_names_filedata = { 'MA Belegungen': [ ['Degree', 'Student last name', 'Student first name', 'Student email address', 'Course kind', 'Course is graded', 'Course name (de)', 'Course name (en)', 'Responsible title', 'Responsible last name', 'Responsible first name', 'Responsible email address'], diff --git a/evap/staff/tests/test_importers.py b/evap/staff/tests/test_importers.py --- a/evap/staff/tests/test_importers.py +++ b/evap/staff/tests/test_importers.py @@ -243,6 +243,17 @@ def test_degrees_are_merged(self): course = Course.objects.get(name_de="Bauen") self.assertSetEqual(set(course.degrees.all()), set(Degree.objects.filter(name_de__in=["Master", "Bachelor"]))) + def test_errors_are_merged(self): + excel_content = excel_data.create_memory_excel_file(excel_data.test_enrollment_data_error_merge_filedata) + __, warnings, errors = EnrollmentImporter.process( + excel_content, self.semester, self.vote_start_datetime, self.vote_end_date, test_run=False + ) + self.assertIn("Both degrees have been set for the course", "".join(warnings[ImporterWarning.DEGREE])) + self.assertIn("is probably not, but must be", "".join(errors[ImporterError.IS_GRADED])) + self.assertIn("jaminar", "".join(errors[ImporterError.COURSE_TYPE_MISSING])) + self.assertIn("Beginner", "".join(errors[ImporterError.DEGREE_MISSING])) + self.assertIn("Grandmaster", "".join(errors[ImporterError.DEGREE_MISSING])) + def test_course_type_and_degrees_are_retrieved_with_import_names(self): excel_content = excel_data.create_memory_excel_file(excel_data.test_enrollment_data_import_names_filedata) diff --git a/evap/staff/tests/test_tools.py b/evap/staff/tests/test_tools.py --- a/evap/staff/tests/test_tools.py +++ b/evap/staff/tests/test_tools.py @@ -7,12 +7,7 @@ from evap.evaluation.models import Contribution, Course, Evaluation, UserProfile from evap.evaluation.tests.tools import WebTest from evap.rewards.models import RewardPointGranting, RewardPointRedemption -from evap.staff.tools import ( - delete_navbar_cache_for_users, - merge_dictionaries_of_sets, - merge_users, - remove_user_from_represented_and_ccing_users, -) +from evap.staff.tools import delete_navbar_cache_for_users, merge_users, remove_user_from_represented_and_ccing_users class NavbarCacheTest(WebTest): @@ -262,9 +257,3 @@ def test_do_nothing_if_test_run(self): self.assertEqual([set(user1.delegates.all()), set(user1.cc_users.all())], [{delete_user}, {delete_user}]) self.assertEqual([set(user2.delegates.all()), set(user2.cc_users.all())], [{delete_user}, {delete_user}]) self.assertEqual(len(messages), 4) - - -class MiscellaneousToolsTest(TestCase): - def test_merge_dictionaries_of_sets(self): - 
self.assertEqual(merge_dictionaries_of_sets({"a": set([1])}, {"b": set([2])}), {"a": set([1]), "b": set([2])}) - self.assertEqual(merge_dictionaries_of_sets({"a": set([1])}, {"a": set([2])}), {"a": set([1, 2])})
Dictionary merge error in course importer The following code wants to merge error dicts. However, this fails with a `TypeError` (`unsupported operand type(s) for |: 'str' and 'str'`) in `merge_dictionaries_of_sets`. https://github.com/e-valuation/EvaP/blob/32caa9d799a938f88fa49940b16f4f44bd89d431/evap/staff/importers.py#L413-L415 Reproduce by importing a file with two rows: two participants of the same course, where the rows differ in the course's degree (e.g., `BA` and `MA`) and additionally contain another error (e.g., grade `ungraded` instead of `no`).
This happens because the `errors` aren't actually shaped like this function assumes: looking at `EvaluationDataFactory`, we can see that the `is_graded` and `course_type` keys actually map to single strings, not a `set` like `errors["degrees"]`. We could fix this by either only merging the keys that we know are `set`s, or by making all value types `set` too (and adding a type hint?). At first, I thought that since there could be multiple wrong course types or `is_graded` strings for the same evaluation, we should take the second approach. _But_, since this error only happens if the two rows are `equal[s]_except_for_degrees`, the first is probably more appropriate.
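A tiny, purely illustrative repro of the failure mode and of the "only merge the keys known to hold sets" variant discussed above:

```python
# Two rows of the same course: "degrees" holds a set per row, while the other
# error values are plain strings.
a = {"degrees": {"Master"}, "is_graded": "probably not"}
b = {"degrees": {"Bachelor"}, "is_graded": "probably not"}

# A generic merge that applies "|" to every common key hits the string values
# and raises TypeError: unsupported operand type(s) for |: 'str' and 'str'.
# Restricting the union to the set-valued "degrees" key avoids that:
merged_degrees = a.get("degrees", set()) | b.get("degrees", set())
assert merged_degrees == {"Master", "Bachelor"}
```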
2022-02-21T21:26:29
e-valuation/EvaP
1,730
e-valuation__EvaP-1730
[ "1679" ]
13e63cb5278ac89c8417c9d79245a187cc81c1f9
diff --git a/evap/staff/importers.py b/evap/staff/importers.py --- a/evap/staff/importers.py +++ b/evap/staff/importers.py @@ -9,12 +9,13 @@ from django.core.exceptions import ValidationError from django.db import transaction from django.utils.html import format_html, format_html_join +from django.utils.safestring import mark_safe from django.utils.translation import gettext as _ from django.utils.translation import gettext_lazy from evap.evaluation.models import Contribution, Course, CourseType, Degree, Evaluation, UserProfile from evap.evaluation.tools import clean_email, unordered_groupby -from evap.staff.tools import ImportType, create_user_list_html_string_for_message +from evap.staff.tools import ImportType, create_user_list_html_string_for_message, user_edit_link def sorted_messages(messages): @@ -321,13 +322,17 @@ def _create_user_string(user): @staticmethod def _create_user_data_mismatch_warning(user, user_data, test_run): if test_run: - msg = format_html(_("The existing user would be overwritten with the following data:")) + msg = _("The existing user would be overwritten with the following data:") else: - msg = format_html(_("The existing user was overwritten with the following data:")) - return ( - msg - + format_html("<br /> - {} ({})", ExcelImporter._create_user_string(user), _("existing")) - + format_html("<br /> - {} ({})", ExcelImporter._create_user_string(user_data), _("new")) + msg = _("The existing user was overwritten with the following data:") + return format_html( + "{}<br /> - {} ({}) [{}]<br /> - {} ({})", + msg, + ExcelImporter._create_user_string(user), + _("existing"), + user_edit_link(user.pk), + ExcelImporter._create_user_string(user_data), + _("new"), ) @staticmethod @@ -335,19 +340,26 @@ def _create_user_inactive_warning(user, test_run): user_string = ExcelImporter._create_user_string(user) if test_run: return format_html( - _("The following user is currently marked inactive and will be marked active upon importing: {}"), + _("The following user is currently marked inactive and will be marked active upon importing: {} [{}]"), user_string, + user_edit_link(user.pk), ) return format_html( - _("The following user was previously marked inactive and is now marked active upon importing: {}"), + _("The following user was previously marked inactive and is now marked active upon importing: {} [{}]"), user_string, + user_edit_link(user.pk), ) def _create_user_name_collision_warning(self, user_data, users_with_same_names): - warningstring = format_html(_("An existing user has the same first and last name as a new user:")) + warningstring = mark_safe(_("An existing user has the same first and last name as a new user:")) for user in users_with_same_names: - warningstring += format_html("<br /> - {} ({})", self._create_user_string(user), _("existing")) + warningstring += format_html( + "<br /> - {} ({}) [{}]", + self._create_user_string(user), + _("existing"), + user_edit_link(user.pk), + ) warningstring += format_html("<br /> - {} ({})", self._create_user_string(user_data), _("new")) self.warnings[ImporterWarning.DUPL].append(warningstring) diff --git a/evap/staff/tools.py b/evap/staff/tools.py --- a/evap/staff/tools.py +++ b/evap/staff/tools.py @@ -10,6 +10,7 @@ from django.core.exceptions import SuspiciousOperation from django.db import transaction from django.db.models import Count +from django.urls import reverse from django.utils.html import format_html, format_html_join from django.utils.translation import gettext_lazy as _ @@ -370,3 +371,11 @@ def 
remove_user_from_represented_and_ccing_users(user, ignored_users=None, test_ cc_user.cc_users.remove(user) remove_messages.append(_("Removed {} from the CC users of {}.").format(user.full_name, cc_user.full_name)) return remove_messages + + +def user_edit_link(user_id): + return format_html( + '<a href="{}" target=_blank><span class="fas fa-user-pen"></span> {}</a>', + reverse("staff:user_edit", kwargs={"user_id": user_id}), + _("edit user"), + )
diff --git a/evap/staff/tests/test_importers.py b/evap/staff/tests/test_importers.py --- a/evap/staff/tests/test_importers.py +++ b/evap/staff/tests/test_importers.py @@ -10,7 +10,7 @@ import evap.staff.fixtures.excel_files_test_data as excel_data from evap.evaluation.models import Contribution, Course, CourseType, Degree, Evaluation, Semester, UserProfile from evap.staff.importers import EnrollmentImporter, ImporterError, ImporterWarning, PersonImporter, UserImporter -from evap.staff.tools import ImportType +from evap.staff.tools import ImportType, user_edit_link class TestUserImporter(TestCase): @@ -71,7 +71,7 @@ def test_created_users(self): self.assertTrue(UserProfile.objects.filter(email="[email protected]").exists()) def test_duplicate_warning(self): - baker.make(UserProfile, first_name="Lucilia", last_name="Manilium", email="[email protected]") + user = baker.make(UserProfile, first_name="Lucilia", last_name="Manilium", email="[email protected]") __, __, warnings_test, __ = UserImporter.process(self.valid_excel_content, test_run=True) __, __, warnings_no_test, __ = UserImporter.process(self.valid_excel_content, test_run=False) @@ -81,8 +81,8 @@ def test_duplicate_warning(self): warnings_test[ImporterWarning.DUPL], [ "An existing user has the same first and last name as a new user:<br />" - " - Lucilia Manilium, [email protected] (existing)<br />" - " - Lucilia Manilium, [email protected] (new)" + f" - Lucilia Manilium, [email protected] (existing) [{user_edit_link(user.pk)}]<br />" + " - Lucilia Manilium, [email protected] (new)", ], ) @@ -123,14 +123,14 @@ def test_invalid_file_error(self): self.assertEqual(UserProfile.objects.count(), original_user_count) def test_import_makes_inactive_user_active(self): - baker.make(UserProfile, email="[email protected]", is_active=False) + user = baker.make(UserProfile, email="[email protected]", is_active=False) __, __, warnings_test, __ = UserImporter.process(self.valid_excel_content, test_run=True) self.assertEqual( warnings_test[ImporterWarning.INACTIVE], [ "The following user is currently marked inactive and will be marked active upon importing: " - " None None, [email protected]" + f" None None, [email protected] [{user_edit_link(user.pk)}]", ], ) @@ -139,7 +139,7 @@ def test_import_makes_inactive_user_active(self): warnings_no_test[ImporterWarning.INACTIVE], [ "The following user was previously marked inactive and is now marked active upon importing: " - " None None, [email protected]" + f" None None, [email protected] [{user_edit_link(user.pk)}]" ], ) diff --git a/evap/staff/tests/test_tools.py b/evap/staff/tests/test_tools.py --- a/evap/staff/tests/test_tools.py +++ b/evap/staff/tests/test_tools.py @@ -7,7 +7,12 @@ from evap.evaluation.models import Contribution, Course, Evaluation, UserProfile from evap.evaluation.tests.tools import WebTest from evap.rewards.models import RewardPointGranting, RewardPointRedemption -from evap.staff.tools import delete_navbar_cache_for_users, merge_users, remove_user_from_represented_and_ccing_users +from evap.staff.tools import ( + delete_navbar_cache_for_users, + merge_users, + remove_user_from_represented_and_ccing_users, + user_edit_link, +) class NavbarCacheTest(WebTest): @@ -257,3 +262,9 @@ def test_do_nothing_if_test_run(self): self.assertEqual([set(user1.delegates.all()), set(user1.cc_users.all())], [{delete_user}, {delete_user}]) self.assertEqual([set(user2.delegates.all()), set(user2.cc_users.all())], [{delete_user}, {delete_user}]) self.assertEqual(len(messages), 4) + + +class 
UserEditLinkTest(TestCase): + def test_user_edit_link(self): + user = baker.make(UserProfile) + self.assertIn(f"/staff/user/{user.id}/edit", user_edit_link(user.id)) diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -53,6 +53,7 @@ helper_set_dynamic_choices_field_value, run_in_staff_mode, ) +from evap.staff.tools import user_edit_link from evap.staff.views import get_evaluations_with_prefetched_data from evap.student.models import TextAnswerWarning @@ -536,7 +537,7 @@ def test_warning_handling(self): """ Tests whether warnings given from the importer are displayed """ - baker.make(UserProfile, email="[email protected]") + user = baker.make(UserProfile, email="[email protected]") page = self.app.get(self.url, user=self.manager) @@ -547,7 +548,7 @@ def test_warning_handling(self): self.assertContains( reply, "The existing user would be overwritten with the following data:<br />" - " - None None, [email protected] (existing)<br />" + f" - None None, [email protected] (existing) [{user_edit_link(user.pk)}]<br />" " - Lucilia Manilium, [email protected] (new)", ) @@ -1028,7 +1029,7 @@ def test_warning_handling(self): """ Tests whether warnings given from the importer are displayed """ - baker.make(UserProfile, email="[email protected]") + user = baker.make(UserProfile, email="[email protected]") page = self.app.get(self.url, user=self.manager) @@ -1042,7 +1043,7 @@ def test_warning_handling(self): self.assertContains( reply, "The existing user would be overwritten with the following data:<br />" - " - None None, [email protected] (existing)<br />" + f" - None None, [email protected] (existing) [{user_edit_link(user.pk)}]<br />" " - Lucilia Manilium, [email protected] (new)", ) @@ -2228,7 +2229,7 @@ def test_import_participants_warning_handling(self): """ Tests whether warnings given from the importer are displayed """ - baker.make(UserProfile, email="[email protected]") + user = baker.make(UserProfile, email="[email protected]") page = self.app.get(self.url, user=self.manager) @@ -2239,7 +2240,7 @@ def test_import_participants_warning_handling(self): self.assertContains( reply, "The existing user would be overwritten with the following data:<br />" - " - None None, [email protected] (existing)<br />" + f" - None None, [email protected] (existing) [{user_edit_link(user.pk)}]<br />" " - Lucilia Manilium, [email protected] (new)", ) @@ -2262,7 +2263,7 @@ def test_import_contributors_warning_handling(self): """ Tests whether warnings given from the importer are displayed """ - baker.make(UserProfile, email="[email protected]") + user = baker.make(UserProfile, email="[email protected]") page = self.app.get(self.url, user=self.manager) @@ -2273,7 +2274,7 @@ def test_import_contributors_warning_handling(self): self.assertContains( reply, "The existing user would be overwritten with the following data:<br />" - " - None None, [email protected] (existing)<br />" + f" - None None, [email protected] (existing) [{user_edit_link(user.pk)}]<br />" " - Lucilia Manilium, [email protected] (new)", )
Link to user edit pages in importer messages When the importer shows messages about users (such as "This new user has the same email as this existing user"), the respective users' edit pages should be linked in the message to enable managers to easily open the user profile for inspection.
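A minimal usage sketch of how such a helper might be embedded in an importer warning; the message wording and the `duplicate_user_warning` function below are illustrative and not taken from the repository:

```python
from django.utils.html import format_html

from evap.staff.tools import user_edit_link  # helper introduced in the patch above


def duplicate_user_warning(existing_user, new_name, new_email):
    # user_edit_link() returns pre-escaped, safe HTML, so format_html() keeps it
    # intact while still escaping the other interpolated values.
    return format_html(
        "An existing user has the same first and last name as a new user:<br />"
        " - {} (existing) [{}]<br />"
        " - {}, {} (new)",
        existing_user.full_name,
        user_edit_link(existing_user.pk),
        new_name,
        new_email,
    )
```

Because the link is built with `reverse()`, it keeps pointing at the user edit page even if the URL pattern changes later.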
2022-03-28T19:24:58
e-valuation/EvaP
1,751
e-valuation__EvaP-1751
[ "1487" ]
18ac9aa71915f04317f58f97d7b33af4e02a9288
diff --git a/evap/results/exporters.py b/evap/results/exporters.py --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -244,9 +244,9 @@ def write_overall_results(self, evaluations_with_results, course_results_exist): def write_questionnaire(self, questionnaire, evaluations_with_results, contributor): if contributor and questionnaire.type == Questionnaire.Type.CONTRIBUTOR: - self.write_cell(f"{questionnaire.name} ({contributor.full_name})", "bold") + self.write_cell(f"{questionnaire.public_name} ({contributor.full_name})", "bold") else: - self.write_cell(questionnaire.name, "bold") + self.write_cell(questionnaire.public_name, "bold") # first cell of row is printed above self.write_empty_row_with_styles(["border_left_right"] * len(evaluations_with_results))
diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py --- a/evap/results/tests/test_exporters.py +++ b/evap/results/tests/test_exporters.py @@ -82,16 +82,16 @@ def test_questionnaire_ordering(self): binary_content.seek(0) workbook = xlrd.open_workbook(file_contents=binary_content.read()) - self.assertEqual(workbook.sheets()[0].row_values(4)[0], questionnaire_1.name) + self.assertEqual(workbook.sheets()[0].row_values(4)[0], questionnaire_1.public_name) self.assertEqual(workbook.sheets()[0].row_values(5)[0], question_1.text) - self.assertEqual(workbook.sheets()[0].row_values(7)[0], questionnaire_2.name) + self.assertEqual(workbook.sheets()[0].row_values(7)[0], questionnaire_2.public_name) self.assertEqual(workbook.sheets()[0].row_values(8)[0], question_2.text) - self.assertEqual(workbook.sheets()[0].row_values(10)[0], questionnaire_3.name) + self.assertEqual(workbook.sheets()[0].row_values(10)[0], questionnaire_3.public_name) self.assertEqual(workbook.sheets()[0].row_values(11)[0], question_3.text) - self.assertEqual(workbook.sheets()[0].row_values(13)[0], questionnaire_4.name) + self.assertEqual(workbook.sheets()[0].row_values(13)[0], questionnaire_4.public_name) self.assertEqual(workbook.sheets()[0].row_values(14)[0], question_4.text) def test_heading_question_filtering(self): @@ -130,7 +130,7 @@ def test_heading_question_filtering(self): binary_content.seek(0) workbook = xlrd.open_workbook(file_contents=binary_content.read()) - self.assertEqual(workbook.sheets()[0].row_values(4)[0], questionnaire.name) + self.assertEqual(workbook.sheets()[0].row_values(4)[0], questionnaire.public_name) self.assertEqual(workbook.sheets()[0].row_values(5)[0], heading_question.text) self.assertEqual(workbook.sheets()[0].row_values(6)[0], likert_question.text) self.assertEqual(workbook.sheets()[0].row_values(7)[0], "") @@ -367,7 +367,7 @@ def test_exclude_used_but_unanswered_questionnaires(self): cache_results(evaluation) sheet = self.get_export_sheet(evaluation.course.semester, degree, [evaluation.course.type.id]) - self.assertEqual(sheet.row_values(4)[0], used_questionnaire.name) + self.assertEqual(sheet.row_values(4)[0], used_questionnaire.public_name) self.assertEqual(sheet.row_values(5)[0], used_question.text) self.assertNotIn(unused_questionnaire.name, sheet.col_values(0)) self.assertNotIn(unused_question.text, sheet.col_values(0)) @@ -530,12 +530,12 @@ def test_contributor_result_export(self): workbook.sheets()[0].row_values(0)[2], f"{evaluation_2.full_name}\n{evaluation_2.course.semester.name}\n{other_contributor.full_name}", ) - self.assertEqual(workbook.sheets()[0].row_values(4)[0], general_questionnaire.name) + self.assertEqual(workbook.sheets()[0].row_values(4)[0], general_questionnaire.public_name) self.assertEqual(workbook.sheets()[0].row_values(5)[0], general_question.text) self.assertEqual(workbook.sheets()[0].row_values(5)[2], 4.0) self.assertEqual( workbook.sheets()[0].row_values(7)[0], - f"{contributor_questionnaire.name} ({contributor.full_name})", + f"{contributor_questionnaire.public_name} ({contributor.full_name})", ) self.assertEqual(workbook.sheets()[0].row_values(8)[0], contributor_question.text) self.assertEqual(workbook.sheets()[0].row_values(8)[2], 3.0)
Questionnaire names in result exports When hiding a questionnaire and creating a new version, the current date will be added to the title of the old questionnaire. This information will be shown in exports, which might confuse users. We should think about only showing the display name instead or handling it in a completely different way. We have to think about exports where multiple old questionnaires of the same name are included, though.
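For orientation, a rough sketch of what a separate display name on the questionnaire model could look like; this is an assumption for illustration only, not the project's actual model definition:

```python
from django.db import models


class Questionnaire(models.Model):
    # internal name; archiving a questionnaire may append the current date here
    name_en = models.CharField(max_length=1024)
    # name shown to users and in exports, untouched by archiving (assumed field)
    public_name_en = models.CharField(max_length=1024, blank=True)

    @property
    def public_name(self):
        # hypothetical fallback: use the internal name if no display name is set
        return self.public_name_en or self.name_en
```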
The questionnaire's display name should be used in exports for hidden questionnaires (which are usually "archived" and not actively used anymore).
2022-05-02T19:38:24
e-valuation/EvaP
1,764
e-valuation__EvaP-1764
[ "1747" ]
18ac9aa71915f04317f58f97d7b33af4e02a9288
diff --git a/evap/contributor/views.py b/evap/contributor/views.py --- a/evap/contributor/views.py +++ b/evap/contributor/views.py @@ -28,7 +28,7 @@ from evap.results.exporters import ResultsExporter from evap.results.tools import annotate_distributions_and_grades, get_evaluations_with_course_result_attributes from evap.staff.forms import ContributionFormset -from evap.student.views import get_valid_form_groups_or_render_vote_page +from evap.student.views import render_vote_page @responsible_or_contributor_or_delegate_required @@ -155,9 +155,9 @@ def render_preview(request, formset, evaluation_form, evaluation): formset.save() request.POST = None # this prevents errors rendered in the vote form - preview_response = get_valid_form_groups_or_render_vote_page( + preview_response = render_vote_page( request, evaluation, preview=True, for_rendering_in_modal=True - )[1].content.decode() + ).content.decode() raise IntegrityError # rollback transaction to discard the database writes except IntegrityError: pass @@ -240,7 +240,7 @@ def evaluation_preview(request, evaluation_id): ): raise PermissionDenied - return get_valid_form_groups_or_render_vote_page(request, evaluation, preview=True)[1] + return render_vote_page(request, evaluation, preview=True) @require_POST diff --git a/evap/staff/views.py b/evap/staff/views.py --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -105,7 +105,7 @@ ) from evap.student.forms import QuestionnaireVotingForm from evap.student.models import TextAnswerWarning -from evap.student.views import get_valid_form_groups_or_render_vote_page +from evap.student.views import render_vote_page @manager_required @@ -1601,7 +1601,7 @@ def evaluation_preview(request, semester_id, evaluation_id): raise PermissionDenied evaluation = get_object_or_404(Evaluation, id=evaluation_id, course__semester=semester) - return get_valid_form_groups_or_render_vote_page(request, evaluation, preview=True)[1] + return render_vote_page(request, evaluation, preview=True) @manager_required diff --git a/evap/student/views.py b/evap/student/views.py --- a/evap/student/views.py +++ b/evap/student/views.py @@ -116,7 +116,7 @@ def sorter(evaluation): return render(request, "student_index.html", template_data) -def get_valid_form_groups_or_render_vote_page(request, evaluation, preview, for_rendering_in_modal=False): +def get_vote_page_form_groups(request, evaluation, preview): contributions_to_vote_on = evaluation.contributions.all() # prevent a user from voting on themselves if not preview: @@ -131,12 +131,15 @@ def get_valid_form_groups_or_render_vote_page(request, evaluation, preview, for_ QuestionnaireVotingForm(request.POST or None, contribution=contribution, questionnaire=questionnaire) for questionnaire in questionnaires ] + return form_groups - if all(all(form.is_valid() for form in form_group) for form_group in form_groups.values()): - assert not preview - return form_groups, None - evaluation_form_group = form_groups.pop(evaluation.general_contribution) +def render_vote_page(request, evaluation, preview, for_rendering_in_modal=False): + form_groups = get_vote_page_form_groups(request, evaluation, preview) + + assert preview or not all(form.is_valid() for form_group in form_groups.values() for form in form_group) + + evaluation_form_group = form_groups.pop(evaluation.general_contribution, default=[]) contributor_form_groups = [ ( @@ -175,19 +178,19 @@ def get_valid_form_groups_or_render_vote_page(request, evaluation, preview, for_ 
general_contribution_textanswers_visible_to=textanswers_visible_to(evaluation.general_contribution), text_answer_warnings=TextAnswerWarning.objects.all(), ) - return None, render(request, "student_vote.html", template_data) + return render(request, "student_vote.html", template_data) @participant_required def vote(request, evaluation_id): - # pylint: disable=too-many-locals,too-many-nested-blocks,too-many-branches + # pylint: disable=too-many-nested-blocks,too-many-branches evaluation = get_object_or_404(Evaluation, id=evaluation_id) if not evaluation.can_be_voted_for_by(request.user): raise PermissionDenied - form_groups, rendered_page = get_valid_form_groups_or_render_vote_page(request, evaluation, preview=False) - if rendered_page is not None: - return rendered_page + form_groups = get_vote_page_form_groups(request, evaluation, preview=False) + if not all(form.is_valid() for form_group in form_groups.values() for form in form_group): + return render_vote_page(request, evaluation, preview=False) # all forms are valid, begin vote operation with transaction.atomic():
diff --git a/evap/contributor/tests/test_views.py b/evap/contributor/tests/test_views.py --- a/evap/contributor/tests/test_views.py +++ b/evap/contributor/tests/test_views.py @@ -122,6 +122,11 @@ def test_wrong_state(self): self.evaluation.save() self.app.get(self.url, user=self.responsible, status=403) + def test_without_questionnaires_assigned(self): + # regression test for #1747 + self.evaluation.general_contribution.questionnaires.set([]) + self.app.get(self.url, user=self.responsible, status=200) + class TestContributorEvaluationEditView(WebTest): @classmethod diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -1980,11 +1980,17 @@ def setUpTestData(cls): class TestEvaluationPreviewView(WebTestStaffModeWith200Check): @classmethod def setUpTestData(cls): - evaluation = baker.make(Evaluation) - evaluation.general_contribution.questionnaires.set([baker.make(Questionnaire)]) + cls.evaluation = baker.make(Evaluation) + cls.evaluation.general_contribution.questionnaires.set([baker.make(Questionnaire)]) - cls.test_users = [make_manager()] - cls.url = f"/staff/semester/{evaluation.course.semester.pk}/evaluation/{evaluation.pk}/preview" + cls.manager = make_manager() + cls.test_users = [cls.manager] + cls.url = f"/staff/semester/{cls.evaluation.course.semester.pk}/evaluation/{cls.evaluation.pk}/preview" + + def test_without_questionnaires_assigned(self): + # regression test for #1747 + self.evaluation.general_contribution.questionnaires.set([]) + self.app.get(self.url, user=self.manager, status=200) class TestEvaluationImportPersonsView(WebTestStaffMode):
Staff vote preview page is accessible, but crashes when no questionnaires are assigned We currently have this situation with http://localhost/staff/semester/21/evaluation/1668/preview. There are two contributions for this evaluation, but no questionnaires are assigned. The staff `evaluation_preview` view calls `get_valid_form_groups_or_render_vote_page`, which in turn will check whether all form groups are valid -- there are no form groups, so this is the case. The method then asserts `not preview`, which fails. The preview button on the staff semester view that leads to this page is enabled (it is shown if `evaluation.state < evaluation.State.IN_EVALUATION`). @janno42 how do we want to handle evaluations without any assigned questionnaires, regarding preview as well as starting the evaluation? From what I see, starting an evaluation that has no questionnaires assigned is currently possible?
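The mechanism described above can be reproduced with plain Python, because `all()` over an empty iterable is vacuously true; this is only an illustration, not code from the project:

```python
form_groups = {}  # no questionnaires assigned, so no forms are built at all

# "every form in every group is valid" is vacuously True for an empty dict ...
all_valid = all(all(form.is_valid() for form in group) for group in form_groups.values())
assert all_valid  # passes

# ... so the view takes the "all forms valid" branch and then trips over this:
preview = True
assert not preview  # AssertionError, exactly what the preview page runs into
```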
Approving an evaluation without questionnaires is not possible via the frontend, so an evaluation without questionnaires cannot be started. The preview in this case should probably simply show an empty voting page.
2022-05-17T19:15:01
e-valuation/EvaP
1,770
e-valuation__EvaP-1770
[ "1704", "1704" ]
70e17465fba3824655a816461705f5a300affa3d
diff --git a/evap/context_processors.py b/evap/context_processors.py --- a/evap/context_processors.py +++ b/evap/context_processors.py @@ -12,3 +12,7 @@ def slogan(request): def debug(request): return {"debug": settings.DEBUG} + + +def allow_anonymous_feedback_messages(request): + return {"allow_anonymous_feedback_messages": settings.ALLOW_ANONYMOUS_FEEDBACK_MESSAGES} diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py --- a/evap/evaluation/views.py +++ b/evap/evaluation/views.py @@ -4,6 +4,7 @@ from django.conf import settings from django.contrib import auth, messages from django.contrib.auth.decorators import login_required +from django.core.exceptions import SuspiciousOperation from django.core.mail import EmailMessage from django.http import HttpResponse, HttpResponseBadRequest from django.shortcuts import redirect, render @@ -161,17 +162,23 @@ def legal_notice(request): @require_POST @login_required def contact(request): + sent_anonymous = request.POST.get("anonymous") == "true" + if sent_anonymous and not settings.ALLOW_ANONYMOUS_FEEDBACK_MESSAGES: + raise SuspiciousOperation("Anonymous feedback messages are not allowed, however received one from user!") message = request.POST.get("message") title = request.POST.get("title") - email = request.user.email or f"User {request.user.id}" - subject = f"[EvaP] Message from {email}" - + if sent_anonymous: + sender = "anonymous user" + subject = "[EvaP] Anonymous message" + else: + sender = request.user.email or f"User {request.user.id}" + subject = f"[EvaP] Message from {sender}" if message: mail = EmailMessage( subject=subject, - body=f"{title}\n{request.user.email}\n\n{message}", + body=f"{title}\n{sender}\n\n{message}", to=[settings.CONTACT_EMAIL], - reply_to=[request.user.email], + reply_to=[] if sent_anonymous else [sender], ) try: mail.send() diff --git a/evap/settings.py b/evap/settings.py --- a/evap/settings.py +++ b/evap/settings.py @@ -132,6 +132,7 @@ } CONTACT_EMAIL = "webmaster@localhost" +ALLOW_ANONYMOUS_FEEDBACK_MESSAGES = True # Config for mail system DEFAULT_FROM_EMAIL = "webmaster@localhost" @@ -235,6 +236,7 @@ "django.contrib.messages.context_processors.messages", "evap.context_processors.slogan", "evap.context_processors.debug", + "evap.context_processors.allow_anonymous_feedback_messages", ], "builtins": ["django.templatetags.i18n"], }
diff --git a/evap/evaluation/tests/test_views.py b/evap/evaluation/tests/test_views.py --- a/evap/evaluation/tests/test_views.py +++ b/evap/evaluation/tests/test_views.py @@ -93,16 +93,38 @@ class TestFAQView(WebTestWith200Check): class TestContactEmail(WebTest): csrf_checks = False + url = "/contact" + @override_settings(ALLOW_ANONYMOUS_FEEDBACK_MESSAGES=True) def test_sends_mail(self): user = baker.make(UserProfile, email="[email protected]") + # normal email self.app.post( - "/contact", - params={"message": "feedback message", "title": "some title", "sender_email": "[email protected]"}, + self.url, + params={"message": "feedback message", "title": "some title", "anonymous": "false"}, user=user, ) - self.assertEqual(len(mail.outbox), 1) - self.assertTrue(mail.outbox[0].reply_to == ["[email protected]"]) + # anonymous email + self.app.post( + self.url, + params={"message": "feedback message", "title": "some title", "anonymous": "true"}, + user=user, + ) + + self.assertEqual(len(mail.outbox), 2) + self.assertEqual(mail.outbox[0].reply_to, ["[email protected]"]) + self.assertEqual(mail.outbox[1].reply_to, []) + + @override_settings(ALLOW_ANONYMOUS_FEEDBACK_MESSAGES=False) + def test_anonymous_not_allowed(self): + user = baker.make(UserProfile, email="[email protected]") + self.app.post( + self.url, + params={"message": "feedback message", "title": "some title", "anonymous": "true"}, + user=user, + status=400, + ) + self.assertEqual(len(mail.outbox), 0) class TestChangeLanguageView(WebTest):
Allow anonymous feedback messages via settings When sending a message in the feedback form, the sender field is currently automatically filled and disabled; thus users cannot give anonymous feedback. This should be made optional via a new setting `ALLOW_ANONYMOUS_FEEDBACK_MESSAGES`. When set to `FALSE`, the current behavior is kept. When set to `TRUE`, a new checkbox is shown in the form, enabling users to send their message anonymously. Checking this checkbox will remove the sender information in the form and in the sent message's text.
2022-05-30T20:35:52
e-valuation/EvaP
1,784
e-valuation__EvaP-1784
[ "1769" ]
a81426729fc9e857a16749eaf0c7cdda0d77d5ba
diff --git a/evap/staff/forms.py b/evap/staff/forms.py --- a/evap/staff/forms.py +++ b/evap/staff/forms.py @@ -579,6 +579,9 @@ class Meta: fields = ("evaluation", "contributor", "questionnaires", "role", "textanswer_visibility", "label", "order") widgets = { "order": forms.HiddenInput(), + # RadioSelects are necessary so each value gets a id_for_label, see #1769. + "role": forms.RadioSelect(), + "textanswer_visibility": forms.RadioSelect(), } def __init__(self, *args, evaluation=None, **kwargs):
diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -1756,6 +1756,8 @@ def get_post_params(cls): ] ) class TestEvaluationEditView(WebTestStaffMode): + render_pages_url = "/staff/semester/PK/evaluation/PK/edit" + @classmethod def setUpTestData(cls): cls.manager = make_manager() @@ -1797,6 +1799,12 @@ def setUpTestData(cls): cls.contribution1.questionnaires.set([cls.contributor_questionnaire]) cls.contribution2.questionnaires.set([cls.contributor_questionnaire]) + @render_pages + def render_pages(self): + return { + "normal": self.app.get(self.url, user=self.manager).content, + } + def test_edit_evaluation(self): page = self.app.get(self.url, user=self.manager) diff --git a/evap/static/ts/tests/frontend/staff-evaluation-edit.ts b/evap/static/ts/tests/frontend/staff-evaluation-edit.ts new file mode 100644 --- /dev/null +++ b/evap/static/ts/tests/frontend/staff-evaluation-edit.ts @@ -0,0 +1,36 @@ +import { test, expect } from "@jest/globals"; +import { ElementHandle } from "puppeteer"; + +import { pageHandler } from "../utils/page"; + +// regression test for #1769 +test("changes form data", pageHandler( + "/staff/semester/PK/evaluation/PK/edit/normal.html", + async page => { + const managerId = await page.evaluate(() => { + const tomselect = (document.getElementById("id_contributions-0-contributor") as any).tomselect; + const options = tomselect.options; + const managerOption = Object.keys(options).find(key => options[key].text == "manager (manager)"); + tomselect.setValue(managerOption); + return managerOption; + }); + + const editorLabels = await page.$x("//label[contains(text(), 'Editor')]"); + const ownAndGeneralLabels = await page.$x("//label[contains(text(), 'Own and general')]"); + if (editorLabels.length < 1 || ownAndGeneralLabels.length < 1) { + throw new Error("Button group buttons not found."); + } + + await (editorLabels[0] as ElementHandle<Element>).click() + await (ownAndGeneralLabels[0] as ElementHandle<Element>).click(); + + const formData = await page.evaluate(() => { + return Object.fromEntries(new FormData(document.getElementById("evaluation-form") as HTMLFormElement)); + }); + + expect(formData["contributions-0-contributor"]).toBe(managerId); + expect(formData["contributions-0-order"]).toBe("0"); + expect(formData["contributions-0-role"]).toBe("1"); + expect(formData["contributions-0-textanswer_visibility"]).toBe("GENERAL"); + }, +));
Evaluation Edit: Button Groups don't work, submitting fails with "This field is required". Since the django4 update (#1746, https://github.com/e-valuation/EvaP/commit/f2ce2c5a686070b153549559ac614c6aad0c8e73), going to any evaluation edit page as a staff user and clicking on "Save" fails with "This field is required." on the extra form in the contribution formset. The same is probably true for any formset on our page. At the same time, the button groups on the page cannot be used properly. I don't remember (and currently also cannot find) any explanation of changed behavior regarding formsets on the django4 [release notes](https://docs.djangoproject.com/en/4.0/releases/4.0/). [This ticket](https://code.djangoproject.com/ticket/33534) also implies django's behavior shouldn't have changed. Maybe we do something to the form so that `has_changed()` evaluates to true. @niklasmohrin is currently working on a minimal reproduction example (for maybe creating a django issue). Edit: Explanation of what's going on is below in https://github.com/e-valuation/EvaP/issues/1769#issuecomment-1146813897. The root cause is that django4 removed `id_for_label` from choices of a choice field, but we attempt to use it in `choice_button.html`. Now, we always put "None" there in the HTML, breaking both button groups and sortable.js.
Django should ignore forms that have `has_changed() == False`. However, for our extra form, it says `has_changed() == True`, because the "order" field has changed from its initial value "-1" to whatever value it got assigned by sortable.js (in my debug-run: 2). Removing the script-tag that loads "sortable.min.js" makes this go away. That leaves the question: Why did this work before? Edit: It seems that with django3.2, sortable.js didn't touch the order for the extra formset, so the request.POST data contains "order: -1" (unchanged). That leaves the question: Why does it touch the order now? It seems like the order is changed on submit, maybe even using the formdata event. Will have to debug sortable to find out why. We pass a custom `rowChanged` method to `makeFormSortable`, which is used to detect whether a row was changed by the user. If this returns `true` (= `changed`), the hidden order-input is changed. The implementation in `evaluation_edit.js` uses jquery for this: https://github.com/e-valuation/EvaP/blob/d3d3bf7c0f854fce206c8dded8f7ab6c74b7b6aa/evap/evaluation/templates/evap_evaluation_edit_js.html#L4-L20 Now, in jquery, if `row.find("some_selector")` returns an empty result, it is still valid to call `.prop("checked")` on it, and it returns `undefined`. With the django4 update, the role and visibility inputs no longer have their IDs set correctly, so this is what happened: The `checked` test always returned undefined, and negating that returns `true`. The id is not set correctly anymore because in staff_evaluation_form.html, we use contribution_formset.html, which uses role_buttons.html and textanswer_visibility_buttons.html, which in turn use choice_button.html. `choice_button.html` tries to access `choice.id_for_label` to get the id for the label. This seems to always be `None` from django 4.0 on. `formelement` is a `BoundField` instance here and has an `id_for_label`, [as is documented](https://docs.djangoproject.com/en/4.0/ref/forms/api/#django.forms.BoundField.id_for_label), but `choice` is one of the `BoundWidget` instances that this field is made up of, so it isn't required by the documentation to have an `id_for_label`. This makes some sense, as from the point of view of django, they are expecting the widget to be a select input, and there, you usually don't need an ID for each option. So: * We need to fix the IDs. Maybe we can build our own id by concatenating `formelement.id_for_label` with some identifying data from the widget. Or, maybe there is a django widget designed for radio-selections, where it's more usual to have a label for each option (and thus need an id). * jquery and javascript are at it again. We should consider making the whole "rowChanged" implementation more robust. Most of our sortable formsets get a template row passed anyway, so it should be easy to just compare the current form's data with the hidden template row's data, instead of manually checking every field. This should wait until #1758 is done because the templating is tightly coupled with select2 right now. The same base issue (no more `id_for_label` for the choices of a choice field) also currently breaks the "responsibility" and "text answer visibility" button groups on the evaluation edit page: All buttons (= html labels) have `for="None"`, and all underlying radio inputs have `id="None"`. Thus, no matter which button you click, it will always select the same button in a button group on the page. The voting page as well as the results page filtering seem to work fine with their button groups.
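A small sketch of the fix direction from the first bullet above: use a widget that is meant to label each option, so every subwidget gets an id again. The form and choices below are made up for illustration (run it in a Django shell):

```python
from django import forms


class DemoForm(forms.Form):
    # With RadioSelect, each rendered option is its own subwidget and receives an
    # id, so templates can safely write <label for="{{ choice.id_for_label }}">.
    role = forms.ChoiceField(
        choices=[(0, "Contributor"), (1, "Editor")],
        widget=forms.RadioSelect,
    )


for subwidget in DemoForm()["role"]:
    print(subwidget.id_for_label)  # "id_role_0", "id_role_1"
```

This matches what the patch further above does by declaring `RadioSelect` widgets for the `role` and `textanswer_visibility` fields.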
2022-07-19T15:04:01
e-valuation/EvaP
1,790
e-valuation__EvaP-1790
[ "1785", "1785" ]
17075cdc57d3b79b1883ca2b007a153c6cfb99bb
diff --git a/evap/rewards/models.py b/evap/rewards/models.py --- a/evap/rewards/models.py +++ b/evap/rewards/models.py @@ -15,6 +15,10 @@ class NotEnoughPoints(Exception): """An attempt has been made to redeem more points than available.""" +class OutdatedRedemptionData(Exception): + """A redemption request has been sent with outdated data, e.g. when a request has been sent twice.""" + + class RedemptionEventExpired(Exception): """An attempt has been made to redeem more points for an event whose redeem_end_date lies in the past.""" diff --git a/evap/rewards/tools.py b/evap/rewards/tools.py --- a/evap/rewards/tools.py +++ b/evap/rewards/tools.py @@ -14,6 +14,7 @@ from evap.rewards.models import ( NoPointsSelected, NotEnoughPoints, + OutdatedRedemptionData, RedemptionEventExpired, RewardPointGranting, RewardPointRedemption, @@ -23,11 +24,20 @@ @transaction.atomic -def save_redemptions(request, redemptions: Dict[int, int]): +def save_redemptions(request, redemptions: Dict[int, int], previous_redeemed_points: int): # lock these rows to prevent race conditions list(request.user.reward_point_grantings.select_for_update()) list(request.user.reward_point_redemptions.select_for_update()) + # check consistent previous redeemed points + # do not validate reward points, to allow receiving points after page load + if previous_redeemed_points != redeemed_points_of_user(request.user): + raise OutdatedRedemptionData( + _( + "It appears that your browser sent multiple redemption requests. You can see all successful redemptions below." + ) + ) + total_points_available = reward_points_of_user(request.user) total_points_redeemed = sum(redemptions.values()) @@ -60,6 +70,10 @@ def reward_points_of_user(user): return count +def redeemed_points_of_user(user): + return RewardPointRedemption.objects.filter(user_profile=user).aggregate(Sum("value"))["value__sum"] or 0 + + def is_semester_activated(semester): return SemesterActivation.objects.filter(semester=semester, is_active=True).exists() diff --git a/evap/rewards/views.py b/evap/rewards/views.py --- a/evap/rewards/views.py +++ b/evap/rewards/views.py @@ -16,6 +16,7 @@ from evap.rewards.models import ( NoPointsSelected, NotEnoughPoints, + OutdatedRedemptionData, RedemptionEventExpired, RewardPointGranting, RewardPointRedemption, @@ -26,27 +27,34 @@ from evap.staff.views import semester_view +def redeem_reward_points(request): + redemptions = {} + try: + for key, value in request.POST.items(): + if key.startswith("points-"): + event_id = int(key.rpartition("-")[2]) + redemptions[event_id] = int(value) + previous_redeemed_points = int(request.POST["previous_redeemed_points"]) + except (ValueError, KeyError, TypeError) as e: + raise BadRequest from e + + try: + save_redemptions(request, redemptions, previous_redeemed_points) + messages.success(request, _("You successfully redeemed your points.")) + except (NoPointsSelected, NotEnoughPoints, RedemptionEventExpired) as error: + messages.warning(request, error) + return 400 + except OutdatedRedemptionData as error: + messages.error(request, error) + return 409 + return 200 + + @reward_user_required def index(request): - # pylint: disable=too-many-locals status = 200 if request.method == "POST": - redemptions = {} - try: - for key, value in request.POST.items(): - if key.startswith("points-"): - event_id = int(key.rpartition("-")[2]) - redemptions[event_id] = int(value) - except ValueError as e: - raise BadRequest from e - - try: - save_redemptions(request, redemptions) - messages.success(request, _("You successfully 
redeemed your points.")) - except (NoPointsSelected, NotEnoughPoints, RedemptionEventExpired) as error: - messages.warning(request, error) - status = 400 - + status = redeem_reward_points(request) total_points_available = reward_points_of_user(request.user) reward_point_grantings = RewardPointGranting.objects.filter(user_profile=request.user) reward_point_redemptions = RewardPointRedemption.objects.filter(user_profile=request.user) @@ -65,6 +73,7 @@ def index(request): template_data = dict( reward_point_actions=reward_point_actions, total_points_available=total_points_available, + total_points_spent=sum(redemption.value for redemption in reward_point_redemptions), events=events, ) return render(request, "rewards_index.html", template_data, status=status)
diff --git a/evap/rewards/tests/test_views.py b/evap/rewards/tests/test_views.py --- a/evap/rewards/tests/test_views.py +++ b/evap/rewards/tests/test_views.py @@ -13,7 +13,7 @@ RewardPointRedemptionEvent, SemesterActivation, ) -from evap.rewards.tools import is_semester_activated, reward_points_of_user +from evap.rewards.tools import is_semester_activated, redeemed_points_of_user, reward_points_of_user from evap.staff.tests.utils import WebTestStaffMode, WebTestStaffModeWith200Check @@ -79,13 +79,46 @@ def test_redeem_points_for_expired_event(self): self.assertContains(response, "event expired already.", status_code=400) self.assertEqual(5, reward_points_of_user(self.student)) + def post_redemption_request(self, redemption_params, additional_params=None, status=200): + if additional_params is None: + additional_params = { + "previous_redeemed_points": redeemed_points_of_user(self.student), + } + return self.app.post( + self.url, params={**redemption_params, **additional_params}, user=self.student, status=status + ) + def test_invalid_post_parameters(self): - self.app.post(self.url, params={"points-asd": 2}, user=self.student, status=400) - self.app.post(self.url, params={"points-": 2}, user=self.student, status=400) - self.app.post(self.url, params={f"points-{self.event1.pk}": ""}, user=self.student, status=400) - self.app.post(self.url, params={f"points-{self.event1.pk}": "asd"}, user=self.student, status=400) + self.post_redemption_request({"points-asd": 2}, status=400) + self.post_redemption_request({"points-": 2}, status=400) + self.post_redemption_request({f"points-{self.event1.pk}": ""}, status=400) + self.post_redemption_request({f"points-{self.event1.pk}": "asd"}, status=400) + + # redemption without or with invalid point parameters + self.post_redemption_request( + redemption_params={f"points-{self.event1.pk}": 1}, additional_params={}, status=400 + ) + self.post_redemption_request( + redemption_params={f"points-{self.event1.pk}": 1}, + additional_params={"previous_redeemed_points": "asd"}, + status=400, + ) self.assertFalse(RewardPointRedemption.objects.filter(user_profile=self.student).exists()) + # now, a correct request succeeds + self.post_redemption_request({f"points-{self.event1.pk}": 2}) + + def test_inconsistent_previous_redemption_counts(self): + response1 = self.app.get(self.url, user=self.student) + form1 = response1.forms["reward-redemption-form"] + form1.set(f"points-{self.event1.pk}", 2) + response2 = self.app.get(self.url, user=self.student) + form2 = response2.forms["reward-redemption-form"] + form2.set(f"points-{self.event1.pk}", 2) + form1.submit() + form2.submit(status=409) + self.assertEqual(1, RewardPointRedemption.objects.filter(user_profile=self.student).count()) + class TestEventsView(WebTestStaffModeWith200Check): url = reverse("rewards:reward_point_redemption_events")
Add waiting time after reward point redemption After reward points have been redeemed, the next redemption should not be possible for a short amount of time, to prevent users from accidentally redeeming the selected number of points twice by sending a double post request. When a redemption is attempted within 30 seconds of the last one, the redemption should not be performed. Instead, an error should be shown telling the user to wait a moment before redeeming more points.
Edit: This comment is irrelevant: The issue is about a client-side error of accidentally submitting multiple redemption requests without actually intending to, not about clients redeeming more points than they have available. ~Have we actually observed double-redemption?~ ~We have code in place that should prevent that from happening in [rewards/tools.py:`save_redemptions`](https://github.com/e-valuation/EvaP/blob/main/evap/rewards/tools.py#L28). I cannot confidently claim that this code is correct in all situations, but after quickly going through [the docs](https://docs.djangoproject.com/en/4.0/ref/models/querysets/#select-for-update) again, I _think_ it does the right thing.~ ~IMO, an unsynchronized timeout wouldn't solve the issue: For two parallel requests, how could one request know that the other one is also concurrently accessing the timeout? I'm certain this can only be solved by using proper synchronization -- and then we don't need the timeout.~ Yes, we have observed them, that's why the issue was opened :) The code we already have is fine for situations where the browser sends two posts almost at the same time for whatever reason. The problem which occurred here is that requests were sent about 10 seconds from each other (probably by clicking the button twice with a pause in between due to slow internet connection or any other reason). The two requests were correctly handled by the server, redeeming points unintentionally twice in a short period of time. Because it is very unlikely that this happens by intention, we should prevent two redemptions in a short amount of time between each other. Ah, I see, so we want to gracefully handle an accidental repetition of partial redemptions by the user. Sorry, I misread the issue text. I'm still not a fan of the timeout, since its amount is arbitrary. 30s would block me if I realize I forgot to set a higher count, but it wouldn't help if I have a tab open that shows an outdated redemption page, and thus think that I didn't redeem yet or something didn't work in my past attempt to redeem. How about we add some more identifying information to the POST request, e.g. the reward points that the client thinks are redeemable, or the count of past redemptions by the user? This way, the server could check whether the request was issued from an up-to-date client, and deny processing it if it was outdated.
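A condensed sketch of the stale-request check proposed in the last comment; the names mirror the patch further above, but the body is simplified and not a drop-in implementation:

```python
from django.db import transaction


@transaction.atomic
def save_redemptions(request, redemptions, previous_redeemed_points):
    # lock the user's granting/redemption rows so concurrent requests serialize here
    list(request.user.reward_point_grantings.select_for_update())
    list(request.user.reward_point_redemptions.select_for_update())

    # if the page was rendered before an earlier redemption went through, the
    # client's notion of "already redeemed points" is stale -> reject the request
    if previous_redeemed_points != redeemed_points_of_user(request.user):
        raise OutdatedRedemptionData("redemption request was sent with outdated data")

    ...  # validate the requested amounts and create the redemption objects as before
```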
2022-08-08T19:17:20
e-valuation/EvaP
1,794
e-valuation__EvaP-1794
[ "1624" ]
06f0c1f96d3c8657fdd5be7b9d63d3d1cf128caa
diff --git a/evap/evaluation/migrations/0131_userprofile_ordering.py b/evap/evaluation/migrations/0131_userprofile_ordering.py new file mode 100644 --- /dev/null +++ b/evap/evaluation/migrations/0131_userprofile_ordering.py @@ -0,0 +1,26 @@ +# Generated by Django 4.1.1 on 2022-09-12 19:53 + +from django.db import migrations +import django.db.models.functions.text + + +class Migration(migrations.Migration): + + dependencies = [ + ("evaluation", "0130_alter_textanswer_review_decision"), + ] + + operations = [ + migrations.AlterModelOptions( + name="userprofile", + options={ + "ordering": [ + django.db.models.functions.text.Lower("last_name"), + django.db.models.functions.text.Lower("first_name"), + django.db.models.functions.text.Lower("email"), + ], + "verbose_name": "user", + "verbose_name_plural": "users", + }, + ), + ] diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -16,7 +16,7 @@ from django.core.mail import EmailMultiAlternatives from django.db import IntegrityError, models, transaction from django.db.models import Count, Manager, OuterRef, Q, Subquery -from django.db.models.functions import Coalesce +from django.db.models.functions import Coalesce, Lower from django.dispatch import Signal, receiver from django.template import Context, Template from django.template.defaultfilters import linebreaksbr @@ -356,10 +356,7 @@ def midterm_grade_documents(self): @cached_property def responsibles_names(self): - ordered_responsibles = sorted( - self.responsibles.all(), key=lambda responsible: (responsible.last_name, responsible.full_name) - ) - return ", ".join([responsible.full_name for responsible in ordered_responsibles]) + return ", ".join(responsible.full_name for responsible in self.responsibles.all()) @property def has_external_responsible(self): @@ -1547,7 +1544,7 @@ class UserProfile(AbstractBaseUser, PermissionsMixin): is_active = models.BooleanField(default=True, verbose_name=_("active")) class Meta: - ordering = ["last_name", "first_name", "email"] + ordering = [Lower("last_name"), Lower("first_name"), Lower("email")] verbose_name = _("user") verbose_name_plural = _("users") diff --git a/evap/staff/views.py b/evap/staff/views.py --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -820,10 +820,7 @@ def semester_preparation_reminder(request, semester_id): ).prefetch_related("course__degrees") prepared_evaluations = semester.evaluations.filter(state=Evaluation.State.PREPARED) - responsibles = list( - set(responsible for evaluation in prepared_evaluations for responsible in evaluation.course.responsibles.all()) - ) - responsibles.sort(key=lambda responsible: (responsible.last_name, responsible.first_name)) + responsibles = UserProfile.objects.filter(courses_responsible_for__evaluations__in=prepared_evaluations).distinct() responsible_list = [ ( @@ -859,8 +856,7 @@ def semester_grade_reminder(request, semester_id): courses = [course for course in courses if not course.final_grade_documents.exists()] courses.sort(key=lambda course: course.name) - responsibles = list(set(responsible for course in courses for responsible in course.responsibles.all())) - responsibles.sort(key=lambda responsible: (responsible.last_name.lower(), responsible.first_name.lower())) + responsibles = UserProfile.objects.filter(courses_responsible_for=courses).distinct() responsible_list = [ (responsible, [course for course in courses if responsible in course.responsibles.all()])
diff --git a/evap/evaluation/tests/test_models.py b/evap/evaluation/tests/test_models.py --- a/evap/evaluation/tests/test_models.py +++ b/evap/evaluation/tests/test_models.py @@ -670,6 +670,18 @@ def test_get_sorted_due_evaluations(self): sorted_evaluations = student.get_sorted_due_evaluations() self.assertEqual(sorted_evaluations, [(evaluations[1], 0), (evaluations[0], 0), (evaluations[2], 1)]) + def test_correct_sorting(self): + baker.make( + UserProfile, + last_name=iter(["Y", "x", None, None]), + first_name=iter(["x", "x", "a", None]), + email=iter(["[email protected]", "[email protected]", "[email protected]", "[email protected]"]), + _quantity=4, + _bulk_create=True, + ) + email_list = [user.email for user in UserProfile.objects.all()] + self.assertEqual(email_list, ["[email protected]", "[email protected]", "[email protected]", "[email protected]"]) + class ParticipationArchivingTests(TestCase): @classmethod
Handle user profile sorting with incomplete names Sorting user profiles by first and last name currently fails when not both are specified. This happens for special users such as, e.g., "Student Representatives" who do not have a first and last name specified. For example, when opening a semester's grade reminder page, loading the page currently fails when at least one of the responsible users of a course listed on the page does not have a first and last name specified (`AttributeError at /staff/semester/<id>/grade_reminder 'NoneType' object has no attribute 'lower'`). As a solution, if only a last name is specified, the first name can be ignored. If both values are None, the email must be used for sorting. These places need to be checked and updated: - `evaluation.models.responsibles_names` - `staff.views.semester_preparation_reminder` - `staff.views.semester_grade_reminder`
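The crash is reproducible with a plain Python sort, which is why the patch above pushes the ordering into the database with `Lower(...)` instead; the snippet is purely illustrative:

```python
users = [("Manilium", "Lucilia"), (None, None)]  # (last_name, first_name) pairs

try:
    sorted(users, key=lambda u: (u[0].lower(), u[1].lower()))
except AttributeError as error:
    print(error)  # 'NoneType' object has no attribute 'lower'
```

At the database level, NULL last names simply sort after (or before, depending on the backend) the non-NULL ones instead of raising.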
2022-08-15T18:27:11
e-valuation/EvaP
1,799
e-valuation__EvaP-1799
[ "1796" ]
a2b8208262a5ff64211fb47e90f5b2bfa7753bb5
diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -778,6 +778,9 @@ def state_str(self): @cached_property def general_contribution(self): + if self.pk is None: + return None + try: return self.contributions.get(contributor=None) except Contribution.DoesNotExist: diff --git a/evap/rewards/tools.py b/evap/rewards/tools.py --- a/evap/rewards/tools.py +++ b/evap/rewards/tools.py @@ -82,7 +82,7 @@ def grant_reward_points_if_eligible(user, semester): or 0 ) progress = float(required_evaluations.filter(voters=user).count()) / float(required_evaluations.count()) - target_points = max([points for threshold, points in settings.REWARD_POINTS if threshold <= progress], default=0) + target_points = max((points for threshold, points in settings.REWARD_POINTS if threshold <= progress), default=0) missing_points = target_points - granted_points if missing_points > 0: diff --git a/evap/staff/forms.py b/evap/staff/forms.py --- a/evap/staff/forms.py +++ b/evap/staff/forms.py @@ -768,7 +768,8 @@ class ContributionFormset(BaseInlineFormSet): def __init__(self, data=None, **kwargs): data = self.handle_moved_contributors(data, **kwargs) super().__init__(data, **kwargs) - self.queryset = self.instance.contributions.exclude(contributor=None) + if self.instance.pk is not None: + self.queryset = self.instance.contributions.exclude(contributor=None) def handle_deleted_and_added_contributions(self): """
Update to django 4.1 (#1794 is currently blocked by a bug that is fixed in django 4.1) [Same procedure as every year](https://github.com/e-valuation/EvaP/issues/1387) Some points I think might be relevant * ~Django enables the cached template loader by default now. I think we want to be able to edit a template, reload the page, and instantly see the changes reflected with a development setup. So, we probably want to set a custom, non-cached template loader when `DEBUG=True`?~ **Edit**: As pointed out by @karyon below, the cached template loader has some auto-reload functionality, should work for us. * ~https://code.djangoproject.com/ticket/33631 was closed, so `blocktranslate asvar` now marks the output as safe. This should allow us to remove all `|safe` filter calls that we have left in our templates after https://github.com/e-valuation/EvaP/pull/1737.~ **Edit**: This will be in 4.2, not 4.1 * ~Django defaults to a new forms renderer using `div` tags. We should check if this breaks some layout because CSS rules do not apply anymore.~ **Edit**: As @karyon points out below, default will be changed in 5.0.
Regarding the first point, looking at https://code.djangoproject.com/ticket/33639, it seems that templates do auto-reload even when cached. Regarding the third point, the default is changed only in 5.0. Now that constraints are validated during model validation, [this Questionnaire validation error](https://github.com/e-valuation/EvaP/blob/main/evap/evaluation/models.py#L199) can probably be expressed as a constraint. That would give us additional safety at the db level. Somewhat related, I noticed a couple asserts/checks that might be better expressed as constraints. I'm not sure there was a reason why we didn't do that. * [start date must be before end date](https://github.com/e-valuation/EvaP/blob/main/evap/evaluation/models.py#L467) * [text and heading questions do not allow additional textanswers](https://github.com/e-valuation/EvaP/blob/main/evap/evaluation/models.py#L1121) * [TextAnswer's answer and original_answer must not be the same](https://github.com/e-valuation/EvaP/blob/main/evap/evaluation/models.py#L1461) * [Evaluation's _voter_count must be set iff _participant_count is set](https://github.com/e-valuation/EvaP/blob/main/evap/evaluation/models.py#L620) * [A bunch of validation when redeeming points](https://github.com/e-valuation/EvaP/blob/fc3f5b4390fbaf2aff2633dfbe61bbdd8e80c8b7/evap/rewards/tools.py#L36-L46)
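For one of the checks listed above (answer must differ from original_answer), a hedged sketch of what the constraint form could look like; the field names follow the linked model, but this is illustrative and not the project's code:

```python
from django.db import models
from django.db.models import F, Q


class TextAnswer(models.Model):
    answer = models.TextField()
    original_answer = models.TextField(null=True, blank=True)

    class Meta:
        constraints = [
            # enforced by the database and, since Django 4.1, also checked
            # during full_clean() / model validation
            models.CheckConstraint(
                check=~Q(answer=F("original_answer")),
                name="answer_must_differ_from_original_answer",
            ),
        ]
```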
2022-08-22T22:19:44
e-valuation/EvaP
1,805
e-valuation__EvaP-1805
[ "1783" ]
06f0c1f96d3c8657fdd5be7b9d63d3d1cf128caa
diff --git a/evap/development/management/commands/run.py b/evap/development/management/commands/run.py --- a/evap/development/management/commands/run.py +++ b/evap/development/management/commands/run.py @@ -1,4 +1,5 @@ import sys +from subprocess import Popen # nosec from django.core.management import execute_from_command_line from django.core.management.base import BaseCommand @@ -9,10 +10,8 @@ class Command(BaseCommand): help = 'Execute "runserver 0.0.0.0:8000"' def handle(self, *args, **options): - self.stdout.write('Executing "manage.py scss"') - execute_from_command_line(["manage.py", "scss"]) - self.stdout.write('Executing "manage.py ts compile"') - execute_from_command_line(["manage.py", "ts", "compile"]) - self.stdout.write('Executing "manage.py runserver 0.0.0.0:8000"') - sys.argv = ["manage.py", "runserver", "0.0.0.0:8000"] - execute_from_command_line(sys.argv) + self.stdout.write('Executing "manage.py scss" and "manage.py ts compile"') + with Popen(["./manage.py", "scss"]), Popen(["./manage.py", "ts", "compile"]): # nosec + self.stdout.write('Executing "manage.py runserver 0.0.0.0:8000"') + sys.argv = ["manage.py", "runserver", "0.0.0.0:8000"] + execute_from_command_line(sys.argv)
diff --git a/evap/development/tests/test_commands.py b/evap/development/tests/test_commands.py --- a/evap/development/tests/test_commands.py +++ b/evap/development/tests/test_commands.py @@ -57,15 +57,17 @@ def test_executes_key_commands(self, mock_call_command, mock_input): class TestRunCommand(TestCase): - @staticmethod - def test_calls_runserver(): - with patch("django.core.management.execute_from_command_line") as mock: - management.call_command("run", stdout=StringIO()) + def test_calls_runserver(self): + with patch("django.core.management.execute_from_command_line") as execute_mock: + with patch("subprocess.Popen") as popen_mock: + management.call_command("run", stdout=StringIO()) - mock.assert_has_calls( + execute_mock.assert_called_once_with(["manage.py", "runserver", "0.0.0.0:8000"]) + self.assertEqual(popen_mock.call_count, 2) + popen_mock.assert_has_calls( [ - call(["manage.py", "scss"]), - call(["manage.py", "ts", "compile"]), - call(["manage.py", "runserver", "0.0.0.0:8000"]), - ] + call(["./manage.py", "scss"]), + call(["./manage.py", "ts", "compile"]), + ], + any_order=True, )
Remove sass + ts compilation from ./manage.py run These slow down starting the development server. For developers actually changing TS or SASS files, they are not helpful enough, and those will likely start a `--watch` job anyway. We can simply provide a `./manage.py watch` script for this use case. For anyone else, they simply slow down the development server startup, which is a bit annoying. Opinions?
I thought about this again and just removing compilation could really do us more harm than good. Before, we came to the conclusion that we can spare newcomers the burden of thinking about compiling ts and scss by doing it for them in the provision script, but what happens when they pull updates from main that change these files or add new ones? This could lead to great confusion / frustration for them and us. Maybe the solution we want is opt-in behavior, like a `--no-compile` / `--fast` / `-f` flag that skips the two and that we can tell people to use once they are comfortable with the project setup / annoyed by the long startup time. What do you think? Good point. Similar things are true for dependencies (npm, pip), but those probably change less frequently. Honestly: I'm lazy and don't want to type `-f` (+3 key strokes = +37% compared to `./m<TAB> ru<TAB>`) -> I think the default should be fast. I would be fine with starting the jobs in the background. They could even be `--watch` jobs running the background. I think the main reason we didn't make them --watch jobs is because their output was mushed together (or we just couldn't get it to work), but honestly, if you're editing TS and you want clean output, I think then you could just `./manage.py run --no-watch` and `./manage.py ts compile --watch` in another console. Haven't thought about background jobs yet, how do you feel about starting a background process that compiles the files once? This way, we don't get (much) output mangled in the python output
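A sketch of the long-running watcher variant discussed above, as opposed to the one-shot compilation the merged patch uses; the `--watch` flags are taken from the discussion and assumed to exist on both commands:

```python
from subprocess import Popen  # nosec

from django.core.management import execute_from_command_line

# Keep the asset watchers alive for the whole lifetime of the dev server, so
# changed SCSS/TS files are recompiled automatically while runserver is running.
with Popen(["./manage.py", "scss", "--watch"]), Popen(["./manage.py", "ts", "compile", "--watch"]):  # nosec
    execute_from_command_line(["manage.py", "runserver", "0.0.0.0:8000"])
```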
2022-09-12T15:46:51
e-valuation/EvaP
1,810
e-valuation__EvaP-1810
[ "1788", "1788" ]
d22ddfc4f75ed7bc8d0e46e8bc2e7c14a2490037
diff --git a/evap/rewards/views.py b/evap/rewards/views.py --- a/evap/rewards/views.py +++ b/evap/rewards/views.py @@ -63,7 +63,6 @@ def index(request): reward_point_actions=reward_point_actions, total_points_available=total_points_available, events=events, - point_selection=range(0, total_points_available + 1), ) return render(request, "rewards_index.html", template_data)
Replace reward point redemption dropdown with number input field If a user selects an option, a new line is added and the selection spans two rows. This looks wrong. A user can insert custom options. If the user inputs something invalid like "abcdef" or an empty string, only part of the "Please select" placeholder is visible. This looks wrong as well.
And in addition the dropdown field is not right-aligned anymore. All this probably comes from the switch to Tom Select in #1758. The dropdown should be replaced by a number input field with the [max and min values](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/number#max) set.
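The suggested replacement, expressed as a Django form field for illustration (the project may instead render the `<input type="number">` directly in the template; the function name is made up):

```python
from django import forms


def build_points_field(total_points_available: int) -> forms.IntegerField:
    # renders as <input type="number" min="0" max="..."> and also validates server-side
    return forms.IntegerField(min_value=0, max_value=total_points_available, initial=0)
```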
2022-09-26T16:46:59
e-valuation/EvaP
1,811
e-valuation__EvaP-1811
[ "1798", "1798" ]
fb647098586465d4b677a43459931dea1656e3b8
diff --git a/evap/evaluation/management/commands/anonymize.py b/evap/evaluation/management/commands/anonymize.py --- a/evap/evaluation/management/commands/anonymize.py +++ b/evap/evaluation/management/commands/anonymize.py @@ -222,6 +222,9 @@ def anonymize_answers(self, lorem_ipsum): text_answer.answer = self.lorem(text_answer.answer, lorem_ipsum) if text_answer.original_answer: text_answer.original_answer = self.lorem(text_answer.original_answer, lorem_ipsum) + # answer and original answer must not be the same (see #1798) + if text_answer.answer == text_answer.original_answer: + text_answer.original_answer += " ipsum" text_answer.save() self.stdout.write("Shuffling rating answer counter counts...")
Anonymize can fail when replacing TextAnswers The anonymize script replaces TextAnswers with lorem ipsum text. If an original answer exists, it gets replaced as well. In some circumstances this can result in an AssertionError because on saving the answer and original answer must not be equal: ``` Traceback (most recent call last): File "manage.py", line 10, in <module> execute_from_command_line(sys.argv) File "/opt/evap/env/lib/python3.8/site-packages/django/core/management/__init__.py", line 419, in execute_from_command_line utility.execute() File "/opt/evap/env/lib/python3.8/site-packages/django/core/management/__init__.py", line 413, in execute self.fetch_command(subcommand).run_from_argv(self.argv) File "/opt/evap/env/lib/python3.8/site-packages/django/core/management/base.py", line 354, in run_from_argv self.execute(*args, **cmd_options) File "/opt/evap/env/lib/python3.8/site-packages/django/core/management/base.py", line 398, in execute output = self.handle(*args, **options) File "/opt/evap/evap/evaluation/management/commands/anonymize.py", line 51, in handle self.anonymize_data() File "/opt/evap/evap/evaluation/management/commands/anonymize.py", line 71, in anonymize_data self.anonymize_answers(lorem_ipsum) File "/opt/evap/evap/evaluation/management/commands/anonymize.py", line 225, in anonymize_answers text_answer.save() File "/opt/evap/evap/evaluation/models.py", line 1448, in save assert self.answer != self.original_answer AssertionError ``` It has to be made sure that the answer and original_answer are not equal after anonymization.
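The invariant is enforced by an `assert` in `TextAnswer.save()` (see the last frame of the traceback). Purely as an illustration of an alternative that is not part of the fix, the same rule could also be stated as a database constraint; the model here is stripped down to the two relevant fields:

```python
from django.db import models
from django.db.models import F, Q


class TextAnswer(models.Model):
    # Sketch: only the fields relevant to the invariant are shown.
    answer = models.TextField()
    original_answer = models.TextField(null=True)

    class Meta:
        constraints = [
            # Reject rows where the visible answer equals the original answer.
            models.CheckConstraint(
                check=~Q(answer=F("original_answer")),
                name="answer_differs_from_original_answer",
            ),
        ]
```

Either way, the anonymize command has to make the two strings differ before saving, which is what the patch above does.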
2022-09-26T17:00:46
e-valuation/EvaP
1,822
e-valuation__EvaP-1822
[ "1804" ]
54fc1ef8bb29bb9591af2bb030dbb79217d7a4ee
diff --git a/evap/rewards/views.py b/evap/rewards/views.py --- a/evap/rewards/views.py +++ b/evap/rewards/views.py @@ -28,6 +28,8 @@ @reward_user_required def index(request): + # pylint: disable=too-many-locals + status = 200 if request.method == "POST": redemptions = {} try: @@ -43,6 +45,7 @@ def index(request): messages.success(request, _("You successfully redeemed your points.")) except (NoPointsSelected, NotEnoughPoints, RedemptionEventExpired) as error: messages.warning(request, error) + status = 400 total_points_available = reward_points_of_user(request.user) reward_point_grantings = RewardPointGranting.objects.filter(user_profile=request.user) @@ -64,7 +67,7 @@ def index(request): total_points_available=total_points_available, events=events, ) - return render(request, "rewards_index.html", template_data) + return render(request, "rewards_index.html", template_data, status=status) @manager_required
diff --git a/evap/rewards/tests/test_views.py b/evap/rewards/tests/test_views.py --- a/evap/rewards/tests/test_views.py +++ b/evap/rewards/tests/test_views.py @@ -65,8 +65,8 @@ def test_redeem_too_many_points(self): form = response.forms["reward-redemption-form"] form.set(f"points-{self.event1.pk}", 3) form.set(f"points-{self.event2.pk}", 3) - response = form.submit() - self.assertContains(response, "have enough reward points.") + response = form.submit(status=400) + self.assertContains(response, "have enough reward points.", status_code=400) self.assertEqual(5, reward_points_of_user(self.student)) def test_redeem_points_for_expired_event(self): @@ -75,8 +75,8 @@ def test_redeem_points_for_expired_event(self): form = response.forms["reward-redemption-form"] form.set(f"points-{self.event2.pk}", 1) RewardPointRedemptionEvent.objects.update(redeem_end_date=date.today() - timedelta(days=1)) - response = form.submit() - self.assertContains(response, "event expired already.") + response = form.submit(status=400) + self.assertContains(response, "event expired already.", status_code=400) self.assertEqual(5, reward_points_of_user(self.student)) def test_invalid_post_parameters(self):
Response status code of failed redemption is 200 As @niklasmohrin remarked in [#1790](https://github.com/e-valuation/EvaP/pull/1790/files#r962983692), in `evap.rewards.views.redeem_reward_points`, the status code of failed redemptions (e.g. due to `NotEnoughPoints` or `RedemptionEventExpired`) is set as 200 OK, even though no redemption points were saved. Instead, the status code should be something like 400 Bad Request to underline that something went wrong. @niklasmohrin added that `assertContains`, used in some tests in `evap.rewards.tests.test_views.TestIndexView`, needs to be adapted, as it asserts that the status code is 200 by default.
2022-10-17T17:16:07
e-valuation/EvaP
1,831
e-valuation__EvaP-1831
[ "1807" ]
daca46ea18c719a880aa81897c9252627bc3d4be
diff --git a/evap/staff/forms.py b/evap/staff/forms.py --- a/evap/staff/forms.py +++ b/evap/staff/forms.py @@ -111,7 +111,11 @@ class ImportForm(forms.Form): class UserImportForm(forms.Form): use_required_attribute = False - excel_file = forms.FileField(label=_("Excel file"), required=False) + excel_file = forms.FileField( + label=_("Excel file"), + required=False, + widget=forms.FileInput(attrs={"accept": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"}), + ) class EvaluationParticipantCopyForm(forms.Form):
User Import File Selection: Mime-Filter not working With Firefox 104.0.2, I don't get a file type filter in the file selection dialog that opens when clicking on "Select file" on the staff user import page. Similarly to the semester import, it would be nice if users would get a correctly filtered file selection dialog.
Hint: We need to correctly set [the "accept" attribute](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/file#accept). This is already correctly configured for the "semester import" ;)
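For reference, this is the same pattern the semester import already uses, applied to the user import form (sketch; the mime type is the one registered for .xlsx files):

```python
from django import forms
from django.utils.translation import gettext_lazy as _


class UserImportForm(forms.Form):
    # Passing "accept" through to the <input type="file"> element lets the
    # browser pre-filter the file selection dialog to .xlsx files.
    excel_file = forms.FileField(
        label=_("Excel file"),
        required=False,
        widget=forms.FileInput(
            attrs={"accept": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"}
        ),
    )
```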
2022-11-07T17:01:21
e-valuation/EvaP
1,832
e-valuation__EvaP-1832
[ "1813" ]
47d80498322ebc00fd8757ba4b35dd1b7c979127
diff --git a/evap/rewards/tools.py b/evap/rewards/tools.py --- a/evap/rewards/tools.py +++ b/evap/rewards/tools.py @@ -32,26 +32,22 @@ def save_redemptions(request, redemptions: Dict[int, int], previous_redeemed_poi # check consistent previous redeemed points # do not validate reward points, to allow receiving points after page load if previous_redeemed_points != redeemed_points_of_user(request.user): - raise OutdatedRedemptionData( - _( - "It appears that your browser sent multiple redemption requests. You can see all successful redemptions below." - ) - ) + raise OutdatedRedemptionData() total_points_available = reward_points_of_user(request.user) total_points_redeemed = sum(redemptions.values()) if total_points_redeemed <= 0: - raise NoPointsSelected(_("You cannot redeem 0 points.")) + raise NoPointsSelected() if total_points_redeemed > total_points_available: - raise NotEnoughPoints(_("You don't have enough reward points.")) + raise NotEnoughPoints() for event_id in redemptions: if redemptions[event_id] > 0: event = get_object_or_404(RewardPointRedemptionEvent, pk=event_id) if event.redeem_end_date < date.today(): - raise RedemptionEventExpired(_("Sorry, the deadline for this event expired already.")) + raise RedemptionEventExpired() RewardPointRedemption.objects.create(user_profile=request.user, value=redemptions[event_id], event=event) diff --git a/evap/rewards/views.py b/evap/rewards/views.py --- a/evap/rewards/views.py +++ b/evap/rewards/views.py @@ -41,12 +41,21 @@ def redeem_reward_points(request): try: save_redemptions(request, redemptions, previous_redeemed_points) messages.success(request, _("You successfully redeemed your points.")) - except (NoPointsSelected, NotEnoughPoints, RedemptionEventExpired) as error: - messages.warning(request, error) - return 400 - except OutdatedRedemptionData as error: - messages.error(request, error) - return 409 + except (NoPointsSelected, NotEnoughPoints, RedemptionEventExpired, OutdatedRedemptionData) as error: + status_code = 400 + if isinstance(error, NoPointsSelected): + error_string = _("You cannot redeem 0 points.") + elif isinstance(error, NotEnoughPoints): + error_string = _("You don't have enough reward points.") + elif isinstance(error, RedemptionEventExpired): + error_string = _("Sorry, the deadline for this event expired already.") + elif isinstance(error, OutdatedRedemptionData): + status_code = 409 + error_string = _( + "It appears that your browser sent multiple redemption requests. You can see all successful redemptions below." + ) + messages.error(request, error_string) + return status_code return 200
diff --git a/evap/rewards/tests/test_views.py b/evap/rewards/tests/test_views.py --- a/evap/rewards/tests/test_views.py +++ b/evap/rewards/tests/test_views.py @@ -69,6 +69,14 @@ def test_redeem_too_many_points(self): self.assertContains(response, "have enough reward points.", status_code=400) self.assertEqual(5, reward_points_of_user(self.student)) + def test_redeem_zero_points(self): + response = self.app.get(self.url, user=self.student) + form = response.forms["reward-redemption-form"] + form.set(f"points-{self.event1.pk}", 0) + response = form.submit(status=400) + self.assertContains(response, "cannot redeem 0 points.", status_code=400) + self.assertEqual(5, reward_points_of_user(self.student)) + def test_redeem_points_for_expired_event(self): """Regression test for #846""" response = self.app.get(self.url, user=self.student)
Separate error messages and error detection in reward points redemption A reward point redemption can fail for several reasons, each of which has its own `Exception` subclass in `evap/rewards/models.py`. These exceptions are created deep in the reward point redemption logic. Currently, error messages that ultimately end up on the user's screen are set at the creation of the exceptions. In order to separate the "error display" and "error detection" logic, the user strings should be moved to where the exceptions are caught. _Originally came up in https://github.com/e-valuation/EvaP/pull/1790#pullrequestreview-1105770573_
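A compact sketch of the separation being asked for; the lookup-table shape is only one way to keep the user-facing strings at the catch site and is not necessarily how the final implementation looks:

```python
from django.utils.translation import gettext_lazy as _


class NoPointsSelected(Exception):
    """Raised by the redemption logic; carries no user-facing text."""


class NotEnoughPoints(Exception):
    """Raised by the redemption logic; carries no user-facing text."""


# Only the code that catches the exceptions decides how to phrase the error.
ERROR_MESSAGES = {
    NoPointsSelected: _("You cannot redeem 0 points."),
    NotEnoughPoints: _("You don't have enough reward points."),
}


def redemption_error_message(error: Exception):
    return ERROR_MESSAGES[type(error)]
```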
2022-11-07T17:31:28
e-valuation/EvaP
1,841
e-valuation__EvaP-1841
[ "1840" ]
e889dce5b2111f42f3cda777649c19d30de165be
diff --git a/evap/staff/importers/base.py b/evap/staff/importers/base.py --- a/evap/staff/importers/base.py +++ b/evap/staff/importers/base.py @@ -216,10 +216,11 @@ def map(self, file_content: bytes): ) ) - for row_index in range(self.skip_first_n_rows, sheet.max_row): - # We use 0-based indexing, openpyxl uses 1-based indexing. - row = [cell.value for cell in sheet[row_index + 1]] - location = ExcelFileLocation(sheet.title, row_index) + # openpyxl uses 1-based indexing. + for row_number, row in enumerate( + sheet.iter_rows(min_row=self.skip_first_n_rows + 1, values_only=True), start=self.skip_first_n_rows + ): + location = ExcelFileLocation(sheet.title, row_number) if not all(isinstance(cell, str) or cell is None for cell in row): self.importer_log.add_error( diff --git a/evap/staff/importers/enrollment.py b/evap/staff/importers/enrollment.py --- a/evap/staff/importers/enrollment.py +++ b/evap/staff/importers/enrollment.py @@ -321,7 +321,7 @@ class NameEnCollisionException(Exception): """Course with same name_en, but different name_de exists""" def __init__(self, semester: Semester): - courses = Course.objects.filter(semester=semester).prefetch_related("responsibles", "evaluations") + courses = Course.objects.filter(semester=semester).prefetch_related("type", "responsibles", "evaluations") assert ("semester", "name_de") in Course._meta.unique_together self.courses_by_name_de = {course.name_de: course for course in courses} @@ -339,9 +339,10 @@ def get_merge_hindrances(course_data: CourseData, merge_candidate: Course) -> Li if len(responsibles) != 1 or responsibles[0].email != course_data.responsible_email: hindrances.append(_("the responsibles of the course do not match")) - if len(merge_candidate.evaluations.all()) != 1: + merge_candidate_evaluations = merge_candidate.evaluations.all() + if len(merge_candidate_evaluations) != 1: hindrances.append(_("the existing course does not have exactly one evaluation")) - elif merge_candidate.evaluations.get().wait_for_grade_upload_before_publishing != course_data.is_graded: + elif merge_candidate_evaluations[0].wait_for_grade_upload_before_publishing != course_data.is_graded: hindrances.append(_("the evaluation of the existing course has a mismatching grading specification")) return hindrances
diff --git a/evap/staff/tests/test_importers.py b/evap/staff/tests/test_importers.py --- a/evap/staff/tests/test_importers.py +++ b/evap/staff/tests/test_importers.py @@ -1,4 +1,5 @@ from copy import deepcopy +from dataclasses import dataclass from datetime import date, datetime from unittest.mock import patch @@ -9,10 +10,35 @@ import evap.staff.fixtures.excel_files_test_data as excel_data from evap.evaluation.models import Contribution, Course, CourseType, Degree, Evaluation, Semester, UserProfile -from evap.staff.importers import ImporterLogEntry, import_enrollments, import_persons_from_evaluation, import_users +from evap.staff.importers import ( + ImporterLog, + ImporterLogEntry, + import_enrollments, + import_persons_from_evaluation, + import_users, +) +from evap.staff.importers.base import ExcelFileLocation, ExcelFileRowMapper, InputRow from evap.staff.tools import ImportType, user_edit_link +class TestExcelFileRowMapper(TestCase): + @dataclass + class SingleColumnInputRow(InputRow): + column_count = 1 + location: ExcelFileLocation + value: str + + def test_skip_first_n_rows_handled_correctly(self): + workbook_data = {"SheetName": [[str(i)] for i in range(10)]} + workbook_file_contents = excel_data.create_memory_excel_file(workbook_data) + + mapper = ExcelFileRowMapper(skip_first_n_rows=3, row_cls=self.SingleColumnInputRow, importer_log=ImporterLog()) + rows = mapper.map(workbook_file_contents) + + self.assertEqual(rows[0].location, ExcelFileLocation("SheetName", 3)) + self.assertEqual(rows[0].value, "3") + + class TestUserImport(TestCase): # valid user import tested in tests/test_views.py, TestUserImportView
Openpyxl `max_column` computation bottlenecks importers The importers currently take seconds up to minutes to run. There's no inherent reason for that, all processing logic inside of them is designed to have O(n) runtime with O(1) database queries. A quick profiling run of the import view shows that we spend most time in openpyxl's `worksheet.py` file in [`max_column`](https://foss.heptapod.net/openpyxl/openpyxl/-/blob/branch/3.0/openpyxl/worksheet/worksheet.py#L365-L375) and a generator expression creating the set `cols`: ```python3 @property def max_column(self): """The maximum column index containing data (1-based) :type: int """ max_col = 1 if self._cells: cols = set(c[1] for c in self._cells) ##### line 373 ##### max_col = max(cols) return max_col ``` ![photo_5289897819194377466_y](https://user-images.githubusercontent.com/13838962/209836150-f999e9a0-36fc-475e-857e-b00c64dd34a9.jpg) I feel like figuring out how many columns a 200kB (12 x 6000 cells) excel file has should be possible in less than 60 seconds, even in a debug-profiling run. They have an open issue here: https://foss.heptapod.net/openpyxl/openpyxl/-/issues/1763 If I read the output above right, we have 3000 accesses to max_column, just reusing the computed value might be enough for now.
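A sketch of the workaround direction: iterate the rows directly instead of indexing the sheet row by row, so the repeated dimension lookups go away (`read_only`/`data_only` are additional assumptions here, not required for the fix):

```python
from io import BytesIO

import openpyxl


def iterate_cell_values(file_content: bytes, skip_first_n_rows: int = 0):
    # read_only streams the sheet instead of materializing every cell object;
    # values_only yields plain tuples of cell values. Iterating this way avoids
    # indexing the sheet per row, which is what triggered the repeated
    # max_column computations.
    workbook = openpyxl.load_workbook(BytesIO(file_content), read_only=True, data_only=True)
    for sheet in workbook.worksheets:
        rows = sheet.iter_rows(min_row=skip_first_n_rows + 1, values_only=True)
        for row_number, row in enumerate(rows, start=skip_first_n_rows):
            yield sheet.title, row_number, row
```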
2022-12-29T22:01:01
e-valuation/EvaP
1,843
e-valuation__EvaP-1843
[ "1839", "1839" ]
e889dce5b2111f42f3cda777649c19d30de165be
diff --git a/evap/staff/importers/enrollment.py b/evap/staff/importers/enrollment.py --- a/evap/staff/importers/enrollment.py +++ b/evap/staff/importers/enrollment.py @@ -723,8 +723,9 @@ def update_existing_and_create_new_courses( for course_data in course_data_iterable if not course_data.merge_into_course ] - Course.objects.bulk_create(new_course_objects) - Course.update_log_after_bulk_create(new_course_objects) + + for course in new_course_objects: + course.save() # Create one evaluation per newly created course evaluation_objects = [ @@ -736,24 +737,16 @@ def update_existing_and_create_new_courses( ) for course in new_course_objects ] - Evaluation.objects.bulk_create(evaluation_objects) - Evaluation.update_log_after_bulk_create(evaluation_objects) + + for evaluation in evaluation_objects: + evaluation.save() # Create M2M entries for the responsibles of the newly created courses responsible_emails = {course_data.responsible_email for course_data in course_data_iterable} responsible_objs_by_email = {obj.email: obj for obj in UserProfile.objects.filter(email__in=responsible_emails)} - responsibles_through_objects = [ - Course.responsibles.through( - course=course, - userprofile=responsible_objs_by_email[course_data_by_name_en[course.name_en].responsible_email], - ) - for course in new_course_objects - ] - Course.responsibles.through.objects.bulk_create(responsibles_through_objects) - Course.update_log_after_m2m_bulk_create( - new_course_objects, responsibles_through_objects, "course_id", "userprofile_id", "responsibles" - ) + for course in new_course_objects: + course.responsibles.add(responsible_objs_by_email[course_data_by_name_en[course.name_en].responsible_email]) # Create Contributions for the responsibles of the newly created courses evaluation_objects_by_course = {evaluation.course: evaluation for evaluation in evaluation_objects} @@ -766,33 +759,21 @@ def update_existing_and_create_new_courses( ) for course in new_course_objects ] - Contribution.objects.bulk_create(contribution_objects) - Contribution.update_log_after_bulk_create(contribution_objects) + + for obj in contribution_objects: + obj.save() # Create M2M entries for the degrees of the newly created courses - degree_through_objects = [ - Course.degrees.through(course_id=course.id, degree_id=degree_obj.id) - for course in new_course_objects - for degree_obj in course_data_by_name_en[course.name_en].degrees - ] - Course.degrees.through.objects.bulk_create(degree_through_objects) - Course.update_log_after_m2m_bulk_create( - new_course_objects, degree_through_objects, "course_id", "degree_id", "degrees" - ) + for course in new_course_objects: + course.degrees.add(*course_data_by_name_en[course.name_en].degrees) - # Create M2M entries for the degrees of the courses that are updated courses_to_update = semester.courses.filter( name_en__in=[course_data.name_en for course_data in course_data_iterable if course_data.merge_into_course] ) - degree_through_objects = [ - Course.degrees.through(course_id=course.id, degree_id=degree_obj.id) - for course in courses_to_update - for degree_obj in course_data_by_name_en[course.name_en].degrees - ] - Course.degrees.through.objects.bulk_create(degree_through_objects, ignore_conflicts=True) - Course.update_log_after_m2m_bulk_create( - courses_to_update, degree_through_objects, "course_id", "degree_id", "degrees" - ) + + # Create M2M entries for the degrees of the courses that are updated + for course in courses_to_update: + course.degrees.add(*course_data_by_name_en[course.name_en].degrees) def 
store_participations_in_db(enrollment_rows: Iterable[EnrollmentParsedRow]): @@ -807,30 +788,11 @@ def store_participations_in_db(enrollment_rows: Iterable[EnrollmentParsedRow]): for evaluation in Evaluation.objects.select_related("course").filter(course__name_en__in=course_names_en) } - course_id_participant_id_pairs_in_file = { - (evaluations_by_course_name_en[row.course_data.name_en].pk, users_by_email[row.student_data.email].pk) - for row in enrollment_rows - } - - existing_course_id_participant_id_pairs = { - (participation.evaluation_id, participation.userprofile_id) - for participation in Evaluation.participants.through.objects.filter( - evaluation__in=evaluations_by_course_name_en.values() + participants_by_evaluation = defaultdict(list) + for row in enrollment_rows: + participants_by_evaluation[evaluations_by_course_name_en[row.course_data.name_en]].append( + users_by_email[row.student_data.email] ) - } - - course_id_participant_id_pairs = course_id_participant_id_pairs_in_file - existing_course_id_participant_id_pairs - participants_through_objects = [ - Evaluation.participants.through(evaluation_id=evaluation_id, userprofile_id=userprofile_id) - for (evaluation_id, userprofile_id) in course_id_participant_id_pairs - ] - - Evaluation.participants.through.objects.bulk_create(participants_through_objects) - Evaluation.update_log_after_m2m_bulk_create( - evaluations_by_course_name_en.values(), - participants_through_objects, - "evaluation_id", - "userprofile_id", - "participants", - ) + for evaluation, participants in participants_by_evaluation.items(): + evaluation.participants.add(*participants) diff --git a/evap/staff/importers/user.py b/evap/staff/importers/user.py --- a/evap/staff/importers/user.py +++ b/evap/staff/importers/user.py @@ -385,5 +385,8 @@ def update_existing_and_create_new_user_profiles( existing_user_profiles: Iterable[UserProfile], new_user_profiles: Iterable[UserProfile], ): - UserProfile.objects.bulk_update(existing_user_profiles, UserData.bulk_update_fields()) - UserProfile.objects.bulk_create(new_user_profiles) + for user_profile in existing_user_profiles: + user_profile.save() + + for user_profile in new_user_profiles: + user_profile.save()
diff --git a/evap/staff/tests/test_importers.py b/evap/staff/tests/test_importers.py --- a/evap/staff/tests/test_importers.py +++ b/evap/staff/tests/test_importers.py @@ -223,7 +223,7 @@ def test_validation_error(self, mocked_validation): ) @override_settings(DEBUG=False) - @patch("evap.evaluation.models.UserProfile.objects.bulk_create") + @patch("evap.evaluation.models.UserProfile.save") def test_unhandled_exception(self, mocked_db_access): mocked_db_access.side_effect = Exception("Contact your database admin right now!") result, importer_log = import_users(self.valid_excel_file_content, test_run=False)
Importer doesn't create general contributions When importing enrollment data, no `general_contribution`s are created for the newly added evaluations. This prevents managers from, e.g., using the "assign questionnaires" feature which fails with an `AttributeError`: ``` File "staff/views.py", line 802, in semester_questionnaire_assign evaluation.general_contribution.questionnaires.set(form.cleaned_data[evaluation.course.type.name]) Exception Type: AttributeError Exception Value: 'NoneType' object has no attribute 'questionnaires' ``` The importer should make sure that every new evaluation has a `general_contribution`.
Hm, shouldn't the questionnaire assign view just create it if users tried to use the general contribution? Or, if we dont want to allow that, it shouldnt display an option to assign to the general questionnaire, right? Do we currently have some invariants about what questionnaires exist for an evaluation? If so, we should probably enforce them with database contraints (#1800 could get another point) So the problem is that the importer [uses `bulk_create`](https://github.com/e-valuation/EvaP/blob/e889dce5b2111f42f3cda777649c19d30de165be/evap/staff/importers/enrollment.py#L739) instead of `save` to be faster, but we [use `save` to create additional objects, in this case, the general contribution](https://github.com/e-valuation/EvaP/blob/e889dce5b2111f42f3cda777649c19d30de165be/evap/evaluation/models.py#L456-L462). I wouldn't want to duplicate the "when a new evaluation is created, we also must create a general contribution for it" logic across the codebase. I think the approach of overriding `save` and having `pre_save` / `post_save` signals while also having methods like `bulk_create` that completely circumvent them is generally broken in django. We could defer creation of the general contribution to its first access using the getter for the `general_contribution` property. However, code can also try to access the database object via `Contribution.objects.filter(..., contributor=None)`. This would bypass any getters, causing errors. Searching for `contributor=None` gives me 17 places that look like they might break if the object doesn't exist in the database. If we use `save` instead of `bulk_*` in the importer, the importer tests will get 4 seconds slower (11s instead of 7s), and the import of somewhat realistically sized file (500 students, 100 courses, 4 enrollments per student) will get ~8s slower (currently 40s vs 48s, but that's due to a performance bug in openpyxl. There is no good reason for the importer to take longer than a second, I think). I would really like to keep the bulk operations for performance, but the only way to make them work with `save()` overrides and `pre_save`/`post_save` signals is with a custom, third method that performs all additional `on_save()` logic, and can take a list of objects, so it can work together with bulk operations. That would be a lot of extra infrastructure. As a quick fix, we can revert to `save()` in `update_existing_and_create_new_courses`, `store_participations_in_db`, and `update_existing_and_create_new_user_profiles`. For production right now, calling the `save()` on all instances without a general contribution should fix the problem, but might create weird log entries. After #1841, it's ~7s (bulk_create) vs ~12s (save) for me locally for a 6000 rows import file. Probably acceptable for now, as it only affects the import run and not test runs. Sooooo, should we take the 8s hit in a hotfix now-ish, and make it faster again next year? Or should I review the performance PR and we merge that in the next couple of days? > Sooooo, should we take the 8s hit in a hotfix now-ish, and make it faster again next year? Or should I review the performance PR and we merge that in the next couple of days? The performance PR should be unrelated, I just wanted to have it to have some numbers regarding performance impact of `save()` vs `bulk_create`. We can revert to using `save` in the importer for now. 
--- I just stumbled across this test: https://github.com/e-valuation/EvaP/blob/e889dce5b2111f42f3cda777649c19d30de165be/evap/evaluation/tests/test_models.py#L212-L220 and I was a bit confused here. We create an evaluation without a `general_contribution` instance and expect the model methods to still work properly? Also, the getter for the general contribution explicitly handles the "does not exist yet" state, and explicitly returns `None` instead of just creating the object in the database: https://github.com/e-valuation/EvaP/blob/e889dce5b2111f42f3cda777649c19d30de165be/evap/evaluation/models.py#L776-L784 Is the intention here to handle `Evaluation` instances that were not yet saved to the database? The test doesn't express that as it explicitly uses `bulk_create` with `prepare` instead of just `prepare`. I guess this comes back to @niklasmohrin's point in https://github.com/e-valuation/EvaP/issues/1839#issuecomment-1366605212: What is our invariant? I see three possibilities 1. We do not allow `Evaluations` without a general contribution. * This does not work for newly created instances that are not yet saved to database. 2. We do not allow `Evaluations` stored in the database without a general contribution. * This matches our implementations of `save` and (at least some) methods that could be called with unsaved objects. * It requires all accesses to instances that are not known to be in the database to handle the case that the contribution does not yet exist -- I'd bet there's cases where we don't properly check this. * The test above would be wrong in this case. 3. `Evaluations` might not have a contribution. * Shouldn't be harder to handle than 2.? Code that requires existance could just use a get-or-create getter. * The importer would be correct in this case. We considered allowing not to have a general contribution in #1246, it seems there was no good fundamental reason to enfore existence of the general contribution, it just happened to be what we first implemented. We didn't consider `bulk_` operations circumventing `save()` though. Related (and funny) PR / issue combination: #1259 and #1260. Hm, shouldn't the questionnaire assign view just create it if users tried to use the general contribution? Or, if we dont want to allow that, it shouldnt display an option to assign to the general questionnaire, right? Do we currently have some invariants about what questionnaires exist for an evaluation? If so, we should probably enforce them with database contraints (#1800 could get another point) So the problem is that the importer [uses `bulk_create`](https://github.com/e-valuation/EvaP/blob/e889dce5b2111f42f3cda777649c19d30de165be/evap/staff/importers/enrollment.py#L739) instead of `save` to be faster, but we [use `save` to create additional objects, in this case, the general contribution](https://github.com/e-valuation/EvaP/blob/e889dce5b2111f42f3cda777649c19d30de165be/evap/evaluation/models.py#L456-L462). I wouldn't want to duplicate the "when a new evaluation is created, we also must create a general contribution for it" logic across the codebase. I think the approach of overriding `save` and having `pre_save` / `post_save` signals while also having methods like `bulk_create` that completely circumvent them is generally broken in django. We could defer creation of the general contribution to its first access using the getter for the `general_contribution` property. However, code can also try to access the database object via `Contribution.objects.filter(..., contributor=None)`. 
This would bypass any getters, causing errors. Searching for `contributor=None` gives me 17 places that look like they might break if the object doesn't exist in the database. If we use `save` instead of `bulk_*` in the importer, the importer tests will get 4 seconds slower (11s instead of 7s), and the import of somewhat realistically sized file (500 students, 100 courses, 4 enrollments per student) will get ~8s slower (currently 40s vs 48s, but that's due to a performance bug in openpyxl. There is no good reason for the importer to take longer than a second, I think). I would really like to keep the bulk operations for performance, but the only way to make them work with `save()` overrides and `pre_save`/`post_save` signals is with a custom, third method that performs all additional `on_save()` logic, and can take a list of objects, so it can work together with bulk operations. That would be a lot of extra infrastructure. As a quick fix, we can revert to `save()` in `update_existing_and_create_new_courses`, `store_participations_in_db`, and `update_existing_and_create_new_user_profiles`. For production right now, calling the `save()` on all instances without a general contribution should fix the problem, but might create weird log entries. After #1841, it's ~7s (bulk_create) vs ~12s (save) for me locally for a 6000 rows import file. Probably acceptable for now, as it only affects the import run and not test runs. Sooooo, should we take the 8s hit in a hotfix now-ish, and make it faster again next year? Or should I review the performance PR and we merge that in the next couple of days? > Sooooo, should we take the 8s hit in a hotfix now-ish, and make it faster again next year? Or should I review the performance PR and we merge that in the next couple of days? The performance PR should be unrelated, I just wanted to have it to have some numbers regarding performance impact of `save()` vs `bulk_create`. We can revert to using `save` in the importer for now. --- I just stumbled across this test: https://github.com/e-valuation/EvaP/blob/e889dce5b2111f42f3cda777649c19d30de165be/evap/evaluation/tests/test_models.py#L212-L220 and I was a bit confused here. We create an evaluation without a `general_contribution` instance and expect the model methods to still work properly? Also, the getter for the general contribution explicitly handles the "does not exist yet" state, and explicitly returns `None` instead of just creating the object in the database: https://github.com/e-valuation/EvaP/blob/e889dce5b2111f42f3cda777649c19d30de165be/evap/evaluation/models.py#L776-L784 Is the intention here to handle `Evaluation` instances that were not yet saved to the database? The test doesn't express that as it explicitly uses `bulk_create` with `prepare` instead of just `prepare`. I guess this comes back to @niklasmohrin's point in https://github.com/e-valuation/EvaP/issues/1839#issuecomment-1366605212: What is our invariant? I see three possibilities 1. We do not allow `Evaluations` without a general contribution. * This does not work for newly created instances that are not yet saved to database. 2. We do not allow `Evaluations` stored in the database without a general contribution. * This matches our implementations of `save` and (at least some) methods that could be called with unsaved objects. * It requires all accesses to instances that are not known to be in the database to handle the case that the contribution does not yet exist -- I'd bet there's cases where we don't properly check this. 
* The test above would be wrong in this case. 3. `Evaluations` might not have a contribution. * Shouldn't be harder to handle than 2.? Code that requires existance could just use a get-or-create getter. * The importer would be correct in this case. We considered allowing not to have a general contribution in #1246, it seems there was no good fundamental reason to enfore existence of the general contribution, it just happened to be what we first implemented. We didn't consider `bulk_` operations circumventing `save()` though. Related (and funny) PR / issue combination: #1259 and #1260.
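For illustration, the get-or-create getter from option 3 could look roughly like this (simplified sketch, not the current model code):

```python
from typing import Optional

from evap.evaluation.models import Contribution, Evaluation


def general_contribution_for(evaluation: Evaluation) -> Optional[Contribution]:
    # Unsaved evaluations cannot have related rows yet; saved ones get the
    # general contribution created lazily on first access.
    if evaluation.pk is None:
        return None
    contribution, _ = Contribution.objects.get_or_create(evaluation=evaluation, contributor=None)
    return contribution
```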
2022-12-30T16:30:58
e-valuation/EvaP
1,853
e-valuation__EvaP-1853
[ "1749" ]
5487c09546940c6011bb7a31c9d6bdd9624305fb
diff --git a/evap/evaluation/management/commands/scss.py b/evap/evaluation/management/commands/scss.py --- a/evap/evaluation/management/commands/scss.py +++ b/evap/evaluation/management/commands/scss.py @@ -2,7 +2,7 @@ import subprocess # nosec from django.conf import settings -from django.core.management.base import BaseCommand +from django.core.management.base import BaseCommand, CommandError class Command(BaseCommand): @@ -36,7 +36,7 @@ def handle(self, *args, **options): try: subprocess.run(command, check=True) # nosec - except FileNotFoundError: - print("Could not find sass command", file=self.stderr) + except FileNotFoundError as e: + raise CommandError("Could not find sass command") from e except KeyboardInterrupt: pass diff --git a/evap/evaluation/management/commands/ts.py b/evap/evaluation/management/commands/ts.py --- a/evap/evaluation/management/commands/ts.py +++ b/evap/evaluation/management/commands/ts.py @@ -54,8 +54,8 @@ def handle(self, *args, **options): def run_command(self, command): try: subprocess.run(command, check=True) # nosec - except FileNotFoundError: - print(f"Could not find {command[0]} command", file=self.stderr) + except FileNotFoundError as e: + raise CommandError(f"Could not find {command[0]} command") from e except KeyboardInterrupt: pass except subprocess.CalledProcessError as e:
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -12,9 +12,6 @@ jobs: runs-on: ubuntu-20.04 - container: - image: python:3.8 - services: postgres: image: postgres @@ -28,24 +25,21 @@ jobs: steps: - name: Check out repository code - uses: actions/checkout@v2 - - name: Install dependencies - run: pip install -r requirements-dev.txt - - name: Add localsettings - run: cp evap/settings_test.py evap/localsettings.py + uses: actions/checkout@v3 + + - name: Setup python + uses: ./.github/setup_python + - name: Run tests run: coverage run manage.py test - name: Upload coverage - uses: codecov/codecov-action@v2 + uses: codecov/codecov-action@v3 test_shuffled: name: Test (shuffled order) runs-on: ubuntu-20.04 - container: - image: python:3.8 - services: postgres: image: postgres @@ -59,11 +53,11 @@ jobs: steps: - name: Check out repository code - uses: actions/checkout@v2 - - name: Install dependencies - run: pip install -r requirements-dev.txt - - name: Add localsettings - run: cp evap/settings_test.py evap/localsettings.py + uses: actions/checkout@v3 + + - name: Setup python + uses: ./.github/setup_python + - name: Run tests run: python manage.py test --shuffle @@ -77,29 +71,26 @@ jobs: steps: - name: Check out repository code - uses: actions/checkout@v2 - - name: Install dependencies - run: pip install -r requirements-dev.txt - - name: Add localsettings - run: cp evap/settings_test.py evap/localsettings.py + uses: actions/checkout@v3 + + - name: Setup python + uses: ./.github/setup_python + - name: Run MyPy run: mypy -p evap linter: runs-on: ubuntu-20.04 - container: - image: python:3.8 - name: Linter steps: - name: Check out repository code - uses: actions/checkout@v2 - - name: Install dependencies - run: pip install -r requirements-dev.txt - - name: Add localsettings - run: cp evap/settings_test.py evap/localsettings.py + uses: actions/checkout@v3 + + - name: Setup python + uses: ./.github/setup_python + - name: Run linter run: pylint evap -j 0 @@ -111,18 +102,14 @@ jobs: steps: - name: Check out repository code - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - with: - python-version: 3.8 - - name: Install Python dependencies - run: pip install -r requirements-dev.txt - - name: Setup Node - uses: actions/setup-node@v2 - - name: Install Node dependencies - run: npm ci - - name: Add localsettings - run: cp evap/settings_test.py evap/localsettings.py + uses: actions/checkout@v3 + + - name: Setup python + uses: ./.github/setup_python + + - name: Setup nodejs + uses: ./.github/setup_nodejs + - name: Check code formatting run: black --check evap - name: Check imports formatting @@ -135,9 +122,6 @@ jobs: backup-process: runs-on: ubuntu-20.04 - container: - image: python:3.8 - services: postgres: image: postgres @@ -157,19 +141,25 @@ jobs: name: Backup process steps: - - name: Install sudo package - # otherwise, sudo commands in the scripts called will fail - run: apt update && apt install sudo - name: Check out repository code - uses: actions/checkout@v2 + uses: actions/checkout@v3 + with: + submodules: true + + - name: Setup python + uses: ./.github/setup_python + + - name: Setup nodejs + uses: ./.github/setup_nodejs + + - name: Install additional dependencies + run: sudo apt install gettext + - name: GitHub actions has wrong file ownership here, the checkout actions has a problem here (see their 1049) run: | git config --global --add safe.directory '*' sudo -H -u root git config --global --add 
safe.directory '*' - - name: Install dependencies - run: pip install -r requirements-dev.txt - - name: Add localsettings - run: cp evap/settings_test.py evap/localsettings.py + - name: Load test data run: | python manage.py migrate @@ -189,17 +179,17 @@ jobs: steps: - name: Check out repository code - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: submodules: true - - name: Setup Node - uses: actions/setup-node@v2 - - name: Install Node dependencies - run: npm ci + + - name: Setup nodejs + uses: ./.github/setup_nodejs + - name: Compile Scss run: npx sass evap/static/scss/evap.scss evap/static/css/evap.css - name: Store Css - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: css path: evap/static/css/evap.css @@ -208,9 +198,6 @@ jobs: render_pages: runs-on: ubuntu-20.04 - container: - image: python:3.8 - name: Render Html pages services: @@ -231,19 +218,19 @@ jobs: steps: - name: Check out repository code - uses: actions/checkout@v2 - - name: Install dependencies - run: pip install -r requirements-dev.txt - - name: Add localsettings - run: cp evap/settings_test.py evap/localsettings.py + uses: actions/checkout@v3 + + - name: Setup python + uses: ./.github/setup_python + - name: Render pages run: coverage run manage.py ts render_pages - name: Upload coverage - uses: codecov/codecov-action@v2 + uses: codecov/codecov-action@v3 with: flags: render-pages - name: Store rendered pages - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: rendered-pages path: evap/static/ts/rendered @@ -258,24 +245,22 @@ jobs: steps: - name: Check out repository code - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: submodules: true - - name: Install packages - run: sudo apt-get install xvfb libgbm1 - - name: Setup Node - uses: actions/setup-node@v2 - - name: Install Node dependencies - run: npm ci + + - name: Setup nodejs + uses: ./.github/setup_nodejs + - name: Compile Typescript run: npx tsc --project evap/static/ts/tsconfig.compile.json - name: Load rendered pages - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: rendered-pages path: evap/static/ts/rendered - name: Load Css - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: css path: evap/static/css diff --git a/evap/evaluation/tests/test_commands.py b/evap/evaluation/tests/test_commands.py --- a/evap/evaluation/tests/test_commands.py +++ b/evap/evaluation/tests/test_commands.py @@ -8,6 +8,7 @@ from django.conf import settings from django.core import mail, management +from django.core.management import CommandError from django.db.models import Sum from django.test import TestCase from django.test.utils import override_settings @@ -212,10 +213,8 @@ def test_scss_production_called(self, mock_subprocess_run): def test_scss_called_with_no_sass_installed(self, mock_subprocess_run): mock_subprocess_run.side_effect = FileNotFoundError() - stderr = StringIO() - management.call_command("scss", stderr=stderr) - - self.assertEqual(stderr.getvalue(), "Could not find sass command\n\n") + with self.assertRaisesMessage(CommandError, "Could not find sass command"): + management.call_command("scss") class TestTsCommend(TestCase): @@ -261,6 +260,13 @@ def test_ts_test(self, mock_render_pages, mock_call_command, mock_subprocess_run ] ) + @patch("subprocess.run") + def test_ts_called_with_no_npm_installed(self, mock_subprocess_run): + mock_subprocess_run.side_effect = FileNotFoundError() + + with 
self.assertRaisesMessage(CommandError, "Could not find npx command"): + management.call_command("ts", "compile") + class TestUpdateEvaluationStatesCommand(TestCase): def test_update_evaluations_called(self): diff --git a/evap/settings_test.py b/evap/settings_test.py --- a/evap/settings_test.py +++ b/evap/settings_test.py @@ -5,22 +5,22 @@ "NAME": "evap", "USER": "postgres", "PASSWORD": "postgres", - "HOST": "postgres", + "HOST": "localhost", "PORT": "5432", } } CACHES = { "default": { "BACKEND": "django.core.cache.backends.redis.RedisCache", - "LOCATION": "redis://redis:6379/0", + "LOCATION": "redis://localhost:6379/0", }, "results": { "BACKEND": "django.core.cache.backends.redis.RedisCache", - "LOCATION": "redis://redis:6379/1", + "LOCATION": "redis://localhost:6379/1", "TIMEOUT": None, }, "sessions": { "BACKEND": "django.core.cache.backends.redis.RedisCache", - "LOCATION": "redis://redis:6379/2", + "LOCATION": "redis://localhost:6379/2", }, }
Investigate Github Action caching One can cache certain directories in github actions. For Python, caching the entire installation is used (including all the `site-packages`), so that wheels don't need to be rebuild. Additionally, the download could be faster. We should investigate how much there is to gain.
Agree as long as its not much hassle (causes errors, lots of configuration) Installing python requirements takes ~15s on a github runner, installing the node dependencies takes ~25s -- this is what we could gain, right? Our slowest actions currently are [Test + Coverage](https://github.com/e-valuation/EvaP/runs/6247385090) (~2:25min) with - 30s container setup (waiting for postgres to start) - 17s installing dependencies - 1:20min running tests [Backup process](https://github.com/e-valuation/EvaP/runs/6247384862) (~2:45min) - 30s container setup (waiting for postgres to start) - 16s installing dependencies - 35s load testdata - 24s backup - 46s loading that backup so it seems to me that the 15s for installing dependencies wouldn't really be the most relevant optimization -- but still, we could do it. In theory, backing up and restoring the database state can be done in ~1s (pg_dump can create an sql file that can be loaded back). It's so slow here because django does this one-object-at-a-time and does a lot of extra work that we don't actually need ([check out this monstrosity](https://github.com/django/django/blob/7119f40c9881666b6f9b5cf7df09ee1d21cc8344/django/core/management/commands/loaddata.py#L253)). We could consider changing our format to the .sql files created by pg_dump, for the backup during an upgrade as well as for our test_data.json. (Somewhat related: There was https://github.com/e-valuation/EvaP/issues/333 8 years ago :D) It doesn't really make sense to wait for postgres to start, only to _then_ start installing dependencies. We could make that step parallel (don't know if this is specifiable in github actions?)
2023-01-16T22:23:35
e-valuation/EvaP
1,937
e-valuation__EvaP-1937
[ "1819" ]
5c452f3625c80458e4bf2b145d3d6e6b3e97f8f3
diff --git a/evap/staff/urls.py b/evap/staff/urls.py --- a/evap/staff/urls.py +++ b/evap/staff/urls.py @@ -73,6 +73,7 @@ path("user/<int:user_id>/edit", views.user_edit, name="user_edit"), path("user/list", views.user_list, name="user_list"), path("user/delete", views.user_delete, name="user_delete"), + path("user/resend_email", views.user_resend_email, name="user_resend_email"), path("user/bulk_update", views.user_bulk_update, name="user_bulk_update"), path("user/merge", views.user_merge_selection, name="user_merge_selection"), path("user/<int:main_user_id>/merge/<int:other_user_id>", views.user_merge, name="user_merge"), diff --git a/evap/staff/views.py b/evap/staff/views.py --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -2125,7 +2125,14 @@ def notify_reward_points(grantings, **_kwargs): return redirect("staff:user_index") return render( - request, "staff_user_form.html", {"form": form, "evaluations_contributing_to": evaluations_contributing_to} + request, + "staff_user_form.html", + { + "form": form, + "evaluations_contributing_to": evaluations_contributing_to, + "has_due_evaluations": bool(user.get_sorted_due_evaluations()), + "user_id": user_id, + }, ) @@ -2141,6 +2148,23 @@ def user_delete(request): return HttpResponse() # 200 OK +@require_POST +@manager_required +def user_resend_email(request): + user = get_object_from_dict_pk_entry_or_logged_40x(UserProfile, request.POST, "user_id") + + template = EmailTemplate.objects.get(name=EmailTemplate.EVALUATION_STARTED) + body_params = { + "user": user, + "evaluations": user.get_sorted_due_evaluations(), + "due_evaluations": {}, + } + + template.send_to_user(user, {}, body_params, use_cc=False) + messages.success(request, _("Successfully resent evaluation started email.")) + return HttpResponse() # 200 OK + + @manager_required def user_bulk_update(request): form = UserBulkUpdateForm(request.POST or None, request.FILES or None)
Manually send Evaluation Started email for user Sometimes the email address of a user has to be updated by managers after the email informing about the start of an evaluation was sent and bounced because the address was incorrect. In this situation, managers should be able to send an email informing about the currently running evaluations for this user (and the login code if applicable). On the user profile page, a button (`btn-light`) saying `Send email with evaluation list` should be added in the top right corner. This will send the `Evaluation Started` email template to the user, (see `send_to_user` and `send_to_users_in_evaluations` in `evaluation/models.py`) with all currently running evaluations in which the user can still vote (see `get_sorted_due_evaluations`). To avoid listing all evaluations twice in the email, the parameter `due_evaluations` should be empty.
@janno42 We discussed editing the email template because not all evaluations from `get_sorted_due_evaluations` have recently started. We settled on first implementing it as explained here and seeing what we want to do then :) For simplicity, the email should list all due evaluations as described above. I don't think we need to update the template or create a new one (manually sending this email is a comfort feature that wouldn't be used for many users so we don't have to fear to include too much information in the email).
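A sketch of the sending step described above, using the existing template machinery; the view, URL and button wiring are omitted:

```python
from evap.evaluation.models import EmailTemplate


def resend_evaluation_started_email(user):
    # Send the "Evaluation Started" template to a single user, listing all
    # evaluations they can still vote in; due_evaluations stays empty so the
    # list is not included twice in the email.
    template = EmailTemplate.objects.get(name=EmailTemplate.EVALUATION_STARTED)
    body_params = {
        "user": user,
        "evaluations": user.get_sorted_due_evaluations(),
        "due_evaluations": {},
    }
    template.send_to_user(user, {}, body_params, use_cc=False)
```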
2023-05-08T16:47:55
e-valuation/EvaP
1,941
e-valuation__EvaP-1941
[ "1927" ]
bc8a0eb70b17c41141ee1e2d1651ed02c4c840f7
diff --git a/evap/rewards/views.py b/evap/rewards/views.py --- a/evap/rewards/views.py +++ b/evap/rewards/views.py @@ -2,6 +2,7 @@ from django.contrib import messages from django.core.exceptions import BadRequest, SuspiciousOperation +from django.db.models import Sum from django.http import HttpResponse from django.shortcuts import get_object_or_404, redirect, render from django.utils.translation import get_language @@ -91,7 +92,14 @@ def index(request): def reward_point_redemption_events(request): upcoming_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now()).order_by("date") past_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__lt=datetime.now()).order_by("-date") - template_data = {"upcoming_events": upcoming_events, "past_events": past_events} + total_points_granted = RewardPointGranting.objects.aggregate(Sum("value"))["value__sum"] or 0 + total_points_redeemed = RewardPointRedemption.objects.aggregate(Sum("value"))["value__sum"] or 0 + total_points_available = total_points_granted - total_points_redeemed + template_data = { + "upcoming_events": upcoming_events, + "past_events": past_events, + "total_points_available": total_points_available, + } return render(request, "rewards_reward_point_redemption_events.html", template_data)
Display number of available reward points On the staff page for reward point redemption events (`rewards/reward_point_redemption_events`), the total number of currently available (granted but not yet redeemed) number of reward points should be shown. The information should be placed in the upper right corner (in the same line as the "Create new event" button, vertically centered) with the following text: `Available reward points: <number>`.
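A sketch of how that number can be computed, assuming the two reward point models live in `evap.rewards.models`:

```python
from django.db.models import Sum

from evap.rewards.models import RewardPointGranting, RewardPointRedemption


def total_points_available() -> int:
    # Granted minus redeemed; aggregate() yields None when there are no rows.
    granted = RewardPointGranting.objects.aggregate(Sum("value"))["value__sum"] or 0
    redeemed = RewardPointRedemption.objects.aggregate(Sum("value"))["value__sum"] or 0
    return granted - redeemed
```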
2023-05-15T16:05:10
e-valuation/EvaP
1,954
e-valuation__EvaP-1954
[ "1808" ]
b1f5fb1b0728dd9c5df07377af5e1d93a41a04bc
diff --git a/evap/settings.py b/evap/settings.py --- a/evap/settings.py +++ b/evap/settings.py @@ -13,6 +13,8 @@ import sys from typing import Any, List, Tuple +from django.contrib.staticfiles.storage import ManifestStaticFilesStorage + BASE_DIR = os.path.dirname(os.path.realpath(__file__)) @@ -133,12 +135,17 @@ }, } + +class ManifestStaticFilesStorageWithJsReplacement(ManifestStaticFilesStorage): + support_js_module_import_aggregation = True + + STORAGES = { "default": { "BACKEND": "django.core.files.storage.FileSystemStorage", }, "staticfiles": { - "BACKEND": "django.contrib.staticfiles.storage.ManifestStaticFilesStorage", + "BACKEND": "evap.settings.ManifestStaticFilesStorageWithJsReplacement", }, }
ManifestStaticFilesStorage cache-busting doesn't work for JavaScript imports We currently use the `ManifestStaticFilesStorage` to have long browser cache times and cache-busting after updates. On `collectstatic`, it creates a copy of all files that also contains the file hash in the name -> `/static/css/evap.css` becomes `/static/css/evap.bfc8f42bfc39.css"`. In django templates, we use the `static` template tag to mark places where this replacement is necessary. In CSS files, relative file names are automatically changed. However, this doesn't work for javascript files that import each other. For example, our current `contact_modal.js` imports stuff from `utils.js`. This import statement is missing the hash in the file name. The access will work, because the files are also available under their original name, but browsers might use the old, cached files. Currently, production serves files with a one year cache expiring time (3 hours for the raw files without hashes in the name). So, after an update, users might have broken javascript until that time runs out or they refresh with Ctrl+F5. Options: * Reduce the cache time, make the browser re-check more often (~10minutes?). Ensure that a re-check is a simple "nothing has changed" from the webserver (and is fast). Our webserver already sends out ETags with the requests. This would allow us to drop the ManifestStaticFilesStorage (or reduce it to only css and image files). I prefer this option. * We considered whether we can have a "dear browser, the website was last updated on X, please discard all earlier cached files" header, so that browsers recheck files where they have older versions, but it doesn't seem like there is such a header available in HTTP. * We could also manually change import-statements in js/ts files, maybe by changing the ManifestStaticFilesStorage, but it would probably be painful and error-prone. * We think we can not mark these places, like with the `static` template tag, because it would break javascript tooling. * JS tooling could probably solve this (webpack, ...). This probably exists somewhere on the internet, collectstatic could then run this. However, in my experience, javascript packaging always is a pain, so I'd vote to not do that. @niklasmohrin @karyon @janno42 opinions?
As far as I can tell, production serves files with a 1 year max-age. That was the point of having hashes in the filenames: to enable long cache expiration dates so the browser doesn't need to send a request for that file at all. ![image](https://user-images.githubusercontent.com/1891915/189767799-1f28d5e2-25ba-44c3-99a4-bc66c2b8c683.png) Regarding your first option: That would certainly work and be very simple and probably barely noticeable by the user, but I could imagine it introduces additional roundtrips to the server before rendering the page starts. Regarding the second option, I'm not sure how a last-modified header would help but there is one and production also responds with it: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Last-Modified Regarding your third option: The ManifestStaticFilesStorage does fix import paths for source maps and css imports already, see [the ManifestStaticFilesStorage docs](https://docs.djangoproject.com/en/4.1/ref/contrib/staticfiles/#django.contrib.staticfiles.storage.ManifestStaticFilesStorage). Fixing paths of ES module imports is a missing feature, see https://code.djangoproject.com/ticket/32319. There was an attempt to fix this in [this django PR](https://github.com/django/django/pull/15058/files). Example code how this additional regex might be used is here: https://code.djangoproject.com/ticket/32383. This was not merged because of unhandled edge cases, but it might be enough for us. Personally I would prefer trying out the third option, if there's a way to prevent access to the original file name so we can catch this error earlier if this breaks down again. > As far as I can tell, production serves files with a 1 year max-age. That was the point of having hashes in the filenames: to enable long cache expiration dates so the browser doesn't need to send a request for that file at all. Ah, I probably checked the wrong file: Yes, all files with hashes in the name are currently served with an expiration of one year. The original files without a hash in the name are served with an expiration of 3 hours. https://github.com/e-valuation/EvaP/pull/1529/files#diff-4d64646f3ddb433e7cabd98f95d12d2886a06b1f461ff2e1b66fdfb1045aa374R11 code.djangoproject.com seems to be down currently, will have to look into that later. If the code works for us and we have known edge cases where we know it will break, I'd also be fine with that. Updated the text above for option 2 -- "last-modified" was a wrong description sine that exists already. The idea was to have a "dear browser, please discard all cached entries older than _timestamp_" header. But I don't know of any such header. Regarding the delayed page load on option 1: From within the HPI network, an "If-Modified-Since" request to revalidate whether the current cached version is still up to date takes 5 to 8 milliseconds. Round trip times may be higher from external networks though. This would then be added to one page load every ten minutes, increasing the time-to-interactive for that page load. The [django issue](https://code.djangoproject.com/ticket/32319) linked above was closed as fixed 3 months ago, and the [preliminary release notes for django 4.2](https://docs.djangoproject.com/en/dev/releases/4.2/#django-contrib-staticfiles) contain this: > [ManifestStaticFilesStorage](https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#django.contrib.staticfiles.storage.ManifestStaticFilesStorage) now replaces paths to JavaScript modules in import and export statements with their hashed counterparts. 
So this might just resolve with django 4.2.

Updated [release notes for Django 4.2](https://docs.djangoproject.com/en/4.2/releases/4.2/):

> [ManifestStaticFilesStorage](https://docs.djangoproject.com/en/4.2/ref/contrib/staticfiles/#django.contrib.staticfiles.storage.ManifestStaticFilesStorage) now has experimental support for replacing paths to JavaScript modules in import and export statements with their hashed counterparts. If you want to try it, subclass ManifestStaticFilesStorage and set the support_js_module_import_aggregation attribute to True.
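Trying that experimental flag out would roughly amount to the following (a minimal sketch, assuming Django 4.2; the module path `evap/static_storage.py` and the class name are placeholders for illustration, not taken from the EvaP codebase):

```python
# evap/static_storage.py -- hypothetical module, shown only to illustrate the
# subclassing described in the Django 4.2 release notes quoted above.
from django.contrib.staticfiles.storage import ManifestStaticFilesStorage


class ModuleImportManifestStaticFilesStorage(ManifestStaticFilesStorage):
    # Experimental in Django 4.2: also rewrite paths in JS "import"/"export"
    # statements to their hashed counterparts during collectstatic.
    support_js_module_import_aggregation = True


# settings.py -- Django 4.2 configures file storage via the STORAGES setting.
STORAGES = {
    "default": {"BACKEND": "django.core.files.storage.FileSystemStorage"},
    "staticfiles": {
        "BACKEND": "evap.static_storage.ModuleImportManifestStaticFilesStorage",
    },
}
```

With collectstatic pointed at such a backend, module import paths in the collected JS files would be rewritten alongside the CSS and source map references that ManifestStaticFilesStorage already handles.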
2023-06-05T17:58:29
e-valuation/EvaP
1,968
e-valuation__EvaP-1968
[ "1780" ]
ad7850402a2d3937d172f5ac491de8070d8e4c93
diff --git a/evap/settings.py b/evap/settings.py --- a/evap/settings.py +++ b/evap/settings.py @@ -67,7 +67,7 @@ # email domains for the internal users of the hosting institution used to # figure out who is an internal user -INSTITUTION_EMAIL_DOMAINS = ["institution.example.com"] +INSTITUTION_EMAIL_DOMAINS = ["institution.example.com", "student.institution.example.com"] # List of tuples defining email domains that should be replaced on saving UserProfiles. # Emails ending on the first value will have this part replaced by the second value. diff --git a/evap/staff/views.py b/evap/staff/views.py --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -12,7 +12,19 @@ from django.contrib.messages.views import SuccessMessageMixin from django.core.exceptions import PermissionDenied, SuspiciousOperation from django.db import IntegrityError, transaction -from django.db.models import BooleanField, Case, Count, ExpressionWrapper, IntegerField, Prefetch, Q, Sum, When +from django.db.models import ( + BooleanField, + Case, + Count, + ExpressionWrapper, + Func, + IntegerField, + OuterRef, + Prefetch, + Q, + Sum, + When, +) from django.dispatch import receiver from django.forms import BaseForm, formset_factory from django.forms.models import inlineformset_factory, modelformset_factory @@ -2294,6 +2306,35 @@ class UserMergeSelectionView(FormView): form_class = UserMergeSelectionForm template_name = "staff_user_merge_selection.html" + def get_context_data(self, **kwargs) -> dict[str, Any]: + context = super().get_context_data(**kwargs) + + class UserNameFromEmail(Func): + # django docs support our usage here: + # https://docs.djangoproject.com/en/5.0/ref/models/expressions/#func-expressions + # pylint: disable=abstract-method + template = "split_part(%(expressions)s, '@', 1)" + + query = UserProfile.objects.annotate(username_part_of_email=UserNameFromEmail("email")) + + users_with_merge_candidates = query.annotate( + merge_candidate_pk=query.filter(username_part_of_email=UserNameFromEmail(OuterRef("email"))) + .filter(pk__lt=OuterRef("pk")) + .values("pk")[:1] + ).exclude(merge_candidate_pk=None) + + merge_candidate_ids = [user.merge_candidate_pk for user in users_with_merge_candidates] + merge_candidates_by_id = {user.pk: user for user in UserProfile.objects.filter(pk__in=merge_candidate_ids)} + + suggested_merges = [ + (user, merge_candidates_by_id[user.merge_candidate_pk]) + for user in users_with_merge_candidates + if not user.is_external and not merge_candidates_by_id[user.merge_candidate_pk].is_external + ] + + context["suggested_merges"] = suggested_merges + return context + def form_valid(self, form: UserMergeSelectionForm) -> HttpResponse: return redirect( "staff:user_merge",
diff --git a/evap/development/fixtures/test_data.json b/evap/development/fixtures/test_data.json --- a/evap/development/fixtures/test_data.json +++ b/evap/development/fixtures/test_data.json @@ -132980,7 +132980,7 @@ "title": "", "first_name_given": "", "first_name_chosen": "", - "last_name": "", + "last_name": "reviewer", "language": "", "is_proxy_user": false, "login_key": null, @@ -133008,7 +133008,7 @@ "title": "", "first_name_given": "", "first_name_chosen": "", - "last_name": "", + "last_name": "proxy", "language": "", "is_proxy_user": true, "login_key": null, @@ -133043,7 +133043,7 @@ "title": "", "first_name_given": "", "first_name_chosen": "", - "last_name": "", + "last_name": "proxy_delegate", "language": "", "is_proxy_user": false, "login_key": null, @@ -133071,7 +133071,79 @@ "title": "", "first_name_given": "", "first_name_chosen": "", - "last_name": "", + "last_name": "proxy_delegate_2", + "language": "", + "is_proxy_user": false, + "login_key": null, + "login_key_valid_until": null, + "is_active": true, + "notes": "", + "startpage": "DE", + "groups": [], + "user_permissions": [], + "delegates": [], + "cc_users": [] + } +}, +{ + "model": "evaluation.userprofile", + "fields": { + "password": "eZAyFmtqHydCIFtGdbevAxiVjiRpqMtmaVUCrmkcfXdoJDigmGWPVNHeoYYyRojokKUJjsgPSPvZkjiiIHSIQlBfOKtQFDbZlPEyKnrQRrHdPtEhUYHqJauIlyIkYpBM", + "last_login": null, + "is_superuser": false, + "email": "[email protected]", + "title": "", + "first_name_given": "Vincenzo Alfredo", + "first_name_chosen": "", + "last_name": "Boston", + "language": "", + "is_proxy_user": false, + "login_key": null, + "login_key_valid_until": null, + "is_active": true, + "notes": "", + "startpage": "DE", + "groups": [], + "user_permissions": [], + "delegates": [], + "cc_users": [] + } +}, +{ + "model": "evaluation.userprofile", + "fields": { + "password": "utAhMBbTpirVqtaoPpadEHdamaehnXWbEsliMMSnwDBYJcTnHluinAxkTeEupPoBzpuDBMYeXbpwmockMtQNYegbMuxkUBEBKqWGkOEFAWxzUFjdxevtIwYzvAgHCAwD", + "last_login": null, + "is_superuser": false, + "email": "[email protected]", + "title": "", + "first_name_given": "Bud", + "first_name_chosen": "", + "last_name": "LedBetter", + "language": "", + "is_proxy_user": false, + "login_key": null, + "login_key_valid_until": null, + "is_active": true, + "notes": "", + "startpage": "DE", + "groups": [], + "user_permissions": [], + "delegates": [], + "cc_users": [] + } +}, +{ + "model": "evaluation.userprofile", + "fields": { + "password": "naFmzOVrFhXrVVLsIGFYceDAarTGwDRFZKGJwBvKhNFCpupezBrwhorUHsyQSpUxLFKSQuOurcIyoBBYRjARXjzcJCbqYRiKRMOwvdTqwNjAbYDhUKbopBPDYhANXUkI", + "last_login": null, + "is_superuser": false, + "email": "[email protected]", + "title": "", + "first_name_given": "Melody", + "first_name_chosen": "", + "last_name": "Large", "language": "", "is_proxy_user": false, "login_key": null, diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -339,7 +339,7 @@ def get_post_params(cls): class TestUserMergeSelectionView(WebTestStaffMode): - url = "/staff/user/merge" + url = reverse("staff:user_merge_selection") @classmethod def setUpTestData(cls): @@ -348,6 +348,10 @@ def setUpTestData(cls): cls.main_user = baker.make(UserProfile, _fill_optional=["email"]) cls.other_user = baker.make(UserProfile, _fill_optional=["email"]) + # The merge candidate is created first, so the account is older. 
+ cls.suggested_merge_candidate = baker.make(UserProfile, email="[email protected]") + cls.suggested_main_user = baker.make(UserProfile, email="[email protected]") + def test_redirection_user_merge_view(self): page = self.app.get(self.url, user=self.manager) @@ -360,6 +364,19 @@ def test_redirection_user_merge_view(self): self.assertContains(page, self.main_user.email) self.assertContains(page, self.other_user.email) + def test_suggested_merge(self): + page = self.app.get(self.url, user=self.manager) + + expected_url = reverse( + "staff:user_merge", args=[self.suggested_main_user.id, self.suggested_merge_candidate.id] + ) + unexpected_url = reverse( + "staff:user_merge", args=[self.suggested_merge_candidate.id, self.suggested_main_user.id] + ) + + self.assertContains(page, f'<a href="{expected_url}"') + self.assertNotContains(page, f'<a href="{unexpected_url}"') + class TestUserMergeView(WebTestStaffModeWith200Check): @classmethod
User profile merge suggestions

On the user merge page, two user profiles can be selected and then merged. The view should be extended by a second part (placed in a second card on the right, the two cards should be placed like those on the user profile index page) which lists user profiles that should probably be merged. The suggestion list should show all user profile pairs where

- both users are internal and
- the first part of the users' email addresses (before the `@`) matches.

For each of the entries, a button should be shown which links to the user profile merge preview page of the suggested merge of the two user profiles. The main user profile to be kept is the one with the higher ID.
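A minimal sketch of the suggested pairing rule in plain Python (the `suggested_merges` helper is made up for illustration; the actual ORM-based query is the one in the patch above, and only `pk`, `email` and `is_external` are assumed to exist on the user objects):

```python
def suggested_merges(users):
    """Pair each internal user with an earlier-created internal user that has
    the same email local part (everything before the '@').

    In each pair, the newer account (higher ID) is the main user to keep and
    the older account (lower ID) is the merge candidate.
    """
    seen_by_local_part = {}
    pairs = []
    for user in sorted(users, key=lambda u: u.pk):
        if user.is_external:  # only internal users should be suggested
            continue
        local_part = user.email.split("@", 1)[0]
        if local_part in seen_by_local_part:
            candidate = seen_by_local_part[local_part]  # older, lower ID
            pairs.append((user, candidate))  # (main user to keep, merge candidate)
        seen_by_local_part[local_part] = user
    return pairs
```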
2023-06-26T16:43:27
e-valuation/EvaP
1,969
e-valuation__EvaP-1969
[ "1963", "1963" ]
4e9cb1f7ad0efdcbe4612617560865f0beea25f4
diff --git a/evap/evaluation/migrations/0140_alter_question_type.py b/evap/evaluation/migrations/0140_alter_question_type.py new file mode 100644 --- /dev/null +++ b/evap/evaluation/migrations/0140_alter_question_type.py @@ -0,0 +1,37 @@ +# Generated by Django 4.2.3 on 2023-07-17 21:24 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("evaluation", "0139_userprofile_startpage"), + ] + + operations = [ + migrations.AlterField( + model_name="question", + name="type", + field=models.PositiveSmallIntegerField( + choices=[ + ("Text", ((0, "Text question"),)), + ("Unipolar Likert", ((1, "Positive agreement question"), (12, "Negative agreement question"))), + ("Grade", ((2, "Grade question"),)), + ( + "Bipolar Likert", + ( + (6, "Easy-difficult question"), + (7, "Few-many question"), + (8, "Little-much question"), + (9, "Small-large question"), + (10, "Slow-fast question"), + (11, "Short-long question"), + ), + ), + ("Yes-no", ((3, "Positive yes-no question"), (4, "Negative yes-no question"))), + ("Layout", ((5, "Heading"),)), + ], + verbose_name="question type", + ), + ), + ] diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -1088,7 +1088,8 @@ def remove_answers_to_questionnaires(self, questionnaires): class QuestionType: TEXT = 0 - LIKERT = 1 + POSITIVE_LIKERT = 1 + NEGATIVE_LIKERT = 12 GRADE = 2 EASY_DIFFICULT = 6 FEW_MANY = 7 @@ -1106,7 +1107,13 @@ class Question(models.Model): QUESTION_TYPES = ( (_("Text"), ((QuestionType.TEXT, _("Text question")),)), - (_("Unipolar Likert"), ((QuestionType.LIKERT, _("Agreement question")),)), + ( + _("Unipolar Likert"), + ( + (QuestionType.POSITIVE_LIKERT, _("Positive agreement question")), + (QuestionType.NEGATIVE_LIKERT, _("Negative agreement question")), + ), + ), (_("Grade"), ((QuestionType.GRADE, _("Grade question")),)), ( _("Bipolar Likert"), @@ -1168,8 +1175,12 @@ def answer_class(self): raise AssertionError(f"Unknown answer type: {self.type!r}") @property - def is_likert_question(self): - return self.type == QuestionType.LIKERT + def is_positive_likert_question(self): + return self.type == QuestionType.POSITIVE_LIKERT + + @property + def is_negative_likert_question(self): + return self.type == QuestionType.NEGATIVE_LIKERT @property def is_bipolar_likert_question(self): @@ -1207,7 +1218,8 @@ def is_rating_question(self): return ( self.is_grade_question or self.is_bipolar_likert_question - or self.is_likert_question + or self.is_positive_likert_question + or self.is_negative_likert_question or self.is_yes_no_question ) @@ -1231,6 +1243,7 @@ class Choices(NamedTuple): colors: tuple[str] grades: tuple[Number] names: list[StrOrPromise] + is_inverted: bool class BipolarChoices(NamedTuple): @@ -1241,6 +1254,7 @@ class BipolarChoices(NamedTuple): names: list[StrOrPromise] plus_name: StrOrPromise minus_name: StrOrPromise + is_inverted: bool NO_ANSWER = 6 @@ -1256,6 +1270,7 @@ class BipolarChoices(NamedTuple): "values": (-3, -2, -1, 0, 1, 2, 3, NO_ANSWER), "colors": ("red", "orange", "lime", "green", "lime", "orange", "red", "gray"), "grades": (5, 11 / 3, 7 / 3, 1, 7 / 3, 11 / 3, 5), + "is_inverted": False, } BASE_YES_NO_CHOICES = { @@ -1266,7 +1281,7 @@ class BipolarChoices(NamedTuple): } CHOICES: dict[int, Choices | BipolarChoices] = { - QuestionType.LIKERT: Choices( + QuestionType.POSITIVE_LIKERT: Choices( names=[ _("Strongly\nagree"), _("Agree"), @@ -1275,6 +1290,19 @@ class BipolarChoices(NamedTuple): 
_("Strongly\ndisagree"), _("No answer"), ], + is_inverted=False, + **BASE_UNIPOLAR_CHOICES, # type: ignore + ), + QuestionType.NEGATIVE_LIKERT: Choices( + names=[ + _("Strongly\ndisagree"), + _("Disagree"), + _("Neutral"), + _("Agree"), + _("Strongly\nagree"), + _("No answer"), + ], + is_inverted=True, **BASE_UNIPOLAR_CHOICES, # type: ignore ), QuestionType.GRADE: Choices( @@ -1286,6 +1314,7 @@ class BipolarChoices(NamedTuple): "5", _("No answer"), ], + is_inverted=False, **BASE_UNIPOLAR_CHOICES, # type: ignore ), QuestionType.EASY_DIFFICULT: BipolarChoices( @@ -1384,6 +1413,7 @@ class BipolarChoices(NamedTuple): _("No"), _("No answer"), ], + is_inverted=False, **BASE_YES_NO_CHOICES, # type: ignore ), QuestionType.NEGATIVE_YES_NO: Choices( @@ -1392,6 +1422,7 @@ class BipolarChoices(NamedTuple): _("Yes"), _("No answer"), ], + is_inverted=True, **BASE_YES_NO_CHOICES, # type: ignore ), }
diff --git a/evap/development/fixtures/test_data.json b/evap/development/fixtures/test_data.json --- a/evap/development/fixtures/test_data.json +++ b/evap/development/fixtures/test_data.json @@ -1498,10 +1498,10 @@ "fields": { "order": 370, "questionnaire": 89, - "text_de": "... stand auch außerhalb der Meetings zur Verfügung.", - "text_en": "... was available even outside the meetings.", + "text_de": "... stand nur selten zur Verfügung.", + "text_en": "... was rarely available.", "allows_additional_textanswers": true, - "type": 1 + "type": 12 } }, { @@ -110004,6 +110004,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110026,6 +110027,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110048,6 +110050,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110070,6 +110073,7 @@ "login_key": null, "login_key_valid_until": "2012-04-21", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110092,6 +110096,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110114,6 +110119,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110136,6 +110142,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110158,6 +110165,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110180,6 +110188,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110202,6 +110211,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110224,6 +110234,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110246,6 +110257,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110268,6 +110280,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110290,6 +110303,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110312,6 +110326,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110334,6 +110349,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110356,6 +110372,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110378,6 +110395,7 @@ "login_key": null, "login_key_valid_until": null, 
"is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110400,6 +110418,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110422,6 +110441,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110444,6 +110464,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110466,6 +110487,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110488,6 +110510,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110510,6 +110533,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110532,6 +110556,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110554,6 +110579,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110576,6 +110602,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110598,6 +110625,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110620,6 +110648,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110642,6 +110671,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110664,6 +110694,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110686,6 +110717,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110708,6 +110740,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110730,6 +110763,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110752,6 +110786,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110774,6 +110809,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110796,6 +110832,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110818,6 +110855,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110840,6 +110878,7 @@ "login_key": 
null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110862,6 +110901,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110884,6 +110924,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110906,6 +110947,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110928,6 +110970,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110950,6 +110993,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110972,6 +111016,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -110994,6 +111039,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111016,6 +111062,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111038,6 +111085,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111060,6 +111108,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111082,6 +111131,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111104,6 +111154,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111126,6 +111177,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111148,6 +111200,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111170,6 +111223,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111192,6 +111246,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111214,6 +111269,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111236,6 +111292,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111258,6 +111315,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111280,6 +111338,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ 
-111302,6 +111361,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111324,6 +111384,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111346,6 +111407,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111368,6 +111430,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111390,6 +111453,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111412,6 +111476,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111434,6 +111499,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111456,6 +111522,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111478,6 +111545,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111500,6 +111568,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111522,6 +111591,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -111557,6 +111627,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111579,6 +111650,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111601,6 +111673,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111623,6 +111696,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111645,6 +111719,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111667,6 +111742,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111689,6 +111765,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111711,6 +111788,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111733,6 +111811,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111755,6 +111834,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], 
"user_permissions": [], "delegates": [], @@ -111777,6 +111857,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111799,6 +111880,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111821,6 +111903,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111843,6 +111926,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111865,6 +111949,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111887,6 +111972,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111909,6 +111995,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111931,6 +112018,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111953,6 +112041,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111975,6 +112064,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -111997,6 +112087,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112019,6 +112110,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112041,6 +112133,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112063,6 +112156,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112085,6 +112179,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112107,6 +112202,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112129,6 +112225,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112151,6 +112248,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112173,6 +112271,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112195,6 +112294,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112217,6 +112317,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": 
true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112239,6 +112340,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112261,6 +112363,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -112287,6 +112390,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112309,6 +112413,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -112335,6 +112440,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112357,6 +112463,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112379,6 +112486,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112401,6 +112509,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112423,6 +112532,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112445,6 +112555,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112467,6 +112578,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112489,6 +112601,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -112518,6 +112631,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112540,6 +112654,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112562,6 +112677,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112584,6 +112700,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -112614,6 +112731,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -112640,6 +112758,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112662,6 +112781,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112684,6 +112804,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112706,6 +112827,7 @@ "login_key": null, 
"login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112728,6 +112850,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -112754,6 +112877,7 @@ "login_key": 841793788, "login_key_valid_until": "2013-09-30", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -112783,6 +112907,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -112812,6 +112937,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112834,6 +112960,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112856,6 +112983,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112878,6 +113006,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112900,6 +113029,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112922,6 +113052,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112944,6 +113075,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112966,6 +113098,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -112988,6 +113121,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113010,6 +113144,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113032,6 +113167,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113054,6 +113190,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113076,6 +113213,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113098,6 +113236,7 @@ "login_key": 1551612459, "login_key_valid_until": "2013-09-17", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -113127,6 +113266,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113149,6 +113289,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113171,6 +113312,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], 
"delegates": [], @@ -113193,6 +113335,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113215,6 +113358,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113237,6 +113381,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113259,6 +113404,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113281,6 +113427,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -113307,6 +113454,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113329,6 +113477,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -113358,6 +113507,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113380,6 +113530,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113402,6 +113553,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113424,6 +113576,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113446,6 +113599,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113468,6 +113622,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113490,6 +113645,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113512,6 +113668,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113534,6 +113691,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113556,6 +113714,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113578,6 +113737,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113600,6 +113760,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113622,6 +113783,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113644,6 +113806,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", 
"groups": [], "user_permissions": [], "delegates": [], @@ -113666,6 +113829,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113688,6 +113852,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113710,6 +113875,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113732,6 +113898,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113754,6 +113921,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113776,6 +113944,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113798,6 +113967,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113820,6 +113990,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113842,6 +114013,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113864,6 +114036,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113886,6 +114059,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113908,6 +114082,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113930,6 +114105,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113952,6 +114128,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113974,6 +114151,7 @@ "login_key": null, "login_key_valid_until": "2012-10-01", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -113996,6 +114174,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114018,6 +114197,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114040,6 +114220,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114062,6 +114243,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114084,6 +114266,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114106,6 +114289,7 @@ "login_key": null, "login_key_valid_until": 
null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114128,6 +114312,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114150,6 +114335,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114172,6 +114358,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114194,6 +114381,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114216,6 +114404,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114238,6 +114427,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114260,6 +114450,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114282,6 +114473,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114304,6 +114496,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114326,6 +114519,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114348,6 +114542,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114370,6 +114565,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114392,6 +114588,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114414,6 +114611,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114436,6 +114634,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114458,6 +114657,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114480,6 +114680,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114502,6 +114703,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114524,6 +114726,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114546,6 +114749,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114568,6 +114772,7 @@ 
"login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114590,6 +114795,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114612,6 +114818,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114634,6 +114841,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114656,6 +114864,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114678,6 +114887,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -114734,6 +114944,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114756,6 +114967,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114778,6 +114990,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114800,6 +115013,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114822,6 +115036,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114844,6 +115059,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114866,6 +115082,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114888,6 +115105,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114910,6 +115128,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114932,6 +115151,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114954,6 +115174,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114976,6 +115197,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -114998,6 +115220,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115020,6 +115243,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115042,6 +115266,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], 
"delegates": [], @@ -115064,6 +115289,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115086,6 +115312,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115108,6 +115335,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115130,6 +115358,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115152,6 +115381,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115174,6 +115404,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115196,6 +115427,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115218,6 +115450,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115240,6 +115473,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115262,6 +115496,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115284,6 +115519,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115306,6 +115542,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115328,6 +115565,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115350,6 +115588,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115372,6 +115611,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115394,6 +115634,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115416,6 +115657,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115438,6 +115680,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115460,6 +115703,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115482,6 +115726,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115504,6 +115749,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", 
"groups": [], "user_permissions": [], "delegates": [], @@ -115526,6 +115772,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115548,6 +115795,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115570,6 +115818,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115592,6 +115841,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115614,6 +115864,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115636,6 +115887,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115658,6 +115910,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115680,6 +115933,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115702,6 +115956,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115724,6 +115979,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115746,6 +116002,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115768,6 +116025,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115790,6 +116048,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115812,6 +116071,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115834,6 +116094,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115856,6 +116117,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115878,6 +116140,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115900,6 +116163,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115922,6 +116186,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115944,6 +116209,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115966,6 +116232,7 @@ "login_key": null, "login_key_valid_until": null, 
"is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -115988,6 +116255,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116010,6 +116278,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116032,6 +116301,7 @@ "login_key": null, "login_key_valid_until": "2012-05-10", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116054,6 +116324,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116076,6 +116347,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116098,6 +116370,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116120,6 +116393,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116142,6 +116416,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116164,6 +116439,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116186,6 +116462,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116208,6 +116485,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116230,6 +116508,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116252,6 +116531,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116274,6 +116554,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116296,6 +116577,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116318,6 +116600,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116340,6 +116623,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116362,6 +116646,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116384,6 +116669,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116406,6 +116692,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116428,6 +116715,7 @@ 
"login_key": 658405473, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -116466,6 +116754,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116488,6 +116777,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116510,6 +116800,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116532,6 +116823,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -116558,6 +116850,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116580,6 +116873,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116602,6 +116896,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116624,6 +116919,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -116653,6 +116949,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116675,6 +116972,7 @@ "login_key": 630273331, "login_key_valid_until": "2014-12-01", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -116704,6 +117002,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116726,6 +117025,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116748,6 +117048,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116770,6 +117071,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116792,6 +117094,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116814,6 +117117,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116836,6 +117140,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116858,6 +117163,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116880,6 +117186,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116902,6 +117209,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], 
"user_permissions": [], "delegates": [], @@ -116924,6 +117232,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116946,6 +117255,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116968,6 +117278,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -116990,6 +117301,7 @@ "login_key": null, "login_key_valid_until": "2013-05-16", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117012,6 +117324,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117034,6 +117347,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117056,6 +117370,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117078,6 +117393,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117100,6 +117416,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117122,6 +117439,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117144,6 +117462,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117166,6 +117485,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117188,6 +117508,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117210,6 +117531,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117232,6 +117554,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117254,6 +117577,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117276,6 +117600,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117298,6 +117623,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117320,6 +117646,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117342,6 +117669,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117364,6 +117692,7 @@ "login_key": null, "login_key_valid_until": null, 
"is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117386,6 +117715,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117408,6 +117738,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117430,6 +117761,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117452,6 +117784,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117474,6 +117807,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117496,6 +117830,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117518,6 +117853,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117540,6 +117876,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117562,6 +117899,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117584,6 +117922,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117606,6 +117945,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117628,6 +117968,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117650,6 +117991,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117672,6 +118014,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117694,6 +118037,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117716,6 +118060,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117738,6 +118083,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117760,6 +118106,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117782,6 +118129,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117804,6 +118152,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117826,6 +118175,7 @@ "login_key": 
null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117848,6 +118198,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117870,6 +118221,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117892,6 +118244,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117914,6 +118267,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117936,6 +118290,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117958,6 +118313,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -117980,6 +118336,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118002,6 +118359,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118024,6 +118382,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118046,6 +118405,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118068,6 +118428,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118090,6 +118451,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118112,6 +118474,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118134,6 +118497,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [ [ "Manager" @@ -118160,6 +118524,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118182,6 +118547,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118204,6 +118570,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118226,6 +118593,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118248,6 +118616,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118270,6 +118639,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118292,6 +118662,7 @@ 
"login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118314,6 +118685,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118336,6 +118708,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118358,6 +118731,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118380,6 +118754,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118402,6 +118777,7 @@ "login_key": null, "login_key_valid_until": "2013-09-28", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118424,6 +118800,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118446,6 +118823,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118468,6 +118846,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118490,6 +118869,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118512,6 +118892,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [ [ "Manager" @@ -118538,6 +118919,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118560,6 +118942,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118582,6 +118965,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118604,6 +118988,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118626,6 +119011,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118648,6 +119034,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118670,6 +119057,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118692,6 +119080,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118714,6 +119103,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118736,6 +119126,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118758,6 
+119149,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118780,6 +119172,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118802,6 +119195,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118824,6 +119218,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118846,6 +119241,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118868,6 +119264,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118890,6 +119287,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118912,6 +119310,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118934,6 +119333,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118956,6 +119356,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -118978,6 +119379,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119000,6 +119402,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119022,6 +119425,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119044,6 +119448,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119066,6 +119471,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119088,6 +119494,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119110,6 +119517,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119132,6 +119540,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119154,6 +119563,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119176,6 +119586,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119198,6 +119609,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], 
"user_permissions": [], "delegates": [], @@ -119220,6 +119632,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119242,6 +119655,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119264,6 +119678,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119286,6 +119701,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119308,6 +119724,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119330,6 +119747,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119352,6 +119770,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119374,6 +119793,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119396,6 +119816,7 @@ "login_key": null, "login_key_valid_until": "2012-05-08", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119418,6 +119839,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119440,6 +119862,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119462,6 +119885,7 @@ "login_key": 979985223, "login_key_valid_until": "2020-05-25", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119484,6 +119908,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119506,6 +119931,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119528,6 +119954,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119550,6 +119977,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119572,6 +120000,7 @@ "login_key": 1818483627, "login_key_valid_until": "2020-05-25", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119594,6 +120023,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119616,6 +120046,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119638,6 +120069,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119660,6 +120092,7 @@ "login_key": null, 
"login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119682,6 +120115,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119704,6 +120138,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119726,6 +120161,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119748,6 +120184,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119770,6 +120207,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119792,6 +120230,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119814,6 +120253,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119836,6 +120276,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119858,6 +120299,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119880,6 +120322,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119902,6 +120345,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119924,6 +120368,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119946,6 +120391,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119968,6 +120414,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -119990,6 +120437,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120012,6 +120460,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120034,6 +120483,7 @@ "login_key": null, "login_key_valid_until": "2014-01-30", "is_active": true, + "startpage": "DE", "groups": [ [ "Manager" @@ -120060,6 +120510,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120070,7 +120521,7 @@ "model": "evaluation.userprofile", "fields": { "password": "pbkdf2_sha256$600000$8dcuxbY2OgmCEEj3nlUQWG$qk5ic3Er9FrkCd3VUjC1DEfxd76LPVKrzaMEWGSaxyA=", - "last_login": "2023-07-17T21:33:07.189", + "last_login": "2023-09-18T21:02:20.716", "is_superuser": true, "email": "[email protected]", "title": "", @@ -120082,6 +120533,7 @@ "login_key": 
530207530, "login_key_valid_until": "2020-05-25", "is_active": true, + "startpage": "DE", "groups": [ [ "Manager" @@ -120111,6 +120563,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120133,6 +120586,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120155,6 +120609,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120177,6 +120632,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120199,6 +120655,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120221,6 +120678,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120243,6 +120701,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120265,6 +120724,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120287,6 +120747,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120309,6 +120770,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120331,6 +120793,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120353,6 +120816,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120375,6 +120839,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120397,6 +120862,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120419,6 +120885,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120441,6 +120908,7 @@ "login_key": 71453046, "login_key_valid_until": "2020-05-25", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120463,6 +120931,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120485,6 +120954,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120507,6 +120977,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120529,6 +121000,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ 
-120551,6 +121023,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120573,6 +121046,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120595,6 +121069,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120617,6 +121092,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120639,6 +121115,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120661,6 +121138,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120683,6 +121161,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120705,6 +121184,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120727,6 +121207,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120749,6 +121230,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120771,6 +121253,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120793,6 +121276,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120815,6 +121299,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120837,6 +121322,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120859,6 +121345,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120881,6 +121368,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120903,6 +121391,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120925,6 +121414,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120947,6 +121437,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120969,6 +121460,7 @@ "login_key": null, "login_key_valid_until": "2013-12-22", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -120991,6 +121483,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], 
"user_permissions": [], "delegates": [], @@ -121013,6 +121506,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121035,6 +121529,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121057,6 +121552,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121079,6 +121575,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121101,6 +121598,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121123,6 +121621,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121145,6 +121644,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121167,6 +121667,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121189,6 +121690,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121211,6 +121713,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121233,6 +121736,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121255,6 +121759,7 @@ "login_key": 1209312068, "login_key_valid_until": "2020-05-25", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121277,6 +121782,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121299,6 +121805,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121321,6 +121828,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121343,6 +121851,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121365,6 +121874,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121387,6 +121897,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121409,6 +121920,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121431,6 +121943,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121453,6 +121966,7 @@ "login_key": null, "login_key_valid_until": null, 
"is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121475,6 +121989,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121497,6 +122012,7 @@ "login_key": null, "login_key_valid_until": "2013-05-07", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121519,6 +122035,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121541,6 +122058,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121563,6 +122081,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121585,6 +122104,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121607,6 +122127,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121629,6 +122150,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121651,6 +122173,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121673,6 +122196,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121695,6 +122219,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121717,6 +122242,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121739,6 +122265,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121761,6 +122288,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121783,6 +122311,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121805,6 +122334,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121827,6 +122357,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121849,6 +122380,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121871,6 +122403,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121893,6 +122426,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121915,6 +122449,7 @@ 
"login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121937,6 +122472,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121959,6 +122495,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -121981,6 +122518,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122003,6 +122541,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122025,6 +122564,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122047,6 +122587,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122069,6 +122610,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122091,6 +122633,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122113,6 +122656,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122135,6 +122679,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122157,6 +122702,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122179,6 +122725,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122201,6 +122748,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122223,6 +122771,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122245,6 +122794,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122267,6 +122817,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122289,6 +122840,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122311,6 +122863,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122333,6 +122886,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122355,6 +122909,7 @@ "login_key": 679371237, "login_key_valid_until": "2020-05-25", "is_active": true, + "startpage": "DE", "groups": [], 
"user_permissions": [], "delegates": [], @@ -122377,6 +122932,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122399,6 +122955,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122421,6 +122978,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122443,6 +123001,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122465,6 +123024,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122487,6 +123047,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122509,6 +123070,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122531,6 +123093,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122553,6 +123116,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122575,6 +123139,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122597,6 +123162,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122619,6 +123185,7 @@ "login_key": 1639072630, "login_key_valid_until": "2012-05-14", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -122648,6 +123215,7 @@ "login_key": 500083702, "login_key_valid_until": "2014-10-27", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -122677,6 +123245,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122699,6 +123268,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122721,6 +123291,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122743,6 +123314,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122765,6 +123337,7 @@ "login_key": 2127579123, "login_key_valid_until": "2013-03-04", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122787,6 +123360,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122809,6 +123383,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122831,6 +123406,7 @@ "login_key": null, 
"login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122853,6 +123429,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122875,6 +123452,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122897,6 +123475,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122919,6 +123498,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122941,6 +123521,7 @@ "login_key": null, "login_key_valid_until": "2013-05-21", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122963,6 +123544,7 @@ "login_key": 3816853, "login_key_valid_until": "2013-02-21", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -122985,6 +123567,7 @@ "login_key": 1456662462, "login_key_valid_until": "2013-12-23", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123007,6 +123590,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123029,6 +123613,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123051,6 +123636,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123073,6 +123659,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123095,6 +123682,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123117,6 +123705,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123139,6 +123728,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123161,6 +123751,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123183,6 +123774,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123205,6 +123797,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123227,6 +123820,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123249,6 +123843,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123271,6 +123866,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], 
"user_permissions": [], "delegates": [], @@ -123293,6 +123889,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123315,6 +123912,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123337,6 +123935,7 @@ "login_key": 28628139, "login_key_valid_until": "2014-11-10", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -123366,6 +123965,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -123395,6 +123995,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123417,6 +124018,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123439,6 +124041,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123461,6 +124064,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123483,6 +124087,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123505,6 +124110,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123527,6 +124133,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123549,6 +124156,7 @@ "login_key": 1219355322, "login_key_valid_until": "2012-10-07", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123571,6 +124179,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -123597,6 +124206,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123619,6 +124229,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -123648,6 +124259,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -123677,6 +124289,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -123703,6 +124316,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123725,6 +124339,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123747,6 +124362,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123769,6 +124385,7 @@ "login_key": null, "login_key_valid_until": null, 
"is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123791,6 +124408,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123813,6 +124431,7 @@ "login_key": 1729717352, "login_key_valid_until": "2012-10-03", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123835,6 +124454,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123857,6 +124477,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123879,6 +124500,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123901,6 +124523,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123923,6 +124546,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123945,6 +124569,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123967,6 +124592,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -123989,6 +124615,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124011,6 +124638,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124033,6 +124661,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124055,6 +124684,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124077,6 +124707,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124099,6 +124730,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124121,6 +124753,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124143,6 +124776,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124165,6 +124799,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124187,6 +124822,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124209,6 +124845,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124231,6 +124868,7 @@ 
"login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124253,6 +124891,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124275,6 +124914,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124297,6 +124937,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124319,6 +124960,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124341,6 +124983,7 @@ "login_key": 1049235434, "login_key_valid_until": "2014-08-26", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -124370,6 +125013,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124392,6 +125036,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124414,6 +125059,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124436,6 +125082,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124458,6 +125105,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124480,6 +125128,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124502,6 +125151,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124524,6 +125174,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124546,6 +125197,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124568,6 +125220,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124590,6 +125243,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124612,6 +125266,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124634,6 +125289,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124656,6 +125312,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124678,6 +125335,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], 
"user_permissions": [], "delegates": [], @@ -124700,6 +125358,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124722,6 +125381,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124744,6 +125404,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124766,6 +125427,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124788,6 +125450,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124810,6 +125473,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124832,6 +125496,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124854,6 +125519,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124876,6 +125542,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124898,6 +125565,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124920,6 +125588,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124942,6 +125611,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124964,6 +125634,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -124986,6 +125657,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [ [ "Manager" @@ -125012,6 +125684,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125034,6 +125707,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125056,6 +125730,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125078,6 +125753,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125100,6 +125776,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125122,6 +125799,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125144,6 +125822,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", 
"groups": [], "user_permissions": [], "delegates": [], @@ -125166,6 +125845,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125188,6 +125868,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125210,6 +125891,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125232,6 +125914,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125254,6 +125937,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125276,6 +125960,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125298,6 +125983,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125320,6 +126006,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125342,6 +126029,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125364,6 +126052,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125386,6 +126075,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125408,6 +126098,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125430,6 +126121,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125452,6 +126144,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125474,6 +126167,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125496,6 +126190,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125518,6 +126213,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125540,6 +126236,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125562,6 +126259,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125584,6 +126282,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125606,6 +126305,7 @@ "login_key": null, "login_key_valid_until": null, 
"is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125628,6 +126328,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125650,6 +126351,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125672,6 +126374,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125694,6 +126397,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125716,6 +126420,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125738,6 +126443,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125760,6 +126466,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125782,6 +126489,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125804,6 +126512,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125826,6 +126535,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125848,6 +126558,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125870,6 +126581,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125892,6 +126604,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125914,6 +126627,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125936,6 +126650,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125958,6 +126673,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -125980,6 +126696,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126002,6 +126719,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126024,6 +126742,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126046,6 +126765,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126068,6 +126788,7 @@ "login_key": 
null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126090,6 +126811,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126112,6 +126834,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126134,6 +126857,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126156,6 +126880,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126178,6 +126903,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126200,6 +126926,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126222,6 +126949,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126244,6 +126972,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126266,6 +126995,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126288,6 +127018,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126310,6 +127041,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126332,6 +127064,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126354,6 +127087,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126376,6 +127110,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126398,6 +127133,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126420,6 +127156,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126442,6 +127179,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126464,6 +127202,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126486,6 +127225,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126508,6 +127248,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ 
-126530,6 +127271,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126552,6 +127294,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126574,6 +127317,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126596,6 +127340,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126618,6 +127363,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126640,6 +127386,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126662,6 +127409,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126684,6 +127432,7 @@ "login_key": 165190195, "login_key_valid_until": "2014-08-11", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126713,6 +127462,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126735,6 +127485,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126757,6 +127508,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126779,6 +127531,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126801,6 +127554,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126823,6 +127577,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126845,6 +127600,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126867,6 +127623,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126889,6 +127646,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126911,6 +127669,7 @@ "login_key": null, "login_key_valid_until": "2013-05-30", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126933,6 +127692,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126955,6 +127715,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -126977,6 +127738,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", 
"groups": [], "user_permissions": [], "delegates": [], @@ -126999,6 +127761,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127021,6 +127784,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127043,6 +127807,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127065,6 +127830,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127087,6 +127853,7 @@ "login_key": 210979156, "login_key_valid_until": "2014-10-28", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127109,6 +127876,7 @@ "login_key": null, "login_key_valid_until": "2014-01-21", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127131,6 +127899,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127153,6 +127922,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127175,6 +127945,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127197,6 +127968,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127219,6 +127991,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127241,6 +128014,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127263,6 +128037,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127285,6 +128060,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127307,6 +128083,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127329,6 +128106,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127351,6 +128129,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127373,6 +128152,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127395,6 +128175,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127417,6 +128198,7 @@ "login_key": 2120134533, "login_key_valid_until": "2014-08-11", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -127446,6 +128228,7 @@ "login_key": 
null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127468,6 +128251,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127490,6 +128274,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127512,6 +128297,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127534,6 +128320,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127556,6 +128343,7 @@ "login_key": 983446795, "login_key_valid_until": "2013-10-17", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127578,6 +128366,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127600,6 +128389,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127622,6 +128412,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127644,6 +128435,7 @@ "login_key": 740394133, "login_key_valid_until": "2013-12-22", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127666,6 +128458,7 @@ "login_key": 822319998, "login_key_valid_until": "2013-12-22", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127688,6 +128481,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127710,6 +128504,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127732,6 +128527,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127754,6 +128550,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127776,6 +128573,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127798,6 +128596,7 @@ "login_key": 2052836776, "login_key_valid_until": "2013-10-17", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127820,6 +128619,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127842,6 +128642,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127864,6 +128665,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127886,6 +128688,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", 
"groups": [], "user_permissions": [], "delegates": [], @@ -127908,6 +128711,7 @@ "login_key": 2076676098, "login_key_valid_until": "2013-10-18", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127930,6 +128734,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127952,6 +128757,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127974,6 +128780,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -127996,6 +128803,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128018,6 +128826,7 @@ "login_key": 1692681790, "login_key_valid_until": "2013-10-19", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128040,6 +128849,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128062,6 +128872,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128084,6 +128895,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128106,6 +128918,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128128,6 +128941,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128150,6 +128964,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128172,6 +128987,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128194,6 +129010,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128216,6 +129033,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128238,6 +129056,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128260,6 +129079,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128282,6 +129102,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128304,6 +129125,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128326,6 +129148,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128348,6 +129171,7 @@ "login_key": null, 
"login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128370,6 +129194,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128392,6 +129217,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128414,6 +129240,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128436,6 +129263,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128458,6 +129286,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128480,6 +129309,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128502,6 +129332,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128524,6 +129355,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128546,6 +129378,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128568,6 +129401,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128590,6 +129424,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128612,6 +129447,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128634,6 +129470,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128656,6 +129493,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128678,6 +129516,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128700,6 +129539,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128722,6 +129562,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128744,6 +129585,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128766,6 +129608,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128788,6 +129631,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", 
"is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128810,6 +129654,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128832,6 +129677,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128854,6 +129700,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128876,6 +129723,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128898,6 +129746,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128920,6 +129769,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128942,6 +129792,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128964,6 +129815,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -128986,6 +129838,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129008,6 +129861,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129030,6 +129884,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129052,6 +129907,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129074,6 +129930,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129096,6 +129953,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129118,6 +129976,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129140,6 +129999,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129162,6 +130022,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129184,6 +130045,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129206,6 +130068,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129228,6 +130091,7 @@ "login_key": null, "login_key_valid_until": 
null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129250,6 +130114,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129272,6 +130137,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129294,6 +130160,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129316,6 +130183,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129338,6 +130206,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129360,6 +130229,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129382,6 +130252,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [ [ "Manager" @@ -129408,6 +130279,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129430,6 +130302,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129452,6 +130325,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129474,6 +130348,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129496,6 +130371,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129518,6 +130394,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129540,6 +130417,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129562,6 +130440,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129584,6 +130463,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129606,6 +130486,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129628,6 +130509,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129650,6 +130532,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129672,6 +130555,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129694,6 +130578,7 @@ "login_key": null, 
"login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129716,6 +130601,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129738,6 +130624,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129760,6 +130647,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129782,6 +130670,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129804,6 +130693,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129826,6 +130716,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129848,6 +130739,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129870,6 +130762,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129892,6 +130785,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129914,6 +130808,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129936,6 +130831,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129958,6 +130854,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -129980,6 +130877,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130002,6 +130900,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130024,6 +130923,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130046,6 +130946,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130068,6 +130969,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130090,6 +130992,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130112,6 +131015,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130134,6 +131038,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ 
-130156,6 +131061,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130178,6 +131084,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130200,6 +131107,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130222,6 +131130,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130244,6 +131153,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130266,6 +131176,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130288,6 +131199,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130310,6 +131222,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130332,6 +131245,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130354,6 +131268,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130376,6 +131291,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130398,6 +131314,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130420,6 +131337,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130442,6 +131360,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130464,6 +131383,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130486,6 +131406,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130508,6 +131429,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130530,6 +131452,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130552,6 +131475,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130574,6 +131498,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130596,6 +131521,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", 
"groups": [], "user_permissions": [], "delegates": [], @@ -130618,6 +131544,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130640,6 +131567,7 @@ "login_key": null, "login_key_valid_until": "2014-08-06", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130662,6 +131590,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -130688,6 +131617,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130710,6 +131640,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130732,6 +131663,7 @@ "login_key": 16347808, "login_key_valid_until": "2014-10-26", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130754,6 +131686,7 @@ "login_key": 702798179, "login_key_valid_until": "2014-08-21", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -130783,6 +131716,7 @@ "login_key": 929495153, "login_key_valid_until": "2014-08-21", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130812,6 +131746,7 @@ "login_key": 110846776, "login_key_valid_until": "2014-11-07", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -130841,6 +131776,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130863,6 +131799,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130885,6 +131822,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [ @@ -130914,6 +131852,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130936,6 +131875,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130958,6 +131898,7 @@ "login_key": 707881715, "login_key_valid_until": "2014-10-20", "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -130980,6 +131921,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -131002,6 +131944,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -131024,6 +131967,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -131046,6 +131990,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [], @@ -131068,6 +132013,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [ [ "Grade publisher" @@ -131094,6 +132040,7 @@ "login_key": 
null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [ [ "Reviewer" ] ], "user_permissions": [], "delegates": [], @@ -131120,6 +132067,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [ [ "Reviewer" ] ], "user_permissions": [], "delegates": [], @@ -131153,6 +132101,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [ [ "Reviewer" ] ], "user_permissions": [], "delegates": [], @@ -131179,6 +132128,7 @@ "login_key": null, "login_key_valid_until": null, "is_active": true, + "startpage": "DE", "groups": [], "user_permissions": [], "delegates": [],
diff --git a/evap/evaluation/tests/test_models.py b/evap/evaluation/tests/test_models.py
--- a/evap/evaluation/tests/test_models.py
+++ b/evap/evaluation/tests/test_models.py
@@ -297,7 +297,7 @@ def test_second_vote_sets_can_publish_text_results_to_true(self):
         )
         evaluation.save()
         top_general_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP)
-        baker.make(Question, questionnaire=top_general_questionnaire, type=QuestionType.LIKERT)
+        baker.make(Question, questionnaire=top_general_questionnaire, type=QuestionType.POSITIVE_LIKERT)
         evaluation.general_contribution.questionnaires.set([top_general_questionnaire])
         self.assertFalse(evaluation.can_publish_text_results)
diff --git a/evap/evaluation/tests/test_views.py b/evap/evaluation/tests/test_views.py
--- a/evap/evaluation/tests/test_views.py
+++ b/evap/evaluation/tests/test_views.py
@@ -7,7 +7,7 @@
 from django_webtest import WebTest
 from model_bakery import baker
-from evap.evaluation.models import UserProfile
+from evap.evaluation.models import Evaluation, Question, QuestionType, UserProfile
 from evap.evaluation.tests.tools import WebTestWith200Check, create_evaluation_with_responsible_and_editor
@@ -201,3 +201,31 @@ def test_edit_display_name(self):
         page = self.app.get(self.url, user=self.responsible)
         self.assertContains(page, "testdisplayname")
+
+
+class TestNegativeLikertQuestions(WebTest):
+    @classmethod
+    def setUpTestData(cls):
+        cls.voting_user = baker.make(UserProfile, email="[email protected]")
+
+        cls.evaluation = baker.make(
+            Evaluation,
+            participants=[cls.voting_user],
+            state=Evaluation.State.IN_EVALUATION,
+        )
+
+        cls.question = baker.make(
+            Question,
+            type=QuestionType.NEGATIVE_LIKERT,
+            text_en="Negative Likert Question",
+            text_de="Negative Likert Frage",
+        )
+
+        cls.evaluation.general_contribution.questionnaires.add(cls.question.questionnaire)
+
+        cls.url = reverse("student:vote", args=[cls.evaluation.pk])
+
+    def test_answer_ordering(self):
+        page = self.app.get(self.url, user=self.voting_user, status=200).body.decode()
+        self.assertLess(page.index("Strongly<br>disagree"), page.index("Strongly<br>agree"))
+        self.assertIn("The answer scale is inverted for this question", page)
diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py
--- a/evap/results/tests/test_exporters.py
+++ b/evap/results/tests/test_exporters.py
@@ -56,10 +56,10 @@ def test_questionnaire_ordering(self):
         questionnaire_3 = baker.make(Questionnaire, order=1, type=Questionnaire.Type.BOTTOM)
         questionnaire_4 = baker.make(Questionnaire, order=4, type=Questionnaire.Type.BOTTOM)
-        question_1 = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire_1)
-        question_2 = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire_2)
-        question_3 = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire_3)
-        question_4 = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire_4)
+        question_1 = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire_1)
+        question_2 = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire_2)
+        question_3 = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire_3)
+        question_4 = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire_4)
         evaluation.general_contribution.questionnaires.set(
             [questionnaire_1, questionnaire_2, questionnaire_3, questionnaire_4]
@@ -110,7 +110,7 @@ def test_heading_question_filtering(self):
         questionnaire = baker.make(Questionnaire)
         baker.make(Question, type=QuestionType.HEADING, questionnaire=questionnaire, order=0)
         heading_question = baker.make(Question, type=QuestionType.HEADING, questionnaire=questionnaire, order=1)
-        likert_question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire, order=2)
+        likert_question = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire, order=2)
         baker.make(Question, type=QuestionType.HEADING, questionnaire=questionnaire, order=3)
         contribution = baker.make(
@@ -202,7 +202,7 @@ def test_course_type_ordering(self):
         cache_results(evaluation_2)
         questionnaire = baker.make(Questionnaire)
-        question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire)
+        question = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire)
         evaluation_1.general_contribution.questionnaires.set([questionnaire])
         make_rating_answer_counters(question, evaluation_1.general_contribution)
@@ -359,9 +359,9 @@ def test_exclude_used_but_unanswered_questionnaires(self):
             course__degrees=[degree],
         )
         used_questionnaire = baker.make(Questionnaire)
-        used_question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=used_questionnaire)
+        used_question = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=used_questionnaire)
         unused_questionnaire = baker.make(Questionnaire)
-        unused_question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=unused_questionnaire)
+        unused_question = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=unused_questionnaire)
         evaluation.general_contribution.questionnaires.set([used_questionnaire, unused_questionnaire])
         make_rating_answer_counters(used_question, evaluation.general_contribution)
@@ -413,8 +413,8 @@ def test_correct_grades_and_bottom_numbers(self):
         )
         questionnaire1 = baker.make(Questionnaire, order=1)
         questionnaire2 = baker.make(Questionnaire, order=2)
-        question1 = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire1)
-        question2 = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire2)
+        question1 = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire1)
+        question2 = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire2)
         make_rating_answer_counters(question1, evaluation.general_contribution, [1, 0, 1, 0, 0])
         make_rating_answer_counters(question2, evaluation.general_contribution, [0, 1, 0, 1, 0])
@@ -448,7 +448,7 @@ def test_course_grade(self):
         expected_average = 2.0
         questionnaire = baker.make(Questionnaire)
-        question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire)
+        question = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire)
         for grades, e in zip(grades_per_eval, evaluations):
             make_rating_answer_counters(question, e.general_contribution, grades)
e.general_contribution.questionnaires.set([questionnaire]) @@ -504,8 +504,10 @@ def test_contributor_result_export(self): general_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) contributor_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.CONTRIBUTOR) - general_question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=general_questionnaire) - contributor_question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=contributor_questionnaire) + general_question = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=general_questionnaire) + contributor_question = baker.make( + Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=contributor_questionnaire + ) evaluation_1.general_contribution.questionnaires.set([general_questionnaire]) make_rating_answer_counters(general_question, evaluation_1.general_contribution, [2, 0, 0, 0, 0]) diff --git a/evap/results/tests/test_tools.py b/evap/results/tests/test_tools.py --- a/evap/results/tests/test_tools.py +++ b/evap/results/tests/test_tools.py @@ -178,8 +178,11 @@ def setUpTestData(cls): ) cls.questionnaire = baker.make(Questionnaire) cls.question_grade = baker.make(Question, questionnaire=cls.questionnaire, type=QuestionType.GRADE) - cls.question_likert = baker.make(Question, questionnaire=cls.questionnaire, type=QuestionType.LIKERT) - cls.question_likert_2 = baker.make(Question, questionnaire=cls.questionnaire, type=QuestionType.LIKERT) + cls.question_likert = baker.make(Question, questionnaire=cls.questionnaire, type=QuestionType.POSITIVE_LIKERT) + cls.question_likert_2 = baker.make(Question, questionnaire=cls.questionnaire, type=QuestionType.POSITIVE_LIKERT) + cls.question_negative_likert = baker.make( + Question, questionnaire=cls.questionnaire, type=QuestionType.NEGATIVE_LIKERT + ) cls.question_bipolar = baker.make(Question, questionnaire=cls.questionnaire, type=QuestionType.FEW_MANY) cls.question_bipolar_2 = baker.make(Question, questionnaire=cls.questionnaire, type=QuestionType.LITTLE_MUCH) cls.general_contribution = cls.evaluation.general_contribution @@ -214,6 +217,9 @@ def test_average_grade(self): *make_rating_answer_counters(self.question_likert, self.contribution1, [0, 0, 4, 0, 0], False), *make_rating_answer_counters(self.question_likert, self.general_contribution, [0, 0, 0, 0, 5], False), *make_rating_answer_counters(self.question_likert_2, self.general_contribution, [0, 0, 3, 0, 0], False), + *make_rating_answer_counters( + self.question_negative_likert, self.general_contribution, [0, 0, 0, 4, 0], False + ), *make_rating_answer_counters( self.question_bipolar, self.general_contribution, [0, 0, 0, 0, 0, 0, 2], False ), @@ -235,7 +241,9 @@ def test_average_grade(self): contributor2_average = 4 contributors_average = ((4 * contributor1_average) + (2 * contributor2_average)) / (4 + 2) # 2.9333333 - general_non_grade_average = ((5 * 5) + (3 * 3) + (2 * 5) + (4 * 7 / 3)) / (5 + 3 + 2 + 4) # 3.80952380 + general_non_grade_average = ((5 * 5) + (3 * 3) + (4 * 4) + (2 * 5) + (4 * 7 / 3)) / ( + 5 + 3 + 4 + 2 + 4 + ) # 3.85185185 contributors_percentage = settings.CONTRIBUTIONS_WEIGHT / ( settings.CONTRIBUTIONS_WEIGHT + settings.GENERAL_NON_GRADE_QUESTIONS_WEIGHT @@ -246,11 +254,11 @@ def test_average_grade(self): total_grade = ( contributors_percentage * contributors_average + general_non_grade_percentage * general_non_grade_average - ) # 1.1 + 2.38095238 = 3.48095238 + ) # 1.1 + 2.4074074 = 3.5074074 average_grade = 
distribution_to_grade(calculate_average_distribution(self.evaluation)) self.assertAlmostEqual(average_grade, total_grade) - self.assertAlmostEqual(average_grade, 3.48095238) + self.assertAlmostEqual(average_grade, 3.5074074) @override_settings( CONTRIBUTOR_GRADE_QUESTIONS_WEIGHT=4, @@ -464,7 +472,7 @@ def setUpTestData(cls): ) cls.questionnaire = baker.make(Questionnaire) cls.question = baker.make(Question, questionnaire=cls.questionnaire, type=QuestionType.TEXT) - cls.question_likert = baker.make(Question, questionnaire=cls.questionnaire, type=QuestionType.LIKERT) + cls.question_likert = baker.make(Question, questionnaire=cls.questionnaire, type=QuestionType.POSITIVE_LIKERT) cls.general_contribution = cls.evaluation.general_contribution cls.general_contribution.questionnaires.set([cls.questionnaire]) cls.responsible1_contribution = baker.make( diff --git a/evap/results/tests/test_views.py b/evap/results/tests/test_views.py --- a/evap/results/tests/test_views.py +++ b/evap/results/tests/test_views.py @@ -347,7 +347,9 @@ def setUpTestData(cls): questionnaires=[questionnaire], contributor=contributor, ) - cls.likert_question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire, order=2) + cls.likert_question = baker.make( + Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire, order=2 + ) cls.url = f"/results/semester/{cls.semester.id}/evaluation/{cls.evaluation.id}" def test_many_answers_evaluation_no_warning(self): @@ -406,17 +408,19 @@ def test_questionnaire_ordering(self): bottom_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.BOTTOM) top_heading_question = baker.make(Question, type=QuestionType.HEADING, questionnaire=top_questionnaire, order=0) - top_likert_question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=top_questionnaire, order=1) + top_likert_question = baker.make( + Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=top_questionnaire, order=1 + ) contributor_likert_question = baker.make( - Question, type=QuestionType.LIKERT, questionnaire=contributor_questionnaire + Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=contributor_questionnaire ) bottom_heading_question = baker.make( Question, type=QuestionType.HEADING, questionnaire=bottom_questionnaire, order=0 ) bottom_likert_question = baker.make( - Question, type=QuestionType.LIKERT, questionnaire=bottom_questionnaire, order=1 + Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=bottom_questionnaire, order=1 ) self.evaluation.general_contribution.questionnaires.set([top_questionnaire, bottom_questionnaire]) @@ -446,7 +450,7 @@ def test_heading_question_filtering(self): heading_question_0 = baker.make(Question, type=QuestionType.HEADING, questionnaire=questionnaire, order=0) heading_question_1 = baker.make(Question, type=QuestionType.HEADING, questionnaire=questionnaire, order=1) - likert_question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire, order=2) + likert_question = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire, order=2) heading_question_2 = baker.make(Question, type=QuestionType.HEADING, questionnaire=questionnaire, order=3) contribution = baker.make( @@ -504,7 +508,7 @@ def test_preview_with_rating_answers(self): Evaluation, state=Evaluation.State.EVALUATED, course=baker.make(Course, semester=self.semester) ) questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) - likert_question = baker.make(Question, type=QuestionType.LIKERT, 
questionnaire=questionnaire, order=1) + likert_question = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire, order=1) evaluation.general_contribution.questionnaires.set([questionnaire]) participants = baker.make(UserProfile, _bulk_create=True, _quantity=20) evaluation.participants.set(participants) @@ -527,7 +531,7 @@ def test_unpublished_single_results_show_results(self) -> None: voters=participants, ) questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) - likert_question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire, order=1) + likert_question = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire, order=1) evaluation.general_contribution.questionnaires.set([questionnaire]) make_rating_answer_counters(likert_question, evaluation.general_contribution) @@ -579,7 +583,7 @@ def setUpTestData(cls): questionnaire = baker.make(Questionnaire) cls.question_grade = baker.make(Question, questionnaire=questionnaire, type=QuestionType.GRADE) - baker.make(Question, questionnaire=questionnaire, type=QuestionType.LIKERT) + baker.make(Question, questionnaire=questionnaire, type=QuestionType.POSITIVE_LIKERT) cls.evaluation.general_contribution.questionnaires.set([questionnaire]) cls.responsible_contribution = baker.make( Contribution, contributor=responsible, evaluation=cls.evaluation, questionnaires=[questionnaire] @@ -914,7 +918,7 @@ def setUpTestData(cls): cls.url = f"/results/semester/{evaluation.course.semester.id}/evaluation/{evaluation.id}?view=export" questionnaire = baker.make(Questionnaire) - baker.make(Question, questionnaire=questionnaire, type=QuestionType.LIKERT) + baker.make(Question, questionnaire=questionnaire, type=QuestionType.POSITIVE_LIKERT) evaluation.general_contribution.questionnaires.set([questionnaire]) baker.make( diff --git a/evap/staff/tests/test_forms.py b/evap/staff/tests/test_forms.py --- a/evap/staff/tests/test_forms.py +++ b/evap/staff/tests/test_forms.py @@ -602,12 +602,12 @@ def test_prevent_contribution_deletion_with_answers(self): def test_answers_for_removed_questionnaires_deleted(self): # pylint: disable=too-many-locals evaluation = baker.make(Evaluation) - general_question_1 = baker.make(Question, type=QuestionType.LIKERT) - general_question_2 = baker.make(Question, type=QuestionType.LIKERT) + general_question_1 = baker.make(Question, type=QuestionType.POSITIVE_LIKERT) + general_question_2 = baker.make(Question, type=QuestionType.POSITIVE_LIKERT) general_questionnaire_1 = baker.make(Questionnaire, questions=[general_question_1]) general_questionnaire_2 = baker.make(Questionnaire, questions=[general_question_2]) evaluation.general_contribution.questionnaires.set([general_questionnaire_1, general_questionnaire_2]) - contributor_question = baker.make(Question, type=QuestionType.LIKERT) + contributor_question = baker.make(Question, type=QuestionType.POSITIVE_LIKERT) contributor_questionnaire = baker.make( Questionnaire, type=Questionnaire.Type.CONTRIBUTOR, @@ -1036,12 +1036,12 @@ def test_unused_questionnaire_visibility(self): def test_answers_for_removed_questionnaires_deleted(self): # pylint: disable=too-many-locals evaluation = baker.make(Evaluation) - general_question_1 = baker.make(Question, type=QuestionType.LIKERT) - general_question_2 = baker.make(Question, type=QuestionType.LIKERT) + general_question_1 = baker.make(Question, type=QuestionType.POSITIVE_LIKERT) + general_question_2 = baker.make(Question, type=QuestionType.POSITIVE_LIKERT) 
general_questionnaire_1 = baker.make(Questionnaire, questions=[general_question_1]) general_questionnaire_2 = baker.make(Questionnaire, questions=[general_question_2]) evaluation.general_contribution.questionnaires.set([general_questionnaire_1, general_questionnaire_2]) - contributor_question = baker.make(Question, type=QuestionType.LIKERT) + contributor_question = baker.make(Question, type=QuestionType.POSITIVE_LIKERT) contributor_questionnaire = baker.make( Questionnaire, type=Questionnaire.Type.CONTRIBUTOR, diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -2543,7 +2543,7 @@ def setUpTestData(cls): ) cls.url = reverse("staff:evaluation_textanswers", args=[cls.evaluation.pk]) top_general_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) - baker.make(Question, questionnaire=top_general_questionnaire, type=QuestionType.LIKERT) + baker.make(Question, questionnaire=top_general_questionnaire, type=QuestionType.POSITIVE_LIKERT) cls.evaluation.general_contribution.questionnaires.set([top_general_questionnaire]) questionnaire = baker.make(Questionnaire) @@ -2755,7 +2755,7 @@ def setUpTestData(cls): state=Evaluation.State.IN_EVALUATION, ) top_general_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) - baker.make(Question, questionnaire=top_general_questionnaire, type=QuestionType.LIKERT) + baker.make(Question, questionnaire=top_general_questionnaire, type=QuestionType.POSITIVE_LIKERT) cls.evaluation.general_contribution.questionnaires.set([top_general_questionnaire]) question = baker.make(Question, type=QuestionType.TEXT) @@ -3008,7 +3008,7 @@ def setUpTestData(cls): baker.make( Question, questionnaire=questionnaire, - type=iter([QuestionType.TEXT, QuestionType.GRADE, QuestionType.LIKERT]), + type=iter([QuestionType.TEXT, QuestionType.GRADE, QuestionType.POSITIVE_LIKERT]), _quantity=3, _bulk_create=True, allows_additional_textanswers=False, @@ -3263,7 +3263,7 @@ def setUpTestData(cls): state=Evaluation.State.IN_EVALUATION, ) top_general_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) - baker.make(Question, questionnaire=top_general_questionnaire, type=QuestionType.LIKERT) + baker.make(Question, questionnaire=top_general_questionnaire, type=QuestionType.POSITIVE_LIKERT) cls.text_question = baker.make(Question, questionnaire=top_general_questionnaire, type=QuestionType.TEXT) cls.evaluation.general_contribution.questionnaires.set([top_general_questionnaire]) diff --git a/evap/student/tests/test_views.py b/evap/student/tests/test_views.py --- a/evap/student/tests/test_views.py +++ b/evap/student/tests/test_views.py @@ -82,7 +82,7 @@ def setUpTestData(cls): Question, questionnaire=cls.contributor_questionnaire, order=1, type=QuestionType.TEXT ) cls.contributor_likert_question = baker.make( - Question, questionnaire=cls.contributor_questionnaire, order=2, type=QuestionType.LIKERT + Question, questionnaire=cls.contributor_questionnaire, order=2, type=QuestionType.POSITIVE_LIKERT ) cls.top_heading_question = baker.make( @@ -92,7 +92,7 @@ def setUpTestData(cls): Question, questionnaire=cls.top_general_questionnaire, order=1, type=QuestionType.TEXT ) cls.top_likert_question = baker.make( - Question, questionnaire=cls.top_general_questionnaire, order=2, type=QuestionType.LIKERT + Question, questionnaire=cls.top_general_questionnaire, order=2, type=QuestionType.POSITIVE_LIKERT ) cls.top_grade_question = baker.make( Question, 
questionnaire=cls.top_general_questionnaire, order=3, type=QuestionType.GRADE @@ -105,7 +105,7 @@ def setUpTestData(cls): Question, questionnaire=cls.bottom_general_questionnaire, order=1, type=QuestionType.TEXT ) cls.bottom_likert_question = baker.make( - Question, questionnaire=cls.bottom_general_questionnaire, order=2, type=QuestionType.LIKERT + Question, questionnaire=cls.bottom_general_questionnaire, order=2, type=QuestionType.POSITIVE_LIKERT ) cls.bottom_grade_question = baker.make( Question, questionnaire=cls.bottom_general_questionnaire, order=3, type=QuestionType.GRADE
Inverted Likert Scale We currently have the following likert scale, ranging from "Strongly agree" (positive) to "Strongly disagree" (negative): ![image](https://github.com/e-valuation/EvaP/assets/1781719/bf194a23-0776-47b4-855d-bb59bf877b63) The connotation is shown by color. In the results computation, grades reach from 1.0 ("Strongly agree") to 5.0 ("Strongly disagree"). For some questions, an inverted scale would be a better fit (e.g., "I felt overwhelmed by the workload"). Currently, these questions have to be rephrased ("I did not feel overwhelmed ...") which is not optimal. An inverted likert scale should be introduced, using the range from "Strongly disagree" (green, 1.0) to "Strongly agree" (red, 5.0). The current 'LIKERT' scale should be renamed to "POSITIVE_LIKERT" ("Positive agreement") and the new scale will be "NEGATIVE_LIKERT" ("Negative agreement").
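An illustrative sketch, not EvaP's actual model code: it shows how the two scales can keep the same 1.0 to 5.0 grade mapping and differ only in display order. The labels and the `POSITIVE_LIKERT`/`NEGATIVE_LIKERT` names are taken from the issue text and the diffs above; everything else is assumed for illustration.

```python
# Sketch only: grades always run 1.0 (first displayed answer) to 5.0 (last displayed answer);
# the negative scale just inverts which label sits at the "good" (green) end.
LIKERT_LABELS = ["Strongly agree", "Agree", "Neutral", "Disagree", "Strongly disagree"]


def answer_labels(question_type: str) -> list[str]:
    """Return the answer labels in display order for a 5-step agreement scale."""
    if question_type == "NEGATIVE_LIKERT":
        # Inverted scale: "Strongly disagree" is the positive (green, 1.0) end,
        # "Strongly agree" the negative (red, 5.0) end.
        return list(reversed(LIKERT_LABELS))
    return LIKERT_LABELS


print(answer_labels("POSITIVE_LIKERT")[0])  # "Strongly agree"  -> grade 1.0
print(answer_labels("NEGATIVE_LIKERT")[0])  # "Strongly disagree" -> grade 1.0
```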
2023-06-26T18:40:13
e-valuation/EvaP
1,978
e-valuation__EvaP-1978
[ "1976" ]
cedec25410705304354517e18a7cf50e1ac0aee3
diff --git a/evap/staff/importers/enrollment.py b/evap/staff/importers/enrollment.py --- a/evap/staff/importers/enrollment.py +++ b/evap/staff/importers/enrollment.py @@ -3,7 +3,7 @@ from collections.abc import Iterable from dataclasses import dataclass, fields from datetime import date, datetime -from typing import TypeAlias, TypeGuard, TypeVar +from typing import NoReturn, TypeAlias, TypeGuard, TypeVar from django.conf import settings from django.db import transaction @@ -39,7 +39,9 @@ @dataclass(frozen=True) class InvalidValue: # We make this a dataclass to make sure all instances compare equal. - pass + + def __bool__(self) -> NoReturn: + raise NotImplementedError("Bool conversion of InvalidValue is likely a bug") invalid_value = InvalidValue() @@ -326,6 +328,7 @@ def __init__(self, semester: Semester): @staticmethod def get_merge_hindrances(course_data: CourseData, merge_candidate: Course) -> list[str]: hindrances = [] + if merge_candidate.type != course_data.course_type: hindrances.append(_("the course type does not match")) @@ -336,9 +339,24 @@ def get_merge_hindrances(course_data: CourseData, merge_candidate: Course) -> li merge_candidate_evaluations = merge_candidate.evaluations.all() if len(merge_candidate_evaluations) != 1: hindrances.append(_("the existing course does not have exactly one evaluation")) - elif merge_candidate_evaluations[0].wait_for_grade_upload_before_publishing != course_data.is_graded: + return hindrances + + merge_candidate_evaluation: Evaluation = merge_candidate_evaluations[0] + + if merge_candidate_evaluation.wait_for_grade_upload_before_publishing != course_data.is_graded: hindrances.append(_("the evaluation of the existing course has a mismatching grading specification")) + if merge_candidate_evaluation.is_single_result: + hindrances.append(_("the evaluation of the existing course is a single result")) + return hindrances + + if merge_candidate_evaluation.state >= Evaluation.State.IN_EVALUATION: + hindrances.append( + _("the import would add participants to the existing evaluation but the evaluation is already running") + ) + else: + assert merge_candidate_evaluation._participant_count is None + return hindrances def set_course_merge_target(self, course_data: CourseData) -> None: @@ -407,7 +425,7 @@ def check_course_data(self, course_data: CourseData, location: ExcelFileLocation except CourseMergeLogic.NameEnCollisionException: self.name_en_collision_tracker.add_location_for_key(location, course_data.name_en) - if course_data.merge_into_course: + if course_data.merge_into_course != invalid_value and course_data.merge_into_course: self.course_merged_tracker.add_location_for_key(location, course_data.name_en) self.name_en_by_name_de.setdefault(course_data.name_de, course_data.name_en)
diff --git a/evap/staff/tests/test_importers.py b/evap/staff/tests/test_importers.py --- a/evap/staff/tests/test_importers.py +++ b/evap/staff/tests/test_importers.py @@ -306,6 +306,8 @@ def test_wrong_column_count(self): class TestEnrollmentImport(ImporterTestCase): + semester: Semester + @classmethod def setUpTestData(cls): cls.random_excel_file_content = excel_data.random_file_content @@ -320,7 +322,7 @@ def setUpTestData(cls): cls.default_excel_content = excel_data.create_memory_excel_file(excel_data.test_enrollment_data_filedata) cls.empty_excel_content = excel_data.create_memory_excel_file(excel_data.test_enrollment_data_empty_filedata) - def create_existing_course(self): + def create_existing_course(self) -> tuple[Course, Evaluation]: existing_course = baker.make( Course, name_de="Schütteln", @@ -710,6 +712,7 @@ def test_existing_course_different_attributes(self): self.default_excel_content, self.semester, self.vote_start_datetime, self.vote_end_date, test_run=False ) + self.assertEqual({}, importer_log.warnings_by_category()) self.assertErrorIs( importer_log, ImporterLogEntry.Category.COURSE, @@ -723,6 +726,67 @@ def test_existing_course_different_attributes(self): existing_course.refresh_from_db() self.assertEqual(old_dict, model_to_dict(existing_course)) + def test_existing_course_with_published_evaluation(self): + __, existing_evaluation = self.create_existing_course() + + # Attempt with state = Published + Evaluation.objects.filter(pk=existing_evaluation.pk).update(state=Evaluation.State.PUBLISHED) + existing_evaluation = Evaluation.objects.get(pk=existing_evaluation.pk) + + importer_log = import_enrollments( + self.default_excel_content, self.semester, self.vote_start_datetime, self.vote_end_date, test_run=False + ) + + self.assertEqual({}, importer_log.warnings_by_category()) + self.assertErrorIs( + importer_log, + ImporterLogEntry.Category.COURSE, + "Sheet &quot;BA Belegungen&quot;, row 2 and 1 other place: " + + "Course &quot;Shake&quot; already exists in this semester, but the courses can not be merged for the following reasons:<br /> " + + "- the import would add participants to the existing evaluation but the evaluation is already running", + ) + + # Attempt with earlier state but set _participant_count + Evaluation.objects.filter(pk=existing_evaluation.pk).update(state=Evaluation.State.APPROVED) + existing_evaluation = Evaluation.objects.get(pk=existing_evaluation.pk) + existing_evaluation._participant_count = existing_evaluation.participants.count() + existing_evaluation._voter_count = existing_evaluation.voters.count() + existing_evaluation.save() + + with override_settings(DEBUG=False): + importer_log = import_enrollments( + self.default_excel_content, self.semester, self.vote_start_datetime, self.vote_end_date, test_run=False + ) + self.assertEqual( + [msg.message for msg in importer_log.errors_by_category()[ImporterLogEntry.Category.GENERAL]], + ["Import aborted after exception: ''. 
No data was imported."], + ) + + def test_existing_course_with_single_result(self): + __, existing_evaluation = self.create_existing_course() + existing_evaluation.is_single_result = True + existing_evaluation.save() + + old_evaluation_count = Evaluation.objects.count() + old_dict = model_to_dict(existing_evaluation) + + importer_log = import_enrollments( + self.default_excel_content, self.semester, self.vote_start_datetime, self.vote_end_date, test_run=False + ) + + self.assertEqual({}, importer_log.warnings_by_category()) + self.assertErrorIs( + importer_log, + ImporterLogEntry.Category.COURSE, + "Sheet &quot;BA Belegungen&quot;, row 2 and 1 other place: " + + "Course &quot;Shake&quot; already exists in this semester, but the courses can not be merged for the following reasons:<br /> " + + "- the evaluation of the existing course is a single result", + ) + + self.assertEqual(Evaluation.objects.count(), old_evaluation_count) + existing_evaluation = Evaluation.objects.get(pk=existing_evaluation.pk) + self.assertEqual(old_dict, model_to_dict(existing_evaluation)) + def test_existing_course_equal_except_evaluations(self): existing_course, __ = self.create_existing_course() baker.make(Evaluation, course=existing_course, name_de="Zweite Evaluation", name_en="Second Evaluation") @@ -734,6 +798,7 @@ def test_existing_course_equal_except_evaluations(self): self.default_excel_content, self.semester, self.vote_start_datetime, self.vote_end_date, test_run=False ) + self.assertEqual({}, importer_log.warnings_by_category()) self.assertErrorIs( importer_log, ImporterLogEntry.Category.COURSE, @@ -758,6 +823,7 @@ def test_existing_course_different_grading(self): self.default_excel_content, self.semester, self.vote_start_datetime, self.vote_end_date, test_run=False ) + self.assertEqual({}, importer_log.warnings_by_category()) self.assertErrorIs( importer_log, ImporterLogEntry.Category.COURSE,
Importer must not change data of published evaluations When importing enrollment data, the importer checks if there are already courses/evaluations with the same name. If there is exactly one evaluation for the course name specified in the import file, all participants of this course listed in the import file who are not yet in the participant list of this existing evaluation will be added to it. This should not happen with already published evaluations, because then `participants` will be changed, although `_participant_count` has already been set at publication and is therefore stored in the cache. The numbers would then differ, leading to incorrectly displayed values and making operations impossible, since the equality of these numbers is asserted when unpublishing. *Edit: We don't want any evaluations that have a state >= `IN_EVALUATION` to be modified.* If a midterm evaluation was manually created, finished, and published, and this is the only existing evaluation for a course, it should still not be touched by the importer during enrollment import. Instead, an error message should be displayed stating that a published evaluation already exists for this course and that it cannot be imported.
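The patch above implements this as additional merge hindrances in `CourseMergeLogic.get_merge_hindrances`. Below is a simplified, self-contained sketch of that guard; the stub class and the numeric state value are made up for illustration and are not the real `Evaluation` model.

```python
from dataclasses import dataclass

IN_EVALUATION = 60  # placeholder; the real ordering comes from Evaluation.State


@dataclass
class EvaluationStub:
    state: int
    is_single_result: bool


def merge_hindrances(existing_evaluation: EvaluationStub) -> list[str]:
    """Reasons why an imported course must not be merged into the existing one."""
    hindrances = []
    if existing_evaluation.is_single_result:
        hindrances.append("the evaluation of the existing course is a single result")
    elif existing_evaluation.state >= IN_EVALUATION:
        # Adding participants now would diverge from the cached _participant_count.
        hindrances.append("the import would add participants, but the evaluation is already running")
    return hindrances
```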
2023-07-10T16:25:32
e-valuation/EvaP
1,979
e-valuation__EvaP-1979
[ "1956" ]
a80ec12e780f0a7f65e07d043b3b6b5d45f21b64
diff --git a/evap/staff/forms.py b/evap/staff/forms.py --- a/evap/staff/forms.py +++ b/evap/staff/forms.py @@ -31,7 +31,7 @@ TextAnswer, UserProfile, ) -from evap.evaluation.tools import date_to_datetime +from evap.evaluation.tools import clean_email, date_to_datetime from evap.results.tools import STATES_WITH_RESULT_TEMPLATE_CACHING, STATES_WITH_RESULTS_CACHING, cache_results from evap.results.views import update_template_cache, update_template_cache_of_published_evaluations_in_course from evap.staff.tools import remove_user_from_represented_and_ccing_users @@ -987,7 +987,7 @@ def clean_evaluations_participating_in(self): return evaluations_participating_in def clean_email(self): - email = self.cleaned_data.get("email") + email = clean_email(self.cleaned_data.get("email")) if email is None: return None @@ -999,7 +999,7 @@ def clean_email(self): if user_with_same_email.exists(): raise forms.ValidationError(_("A user with the email '%s' already exists") % email) - return email.lower() + return email def save(self, *args, **kw): super().save(*args, **kw)
diff --git a/evap/staff/tests/test_forms.py b/evap/staff/tests/test_forms.py --- a/evap/staff/tests/test_forms.py +++ b/evap/staff/tests/test_forms.py @@ -1,7 +1,7 @@ from unittest.mock import patch from django.forms.models import inlineformset_factory -from django.test import TestCase +from django.test import TestCase, override_settings from model_bakery import baker from evap.contributor.forms import EvaluationForm as ContributorEvaluationForm @@ -121,6 +121,10 @@ def test_evaluation_email_form(self): class UserFormTests(TestCase): + @classmethod + def setUpTestData(cls): + cls.existing_user = baker.make(UserProfile, email="[email protected]") + def test_user_form(self): """ Tests the UserForm with one valid and one invalid input dataset. @@ -135,6 +139,7 @@ def test_user_form(self): form = UserForm(instance=user, data=data) self.assertFalse(form.is_valid()) + @override_settings(INSTITUTION_EMAIL_REPLACEMENTS=[("institution.example.com", "example.com")]) def test_user_with_same_email(self): """ Tests whether the user form correctly handles email adresses @@ -155,6 +160,11 @@ def test_user_with_same_email(self): form = UserForm(instance=user, data=data) self.assertTrue(form.is_valid()) + data = {"email": "[email protected]"} + form = UserForm(instance=user, data=data) + self.assertFalse(form.is_valid()) + self.assertIn("A user with the email '[email protected]' already exists", form.errors["email"]) + def test_user_cannot_be_removed_from_evaluation_already_voted_for(self): student = baker.make(UserProfile) baker.make(Evaluation, participants=[student], voters=[student], course__semester__is_active=True)
Missing form error for duplicate email address When changing a user's email address to an address that does not yet exist, but results in an existing address after applying the replacements in `evaluation.tools.clean_email`, an internal server error is thrown. `staff.forms.clean_email` should call `evaluation.tools.clean_email` to prevent this. Test case:
- replace the `INSTITUTION_EMAIL_REPLACEMENTS` definition in the settings to `INSTITUTION_EMAIL_REPLACEMENTS: List[Tuple[str, str]] = [("institution.example.com", "institution.com")]`
- change Oma Abner's email address to `[email protected]`
- change any other user's email address to `[email protected]`
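A hypothetical, stripped-down illustration of the failure mode and of the proposed fix: the replacement mapping, the example address, and the `existing` set below are invented for this sketch, while the real helpers live in `evap/evaluation/tools.py` and `evap/staff/forms.py`.

```python
REPLACEMENTS = [("institution.example.com", "example.com")]  # stand-in for INSTITUTION_EMAIL_REPLACEMENTS


def clean_email(email: str | None) -> str | None:
    """Normalize an address roughly the way evaluation.tools.clean_email does."""
    if not email:
        return email
    email = email.strip().lower()
    for old_domain, new_domain in REPLACEMENTS:
        email = email.replace("@" + old_domain, "@" + new_domain)
    return email


existing = {"[email protected]"}  # hypothetical already-existing user


def form_clean_email(raw: str) -> str:
    email = clean_email(raw)  # normalizing *before* the uniqueness check is the fix
    if email in existing:
        raise ValueError(f"A user with the email '{email}' already exists")
    return email
```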
2023-07-10T16:47:59
e-valuation/EvaP
2,036
e-valuation__EvaP-2036
[ "2027" ]
de8abc2058c3502471f92b21ce2458a2a0f546cf
diff --git a/evap/development/management/commands/translate.py b/evap/development/management/commands/translate.py --- a/evap/development/management/commands/translate.py +++ b/evap/development/management/commands/translate.py @@ -9,3 +9,11 @@ class Command(BaseCommand): def handle(self, *args, **options): self.stdout.write('Executing "manage.py makemessages --locale=de --ignore=node_modules/*"') call_command("makemessages", "--locale=de", "--ignore=node_modules/*") + call_command( + "makemessages", + "--domain=djangojs", + "--extension=js,ts", + "--locale=de", + "--ignore=node_modules/*", + "--ignore=evap/static/js/*.min.js", + ) diff --git a/evap/urls.py b/evap/urls.py --- a/evap/urls.py +++ b/evap/urls.py @@ -1,6 +1,9 @@ import django.contrib.auth.views from django.conf import settings from django.urls import include, path +from django.views.i18n import JavaScriptCatalog + +from evap.middleware import no_login_required urlpatterns = [ path("", include('evap.evaluation.urls')), @@ -13,6 +16,8 @@ path("logout", django.contrib.auth.views.LogoutView.as_view(next_page="/"), name="django-auth-logout"), path("oidc/", include('mozilla_django_oidc.urls')), + + path("catalog.js", no_login_required(JavaScriptCatalog.as_view()), name="javascript-catalog"), ] if settings.DEBUG:
diff --git a/evap/evaluation/tests/test_views.py b/evap/evaluation/tests/test_views.py --- a/evap/evaluation/tests/test_views.py +++ b/evap/evaluation/tests/test_views.py @@ -8,7 +8,20 @@ from model_bakery import baker from evap.evaluation.models import Evaluation, Question, QuestionType, UserProfile -from evap.evaluation.tests.tools import WebTestWith200Check, create_evaluation_with_responsible_and_editor +from evap.evaluation.tests.tools import ( + WebTestWith200Check, + create_evaluation_with_responsible_and_editor, + store_ts_test_asset, +) + + +class RenderJsTranslationCatalog(WebTest): + url = reverse("javascript-catalog") + + def render_pages(self): + # Not using render_pages decorator to manually create a single (special) javascript file + content = self.app.get(self.url).content + store_ts_test_asset("catalog.js", content) @override_settings(PASSWORD_HASHERS=["django.contrib.auth.hashers.MD5PasswordHasher"]) diff --git a/evap/evaluation/tests/tools.py b/evap/evaluation/tests/tools.py --- a/evap/evaluation/tests/tools.py +++ b/evap/evaluation/tests/tools.py @@ -86,6 +86,15 @@ def let_user_vote_for_evaluation(user, evaluation, create_answers=False): RatingAnswerCounter.objects.bulk_update(rac_by_contribution_question.values(), ["count"]) +def store_ts_test_asset(relative_path: str, content) -> None: + absolute_path = os.path.join(settings.STATICFILES_DIRS[0], "ts", "rendered", relative_path) + + os.makedirs(os.path.dirname(absolute_path), exist_ok=True) + + with open(absolute_path, "wb") as file: + file.write(content) + + def render_pages(test_item): """Decorator which annotates test methods which render pages. The containing class is expected to include a `url` attribute which matches a valid path. @@ -94,19 +103,15 @@ def render_pages(test_item): The value is a byte string of the page content.""" @functools.wraps(test_item) - def decorator(self): + def decorator(self) -> None: pages = test_item(self) - static_directory = settings.STATICFILES_DIRS[0] - url = getattr(self, "render_pages_url", self.url) - # Remove the leading slash from the url to prevent that an absolute path is created - directory = os.path.join(static_directory, "ts", "rendered", url[1:]) - os.makedirs(directory, exist_ok=True) for name, content in pages.items(): - with open(os.path.join(directory, f"{name}.html"), "wb") as html_file: - html_file.write(content) + # Remove the leading slash from the url to prevent that an absolute path is created + path = os.path.join(url[1:], f"{name}.html") + store_ts_test_asset(path, content) return decorator diff --git a/evap/static/ts/tests/utils/page.ts b/evap/static/ts/tests/utils/page.ts --- a/evap/static/ts/tests/utils/page.ts +++ b/evap/static/ts/tests/utils/page.ts @@ -20,14 +20,26 @@ async function createPage(browser: Browser): Promise<Page> { const extension = path.extname(request.url()); const pathname = new URL(request.url()).pathname; if (extension === ".html") { + // requests like /evap/evap/static/ts/rendered/results/student.html request.continue(); } else if (pathname.startsWith(staticPrefix)) { + // requests like /static/css/tom-select.bootstrap5.min.css const asset = pathname.substr(staticPrefix.length); const body = fs.readFileSync(path.join(__dirname, "..", "..", "..", asset)); request.respond({ contentType: contentTypeByExtension.get(extension), body, }); + } else if (pathname.endsWith("catalog.js")) { + // request for /catalog.js + // some pages will error out if translation functions are not available + // rendered in RenderJsTranslationCatalog + const 
absolute_fs_path = path.join(__dirname, "..", "..", "..", "ts", "rendered", "catalog.js"); + const body = fs.readFileSync(absolute_fs_path); + request.respond({ + contentType: contentTypeByExtension.get(extension), + body, + }); } else { request.abort(); }
Translations in Javascript and Typescript When writing Javascript and Typescript in separate, non-HTML files, we can't use the Django template functions `trans`, `blocktrans`, etc. anymore. We have worked around this by putting translated strings into the DOM and then accessing them via Javascript. Instead of doing this, we want to have a unified approach where the use-site can just write `trans("The server is not responding.")` or so. There are two possible approaches:

1. DIY: We have a function `trans(english: string, to: Language = window.LANGUAGE): string` with `type Language = "English" | "German"`. This function looks up the string in a global dictionary (for example `window.translationDictionary` or so). I am not sure what it should do if the string is not present; probably return the English string and emit a warning. This dictionary would be defined in a script tag in an HTML file, something like this (possibly with an implementation that repeats the strings a little less):

```html
<script type="text/javascript">
    window.translationDictionary = {
        "de": {
            {% language 'de' %}
            "The server is not responding": "{% trans 'The server is not responding' %}",
            {% endlanguage %}
        }
    };
</script>
```

2. Use Django's builtin functionality: There is a builtin way that configures an extra endpoint to make all translations available (https://docs.djangoproject.com/en/4.2/topics/i18n/translation/#internationalization-in-javascript-code). A plus is that it also supports `ngettext` and so on. It seems like it can also detect all strings used in translations, but the setup may be a bit tricky with Typescript thrown into the mix.

I think I prefer the first approach, but maybe we encounter difficulties with it or decide that we will need `ngettext` etc. in the future and go with the Django versions directly.
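For reference, a minimal sketch of what approach 2 looks like on the URL side; this mirrors the `evap/urls.py` change in the patch above, although the path and URL name here are illustrative.

```python
from django.urls import path
from django.views.i18n import JavaScriptCatalog

urlpatterns = [
    # Serves a JS file that defines gettext(), ngettext(), ... for the client side.
    path("catalog.js", JavaScriptCatalog.as_view(), name="javascript-catalog"),
]
```

The page then loads this file with a plain script tag, after which TypeScript code can call the generated `gettext`/`ngettext` functions.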
2023-10-16T18:50:57
e-valuation/EvaP
2,040
e-valuation__EvaP-2040
[ "1965" ]
baaa7dd5c55749a9fa17f5ad1f14e2e2bd4fec53
diff --git a/evap/grades/urls.py b/evap/grades/urls.py --- a/evap/grades/urls.py +++ b/evap/grades/urls.py @@ -13,5 +13,5 @@ path("grade_document/<int:grade_document_id>/edit", views.edit_grades, name="edit_grades"), path("delete_grades", views.delete_grades, name="delete_grades"), - path("toggle_no_grades", views.toggle_no_grades, name="toggle_no_grades"), + path("set_no_grades", views.set_no_grades, name="set_no_grades"), ] diff --git a/evap/grades/views.py b/evap/grades/views.py --- a/evap/grades/views.py +++ b/evap/grades/views.py @@ -1,6 +1,6 @@ from django.conf import settings from django.contrib import messages -from django.core.exceptions import PermissionDenied +from django.core.exceptions import PermissionDenied, SuspiciousOperation from django.db.models.query import QuerySet from django.http import FileResponse, HttpResponse from django.shortcuts import get_object_or_404, redirect, render @@ -146,12 +146,18 @@ def upload_grades(request, course_id): @require_POST @grade_publisher_required -def toggle_no_grades(request): +def set_no_grades(request): course = get_object_from_dict_pk_entry_or_logged_40x(Course, request.POST, "course_id") + + try: + status = bool(int(request.POST["status"])) + except (KeyError, TypeError, ValueError) as e: + raise SuspiciousOperation from e + if course.semester.grade_documents_are_deleted: raise PermissionDenied - course.gets_no_grade_documents = not course.gets_no_grade_documents + course.gets_no_grade_documents = status course.save() if course.gets_no_grade_documents:
diff --git a/evap/grades/tests.py b/evap/grades/tests.py --- a/evap/grades/tests.py +++ b/evap/grades/tests.py @@ -135,7 +135,7 @@ def test_upload_final_grades(self): evaluation.save() self.helper_check_final_grade_upload(course, 0) - def test_toggle_no_grades(self): + def test_set_no_grades(self): evaluation = self.evaluation evaluation.manager_approve() evaluation.begin_evaluation() @@ -146,8 +146,8 @@ def test_toggle_no_grades(self): self.assertFalse(evaluation.course.gets_no_grade_documents) self.app.post( - "/grades/toggle_no_grades", - params={"course_id": evaluation.course.id}, + "/grades/set_no_grades", + params={"course_id": evaluation.course.id, "status": "1"}, user=self.grade_publisher, status=200, ) @@ -160,8 +160,17 @@ def test_toggle_no_grades(self): ) self.app.post( - "/grades/toggle_no_grades", - params={"course_id": evaluation.course.id}, + "/grades/set_no_grades", + params={"course_id": evaluation.course.id, "status": "0"}, + user=self.grade_publisher, + status=200, + ) + evaluation = Evaluation.objects.get(id=evaluation.id) + self.assertFalse(evaluation.course.gets_no_grade_documents) + + self.app.post( + "/grades/set_no_grades", + params={"course_id": evaluation.course.id, "status": "0"}, user=self.grade_publisher, status=200, )
Replace `toggle_no_grades` with an idempotent alternative Currently, there is a URL for toggling the grade status of a course (`grades:toggle_no_grades`). Instead of having a toggle, we would like an idempotent operation, that is, the endpoint should be something like `grades:set_gets_grade_documents` where the desired boolean value is sent with the request. This way, two people who want to change the status don't revert each other's action but instead set the same value.
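A self-contained sketch of the idempotent variant described above; the merged patch names the endpoint `set_no_grades` and reads an explicit `status` POST parameter, while the stub class below exists only for illustration.

```python
from dataclasses import dataclass


@dataclass
class CourseStub:
    gets_no_grade_documents: bool = False


def set_no_grades(course: CourseStub, status: bool) -> None:
    # Setting an explicit value is idempotent: two people submitting the same
    # request end up with the same state, unlike with a toggle.
    course.gets_no_grade_documents = status
```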
/assign assign pls :)
2023-10-16T19:57:46
e-valuation/EvaP
2,057
e-valuation__EvaP-2057
[ "2002" ]
274714a2eaea9cf4351a7ca17bb7f7746fb02fca
diff --git a/evap/contributor/views.py b/evap/contributor/views.py --- a/evap/contributor/views.py +++ b/evap/contributor/views.py @@ -288,6 +288,7 @@ def export_contributor_results(contributor): include_not_enough_voters=True, include_unpublished=False, contributor=contributor, + verbose_heading=False, ) return response diff --git a/evap/results/exporters.py b/evap/results/exporters.py --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -160,18 +160,22 @@ def filter_evaluations(semesters, evaluation_states, degrees, course_types, cont return evaluations_with_results, used_questionnaires, course_results_exist def write_headings_and_evaluation_info( - self, evaluations_with_results, semesters, contributor, degrees, course_types + self, evaluations_with_results, semesters, contributor, degrees, course_types, verbose_heading ): - export_name = "Evaluation" + export_name = _("Evaluation") if contributor: export_name += f"\n{contributor.full_name}" elif len(semesters) == 1: export_name += f"\n{semesters[0].name}" - degree_names = [degree.name for degree in Degree.objects.filter(pk__in=degrees)] - course_type_names = [course_type.name for course_type in CourseType.objects.filter(pk__in=course_types)] - self.write_cell( - _("{}\n\n{}\n\n{}").format(export_name, ", ".join(degree_names), ", ".join(course_type_names)), "headline" - ) + if verbose_heading: + degree_names = [degree.name for degree in Degree.objects.filter(pk__in=degrees)] + course_type_names = [course_type.name for course_type in CourseType.objects.filter(pk__in=course_types)] + self.write_cell( + f"{export_name}\n\n{', '.join(degree_names)}\n\n{', '.join(course_type_names)}", + "headline", + ) + else: + self.write_cell(export_name, "headline") for evaluation, __ in evaluations_with_results: title = evaluation.full_name @@ -285,7 +289,13 @@ def write_questionnaire(self, questionnaire, evaluations_with_results, contribut # pylint: disable=arguments-differ def export_impl( - self, semesters, selection_list, include_not_enough_voters=False, include_unpublished=False, contributor=None + self, + semesters, + selection_list, + include_not_enough_voters=False, + include_unpublished=False, + contributor=None, + verbose_heading=True, ): # We want to throw early here, since workbook.save() will throw an IndexError otherwise. assert len(selection_list) > 0 @@ -309,7 +319,7 @@ def export_impl( ) self.write_headings_and_evaluation_info( - evaluations_with_results, semesters, contributor, degrees, course_types + evaluations_with_results, semesters, contributor, degrees, course_types, verbose_heading ) for questionnaire in used_questionnaires:
diff --git a/evap/contributor/tests/test_views.py b/evap/contributor/tests/test_views.py --- a/evap/contributor/tests/test_views.py +++ b/evap/contributor/tests/test_views.py @@ -1,4 +1,6 @@ +import xlrd from django.core import mail +from django.urls import reverse from django_webtest import WebTest from model_bakery import baker @@ -284,3 +286,17 @@ def test_display_request_buttons(self): page = self.app.get(self.url, user=self.responsible) self.assertEqual(page.body.decode().count("Request changes"), 0) self.assertEqual(page.body.decode().count("Request creation of new account"), 2) + + +class TestContributorResultsExportView(WebTest): + @classmethod + def setUpTestData(cls): + result = create_evaluation_with_responsible_and_editor() + cls.url = reverse("contributor:export") + cls.user = result["responsible"] + + def test_concise_header(self): + response = self.app.get(self.url, user=self.user) + + workbook = xlrd.open_workbook(file_contents=response.content) + self.assertEqual(workbook.sheets()[0].row_values(0)[0], f"Evaluation\n{self.user.full_name}")
Remove degrees and course types from contributor export header In the contributor results export file, the header includes "Evaluation", the contributor's name, and all existing degrees and course types. Instead, only "Evaluation" and the name should be exported. Exports other than the contributor export should not be modified.
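A rough sketch of the heading logic described here, mirroring the `verbose_heading` flag introduced in the patch above; the function below is illustrative and not the exporter's real signature.

```python
def export_heading(contributor_name: str | None, degrees: list[str], course_types: list[str], verbose: bool) -> str:
    heading = "Evaluation"
    if contributor_name:
        heading += f"\n{contributor_name}"
    if verbose:  # all exports except the contributor export keep the degree and course type lines
        heading += f"\n\n{', '.join(degrees)}\n\n{', '.join(course_types)}"
    return heading
```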
@janno42 With the default testdata, both users `evap` and `contributor` currently only get an empty excel file when pressing the "Export results" button on the contributor index page (which is the button I think this issue is about, right?). This hindered progress with this issue, as it makes it hard to understand the issue (as there is no data row with these headers). Do we want to update test data to include some results for the user `evap` by default?

@Tillbtn @finger813 Executing the following lines in `./manage.py shell_plus` already made some data show up for me, maybe that's enough for you for now:

```
from model_bakery import baker
e = Evaluation.objects.filter(name_de="Softwarearchitektur").first()
u = UserProfile.objects.filter(email="[email protected]").first()
c = baker.make(Contribution, evaluation=e, contributor=u)
```

It looks like it isn't necessary that there are any actual results for the contributor. You can login as `[email protected]` to have useful test data.

@richardebeling Adding some data for `contributor` is something we can do in addition.
2023-10-30T17:15:48
e-valuation/EvaP
2,060
e-valuation__EvaP-2060
[ "2055" ]
431077a06101500db6c884b72840544e12f9be48
diff --git a/evap/evaluation/tools.py b/evap/evaluation/tools.py --- a/evap/evaluation/tools.py +++ b/evap/evaluation/tools.py @@ -2,14 +2,15 @@ from abc import ABC, abstractmethod from collections import defaultdict from collections.abc import Iterable, Mapping -from typing import Any, TypeVar +from typing import Any, Protocol, TypeVar from urllib.parse import quote import xlwt from django.conf import settings from django.core.exceptions import SuspiciousOperation, ValidationError from django.db.models import Model -from django.http import HttpResponse +from django.forms.formsets import BaseFormSet +from django.http import HttpRequest, HttpResponse from django.shortcuts import get_object_or_404 from django.utils.translation import get_language from django.views.generic import FormView @@ -42,7 +43,7 @@ def get_object_from_dict_pk_entry_or_logged_40x(model_cls: type[M], dict_obj: Ma raise SuspiciousOperation from e -def is_prefetched(instance, attribute_name: str): +def is_prefetched(instance, attribute_name: str) -> bool: """ Is the given related attribute prefetched? Can be used to do ordering or counting in python and avoid additional database queries @@ -58,7 +59,7 @@ def is_prefetched(instance, attribute_name: str): return False -def discard_cached_related_objects(instance): +def discard_cached_related_objects(instance: M) -> M: """ Discard all cached related objects (for ForeignKey and M2M Fields). Useful if there were changes, but django's caching would still give us the old @@ -66,44 +67,44 @@ def discard_cached_related_objects(instance): hierarchy (e.g. for storing instances in a cache) """ # Extracted from django's refresh_from_db, which sadly doesn't offer this part alone (without hitting the DB). - for field in instance._meta.concrete_fields: + for field in instance._meta.concrete_fields: # type: ignore if field.is_relation and field.is_cached(instance): field.delete_cached_value(instance) - for field in instance._meta.related_objects: + for field in instance._meta.related_objects: # type: ignore if field.is_cached(instance): field.delete_cached_value(instance) - instance._prefetched_objects_cache = {} + instance._prefetched_objects_cache = {} # type: ignore return instance -def is_external_email(email): +def is_external_email(email: str) -> bool: return not any(email.endswith("@" + domain) for domain in settings.INSTITUTION_EMAIL_DOMAINS) -def sort_formset(request, formset): +def sort_formset(request: HttpRequest, formset: BaseFormSet) -> None: if request.POST: # if not, there will be no cleaned_data and the models should already be sorted anyways formset.is_valid() # make sure all forms have cleaned_data formset.forms.sort(key=lambda f: f.cleaned_data.get("order", 9001)) -def date_to_datetime(date): +def date_to_datetime(date: datetime.date) -> datetime.datetime: return datetime.datetime(year=date.year, month=date.month, day=date.day) -def vote_end_datetime(vote_end_date): +def vote_end_datetime(vote_end_date: datetime.date) -> datetime.datetime: # The evaluation actually ends at EVALUATION_END_OFFSET_HOURS:00 of the day AFTER self.vote_end_date. 
return date_to_datetime(vote_end_date) + datetime.timedelta(hours=24 + settings.EVALUATION_END_OFFSET_HOURS) -def get_parameter_from_url_or_session(request, parameter, default=False): - result = request.GET.get(parameter, None) - if result is None: # if no parameter is given take session value +def get_parameter_from_url_or_session(request: HttpRequest, parameter: str, default=False) -> bool: + result_str = request.GET.get(parameter, None) + if result_str is None: # if no parameter is given take session value result = request.session.get(parameter, default) else: - result = {"true": True, "false": False}.get(result.lower()) # convert parameter to boolean + result = {"true": True, "false": False}.get(result_str.lower()) # convert parameter to boolean request.session[parameter] = result # store value for session return result @@ -115,7 +116,10 @@ def translate(**kwargs): return property(lambda self: getattr(self, kwargs[get_language() or "en"])) -def clean_email(email): +EmailT = TypeVar("EmailT", str, None) + + +def clean_email(email: EmailT) -> EmailT: if email: email = email.strip().lower() # Replace email domains in case there are multiple alias domains used in the organisation and all emails should @@ -126,11 +130,11 @@ def clean_email(email): return email -def capitalize_first(string): +def capitalize_first(string: str) -> str: return string[0].upper() + string[1:] -def ilen(iterable): +def ilen(iterable: Iterable) -> int: return sum(1 for _ in iterable) @@ -148,7 +152,7 @@ class FormsetView(FormView): def form_class(self): return self.formset_class - def get_context_data(self, **kwargs): + def get_context_data(self, **kwargs) -> dict[str, Any]: context = super().get_context_data(**kwargs) context["formset"] = context.pop("form") return context @@ -157,19 +161,24 @@ def get_context_data(self, **kwargs): # `get_formset_kwargs`. Users can thus override `get_formset_kwargs` instead. If it is not overridden, we delegate # to the original `get_form_kwargs` instead. The same approach is used for the other renamed methods. - def get_form_kwargs(self): + def get_form_kwargs(self) -> dict: return self.get_formset_kwargs() - def get_formset_kwargs(self): + def get_formset_kwargs(self) -> dict: return super().get_form_kwargs() - def form_valid(self, form): + def form_valid(self, form) -> HttpResponse: return self.formset_valid(form) - def formset_valid(self, formset): + def formset_valid(self, formset) -> HttpResponse: return super().form_valid(formset) +class HasFormValid(Protocol): + def form_valid(self, form): + pass + + class SaveValidFormMixin: """ Call `form.save()` if the submitted form is valid. @@ -178,7 +187,7 @@ class SaveValidFormMixin: example if a formset for a collection of objects is submitted. """ - def form_valid(self, form): + def form_valid(self: HasFormValid, form) -> HttpResponse: form.save() return super().form_valid(form) @@ -193,11 +202,11 @@ class AttachmentResponse(HttpResponse): _to the response instance_ as if it was a writable file. 
""" - def __init__(self, filename, content_type=None, **kwargs): + def __init__(self, filename: str, content_type=None, **kwargs) -> None: super().__init__(content_type=content_type, **kwargs) self.set_content_disposition(filename) - def set_content_disposition(self, filename): + def set_content_disposition(self, filename: str) -> None: try: filename.encode("ascii") self["Content-Disposition"] = f'attachment; filename="{filename}"' @@ -215,7 +224,7 @@ class HttpResponseNoContent(HttpResponse): status_code = 204 - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) del self["content-type"] @@ -244,7 +253,7 @@ class ExcelExporter(ABC): # have a sheet added at initialization. default_sheet_name: str | None = None - def __init__(self): + def __init__(self) -> None: self.workbook = xlwt.Workbook() self.cur_row = 0 self.cur_col = 0 @@ -253,7 +262,7 @@ def __init__(self): else: self.cur_sheet = None - def write_cell(self, label="", style="default"): + def write_cell(self, label: str | None = "", style: str = "default") -> None: """Write a single cell and move to the next column.""" self.cur_sheet.write( self.cur_row, @@ -263,11 +272,11 @@ def write_cell(self, label="", style="default"): ) self.cur_col += 1 - def next_row(self): + def next_row(self) -> None: self.cur_col = 0 self.cur_row += 1 - def write_row(self, vals, style="default"): + def write_row(self, vals: Iterable[str], style: str = "default") -> None: """ Write a cell for every value and go to the next row. Styling can be chosen @@ -278,16 +287,16 @@ def write_row(self, vals, style="default"): self.write_cell(val, style=style(val) if callable(style) else style) self.next_row() - def write_empty_row_with_styles(self, styles): + def write_empty_row_with_styles(self, styles: Iterable[str]) -> None: for style in styles: self.write_cell(None, style) self.next_row() @abstractmethod - def export_impl(self, *args, **kwargs): + def export_impl(self, *args, **kwargs) -> None: """Specify the logic to insert the data into the sheet here.""" - def export(self, response, *args, **kwargs): + def export(self, response, *args, **kwargs) -> None: """Convenience method to avoid some boilerplate.""" self.export_impl(*args, **kwargs) self.workbook.save(response) diff --git a/evap/grades/views.py b/evap/grades/views.py --- a/evap/grades/views.py +++ b/evap/grades/views.py @@ -1,3 +1,5 @@ +from typing import Any + from django.conf import settings from django.contrib import messages from django.core.exceptions import PermissionDenied, SuspiciousOperation @@ -23,7 +25,7 @@ class IndexView(TemplateView): template_name = "grades_index.html" - def get_context_data(self, **kwargs): + def get_context_data(self, **kwargs) -> dict[str, Any]: return super().get_context_data(**kwargs) | { "semesters": Semester.objects.filter(grade_documents_are_deleted=False), "disable_breadcrumb_grades": True, @@ -51,19 +53,19 @@ class SemesterView(DetailView): object: Semester - def get_object(self, *args, **kwargs): + def get_object(self, *args, **kwargs) -> Semester: semester = super().get_object(*args, **kwargs) if semester.grade_documents_are_deleted: raise PermissionDenied return semester - def get_context_data(self, **kwargs): - courses = ( + def get_context_data(self, **kwargs) -> dict[str, Any]: + query = ( self.object.courses.filter(evaluations__wait_for_grade_upload_before_publishing=True) .exclude(evaluations__state=Evaluation.State.NEW) .distinct() ) - courses = course_grade_document_count_tuples(courses) + 
courses = course_grade_document_count_tuples(query) return super().get_context_data(**kwargs) | { "courses": courses, @@ -77,13 +79,13 @@ class CourseView(DetailView): model = Course pk_url_kwarg = "course_id" - def get_object(self, *args, **kwargs): + def get_object(self, *args, **kwargs) -> Course: course = super().get_object(*args, **kwargs) if course.semester.grade_documents_are_deleted: raise PermissionDenied return course - def get_context_data(self, **kwargs): + def get_context_data(self, **kwargs) -> dict[str, Any]: return super().get_context_data(**kwargs) | { "semester": self.object.semester, "grade_documents": self.object.grade_documents.all(), diff --git a/evap/staff/views.py b/evap/staff/views.py --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -14,7 +14,7 @@ from django.db import IntegrityError, transaction from django.db.models import BooleanField, Case, Count, ExpressionWrapper, IntegerField, Prefetch, Q, Sum, When from django.dispatch import receiver -from django.forms import formset_factory +from django.forms import BaseForm, formset_factory from django.forms.models import inlineformset_factory, modelformset_factory from django.http import Http404, HttpRequest, HttpResponse, HttpResponseBadRequest, HttpResponseRedirect from django.shortcuts import get_object_or_404, redirect, render @@ -580,7 +580,8 @@ class SemesterCreateView(SuccessMessageMixin, CreateView): form_class = SemesterForm success_message = gettext_lazy("Successfully created semester.") - def get_success_url(self): + def get_success_url(self) -> str: + assert self.object is not None return reverse("staff:semester_view", args=[self.object.id]) @@ -592,7 +593,7 @@ class SemesterEditView(SuccessMessageMixin, UpdateView): pk_url_kwarg = "semester_id" success_message = gettext_lazy("Successfully updated semester.") - def get_success_url(self): + def get_success_url(self) -> str: return reverse("staff:semester_view", args=[self.object.id]) @@ -1050,13 +1051,13 @@ class CourseEditView(SuccessMessageMixin, UpdateView): object: Course - def get_object(self, *args, **kwargs): + def get_object(self, *args, **kwargs) -> Course: course = super().get_object(*args, **kwargs) if self.request.method == "POST" and not course.can_be_edited_by_manager: raise SuspiciousOperation("Modifying this course is not allowed.") return course - def get_context_data(self, **kwargs): + def get_context_data(self, **kwargs) -> dict[str, Any]: context_data = super().get_context_data(**kwargs) | { "semester": self.object.semester, "editable": self.object.can_be_edited_by_manager, @@ -1065,7 +1066,9 @@ def get_context_data(self, **kwargs): context_data["course_form"] = context_data.pop("form") return context_data - def form_valid(self, form): + def form_valid(self, form: BaseForm) -> HttpResponse: + assert isinstance(form, CourseForm) # https://www.github.com/typeddjango/django-stubs/issues/1809 + if self.request.POST.get("operation") not in ("save", "save_create_evaluation", "save_create_single_result"): raise SuspiciousOperation("Invalid POST operation") @@ -1074,7 +1077,7 @@ def form_valid(self, form): update_template_cache_of_published_evaluations_in_course(self.object) return response - def get_success_url(self): + def get_success_url(self) -> str: match self.request.POST["operation"]: case "save": return reverse("staff:semester_view", args=[self.object.semester.id]) @@ -1082,6 +1085,7 @@ def get_success_url(self): return reverse("staff:evaluation_create_for_course", args=[self.object.id]) case "save_create_single_result": return 
reverse("staff:single_result_create_for_course", args=[self.object.id]) + raise SuspiciousOperation("Unexpected operation") @require_POST @@ -2289,7 +2293,7 @@ class UserMergeSelectionView(FormView): form_class = UserMergeSelectionForm template_name = "staff_user_merge_selection.html" - def form_valid(self, form): + def form_valid(self, form: UserMergeSelectionForm) -> HttpResponse: return redirect( "staff:user_merge", form.cleaned_data["main_user"].id, @@ -2333,7 +2337,7 @@ class TemplateEditView(SuccessMessageMixin, UpdateView): success_url = reverse_lazy("staff:index") template_name = "staff_template_form.html" - def get_context_data(self, **kwargs) -> dict: + def get_context_data(self, **kwargs) -> dict[str, Any]: context = super().get_context_data(**kwargs) template = context["template"] = context.pop("emailtemplate") @@ -2376,7 +2380,7 @@ class FaqIndexView(SuccessMessageMixin, SaveValidFormMixin, FormsetView): success_url = reverse_lazy("staff:faq_index") success_message = gettext_lazy("Successfully updated the FAQ sections.") - def get_context_data(self, **kwargs): + def get_context_data(self, **kwargs) -> dict[str, Any]: return super().get_context_data(**kwargs) | {"sections": FaqSection.objects.all()}
Type-check class based views from #1964 In #1964, we made a mistake with the instance variables and return types inside [`UserMergeSelectionView::get_success_url`](https://github.com/e-valuation/EvaP/pull/1964/files#diff-94e55a2578328edb5cc0dd3b57b2ceae72f5e824fff0742ae770d59451f046d7R2292). Mypy didn't complain about these mistakes. I would have expected that the function signature of `get_success_url` from the base class is applied, but [mypy doesn't do this](https://mypy.readthedocs.io/en/stable/class_basics.html#overriding-statically-typed-methods). To make mypy detect such errors, I think we want to type-annotate the class based views introduced in #1964 and possibly also mark them with the `@override` decorator.
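To make the mypy gap concrete, here is a minimal, hypothetical sketch (not taken from the EvaP codebase; class and path names are made up) of what annotating an override and marking it with `@override` buys:

```python
from typing_extensions import override  # `typing.override` on Python 3.12+


class BaseView:
    def get_success_url(self) -> str:
        return "/"


class MergeSelectionView(BaseView):
    @override
    def get_success_url(self) -> str:  # annotated, so mypy checks the body and compares it with BaseView
        return "/staff/user/merge"
```

Without annotations the override is treated as dynamically typed, so mypy cannot flag a mismatched return type; `@override` additionally reports an error if the method stops overriding anything in the base class.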
2023-10-30T19:53:40
e-valuation/EvaP
2,065
e-valuation__EvaP-2065
[ "1958" ]
b4d7d3b298aaaba415665bf37e2c6fbd2efcb3d3
diff --git a/evap/staff/forms.py b/evap/staff/forms.py --- a/evap/staff/forms.py +++ b/evap/staff/forms.py @@ -957,6 +957,7 @@ class Meta: def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) + self.user_with_same_email = None evaluations_in_active_semester = Evaluation.objects.filter(course__semester=Semester.active_semester()) excludes = [x.id for x in evaluations_in_active_semester if x.is_single_result] evaluations_in_active_semester = evaluations_in_active_semester.exclude(id__in=excludes) @@ -998,7 +999,8 @@ def clean_email(self): if self.instance and self.instance.pk: user_with_same_email = user_with_same_email.exclude(pk=self.instance.pk) - if user_with_same_email.exists(): + if user_with_same_email: + self.user_with_same_email = user_with_same_email.first() raise forms.ValidationError(_("A user with the email '%s' already exists") % email) return email diff --git a/evap/staff/views.py b/evap/staff/views.py --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -2211,6 +2211,7 @@ def notify_reward_points(grantings, **_kwargs): "evaluations_contributing_to": evaluations_contributing_to, "has_due_evaluations": bool(user.get_sorted_due_evaluations()), "user_id": user_id, + "user_with_same_email": form.user_with_same_email, }, )
diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -272,12 +272,22 @@ def setUpTestData(cls): cls.testuser = baker.make(UserProfile) cls.url = f"/staff/user/{cls.testuser.pk}/edit" - def test_questionnaire_edit(self): + def test_user_edit(self): page = self.app.get(self.url, user=self.manager, status=200) form = page.forms["user-form"] - form["email"] = "[email protected]" + form["email"] = "[email protected]" form.submit() - self.assertTrue(UserProfile.objects.filter(email="[email protected]").exists()) + self.assertTrue(UserProfile.objects.filter(email="[email protected]").exists()) + + def test_user_edit_duplicate_email(self): + second_user = baker.make(UserProfile, email="[email protected]") + page = self.app.get(self.url, user=self.manager, status=200) + form = page.forms["user-form"] + form["email"] = second_user.email + page = form.submit() + self.assertContains( + page, "A user with this email address already exists. You probably want to merge the users." + ) @patch("evap.staff.forms.remove_user_from_represented_and_ccing_users") def test_inactive_edit(self, mock_remove):
Suggest user merge if email address already exists As a manager, when changing a user's email address to an existing email address of another user, an error is shown in the user form. In addition, a warning message should be shown at the top of the page, suggesting a merge of the two users with a link to the merge page (where the user currently edited will be the main user).
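Purely as an illustration (the model and import path are assumptions, not the actual EvaP code), the form can remember which user caused the duplicate-email error so the view can offer the merge link:

```python
from django import forms
from django.utils.translation import gettext_lazy as _

from myapp.models import UserProfile  # hypothetical import path


class UserForm(forms.ModelForm):
    class Meta:
        model = UserProfile
        fields = ["email"]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.user_with_same_email = None  # set when validation finds a duplicate

    def clean_email(self):
        email = self.cleaned_data["email"]
        duplicates = UserProfile.objects.filter(email=email)
        if self.instance.pk:
            duplicates = duplicates.exclude(pk=self.instance.pk)
        duplicate = duplicates.first()
        if duplicate is not None:
            self.user_with_same_email = duplicate  # the view can link to the merge page for this user
            raise forms.ValidationError(_("A user with the email '%s' already exists") % email)
        return email
```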
We should probably wait for #1780 to see what we can reuse then :) The two issues are mostly independent, so no need to wait.
2023-11-06T18:11:10
e-valuation/EvaP
2,071
e-valuation__EvaP-2071
[ "1693" ]
439fb3e49f2045502313a568172755e95895a9c6
diff --git a/evap/evaluation/migrations/0142_alter_evaluation_state.py b/evap/evaluation/migrations/0142_alter_evaluation_state.py new file mode 100644 --- /dev/null +++ b/evap/evaluation/migrations/0142_alter_evaluation_state.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.7 on 2023-11-13 20:59 + +from django.db import migrations +import django_fsm + + +class Migration(migrations.Migration): + dependencies = [ + ("evaluation", "0141_userprofile_notes"), + ] + + operations = [ + migrations.AlterField( + model_name="evaluation", + name="state", + field=django_fsm.FSMIntegerField(default=10, protected=True, verbose_name="state"), + ), + ] diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -389,7 +389,7 @@ class State: REVIEWED = 70 PUBLISHED = 80 - state = FSMIntegerField(default=State.NEW, protected=True) + state = FSMIntegerField(default=State.NEW, protected=True, verbose_name=_("state")) course = models.ForeignKey(Course, models.PROTECT, verbose_name=_("course"), related_name="evaluations") @@ -779,14 +779,14 @@ def unpublish(self): self._participant_count = None STATE_STR_CONVERSION = { - State.NEW: "new", - State.PREPARED: "prepared", - State.EDITOR_APPROVED: "editor_approved", - State.APPROVED: "approved", - State.IN_EVALUATION: "in_evaluation", - State.EVALUATED: "evaluated", - State.REVIEWED: "reviewed", - State.PUBLISHED: "published", + State.NEW: _("new"), + State.PREPARED: _("prepared"), + State.EDITOR_APPROVED: _("editor_approved"), + State.APPROVED: _("approved"), + State.IN_EVALUATION: _("in_evaluation"), + State.EVALUATED: _("evaluated"), + State.REVIEWED: _("reviewed"), + State.PUBLISHED: _("published"), } @classmethod @@ -994,7 +994,7 @@ def unlogged_fields(self): @classmethod def transform_log_action(cls, field_action): - if field_action.label == "State": + if field_action.label.lower() == Evaluation.state.field.verbose_name.lower(): return FieldAction( field_action.label, field_action.type, [cls.state_to_str(state) for state in field_action.items] )
diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -2176,6 +2176,17 @@ def test_questionnaire_with_answers_warning(self): '<label class="form-check-label badge bg-danger" for="id_contributions-1-questionnaires_0">', page ) + @patch.dict(Evaluation.STATE_STR_CONVERSION, {Evaluation.State.PREPARED: "mock-translated-prepared"}) + def test_state_change_log_translated(self): + page = self.app.get(self.url, user=self.manager) + self.assertNotIn("mock-translated-prepared", page) + + self.evaluation.ready_for_editors() + self.evaluation.save() + + page = self.app.get(self.url, user=self.manager) + self.assertIn("mock-translated-prepared", page) + class TestEvaluationDeleteView(WebTestStaffMode): csrf_checks = False
Translate evaluation state names in log display When displaying a log entry for a change of the `state` field of an `evaluation`, the state names should not only be mapped from their integer representation to string values (what we currently do), but also be translated into the display language. For example, although I am using the site in German, the logs at the end of the evaluation edit page for "Fachspezifisches Englisch (Level 2)" say: "State: reviewed → evaluated"
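As a small illustrative sketch (the state values and labels are placeholders), the core of the fix is wrapping the labels in `gettext_lazy` so the lookup happens in the viewer's active language when the string is rendered:

```python
from django.utils.translation import gettext_lazy as _

STATE_STR_CONVERSION = {
    60: _("evaluated"),
    70: _("reviewed"),
    80: _("published"),
}


def state_to_str(state: int) -> str:
    # str() realizes the lazy translation in the currently active language
    return str(STATE_STR_CONVERSION[state])
```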
2023-11-13T20:04:11
e-valuation/EvaP
2,073
e-valuation__EvaP-2073
[ "2066" ]
63606adfeed9603059943e030a806cd4f7585fc5
diff --git a/evap/evaluation/tools.py b/evap/evaluation/tools.py --- a/evap/evaluation/tools.py +++ b/evap/evaluation/tools.py @@ -1,4 +1,5 @@ import datetime +import typing from abc import ABC, abstractmethod from collections import defaultdict from collections.abc import Iterable, Mapping @@ -12,13 +13,17 @@ from django.forms.formsets import BaseFormSet from django.http import HttpRequest, HttpResponse from django.shortcuts import get_object_or_404 +from django.utils.datastructures import MultiValueDict from django.utils.translation import get_language from django.views.generic import FormView +from django_stubs_ext import StrOrPromise M = TypeVar("M", bound=Model) T = TypeVar("T") Key = TypeVar("Key") Value = TypeVar("Value") +CellValue = str | int | float | None +CV = TypeVar("CV", bound=CellValue) def unordered_groupby(key_value_pairs: Iterable[tuple[Key, Value]]) -> dict[Key, list[Value]]: @@ -35,7 +40,9 @@ def unordered_groupby(key_value_pairs: Iterable[tuple[Key, Value]]) -> dict[Key, return dict(result) -def get_object_from_dict_pk_entry_or_logged_40x(model_cls: type[M], dict_obj: Mapping[str, Any], key: str) -> M: +def get_object_from_dict_pk_entry_or_logged_40x( + model_cls: type[M], dict_obj: MultiValueDict[str, Any] | Mapping[str, Any], key: str +) -> M: try: return get_object_or_404(model_cls, pk=dict_obj[key]) # ValidationError happens for UUID id fields when passing invalid arguments @@ -130,7 +137,8 @@ def clean_email(email: EmailT) -> EmailT: return email -def capitalize_first(string: str) -> str: +def capitalize_first(string: StrOrPromise) -> str: + """Realize lazy promise objects and capitalize first letter.""" return string[0].upper() + string[1:] @@ -174,6 +182,7 @@ def formset_valid(self, formset) -> HttpResponse: return super().form_valid(formset) [email protected]_checkable class HasFormValid(Protocol): def form_valid(self, form): pass @@ -262,7 +271,7 @@ def __init__(self) -> None: else: self.cur_sheet = None - def write_cell(self, label: str | None = "", style: str = "default") -> None: + def write_cell(self, label: CellValue = "", style: str = "default") -> None: """Write a single cell and move to the next column.""" self.cur_sheet.write( self.cur_row, @@ -276,7 +285,7 @@ def next_row(self) -> None: self.cur_col = 0 self.cur_row += 1 - def write_row(self, vals: Iterable[str], style: str = "default") -> None: + def write_row(self, vals: Iterable[CV], style: str | typing.Callable[[CV], str] = "default") -> None: """ Write a cell for every value and go to the next row. 
Styling can be chosen diff --git a/evap/settings.py b/evap/settings.py --- a/evap/settings.py +++ b/evap/settings.py @@ -418,8 +418,12 @@ def CHARACTER_ALLOWED_IN_NAME(character): # pylint: disable=invalid-name TESTING = "test" in sys.argv or "pytest" in sys.modules -# speed up tests +# speed up tests and activate typeguard introspection if TESTING: + from typeguard import install_import_hook + + install_import_hook(("evap", "tools")) + # do not use ManifestStaticFilesStorage as it requires running collectstatic beforehand STORAGES["staticfiles"]["BACKEND"] = "django.contrib.staticfiles.storage.StaticFilesStorage" diff --git a/evap/staff/importers/enrollment.py b/evap/staff/importers/enrollment.py --- a/evap/staff/importers/enrollment.py +++ b/evap/staff/importers/enrollment.py @@ -73,7 +73,14 @@ def differing_fields(self, other) -> set[str]: return {field.name for field in fields(self) if getattr(self, field.name) != getattr(other, field.name)} -class ValidCourseData(CourseData): +class ValidCourseDataMeta(type): + def __instancecheck__(cls, instance: object) -> TypeGuard["ValidCourseData"]: + if not isinstance(instance, CourseData): + return False + return all_fields_valid(instance) + + +class ValidCourseData(CourseData, metaclass=ValidCourseDataMeta): """Typing: CourseData instance where no element is invalid_value""" degrees: set[Degree]
diff --git a/evap/evaluation/tests/test_models_logging.py b/evap/evaluation/tests/test_models_logging.py --- a/evap/evaluation/tests/test_models_logging.py +++ b/evap/evaluation/tests/test_models_logging.py @@ -5,7 +5,7 @@ from model_bakery import baker from evap.evaluation.models import Contribution, Course, Evaluation, Questionnaire, UserProfile -from evap.evaluation.models_logging import FieldAction +from evap.evaluation.models_logging import FieldAction, InstanceActionType class TestLoggedModel(TestCase): @@ -52,7 +52,10 @@ def test_data_attribute_is_correctly_parsed_to_fieldactions(self): ) def test_deletion_data(self): - self.assertEqual(self.evaluation._get_change_data(action_type="delete")["course"]["delete"][0], self.course.id) + self.assertEqual( + self.evaluation._get_change_data(action_type=InstanceActionType.DELETE)["course"]["delete"][0], + self.course.id, + ) self.evaluation.delete() self.assertEqual(self.evaluation.related_logentries().count(), 0)
Check correctness of type annotations at test runtime Plausible but wrong type annotations could give a false sense of security and are therefore (imho) more harmful than no type annotations. When we go through old code and annotate it (as we currently do in #2035), we might make mistakes that mypy does not catch, e.g. because calling code that uses other argument types simply isn't typed yet. There is https://typeguard.readthedocs.io/en/latest/, which allows injecting run-time type checking into the program. I'm thinking of running the tests with this injected so that we can catch cases where we annotated stuff wrongly or too strictly. One issue I can see is that this disallows passing mocked arguments in tests (especially if the validation uses `isinstance`). We might have to add manual exclusions for such cases. I wouldn't want this to be too invasive though; a best-case scenario for me would be a handful of lines to inject the checking and maybe a handful more lines for manual exclusions where we use mocks. @niklasmohrin @Kakadus thoughts?
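A minimal sketch of the "handful of lines" idea (the package tuple and the `TESTING` detection are assumptions): install typeguard's import hook only while tests run, before the instrumented packages get imported:

```python
import sys

TESTING = "test" in sys.argv or "pytest" in sys.modules

if TESTING:
    from typeguard import install_import_hook

    # annotated functions in these packages are checked against their annotations at call time
    install_import_hook(("evap",))
```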
Sounds like a cool attempt :)
2023-11-20T18:47:17
e-valuation/EvaP
2,076
e-valuation__EvaP-2076
[ "2074" ]
2fadfc90e49c4dbfc1dc58d2015543c32b5a8156
diff --git a/evap/student/views.py b/evap/student/views.py --- a/evap/student/views.py +++ b/evap/student/views.py @@ -161,11 +161,15 @@ def render_vote_page(request, evaluation, preview, for_rendering_in_modal=False) evaluation_form_group_top += evaluation_form_group_bottom evaluation_form_group_bottom = [] + contributor_errors_exist = any(form.errors for form_group in form_groups.values() for form in form_group) + errors_exist = contributor_errors_exist or any( + any(form.errors for form in form_group) + for form_group in [evaluation_form_group_top, evaluation_form_group_bottom] + ) + template_data = { - "errors_exist": any( - any(form.errors for form in form_group) - for form_group in [*(form_groups.values()), evaluation_form_group_top, evaluation_form_group_bottom] - ), + "contributor_errors_exist": contributor_errors_exist, + "errors_exist": errors_exist, "evaluation_form_group_top": evaluation_form_group_top, "evaluation_form_group_bottom": evaluation_form_group_bottom, "contributor_form_groups": contributor_form_groups,
diff --git a/evap/student/tests/test_views.py b/evap/student/tests/test_views.py --- a/evap/student/tests/test_views.py +++ b/evap/student/tests/test_views.py @@ -202,6 +202,7 @@ def test_incomplete_general_vote_form(self): self.fill_form(form, fill_general_complete=False) response = form.submit(status=200) self.assertIn("vote for all rating questions", response) + self.assertNotIn("skip the questions about a single person", response) form = page.forms["student-vote-form"] @@ -239,6 +240,7 @@ def test_incomplete_contributors_vote_form(self): self.fill_form(form, fill_contributors_complete=False) response = form.submit(status=200) self.assertIn("vote for all rating questions", response) + self.assertIn("skip the questions about a single person", response) form = page.forms["student-vote-form"]
Change voting error message based on type of missing answer Submitting a poll form with missing answers will result in an error message: `Please make sure to vote for all rating questions. You can also click on "I can't give feedback" to skip the questions about a single person.` The second part only makes sense if at least one of the missing answers is to a contributor question. If all of the missing answers are for general questions, this sentence shouldn't be displayed.
2023-11-20T19:30:45
e-valuation/EvaP
2,216
e-valuation__EvaP-2216
[ "2020" ]
143604c23e10365cd6d5265428ff50342bec4135
diff --git a/evap/rewards/urls.py b/evap/rewards/urls.py --- a/evap/rewards/urls.py +++ b/evap/rewards/urls.py @@ -7,6 +7,7 @@ urlpatterns = [ path("", views.index, name="index"), + path("reward_points_export", views.reward_points_export, name="reward_points_export"), path("reward_point_redemption_events/", views.reward_point_redemption_events, name="reward_point_redemption_events"), path("reward_point_redemption_event/create", views.RewardPointRedemptionEventCreateView.as_view(), name="reward_point_redemption_event_create"), path("reward_point_redemption_event/<int:event_id>/edit", views.RewardPointRedemptionEventEditView.as_view(), name="reward_point_redemption_event_edit"), diff --git a/evap/rewards/views.py b/evap/rewards/views.py --- a/evap/rewards/views.py +++ b/evap/rewards/views.py @@ -1,3 +1,4 @@ +import csv from datetime import datetime from django.contrib import messages @@ -14,7 +15,7 @@ from django.views.generic import CreateView, UpdateView from evap.evaluation.auth import manager_required, reward_user_required -from evap.evaluation.models import Semester +from evap.evaluation.models import Semester, UserProfile from evap.evaluation.tools import AttachmentResponse, get_object_from_dict_pk_entry_or_logged_40x from evap.rewards.exporters import RewardsExporter from evap.rewards.forms import RewardPointRedemptionEventForm @@ -158,6 +159,32 @@ def reward_point_redemption_event_export(request, event_id): return response +@manager_required +def reward_points_export(request): + filename = _("RewardPoints") + f"-{get_language()}.csv" + response = AttachmentResponse(filename, content_type="text/csv") + + writer = csv.writer(response, delimiter=";", lineterminator="\n") + writer.writerow([_("Email address"), _("Number of points")]) + profiles_with_points = ( + UserProfile.objects.annotate( + points=Sum("reward_point_grantings__value", default=0) - Sum("reward_point_redemptions__value", default=0) + ) + .filter(points__gt=0) + .order_by("-points") + ) + + for profile in profiles_with_points.all(): + writer.writerow( + [ + profile.email, + profile.points, + ] + ) + + return response + + @require_POST @manager_required def semester_activation_edit(request, semester_id):
diff --git a/evap/rewards/tests/test_views.py b/evap/rewards/tests/test_views.py --- a/evap/rewards/tests/test_views.py +++ b/evap/rewards/tests/test_views.py @@ -180,7 +180,7 @@ def test_edit_redemption_event(self): self.assertEqual(RewardPointRedemptionEvent.objects.get(pk=self.event.pk).name, "new name") -class TestExportView(WebTestStaffModeWith200Check): +class TestEventExportView(WebTestStaffModeWith200Check): @classmethod def setUpTestData(cls): cls.test_users = [make_manager()] @@ -189,6 +189,30 @@ def setUpTestData(cls): cls.url = f"/rewards/reward_point_redemption_event/{event.pk}/export" +class TestPointsExportView(WebTestStaffModeWith200Check): + @classmethod + def setUpTestData(cls): + cls.test_users = [make_manager()] + cls.url = reverse("rewards:reward_points_export") + + cls.student = baker.make(UserProfile, email="[email protected]") + cls.event = baker.make(RewardPointRedemptionEvent, redeem_end_date=date.today() + timedelta(days=1)) + + def test_positive_points(self): + baker.make(RewardPointGranting, user_profile=self.student, value=5) + baker.make(RewardPointRedemption, user_profile=self.student, event=self.event, value=3) + + response = self.app.get(self.url, user=self.test_users[0], status=200) + self.assertIn("[email protected];2", response) + + def test_zero_points(self): + baker.make(RewardPointGranting, user_profile=self.student, value=5) + baker.make(RewardPointRedemption, user_profile=self.student, event=self.event, value=5) + + response = self.app.get(self.url, user=self.test_users[0], status=200) + self.assertNotIn("[email protected]", response) + + @override_settings( REWARD_POINTS=[ (1 / 3, 1),
Export Reward Point Summary A new button `Export reward points` should be added to the right of the text showing the number of available reward points on the staff reward points redemption events page. Clicking the button should download a CSV file containing a summary of the reward points. This file should contain the two columns `Email` and `Points` for each user, listing the number of points currently available for that user (grantings minus redemptions) next to the user's email address. A line should only be added for users where this number of available points is not zero.
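As an illustration of the requested behaviour only (the model and relation names are assumptions), the export amounts to annotating each user with their balance and writing one CSV row per positive balance (the `default` argument to `Sum` needs Django 4+):

```python
import csv

from django.db.models import Sum
from django.http import HttpResponse

from myapp.models import UserProfile  # hypothetical import path


def reward_points_export(request):
    response = HttpResponse(content_type="text/csv")
    response["Content-Disposition"] = 'attachment; filename="reward_points.csv"'
    writer = csv.writer(response, delimiter=";")
    writer.writerow(["Email", "Points"])
    profiles = (
        UserProfile.objects.annotate(
            points=Sum("reward_point_grantings__value", default=0)
            - Sum("reward_point_redemptions__value", default=0)
        )
        .filter(points__gt=0)  # skip users whose balance is zero
        .order_by("-points")
    )
    for profile in profiles:
        writer.writerow([profile.email, profile.points])
    return response
```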
for the sake of consistency i think this should either result in a xls file or the `Export Redemptions` button should output a csv as well It also should have consistent content: `Export Redemptions` gives Last name, First name, Email address and Number of points, not only Email address and points We talked about this shortly in person and I agree partially: Since both sheets seem to be more on the "just data" than the "this is a spreadsheet with colors and stuff like that" side, I would even say that both should be CSV (as Excel can just open the CSV too). @janno42 What do you think? @joendter do you want to continue working on this or should we unassign you? Go ahead and unassign me > We talked about this shortly in person and I agree partially: Since both sheets seem to be more on the "just data" than the "this is a spreadsheet with colors and stuff like that" side, I would even say that both should be CSV (as Excel can just open the CSV too). @janno42 What do you think? I'd say CSV and just email and points is sufficient for the use case of this issue. The exported file is used internally for a quick overview of the current reward point status and we don't need the name information because the export is not used for, e.g., email generation. I also prefer CSV over Excel where we don't need formatting.
2024-06-05T12:40:58
pytorch/pytorch
131
pytorch__pytorch-131
[ "539" ]
9cd68129da50023929aff0ca4e4ba667ae75d785
diff --git a/torch/nn/functions/thnn/auto.py b/torch/nn/functions/thnn/auto.py --- a/torch/nn/functions/thnn/auto.py +++ b/torch/nn/functions/thnn/auto.py @@ -185,6 +185,7 @@ def _generate_function_classes(scope_dict): function_by_name = {fn.name: fn for fn in function_list} classes_to_generate = {fn.name.partition('_')[0] for fn in function_list} exceptions = { + 'Linear', 'SpatialConvolutionMM', 'SparseLinear', 'TemporalConvolution',
Port LBFGS from Lua optim fixes #483
2016-10-17T16:18:12
pytorch/pytorch
195
pytorch__pytorch-195
[ "153" ]
3e5c121c56e42e8b303962ab0db532afbed128af
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -9,8 +9,8 @@ import sys import os -# TODO: make this more robust -WITH_CUDA = os.path.exists('/Developer/NVIDIA/CUDA-7.5/include') or os.path.exists('/usr/local/cuda/include') +CUDA_HOME = os.getenv('CUDA_HOME', '/usr/local/cuda') +WITH_CUDA = os.path.exists(CUDA_HOME) WITH_CUDNN = WITH_CUDA DEBUG = False @@ -176,18 +176,17 @@ def run(self): pass if WITH_CUDA: - if platform.system() == 'Darwin': - cuda_path = '/Developer/NVIDIA/CUDA-7.5' - cuda_include_path = cuda_path + '/include' - cuda_lib_path = cuda_path + '/lib' - else: - cuda_path = '/usr/local/cuda' - cuda_include_path = cuda_path + '/include' - cuda_lib_path = cuda_path + '/lib64' + cuda_lib_dirs = ['lib64', 'lib'] + cuda_include_path = os.path.join(CUDA_HOME, 'include') + for lib_dir in cuda_lib_dirs: + cuda_lib_path = os.path.join(CUDA_HOME, lib_dir) + if os.path.exists(cuda_lib_path): + break include_dirs.append(cuda_include_path) extra_link_args.append('-L' + cuda_lib_path) extra_link_args.append('-Wl,-rpath,' + cuda_lib_path) extra_compile_args += ['-DWITH_CUDA'] + extra_compile_args += ['-DCUDA_LIB_PATH=' + cuda_lib_path] main_libraries += ['THC'] main_sources += [ "torch/csrc/cuda/Module.cpp", diff --git a/torch/cuda/__init__.py b/torch/cuda/__init__.py --- a/torch/cuda/__init__.py +++ b/torch/cuda/__init__.py @@ -2,6 +2,7 @@ import contextlib import platform import ctypes +import os import torch _initialized = False @@ -11,10 +12,28 @@ def is_available(): return (hasattr(torch._C, '_cuda_isDriverSufficient') and torch._C._cuda_isDriverSufficient()) -def _lazy_init(): - global _initialized, _cudart - if _initialized: - return + +def _load_cudart(): + system = platform.system() + lib_name = 'libcudart.' + ('dylib' if system == 'Darwin' else 'so') + lib_paths = [ + lib_name, + os.path.join(torch._C._cuda_getLibPath(), lib_name), + os.path.join('/usr/local/cuda/lib64', lib_name), + os.path.join('/usr/local/cuda/lib', lib_name), + ] + for path in lib_paths: + try: + return ctypes.cdll.LoadLibrary(path) + except OSError: + pass + raise RuntimeError("couldn't find libcudart. Make sure CUDA libraries " + "are installed in a default location, or that they're in " + + ("DYLD_LIBRARY_PATH" if system == 'Darwin' else "LD_LIBRARY_PATH") + + ".") + + +def _check_driver(): if not hasattr(torch._C, '_cuda_isDriverSufficient'): raise AssertionError("Torch not compiled with CUDA enabled") if not torch._C._cuda_isDriverSufficient(): @@ -33,14 +52,18 @@ def _lazy_init(): Alternatively, go to: https://pytorch.org/binaries to install a PyTorch version that has been compiled with your version of the CUDA driver.""".format(str(torch._C._cuda_getDriverVersion()))) + + +def _lazy_init(): + global _initialized, _cudart + if _initialized: + return + _check_driver() assert torch._C._cuda_init() - _initialized = True - if platform.system() == 'Darwin': - _cudart = ctypes.cdll.LoadLibrary('libcudart.dylib') - else: - _cudart = ctypes.cdll.LoadLibrary('libcudart.so') + _cudart = _load_cudart() _cudart.cudaGetErrorName.restype = ctypes.c_char_p _cudart.cudaGetErrorString.restype = ctypes.c_char_p + _initialized = True def cudart():
figure out a way to load libcudart.so from its compile-time path Right now, if libcudart.so is not in LD_LIBRARY_PATH even though it was found at compile time, these lines will fail: https://github.com/pytorch/pytorch/blob/master/torch/cuda/__init__.py#L39-L43 Avoid this by falling back to a known location. I think I have a few good ideas.
We need to add /usr/local/cuda/lib64 (or that's mac dir) to the rpath of _C the problem i dont think is to add it to rpath, it's because we ctypes.load Right. We could try with the default cuda installation path if it's not found from LD_LIBRARY_PATH. Actually, do we really want to use the compile path? We should try to detect it at runtime, right? Or are we going to ship CUDA libs in our binaries? we first load using runtime paths, and if it fails, fallback to path known at compile time. I fear this is going to get brittle, especially if you are shipping CUDA libs. e.g. a user swaps HW and picks up a new driver and/or toolkit. As @soumith says, I'm pretty sure you want to use the runtime paths. Similarly, CUDA 7.5 won't run on Pascal and we have a cudnn built for 7.5 that will not do the right thing on CUDA 8, so those need to go together. If a user installs the CUDA bits via deb/rpm the right things are supposed to happen. If you push bits, we need to figure out how to make sure things line up. But this is perhaps a much larger packaging conversation. @thatguymike Yeah, I've implemented it to first try loading it without any path and then try `/usr/local/cuda/lib64` and `/usr/local/cuda/lib`. I'll push the commit tomorrow.
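The fallback strategy discussed here can be sketched roughly like this (the candidate paths are the usual defaults, not an exhaustive list): let the dynamic loader's normal search path win first, then probe the common CUDA install locations:

```python
import ctypes
import os
import platform


def load_cudart():
    lib_name = "libcudart." + ("dylib" if platform.system() == "Darwin" else "so")
    candidates = [
        lib_name,  # resolved via LD_LIBRARY_PATH / DYLD_LIBRARY_PATH when possible
        os.path.join("/usr/local/cuda/lib64", lib_name),
        os.path.join("/usr/local/cuda/lib", lib_name),
    ]
    for path in candidates:
        try:
            return ctypes.cdll.LoadLibrary(path)
        except OSError:
            continue
    raise RuntimeError("couldn't find libcudart; install the CUDA libraries in a "
                       "default location or add them to the library path")
```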
2016-11-02T08:25:29
pytorch/pytorch
208
pytorch__pytorch-208
[ "122" ]
bc08011e72e7fc05ae05a21ea6aa2b274a202371
diff --git a/torch/tensor.py b/torch/tensor.py --- a/torch/tensor.py +++ b/torch/tensor.py @@ -91,7 +91,7 @@ def __reduce__(self): return type(self), (self.tolist(),) def __repr__(self): - return repr(str(self)) + return str(self) def __str__(self): # All strings are unicode in Python 3, while we have to encode unicode
print truncated tensors In torch, printing a tensor will print the whole tensor, even if it is huge. This is sometimes annoying, especially when using a notebook, as it can crash it. Numpy has a very nice way to display big tensors by truncating them. Would it be possible to have something similar for pytorch?
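For reference, this is the numpy behaviour being referred to: once an array exceeds the print threshold, its repr elides the middle instead of dumping every element:

```python
import numpy as np

print(np.arange(10000))
# [   0    1    2 ... 9997 9998 9999]
```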
Yeah, we haven't implemented it yet, but that's definitely on our roadmap Implemented in #202. I should say that I have some artefacts that appear with #202 while printing a tensor. Here is an example output: ``` python a = torch.rand(5) print(a) ``` gives ``` '\n 0.6233\n 0.7240\n 0.1248\n 0.3522\n 0.9047\n[torch.FloatTensor of size 5]\n' ``` Curiously, if I simply do ``` python print(torch.rand(5)) ``` It works fine and print nicely, with the truncation for large tensors. I'm using Python 2.7.6 (probably the one that came with my system and not from anaconda). Maybe it's just a bad setup in my machine? hmmm, it works fine on python 2.7.12. wonder what changed between .6 and .12 ``` -0.7472 3.2232 -0.9434 -0.5391 1.9560 1.3194 1.8168 -2.2081 -0.9746 1.4805 [torch.FloatTensor of size 10] ``` ok i got a repro. I opened my ipython and did this: ``` In [6]: a=torch.randn(10) In [7]: a Out[7]: '\n-1.9817\n 0.2041\n-0.2836\n-0.3300\n 1.2530\n 0.3965\n-1.1826\n-1.6713\n-0.6749\n-1.8690\n[torch.FloatTensor of size 10]\n' In [8]: print(a) -1.9817 0.2041 -0.2836 -0.3300 1.2530 0.3965 -1.1826 -1.6713 -0.6749 -1.8690 [torch.FloatTensor of size 10] ``` @soumith my example was flawed, sorry. Using `print` works for me, but when only entering `a` it prints the `\n`s, as in your example.
2016-11-08T18:34:36
pytorch/pytorch
227
pytorch__pytorch-227
[ "222" ]
26d626a47c9c676af456ce6b6025ce5575ad4b2f
diff --git a/torch/storage.py b/torch/storage.py --- a/torch/storage.py +++ b/torch/storage.py @@ -51,6 +51,9 @@ def double(self, async=False): def float(self, async=False): return self.type(type(self).__module__ + '.FloatStorage', async) + def half(self, async=False): + return self.type(type(self).__module__ + '.HalfStorage', async) + def long(self, async=False): return self.type(type(self).__module__ + '.LongStorage', async) diff --git a/torch/tensor.py b/torch/tensor.py --- a/torch/tensor.py +++ b/torch/tensor.py @@ -42,6 +42,9 @@ def double(self, async=False): def float(self, async=False): return self.type(type(self).__module__ + '.FloatTensor') + def half(self, async=False): + return self.type(type(self).__module__ + '.HalfTensor') + def long(self, async=False): return self.type(type(self).__module__ + '.LongTensor')
.half() missing and copies from HalfTensor to anything else missing wrapper code is missing. ``` import torch import torch.cuda a = torch.cuda.FloatTensor(100).half() --------------------------------------------------------------------------- AttributeError: 'FloatTensor' object has no attribute 'half' ``` ``` a = torch.cuda.HalfTensor(100) a.float() --------------------------------------------------------------------------- RuntimeError: Copy function from HalfTensor to FloatTensor isn't implemented! print(a) --------------------------------------------------------------------------- RuntimeError: Copy function from HalfTensor to DoubleTensor isn't implemented! ```
2016-11-17T21:19:03
pytorch/pytorch
255
pytorch__pytorch-255
[ "253" ]
bcfa2d6c79e74275ef555baee45f89e397e7f0b8
diff --git a/torch/nn/functions/rnn.py b/torch/nn/functions/rnn.py --- a/torch/nn/functions/rnn.py +++ b/torch/nn/functions/rnn.py @@ -154,12 +154,12 @@ def AutogradRNN(mode, input_size, hidden_size, num_layers=1, batch_first=False, def forward(input, weight, hidden): if batch_first: - input.transpose(0, 1) + input = input.transpose(0, 1) nexth, output = func(input, hidden, weight) if batch_first: - output.transpose(0, 1) + output = output.transpose(0, 1) return output, nexth
batch_first broken in AutogradRNN The last line here fails on CPU or when CUDNN is otherwise unavailable: ```python l, b, t, x, h = 2, 3, 5, 10, 20 rnn = nn.LSTM(x, h, l, batch_first=True) inpt = Variable(torch.randn(b, t, x)) h0 = Variable(torch.randn(l, b, h)) c0 = Variable(torch.randn(l, b, h)) output, hn = rnn(inpt, (h0, c0)) ``` This is because `AutogradRNN.forward` accidentally assumes `Tensor`'s in-place `transpose` semantics rather than the functional semantics of `Variable` (`cudnn.rnn.forward` gets it right): ```python def forward(input, weight, hidden): if batch_first: input.transpose(0, 1) nexth, output = func(input, hidden, weight) if batch_first: output.transpose(0, 1) ``` I can push a PR that fixes this, or one of the devs can put it in the next bugfix PR: ```python def forward(input, weight, hidden): if batch_first: input = input.transpose(0, 1) nexth, output = func(input, hidden, weight) if batch_first: output = output.transpose(0, 1) ```
2016-11-25T23:01:25
pytorch/pytorch
325
pytorch__pytorch-325
[ "323" ]
cc6e3c92d2f1a9cd8c34d4753173b4024a618f4a
diff --git a/torch/autograd/functions/stochastic.py b/torch/autograd/functions/stochastic.py --- a/torch/autograd/functions/stochastic.py +++ b/torch/autograd/functions/stochastic.py @@ -7,23 +7,29 @@ class Multinomial(StochasticFunction): - def __init__(self, num_samples): + def __init__(self, num_samples, with_replacement): super(Multinomial, self).__init__() self.num_samples = num_samples + self.with_replacement = with_replacement def forward(self, probs): - samples = probs.multinomial(self.num_samples) + samples = probs.multinomial(self.num_samples, self.with_replacement) self.save_for_backward(probs, samples) self.mark_non_differentiable(samples) return samples def backward(self, reward): probs, samples = self.saved_tensors + if probs.dim() == 1: + probs = probs.unsqueeze(0) + samples = samples.unsqueeze(0) grad_probs = probs.new().resize_as_(probs).zero_() - output_probs = probs.index_select(0, samples) + output_probs = probs.gather(1, samples) output_probs.add_(1e-6).cinv_() output_probs.neg_().mul_(reward) - grad_probs.index_add_(0, samples, output_probs) + # TODO: add batched index_add + for i in range(probs.size(0)): + grad_probs[i].index_add_(0, samples[i], output_probs[i]) return grad_probs diff --git a/torch/autograd/functions/tensor.py b/torch/autograd/functions/tensor.py --- a/torch/autograd/functions/tensor.py +++ b/torch/autograd/functions/tensor.py @@ -103,7 +103,7 @@ def backward(self, grad_output): class Expand(Function): - def __init__(self, *sizes): + def __init__(self, sizes): super(Expand, self).__init__() self.sizes = sizes self.expanded_dims = [] @@ -164,7 +164,7 @@ def backward(self, grad_output): class Permute(Function): - def __init__(self, *dim_indices): + def __init__(self, dim_indices): super(Permute, self).__init__() self.dim_indices = dim_indices self.rev_dim_indices = [None for _ in range(len(dim_indices))] diff --git a/torch/autograd/variable.py b/torch/autograd/variable.py --- a/torch/autograd/variable.py +++ b/torch/autograd/variable.py @@ -517,7 +517,7 @@ def expand(self, *sizes): raise ValueError("expand expects a several ints or a single " "torch.Size argument") sizes = sizes[0] - return Expand(*sizes)(self) + return Expand(sizes)(self) def expand_as(self, tensor): return Expand(tensor.size())(self) @@ -548,7 +548,7 @@ def unsqueeze(self, dim): return Unsqueeze(dim)(self) def permute(self, *permutation): - return Permute(*permutation)(self) + return Permute(permutation)(self) def diag(self, diagonal_idx=0): return Diag(diagonal_idx)(self) @@ -559,8 +559,8 @@ def tril(self, diagonal_idx=0): def triu(self, diagonal_idx=0): return Triu(diagonal_idx)(self) - def multinomial(self, num_samples=1): - return Multinomial(num_samples)(self) + def multinomial(self, num_samples=1, with_replacement=False): + return Multinomial(num_samples, with_replacement)(self) def bernoulli(self): return Bernoulli()(self)
diff --git a/test/test_autograd.py b/test/test_autograd.py --- a/test/test_autograd.py +++ b/test/test_autograd.py @@ -458,16 +458,18 @@ def backward(self, grad_output): self.assertEqual(x.grad, x.data.clone().fill_(1)) def test_stochastic(self): - x = Variable(torch.rand(10), requires_grad=True) - stddevs = Variable(torch.rand(10) * 5, requires_grad=True) + x = Variable(torch.rand(2, 10), requires_grad=True) + stddevs = Variable(torch.rand(2, 10) * 5, requires_grad=True) y = (x * 2).clamp(0, 1) - y = y / y.sum().expand_as(y) + y = y / y.sum(1).expand_as(y) samples_multi = y.multinomial(5) + samples_multi_flat = y[0].multinomial(5) samples_bernoulli = y.bernoulli() samples_norm = torch.normal(y) samples_norm_std = torch.normal(y, stddevs) z = samples_multi * 2 + 4 - z = torch.cat([z, z]) + z = z + samples_multi_flat.unsqueeze(0).expand_as(samples_multi) + z = torch.cat([z, z], 1) z = z.double() z = z + samples_bernoulli + samples_norm + samples_norm_std last_sample = torch.normal(z, 4) @@ -475,15 +477,17 @@ def test_stochastic(self): self.assertFalse(z.requires_grad) self.assertRaises(RuntimeError, lambda: z.backward(retain_variables=True)) - samples_multi.reinforce(torch.randn(5)) + samples_multi.reinforce(torch.randn(2, 5)) + self.assertRaises(RuntimeError, lambda: z.backward(retain_variables=True)) + samples_multi_flat.reinforce(torch.randn(5)) self.assertRaises(RuntimeError, lambda: z.backward(retain_variables=True)) - samples_bernoulli.reinforce(torch.randn(10)) + samples_bernoulli.reinforce(torch.randn(2, 10)) self.assertRaises(RuntimeError, lambda: z.backward(retain_variables=True)) - samples_norm.reinforce(torch.randn(10)) + samples_norm.reinforce(torch.randn(2, 10)) self.assertRaises(RuntimeError, lambda: z.backward(retain_variables=True)) - samples_norm_std.reinforce(torch.randn(10)) + samples_norm_std.reinforce(torch.randn(2, 10)) self.assertRaises(RuntimeError, lambda: z.backward(retain_variables=True)) - last_sample.reinforce(torch.randn(10)) + last_sample.reinforce(torch.randn(2, 10)) last_sample.backward(retain_variables=True) z.backward() @@ -528,12 +532,12 @@ def index_variable(num_indices, max_indices): (PowConstant, (3.14,), (torch.rand(L, L),) ), (Transpose, (0, 1), (torch.rand(L, L),) ), (Transpose, (2, 0), (torch.rand(S, S, S),), '3d' ), - (Permute, (0, 4, 3, 5, 1, 2), ((1, 2, 3, 4, 5, 6),) ), + (Permute, ((0, 4, 3, 5, 1, 2),), ((1, 2, 3, 4, 5, 6),) ), (Index, ((1, 2),), (torch.rand(S, S, S),) ), (Index, (slice(0, 3),), (torch.rand(S, S, S),), 'slice' ), (Index, ((slice(0, 3), 1),),(torch.rand(S, S, S),), 'slice_index' ), (View, (S*S, S), (torch.rand(S, S, S),) ), - (Expand, (S, 5, S, 5), ((S, 1, S, 1),) ), + (Expand, ((S, 5, S, 5),), ((S, 1, S, 1),) ), (Exp, (), (torch.rand(S, S, S),) ), (Log, (), (torch.rand(S, S, S) + 1e-2,) ), (Log1p, (), (torch.rand(S, S, S),) ), diff --git a/test/test_torch.py b/test/test_torch.py --- a/test/test_torch.py +++ b/test/test_torch.py @@ -2407,14 +2407,14 @@ def isBinary(t): self.assertTrue(isBinary(q.bernoulli())) def test_normal(self): - q = torch.Tensor(50, 50) + q = torch.Tensor(100, 100) q.normal_() - self.assertEqual(q.mean(), 0, 0.1) - self.assertEqual(q.std(), 1, 0.1) + self.assertEqual(q.mean(), 0, 0.2) + self.assertEqual(q.std(), 1, 0.2) q.normal_(2, 3) - self.assertEqual(q.mean(), 2, 0.1) - self.assertEqual(q.std(), 3, 0.1) + self.assertEqual(q.mean(), 2, 0.3) + self.assertEqual(q.std(), 3, 0.3) mean = torch.Tensor(100, 100) std = torch.Tensor(100, 100) @@ -2435,13 +2435,13 @@ def test_normal(self): r = torch.normal(2, 
std) self.assertEqual(r.mean(), 2, 0.2) - self.assertEqual(r[:,:50].std(), 4, 0.2) + self.assertEqual(r[:,:50].std(), 4, 0.3) self.assertEqual(r[:,50:].std(), 1, 0.2) r = torch.normal(mean, std) self.assertEqual(r[:50].mean(), 0, 0.2) self.assertEqual(r[50:].mean(), 1, 0.2) - self.assertEqual(r[:,:50].std(), 4, 0.2) + self.assertEqual(r[:,:50].std(), 4, 0.3) self.assertEqual(r[:,50:].std(), 1, 0.2) def test_serialization(self):
Problem with Multinomial (Stochastic Nodes) The following code makes an error: ``` === CODE import torch import torch.nn as nn from torch.autograd import Variable from torch.autograd.functions import Multinomial from torch.autograd.functions import Bernoulli l=nn.Linear(5,2) s=nn.Softmax() m=Multinomial(1) x=Variable(torch.randn(1,5),requires_grad=True) probabilities=s(l(x)) y=m(probabilities) print(y) y.reinforce(torch.randn(1,1)) y.backward() ==== ERROR Traceback (most recent call last): File "rl/tutorials/tmp.py", line 16, in <module> y.backward() File "/home/denoyer/anaconda3/lib/python3.5/site-packages/torch/autograd/variable.py", line 90, in backward self._execution_engine.run_backward(self, gradient, retain_variables) File "/home/denoyer/anaconda3/lib/python3.5/site-packages/torch/autograd/stochastic_function.py", line 14, in _do_backward result = super(StochasticFunction, self)._do_backward((self.reward,), retain_variables) File "/home/denoyer/anaconda3/lib/python3.5/site-packages/torch/autograd/functions/stochastic.py", line 23, in backward output_probs = probs.index_select(0, samples) RuntimeError: Index is supposed to be a vector at /tmp/pip-jim5mxb2-build/torch/lib/TH/generic/THTensorMath.c:136 ```` It is also true with y.reinforce(torch.randn(1))
2016-12-17T10:59:48
pytorch/pytorch
397
pytorch__pytorch-397
[ "365" ]
dcf5f8671c4a3239af453f0630f79d0ca73eea65
diff --git a/tools/cwrap/plugins/CuDNNPlugin.py b/tools/cwrap/plugins/CuDNNPlugin.py --- a/tools/cwrap/plugins/CuDNNPlugin.py +++ b/tools/cwrap/plugins/CuDNNPlugin.py @@ -52,7 +52,7 @@ class CuDNNPlugin(CWrapPlugin): $options } - THPUtils_invalidArguments(args, "$readable_name", $num_options, $expected_args); + THPUtils_invalidArguments(args, kwargs, "$readable_name", $num_options, $expected_args); return NULL; END_HANDLE_TH_ERRORS } diff --git a/tools/cwrap/plugins/StandaloneExtension.py b/tools/cwrap/plugins/StandaloneExtension.py --- a/tools/cwrap/plugins/StandaloneExtension.py +++ b/tools/cwrap/plugins/StandaloneExtension.py @@ -70,7 +70,7 @@ class StandaloneExtension(CWrapPlugin): int __argcount = args ? PyTuple_Size(args) : 0; $options } else { - THPUtils_invalidArguments(args, "$name", 1, $expected_args); + THPUtils_invalidArguments(args, NULL, "$name", 1, $expected_args); return NULL; } END_HANDLE_TH_ERRORS diff --git a/tools/cwrap/plugins/THPPlugin.py b/tools/cwrap/plugins/THPPlugin.py --- a/tools/cwrap/plugins/THPPlugin.py +++ b/tools/cwrap/plugins/THPPlugin.py @@ -85,7 +85,7 @@ class THPPlugin(CWrapPlugin): $options } - THPUtils_invalidArguments(args, "$readable_name", $num_options, $expected_args); + THPUtils_invalidArguments(args, kwargs, "$readable_name", $num_options, $expected_args); return NULL; END_HANDLE_TH_ERRORS } @@ -174,6 +174,18 @@ def format_args(args, var_args=False): for arg in args if not arg.get('ignore_check', False) and not arg.get('output')] + output_args = list(filter(lambda a: a.get('output'), args)) + if output_args: + if len(output_args) > 1: + out_type = 'tuple[' + out_type += ', '.join( + self.TYPE_NAMES[arg['type']] for arg in output_args) + out_type += ']' + option_desc += ['#' + out_type + ' out'] + else: + arg = output_args[0] + option_desc += ['#' + self.TYPE_NAMES[arg['type']] + ' out'] + if option_desc: return '({})'.format(', '.join(option_desc)) else:
invalidArguments doesn't take keyword args into account If someone passes keyword arguments to any of our C functions, they won't appear in the error message (as if they weren't specified). There's the same issue with the `out` argument (see #364) - if you specify correct args, but e.g. pass `out=(torch.LongTensor(), torch.FloatTensor())` to `max`, the message will appear as if all arguments were correct (while `out` has invalid order). It can be pretty confusing.
2017-01-03T17:21:26
pytorch/pytorch
546
pytorch__pytorch-546
[ "523" ]
ca555abcf93fe7c874705d881ba4839bef495ed0
diff --git a/torch/nn/_functions/conv.py b/torch/nn/_functions/conv.py --- a/torch/nn/_functions/conv.py +++ b/torch/nn/_functions/conv.py @@ -149,6 +149,8 @@ def _thnn(self, fn_name, input, weight, *args): res = [] for g in range(self.groups): def group(tensor, dim=None): + if tensor is None: + return None if dim is None: dim = 0 if tensor.dim() == 1 else 1 n = tensor.size(dim) // self.groups @@ -158,7 +160,8 @@ def group(tensor, dim=None): grouped_args += [group(t) for t in args] res.append(impl[fn_name](self, self._bufs[g], *grouped_args)) if fn_name == 'grad_params': - return [torch.cat(t, 0) for t in zip(*res)] + return [torch.cat(t, 0) if t[0] is not None else None + for t in zip(*res)] else: return torch.cat(res, 1) @@ -178,8 +181,11 @@ def _view3d(*tensors): # view 4d tensor as 3d output = [] for t in tensors: - assert t.dim() == 4 and t.size(2) == 1 - output += [t.squeeze(2)] + if t is None: + output += [None] + else: + assert t.dim() == 4 and t.size(2) == 1 + output += [t.squeeze(2)] return output diff --git a/torch/nn/parameter.py b/torch/nn/parameter.py --- a/torch/nn/parameter.py +++ b/torch/nn/parameter.py @@ -2,6 +2,25 @@ class Parameter(Variable): + """A kind of Variable that is to be considered a module parameter. + + Parameters are :class:`~torch.autograd.Variable` subclasses, that have a + very special property when used with :class:`Module` s - when they're + assigned as Module attributes they are automatically added to the list of + its parameters, and will appear e.g. in :meth:`~Module.parameters` iterator. + Assigning a Variable doesn't have such effect. This is because one might + want to cache some temporary state, like last hidden state of the RNN, in + the model. If there was no such class as :class:`Parameter`, these + temporaries would get registered too. + + Another difference is that parameters can't be volatile and that they + require gradient by default. + + Arguments: + data (Tensor): parameter tensor. + requires_grad (bool, optional): if the parameter requires gradient. See + :ref:`excluding-subgraphs` for more details. + """ def __init__(self, data, requires_grad=True): super(Parameter, self).__init__(data, requires_grad=requires_grad) diff --git a/torch/optim/optimizer.py b/torch/optim/optimizer.py --- a/torch/optim/optimizer.py +++ b/torch/optim/optimizer.py @@ -24,6 +24,8 @@ def __init__(self, params, defaults): self.state = defaultdict(dict) self.param_groups = list(params) + if len(self.param_groups) == 0: + raise ValueError("optimizer got an empty parameter list") if not isinstance(self.param_groups[0], dict): self.param_groups = [{'params': self.param_groups}] @@ -50,6 +52,8 @@ def __init__(self, params, defaults): if not param.requires_grad: raise ValueError("optimizing a parameter that doesn't " "require gradients") + if param.creator is not None: + raise ValueError("can't optimize a non-leaf Variable") def __getstate__(self): return {
diff --git a/test/test_nn.py b/test/test_nn.py --- a/test/test_nn.py +++ b/test/test_nn.py @@ -1308,6 +1308,11 @@ def add_test(test): input_size=(2, 4, 6, 5), cudnn=True, ), + dict( + fullname='Conv2d_groups_thnn', + constructor=lambda: nn.Conv2d(4, 6, (3, 2), groups=2), + input_size=(2, 4, 6, 5), + ), dict( module_name='ConvTranspose2d', constructor_args=(3, 4, 3, (3, 2), 1, (1, 1)),
nn.Conv2d errors with bias=False and groups=2 x = Variable(torch.rand((16, 64, 8, 8))) res = nn.Conv2d(16, 16, kernel_size=3, groups=2, bias=False)(x) results in: > File ".../torch/nn/_functions/conv.py", line 88, in _update_output return self._thnn('update_output', input, weight, bias) File ".../torch/nn/_functions/conv.py", line 158, in _thnn grouped_args += [group(t) for t in args] File ".../torch/nn/_functions/conv.py", line 153, in group dim = 0 if tensor.dim() == 1 else 1 AttributeError: 'NoneType' object has no attribute 'dim' The `t` in line 158 is `None` because there is no bias. I am running on CPU so this is in the `_thnn` code.
2017-01-22T23:09:32
pytorch/pytorch
628
pytorch__pytorch-628
[ "596" ]
fb2d28f477c76bd94e3e3e9d2f424caa295d75c3
diff --git a/tools/cwrap/plugins/THPPlugin.py b/tools/cwrap/plugins/THPPlugin.py --- a/tools/cwrap/plugins/THPPlugin.py +++ b/tools/cwrap/plugins/THPPlugin.py @@ -16,6 +16,9 @@ class THPPlugin(CWrapPlugin): 'THBoolTensor*': Template('((THPBoolTensor*)$arg)->cdata'), 'THIndexTensor*': Template('((THPIndexTensor*)$arg)->cdata'), + 'THCudaTensor*': Template('((THCPFloatTensor*)$arg)->cdata'), + 'THCudaDoubleTensor*': Template('((THCPDoubleTensor*)$arg)->cdata'), + 'THSFloatTensor*': Template('((THSPFloatTensor*)$arg)->cdata'), 'THSDoubleTensor*': Template('((THSPDoubleTensor*)$arg)->cdata'), 'THSLongTensor*': Template('((THSPLongTensor*)$arg)->cdata'), @@ -49,6 +52,9 @@ class THPPlugin(CWrapPlugin): 'THBoolTensor*': Template('(PyObject*)Py_TYPE($arg) == THPBoolTensorClass'), 'THIndexTensor*': Template('(PyObject*)Py_TYPE($arg) == THPIndexTensorClass'), + 'THCudaTensor*': Template('(PyObject*)Py_TYPE($arg) == THCPFloatTensorClass'), + 'THCudaDoubleTensor*': Template('(PyObject*)Py_TYPE($arg) == THCPDoubleTensorClass'), + 'THSDoubleTensor*': Template('(PyObject*)Py_TYPE($arg) == THSPDoubleTensorClass'), 'THSFloatTensor*': Template('(PyObject*)Py_TYPE($arg) == THSPFloatTensorClass'), 'THSLongTensor*': Template('(PyObject*)Py_TYPE($arg) == THSPLongTensorClass'), @@ -160,6 +166,8 @@ def _allocate(typename, tmpl, cuda_tmpl=None, sparse=False): 'THIndexTensor*': '" THPModuleStr "LongTensor', 'THFloatTensor*': '" THPModuleStr "FloatTensor', 'THDoubleTensor*': '" THPModuleStr "DoubleTensor', + 'THCudaTensor*': 'torch.cuda.FloatTensor', + 'THCudaDoubleTensor*': 'torch.cuda.DoubleTensor', 'THSize*': 'torch.Size', 'THStride*': 'tuple', 'long': 'int', diff --git a/torch/nn/modules/rnn.py b/torch/nn/modules/rnn.py --- a/torch/nn/modules/rnn.py +++ b/torch/nn/modules/rnn.py @@ -103,7 +103,7 @@ class RNN(RNNBase): Args: input_size: The number of expected features in the input x hidden_size: The number of features in the hidden state h - num_layers: the size of the convolving kernel. + num_layers: Number of recurrent layers. nonlinearity: The non-linearity to use ['tanh'|'relu']. Default: 'tanh' bias: If False, then the layer does not use bias weights b_ih and b_hh. Default: True batch_first: If True, then the input tensor is provided as (batch, seq, feature) @@ -111,14 +111,14 @@ class RNN(RNNBase): bidirectional: If True, becomes a bidirectional RNN. Default: False Inputs: input, h_0 - - `input`: A (seq_len x batch x input_size) tensor containing the features of the input sequence. - - `h_0`: A ((num_layers * num_directions) x batch x hidden_size) tensor containing the initial hidden state - for each element in the batch. + - **input** (seq_len, batch, input_size): tensor containing the features of the input sequence. + - **h_0** (num_layers * num_directions, batch, hidden_size): tensor containing the initial hidden state + for each element in the batch. Outputs: output, h_n - - `output`: A (seq_len x batch x hidden_size) tensor containing the output features (h_k) - from the last layer of the RNN, for each k - - `h_n`: A (num_layers x batch x hidden_size) tensor containing the hidden state for k=seq_len + - **output** (seq_len, batch, hidden_size * num_directions): tensor containing the output features (h_k) + from the last layer of the RNN, for each k. + - **h_n** (num_layers * num_directions, batch, hidden_size): tensor containing the hidden state for k=seq_len. 
Attributes: weight_ih_l[k]: the learnable input-hidden weights of the k-th layer, @@ -178,24 +178,25 @@ class LSTM(RNNBase): Args: input_size: The number of expected features in the input x hidden_size: The number of features in the hidden state h - num_layers: the size of the convolving kernel. + num_layers: Number of recurrent layers. bias: If False, then the layer does not use bias weights b_ih and b_hh. Default: True batch_first: If True, then the input tensor is provided as (batch, seq, feature) dropout: If non-zero, introduces a dropout layer on the outputs of each RNN layer bidirectional: If True, becomes a bidirectional RNN. Default: False - Inputs: `input, (h_0, c_0)` - - `input` : A (seq_len x batch x input_size) tensor containing the features of the input sequence. - - `h_0` : A ((num_layers * num_directions) x batch x hidden_size) tensor containing - the initial hidden state for each element in the batch. - - `c_0` : A ((num_layers * num_directions) x batch x hidden_size) tensor containing - the initial cell state for each element in the batch. + Inputs: input, (h_0, c_0) + - **input** (seq_len, batch, input_size): tensor containing the features of the input sequence. + - **h_0** (num_layers \* num_directions, batch, hidden_size): tensor containing + the initial hidden state for each element in the batch. + - **c_0** (num_layers \* num_directions, batch, hidden_size): tensor containing + the initial cell state for each element in the batch. + Outputs: output, (h_n, c_n) - - `output` : A (seq_len x batch x hidden_size) tensor containing the output features `(h_t)` from the last layer - of the RNN, for each t - - `h_n` : A (num_layers x batch x hidden_size) tensor containing the hidden state for t=seq_len - - `c_n` : A (num_layers x batch x hidden_size) tensor containing the cell state for t=seq_len + - **output** (seq_len, batch, hidden_size * num_directions): tensor containing + the output features `(h_t)` from the last layer of the RNN, for each t. + - **h_n** (num_layers * num_directions, batch, hidden_size): tensor containing the hidden state for t=seq_len + - **c_n** (num_layers * num_directions, batch, hidden_size): tensor containing the cell state for t=seq_len Attributes: weight_ih_l[k] : the learnable input-hidden weights of the k-th layer `(W_ir|W_ii|W_in)`, of shape @@ -241,21 +242,21 @@ class GRU(RNNBase): Args: input_size: The number of expected features in the input x hidden_size: The number of features in the hidden state h - num_layers: the size of the convolving kernel. + num_layers: Number of recurrent layers. bias: If False, then the layer does not use bias weights b_ih and b_hh. Default: True batch_first: If True, then the input tensor is provided as (batch, seq, feature) dropout: If non-zero, introduces a dropout layer on the outputs of each RNN layer bidirectional: If True, becomes a bidirectional RNN. Default: False - Inputs: `input, h_0` - - `input` : A `(seq_len x batch x input_size)` tensor containing the features of the input sequence. - - `h_0` : A `((num_layers * num_directions) x batch x hidden_size)` tensor containing the initial - hidden state for each element in the batch. + Inputs: input, h_0 + - **input** (seq_len, batch, input_size): tensor containing the features of the input sequence. + - **h_0** (num_layers * num_directions, batch, hidden_size): tensor containing the initial + hidden state for each element in the batch. 
- Outputs: `output, h_n` - - `output` : A `(seq_len x batch x hidden_size)` tensor containing the output features `(h_t)` from - the last layer of the RNN, for each t - - `h_n` : A `(num_layers x batch x hidden_size)` tensor containing the hidden state for t=seq_len + Outputs: output, h_n + - **output** (seq_len, batch, hidden_size * num_directions): tensor containing the output features h_t from + the last layer of the RNN, for each t. + - **h_n** (num_layers * num_directions, batch, hidden_size): tensor containing the hidden state for t=seq_len Attributes: weight_ih_l[k] : the learnable input-hidden weights of the k-th layer (W_ir|W_ii|W_in), of shape @@ -303,12 +304,12 @@ class RNNCell(RNNCellBase): bias: If False, then the layer does not use bias weights b_ih and b_hh. Default: True nonlinearity: The non-linearity to use ['tanh'|'relu']. Default: 'tanh' - Inputs: `input, hidden` - - input: A `(batch x input_size)` tensor containing input features - - hidden: A `(batch x hidden_size)` tensor containing the initial hidden state for each element in the batch. + Inputs: input, hidden + - **input** (batch, input_size): tensor containing input features + - **hidden** (batch, hidden_size): tensor containing the initial hidden state for each element in the batch. - Outputs: `h'` - - `h'`: A `(batch x hidden_size)` tensor containing the next hidden state for each element in the batch + Outputs: h' + - **h'** (batch, hidden_size): tensor containing the next hidden state for each element in the batch Attributes: weight_ih: the learnable input-hidden weights, of shape `(input_size x hidden_size)` @@ -383,14 +384,14 @@ class LSTMCell(RNNCellBase): hidden_size: The number of features in the hidden state h bias: If `False`, then the layer does not use bias weights `b_ih` and `b_hh`. Default: True - Inputs: `input, (h_0, c_0)` - - `input` : A `(batch x input_size)` tensor containing input features - - `h_0` : A `(batch x hidden_size)` tensor containing the initial hidden state for each element in the batch. - - `c_0` : A `(batch x hidden_size)` tensor containing the initial cell state for each element in the batch. + Inputs: input, (h_0, c_0) + - **input** (batch, input_size): tensor containing input features + - **h_0** (batch, hidden_size): tensor containing the initial hidden state for each element in the batch. + - **c_0** (batch. hidden_size): tensor containing the initial cell state for each element in the batch. - Outputs: `h_1, c_1` - - h_1: A `(batch x hidden_size)` tensor containing the next hidden state for each element in the batch - - c_1: A `(batch x hidden_size)` tensor containing the next cell state for each element in the batch + Outputs: h_1, c_1 + - **h_1** (batch, hidden_size): tensor containing the next hidden state for each element in the batch + - **c_1** (batch, hidden_size): tensor containing the next cell state for each element in the batch Attributes: weight_ih: the learnable input-hidden weights, of shape `(input_size x hidden_size)` @@ -455,12 +456,12 @@ class GRUCell(RNNCellBase): hidden_size: The number of features in the hidden state h bias: If `False`, then the layer does not use bias weights `b_ih` and `b_hh`. Default: `True` - Inputs: `input, hidden` - - `input` : A `(batch x input_size)` tensor containing input features - - `hidden` : A `(batch x hidden_size)` tensor containing the initial hidden state for each element in the batch. 
+ Inputs: input, hidden + - **input** (batch, input_size): tensor containing input features + - **hidden** (batch, hidden_size): tensor containing the initial hidden state for each element in the batch. - Outputs: `h'` - - `h'`: A `(batch x hidden_size)` tensor containing the next hidden state for each element in the batch + Outputs: h' + - **h'**: (batch, hidden_size): tensor containing the next hidden state for each element in the batch Attributes: weight_ih: the learnable input-hidden weights, of shape `(input_size x hidden_size)`
Bug on Bernoulli (using GPU)

Hi,

This code raises an error at line 20, which seems strange, since `bernoulli` works for tensors but not for Variables:

```
import torch
import torch.nn.functional as F
from torch.autograd import Variable

a=F.sigmoid(torch.randn(10,1))

#This is OK
a.bernoulli()

#This is OK also
b=a.cuda()
b.bernoulli()

#This is OK
va=Variable(a)
va.bernoulli()

#This is a bug !
vb=Variable(a).cuda()
vb.bernoulli()
```
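Until CUDA Variables support `bernoulli()` directly, one possible stopgap — only a sketch, and it detaches the sample from the autograd graph — is to sample on the underlying CUDA tensor (which the report above shows does work) and re-wrap the result:

```python
import torch
import torch.nn.functional as F
from torch.autograd import Variable

probs = Variable(F.sigmoid(torch.randn(10, 1)).cuda())

# .data is the underlying torch.cuda.FloatTensor; bernoulli() works there,
# so sample on it and wrap the sample back into a Variable.
sample = Variable(probs.data.bernoulli())
```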
2017-01-28T22:35:58
pytorch/pytorch
833
pytorch__pytorch-833
[ "101" ]
c76770f40e1dcdf22c70f5db78660a8c2755c33c
diff --git a/torch/nn/init.py b/torch/nn/init.py --- a/torch/nn/init.py +++ b/torch/nn/init.py @@ -0,0 +1,259 @@ +import math +import random + +import torch +from torch.autograd import Variable + + +def uniform(tensor, a=0, b=1): + """Fills the input Tensor or Variable with values drawn from a uniform U(a,b) + + Args: + tensor: a n-dimension torch.Tensor + a: the lower bound of the uniform distribution + b: the upper bound of the uniform distribution + + Examples: + >>> w = torch.Tensor(3, 5) + >>> nninit.uniform(w) + """ + if isinstance(tensor, Variable): + uniform(tensor.data, a=a, b=b) + return tensor + return tensor.uniform_(a, b) + + +def normal(tensor, mean=0, std=1): + """Fills the input Tensor or Variable with values drawn from a normal distribution with the given mean and std + + Args: + tensor: a n-dimension torch.Tensor + mean: the mean of the normal distribution + std: the standard deviation of the normal distribution + + Examples: + >>> w = torch.Tensor(3, 5) + >>> nninit.normal(w) + """ + if isinstance(tensor, Variable): + normal(tensor.data, mean=mean, std=std) + return tensor + return tensor.normal_(mean, std) + + +def constant(tensor, val): + """Fills the input Tensor or Variable with the value `val` + + Args: + tensor: a n-dimension torch.Tensor + val: the value to fill the tensor with + + Examples: + >>> w = torch.Tensor(3, 5) + >>> nninit.constant(w) + """ + if isinstance(tensor, Variable): + constant(tensor.data, val) + return tensor + return tensor.fill_(val) + + +def _calculate_fan_in_and_fan_out(tensor): + if tensor.ndimension() < 2: + raise ValueError("fan in and fan out can not be computed for tensor of size ", tensor.size()) + + if tensor.ndimension() == 2: # Linear + fan_in = tensor.size(1) + fan_out = tensor.size(0) + else: + num_input_fmaps = tensor.size(1) + num_output_fmaps = tensor.size(0) + receptive_field_size = 1 + if tensor.dim() > 2: + receptive_field_size = tensor[0][0].numel() + fan_in = num_input_fmaps * receptive_field_size + fan_out = num_output_fmaps * receptive_field_size + + return fan_in, fan_out + + +def xavier_uniform(tensor, gain=1): + """Fills the input Tensor or Variable with values according to the method described in "Understanding the difficulty + of training deep feedforward neural networks" - Glorot, X. and Bengio, Y., using a uniform distribution. + + The resulting tensor will have values sampled from U(-a, a) where a = gain * sqrt(2/(fan_in + fan_out)) * sqrt(3) + + Args: + tensor: a n-dimension torch.Tensor + gain: an optional scaling factor to be applied + + Examples: + >>> w = torch.Tensor(3, 5) + >>> nninit.xavier_uniform(w, gain=math.sqrt(2.0)) + """ + if isinstance(tensor, Variable): + xavier_uniform(tensor.data, gain=gain) + return tensor + + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) + std = gain * math.sqrt(2.0 / (fan_in + fan_out)) + a = math.sqrt(3.0) * std + return tensor.uniform_(-a, a) + + +def xavier_normal(tensor, gain=1): + """Fills the input Tensor or Variable with values according to the method described in "Understanding the difficulty + of training deep feedforward neural networks" - Glorot, X. and Bengio, Y., using a normal distribution. 
+ + The resulting tensor will have values sampled from normal distribution with mean=0 and + std = gain * sqrt(2/(fan_in + fan_out)) + + Args: + tensor: a n-dimension torch.Tensor + gain: an optional scaling factor to be applied + + Examples: + >>> w = torch.Tensor(3, 5) + >>> nninit.xavier_normal(w) + """ + if isinstance(tensor, Variable): + xavier_normal(tensor.data, gain=gain) + return tensor + + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) + std = gain * math.sqrt(2.0 / (fan_in + fan_out)) + return tensor.normal_(0, std) + + +def _calculate_correct_fan(tensor, mode): + mode = mode.lower() + valid_modes = ['fan_in', 'fan_out'] + if mode not in valid_modes: + raise ValueError("mode {} not supported, please use one of {}".format(mode, valid_modes)) + + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) + if mode == 'fan_in': + return fan_in + else: + return fan_out + + +def kaiming_uniform(tensor, a=0, mode='fan_in'): + """Fills the input Tensor or Variable with values according to the method described in "Delving deep into rectifiers: + Surpassing human-level performance on ImageNet classification" - He, K. et al using a uniform distribution. + + The resulting tensor will have values sampled from U(-bound, bound) + where bound = sqrt(2/((1 + a^2) * fan_in)) * sqrt(3) + + Args: + tensor: a n-dimension torch.Tensor + a: the coefficient of the slope of the rectifier used after this layer (0 for ReLU by default) + mode: either 'fan_in' (default) or 'fan_out'. Choosing `fan_in` preserves the magnitude of the variance of the + weights in the forward pass. Choosing `fan_out` preserves the magnitudes in the backwards pass. + + Examples: + >>> w = torch.Tensor(3, 5) + >>> nninit.kaiming_uniform(w, mode='fan_in') + """ + if isinstance(tensor, Variable): + kaiming_uniform(tensor.data, a=a, mode=mode) + return tensor + + fan = _calculate_correct_fan(tensor, mode) + std = math.sqrt(2.0 / ((1 + a ** 2) * fan)) + bound = math.sqrt(3.0) * std + return tensor.uniform_(-bound, bound) + + +def kaiming_normal(tensor, a=0, mode='fan_in'): + """Fills the input Tensor or Variable with values according to the method described in "Delving deep into rectifiers: + Surpassing human-level performance on ImageNet classification" - He, K. et al using a normal distribution. + + The resulting tensor will have values sampled from normal distribution with mean=0 and + std = sqrt(2/((1 + a^2) * fan_in)) + + Args: + tensor: a n-dimension torch.Tensor + a: the coefficient of the slope of the rectifier used after this layer (0 for ReLU by default) + mode: either 'fan_in' (default) or 'fan_out'. Choosing `fan_in` preserves the magnitude of the variance of the + weights in the forward pass. Choosing `fan_out` preserves the magnitudes in the backwards pass. + + Examples: + >>> w = torch.Tensor(3, 5) + >>> nninit.kaiming_normal(w, mode='fan_out') + """ + if isinstance(tensor, Variable): + kaiming_normal(tensor.data, a=a, mode=mode) + return tensor + + fan = _calculate_correct_fan(tensor, mode) + std = math.sqrt(2.0 / ((1 + a ** 2) * fan)) + return tensor.normal_(0, std) + + +def orthogonal(tensor, gain=1): + """Fills the input Tensor or Variable with a (semi) orthogonal matrix. The input tensor must have at least 2 dimensions, + and for tensors with more than 2 dimensions the trailing dimensions are flattened. 
viewed as 2D representation + with rows equal to the first dimension and columns equal to the product of as a sparse matrix, where the + non-zero elements will be drawn from a normal distribution with mean=0 and std=`std`. + Reference: "Exact solutions to the nonlinear dynamics of learning in deep linear neural networks"-Saxe, A. et al. + + Args: + tensor: a n-dimension torch.Tensor, where n >= 2 + gain: optional gain to be applied + + Examples: + >>> w = torch.Tensor(3, 5) + >>> nninit.orthogonal(w) + """ + if isinstance(tensor, Variable): + orthogonal(tensor.data, gain=gain) + return tensor + + if tensor.ndimension() < 2: + raise ValueError("Only tensors with 2 or more dimensions are supported.") + rows = tensor.size(0) + cols = tensor[0].numel() + flattened = torch.Tensor(rows, cols).normal_(0, 1) + + u, s, v = torch.svd(flattened, some=True) + if u.is_same_size(flattened): + tensor.view_as(u).copy_(u) + else: + tensor.view_as(v.t()).copy_(v.t()) + + tensor.mul_(gain) + return tensor + + +def sparse(tensor, sparsity, std=0.01): + """Fills the 2D input Tensor or Variable as a sparse matrix, where the non-zero elements will be drawn from a + normal distribution with mean=0 and std=`std`. + + Args: + tensor: a n-dimension torch.Tensor + sparsity: The fraction of elements in each column to be set to zero + std: the standard deviation of the normal distribution used to generate the non-zero values + + Examples: + >>> w = torch.Tensor(3, 5) + >>> nninit.sparse(w, sparsity=0.1) + """ + if isinstance(tensor, Variable): + sparse(tensor.data, sparsity, std=std) + return tensor + + if tensor.ndimension() != 2: + raise ValueError("Sparse initialization only supported for 2D inputs") + tensor.normal_(0, std) + rows, cols = tensor.size(0), tensor.size(1) + num_zeros = int(math.ceil(cols * sparsity)) + + for col_idx in range(tensor.size(1)): + row_indices = list(range(rows)) + random.shuffle(row_indices) + zero_indices = row_indices[:num_zeros] + for row_idx in zero_indices: + tensor[row_idx, col_idx] = 0 + + return tensor
diff --git a/test/common.py b/test/common.py --- a/test/common.py +++ b/test/common.py @@ -3,6 +3,7 @@ import argparse import unittest import contextlib +from functools import wraps from itertools import product from copy import deepcopy @@ -31,6 +32,24 @@ def run_tests(): except ImportError: TEST_NUMPY = False +TEST_SCIPY = True +try: + import scipy +except ImportError: + TEST_SCIPY = False + + +def skipIfNoLapack(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + try: + fn(*args, **kwargs) + except Exception as e: + if 'Lapack library not found' in e.args[0]: + raise unittest.SkipTest('Compiled without Lapack') + raise + return wrapper + def get_cpu_type(t): assert t.__module__ == 'torch.cuda' diff --git a/test/test_nn.py b/test/test_nn.py --- a/test/test_nn.py +++ b/test/test_nn.py @@ -1,24 +1,29 @@ import math -import torch import random import unittest import itertools import contextlib from copy import deepcopy from itertools import repeat, product -from functools import wraps +from functools import wraps, reduce +from operator import mul +import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.parallel as dp +import torch.nn.init as init import torch.nn.utils.rnn as rnn_utils from torch.nn.utils import clip_grad_norm from torch.autograd import Variable from torch.nn import Parameter from common_nn import NNTestCase, ModuleTest, CriterionTest, TestBase, \ module_tests, criterion_tests, TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, \ - TEST_CUDNN_VERSION, PRECISION -from common import freeze_rng_state, run_tests + TEST_CUDNN_VERSION +from common import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, TEST_SCIPY + +if TEST_SCIPY: + from scipy import stats def default_tensor_type(type): @@ -33,12 +38,13 @@ def wrapper(*args, **kwargs): return fn(*args, **kwargs) finally: torch.set_default_tensor_type(old_type) + return wrapper + return decorator class InputVariableMixin(object): - def _get_input(self): input = TestBase._get_input(self) @@ -49,11 +55,11 @@ def map_variables(i): return Variable(i, requires_grad=True) else: return type(i)(map_variables(elem) for elem in i) + return map_variables(input) class NewModuleTest(InputVariableMixin, ModuleTest): - def __init__(self, *args, **kwargs): super(NewModuleTest, self).__init__(*args, **kwargs) self.cudnn = kwargs.get('cudnn', False) @@ -169,7 +175,6 @@ def _get_target(self, target): class TestNN(NNTestCase): - def _forward(self, module, input): with freeze_rng_state(): return module(input) @@ -405,12 +410,12 @@ def num_params(module): return len(list(module.parameters())) class Net(nn.Module): - def __init__(self): super(Net, self).__init__() self.l1 = l self.l2 = l self.param = Parameter(torch.Tensor(3, 5)) + l = nn.Linear(10, 20) n = Net() s = nn.Sequential(n, n, n, n) @@ -420,12 +425,12 @@ def __init__(self): def test_modules(self): class Net(nn.Module): - def __init__(self): super(Net, self).__init__() self.l1 = l self.l2 = l self.param = Variable(torch.Tensor(3, 5)) + l = nn.Linear(10, 20) n = Net() s = nn.Sequential(n, n, n, n) @@ -454,6 +459,7 @@ def check(): self.assertIs(m1, m2) for i in range(len(modules)): self.assertIs(module_list[i], modules[i]) + check() modules += [nn.Conv2d(3, 4, 3)] module_list += [modules[-1]] @@ -488,6 +494,7 @@ def check(): self.assertIs(p1, p2) for i in range(len(parameters)): self.assertIs(parameters[i], param_list[i]) + check() parameters += [make_param()] param_list += [parameters[-1]] @@ -551,6 +558,7 @@ def test_non_leaf_parameters(self): def assign_weight(): l2.weight = 
l1.weight + 2 + self.assertRaises(TypeError, assign_weight) # This should work though l2.weight = Parameter(torch.randn(10, 10)) @@ -841,9 +849,9 @@ def fn(input): return [input, (input.sin(), input.cos(), [input.add(1)]), input] class Net(nn.Module): - def forward(self, input): return fn(input) + i = Variable(torch.randn(2, 2).float().cuda(1)) gpus = range(torch.cuda.device_count()) output = dp.data_parallel(Net(), i, gpus) @@ -862,9 +870,9 @@ def fn(input): return input[1][0] class Net(nn.Module): - def forward(self, *input): return fn(input) + i = Variable(torch.randn(20, 3).float().cuda(1)) input = (i.cos(), (i.sin(), i), i.sin()) gpus = range(torch.cuda.device_count()) @@ -956,6 +964,7 @@ def test_parameter_assignment(self): def num_params(): return len(list(l.parameters())) + self.assertEqual(num_params(), 2) new_param = Parameter(torch.randn(5, 5)) @@ -977,6 +986,7 @@ def num_params(): # It shouldn't be possible to replace a parameter with a Variable def assign_var(): l.param_attr = Variable(torch.Tensor(5, 5)) + self.assertRaises(TypeError, assign_var) # But replacing it with None should be fine l.param_attr = None @@ -1072,12 +1082,10 @@ def test_MaxUnpool2d_output_size(self): size = torch.LongStorage((1, 1) + size) mu(output_small, indices_small, output_size=size) else: - self.assertRaises(ValueError, lambda: - mu(output_small, indices_small, (h, w))) + self.assertRaises(ValueError, lambda: mu(output_small, indices_small, (h, w))) def test_container_copy(self): class Model(nn.Module): - def __init__(self): super(Model, self).__init__() self.linear = nn.Linear(4, 5) @@ -1567,6 +1575,277 @@ def test_batchnorm_eval(self): self.assertEqual(grad1, grad2) +class TestNNInit(TestCase): + def setUp(self): + random.seed(123) + torch.manual_seed(123) + + def _is_normal(self, tensor, mean, std): + if isinstance(tensor, Variable): + tensor = tensor.data + samples = list(tensor.view(-1)) + p_value = stats.kstest(samples, 'norm', args=(mean, std)).pvalue + return p_value > 0.0001 + + def _is_uniform(self, tensor, a, b): + if isinstance(tensor, Variable): + tensor = tensor.data + samples = list(tensor.view(-1)) + p_value = stats.kstest(samples, 'uniform', args=(a, (b - a))).pvalue + return p_value > 0.0001 + + def _create_random_nd_tensor(self, dims, size_min, size_max, as_variable): + size = [random.randint(size_min, size_max) for _ in range(dims)] + tensor = torch.zeros(size) + if as_variable: + tensor = Variable(tensor) + return tensor + + def _random_float(self, a, b): + return (b - a) * random.random() + a + + @unittest.skipIf(not TEST_SCIPY, "Scipy not found.") + def test_uniform(self): + for as_variable in [True, False]: + for dims in [1, 2, 4]: + input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50, as_variable=as_variable) + a = self._random_float(-3, 3) + b = a + self._random_float(1, 5) + init.uniform(input_tensor, a=a, b=b) + assert self._is_uniform(input_tensor, a, b) + + @unittest.skipIf(not TEST_SCIPY, "Scipy not found.") + def test_normal(self): + for as_variable in [True, False]: + for dims in [1, 2, 4]: + input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50, as_variable=as_variable) + mean = self._random_float(-3, 3) + std = self._random_float(1, 5) + init.normal(input_tensor, mean=mean, std=std) + + assert self._is_normal(input_tensor, mean, std) + + def test_constant(self): + for as_variable in [True, False]: + for dims in [1, 2, 4]: + input_tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=5, 
as_variable=as_variable) + val = self._random_float(1, 10) + init.constant(input_tensor, val) + if as_variable: + input_tensor = input_tensor.data + + self.assertEqual(input_tensor, input_tensor.clone().fill_(val)) + + def test_xavier_uniform_errors_on_inputs_smaller_than_2d(self): + for as_variable in [True, False]: + for dims in [0, 1]: + tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1, as_variable=as_variable) + with self.assertRaises(ValueError): + init.xavier_uniform(tensor) + + def test_xavier_normal_errors_on_inputs_smaller_than_2d(self): + for as_variable in [True, False]: + for dims in [0, 1]: + tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1, as_variable=as_variable) + with self.assertRaises(ValueError): + init.xavier_normal(tensor) + + @unittest.skipIf(not TEST_SCIPY, "Scipy not found.") + def test_xavier_uniform(self): + for as_variable in [True, False]: + for use_gain in [True, False]: + for dims in [2, 4]: + input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25, + as_variable=as_variable) + gain = 1 + + if use_gain: + gain = self._random_float(0.1, 2) + init.xavier_uniform(input_tensor, gain=gain) + else: + init.xavier_uniform(input_tensor) + + if as_variable: + input_tensor = input_tensor.data + + fan_in = input_tensor.size(1) + fan_out = input_tensor.size(0) + if input_tensor.dim() > 2: + fan_in *= input_tensor[0, 0].numel() + fan_out *= input_tensor[0, 0].numel() + + expected_std = gain * math.sqrt(2.0 / (fan_in + fan_out)) + bounds = expected_std * math.sqrt(3) + assert self._is_uniform(input_tensor, -bounds, bounds) + + @unittest.skipIf(not TEST_SCIPY, "Scipy not found.") + def test_xavier_normal(self): + for as_variable in [True, False]: + for use_gain in [True, False]: + for dims in [2, 4]: + input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25, + as_variable=as_variable) + gain = 1 + + if use_gain: + gain = self._random_float(0.1, 2) + init.xavier_normal(input_tensor, gain=gain) + else: + init.xavier_normal(input_tensor) + + if as_variable: + input_tensor = input_tensor.data + + fan_in = input_tensor.size(1) + fan_out = input_tensor.size(0) + if input_tensor.dim() > 2: + fan_in *= input_tensor[0, 0].numel() + fan_out *= input_tensor[0, 0].numel() + + expected_std = gain * math.sqrt(2.0 / (fan_in + fan_out)) + assert self._is_normal(input_tensor, 0, expected_std) + + def test_kaiming_uniform_errors_on_inputs_smaller_than_2d(self): + for as_variable in [True, False]: + for dims in [0, 1]: + with self.assertRaises(ValueError): + tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1, as_variable=as_variable) + init.kaiming_uniform(tensor) + + def test_kaiming_normal_errors_on_inputs_smaller_than_2d(self): + for as_variable in [True, False]: + for dims in [0, 1]: + with self.assertRaises(ValueError): + tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1, as_variable=as_variable) + init.kaiming_normal(tensor) + + @unittest.skipIf(not TEST_SCIPY, "Scipy not found.") + def test_kaiming_uniform(self): + for as_variable in [True, False]: + for use_a in [True, False]: + for dims in [2, 4]: + for mode in ['fan_in', 'fan_out']: + input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25, + as_variable=as_variable) + if use_a: + a = self._random_float(0.1, 2) + init.kaiming_uniform(input_tensor, a=a, mode=mode) + else: + a = 0 + init.kaiming_uniform(input_tensor, mode=mode) + + if as_variable: + input_tensor = input_tensor.data + + fan_in = 
input_tensor.size(1) + fan_out = input_tensor.size(0) + if input_tensor.dim() > 2: + fan_in *= input_tensor[0, 0].numel() + fan_out *= input_tensor[0, 0].numel() + + if mode == 'fan_in': + n = fan_in + else: + n = fan_out + + expected_std = math.sqrt(2.0 / ((1 + a**2) * n)) + bounds = expected_std * math.sqrt(3.0) + assert self._is_uniform(input_tensor, -bounds, bounds) + + @unittest.skipIf(not TEST_SCIPY, "Scipy not found.") + def test_kaiming_normal(self): + for as_variable in [True, False]: + for use_a in [True, False]: + for dims in [2, 4]: + for mode in ['fan_in', 'fan_out']: + input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25, + as_variable=as_variable) + if use_a: + a = self._random_float(0.1, 2) + init.kaiming_normal(input_tensor, a=a, mode=mode) + else: + a = 0 + init.kaiming_normal(input_tensor, mode=mode) + + if as_variable: + input_tensor = input_tensor.data + + fan_in = input_tensor.size(1) + fan_out = input_tensor.size(0) + if input_tensor.dim() > 2: + fan_in *= input_tensor[0, 0].numel() + fan_out *= input_tensor[0, 0].numel() + + if mode == 'fan_in': + n = fan_in + else: + n = fan_out + + expected_std = math.sqrt(2.0 / ((1 + a**2) * n)) + assert self._is_normal(input_tensor, 0, expected_std) + + def test_sparse_only_works_on_2d_inputs(self): + for as_variable in [True, False]: + for dims in [1, 3]: + with self.assertRaises(ValueError): + sparsity = self._random_float(0.1, 0.9) + tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=3, as_variable=as_variable) + init.sparse(tensor, sparsity) + + @unittest.skipIf(not TEST_SCIPY, "Scipy not found.") + def test_sparse_default_std(self): + for as_variable in [True, False]: + for use_random_std in [True, False]: + input_tensor = self._create_random_nd_tensor(2, size_min=30, size_max=35, as_variable=as_variable) + rows, cols = input_tensor.size(0), input_tensor.size(1) + sparsity = self._random_float(0.1, 0.2) + + std = 0.01 # default std + if use_random_std: + std = self._random_float(0.01, 0.2) + init.sparse(input_tensor, sparsity=sparsity, std=std) + else: + init.sparse(input_tensor, sparsity=sparsity) + + if as_variable: + input_tensor = input_tensor.data + + for col_idx in range(input_tensor.size(1)): + column = input_tensor[:, col_idx] + assert column[column == 0].nelement() >= math.ceil(sparsity * cols) + + assert self._is_normal(input_tensor[input_tensor != 0], 0, std) + + @skipIfNoLapack + def test_orthogonal(self): + for as_variable in [True, False]: + for use_gain in [True, False]: + for tensor_size in [[3, 4], [4, 3], [20, 2, 3, 4], [2, 3, 4, 5]]: + input_tensor = torch.zeros(tensor_size) + gain = 1.0 + + if as_variable: + input_tensor = Variable(input_tensor) + + if use_gain: + gain = self._random_float(0.1, 2) + init.orthogonal(input_tensor, gain=gain) + else: + init.orthogonal(input_tensor) + + if as_variable: + input_tensor = input_tensor.data + + rows, cols = tensor_size[0], reduce(mul, tensor_size[1:]) + flattened_tensor = input_tensor.view(rows, cols) + if rows > cols: + self.assertEqual(torch.mm(flattened_tensor.t(), flattened_tensor), + torch.eye(cols) * gain ** 2, prec=1e-6) + else: + self.assertEqual(torch.mm(flattened_tensor, flattened_tensor.t()), + torch.eye(rows) * gain ** 2, prec=1e-6) + + def add_test(test): test_name = test.get_name() cuda_test_name = test_name + '_cuda' @@ -1957,7 +2236,6 @@ def add_test(test): ), ] - for test_params in module_tests + new_module_tests: # TODO: CUDA is not implemented yet if 'constructor' not in test_params: @@ -1973,7 +2251,6 @@ 
def add_test(test): class UnpoolingNet(nn.Module): - def __init__(self, pool, unpool): super(UnpoolingNet, self).__init__() self.pool = pool @@ -2002,6 +2279,5 @@ def forward(self, input): input_size=(1, 1, 2, 4, 6), fullname='MaxUnpool3d_net')) - if __name__ == '__main__': run_tests() diff --git a/test/test_torch.py b/test/test_torch.py --- a/test/test_torch.py +++ b/test/test_torch.py @@ -7,9 +7,8 @@ import tempfile import unittest import warnings -from itertools import product, chain -from functools import wraps -from common import TestCase, iter_indices, TEST_NUMPY, run_tests, download_file +from itertools import product +from common import TestCase, iter_indices, TEST_NUMPY, run_tests, download_file, skipIfNoLapack if TEST_NUMPY: import numpy as np @@ -17,18 +16,6 @@ SIZE = 100 -def skipIfNoLapack(fn): - @wraps(fn) - def wrapper(*args, **kwargs): - try: - fn(*args, **kwargs) - except Exception as e: - if 'Lapack library not found' in e.args[0]: - raise unittest.SkipTest('Compiled without Lapack') - raise - return wrapper - - class TestTorch(TestCase): def test_dot(self):
weight initializations for conv2d and linear

Allow different weight initialization schemes for conv2d and linear. See https://github.com/Kaixhin/nninit for some schemes.
I thought to add another class called `Initializers` with a function to which we could pass the weights and a string with the `initialization` name, and also add a string argument called "initialization" to every layer. In a Linear layer the init should look something like this:

```python
def __init__(self, in_features, out_features, bias=True, initializer=None):
    super(Linear, self).__init__()
    self.in_features = in_features
    self.out_features = out_features
    self.weight = Parameter(torch.Tensor(out_features, in_features))
    Initializers.initialize(self.weight, initializer)
    if bias:
        self.bias = Parameter(torch.Tensor(out_features))
    else:
        self.register_parameter('bias', None)
    self.reset_parameters()
```

Do you think this is the best approach? If not, what would be the best way to do this?

I think it's better to pass in a function as the initializer and call it with the weights as argument. It's clean, verbose, and you don't have to do error checking for invalid/incompatible strings. For example:

```python
nn.Linear(20, 20, weight_init=nn.init.orthogonal)
```

Do you want to implement it, or was your comment only a suggestion?

Chainer has a nice approach, where the initializer argument is a little polymorphic (it can be a callable or a scalar/matrix value). It looks like this:

```python
super().__init__(
    maxout=Maxout(maxout_size, maxout_size, 2,
                  initialW=initializers.GlorotUniform()),
    softmax_linear=Linear(maxout_size, task.num_classes, initialW=0))
```

@apaszke, I am thinking of implementing it. I think that the string-like approach is cleaner, but we could go the way Chainer implements it, as shown by @jekbradbury, with strings or class names being accepted. What do you guys think?
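For comparison with these proposals, the patch above ends up exposing standalone functions in `torch/nn/init.py` that mutate an existing tensor or Variable in place rather than taking a string or constructor argument. A minimal usage sketch (the layer shapes here are made up for illustration; the function names come from the new module):

```python
import math
import torch.nn as nn
import torch.nn.init as init

conv = nn.Conv2d(3, 16, kernel_size=3)
linear = nn.Linear(20, 20)

# Re-initialize parameters in place after the modules are constructed;
# the init functions accept both Tensors and Variables (hence Parameters).
init.kaiming_normal(conv.weight, mode='fan_in')
init.constant(conv.bias, 0)
init.xavier_uniform(linear.weight, gain=math.sqrt(2.0))
init.constant(linear.bias, 0)
```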
2017-02-23T10:22:39
pytorch/pytorch
855
pytorch__pytorch-855
[ "838" ]
bd7a5ad6f070de9df72496b7b6ebbfc0542082b1
diff --git a/tools/cwrap/plugins/THPPlugin.py b/tools/cwrap/plugins/THPPlugin.py --- a/tools/cwrap/plugins/THPPlugin.py +++ b/tools/cwrap/plugins/THPPlugin.py @@ -303,8 +303,6 @@ def generate_out_options(self, declaration): def process_declarations(self, declarations): new_declarations = [] - register_only = [d for d in declarations if d.get('only_register', False)] - declarations = [d for d in declarations if not d.get('only_register', False)] def has_arg_type(declaration, type_name): return any(arg['type'] == type_name @@ -322,8 +320,16 @@ def has_output_args(declaration): for arg in option['arguments']) for declaration in declarations: + # Disable all methods for THHalfTensor, unless cpu_half is True + if not declaration.get('cpu_half', False): + defined_if = '!defined(TH_REAL_IS_HALF)' + if 'defined_if' in declaration: + defined_if += ' && (' + declaration['defined_if'] + ')' + declaration['defined_if'] = defined_if + if declaration.get('only_register', False): continue + declaration.setdefault('python_name', declaration['name']) declaration.setdefault('variables', []) if has_arg_type(declaration, 'THSize*'): @@ -353,7 +359,9 @@ def has_output_args(declaration): if arg['name'] == 'self': arg['ignore_check'] = True - declarations = [d for d in declarations if not d.get('only_stateless', False)] + register_only = [d for d in declarations if d.get('only_register', False)] + declarations = [d for d in declarations + if (not d.get('only_stateless', False)) and (not d.get('only_register', False))] self.declarations.extend(filter(lambda x: not x.get('only_stateless', False), register_only)) self.stateless_declarations.extend(filter(lambda x: x.get('only_stateless', False), register_only)) @@ -390,11 +398,14 @@ def declare_methods(self, stateless, sparse): if 'defined_if' in declaration: entry = self.preprocessor_guard(entry, declaration['defined_if']) tensor_methods += entry - return self.TENSOR_METHODS_DECLARATION.substitute( + generated = self.TENSOR_METHODS_DECLARATION.substitute( methods=tensor_methods, stateless=('' if not stateless else 'stateless_'), sparse=('' if not sparse else 'S'), ) + if sparse: + generated = '#ifndef TH_REAL_IS_HALF\n' + generated + '\n#endif\n\n' + return generated def process_full_file(self, code): # We have to find a place before all undefs diff --git a/torch/__init__.py b/torch/__init__.py --- a/torch/__init__.py +++ b/torch/__init__.py @@ -151,6 +151,10 @@ class FloatStorage(_C.FloatStorageBase, _StorageBase): pass +class HalfStorage(_C.HalfStorageBase, _StorageBase): + pass + + class LongStorage(_C.LongStorageBase, _StorageBase): pass @@ -191,6 +195,16 @@ def storage_type(cls): return FloatStorage +class HalfTensor(_C.HalfTensorBase, _TensorBase): + + def is_signed(self): + return True + + @classmethod + def storage_type(cls): + return HalfStorage + + class LongTensor(_C.LongTensorBase, _TensorBase): def is_signed(self):
diff --git a/test/test_torch.py b/test/test_torch.py --- a/test/test_torch.py +++ b/test/test_torch.py @@ -2577,6 +2577,35 @@ def test_serialization(self): rootview = c[8] self.assertEqual(rootview.data_ptr(), c[0].data_ptr()) + def test_half_tensor(self): + x = torch.randn(5, 5).float() + y = torch.randn(5, 5).float() + xh, yh = x.half(), y.half() + + self.assertEqual(x.half().float(), x, 1e-3) + + z = torch.Tensor(5, 5) + self.assertEqual(z.copy_(xh), x, 1e-3) + + with tempfile.NamedTemporaryFile() as f: + torch.save(xh, f) + f.seek(0) + xh2 = torch.load(f) + self.assertEqual(xh, xh2) + + @unittest.skipIf(not torch.cuda.is_available(), 'no CUDA') + def test_half_tensor_cuda(self): + x = torch.randn(5, 5).half() + self.assertEqual(x.cuda().cpu(), x) + + xc = x.cuda() + with tempfile.NamedTemporaryFile() as f: + torch.save(xc, f) + f.seek(0) + xc2 = torch.load(f) + self.assertIsInstance(xc2, type(xc)) + self.assertEqual(xc, xc2) + @unittest.skipIf(not torch.cuda.is_available(), 'no CUDA') def test_serialization_cuda(self): device_count = torch.cuda.device_count()
expose CPU HalfTensor, fix GPU HalfTensor serialization

An issue to track exposing CPU half tensors. Note: CPU half tensors don't have math.

cc: @ajbrock

Right now, calling `.cpu()` on a `torch.cuda.HalfTensor` gives an error:

```
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-8-29bf7ba9ae5b> in <module>()
----> 1 c.cpu()

/home/fatbox/miniconda2/lib/python2.7/site-packages/torch/tensor.pyc in cpu(self)
     48     def cpu(self):
     49         """Returns a CPU copy of this tensor if it's not already on the CPU"""
---> 50         return self.type(getattr(torch, self.__class__.__name__))
     51
     52     def double(self):

AttributeError: 'module' object has no attribute 'HalfTensor'
```

Also, saving a `torch.cuda.HalfTensor` with `torch.save` gives the error:

```
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-9-ed138460cf2d> in <module>()
----> 1 torch.save(c, 'c.pth')

/home/fatbox/miniconda2/lib/python2.7/site-packages/torch/serialization.pyc in save(obj, f, pickle_module, pickle_protocol)
    121         f = open(f, "wb")
    122     try:
--> 123         return _save(obj, f, pickle_module, pickle_protocol)
    124     finally:
    125         if new_fd:

/home/fatbox/miniconda2/lib/python2.7/site-packages/torch/serialization.pyc in _save(obj, f, pickle_module, pickle_protocol)
    216     _add_to_tar(pickle_objects, tar, 'pickle')
    217     _add_to_tar(save_tensors, tar, 'tensors')
--> 218     _add_to_tar(save_storages, tar, 'storages')
    219
    220

/home/fatbox/miniconda2/lib/python2.7/site-packages/torch/serialization.pyc in _add_to_tar(fn, tar_file, name)
     29 def _add_to_tar(fn, tar_file, name):
     30     tmp_file = tempfile.NamedTemporaryFile(delete=False)
---> 31     fn(tmp_file)
     32     tmp_file.close()
     33

/home/fatbox/miniconda2/lib/python2.7/site-packages/torch/serialization.pyc in save_storages(f)
    187     for key, storage in serialized_storages.items():
    188         location = location_tag(storage)
--> 189         storage_type = normalize_storage_type(type(storage))
    190         pickle_module.dump((key, location, storage_type), f,
    191                            protocol=pickle_protocol)

/home/fatbox/miniconda2/lib/python2.7/site-packages/torch/serialization.pyc in normalize_storage_type(storage_type)
     97
     98 def normalize_storage_type(storage_type):
---> 99     return getattr(torch, storage_type.__name__)
    100
    101

AttributeError: 'module' object has no attribute 'HalfStorage'
```
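For reference, the new tests above exercise roughly the following round-trips once `torch.HalfTensor` and `torch.HalfStorage` are exposed. This is only a sketch of the expected usage, not part of the report; half precision is lossy, so float comparisons need a tolerance of about 1e-3:

```python
import torch

x = torch.randn(5, 5)

# CPU half tensor and back to float (lossy round-trip)
xh = x.half()
x_again = xh.float()

# Moving a half tensor between CPU and GPU
xc = xh.cuda()
x_back = xc.cpu()

# Serializing half tensors now works like the other types
torch.save(xh, 'xh.pth')
xh2 = torch.load('xh.pth')
```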
2017-02-26T12:51:26
pytorch/pytorch
928
pytorch__pytorch-928
[ "786" ]
0b7374eb4454aff624d5afea73b0c8293c832c0d
diff --git a/torch/__init__.py b/torch/__init__.py --- a/torch/__init__.py +++ b/torch/__init__.py @@ -31,6 +31,13 @@ # automatically filled by the dynamic loader. import os as _dl_flags +# if we have numpy, it *must* be imported before the call to setdlopenflags() +# or there is risk that later c modules will segfault when importing numpy +try: + import numpy as np +except: + pass + # first check if the os package has the required flags if not hasattr(_dl_flags, 'RTLD_GLOBAL') or not hasattr(_dl_flags, 'RTLD_NOW'): try:
Importing scipy.misc after torch causes segfault

As shown below. Is this something expected? This seems most closely related to https://github.com/pytorch/pytorch/issues/595, but that issue was closed without any specific solution.

```
(venv) $ pip freeze
appdirs==1.4.0
numpy==1.12.0
packaging==16.8
pyparsing==2.1.10
PyYAML==3.12
scipy==0.18.1
six==1.10.0
torch==0.1.9.post2
(venv) $ python -c 'import torch, scipy.misc'
Segmentation fault (core dumped)
(venv) $ python -c 'import scipy.misc, torch'
(venv) $ echo 'import torch, scipy.misc' > spam.py
(venv) $ python spam.py
Segmentation fault (core dumped)
(venv) $ gdb --args python spam.py
GNU gdb (Ubuntu 7.7.1-0ubuntu5~14.04.2) 7.7.1
Copyright (C) 2014 Free Software Foundation, Inc.
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
Type "show copying" and "show warranty" for details.
This GDB was configured as "x86_64-linux-gnu".
Type "show configuration" for configuration details.
For bug reporting instructions, please see:
<http://www.gnu.org/software/gdb/bugs/>.
Find the GDB manual and other documentation resources online at:
<http://www.gnu.org/software/gdb/documentation/>.
For help, type "help".
Type "apropos word" to search for commands related to "word"...
Reading symbols from python...(no debugging symbols found)...done.
(gdb) run
Starting program: /tmp/venv/bin/python spam.py
[Thread debugging using libthread_db enabled]
Using host libthread_db library "/lib/x86_64-linux-gnu/libthread_db.so.1".
[New Thread 0x7fffc7088700 (LWP 5549)]
[New Thread 0x7fffc6887700 (LWP 5550)]
[New Thread 0x7fffc4086700 (LWP 5551)]
[New Thread 0x7fffc1885700 (LWP 5552)]
[New Thread 0x7fffbf084700 (LWP 5553)]
[New Thread 0x7fffbc883700 (LWP 5554)]
[New Thread 0x7fffba082700 (LWP 5555)]
[New Thread 0x7fffb7881700 (LWP 5556)]
[New Thread 0x7fffb5080700 (LWP 5557)]
[New Thread 0x7fffb287f700 (LWP 5558)]
[New Thread 0x7fffb007e700 (LWP 5559)]

Program received signal SIGSEGV, Segmentation fault.
0x00007fffc9d65fc0 in PyArray_API () from /tmp/venv/local/lib/python2.7/site-packages/numpy/core/multiarray.so
```
Importing `scipy.ndimage` after `torch` also causes a segfault:

```
(venv) $ python -c 'import torch, scipy.ndimage'
Segmentation fault (core dumped)
```

This happens with sklearn as well: `python -c 'import torch, sklearn'` gives `Segmentation fault (core dumped)`. Reversing the order of the imports seems to fix things for me, though.

Can you reinstall sklearn or scipy? I think I've seen a case where that helped.

I've tried reinstalling things; it doesn't fix it. Could it be because scikit seems to build against OpenBLAS and not MKL?

I don't think so, it's a problem with numpy initialization. AFAIK the same was happening with TF, not sure if it's solved now.

Same here: importing scipy or sklearn after torch causes the error, but it's fine if these two are loaded first.
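Two practical takeaways from this thread, as a sketch: the user-side workaround is simply to import the numpy-based packages before torch, and the fix in the patch above applies the same idea inside `torch/__init__.py` by importing numpy (wrapped in try/except so it stays optional) before the dlopen flags are changed.

```python
# User-side workaround reported in this thread: make sure numpy's C
# extensions are loaded before torch changes the dlopen flags.
import scipy.misc   # or sklearn, scipy.ndimage, ... (any module that pulls in numpy)
import torch
```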
2017-03-05T16:10:30