repo (string, 856 distinct values) | pull_number (int64, 3–127k) | instance_id (string, length 12–58) | issue_numbers (sequence, length 1–5) | base_commit (string, length 40) | patch (string, length 67–1.54M) | test_patch (string, length 0–107M) | problem_statement (string, length 3–307k) | hints_text (string, length 0–908k) | created_at (timestamp[s]) |
---|---|---|---|---|---|---|---|---|---|
zulip/zulip | 19,852 | zulip__zulip-19852 | [
"19833"
] | 306011a963e36c39c24a0e94f0de494969c8d4aa | diff --git a/zerver/data_import/slack.py b/zerver/data_import/slack.py
--- a/zerver/data_import/slack.py
+++ b/zerver/data_import/slack.py
@@ -1410,7 +1410,7 @@ def get_slack_api_data(slack_api_url: str, get_param: str, **kwargs: Any) -> Any
if not kwargs.get("token"):
raise AssertionError("Slack token missing in kwargs")
token = kwargs.pop("token")
- data = requests.get(slack_api_url, headers={"Authorization": f"Bearer {token}"}, **kwargs)
+ data = requests.get(slack_api_url, headers={"Authorization": f"Bearer {token}"}, params=kwargs)
if data.status_code == requests.codes.ok:
result = data.json()
| diff --git a/zerver/tests/test_slack_importer.py b/zerver/tests/test_slack_importer.py
--- a/zerver/tests/test_slack_importer.py
+++ b/zerver/tests/test_slack_importer.py
@@ -3,7 +3,8 @@
from io import BytesIO
from typing import Any, Dict, Iterator, List, Set, Tuple
from unittest import mock
-from unittest.mock import ANY, call
+from unittest.mock import ANY
+from urllib.parse import parse_qs, urlparse
import orjson
import responses
@@ -53,24 +54,101 @@ def remove_folder(path: str) -> None:
def request_callback(request: PreparedRequest) -> Tuple[int, Dict[str, str], bytes]:
- if request.url != "https://slack.com/api/users.list":
+ valid_endpoint = False
+ endpoints = [
+ "https://slack.com/api/users.list",
+ "https://slack.com/api/users.info",
+ "https://slack.com/api/team.info",
+ ]
+ for endpoint in endpoints:
+ if request.url and endpoint in request.url:
+ valid_endpoint = True
+ break
+ if not valid_endpoint:
return (404, {}, b"")
if request.headers.get("Authorization") != "Bearer xoxp-valid-token":
return (200, {}, orjson.dumps({"ok": False, "error": "invalid_auth"}))
- return (200, {}, orjson.dumps({"ok": True, "members": "user_data"}))
+ if request.url == "https://slack.com/api/users.list":
+ return (200, {}, orjson.dumps({"ok": True, "members": "user_data"}))
+
+ query_from_url = str(urlparse(request.url).query)
+ qs = parse_qs(query_from_url)
+ if request.url and "https://slack.com/api/users.info" in request.url:
+ user2team_dict = {
+ "U061A3E0G": "T6LARQE2Z",
+ "U061A8H1G": "T7KJRQE8Y",
+ "U8X25EBAB": "T5YFFM2QY",
+ }
+ try:
+ user_id = qs["user"][0]
+ team_id = user2team_dict[user_id]
+ except KeyError:
+ return (200, {}, orjson.dumps({"ok": False, "error": "user_not_found"}))
+ return (200, {}, orjson.dumps({"ok": True, "user": {"id": user_id, "team_id": team_id}}))
+ # Else, https://slack.com/api/team.info
+ team_not_found: Tuple[int, Dict[str, str], bytes] = (
+ 200,
+ {},
+ orjson.dumps({"ok": False, "error": "team_not_found"}),
+ )
+ try:
+ team_id = qs["team"][0]
+ except KeyError:
+ return team_not_found
+
+ team_dict = {
+ "T6LARQE2Z": "foreignteam1",
+ "T7KJRQE8Y": "foreignteam2",
+ }
+ try:
+ team_domain = team_dict[team_id]
+ except KeyError:
+ return team_not_found
+ return (200, {}, orjson.dumps({"ok": True, "team": {"id": team_id, "domain": team_domain}}))
class SlackImporter(ZulipTestCase):
@responses.activate
def test_get_slack_api_data(self) -> None:
token = "xoxp-valid-token"
+
+ # Users list
slack_user_list_url = "https://slack.com/api/users.list"
responses.add_callback(responses.GET, slack_user_list_url, callback=request_callback)
self.assertEqual(
get_slack_api_data(slack_user_list_url, "members", token=token), "user_data"
)
+
+ # Users info
+ slack_users_info_url = "https://slack.com/api/users.info"
+ user_id = "U8X25EBAB"
+ responses.add_callback(responses.GET, slack_users_info_url, callback=request_callback)
+ self.assertEqual(
+ get_slack_api_data(slack_users_info_url, "user", token=token, user=user_id),
+ {"id": user_id, "team_id": "T5YFFM2QY"},
+ )
+ # Should error if the required user argument is not specified
+ with self.assertRaises(Exception) as invalid:
+ get_slack_api_data(slack_users_info_url, "user", token=token)
+ self.assertEqual(invalid.exception.args, ("Error accessing Slack API: user_not_found",))
+ # Should error if the required user is not found
+ with self.assertRaises(Exception) as invalid:
+ get_slack_api_data(slack_users_info_url, "user", token=token, user="idontexist")
+ self.assertEqual(invalid.exception.args, ("Error accessing Slack API: user_not_found",))
+
+ # Team info
+ slack_team_info_url = "https://slack.com/api/team.info"
+ responses.add_callback(responses.GET, slack_team_info_url, callback=request_callback)
+ with self.assertRaises(Exception) as invalid:
+ get_slack_api_data(slack_team_info_url, "team", token=token, team="wedontexist")
+ self.assertEqual(invalid.exception.args, ("Error accessing Slack API: team_not_found",))
+ # Should error if the required user argument is not specified
+ with self.assertRaises(Exception) as invalid:
+ get_slack_api_data(slack_team_info_url, "team", token=token)
+ self.assertEqual(invalid.exception.args, ("Error accessing Slack API: team_not_found",))
+
token = "xoxp-invalid-token"
with self.assertRaises(Exception) as invalid:
get_slack_api_data(slack_user_list_url, "members", token=token)
@@ -140,10 +218,10 @@ def test_get_timezone(self) -> None:
self.assertEqual(get_user_timezone(user_no_timezone), "America/New_York")
@mock.patch("zerver.data_import.slack.get_data_file")
- @mock.patch("zerver.data_import.slack.get_slack_api_data")
@mock.patch("zerver.data_import.slack.get_messages_iterator")
+ @responses.activate
def test_fetch_shared_channel_users(
- self, messages_mock: mock.Mock, api_mock: mock.Mock, data_file_mock: mock.Mock
+ self, messages_mock: mock.Mock, data_file_mock: mock.Mock
) -> None:
users = [{"id": "U061A1R2R"}, {"id": "U061A5N1G"}, {"id": "U064KUGRJ"}]
data_file_mock.side_effect = [
@@ -153,19 +231,19 @@ def test_fetch_shared_channel_users(
],
[],
]
- api_mock.side_effect = [
- {"id": "U061A3E0G", "team_id": "T6LARQE2Z"},
- {"domain": "foreignteam1"},
- {"id": "U061A8H1G", "team_id": "T7KJRQE8Y"},
- {"domain": "foreignteam2"},
- ]
messages_mock.return_value = [
{"user": "U061A1R2R"},
{"user": "U061A5N1G"},
{"user": "U061A8H1G"},
]
+ # Users info
+ slack_users_info_url = "https://slack.com/api/users.info"
+ responses.add_callback(responses.GET, slack_users_info_url, callback=request_callback)
+ # Team info
+ slack_team_info_url = "https://slack.com/api/team.info"
+ responses.add_callback(responses.GET, slack_team_info_url, callback=request_callback)
slack_data_dir = self.fixture_file_name("", type="slack_fixtures")
- fetch_shared_channel_users(users, slack_data_dir, "token")
+ fetch_shared_channel_users(users, slack_data_dir, "xoxp-valid-token")
# Normal users
self.assert_length(users, 5)
@@ -176,20 +254,16 @@ def test_fetch_shared_channel_users(
self.assertEqual(users[2]["id"], "U064KUGRJ")
# Shared channel users
- self.assertEqual(users[3]["id"], "U061A3E0G")
- self.assertEqual(users[3]["team_domain"], "foreignteam1")
- self.assertEqual(users[3]["is_mirror_dummy"], True)
- self.assertEqual(users[4]["id"], "U061A8H1G")
- self.assertEqual(users[4]["team_domain"], "foreignteam2")
- self.assertEqual(users[4]["is_mirror_dummy"], True)
-
- api_calls = [
- call("https://slack.com/api/users.info", "user", token="token", user="U061A3E0G"),
- call("https://slack.com/api/team.info", "team", token="token", team="T6LARQE2Z"),
- call("https://slack.com/api/users.info", "user", token="token", user="U061A8H1G"),
- call("https://slack.com/api/team.info", "team", token="token", team="T7KJRQE8Y"),
- ]
- api_mock.assert_has_calls(api_calls, any_order=True)
+ # We need to do this because the outcome order of `users` list is
+ # not deterministic.
+ fourth_fifth = [users[3], users[4]]
+ fourth_fifth.sort(key=lambda x: x["id"])
+ self.assertEqual(fourth_fifth[0]["id"], "U061A3E0G")
+ self.assertEqual(fourth_fifth[0]["team_domain"], "foreignteam1")
+ self.assertEqual(fourth_fifth[0]["is_mirror_dummy"], True)
+ self.assertEqual(fourth_fifth[1]["id"], "U061A8H1G")
+ self.assertEqual(fourth_fifth[1]["team_domain"], "foreignteam2")
+ self.assertEqual(fourth_fifth[1]["is_mirror_dummy"], True)
@mock.patch("zerver.data_import.slack.get_data_file")
def test_users_to_zerver_userprofile(self, mock_get_data_file: mock.Mock) -> None:
| convert_slack_data issue
Got an error when I tried to convert a Slack archive:
`./manage.py convert_slack_data export-edited-3.zip --token xoxb-HIDED --output converted_slack_data --verbosity 3 --traceback
Converting data ...
Traceback (most recent call last):
File "./manage.py", line 52, in <module>
execute_from_command_line(sys.argv)
File "/srv/zulip-venv-cache/cc77818846b328558cc9444c67cbbe0121ca80f4/zulip-py3-venv/lib/python3.8/site-packages/django/core/management/__init__.py", line 419, in execute_from_command_line
utility.execute()
File "/srv/zulip-venv-cache/cc77818846b328558cc9444c67cbbe0121ca80f4/zulip-py3-venv/lib/python3.8/site-packages/django/core/management/__init__.py", line 413, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/srv/zulip-venv-cache/cc77818846b328558cc9444c67cbbe0121ca80f4/zulip-py3-venv/lib/python3.8/site-packages/django/core/management/base.py", line 354, in run_from_argv
self.execute(*args, **cmd_options)
File "/srv/zulip-venv-cache/cc77818846b328558cc9444c67cbbe0121ca80f4/zulip-py3-venv/lib/python3.8/site-packages/django/core/management/base.py", line 398, in execute
output = self.handle(*args, **options)
File "/home/zulip/deployments/2021-09-07-12-55-30/zerver/management/commands/convert_slack_data.py", line 56, in handle
do_convert_data(path, output_dir, token, threads=num_threads)
File "/home/zulip/deployments/2021-09-07-12-55-30/zerver/data_import/slack.py", line 1270, in do_convert_data
fetch_shared_channel_users(user_list, slack_data_dir, token)
File "/home/zulip/deployments/2021-09-07-12-55-30/zerver/data_import/slack.py", line 1182, in fetch_shared_channel_users
user = get_slack_api_data(
File "/home/zulip/deployments/2021-09-07-12-55-30/zerver/data_import/slack.py", line 1372, in get_slack_api_data
data = requests.get(
File "/srv/zulip-venv-cache/cc77818846b328558cc9444c67cbbe0121ca80f4/zulip-py3-venv/lib/python3.8/site-packages/requests/api.py", line 76, in get
return request('get', url, params=params, **kwargs)
File "/srv/zulip-venv-cache/cc77818846b328558cc9444c67cbbe0121ca80f4/zulip-py3-venv/lib/python3.8/site-packages/requests/api.py", line 61, in request
return session.request(method=method, url=url, **kwargs)
TypeError: request() got an unexpected keyword argument 'user'`
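For illustration, a minimal sketch of why the call fails and how passing the extra arguments via `params` avoids it (the token and user ID are placeholders):
```python
import requests

token = "xoxp-placeholder-token"  # placeholder, not a real token
slack_api_url = "https://slack.com/api/users.info"
kwargs = {"user": "U061A3E0G"}  # extra Slack API argument

# Fails: requests.get() forwards unknown keyword arguments to Session.request(),
# which does not accept `user` and raises TypeError.
# requests.get(slack_api_url, headers={"Authorization": f"Bearer {token}"}, **kwargs)

# Works: extra API arguments belong in the query string via `params`.
data = requests.get(
    slack_api_url,
    headers={"Authorization": f"Bearer {token}"},
    params=kwargs,
)
```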
| 2021-09-28T07:26:33 |
|
zulip/zulip | 19,858 | zulip__zulip-19858 | [
"19822"
] | 076d9eeb16504b34508aa628f5588b59036d1f37 | diff --git a/zerver/lib/markdown/tabbed_sections.py b/zerver/lib/markdown/tabbed_sections.py
--- a/zerver/lib/markdown/tabbed_sections.py
+++ b/zerver/lib/markdown/tabbed_sections.py
@@ -27,7 +27,7 @@
""".strip()
NAV_LIST_ITEM_TEMPLATE = """
-<li data-language="{data_language}" tabindex="0">{name}</li>
+<li data-language="{data_language}" tabindex="0">{label}</li>
""".strip()
DIV_TAB_CONTENT_TEMPLATE = """
@@ -38,7 +38,7 @@
# If adding new entries here, also check if you need to update
# tabbed-instructions.js
-TAB_DISPLAY_NAMES = {
+TAB_SECTION_LABELS = {
"desktop-web": "Desktop/Web",
"ios": "iOS",
"android": "Android",
@@ -73,6 +73,7 @@
"not-stream": "From other views",
"via-recent-topics": "Via recent topics",
"via-left-sidebar": "Via left sidebar",
+ "instructions-for-all-platforms": "Instructions for all platforms",
}
@@ -97,7 +98,10 @@ def run(self, lines: List[str]) -> List[str]:
else:
tab_class = "no-tabs"
tab_section["tabs"] = [
- {"tab_name": "null_tab", "start": tab_section["start_tabs_index"]}
+ {
+ "tab_name": "instructions-for-all-platforms",
+ "start": tab_section["start_tabs_index"],
+ }
]
nav_bar = self.generate_nav_bar(tab_section)
content_blocks = self.generate_content_blocks(tab_section, lines)
@@ -137,10 +141,16 @@ def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str])
def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:
li_elements = []
for tab in tab_section["tabs"]:
- li = NAV_LIST_ITEM_TEMPLATE.format(
- data_language=tab.get("tab_name"), name=TAB_DISPLAY_NAMES.get(tab.get("tab_name"))
- )
+ tab_name = tab.get("tab_name")
+ tab_label = TAB_SECTION_LABELS.get(tab_name)
+ if tab_label is None:
+ raise ValueError(
+ f"Tab '{tab_name}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py"
+ )
+
+ li = NAV_LIST_ITEM_TEMPLATE.format(data_language=tab_name, label=tab_label)
li_elements.append(li)
+
return NAV_BAR_TEMPLATE.format(tabs="\n".join(li_elements))
def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:
| diff --git a/templates/zerver/tests/markdown/test_tabbed_sections_missing_tabs.md b/templates/zerver/tests/markdown/test_tabbed_sections_missing_tabs.md
new file mode 100644
--- /dev/null
+++ b/templates/zerver/tests/markdown/test_tabbed_sections_missing_tabs.md
@@ -0,0 +1,11 @@
+# Heading
+
+{start_tabs}
+{tab|ios}
+iOS instructions
+
+{tab|minix}
+
+Minix instructions. We expect an exception because the minix tab doesn't have a declared label.
+
+{end_tabs}
diff --git a/zerver/tests/test_templates.py b/zerver/tests/test_templates.py
--- a/zerver/tests/test_templates.py
+++ b/zerver/tests/test_templates.py
@@ -76,10 +76,10 @@ def test_markdown_tabbed_sections_extension(self) -> None:
<p>
<div class="code-section no-tabs" markdown="1">
<ul class="nav">
- <li data-language="null_tab" tabindex="0">None</li>
+ <li data-language="instructions-for-all-platforms" tabindex="0">Instructions for all platforms</li>
</ul>
<div class="blocks">
- <div data-language="null_tab" markdown="1"></p>
+ <div data-language="instructions-for-all-platforms" markdown="1"></p>
<p>Instructions for all platforms</p>
<p></div>
</div>
@@ -92,6 +92,15 @@ def test_markdown_tabbed_sections_extension(self) -> None:
expected_html_sans_whitespace = expected_html.replace(" ", "").replace("\n", "")
self.assertEqual(content_sans_whitespace, expected_html_sans_whitespace)
+ def test_markdown_tabbed_sections_missing_tabs(self) -> None:
+ template = get_template("tests/test_markdown.html")
+ context = {
+ "markdown_test_file": "zerver/tests/markdown/test_tabbed_sections_missing_tabs.md",
+ }
+ expected_regex = "^Tab 'minix' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py$"
+ with self.assertRaisesRegex(ValueError, expected_regex):
+ template.render(context)
+
def test_markdown_nested_code_blocks(self) -> None:
template = get_template("tests/test_markdown.html")
context = {
| markdown/tabbed_sections: Raise exception for missing tab name.
As discovered in #19807, missing tab names are currently silently ignored by our `tabbed_sections` Markdown extension; this is not right. We should raise an exception somewhere so that missing tab names are caught before they make their way into production. This should hopefully be a quick fix! Ideally, we should do this in a manner such that something in `zerver.tests.test_markdown` or some other test file fails when a tab name is missing.
Thanks to @alya for reporting this bug!
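For context, a minimal sketch of the kind of check being requested, mirroring the fix in the patch above (the label table is truncated here for illustration):
```python
# Truncated example of the real table in zerver/lib/markdown/tabbed_sections.py.
TAB_SECTION_LABELS = {
    "ios": "iOS",
    "android": "Android",
}

def get_tab_label(tab_name: str) -> str:
    label = TAB_SECTION_LABELS.get(tab_name)
    if label is None:
        # Fail loudly at render time instead of silently rendering a missing label.
        raise ValueError(f"Tab '{tab_name}' is not present in TAB_SECTION_LABELS")
    return label
```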
| Hello @zulip/server-markdown members, this issue was labeled with the "area: markdown" label, so you may want to check it out!
<!-- areaLabelAddition -->
I would expect `test_docs` to fail if an exception is thrown when rendering a page, since I believe we render all /help/ pages in those tests.
Hi, I'd like to work on this
@zulipbot claim
Welcome to Zulip, @pradyumn014! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here's some tips to get you off to a good start:
- Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
- [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
| 2021-09-28T19:46:42 |
zulip/zulip | 19,928 | zulip__zulip-19928 | [
"19899"
] | 7882a1a7f42b54c1140c4d9a4a666ba26141fa68 | diff --git a/zerver/data_import/slack.py b/zerver/data_import/slack.py
--- a/zerver/data_import/slack.py
+++ b/zerver/data_import/slack.py
@@ -4,6 +4,7 @@
import secrets
import shutil
import subprocess
+import zipfile
from collections import defaultdict
from typing import Any, Dict, Iterator, List, Optional, Set, Tuple, Type, TypeVar
@@ -1290,7 +1291,8 @@ def do_convert_data(original_path: str, output_dir: str, token: str, threads: in
if not os.path.exists(slack_data_dir):
os.makedirs(slack_data_dir)
- subprocess.check_call(["unzip", "-q", original_path, "-d", slack_data_dir])
+ with zipfile.ZipFile(original_path) as zipObj:
+ zipObj.extractall(slack_data_dir)
elif os.path.isdir(original_path):
slack_data_dir = original_path
else:
| Problem with unzip and get slack user/team
Hi
We have a problem with unzipping the export file.
If a folder name has symbols, e.g. ö or Cyrillic characters, we get a wrong folder name, like in the screenshot

I found the implementation

I commented out the subprocess call and added zipfile
Maybe it's better and you can add it to master
I had a problem with getting the user and team; the request didn't work for me. With another file all was ok.
So I changed the function get_slack_api_data in zerver/data_import/slack.py

Maybe it is better too
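For reference, a minimal sketch of the `zipfile`-based extraction being suggested (the paths are placeholders):
```python
import zipfile

original_path = "slack_export.zip"  # placeholder path to the Slack export
slack_data_dir = "converted_slack_data/unzipped"  # placeholder output directory

# Unlike shelling out to `unzip -q`, zipfile decodes the archive's file names
# itself, which avoided the garbled folder names described above.
with zipfile.ZipFile(original_path) as zip_file:
    zip_file.extractall(slack_data_dir)
```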
| Thanks for the report! Can you give an example filename for the Slack export zip file to make it easier to reproduce?
@rht @hackerkid @garg3133 can you help investigate this?
Hello @zulip/server-misc, @zulip/server-production members, this issue was labeled with the "area: export/import", "area: production installer" labels, so you may want to check it out!
<!-- areaLabelAddition -->
[slack_export.zip](https://github.com/zulip/zulip/files/7291506/slack_export.zip)
@timabbott it is an empty zip with 2 folders; in these folders I had the problem
Awesome, that should make this accessible for anyone to investigate and debug.
If I read the OP correctly, using `ZipFile` solves the unicode problem?
Yeah. It seems possible it could be solved by just calling `unzip` in some slightly modified way; I don't see a strong reason to prefer one approach over the other.
I think the problem is that the zip file is created on Windows, and hence it uses Windows encoding. Here is a test:
- First I extract using `7z x`, which produces the folders with the correct names, `stöcklin` and `Π²ΡΠ΄ΠΊΡΠΈΡΠΊΠΈ`
- Then I make a zip file from a folder, using `zip` command
This newly created zip file will output a proper Unicode name. To prove it, here is a sample zip file:
[Π²ΡΠ΄ΠΊΡΠΈΡΠΊΠΈ.zip](https://github.com/zulip/zulip/files/7304195/default.zip).
Nevertheless, we should expect a Windows-encoded zip file, and hence should use that `zipfile` Python library. | 2021-10-07T14:33:20 |
|
zulip/zulip | 19,971 | zulip__zulip-19971 | [
"19077"
] | 9cf5a03f2aca83df487d11382090056f8bff7572 | diff --git a/zerver/views/users.py b/zerver/views/users.py
--- a/zerver/views/users.py
+++ b/zerver/views/users.py
@@ -639,7 +639,7 @@ def get_subscription_backend(
stream_id: int = REQ(json_validator=check_int, path_only=True),
) -> HttpResponse:
target_user = access_user_by_id(user_profile, user_id, for_admin=False)
- (stream, sub) = access_stream_by_id(user_profile, stream_id)
+ (stream, sub) = access_stream_by_id(user_profile, stream_id, allow_realm_admin=True)
subscription_status = {"is_subscribed": subscribed_to_stream(target_user, stream_id)}
| diff --git a/zerver/tests/test_users.py b/zerver/tests/test_users.py
--- a/zerver/tests/test_users.py
+++ b/zerver/tests/test_users.py
@@ -1320,6 +1320,26 @@ def test_get_user_subscription_status(self) -> None:
)
self.assertTrue(result["is_subscribed"])
+ self.login("iago")
+ stream = self.make_stream("private_stream", invite_only=True)
+ # Unsubscribed admin can check subscription status in a private stream.
+ result = orjson.loads(
+ self.client_get(f"/json/users/{iago.id}/subscriptions/{stream.id}").content
+ )
+ self.assertFalse(result["is_subscribed"])
+
+ # Unsubscribed non-admins cannot check subscription status in a private stream.
+ self.login("shiva")
+ result = self.client_get(f"/json/users/{iago.id}/subscriptions/{stream.id}")
+ self.assert_json_error(result, "Invalid stream id")
+
+ # Subscribed non-admins can check subscription status in a private stream
+ self.subscribe(self.example_user("shiva"), stream.name)
+ result = orjson.loads(
+ self.client_get(f"/json/users/{iago.id}/subscriptions/{stream.id}").content
+ )
+ self.assertFalse(result["is_subscribed"])
+
class ActivateTest(ZulipTestCase):
def test_basics(self) -> None:
| Usr stream subscription status, private streams, and realm admins
For private streams, the endpoint
```
/users/{user_id}/subscriptions/{stream_id}
```
will show "Invalid stream id" even for administrators (I do get the stream id from `client.get_streams(include_all_active=True)`, so I should have access to the stream).
My impression is that it would be safe and reasonable to add `allow_realm_admin=True` to this call:
https://github.com/zulip/zulip/blob/706ec9714c76d98c2ac230c3a5adf35dfd0c438a/zerver/views/users.py#L636
and experimentally that fixes it.
So the fix is trivial, but before proposing a patch, I would need to look at creating tests that go with it, which I didn't do yet.
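For illustration, a hypothetical reproduction sketch using the Zulip Python API client (the IDs and config path are placeholders):
```python
import zulip

client = zulip.Client(config_file="~/.zuliprc")  # credentials of a realm administrator

# Admins can see private streams when listing streams...
streams = client.get_streams(include_all_active=True)

# ...but this endpoint answered "Invalid stream id" for a private stream the
# admin is not subscribed to, which is what the proposed change addresses.
user_id, stream_id = 10, 15  # placeholder IDs
result = client.call_endpoint(
    url=f"/users/{user_id}/subscriptions/{stream_id}",
    method="GET",
)
print(result)
```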
| @t-vi thanks for the report! I believe your proposed change is correct; sorry that we didn't see this report at the time.
@sahil839 do you want to just do a quick PR changing that and adding tests?
Hello @zulip/server-api, @zulip/server-streams members, this issue was labeled with the "area: stream settings", "area: api" labels, so you may want to check it out!
<!-- areaLabelAddition -->
| 2021-10-15T10:32:31 |
zulip/zulip | 19,985 | zulip__zulip-19985 | [
"19900"
] | 54d037f24a8acaca49c6f44b17f2eb4c333af916 | diff --git a/tools/lib/capitalization.py b/tools/lib/capitalization.py
--- a/tools/lib/capitalization.py
+++ b/tools/lib/capitalization.py
@@ -72,6 +72,7 @@
r".zuliprc",
r"__\w+\.\w+__",
# Things using "I"
+ r"I understand",
r"I say",
r"I want",
r"I'm",
diff --git a/zerver/lib/onboarding.py b/zerver/lib/onboarding.py
--- a/zerver/lib/onboarding.py
+++ b/zerver/lib/onboarding.py
@@ -68,38 +68,40 @@ def send_initial_pms(user: UserProfile) -> None:
" " + _("We also have a guide for [Setting up your organization]({help_url}).")
).format(help_url=help_url)
- welcome_msg = _("Hello, and welcome to Zulip!")
+ welcome_msg = _("Hello, and welcome to Zulip!") + "π"
+ demo_org_warning = ""
if user.realm.demo_organization_scheduled_deletion_date is not None:
- welcome_msg += " " + _(
- "Note that this is a [demo organization]({demo_org_help_url}) and will be automatically deleted in 30 days."
+ demo_org_warning = (
+ _(
+ "Note that this is a [demo organization]({demo_org_help_url}) and will be "
+ "**automatically deleted** in 30 days."
+ )
+ + "\n\n"
)
content = "".join(
[
welcome_msg + " ",
_("This is a private message from me, Welcome Bot.") + "\n\n",
- "* "
- + _(
+ _(
"If you are new to Zulip, check out our [Getting started guide]({getting_started_url})!"
+ ),
+ "{organization_setup_text}" + "\n\n",
+ "{demo_org_warning}",
+ _(
+ "I can also help you get set up! Just click anywhere on this message or press `r` to reply."
)
- + "{organization_setup_text}\n",
- "* " + _("[Add a profile picture]({profile_url}).") + "\n",
- "* " + _("[Browse and subscribe to streams]({streams_url}).") + "\n",
- "* " + _("Download our [mobile and desktop apps]({apps_url}).") + " ",
- _("Zulip also works great in a browser.") + "\n",
- "* " + _("You can type `?` to learn more about Zulip shortcuts.") + "\n\n",
- _("Practice sending a few messages by replying to this conversation.") + " ",
- _("Click anywhere on this message or press `r` to reply."),
+ + "\n\n",
+ _("Here are a few messages I understand:") + " ",
+ bot_commands(is_initial_pm=True),
]
)
content = content.format(
- getting_started_url="/help/getting-started-with-zulip",
- apps_url="/apps",
- profile_url="#settings/profile",
- streams_url="#streams/all",
organization_setup_text=organization_setup_text,
+ demo_org_warning=demo_org_warning,
demo_org_help_url="/help/demo-organizations",
+ getting_started_url="/help/getting-started-with-zulip",
)
internal_send_private_message(
@@ -107,22 +109,124 @@ def send_initial_pms(user: UserProfile) -> None:
)
+def bot_commands(is_initial_pm: bool = False) -> str:
+ commands = [
+ "apps",
+ "edit profile",
+ "theme",
+ "streams",
+ "topics",
+ "message formatting",
+ "keyboard shortcuts",
+ ]
+ if is_initial_pm:
+ commands.append("help")
+ return ", ".join(["`" + command + "`" for command in commands]) + "."
+
+
+def select_welcome_bot_response(human_response_lower: str) -> str:
+ # Given the raw (pre-markdown-rendering) content for a private
+ # message from the user to Welcome Bot, select the appropriate reply.
+ if human_response_lower in ["app", "apps"]:
+ return _(
+ "You can [download](/apps) the [mobile and desktop apps](/apps). "
+ "Zulip also works great in a browser."
+ )
+ elif human_response_lower == "profile":
+ return _(
+ "Go to [Profile settings](#settings/profile) "
+ "to add a [profile picture](/help/change-your-profile-picture) "
+ "and edit your [profile information](/help/edit-your-profile)."
+ )
+ elif human_response_lower == "theme":
+ return _(
+ "Go to [Display settings](#settings/display-settings) "
+ "to [switch between the light and dark themes](/help/dark-theme), "
+ "[pick your favorite emoji theme](/help/emoji-and-emoticons#change-your-emoji-set), "
+ "[change your language](/help/change-your-language), "
+ "and make other tweaks to your Zulip experience."
+ )
+ elif human_response_lower in ["stream", "streams", "channel", "channels"]:
+ return "".join(
+ [
+ _(
+ "In Zulip, streams [determine who gets a message](/help/streams-and-topics). "
+ "They are similar to channels in other chat apps."
+ )
+ + "\n\n",
+ _("[Browse and subscribe to streams](#streams/all)."),
+ ]
+ )
+ elif human_response_lower in ["topic", "topics"]:
+ return "".join(
+ [
+ _(
+ "In Zulip, topics [tell you what a message is about](/help/streams-and-topics). "
+ "They are light-weight subjects, very similar to the subject line of an email."
+ )
+ + "\n\n",
+ _(
+ "Check out [Recent topics](#recent_topics) to see what's happening! "
+ 'You can return to this conversation by clicking "Private messages" in the upper left.'
+ ),
+ ]
+ )
+ elif human_response_lower in ["keyboard", "shortcuts", "keyboard shortcuts"]:
+ return "".join(
+ [
+ _(
+ "Zulip's [keyboard shortcuts](#keyboard-shortcuts) "
+ "let you navigate the app quickly and efficiently."
+ )
+ + "\n\n",
+ _("Press `?` any time to see a [cheat sheet](#keyboard-shortcuts)."),
+ ]
+ )
+ elif human_response_lower in ["formatting", "message formatting"]:
+ return "".join(
+ [
+ _(
+ "Zulip uses [Markdown](/help/format-your-message-using-markdown), "
+ "an intuitive format for **bold**, *italics*, bulleted lists, and more. "
+ "Click [here](#message-formatting) for a cheat sheet."
+ )
+ + "\n\n",
+ _(
+ "Check out our [messaging tips](/help/messaging-tips) "
+ "to learn about emoji reactions, code blocks and much more!"
+ ),
+ ]
+ )
+ elif human_response_lower in ["help", "?"]:
+ return "".join(
+ [
+ _("Here are a few messages I understand:") + " ",
+ bot_commands() + "\n\n",
+ _(
+ "Check out our [Getting started guide](/help/getting-started-with-zulip), "
+ "or browse the [Help center](/help/) to learn more!"
+ ),
+ ]
+ )
+ else:
+ return "".join(
+ [
+ _(
+ "Iβm sorry, I did not understand your message. Please try one of the following commands:"
+ )
+ + " ",
+ bot_commands(),
+ ]
+ )
+
+
def send_welcome_bot_response(send_request: SendMessageRequest) -> None:
+ """Given the send_request object for a private message from the user
+ to welcome-bot, trigger the welcome-bot reply."""
welcome_bot = get_system_bot(settings.WELCOME_BOT, send_request.message.sender.realm_id)
- human_recipient_id = send_request.message.sender.recipient_id
- assert human_recipient_id is not None
- if Message.objects.filter(sender=welcome_bot, recipient_id=human_recipient_id).count() < 2:
- content = (
- _("Congratulations on your first reply!") + " "
- ":tada:"
- "\n"
- "\n"
- + _(
- "Feel free to continue using this space to practice your new messaging "
- "skills. Or, try clicking on some of the stream names to your left!"
- )
- )
- internal_send_private_message(welcome_bot, send_request.message.sender, content)
+ human_response_lower = send_request.message.content.lower()
+ content = select_welcome_bot_response(human_response_lower)
+ internal_send_private_message(welcome_bot, send_request.message.sender, content)
@transaction.atomic
| diff --git a/zerver/tests/test_tutorial.py b/zerver/tests/test_tutorial.py
--- a/zerver/tests/test_tutorial.py
+++ b/zerver/tests/test_tutorial.py
@@ -32,22 +32,136 @@ def test_tutorial_status(self) -> None:
user = self.example_user("hamlet")
self.assertEqual(user.tutorial_status, expected_db_status)
- def test_single_response_to_pm(self) -> None:
+ def test_response_to_pm_for_app(self) -> None:
user = self.example_user("hamlet")
bot = get_system_bot(settings.WELCOME_BOT, user.realm_id)
- content = "whatever"
+ messages = ["app", "Apps"]
+ self.login_user(user)
+ for content in messages:
+ self.send_personal_message(user, bot, content)
+ expected_response = (
+ "You can [download](/apps) the [mobile and desktop apps](/apps). "
+ "Zulip also works great in a browser."
+ )
+ self.assertEqual(most_recent_message(user).content, expected_response)
+
+ def test_response_to_pm_for_edit(self) -> None:
+ user = self.example_user("hamlet")
+ bot = get_system_bot(settings.WELCOME_BOT, user.realm_id)
+ messages = ["profile", "Profile"]
+ self.login_user(user)
+ for content in messages:
+ self.send_personal_message(user, bot, content)
+ expected_response = (
+ "Go to [Profile settings](#settings/profile) "
+ "to add a [profile picture](/help/change-your-profile-picture) "
+ "and edit your [profile information](/help/edit-your-profile)."
+ )
+ self.assertEqual(most_recent_message(user).content, expected_response)
+
+ def test_response_to_pm_for_theme(self) -> None:
+ user = self.example_user("hamlet")
+ bot = get_system_bot(settings.WELCOME_BOT, user.realm_id)
+ messages = ["theme", "Theme"]
+ self.login_user(user)
+ for content in messages:
+ self.send_personal_message(user, bot, content)
+ expected_response = (
+ "Go to [Display settings](#settings/display-settings) "
+ "to [switch between the light and dark themes](/help/dark-theme), "
+ "[pick your favorite emoji theme](/help/emoji-and-emoticons#change-your-emoji-set), "
+ "[change your language](/help/change-your-language), and make other tweaks to your Zulip experience."
+ )
+ self.assertEqual(most_recent_message(user).content, expected_response)
+
+ def test_response_to_pm_for_stream(self) -> None:
+ user = self.example_user("hamlet")
+ bot = get_system_bot(settings.WELCOME_BOT, user.realm_id)
+ messages = ["Streams", "streams", "channels"]
+ self.login_user(user)
+ for content in messages:
+ self.send_personal_message(user, bot, content)
+ expected_response = (
+ "In Zulip, streams [determine who gets a message](/help/streams-and-topics). "
+ "They are similar to channels in other chat apps.\n\n"
+ "[Browse and subscribe to streams](#streams/all)."
+ )
+ self.assertEqual(most_recent_message(user).content, expected_response)
+
+ def test_response_to_pm_for_topic(self) -> None:
+ user = self.example_user("hamlet")
+ bot = get_system_bot(settings.WELCOME_BOT, user.realm_id)
+ messages = ["Topics", "topics"]
+ self.login_user(user)
+ for content in messages:
+ self.send_personal_message(user, bot, content)
+ expected_response = (
+ "In Zulip, topics [tell you what a message is about](/help/streams-and-topics). "
+ "They are light-weight subjects, very similar to the subject line of an email.\n\n"
+ "Check out [Recent topics](#recent_topics) to see what's happening! "
+ 'You can return to this conversation by clicking "Private messages" in the upper left.'
+ )
+ self.assertEqual(most_recent_message(user).content, expected_response)
+
+ def test_response_to_pm_for_shortcuts(self) -> None:
+ user = self.example_user("hamlet")
+ bot = get_system_bot(settings.WELCOME_BOT, user.realm_id)
+ messages = ["Keyboard shortcuts", "shortcuts", "Shortcuts"]
+ self.login_user(user)
+ for content in messages:
+ self.send_personal_message(user, bot, content)
+ expected_response = (
+ "Zulip's [keyboard shortcuts](#keyboard-shortcuts) "
+ "let you navigate the app quickly and efficiently.\n\n"
+ "Press `?` any time to see a [cheat sheet](#keyboard-shortcuts)."
+ )
+ self.assertEqual(most_recent_message(user).content, expected_response)
+
+ def test_response_to_pm_for_formatting(self) -> None:
+ user = self.example_user("hamlet")
+ bot = get_system_bot(settings.WELCOME_BOT, user.realm_id)
+ messages = ["message formatting", "Formatting"]
+ self.login_user(user)
+ for content in messages:
+ self.send_personal_message(user, bot, content)
+ expected_response = (
+ "Zulip uses [Markdown](/help/format-your-message-using-markdown), "
+ "an intuitive format for **bold**, *italics*, bulleted lists, and more. "
+ "Click [here](#message-formatting) for a cheat sheet.\n\n"
+ "Check out our [messaging tips](/help/messaging-tips) to learn about emoji reactions, "
+ "code blocks and much more!"
+ )
+ self.assertEqual(most_recent_message(user).content, expected_response)
+
+ def test_response_to_pm_for_help(self) -> None:
+ user = self.example_user("hamlet")
+ bot = get_system_bot(settings.WELCOME_BOT, user.realm_id)
+ messages = ["help", "Help", "?"]
+ self.login_user(user)
+ for content in messages:
+ self.send_personal_message(user, bot, content)
+ expected_response = (
+ "Here are a few messages I understand: "
+ "`apps`, `edit profile`, `theme`, "
+ "`streams`, `topics`, `message formatting`, `keyboard shortcuts`.\n\n"
+ "Check out our [Getting started guide](/help/getting-started-with-zulip), "
+ "or browse the [Help center](/help/) to learn more!"
+ )
+ self.assertEqual(most_recent_message(user).content, expected_response)
+
+ def test_response_to_pm_for_undefined(self) -> None:
+ user = self.example_user("hamlet")
+ bot = get_system_bot(settings.WELCOME_BOT, user.realm_id)
+ messages = ["Hello", "HAHAHA", "OKOK", "LalulaLapas"]
self.login_user(user)
- self.send_personal_message(user, bot, content)
- user_messages = message_stream_count(user)
- expected_response = (
- "Congratulations on your first reply! :tada:\n\n"
- "Feel free to continue using this space to practice your new messaging "
- "skills. Or, try clicking on some of the stream names to your left!"
- )
- self.assertEqual(most_recent_message(user).content, expected_response)
- # Welcome bot shouldn't respond to further PMs.
- self.send_personal_message(user, bot, content)
- self.assertEqual(message_stream_count(user), user_messages + 1)
+ for content in messages:
+ self.send_personal_message(user, bot, content)
+ expected_response = (
+ "Iβm sorry, I did not understand your message. Please try one of the following commands: "
+ "`apps`, `edit profile`, `theme`, `streams`, "
+ "`topics`, `message formatting`, `keyboard shortcuts`."
+ )
+ self.assertEqual(most_recent_message(user).content, expected_response)
def test_no_response_to_group_pm(self) -> None:
user1 = self.example_user("hamlet")
| Make Welcome Bot more interactive
At present, Welcome Bot gives the user lots of information all at once. It may be unappealing to process so much content when one is just starting to explore a new app. Also, the bot doesn't offer motivation to engage with it, which would draw the user into sending their first Zulip messages.
To try to address this, we should make the Welcome Bot more interactive. It can provide a small amount of key information to start with, offering to teach the user more upon request.
An initial set of messages to be implemented is described below. In general, we should make it easy to create additional message/response interactions.
# New initial messages
## For a non-admin user
Hello, and welcome to Zulip! This is a private message from me, Welcome Bot. If you are new to Zulip, check out our [Getting started guide](https://zulip.com/help/getting-started-with-zulip)!
----------
I can help you get set up! Just click anywhere on this message or press `r` to reply.
Here are a few messages I understand: “**apps**”, “**edit profile**”, “**dark mode**”, “**light mode**”, “**streams**”, and “**topics**”.
----------
If you would like more help, send “**help**”, or type `?` to learn about Zulip’s keyboard shortcuts.
----------
Notes:
- We should have tests to make sure the list of advertised messages matches the messages the bot actually understands.
- It should be possible to manually determine the order of commands in the message (i.e. not just alphabetical).
# Additional responses to user's messages
In general, I think it’s helpful to be a bit flexible about the keywords, but I’m generally not too concerned about getting all the possible variants of what people type.
We should always ignore capitalization and formatting.
## Apps
(also: `app`, `applications`, `[any word] apps`)
- You can [download](https://zulip.com/apps) the [mobile and desktop apps](https://zulip.com/apps). Zulip also works great in a browser.
## Edit profile
(also: `profile`)
- Go to [Profile settings](http://[org URL]/#settings/profile) to [add a profile picture](https://zulip.com/help/change-your-profile-picture) and [edit your profile information](https://zulip.com/help/edit-your-profile).
**Messages for “dark mode”, “light mode”, “streams”, and “topics” will be specified in more detail once we have the initial framework.**
## Any messages not on the list
- I’m sorry, I did not understand your message. Please try one of the following commands: “**apps**”, “**edit profile**”, “**dark mode**”, “**light mode**”, “**streams**”, and “**topics**”.
# Other messages
We don’t want to spam with it all the time, but we may want to remind the user about the commands the bot knows. Ideally, we should probably do this every time the last reminder scrolls off the screen. It would also be OK to do it every N messages (we should experiment to find a good N). This could be done as a follow-up after we have the initial version.
- **Message (same as in the intro)**: Here are a few messages I understand: “**apps**”, “**edit profile**”, “**dark mode**”, “**light mode**”, “**streams**”, and “**topics**”.
[CZO discussion thread](https://chat.zulip.org/#narrow/stream/101-design/topic/welcome.20bot)
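As noted above, it should be easy to add new message/response pairs; a minimal sketch of one way to structure that (the reply texts here are shortened placeholders, not the final wording):
```python
# Keys are normalized (lower-cased) user messages; insertion order doubles as
# the manually chosen order in which commands are advertised.
BOT_RESPONSES = {
    "apps": "You can download the mobile and desktop apps. Zulip also works great in a browser.",
    "edit profile": "Go to Profile settings to add a profile picture and edit your profile information.",
}

def select_response(human_message: str) -> str:
    key = human_message.strip().lower()
    if key in BOT_RESPONSES:
        return BOT_RESPONSES[key]
    commands = ", ".join(BOT_RESPONSES)
    return f"I'm sorry, I did not understand your message. Please try one of the following commands: {commands}."
```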
| Hello @zulip/server-bots, @zulip/server-onboarding members, this issue was labeled with the "area: onboarding", "area: bots" labels, so you may want to check it out!
<!-- areaLabelAddition -->
@zulipbot claim
> * It should be possible to manually determine the order of commands in the message (i.e. not just alphabetical).
I am unable to understand what this means.
@raghupalash I was just pointing out that the commands suggested by the bot (“apps”, “edit profile”, “dark mode”, “light mode”, “streams”, and “topics”) are not in alphabetical order.
Okay, thanks @alya! Also, if you have the time, the PR is ready for a review (or at least I think it is ready).
Awesome, thanks @raghupalash ! Here are the remaining messages to add:
## Dark mode, light mode
(also: `night mode`, `day mode`, `theme`, `mode`)
- Go to [Display settings](http://[org URL]/#settings/display-settings) to [switch between light and dark mode](https://zulip.com/help/night-mode), [pick your favorite emoji theme](https://zulip.com/help/emoji-and-emoticons#change-your-emoji-set), [change your language](https://zulip.com/help/change-your-language), and make other tweaks to your Zulip experience.
## Streams
(also: `channels`)
In Zulip, streams [determine who gets a message](https://zulip.com/help/streams-and-topics). They are similar to channels in other apps.
[Browse and subscribe to streams](http://[org URL]/#streams/all).
## Topics
In Zulip, topics [tell you what a message is about](https://zulip.com/help/streams-and-topics). They are light-weight subjects, very similar to the subject line of an email.
Check out [Recent topics](http://[org URL]/#recent_topics) to see what's happening!
## Keyboard shortcuts
(also: `shortcuts`)
Zulip's [keyboard shortcuts](http://[org URL]/#keyboard-shortcuts) let you navigate the app quickly and efficiently.
Press `?` any time to see a [cheat sheet](http://[org URL]/#keyboard-shortcuts).
## Message formatting
(also: `format`, `formatting`)
Zulip uses [Markdown](https://zulip.com/help/format-your-message-using-markdown), an intuitive format for bold, italics, bulleted lists, and more. Click [here](http://[org URL]/#keyboard-shortcuts) for a cheat sheet.
<Is there a URL we can use in the message above to make sure it opens to the keyboard shortcuts tab?>
Check out our [messaging tips](https://zulip.com/help/messaging-tips) to learn about [emoji reactions](https://zulip.com/help/emoji-reactions), [code blocks](https://zulip.com/help/code-blocks) and much more!
## Help
(also: `?`)
Here are a few messages I understand: < list >
Check out our [Getting started guide](https://zulip.com/help/getting-started-with-zulip), or browse the [help documentation](https://zulip.com/help) to learn more!
The list of messages the bot says it understands becomes:
“apps”, “edit profile”, “dark mode”, “light mode”, “streams”, “topics”, “message formatting”, and “keyboard shortcuts”.
I haven't checked whether this is currently the case, but for admins, we should keep the "We also have a guide for [Setting up your organization]({help_url})." line.
Alright, I'm on it. Btw I didn't touch the code that was related to organization setup, it was left as it is.
> <Is there a URL we can use in the message above to make sure it opens to the keyboard shortcuts tab?>
did you mean the message formatting tab? I figured that we need to go to `#message-formatting`.
> > <Is there a URL we can use in the message above to make sure it opens to the keyboard shortcuts tab?>
>
> did you mean the message formatting tab? I figured that we need to go to `#message-formatting`.
Ah, yeah, that's right -- thanks! | 2021-10-16T08:48:14 |
zulip/zulip | 19,987 | zulip__zulip-19987 | [
"18409"
] | f6c78a35a4d875cd2617dd9672472ffa2c44ac6a | diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -33,7 +33,7 @@
# Changes should be accompanied by documentation explaining what the
# new level means in templates/zerver/api/changelog.md, as well as
# "**Changes**" entries in the endpoint's documentation in `zulip.yaml`.
-API_FEATURE_LEVEL = 105
+API_FEATURE_LEVEL = 106
# Bump the minor PROVISION_VERSION to indicate that folks should provision
# only when going from an old version of the code to a newer version. Bump
@@ -48,4 +48,4 @@
# historical commits sharing the same major version, in which case a
# minor version bump suffices.
-PROVISION_VERSION = "163.0"
+PROVISION_VERSION = "164.0"
diff --git a/zerver/views/users.py b/zerver/views/users.py
--- a/zerver/views/users.py
+++ b/zerver/views/users.py
@@ -163,7 +163,7 @@ def update_user_backend(
request: HttpRequest,
user_profile: UserProfile,
user_id: int,
- full_name: Optional[str] = REQ(default=None, json_validator=check_string),
+ full_name: Optional[str] = REQ(default=None),
role: Optional[int] = REQ(
default=None,
json_validator=check_int_in(
| diff --git a/zerver/tests/test_audit_log.py b/zerver/tests/test_audit_log.py
--- a/zerver/tests/test_audit_log.py
+++ b/zerver/tests/test_audit_log.py
@@ -206,7 +206,7 @@ def test_change_full_name(self) -> None:
start = timezone_now()
new_name = "George Hamletovich"
self.login("iago")
- req = dict(full_name=orjson.dumps(new_name).decode())
+ req = dict(full_name=new_name)
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assertTrue(result.status_code == 200)
query = RealmAuditLog.objects.filter(
diff --git a/zerver/tests/test_users.py b/zerver/tests/test_users.py
--- a/zerver/tests/test_users.py
+++ b/zerver/tests/test_users.py
@@ -377,7 +377,7 @@ def test_admin_user_can_change_full_name(self) -> None:
new_name = "new name"
self.login("iago")
hamlet = self.example_user("hamlet")
- req = dict(full_name=orjson.dumps(new_name).decode())
+ req = dict(full_name=new_name)
result = self.client_patch(f"/json/users/{hamlet.id}", req)
self.assert_json_success(result)
hamlet = self.example_user("hamlet")
@@ -385,21 +385,21 @@ def test_admin_user_can_change_full_name(self) -> None:
def test_non_admin_cannot_change_full_name(self) -> None:
self.login("hamlet")
- req = dict(full_name=orjson.dumps("new name").decode())
+ req = dict(full_name="new name")
result = self.client_patch("/json/users/{}".format(self.example_user("othello").id), req)
self.assert_json_error(result, "Insufficient permission")
def test_admin_cannot_set_long_full_name(self) -> None:
new_name = "a" * (UserProfile.MAX_NAME_LENGTH + 1)
self.login("iago")
- req = dict(full_name=orjson.dumps(new_name).decode())
+ req = dict(full_name=new_name)
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assert_json_error(result, "Name too long!")
def test_admin_cannot_set_short_full_name(self) -> None:
new_name = "a"
self.login("iago")
- req = dict(full_name=orjson.dumps(new_name).decode())
+ req = dict(full_name=new_name)
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assert_json_error(result, "Name too short!")
@@ -407,7 +407,7 @@ def test_not_allowed_format(self) -> None:
# Name of format "Alice|999" breaks in Markdown
new_name = "iago|72"
self.login("iago")
- req = dict(full_name=orjson.dumps(new_name).decode())
+ req = dict(full_name=new_name)
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assert_json_error(result, "Invalid format!")
@@ -415,21 +415,21 @@ def test_allowed_format_complex(self) -> None:
# Adding characters after r'|d+' doesn't break Markdown
new_name = "Hello- 12iago|72k"
self.login("iago")
- req = dict(full_name=orjson.dumps(new_name).decode())
+ req = dict(full_name=new_name)
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assert_json_success(result)
def test_not_allowed_format_complex(self) -> None:
new_name = "Hello- 12iago|72"
self.login("iago")
- req = dict(full_name=orjson.dumps(new_name).decode())
+ req = dict(full_name=new_name)
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assert_json_error(result, "Invalid format!")
def test_admin_cannot_set_full_name_with_invalid_characters(self) -> None:
new_name = "Opheli*"
self.login("iago")
- req = dict(full_name=orjson.dumps(new_name).decode())
+ req = dict(full_name=new_name)
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assert_json_error(result, "Invalid characters in name!")
| Fix encoding of `full_name` parameter under `update_user` endpoint to not use `json_validator`.
The `update_user_backend` view has `full_name` parameter which uses `json_validator` for validating string inputs. This is not necessary and hence can be removed on the lines of similar changes made in #18356. For reference, see: https://chat.zulip.org/#narrow/stream/3-backend/topic/API.20format.20cleanup . We want this to be done most preferably after zulip/python-zulip-api#683.
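For illustration, the client-side difference this change implies (a sketch based on the test changes above):
```python
import orjson

# With json_validator=check_string, the value has to be sent JSON-encoded,
# i.e. with an extra layer of quotes around the string:
old_request = {"full_name": orjson.dumps("New Name").decode()}  # '"New Name"'

# Without it, the plain string is accepted directly:
new_request = {"full_name": "New Name"}
```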
| @zulipbot add "area: api"
Hello @zulip/server-api members, this issue was labeled with the "area: api" label, so you may want to check it out!
<!-- areaLabelAddition -->
This is part of the bigger issue #18035 major part of which is already completed through #18356. | 2021-10-16T17:57:07 |
zulip/zulip | 19,996 | zulip__zulip-19996 | [
"19938"
] | 9381a3bd45969892a7c5e7817765a46415927616 | diff --git a/zerver/webhooks/github/view.py b/zerver/webhooks/github/view.py
--- a/zerver/webhooks/github/view.py
+++ b/zerver/webhooks/github/view.py
@@ -29,6 +29,14 @@
fixture_to_headers = get_http_headers_from_filename("HTTP_X_GITHUB_EVENT")
+TOPIC_FOR_DISCUSSION = "{repo} discussion #{number}: {title}"
+DISCUSSION_TEMPLATE = (
+ "{author} started a new discussion [{title}]({url}) in {category}:\n```quote\n{body}\n```"
+)
+DISCUSSION_COMMENT_TEMPLATE = (
+ "{author} [commented]({comment_url}) on [discussion]({discussion_url}):\n```quote\n{body}\n```"
+)
+
class Helper:
def __init__(
@@ -254,6 +262,27 @@ def get_push_commits_body(helper: Helper) -> str:
)
+def get_discussion_body(helper: Helper) -> str:
+ payload = helper.payload
+ return DISCUSSION_TEMPLATE.format(
+ author=get_sender_name(payload),
+ title=payload["discussion"]["title"],
+ url=payload["discussion"]["html_url"],
+ body=payload["discussion"]["body"],
+ category=payload["discussion"]["category"]["name"],
+ )
+
+
+def get_discussion_comment_body(helper: Helper) -> str:
+ payload = helper.payload
+ return DISCUSSION_COMMENT_TEMPLATE.format(
+ author=get_sender_name(payload),
+ body=payload["comment"]["body"],
+ discussion_url=payload["discussion"]["html_url"],
+ comment_url=payload["comment"]["html_url"],
+ )
+
+
def get_public_body(helper: Helper) -> str:
payload = helper.payload
return "{} made the repository [{}]({}) public.".format(
@@ -602,6 +631,12 @@ def get_subject_based_on_type(payload: Dict[str, Any], event: str) -> str:
return get_organization_name(payload)
elif event == "check_run":
return f"{get_repository_name(payload)} / checks"
+ elif event.startswith("discussion"):
+ return TOPIC_FOR_DISCUSSION.format(
+ repo=get_repository_name(payload),
+ number=payload["discussion"]["number"],
+ title=payload["discussion"]["title"],
+ )
return get_repository_name(payload)
@@ -614,6 +649,8 @@ def get_subject_based_on_type(payload: Dict[str, Any], event: str) -> str:
"delete": partial(get_create_or_delete_body, action="deleted"),
"deployment": get_deployment_body,
"deployment_status": get_change_deployment_status_body,
+ "discussion": get_discussion_body,
+ "discussion_comment": get_discussion_comment_body,
"fork": get_fork_body,
"gollum": get_wiki_pages_body,
"issue_comment": get_issue_comment_body,
| diff --git a/zerver/webhooks/github/tests.py b/zerver/webhooks/github/tests.py
--- a/zerver/webhooks/github/tests.py
+++ b/zerver/webhooks/github/tests.py
@@ -12,6 +12,7 @@
TOPIC_ORGANIZATION = "baxterandthehackers organization"
TOPIC_BRANCH = "public-repo / changes"
TOPIC_WIKI = "public-repo / wiki pages"
+TOPIC_DISCUSSION = "public-repo discussion #90: Welcome to discussions!"
class GitHubWebhookTest(WebhookTestCase):
@@ -483,3 +484,11 @@ def test_team_edited_with_unsupported_keys(self) -> None:
msg,
)
self.assertTrue(stack_info)
+
+ def test_discussion_msg(self) -> None:
+ expected_message = "Codertocat started a new discussion [Welcome to discussions!](https://github.com/baxterthehacker/public-repo/discussions/90) in General:\n```quote\nWe're glad to have you here!\n```"
+ self.check_webhook("discussion", TOPIC_DISCUSSION, expected_message)
+
+ def test_discussion_comment_msg(self) -> None:
+ expected_message = "Codertocat [commented](https://github.com/baxterthehacker/public-repo/discussions/90#discussioncomment-544078) on [discussion](https://github.com/baxterthehacker/public-repo/discussions/90):\n```quote\nI have so many questions to ask you!\n```"
+ self.check_webhook("discussion_comment", TOPIC_DISCUSSION, expected_message)
| GitHub webhook: add support for discussions
The GitHub webhook currently doesn't support discussions. This is the response for a "discussion created" event:
```json
{
"result":"error",
"msg":"The 'discussion:created' event isn't currently supported by the GitHub webhook",
"webhook_name":"GitHub",
"event_type":"discussion:created",
"code":"UNSUPPORTED_WEBHOOK_EVENT_TYPE"
}
```
It would be nice if discussion events could be supported by the GitHub webhook.
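For illustration, a rough sketch of the general pattern for wiring a new event type into the webhook's handler table (names simplified from the patch above; the message wording is made up):
```python
DISCUSSION_TEMPLATE = "{author} started a new discussion [{title}]({url}): {body}"

def get_discussion_body(helper):
    payload = helper.payload
    return DISCUSSION_TEMPLATE.format(
        author=payload["sender"]["login"],
        title=payload["discussion"]["title"],
        url=payload["discussion"]["html_url"],
        body=payload["discussion"]["body"],
    )

# Registering the formatter is what makes "discussion" a supported event type.
EVENT_FUNCTION_MAPPER = {
    "discussion": get_discussion_body,
}
```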
| Hello @zulip/server-integrations members, this issue was labeled with the "area: integrations" label, so you may want to check it out!
<!-- areaLabelAddition -->
This seems worth doing, and is a good issue for new contributors. See https://zulip.com/api/incoming-webhooks-overview for our documentation on writing integrations; in this case, one just needs to add features to the existing GitHub integration.
I would like to try working on this issue !
@zulipbot claim
Welcome to Zulip, @parthn2! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here's some tips to get you off to a good start:
- Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
- [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
@zulipbot claim
Hello @madrix01, it looks like you've currently claimed 1 issue in this repository. We encourage new contributors to focus their efforts on at most 1 issue at a time, so please complete your work on your other claimed issues before trying to claim this issue again.
We look forward to your valuable contributions!
@zulipbot claim | 2021-10-18T12:06:57 |
zulip/zulip | 20,015 | zulip__zulip-20015 | [
"16013"
] | 4839b7ed272a9526ed4c055c3d7a3a8660ae5714 | diff --git a/zerver/lib/markdown/__init__.py b/zerver/lib/markdown/__init__.py
--- a/zerver/lib/markdown/__init__.py
+++ b/zerver/lib/markdown/__init__.py
@@ -1795,7 +1795,22 @@ def __init__(
options.log_errors = False
compiled_re2 = re2.compile(prepare_linkifier_pattern(source_pattern), options=options)
- self.format_string = format_string
+
+ # Find percent-encoded bytes and escape them from the python
+ # interpolation. That is:
+ # %(foo)s -> %(foo)s
+ # %% -> %%
+ # %ab -> %%ab
+ # %%ab -> %%ab
+ # %%%ab -> %%%%ab
+ #
+ # We do this here, rather than before storing, to make edits
+ # to the underlying linkifier more straightforward, and
+ # because JS does not have a real formatter.
+ self.format_string = re.sub(
+ r"(?<!%)(%%)*%([a-fA-F0-9][a-fA-F0-9])", r"\1%%\2", format_string
+ )
+
super().__init__(compiled_re2, md)
def handleMatch( # type: ignore[override] # supertype incompatible with supersupertype
diff --git a/zerver/migrations/0368_alter_realmfilter_url_format_string.py b/zerver/migrations/0368_alter_realmfilter_url_format_string.py
new file mode 100644
--- /dev/null
+++ b/zerver/migrations/0368_alter_realmfilter_url_format_string.py
@@ -0,0 +1,20 @@
+# Generated by Django 3.2.8 on 2021-10-20 23:42
+
+from django.db import migrations, models
+
+from zerver.models import filter_format_validator
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ("zerver", "0367_scimclient"),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name="realmfilter",
+ name="url_format_string",
+ field=models.TextField(validators=[filter_format_validator]),
+ ),
+ ]
diff --git a/zerver/models.py b/zerver/models.py
--- a/zerver/models.py
+++ b/zerver/models.py
@@ -1120,10 +1120,36 @@ def filter_pattern_validator(value: str) -> Pattern[str]:
def filter_format_validator(value: str) -> None:
- regex = re.compile(r"^([\.\/:a-zA-Z0-9#_?=&;~-]+%\(([a-zA-Z0-9_-]+)\)s)+[/a-zA-Z0-9#_?=&;~-]*$")
+ """Verifies URL-ness, and then %(foo)s.
+
+ URLValidator is assumed to catch anything which is malformed as a
+ URL; the regex then verifies the format-string pieces.
+ """
+
+ URLValidator()(value)
+
+ regex = re.compile(
+ r"""
+ ^
+ (
+ [^%] # Any non-percent,
+ | # OR...
+ % ( # A %, which can mean:
+ \( [a-zA-Z0-9_-]+ \) s # Interpolation group
+ | # OR
+ % # %%, which is an escaped %
+ | # OR
+ [0-9a-fA-F][0-9a-fA-F] # URL percent-encoded bytes, which we
+ # special-case in markdown translation
+ )
+ )+ # Those happen one or more times
+ $
+ """,
+ re.VERBOSE,
+ )
if not regex.match(value):
- raise ValidationError(_("Invalid URL format string."))
+ raise ValidationError(_("Invalid format string in URL."))
class RealmFilter(models.Model):
@@ -1134,7 +1160,7 @@ class RealmFilter(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
pattern: str = models.TextField()
- url_format_string: str = models.TextField(validators=[URLValidator(), filter_format_validator])
+ url_format_string: str = models.TextField(validators=[filter_format_validator])
class Meta:
unique_together = ("realm", "pattern")
| diff --git a/frontend_tests/puppeteer_tests/realm-linkifier.ts b/frontend_tests/puppeteer_tests/realm-linkifier.ts
--- a/frontend_tests/puppeteer_tests/realm-linkifier.ts
+++ b/frontend_tests/puppeteer_tests/realm-linkifier.ts
@@ -105,10 +105,7 @@ async function test_edit_invalid_linkifier(page: Page): Promise<void> {
page,
edit_linkifier_format_status_selector,
);
- assert.strictEqual(
- edit_linkifier_format_status,
- "Failed: Enter a valid URL.,Invalid URL format string.",
- );
+ assert.strictEqual(edit_linkifier_format_status, "Failed: Enter a valid URL.");
await page.click(".close-modal-btn");
await page.waitForSelector("#dialog_widget_modal", {hidden: true});
diff --git a/zerver/tests/test_markdown.py b/zerver/tests/test_markdown.py
--- a/zerver/tests/test_markdown.py
+++ b/zerver/tests/test_markdown.py
@@ -1401,6 +1401,20 @@ def assert_conversion(content: str, should_have_converted: bool = True) -> None:
],
)
+ # Test URI escaping
+ RealmFilter(
+ realm=realm,
+ pattern=r"url-(?P<id>[0-9]+)",
+ url_format_string="https://example.com/%%%ba/%(id)s",
+ ).save()
+ msg = Message(sender=self.example_user("hamlet"))
+ content = "url-123 is well-escaped"
+ converted = markdown_convert(content, message_realm=realm, message=msg)
+ self.assertEqual(
+ converted.rendered_content,
+ '<p><a href="https://example.com/%%ba/123">url-123</a> is well-escaped</p>',
+ )
+
def test_multiple_matching_realm_patterns(self) -> None:
realm = get_realm("zulip")
url_format_string = r"https://trac.example.com/ticket/%(id)s"
diff --git a/zerver/tests/test_realm_linkifiers.py b/zerver/tests/test_realm_linkifiers.py
--- a/zerver/tests/test_realm_linkifiers.py
+++ b/zerver/tests/test_realm_linkifiers.py
@@ -1,7 +1,9 @@
import re
+from django.core.exceptions import ValidationError
+
from zerver.lib.test_classes import ZulipTestCase
-from zerver.models import RealmFilter
+from zerver.models import RealmFilter, filter_format_validator
class RealmFilterTest(ZulipTestCase):
@@ -43,7 +45,9 @@ def test_create(self) -> None:
data["pattern"] = r"ZUL-(?P<id>\d+)"
data["url_format_string"] = "https://realm.com/my_realm_filter/"
result = self.client_post("/json/realm/filters", info=data)
- self.assert_json_error(result, "Invalid URL format string.")
+ self.assert_json_error(
+ result, "Group 'id' in linkifier pattern is not present in URL format string."
+ )
data["url_format_string"] = "https://realm.com/my_realm_filter/#hashtag/%(id)s"
result = self.client_post("/json/realm/filters", info=data)
@@ -117,13 +121,15 @@ def test_create(self) -> None:
result, "Group 'id' in linkifier pattern is not present in URL format string."
)
- # BUG: In theory, this should be valid, since %% should be a
- # valid escaping method. It's unlikely someone actually wants
- # to do this, though.
- data["pattern"] = r"ZUL-(?P<id>\d+)"
+ data["pattern"] = r"ZUL-ESCAPE-(?P<id>\d+)"
data["url_format_string"] = r"https://realm.com/my_realm_filter/%%(ignored)s/%(id)s"
result = self.client_post("/json/realm/filters", info=data)
- self.assert_json_error(result, "Invalid URL format string.")
+ self.assert_json_success(result)
+
+ data["pattern"] = r"ZUL-URI-(?P<id>\d+)"
+ data["url_format_string"] = "https://example.com/%ba/%(id)s"
+ result = self.client_post("/json/realm/filters", info=data)
+ self.assert_json_success(result)
data["pattern"] = r"(?P<org>[a-zA-Z0-9_-]+)/(?P<repo>[a-zA-Z0-9_-]+)#(?P<id>[0-9]+)"
data["url_format_string"] = "https://github.com/%(org)s/%(repo)s/issue/%(id)s"
@@ -200,3 +206,46 @@ def test_update(self) -> None:
data["url_format_string"] = "https://realm.com/my_realm_filter/%(id)s"
result = self.client_patch(f"/json/realm/filters/{linkifier_id + 1}", info=data)
self.assert_json_error(result, "Linkifier not found.")
+
+ def test_valid_urls(self) -> None:
+ valid_urls = [
+ "http://example.com/",
+ "https://example.com/",
+ "https://user:[email protected]/",
+ "https://example.com/@user/thing",
+ "https://example.com/!path",
+ "https://example.com/foo.bar",
+ "https://example.com/foo[bar]",
+ "https://example.com/%(foo)s",
+ "https://example.com/%(foo)s%(bars)s",
+ "https://example.com/%(foo)s/and/%(bar)s",
+ "https://example.com/?foo=%(foo)s",
+ "https://example.com/%ab",
+ "https://example.com/%ba",
+ "https://example.com/%21",
+ "https://example.com/words%20with%20spaces",
+ "https://example.com/back%20to%20%(back)s",
+ "https://example.com/encoded%2fwith%2fletters",
+ "https://example.com/encoded%2Fwith%2Fupper%2Fcase%2Fletters",
+ "https://example.com/%%",
+ "https://example.com/%%(",
+ "https://example.com/%%()",
+ "https://example.com/%%(foo",
+ "https://example.com/%%(foo)",
+ "https://example.com/%%(foo)s",
+ ]
+ for url in valid_urls:
+ filter_format_validator(url)
+
+ invalid_urls = [
+ "file:///etc/passwd",
+ "data:text/plain;base64,SGVsbG8sIFdvcmxkIQ==",
+ "https://example.com/%(foo)",
+ "https://example.com/%()s",
+ "https://example.com/%4!",
+ "https://example.com/%(foo",
+ "https://example.com/%2(foo)s",
+ ]
+ for url in invalid_urls:
+ with self.assertRaises(ValidationError):
+ filter_format_validator(url)
| Allow more characters (`%`, `!`, `+`, `[`, `]`, `@`) in linkifier urls
https://chat.zulip.org/#narrow/stream/19-documentation/topic/linkifier/near/967173
We cannot use the % character in linkifier format strings currently. We should either document this or fix this.
Example: `https://trac.example.com/epic/%(num)s/hello%25world` is an invalid format string.
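For reference, here is a minimal standalone sketch of the escaping approach the patch above takes (the `re.sub` pattern is copied from the patch; the wrapper function is only for illustration and is not part of the codebase):
```python
import re

def render_linkifier_url(format_string: str, groups: dict) -> str:
    # Escape URL percent-encoded bytes (e.g. %25 -> %%25) so that Python's
    # %-interpolation leaves them intact, while %(num)s still expands.
    escaped = re.sub(
        r"(?<!%)(%%)*%([a-fA-F0-9][a-fA-F0-9])", r"\1%%\2", format_string
    )
    return escaped % groups

# The format string from the example above now renders as intended.
assert (
    render_linkifier_url("https://trac.example.com/epic/%(num)s/hello%25world", {"num": "7"})
    == "https://trac.example.com/epic/7/hello%25world"
)
```
Per the comment in the patch, the escaping happens at render time rather than when the linkifier is stored, which keeps later edits to the stored format string straightforward.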
| I also can't use a literal `@` character. This makes URLs with an `@` in them impossible to output from a linkifier, which basically makes it impossible to linkify URLs of the form `https://observablehq.com/@jrus/sinebar`.
While we are at it, apparently `@` can't be used in the match pattern either. Ideally I'd like to be able to make a linkifier for
`@jrus/sinebar` → https://observablehq.com/@jrus/sinebar
Currently trying to put a `%` or `@` into the URL format string results in `Failed: Invalid URL format string.`
Here is the spec: https://url.spec.whatwg.org
> The **URL code points** are ASCII alphanumeric, U+0021 (!), U+0024 ($), U+0026 (&), U+0027 ('), U+0028 LEFT PARENTHESIS, U+0029 RIGHT PARENTHESIS, U+002A (*), U+002B (+), U+002C (,), U+002D (-), U+002E (.), U+002F (/), U+003A (:), U+003B (;), U+003D (=), U+003F (?), U+0040 (@), U+005F (_), U+007E (~), and code points in the range U+00A0 to U+10FFFD, inclusive, excluding surrogates and noncharacters.
> The **fragment percent-encode set** is the C0 control percent-encode set and U+0020 SPACE, U+0022 ("), U+003C (<), U+003E (>), and U+0060 (`).
> The **query percent-encode set** is the C0 control percent-encode set and U+0020 SPACE, U+0022 ("), U+0023 (#), U+003C (<), and U+003E (>).
Whereas the current validator supports only: `[/a-zA-Z0-9#_?=&;~-]*`, which does not match the above.
https://github.com/zulip/zulip/blob/master/zerver/models.py#L751-L769
* * *
Perhaps `[/a-zA-Z0-9#_?=&;~-]*` could be changed to ``[-!$&'()*+,./:;=?@_~\w"#<>`%]*`` or the like.
Spaces should also probably be allowed in output URLs, but if necessary people could be forced to encode them as `%20`.
I've encountered the same issue with bang (`!`) characters, which are used in Icinga2 resource URLs. I don't understand why a second validation is needed when the URL submitted is already run through the standard `URLValidator`. Many valid characters are needlessly blocked...
Another requested character is `.`: https://chat.zulip.org/#narrow/stream/9-issues/topic/linkifier.20with.20period/near/1264973
(Though I agree with @jcharaoui that we probably shouldn't be applying a regex like this at all.)
| 2021-10-20T01:19:57 |
zulip/zulip | 20,038 | zulip__zulip-20038 | [
"13948"
] | 14b07669cc342d5da81df7e8367a55a22f05eb41 | diff --git a/zerver/lib/types.py b/zerver/lib/types.py
--- a/zerver/lib/types.py
+++ b/zerver/lib/types.py
@@ -59,6 +59,7 @@ class LinkifierDict(TypedDict):
class SAMLIdPConfigDict(TypedDict, total=False):
entity_id: str
url: str
+ slo_url: str
attr_user_permanent_id: str
attr_first_name: str
attr_last_name: str
diff --git a/zproject/backends.py b/zproject/backends.py
--- a/zproject/backends.py
+++ b/zproject/backends.py
@@ -35,6 +35,7 @@
from django_auth_ldap.backend import LDAPBackend, _LDAPUser, ldap_error
from lxml.etree import XMLSyntaxError
from onelogin.saml2.errors import OneLogin_Saml2_Error
+from onelogin.saml2.logout_request import OneLogin_Saml2_Logout_Request
from onelogin.saml2.response import OneLogin_Saml2_Response
from onelogin.saml2.settings import OneLogin_Saml2_Settings
from requests import HTTPError
@@ -62,6 +63,7 @@
do_create_user,
do_deactivate_user,
do_reactivate_user,
+ do_regenerate_api_key,
do_update_user_custom_profile_data_if_changed,
)
from zerver.lib.avatar import avatar_url, is_avatar_new
@@ -73,6 +75,7 @@
from zerver.lib.rate_limiter import RateLimitedObject
from zerver.lib.redis_utils import get_dict_from_redis, get_redis_client, put_dict_in_redis
from zerver.lib.request import RequestNotes
+from zerver.lib.sessions import delete_user_sessions
from zerver.lib.subdomains import get_subdomain
from zerver.lib.types import ProfileDataElementValue
from zerver.lib.url_encoding import append_url_query_string
@@ -2252,21 +2255,33 @@ def get_data_from_redis(cls, key: str) -> Optional[Dict[str, Any]]:
return data
- def get_issuing_idp(self, SAMLResponse: str) -> Optional[str]:
+ def get_issuing_idp(self, saml_response_or_request: Tuple[str, str]) -> Optional[str]:
"""
- Given a SAMLResponse, returns which of the configured IdPs is declared as the issuer.
+ Given a SAMLResponse or SAMLRequest, returns which of the configured IdPs
+ is declared as the issuer.
This value MUST NOT be trusted as the true issuer!
The signatures are not validated, so it can be tampered with by the user.
That's not a problem for this function,
and true validation happens later in the underlying libraries, but it's important
to note this detail. The purpose of this function is merely as a helper to figure out which
- of the configured IdPs' information to use for parsing and validating the response.
+ of the configured IdPs' information to use for parsing and validating the request.
"""
try:
config = self.generate_saml_config()
saml_settings = OneLogin_Saml2_Settings(config, sp_validation_only=True)
- resp = OneLogin_Saml2_Response(settings=saml_settings, response=SAMLResponse)
- issuers = resp.get_issuers()
+ if saml_response_or_request[1] == "SAMLResponse":
+ resp = OneLogin_Saml2_Response(
+ settings=saml_settings, response=saml_response_or_request[0]
+ )
+ issuers = resp.get_issuers()
+ else:
+ assert saml_response_or_request[1] == "SAMLRequest"
+
+ # The only valid SAMLRequest we can receive is a LogoutRequest.
+ logout_request_xml = OneLogin_Saml2_Logout_Request(
+ config, saml_response_or_request[0]
+ ).get_xml()
+ issuers = [OneLogin_Saml2_Logout_Request.get_issuer(logout_request_xml)]
except self.SAMLRESPONSE_PARSING_EXCEPTIONS:
self.logger.info("Error while parsing SAMLResponse:", exc_info=True)
return None
@@ -2357,10 +2372,76 @@ def _check_entitlements(
)
raise AuthFailed(self, error_msg)
+ def process_logout(self, subdomain: str, idp_name: str) -> Optional[HttpResponse]:
+ """
+ We override process_logout, because we need to customize
+ the way of revoking sessions and introduce NameID validation.
+
+ The python-social-auth and python3-saml implementations expect a simple
+ callback function without arguments, to delete the session. We're not
+ happy with that for two reasons:
+ 1. These implementations don't look at the NameID in the LogoutRequest, which
+ is not quite correct, as a LogoutRequest to logout user X can be delivered
+ through any means, and doesn't need a session to be valid.
+ E.g. a backchannel logout request sent by the IdP wouldn't have a session cookie.
+ Also, hypothetically, a LogoutRequest to logout user Y shouldn't logout user X, even if the
+ request is made with a session cookie belonging to user X.
+ 2. We want to revoke all sessions for the user, not just the current session
+ of the request, so after validating the LogoutRequest, we need to identify
+ the user by the NameID, do some validation and then revoke all sessions.
+
+ TODO: This does not return a LogoutResponse in case of failure, like the spec requires.
+ https://github.com/zulip/zulip/issues/20076 is the related issue with more detail
+ on how to implement the desired behavior.
+ """
+ idp = self.get_idp(idp_name)
+ auth = self._create_saml_auth(idp)
+ # This validates the LogoutRequest and prepares the response
+ # (the URL to which to redirect the client to convey the response to the IdP)
+ # but is a no-op otherwise because keep_local_session=True keeps it from
+ # doing anything else. We want to take care of revoking session on our own.
+ url = auth.process_slo(keep_local_session=True)
+ errors = auth.get_errors()
+ if errors:
+ self.logger.info("/complete/saml/: LogoutRequest failed: %s", errors)
+ return None
+
+ logout_request_xml = auth.get_last_request_xml()
+ name_id = OneLogin_Saml2_Logout_Request.get_nameid(logout_request_xml)
+ try:
+ validate_email(name_id)
+ except ValidationError:
+ self.logger.info(
+ "/complete/saml/: LogoutRequest failed: NameID is not a valid email address: %s",
+ name_id,
+ )
+ return None
+
+ return_data: Dict[str, Any] = {}
+
+ realm = get_realm(subdomain)
+ user_profile = common_get_active_user(name_id, realm, return_data)
+ if user_profile is None:
+ self.logger.info(
+ "/complete/saml/: LogoutRequest failed: No user with email specified in NameID found in realm %s. return_data=%s",
+ realm.id,
+ return_data,
+ )
+ return None
+
+ self.logger.info(
+ "/complete/saml/: LogoutRequest triggered deletion of all session for user %s",
+ user_profile.id,
+ )
+ delete_user_sessions(user_profile)
+ do_regenerate_api_key(user_profile, user_profile)
+
+ return HttpResponseRedirect(url)
+
def auth_complete(self, *args: Any, **kwargs: Any) -> Optional[HttpResponse]:
"""
Additional ugly wrapping on top of auth_complete in SocialAuthMixin.
- We handle two things here:
+ We handle two things for processing SAMLResponses here:
1. Working around bad RelayState or SAMLResponse parameters in the request.
Both parameters should be present if the user came to /complete/saml/ through
the IdP as intended. The errors can happen if someone simply types the endpoint into
@@ -2370,26 +2451,35 @@ def auth_complete(self, *args: Any, **kwargs: Any) -> Optional[HttpResponse]:
into the RelayState. We need to read them and set those values in the session,
and then change the RelayState param to the idp_name, because that's what
SAMLAuth.auth_complete() expects.
+
+ Additionally, this handles incoming LogoutRequests for IdP-initated logout.
"""
+
+ SAMLRequest = self.strategy.request_data().get("SAMLRequest")
SAMLResponse = self.strategy.request_data().get("SAMLResponse")
- if SAMLResponse is None:
- self.logger.info("/complete/saml/: No SAMLResponse in request.")
+ if SAMLResponse is None and SAMLRequest is None:
+ self.logger.info("/complete/saml/: No SAMLResponse or SAMLRequest in request.")
return None
+ elif SAMLRequest is not None:
+ saml_response_or_request = (SAMLRequest, "SAMLRequest")
+ elif SAMLResponse is not None:
+ saml_response_or_request = (SAMLResponse, "SAMLResponse")
relayed_params = self.get_relayed_params()
subdomain = self.choose_subdomain(relayed_params)
if subdomain is None:
error_msg = (
- "/complete/saml/: Can't figure out subdomain for this authentication request. "
- + "relayed_params: %s"
+ "/complete/saml/: Can't figure out subdomain for this %s. " + "relayed_params: %s"
)
- self.logger.info(error_msg, relayed_params)
+ self.logger.info(error_msg, saml_response_or_request[1], relayed_params)
return None
- idp_name = self.get_issuing_idp(SAMLResponse)
+ idp_name = self.get_issuing_idp(saml_response_or_request)
if idp_name is None:
- self.logger.info("/complete/saml/: No valid IdP as issuer of the SAMLResponse.")
+ self.logger.info(
+ "/complete/saml/: No valid IdP as issuer of the %s.", saml_response_or_request[1]
+ )
return None
idp_valid = self.validate_idp_for_subdomain(idp_name, subdomain)
@@ -2401,6 +2491,9 @@ def auth_complete(self, *args: Any, **kwargs: Any) -> Optional[HttpResponse]:
self.logger.info(error_msg, idp_name, subdomain)
return None
+ if saml_response_or_request[1] == "SAMLRequest":
+ return self.process_logout(subdomain, idp_name)
+
result = None
try:
params = relayed_params.copy()
diff --git a/zproject/computed_settings.py b/zproject/computed_settings.py
--- a/zproject/computed_settings.py
+++ b/zproject/computed_settings.py
@@ -1129,6 +1129,13 @@ def zulip_path(path: str) -> str:
default_signature_alg = "http://www.w3.org/2001/04/xmldsig-more#rsa-sha256"
SOCIAL_AUTH_SAML_SECURITY_CONFIG["signatureAlgorithm"] = default_signature_alg
+if "wantMessagesSigned" not in SOCIAL_AUTH_SAML_SECURITY_CONFIG:
+ # This setting controls whether LogoutRequests delivered to us
+ # need to be signed. The default of False is not acceptable,
+ # because we don't want anyone to be able to submit a request
+ # to get other users logged out.
+ SOCIAL_AUTH_SAML_SECURITY_CONFIG["wantMessagesSigned"] = True
+
for idp_name, idp_dict in SOCIAL_AUTH_SAML_ENABLED_IDPS.items():
if DEVELOPMENT:
idp_dict["entity_id"] = get_secret("saml_entity_id", "")
| diff --git a/zerver/tests/fixtures/saml/logoutrequest.txt b/zerver/tests/fixtures/saml/logoutrequest.txt
new file mode 100644
--- /dev/null
+++ b/zerver/tests/fixtures/saml/logoutrequest.txt
@@ -0,0 +1 @@
+<samlp:LogoutRequest xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" xmlns="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" Destination="http://zulip.testserver/complete/saml/" ID="ID_fed666cd-2f56-48a8-89f6-cc34723c011a" IssueInstant="2021-10-21T11:15:18.548Z" Version="2.0"><saml:Issuer>https://idp.testshib.org/idp/shibboleth</saml:Issuer><saml:NameID Format="urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress">{email}</saml:NameID><samlp:SessionIndex>843cfae0-52d6-4ace-871d-78c61a81d4fb::2b1b41e9-f8c8-4114-ab1d-13a01889fb23</samlp:SessionIndex></samlp:LogoutRequest>
diff --git a/zerver/tests/test_auth_backends.py b/zerver/tests/test_auth_backends.py
--- a/zerver/tests/test_auth_backends.py
+++ b/zerver/tests/test_auth_backends.py
@@ -29,7 +29,9 @@
from django_auth_ldap.backend import LDAPSearch, _LDAPUser
from jwt.exceptions import PyJWTError
from onelogin.saml2.auth import OneLogin_Saml2_Auth
+from onelogin.saml2.logout_request import OneLogin_Saml2_Logout_Request
from onelogin.saml2.response import OneLogin_Saml2_Response
+from onelogin.saml2.utils import OneLogin_Saml2_Utils
from social_core.exceptions import AuthFailed, AuthStateForbidden
from social_django.storage import BaseDjangoStorage
from social_django.strategy import DjangoStrategy
@@ -1931,9 +1933,157 @@ def generate_saml_response(
return saml_response
+ def generate_saml_logout_request_from_idp(self, email: str) -> str:
+ """
+ The logoutrequest.txt fixture has a pre-generated LogoutRequest,
+ with {email} placeholder, that can
+ be filled out with the data we want.
+ """
+ unencoded_logout_request = self.fixture_data("logoutrequest.txt", type="saml").format(
+ email=email,
+ )
+ logout_request: str = base64.b64encode(unencoded_logout_request.encode()).decode()
+
+ return logout_request
+
+ def make_idp_initiated_logout_request(
+ self, email: str, make_validity_checks_pass: bool = True
+ ) -> HttpResponse:
+ samlrequest = self.generate_saml_logout_request_from_idp(email)
+ parameters = {"SAMLRequest": samlrequest}
+
+ if make_validity_checks_pass:
+ # It's hard to create fully-correct LogoutRequests with signatures in tests,
+ # so we rely on mocking the validating functions instead.
+ with mock.patch.object(
+ OneLogin_Saml2_Logout_Request, "is_valid", return_value=True
+ ), mock.patch.object(
+ OneLogin_Saml2_Auth,
+ "validate_request_signature",
+ return_value=True,
+ ):
+ result = self.client_get("http://zulip.testserver/complete/saml/", parameters)
+ else:
+ result = self.client_get("http://zulip.testserver/complete/saml/", parameters)
+ return result
+
def get_account_data_dict(self, email: str, name: str) -> Dict[str, Any]:
return dict(email=email, name=name)
+ def test_saml_idp_initiated_logout_success(self) -> None:
+ hamlet = self.example_user("hamlet")
+ old_api_key = hamlet.api_key
+ self.login("hamlet")
+
+ self.assert_logged_in_user_id(hamlet.id)
+ result = self.make_idp_initiated_logout_request(hamlet.delivery_email)
+ self.assert_logged_in_user_id(None)
+
+ # The expected response is a redirect to the IdP's slo_url endpoint
+ # with a SAMLResponse announcing success.
+ self.assertEqual(result.status_code, 302)
+ redirect_to = result["Location"]
+ self.assertIn(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS["test_idp"]["slo_url"], redirect_to)
+
+ parsed = urllib.parse.urlparse(redirect_to)
+ query_dict = urllib.parse.parse_qs(parsed.query)
+
+ self.assertIn("SAMLResponse", query_dict)
+ # Do some very basic parsing of the SAMLResponse to verify it's a success response.
+ saml_response_encoded = query_dict["SAMLResponse"][0]
+ saml_response = OneLogin_Saml2_Utils.decode_base64_and_inflate(
+ saml_response_encoded
+ ).decode()
+ self.assertIn(
+ '<samlp:StatusCode Value="urn:oasis:names:tc:SAML:2.0:status:Success" />', saml_response
+ )
+
+ hamlet.refresh_from_db()
+ # Ensure that the user's api_key was rotated:
+ self.assertNotEqual(hamlet.api_key, old_api_key)
+
+ def test_saml_idp_initiated_logout_request_for_different_user(self) -> None:
+ """
+ This test verifies that sessions are revoked based on the NameID
+ in the LogoutRequest rather than just the logged in session cookie.
+ """
+ hamlet = self.example_user("hamlet")
+ cordelia = self.example_user("cordelia")
+ cordelia_old_api_key = cordelia.api_key
+ self.login("hamlet")
+
+ self.assert_logged_in_user_id(hamlet.id)
+ # We're logged in as hamlet, but deliver a LogoutRequest for cordelia.
+ # This means our session should not be affected.
+ self.make_idp_initiated_logout_request(cordelia.delivery_email)
+ self.assert_logged_in_user_id(hamlet.id)
+
+ cordelia.refresh_from_db()
+ # Cordelia's api_key should have been rotated:
+ self.assertNotEqual(cordelia.api_key, cordelia_old_api_key)
+
+ def test_saml_idp_initiated_logout_invalid_nameid_format(self) -> None:
+ hamlet = self.example_user("hamlet")
+ self.login("hamlet")
+
+ self.assert_logged_in_user_id(hamlet.id)
+ with self.assertLogs("zulip.auth.saml") as mock_logger:
+ # LogoutRequests need to have the email address in NameID
+ # so putting "hamlet" there is invalid.
+ result = self.make_idp_initiated_logout_request("hamlet")
+ self.assert_logged_in_user_id(hamlet.id)
+
+ self.assertEqual(
+ mock_logger.output,
+ [
+ "INFO:zulip.auth.saml:/complete/saml/: LogoutRequest failed: NameID is not a valid email address: hamlet"
+ ],
+ )
+ self.assertEqual(result.status_code, 302)
+ self.assertEqual(result["Location"], "/")
+
+ def test_saml_idp_initiated_logout_user_not_in_realm(self) -> None:
+ hamlet = self.example_user("hamlet")
+ self.login("hamlet")
+
+ self.assert_logged_in_user_id(hamlet.id)
+ with self.assertLogs("zulip.auth.saml") as mock_logger:
+ result = self.make_idp_initiated_logout_request("[email protected]")
+ self.assert_logged_in_user_id(hamlet.id)
+
+ self.assertEqual(
+ mock_logger.output,
+ [
+ "INFO:zulip.auth.saml:/complete/saml/: LogoutRequest failed: No user with email specified in NameID found in realm 2. return_data={}"
+ ],
+ )
+ self.assertEqual(result.status_code, 302)
+ self.assertEqual(result["Location"], "/")
+
+ def test_saml_idp_initiated_logout_invalid_signature(self) -> None:
+ hamlet = self.example_user("hamlet")
+ self.login("hamlet")
+
+ self.assert_logged_in_user_id(hamlet.id)
+ with self.assertLogs("zulip.auth.saml") as mock_logger:
+ # LogoutRequests we generate in tests don't have signatures. We can use
+ # the make_validity_checks_pass argument to disable mocking of python3-saml
+ # internal validation functions to make validation of our LogoutRequest fail
+ # and test our error-handling of that.
+ result = self.make_idp_initiated_logout_request(
+ hamlet.delivery_email, make_validity_checks_pass=False
+ )
+ self.assert_logged_in_user_id(hamlet.id)
+
+ self.assertEqual(
+ mock_logger.output,
+ [
+ "INFO:zulip.auth.saml:/complete/saml/: LogoutRequest failed: ['invalid_logout_request_signature', 'Signature validation failed. Logout Request rejected']"
+ ],
+ )
+ self.assertEqual(result.status_code, 302)
+ self.assertEqual(result["Location"], "/")
+
def test_auth_registration_with_no_name_provided(self) -> None:
"""
The SAMLResponse may not actually provide name values, which is considered
@@ -2085,7 +2235,12 @@ def test_social_auth_complete_bad_params(self) -> None:
self.assertEqual(result.status_code, 302)
self.assertIn("login", result.url)
self.assertEqual(
- m.output, [self.logger_output("/complete/saml/: No SAMLResponse in request.", "info")]
+ m.output,
+ [
+ self.logger_output(
+ "/complete/saml/: No SAMLResponse or SAMLRequest in request.", "info"
+ )
+ ],
)
# Check that POSTing the RelayState, but with missing SAMLResponse,
@@ -2101,7 +2256,12 @@ def test_social_auth_complete_bad_params(self) -> None:
self.assertEqual(result.status_code, 302)
self.assertIn("login", result.url)
self.assertEqual(
- m.output, [self.logger_output("/complete/saml/: No SAMLResponse in request.", "info")]
+ m.output,
+ [
+ self.logger_output(
+ "/complete/saml/: No SAMLResponse or SAMLRequest in request.", "info"
+ )
+ ],
)
# Now test bad SAMLResponses.
@@ -2160,7 +2320,7 @@ def test_social_auth_complete_no_subdomain(self) -> None:
m.output,
[
self.logger_output(
- "/complete/saml/: Can't figure out subdomain for this authentication request. relayed_params: {}".format(
+ "/complete/saml/: Can't figure out subdomain for this SAMLResponse. relayed_params: {}".format(
"{}"
),
"info",
@@ -2493,7 +2653,7 @@ def test_idp_initiated_signin_subdomain_implicit_invalid(self) -> None:
m.output,
[
self.logger_output(
- "/complete/saml/: Can't figure out subdomain for this authentication request. relayed_params: {}",
+ "/complete/saml/: Can't figure out subdomain for this SAMLResponse. relayed_params: {}",
"info",
)
],
diff --git a/zproject/test_extra_settings.py b/zproject/test_extra_settings.py
--- a/zproject/test_extra_settings.py
+++ b/zproject/test_extra_settings.py
@@ -250,6 +250,7 @@ def set_loglevel(logger_name: str, level: str) -> None:
"test_idp": {
"entity_id": "https://idp.testshib.org/idp/shibboleth",
"url": "https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO",
+ "slo_url": "https://idp.testshib.org/idp/profile/SAML2/Redirect/Logout",
"x509cert": get_from_file_if_exists("zerver/tests/fixtures/saml/idp.crt"),
"attr_user_permanent_id": "email",
"attr_first_name": "first_name",
| SAML Logout
Excuse me if this is not the appropriate site to ask.
I'm testing Zulip SAML login with Microsoft ADFS. Login is working fine, but I can't find how to set up the Logout Service. Is it implemented?
As a side note, shouldn't the default signing algorithm be set to SHA-256?
Keep up this great work!
| Hello @zulip/server-authentication members, this issue was labeled with the "area: authentication" label, so you may want to check it out!
I don't think we do SAML logout service yet; it's definitely something we want to add soon.
Regarding signing, according to https://python-social-auth-docs.readthedocs.io/en/latest/backends/saml.html, python-social-auth is using the defaults from `python-saml`, which seem to be incorrectly SHA-1: https://github.com/onelogin/python-saml/issues/269.
We probably can just change this ourselves; can you test whether this modified security config in `/etc/zulip/settings.py` correctly does SHA-256?:
```
SOCIAL_AUTH_SAML_SECURITY_CONFIG = {
# If you've set up the optional private and public server keys,
# set this to True to enable signing of SAMLRequests using the
# private key.
"authnRequestsSigned": False,
"signatureAlgorithm": "http://www.w3.org/2001/04/xmldsig-more#rsa-sha256",
}
```
> Is it implemented?
Work in progress currently; I got into working on it for a while, but then got distracted by other tasks. I'll try to find time to get back to it soon.
> I'm testing Zulip SAML login with Microsoft ADFS.
Did you have to set the ``lowercase_urlencoding`` option to ``True``, or did things work fine without that? I'm asking because the ``python3-saml`` docs say:
> lowercase_urlencoding - Defaults to false. ADFS users should set this to true.
I bumped into this some time ago, and it seems like a very annoying, fiddly detail to have to figure out to get things working.
> As a side note, shouldn't the default signing algorithm be set to SHA-256?
Ouch, good catch. We certainly should change that.
> We probably can just change this ourselves; can you test whether this modified security config in `/etc/zulip/settings.py` correctly does SHA-256?:
>
> ```
> SOCIAL_AUTH_SAML_SECURITY_CONFIG = {
> # If you've set up the optional private and public server keys,
> # set this to True to enable signing of SAMLRequests using the
> # private key.
> "authnRequestsSigned": False,
> "signatureAlgorithm": "http://www.w3.org/2001/04/xmldsig-more#rsa-sha256",
> }
> ```
That's exactly what I did to get it to work. And yes, it does.
> lowercase_urlencoding
I did not set that, and authentication is working so far. But I'll keep it in mind.
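For ADFS setups that do turn out to need it, the python3-saml documentation quoted above describes `lowercase_urlencoding` as a security setting, so presumably it would go next to the other keys in `/etc/zulip/settings.py`; the exact placement below is an assumption and hasn't been verified against an ADFS deployment:
```python
# Hypothetical /etc/zulip/settings.py snippet -- the lowercase_urlencoding
# placement is an unverified assumption based on the python3-saml docs.
SOCIAL_AUTH_SAML_SECURITY_CONFIG = {
    "authnRequestsSigned": False,
    "signatureAlgorithm": "http://www.w3.org/2001/04/xmldsig-more#rsa-sha256",
    # Per the python3-saml docs quoted above, ADFS users may need this:
    "lowercase_urlencoding": True,
}
```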
Hello @mateuszmandera, you have been unassigned from this issue because you have not updated this issue or any referenced pull requests for over 14 days.
You can reclaim this issue or claim any other issue by commenting `@zulipbot claim` on that issue.
Thanks for your contributions, and hope to see you again soon!
Hello,
Is there any news about the SAML logout service?
@mateuszmandera did you begin something on this?
Thank you.
Hi @gdelafond! Sadly no new progress on this since then. But I've been working on SAML-related things in general lately, so it'd make sense for me to pick this back up once I'm done with the SCIM integration #19708.
@mateuszmandera SCIM will be nice to have :)
Is there a way we could help on SAML logout?
@gdelafond certainly! In what way are you interested in helping?
Structurally, we can do a little writeup that points out the functions one would want to call to do the plumbing, and then we can help write tests once one has a working prototype, but I'm not sure what you had in mind. | 2021-10-21T12:19:47 |
zulip/zulip | 20,053 | zulip__zulip-20053 | [
"19938"
] | fe7a1c07225a8b0ce4c97492708c414b43af2167 | diff --git a/zerver/webhooks/github/view.py b/zerver/webhooks/github/view.py
--- a/zerver/webhooks/github/view.py
+++ b/zerver/webhooks/github/view.py
@@ -30,12 +30,8 @@
fixture_to_headers = get_http_headers_from_filename("HTTP_X_GITHUB_EVENT")
TOPIC_FOR_DISCUSSION = "{repo} discussion #{number}: {title}"
-DISCUSSION_TEMPLATE = (
- "{author} started a new discussion [{title}]({url}) in {category}:\n```quote\n{body}\n```"
-)
-DISCUSSION_COMMENT_TEMPLATE = (
- "{author} [commented]({comment_url}) on [discussion]({discussion_url}):\n```quote\n{body}\n```"
-)
+DISCUSSION_TEMPLATE = "{author} created [discussion #{discussion_id}]({url}) in {category}:\n```quote\n### {title}\n{body}\n```"
+DISCUSSION_COMMENT_TEMPLATE = "{author} [commented]({comment_url}) on [discussion #{discussion_id}]({discussion_url}):\n```quote\n{body}\n```"
class Helper:
@@ -266,10 +262,11 @@ def get_discussion_body(helper: Helper) -> str:
payload = helper.payload
return DISCUSSION_TEMPLATE.format(
author=get_sender_name(payload),
- title=payload["discussion"]["title"],
url=payload["discussion"]["html_url"],
body=payload["discussion"]["body"],
category=payload["discussion"]["category"]["name"],
+ discussion_id=payload["discussion"]["number"],
+ title=payload["discussion"]["title"],
)
@@ -280,6 +277,7 @@ def get_discussion_comment_body(helper: Helper) -> str:
body=payload["comment"]["body"],
discussion_url=payload["discussion"]["html_url"],
comment_url=payload["comment"]["html_url"],
+ discussion_id=payload["discussion"]["number"],
)
| diff --git a/zerver/webhooks/github/tests.py b/zerver/webhooks/github/tests.py
--- a/zerver/webhooks/github/tests.py
+++ b/zerver/webhooks/github/tests.py
@@ -486,9 +486,9 @@ def test_team_edited_with_unsupported_keys(self) -> None:
self.assertTrue(stack_info)
def test_discussion_msg(self) -> None:
- expected_message = "Codertocat started a new discussion [Welcome to discussions!](https://github.com/baxterthehacker/public-repo/discussions/90) in General:\n```quote\nWe're glad to have you here!\n```"
+ expected_message = "Codertocat created [discussion #90](https://github.com/baxterthehacker/public-repo/discussions/90) in General:\n```quote\n### Welcome to discussions!\nWe're glad to have you here!\n```"
self.check_webhook("discussion", TOPIC_DISCUSSION, expected_message)
def test_discussion_comment_msg(self) -> None:
- expected_message = "Codertocat [commented](https://github.com/baxterthehacker/public-repo/discussions/90#discussioncomment-544078) on [discussion](https://github.com/baxterthehacker/public-repo/discussions/90):\n```quote\nI have so many questions to ask you!\n```"
+ expected_message = "Codertocat [commented](https://github.com/baxterthehacker/public-repo/discussions/90#discussioncomment-544078) on [discussion #90](https://github.com/baxterthehacker/public-repo/discussions/90):\n```quote\nI have so many questions to ask you!\n```"
self.check_webhook("discussion_comment", TOPIC_DISCUSSION, expected_message)
| GitHub webhook: add support for discussions
The GitHub webhook currently doesn't support discussions. This is the response for a "discussion created" event:
```json
{
"result":"error",
"msg":"The 'discussion:created' event isn't currently supported by the GitHub webhook",
"webhook_name":"GitHub",
"event_type":"discussion:created",
"code":"UNSUPPORTED_WEBHOOK_EVENT_TYPE"
}
```
It would be nice if discussion events could be supported by the GitHub webhook.
| Hello @zulip/server-integrations members, this issue was labeled with the "area: integrations" label, so you may want to check it out!
This seems worth doing, and is a good issue for new contributors. See https://zulip.com/api/incoming-webhooks-overview for our documentation on writing integrations; in this case, one just needs to add features to the existing GitHub integration.
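For a concrete sense of the shape of that work, here is a simplified sketch of a handler for the `discussion` event (the version that actually shipped is in `zerver/webhooks/github/view.py`, shown in the patch above; this sketch drops the quote-block formatting and the webhook's `Helper` plumbing):
```python
# Simplified sketch of a "discussion" event handler for the GitHub webhook.
DISCUSSION_TEMPLATE = "{author} created [discussion #{number}]({url}) in {category}: {title}"

def get_discussion_body(payload: dict) -> str:
    discussion = payload["discussion"]
    return DISCUSSION_TEMPLATE.format(
        author=payload["sender"]["login"],
        number=discussion["number"],
        url=discussion["html_url"],
        category=discussion["category"]["name"],
        title=discussion["title"],
    )
```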
I would like to try working on this issue!
@zulipbot claim
Welcome to Zulip, @parthn2! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here are some tips to get you off to a good start:
- Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
- [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
@zulipbot claim
Hello @madrix01, it looks like you've currently claimed 1 issue in this repository. We encourage new contributors to focus their efforts on at most 1 issue at a time, so please complete your work on your other claimed issues before trying to claim this issue again.
We look forward to your valuable contributions!
@zulipbot claim | 2021-10-23T17:34:34 |
zulip/zulip | 20,069 | zulip__zulip-20069 | [
"20045"
] | 862061fa53ad085e1a528a26cc89e636485fb0b9 | diff --git a/tools/lib/capitalization.py b/tools/lib/capitalization.py
--- a/tools/lib/capitalization.py
+++ b/tools/lib/capitalization.py
@@ -105,6 +105,7 @@
'<a href="/api" target="_blank">API</a> documentation?'
),
r'Most stream administration is done on the <a href="/#streams">Streams page</a>.',
+ r"Add global time<br />Everyone sees global times in their own time zone.",
r"one or more people...",
r"confirmation email",
r"invites remaining",
| Add compose box button to insert local time
The [local times widget](https://zulip.com/help/format-your-message-using-markdown#mention-a-time) is very useful, but it can be difficult to discover and requires remembering a bit of syntax (`<time`) in order to use it.
To address these challenges, we should add a compose box button to insert a local time.
* Icon: https://fontawesome.com/v4.7/icon/clock-o
* Location: Between the smiley and `GIF` under the compose box
* Action: Opens the calendar widget (currently accessed via typing `<time`) for inserting a timezone-sensitive time.
The time should be inserted at the position of the cursor.
| Hello @zulip/server-compose members, this issue was labeled with the "area: compose" label, so you may want to check it out!
@zulipbot claim
@zulipbot claim | 2021-10-26T10:19:33 |
|
zulip/zulip | 20,119 | zulip__zulip-20119 | [
"19838"
] | 937d398f4efbf4af1b735015212e494033818559 | diff --git a/zerver/lib/rest.py b/zerver/lib/rest.py
--- a/zerver/lib/rest.py
+++ b/zerver/lib/rest.py
@@ -147,7 +147,10 @@ def rest_dispatch(request: HttpRequest, **kwargs: Any) -> HttpResponse:
target_function = authenticated_rest_api_view(
allow_webhook_access="allow_incoming_webhooks" in view_flags,
)(target_function)
- elif request.path.startswith("/json") and "allow_anonymous_user_web" in view_flags:
+ elif (
+ request.path.startswith(("/json", "/avatar"))
+ and "allow_anonymous_user_web" in view_flags
+ ):
# For endpoints that support anonymous web access, we do that.
# TODO: Allow /api calls when this is stable enough.
auth_kwargs = dict(allow_unauthenticated=True)
diff --git a/zerver/views/users.py b/zerver/views/users.py
--- a/zerver/views/users.py
+++ b/zerver/views/users.py
@@ -1,10 +1,12 @@
from typing import Any, Dict, List, Optional, Union
from django.conf import settings
+from django.contrib.auth.models import AnonymousUser
from django.http import HttpRequest, HttpResponse
from django.shortcuts import redirect
from django.utils.translation import gettext as _
+from zerver.context_processors import get_valid_realm_from_request
from zerver.decorator import require_member_or_admin, require_realm_admin
from zerver.forms import PASSWORD_TOO_WEAK_ERROR, CreateUserForm
from zerver.lib.actions import (
@@ -32,6 +34,7 @@
from zerver.lib.exceptions import (
CannotDeactivateLastUserError,
JsonableError,
+ MissingAuthenticationError,
OrganizationOwnerRequired,
)
from zerver.lib.integrations import EMBEDDED_BOTS
@@ -219,7 +222,10 @@ def update_user_backend(
def avatar(
- request: HttpRequest, user_profile: UserProfile, email_or_id: str, medium: bool = False
+ request: HttpRequest,
+ maybe_user_profile: Union[UserProfile, AnonymousUser],
+ email_or_id: str,
+ medium: bool = False,
) -> HttpResponse:
"""Accepts an email address or user ID and returns the avatar"""
is_email = False
@@ -228,8 +234,25 @@ def avatar(
except ValueError:
is_email = True
+ if not maybe_user_profile.is_authenticated:
+ # Allow anonynous access to avatars only if spectators are
+ # enabled in the organization.
+ realm = get_valid_realm_from_request(request)
+ # TODO: Replace with realm.allow_web_public_streams_access()
+ # when the method is available.
+ if not realm.has_web_public_streams():
+ raise MissingAuthenticationError()
+
+ # We only allow the ID format for accessing a user's avatar
+ # for spectators. This is mainly for defense in depth, since
+ # email_address_visibility should mean spectators only
+ # interact with fake email addresses anyway.
+ if is_email:
+ raise MissingAuthenticationError()
+ else:
+ realm = maybe_user_profile.realm
+
try:
- realm = user_profile.realm
if is_email:
avatar_user_profile = get_user_including_cross_realm(email_or_id, realm)
else:
diff --git a/zproject/urls.py b/zproject/urls.py
--- a/zproject/urls.py
+++ b/zproject/urls.py
@@ -669,9 +669,14 @@
rest_path("thumbnail", GET=(backend_serve_thumbnail, {"override_api_url_scheme"})),
# Avatars have the same constraint because their URLs are included
# in API data structures used by both the mobile and web clients.
- rest_path("avatar/<email_or_id>", GET=(avatar, {"override_api_url_scheme"})),
rest_path(
- "avatar/<email_or_id>/medium", {"medium": True}, GET=(avatar, {"override_api_url_scheme"})
+ "avatar/<email_or_id>",
+ GET=(avatar, {"override_api_url_scheme", "allow_anonymous_user_web"}),
+ ),
+ rest_path(
+ "avatar/<email_or_id>/medium",
+ {"medium": True},
+ GET=(avatar, {"override_api_url_scheme", "allow_anonymous_user_web"}),
),
]
| diff --git a/frontend_tests/node_tests/popovers.js b/frontend_tests/node_tests/popovers.js
--- a/frontend_tests/node_tests/popovers.js
+++ b/frontend_tests/node_tests/popovers.js
@@ -52,6 +52,7 @@ const alice = {
is_guest: false,
is_admin: false,
role: 400,
+ date_joined: "2021-11-01T16:32:16.458735+00:00",
};
const me = {
@@ -111,6 +112,7 @@ function test_ui(label, f) {
}
test_ui("sender_hover", ({override, mock_template}) => {
+ page_params.is_spectator = false;
override($.fn, "popover", noop);
override(emoji, "get_emoji_details_by_name", noop);
@@ -162,7 +164,7 @@ test_ui("sender_hover", ({override, mock_template}) => {
mock_template("user_info_popover_title.hbs", false, (opts) => {
assert.deepEqual(opts, {
- user_avatar: "avatar/[email protected]",
+ user_avatar: "http://zulip.zulipdev.com/avatar/42?s=50",
user_is_guest: false,
});
return "title-html";
@@ -196,6 +198,8 @@ test_ui("sender_hover", ({override, mock_template}) => {
status_text: "on the beach",
status_emoji_info,
user_mention_syntax: "@**Alice Smith**",
+ date_joined: undefined,
+ spectator_view: false,
});
return "content-html";
});
diff --git a/zerver/tests/test_upload.py b/zerver/tests/test_upload.py
--- a/zerver/tests/test_upload.py
+++ b/zerver/tests/test_upload.py
@@ -1047,28 +1047,44 @@ def test_get_user_avatar(self) -> None:
self.logout()
- # Test /avatar/<email_or_id> endpoint with HTTP basic auth.
- response = self.api_get(hamlet, "/avatar/[email protected]", {"foo": "bar"})
- redirect_url = response["Location"]
- self.assertTrue(redirect_url.endswith(str(avatar_url(cordelia)) + "&foo=bar"))
+ with self.settings(WEB_PUBLIC_STREAMS_ENABLED=False):
+ # Test /avatar/<email_or_id> endpoint with HTTP basic auth.
+ response = self.api_get(hamlet, "/avatar/[email protected]", {"foo": "bar"})
+ redirect_url = response["Location"]
+ self.assertTrue(redirect_url.endswith(str(avatar_url(cordelia)) + "&foo=bar"))
- response = self.api_get(hamlet, f"/avatar/{cordelia.id}", {"foo": "bar"})
- redirect_url = response["Location"]
- self.assertTrue(redirect_url.endswith(str(avatar_url(cordelia)) + "&foo=bar"))
+ response = self.api_get(hamlet, f"/avatar/{cordelia.id}", {"foo": "bar"})
+ redirect_url = response["Location"]
+ self.assertTrue(redirect_url.endswith(str(avatar_url(cordelia)) + "&foo=bar"))
- # Test cross_realm_bot avatar access using email.
- response = self.api_get(hamlet, "/avatar/[email protected]", {"foo": "bar"})
- redirect_url = response["Location"]
- self.assertTrue(redirect_url.endswith(str(avatar_url(cross_realm_bot)) + "&foo=bar"))
+ # Test cross_realm_bot avatar access using email.
+ response = self.api_get(hamlet, "/avatar/[email protected]", {"foo": "bar"})
+ redirect_url = response["Location"]
+ self.assertTrue(redirect_url.endswith(str(avatar_url(cross_realm_bot)) + "&foo=bar"))
- # Test cross_realm_bot avatar access using id.
- response = self.api_get(hamlet, f"/avatar/{cross_realm_bot.id}", {"foo": "bar"})
- redirect_url = response["Location"]
- self.assertTrue(redirect_url.endswith(str(avatar_url(cross_realm_bot)) + "&foo=bar"))
+ # Test cross_realm_bot avatar access using id.
+ response = self.api_get(hamlet, f"/avatar/{cross_realm_bot.id}", {"foo": "bar"})
+ redirect_url = response["Location"]
+ self.assertTrue(redirect_url.endswith(str(avatar_url(cross_realm_bot)) + "&foo=bar"))
+
+ # Without spectators enabled, no unauthenticated access.
+ response = self.client_get("/avatar/[email protected]", {"foo": "bar"})
+ self.assert_json_error(
+ response,
+ "Not logged in: API authentication or user session required",
+ status_code=401,
+ )
+
+ # Allow unauthenticated/spectator requests by ID.
+ response = self.client_get(f"/avatar/{cordelia.id}", {"foo": "bar"})
+ self.assertEqual(302, response.status_code)
+ # Disallow unauthenticated/spectator requests by email.
response = self.client_get("/avatar/[email protected]", {"foo": "bar"})
self.assert_json_error(
- response, "Not logged in: API authentication or user session required", status_code=401
+ response,
+ "Not logged in: API authentication or user session required",
+ status_code=401,
)
def test_get_user_avatar_medium(self) -> None:
@@ -1090,18 +1106,34 @@ def test_get_user_avatar_medium(self) -> None:
self.logout()
- # Test /avatar/<email_or_id>/medium endpoint with HTTP basic auth.
- response = self.api_get(hamlet, "/avatar/[email protected]/medium", {"foo": "bar"})
- redirect_url = response["Location"]
- self.assertTrue(redirect_url.endswith(str(avatar_url(cordelia, True)) + "&foo=bar"))
+ with self.settings(WEB_PUBLIC_STREAMS_ENABLED=False):
+ # Test /avatar/<email_or_id>/medium endpoint with HTTP basic auth.
+ response = self.api_get(hamlet, "/avatar/[email protected]/medium", {"foo": "bar"})
+ redirect_url = response["Location"]
+ self.assertTrue(redirect_url.endswith(str(avatar_url(cordelia, True)) + "&foo=bar"))
- response = self.api_get(hamlet, f"/avatar/{cordelia.id}/medium", {"foo": "bar"})
- redirect_url = response["Location"]
- self.assertTrue(redirect_url.endswith(str(avatar_url(cordelia, True)) + "&foo=bar"))
+ response = self.api_get(hamlet, f"/avatar/{cordelia.id}/medium", {"foo": "bar"})
+ redirect_url = response["Location"]
+ self.assertTrue(redirect_url.endswith(str(avatar_url(cordelia, True)) + "&foo=bar"))
+
+ # Without spectators enabled, no unauthenticated access.
+ response = self.client_get("/avatar/[email protected]/medium", {"foo": "bar"})
+ self.assert_json_error(
+ response,
+ "Not logged in: API authentication or user session required",
+ status_code=401,
+ )
+ # Allow unauthenticated/spectator requests by ID.
+ response = self.client_get(f"/avatar/{cordelia.id}/medium", {"foo": "bar"})
+ self.assertEqual(302, response.status_code)
+
+ # Disallow unauthenticated/spectator requests by email.
response = self.client_get("/avatar/[email protected]/medium", {"foo": "bar"})
self.assert_json_error(
- response, "Not logged in: API authentication or user session required", status_code=401
+ response,
+ "Not logged in: API authentication or user session required",
+ status_code=401,
)
def test_non_valid_user_avatar(self) -> None:
| Make avatars publically accessible in organizations with web public streams enabled
At present, the prototype web-public streams system (#13172 / #18532) is not able to display large-size avatars for users, and likely cannot display small-size avatars for users who've uploaded a custom avatar (i.e. not Gravatar).
At the same time, we want to preserve the model that user-uploaded avatars are only available to users (not the Internet) in organizations that are not using web-public streams.
I think addressing this properly means making the `GET /avatar/{user_id}` endpoints allow access to unauthenticated requests if `realm.has_web_public_streams()`, and not otherwise.
| 2021-11-01T11:36:48 |
|
zulip/zulip | 20,135 | zulip__zulip-20135 | [
"20081"
] | 4d055a66957c05432d3983f3f1ad205ecf7c6625 | diff --git a/zerver/lib/export.py b/zerver/lib/export.py
--- a/zerver/lib/export.py
+++ b/zerver/lib/export.py
@@ -1947,7 +1947,6 @@ def export_realm_wrapper(
threads: int,
upload: bool,
public_only: bool,
- delete_after_upload: bool,
percent_callback: Optional[Callable[[Any], None]] = None,
consent_message_id: Optional[int] = None,
) -> Optional[str]:
@@ -1958,9 +1957,8 @@ def export_realm_wrapper(
public_only=public_only,
consent_message_id=consent_message_id,
)
- print(f"Finished exporting to {output_dir}")
+ shutil.rmtree(output_dir)
print(f"Tarball written to {tarball_path}")
-
if not upload:
return None
@@ -1971,12 +1969,10 @@ def export_realm_wrapper(
public_url = zerver.lib.upload.upload_backend.upload_export_tarball(
realm, tarball_path, percent_callback=percent_callback
)
- print()
- print(f"Uploaded to {public_url}")
+ print(f"\nUploaded to {public_url}")
- if delete_after_upload:
- os.remove(tarball_path)
- print(f"Successfully deleted the tarball at {tarball_path}")
+ os.remove(tarball_path)
+ print(f"Successfully deleted the tarball at {tarball_path}")
return public_url
diff --git a/zerver/management/commands/export.py b/zerver/management/commands/export.py
--- a/zerver/management/commands/export.py
+++ b/zerver/management/commands/export.py
@@ -104,11 +104,6 @@ def add_arguments(self, parser: ArgumentParser) -> None:
action="store_true",
help="Whether to upload resulting tarball to s3 or LOCAL_UPLOADS_DIR",
)
- parser.add_argument(
- "--delete-after-upload",
- action="store_true",
- help="Automatically delete the local tarball after a successful export",
- )
self.add_realm_args(parser, required=True)
def handle(self, *args: Any, **options: Any) -> None:
@@ -215,7 +210,6 @@ def percent_callback(bytes_transferred: Any) -> None:
threads=num_threads,
upload=options["upload"],
public_only=public_only,
- delete_after_upload=options["delete_after_upload"],
percent_callback=percent_callback,
consent_message_id=consent_message_id,
)
diff --git a/zerver/worker/queue_processors.py b/zerver/worker/queue_processors.py
--- a/zerver/worker/queue_processors.py
+++ b/zerver/worker/queue_processors.py
@@ -979,7 +979,6 @@ def failure_processor(event: Dict[str, Any]) -> None:
threads=6,
upload=True,
public_only=True,
- delete_after_upload=True,
)
except Exception:
export_event.extra_data = orjson.dumps(
| diff --git a/zerver/tests/test_management_commands.py b/zerver/tests/test_management_commands.py
--- a/zerver/tests/test_management_commands.py
+++ b/zerver/tests/test_management_commands.py
@@ -525,7 +525,6 @@ def test_command_with_consented_message_id(self) -> None:
realm=realm,
public_only=False,
consent_message_id=message.id,
- delete_after_upload=False,
threads=mock.ANY,
output_dir=mock.ANY,
percent_callback=mock.ANY,
| The `delete_after_upload` flag in export_realm_wrapper should also delete the `output_dir`
https://github.com/zulip/zulip/blob/73a6f2a1a7fbfb939b8bdaded4148f9dcf8a6e81/zerver/lib/export.py#L1944-L1980
After a successful export and upload, the `delete_after_upload` flag will delete the `.tar.gz` file -- but will leave the full un-gzip'd `output_dir`. This means that even successful exports will build up in `/tmp` over time.
We should make the `delete_after_upload` flag also delete the `output_dir`.
| Hello @zulip/server-misc members, this issue was labeled with the "area: export/import" label, so you may want to check it out!
Do we want to always delete the `output_dir` after completing the export?
I think the main reason to want to not do so is for development, and it seems like we could perhaps have a `--keep-unpacked-copy` option or something there.
@timabbott @alexmv we can also set different flags for the zipped and unzipped copies, like `--delete-zipped-copy` and `--delete-unpacked-copy`. Adding `--keep-unpacked-copy` would require `--delete-after-upload` to be true.
I don't think we need different flags for the zipped vs unpacked version -- either I want a backup, or I don't, and what format I get doesn't really matter, since they're interchangeable. And to Tim's point, I agree that defaulting to _not_ keeping a backup is probably the right default.
@alexmv OK, I get it now. So we need to add another flag, `--keep-unpacked-copy`, which wouldn't delete `output_dir` if the flag is passed, and would delete `output_dir` by default otherwise. I would like to work on this!
@timabbott's preferences may differ, but here is what I would like:
- Running `./manage.py export --upload` would default to leaving nothing left in `/tmp`
- Running `./manage.py export` without `--upload` would leave a `.tar.gz` in `/tmp` and no unpacked directory
- There is no `--delete-after-upload` flag, since that is the default.
I don't think there's a need for a `--keep-unpacked-copy`, since making the directory from the successful tarball is easy, and an unsuccessful tarball will result in the unpacked directory still being on disk (since we won't have cleaned it up).
The `--keep-unpacked-copy` option (previous default behavior) is something that I used to use a lot when we were developing the export tool -- made the edit/refresh debugging cycle fast. It may have outlived its purpose; I'd be fine with just removing the option and seeing if we ever find that we wish we had it.
So yeah, I think the above plan would be great. | 2021-11-02T08:58:42 |
zulip/zulip | 20,142 | zulip__zulip-20142 | [
"19224"
] | 069d6ced6912d4c3bf90377120f29e94d2c68b2c | diff --git a/zerver/lib/push_notifications.py b/zerver/lib/push_notifications.py
--- a/zerver/lib/push_notifications.py
+++ b/zerver/lib/push_notifications.py
@@ -889,8 +889,28 @@ def handle_remove_push_notification(user_profile_id: int, message_ids: List[int]
"""
user_profile = get_user_profile_by_id(user_profile_id)
message_ids = bulk_access_messages_expect_usermessage(user_profile_id, message_ids)
- gcm_payload, gcm_options = get_remove_payload_gcm(user_profile, message_ids)
- apns_payload = get_remove_payload_apns(user_profile, message_ids)
+
+ # APNs has a 4KB limit on the maximum size of messages, which
+ # translated to several hundred message IDs in one of these
+ # notifications. In rare cases, it's possible for someone to mark
+ # thousands of push notification eligible messages as read at
+ # once. We could handle this situation with a loop, but we choose
+ # to truncate instead to avoid extra network traffic, because it's
+ # very likely the user has manually cleared the notifications in
+ # their mobile device's UI anyway.
+ #
+ # When truncating, we keep only the newest N messages in this
+ # remove event. This is optimal because older messages are the
+ # ones most likely to have already been manually cleared at some
+ # point in the past.
+ #
+ # We choose 200 here because a 10-digit message ID plus a comma and
+ # space consume 12 bytes, and 12 x 200 = 2400 bytes is still well
+ # below the 4KB limit (leaving plenty of space for metadata).
+ MAX_APNS_MESSAGE_IDS = 200
+ truncated_message_ids = list(sorted(message_ids))[-MAX_APNS_MESSAGE_IDS:]
+ gcm_payload, gcm_options = get_remove_payload_gcm(user_profile, truncated_message_ids)
+ apns_payload = get_remove_payload_apns(user_profile, truncated_message_ids)
if uses_notification_bouncer():
send_notifications_to_bouncer(user_profile_id, apns_payload, gcm_payload, gcm_options)
@@ -908,6 +928,10 @@ def handle_remove_push_notification(user_profile_id: int, message_ids: List[int]
if apple_devices:
send_apple_push_notification(user_profile_id, apple_devices, apns_payload)
+ # We intentionally use the non-truncated message_ids here. We are
+ # assuming in this very rare case that the user has manually
+ # dismissed these notifications on the device side, and the server
+ # should no longer track them as outstanding notifications.
UserMessage.objects.filter(
user_profile_id=user_profile_id,
message_id__in=message_ids,
| "event": "remove" push notifications can exceed the 4096 byte APNs limit
Apple push notifications are [limited](https://developer.apple.com/documentation/usernotifications/setting_up_a_remote_notification_server/generating_a_remote_notification) to 4096 bytes. However, the `"event": "remove"` push notifications added by #15179 can exceed this limit with a long enough list of `zulip_message_ids`. Such a long payload might be rejected or, if even longer, cause `h2.exceptions.FrameTooLargeError` when `aioapns` tries to send it.
(Cc @hashirsarwar, @zulip/mobile.)
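A rough back-of-the-envelope check shows how quickly the limit is reached (the numbers are illustrative only; the real payload also carries other fields, so it overflows even sooner):
```python
# Approximate size of just the message-ID list in a "remove" payload.
message_ids = list(range(1_000_000_000, 1_000_000_500))  # 500 ten-digit IDs
body = ", ".join(str(i) for i in message_ids)
print(len(body))  # ~6,000 bytes, already past APNs' 4096-byte limit
```
The patch above works around this by keeping only the newest 200 message IDs in any single remove notification.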
| https://chat.zulip.org/#narrow/stream/378-api-design/topic/truncating.20.60remove.60.20push.20notifications/near/1230040
| 2021-11-02T22:17:10 |
|
zulip/zulip | 20,148 | zulip__zulip-20148 | [
"19907"
] | 4d055a66957c05432d3983f3f1ad205ecf7c6625 | diff --git a/zerver/views/home.py b/zerver/views/home.py
--- a/zerver/views/home.py
+++ b/zerver/views/home.py
@@ -148,6 +148,10 @@ def home_real(request: HttpRequest) -> HttpResponse:
if request.user.is_authenticated:
user_profile = request.user
realm = user_profile.realm
+
+ # User is logged in and hence no longer `prefers_web_public_view`.
+ if "prefers_web_public_view" in request.session.keys():
+ del request.session["prefers_web_public_view"]
else:
realm = get_valid_realm_from_request(request)
| diff --git a/zerver/tests/test_home.py b/zerver/tests/test_home.py
--- a/zerver/tests/test_home.py
+++ b/zerver/tests/test_home.py
@@ -330,6 +330,12 @@ def test_logged_out_home(self) -> None:
]
expected_keys = [i for i in self.expected_page_params_keys if i not in removed_keys]
self.assertEqual(actual_keys, expected_keys)
+ self.assertEqual(self.client.session.get("prefers_web_public_view"), True)
+
+ # Web public session key should clear once user is logged in
+ self.login("hamlet")
+ self.client_get("/")
+ self.assertEqual(self.client.session.get("prefers_web_public_view"), None)
def test_home_under_2fa_without_otp_device(self) -> None:
with self.settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True):
| Clear `prefers_web_public_view` cookie when logging in or registering an account
In #19902, we added functionality to remember who had accessed Zulip as a spectator and immediately take them to the spectator experience. We probably want to clear the `prefers_web_public_view` cookie when logging in via `do_login` or registering a new account, since the user likely doesn't want to get the spectator experience on future visits to the site.
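As a rough illustration (hedged: the merged change above does this in `home_real`, and hooking `do_login`/registration is the remaining question here), dropping the flag is a one-liner on the Django session:
```
# Illustrative sketch only: clear the spectator-preference flag for an
# authenticated request; Django sessions support dict-style pop().
def clear_spectator_preference(request):
    if request.user.is_authenticated:
        request.session.pop("prefers_web_public_view", None)
```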
| @timabbott is this issue open?
Howdy! It's open, though it's part of a very large project and may require some detailed knowledge or good code reading skills, so if you're relatively new to reading large codebases, it may not be a good fit; that's why we haven't tagged the "help wanted" label on it. | 2021-11-03T09:40:41 |
zulip/zulip | 20,206 | zulip__zulip-20206 | [
"18569"
] | 1e4593b2ae89c47a6a045685d9a874e57f875449 | diff --git a/zerver/lib/actions.py b/zerver/lib/actions.py
--- a/zerver/lib/actions.py
+++ b/zerver/lib/actions.py
@@ -503,7 +503,12 @@ def process_new_human_user(
add_new_user_history(user_profile, streams)
# mit_beta_users don't have a referred_by field
- if not mit_beta_user and prereg_user is not None and prereg_user.referred_by is not None:
+ if (
+ not mit_beta_user
+ and prereg_user is not None
+ and prereg_user.referred_by is not None
+ and prereg_user.referred_by.is_active
+ ):
# This is a cross-realm private message.
with override_language(prereg_user.referred_by.default_language):
internal_send_private_message(
| diff --git a/zerver/tests/test_signup.py b/zerver/tests/test_signup.py
--- a/zerver/tests/test_signup.py
+++ b/zerver/tests/test_signup.py
@@ -1281,6 +1281,31 @@ def test_invite_mirror_dummy_user(self) -> None:
inviter.email,
)
+ def test_invite_from_now_deactivated_user(self) -> None:
+ """
+ While accepting an invitation from a user,
+ processing for a new user account will only
+ be completed if the inviter is not deactivated
+ after sending the invite.
+ """
+ inviter = self.example_user("hamlet")
+ self.login_user(inviter)
+ invitee = self.nonreg_email("alice")
+
+ result = self.invite(invitee, ["Denmark"])
+ self.assert_json_success(result)
+
+ prereg_user = PreregistrationUser.objects.get(email=invitee)
+ change_user_is_active(inviter, False)
+ do_create_user(
+ invitee,
+ "password",
+ inviter.realm,
+ "full name",
+ prereg_user=prereg_user,
+ acting_user=None,
+ )
+
def test_successful_invite_user_as_owner_from_owner_account(self) -> None:
self.login("desdemona")
invitee = self.nonreg_email("alice")
| Accepting an invitation from a now-deactivated user produces a server error
It doesn't show any errors to the user registering, happily. But we should add a check in `process_new_human_user` that `prereg_user.referred_by` isn't deactivated before trying to send the message:
https://github.com/zulip/zulip/blob/c97956c5ff9457e01e642a8f0695e97b44921f3a/zerver/lib/actions.py#L481-L490
| @alxmv Is it that a user is considered deactivated if the `is_active` attribute of the User is set to False?
Hello @zulip/server-onboarding members, this issue was labeled with the "area: invitations" label, so you may want to check it out!
<!-- areaLabelAddition -->
@Parth-Mittal-NITK: Yup!
@zulipbot claim
@alexmv Cool.
Are any manual tests required, since you have mentioned that it doesn't show any error to the user registering?
zulip/zulip | 20,279 | zulip__zulip-20279 | [
"20264"
] | e7b9173ef5b19e286e7d240b42f14f746dace0b3 | diff --git a/zerver/lib/markdown/api_arguments_table_generator.py b/zerver/lib/markdown/api_arguments_table_generator.py
--- a/zerver/lib/markdown/api_arguments_table_generator.py
+++ b/zerver/lib/markdown/api_arguments_table_generator.py
@@ -191,4 +191,6 @@ def generate_data_type(schema: Mapping[str, Any]) -> str:
data_type = "(" + generate_data_type(schema["items"]) + ")[]"
else:
data_type = schema["type"]
+ if "nullable" in schema and schema["nullable"]:
+ data_type = data_type + " | null"
return data_type
| Display whether fields are nullable in the OpenAPI documentation
It appears in our `zulip.yaml` file that we have various fields, like the `stream_weekly_traffic` part of the [register response](https://zulip.com/api/register-queue), that are correctly marked as `nullable: true` in the OpenAPI schema. However, we don't display that fact when displaying the type as e.g. "integer" for one of these fields. We should fix that.
See https://chat.zulip.org/#narrow/stream/378-api-design/topic/.22type.22.20annotations.20and.20.60null.60/near/1282157 for background and discussion on how to display the type.
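To make the intent concrete, here is a hedged sketch (the helper name is made up; the real change lives in `generate_data_type` in the patch above) of a nullable field and the type string we would want to render for it:
```
# A schema fragment as the Python dict the YAML parses into (illustrative).
schema = {"type": "integer", "nullable": True}

def describe_type(schema):
    # Append "| null" when the field is marked nullable.
    data_type = schema["type"]
    if schema.get("nullable"):
        data_type += " | null"
    return data_type

print(describe_type(schema))  # prints: integer | null
```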
| Hello @zulip/server-api members, this issue was labeled with the "area: documentation (api and integrations)" label, so you may want to check it out!
<!-- areaLabelAddition -->
| 2021-11-17T14:29:54 |
|
zulip/zulip | 20,319 | zulip__zulip-20319 | [
"20301"
] | 717c4ae603f575187c0844b5539e308f5c292aef | diff --git a/zerver/lib/events.py b/zerver/lib/events.py
--- a/zerver/lib/events.py
+++ b/zerver/lib/events.py
@@ -143,7 +143,9 @@ def fetch_initial_state_data(
if want("alert_words"):
state["alert_words"] = [] if user_profile is None else user_alert_words(user_profile)
- if want("custom_profile_fields"):
+ # Spectators can't access full user profiles or personal settings,
+ # so there's no need to send custom profile field data.
+ if want("custom_profile_fields") and user_profile is not None:
fields = custom_profile_fields_for_realm(realm.id)
state["custom_profile_fields"] = [f.as_dict() for f in fields]
state["custom_profile_field_types"] = {
@@ -392,6 +394,8 @@ def fetch_initial_state_data(
user_profile,
client_gravatar=client_gravatar,
user_avatar_url_field_optional=user_avatar_url_field_optional,
+ # Don't send custom profile field values to spectators.
+ include_custom_profile_fields=user_profile is not None,
)
state["cross_realm_bots"] = list(get_cross_realm_dicts())
| diff --git a/zerver/tests/test_home.py b/zerver/tests/test_home.py
--- a/zerver/tests/test_home.py
+++ b/zerver/tests/test_home.py
@@ -349,6 +349,8 @@ def test_logged_out_home(self) -> None:
page_params = self._get_page_params(result)
actual_keys = sorted(str(k) for k in page_params.keys())
removed_keys = [
+ "custom_profile_field_types",
+ "custom_profile_fields",
"last_event_id",
"narrow",
"narrow_stream",
| Hide data about custom profile fields from logged out users
We do not display custom profile data in the logged out UI. To minimize the amount of information about users and organizations that is world-viewable, we should remove access to custom profile field data from the API for logged out users.
| This should be a straightforward change to the `events.py` code; @amanagr FYI.
(To be clear, this is just the data shared by `events.py` about which custom profile fields exist in the organization; I think we already don't share users' values for the custom profile fields, via code in `zerver/lib/users.py`).
zulip/zulip | 20,354 | zulip__zulip-20354 | [
"15307"
] | aebbbcd03d0eac15fd5c9138d42e5bc783175c42 | diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -33,7 +33,7 @@
# Changes should be accompanied by documentation explaining what the
# new level means in templates/zerver/api/changelog.md, as well as
# "**Changes**" entries in the endpoint's documentation in `zulip.yaml`.
-API_FEATURE_LEVEL = 110
+API_FEATURE_LEVEL = 111
# Bump the minor PROVISION_VERSION to indicate that folks should provision
# only when going from an old version of the code to a newer version. Bump
diff --git a/zerver/views/streams.py b/zerver/views/streams.py
--- a/zerver/views/streams.py
+++ b/zerver/views/streams.py
@@ -877,7 +877,6 @@ def update_subscription_properties_backend(
"pin_to_top": check_bool,
"wildcard_mentions_notify": check_bool,
}
- response_data = []
for change in subscription_data:
stream_id = change["stream_id"]
@@ -900,6 +899,16 @@ def update_subscription_properties_backend(
user_profile, sub, stream, property, value, acting_user=user_profile
)
- response_data.append({"stream_id": stream_id, "property": property, "value": value})
+ # TODO: Do this more generally, see update_realm_user_settings_defaults.realm.py
+ from zerver.lib.request import RequestNotes
- return json_success({"subscription_data": response_data})
+ request_notes = RequestNotes.get_notes(request)
+ for req_var in request.POST:
+ if req_var not in request_notes.processed_parameters:
+ request_notes.ignored_parameters.add(req_var)
+
+ result: Dict[str, Any] = {}
+ if len(request_notes.ignored_parameters) > 0:
+ result["ignored_parameters_unsupported"] = list(request_notes.ignored_parameters)
+
+ return json_success(result)
| diff --git a/zerver/tests/test_subs.py b/zerver/tests/test_subs.py
--- a/zerver/tests/test_subs.py
+++ b/zerver/tests/test_subs.py
@@ -2729,6 +2729,40 @@ def test_set_invalid_property(self) -> None:
)
self.assert_json_error(result, "Unknown subscription property: bad")
+ def test_ignored_parameters_in_subscriptions_properties_endpoint(self) -> None:
+ """
+ Sending an invalid parameter with a valid parameter returns
+ an `ignored_parameters_unsupported` array.
+ """
+ test_user = self.example_user("hamlet")
+ self.login_user(test_user)
+
+ subs = gather_subscriptions(test_user)[0]
+ sub = subs[0]
+ json_result = self.api_post(
+ test_user,
+ "/api/v1/users/me/subscriptions/properties",
+ {
+ "subscription_data": orjson.dumps(
+ [
+ {
+ "property": "wildcard_mentions_notify",
+ "stream_id": sub["stream_id"],
+ "value": True,
+ }
+ ]
+ ).decode(),
+ "invalid_parameter": orjson.dumps(
+ [{"property": "pin_to_top", "stream_id": sub["stream_id"], "value": False}]
+ ).decode(),
+ },
+ )
+
+ self.assert_json_success(json_result)
+ result = orjson.loads(json_result.content)
+ self.assertIn("ignored_parameters_unsupported", result)
+ self.assertEqual(result["ignored_parameters_unsupported"], ["invalid_parameter"])
+
class SubscriptionRestApiTest(ZulipTestCase):
def test_basic_add_delete(self) -> None:
| `update_subscription_properties_backend`: Don't return the request in the response.
Tim Abbott: In our API endpoint for editing a stream's properties: https://chat.zulip.org/api/update-subscription-properties
We appear to have an unusual return value scheme, where we return basically what the client sent rather than an empty HTTP response. While there's potentially some mild utility for communicating what changes were confirmed by the server, it feels to me like this is probably just a bug; none of our other POST/PATCH endpoints for doing a write have this pattern of returning what you just sent (which was a pattern used in the early days of Zulip, before we had the real-time events system).
The fix is to just have this endpoint `return json_success()` rather than gathering a response. I believe no known clients actually look at the response from this endpoint, so we can just make the change (bumping `API_FEATURE_LEVEL` appropriately).
See https://chat.zulip.org/#narrow/stream/3-backend/topic/update-subscription-properties/near/900984 for discussion.
Tagging as a priority since this sort of API cleanup is valuable to do sooner rather than later.
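For illustration only (hedged: the exact envelope comes from `json_success`, and the `ignored_parameters_unsupported` key is what the patch above ends up returning), the response roughly changes from echoing the request back to a plain success payload:
```
# Before (sketch): the endpoint echoed back what the client just sent.
old_response = {
    "result": "success",
    "msg": "",
    "subscription_data": [
        {"stream_id": 1, "property": "pin_to_top", "value": True},
    ],
}

# After (sketch): a plain success, plus any ignored request parameters.
new_response = {
    "result": "success",
    "msg": "",
    "ignored_parameters_unsupported": ["invalid_parameter"],
}
```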
| Hello @zulip/server-api members, this issue was labeled with the "area: api" label, so you may want to check it out!
<!-- areaLabelAddition -->
@zulipbot claim
Hello @akashaviator, you have been unassigned from this issue because you have not updated this issue or any referenced pull requests for over 14 days.
You can reclaim this issue or claim any other issue by commenting `@zulipbot claim` on that issue.
Thanks for your contributions, and hope to see you again soon!
https://github.com/zulip/zulip/pull/20332#issuecomment-975864248 has a detailed technical discussion of the changes to be made here. Once we finish this, we should audit for other endpoints with this structure and migrate them as well. | 2021-11-24T17:41:39 |
zulip/zulip | 20,366 | zulip__zulip-20366 | [
"19966"
] | 939032b45f5eae354f53afd3c1384466ae81e3d9 | diff --git a/zerver/views/video_calls.py b/zerver/views/video_calls.py
--- a/zerver/views/video_calls.py
+++ b/zerver/views/video_calls.py
@@ -178,7 +178,7 @@ def get_bigbluebutton_url(request: HttpRequest, user_profile: UserProfile) -> Ht
# https://docs.bigbluebutton.org/dev/api.html#usage for reference for checksum
id = "zulip-" + str(random.randint(100000000000, 999999999999))
password = b32encode(secrets.token_bytes(7))[:10].decode()
- checksum = hashlib.sha1(
+ checksum = hashlib.sha256(
(
"create"
+ "meetingID="
@@ -257,7 +257,7 @@ def join_bigbluebutton(
quote_via=quote,
)
- checksum = hashlib.sha1(
+ checksum = hashlib.sha256(
("join" + join_params + settings.BIG_BLUE_BUTTON_SECRET).encode()
).hexdigest()
redirect_url_base = append_url_query_string(
| diff --git a/zerver/tests/test_create_video_call.py b/zerver/tests/test_create_video_call.py
--- a/zerver/tests/test_create_video_call.py
+++ b/zerver/tests/test_create_video_call.py
@@ -175,7 +175,7 @@ def test_create_bigbluebutton_link(self) -> None:
self.assertEqual(
response.json()["url"],
"/calls/bigbluebutton/join?meeting_id=zulip-1&password=AAAAAAAAAA"
- "&checksum=697939301a52d3a2f0b3c3338895c1a5ab528933",
+ "&checksum=d5eb2098bcd0e69a33caf2b18490991b843c8fa6be779316b4303c7990aca687",
)
@responses.activate
@@ -194,7 +194,7 @@ def test_join_bigbluebutton_redirect(self) -> None:
self.assertEqual(
response.url,
"https://bbb.example.com/bigbluebutton/api/join?meetingID=zulip-1&password=a"
- "&fullName=King%20Hamlet&checksum=7ddbb4e7e5aa57cb8c58db12003f3b5b040ff530",
+ "&fullName=King%20Hamlet&checksum=ca78d6d3c3e04918bfab9d7d6cbc6e50602ab2bdfe1365314570943346a71a00",
)
@responses.activate
| Use SHA-256 for BigBlueButton checksums
BigBlueButton's API uses SHA1 for its checksum; I marked it as dismissed for CodeQL since it's not something we can directly change without SHA256 support in BigBlueButton itself. I note that https://github.com/bigbluebutton/bigbluebutton/issues/17135 and related issues suggest it might be possible to do so today, though apparently the way to do that may not be documented. So probably not something we can change without testing with a running BigBlueButton instance.
Noticed by GitHub Actions in https://github.com/zulip/zulip/pull/19962; I disabled the CodeQL warning for this so CI remains happy.
@strifel FYI as the original author of our BigBlueButton plugin.
| Hello @zulip/server-integrations members, this issue was labeled with the "area: integrations" label, so you may want to check it out!
<!-- areaLabelAddition -->
> Noticed by GitHub Actions in #19962; I disabled the CodeQL warning for this so CI remains happy.
(This has no effect on whether CI remains happy. The CodeQL action complains about new warnings *relative* to `main`. This is why CI was happy before #19962 even though CodeQL already knew about this error.)
Makes sense -- I figured this was a result of a function signature change. But still good to have this in the tracker, since it appears BigBlueButton may allow us to change things.
Just tested this.
It works fine with a BigBlueButton server.
It does not work with Scalelite v1.3, but support for SHA256 checksums is already on master and in v1.3.1-beta.1.
zulip/zulip | 20,395 | zulip__zulip-20395 | [
"20060"
] | 3714a30e63a5aad583ef1f053a39c0c92401257c | diff --git a/zerver/models.py b/zerver/models.py
--- a/zerver/models.py
+++ b/zerver/models.py
@@ -2870,7 +2870,10 @@ def get_raw_db_rows(needed_ids: List[int]) -> List[Dict[str, Any]]:
"user_profile_id",
"user_profile__full_name",
]
- return Reaction.objects.filter(message_id__in=needed_ids).values(*fields)
+ # The ordering is important here, as it makes it convenient
+ # for clients to display reactions in order without
+ # client-side sorting code.
+ return Reaction.objects.filter(message_id__in=needed_ids).values(*fields).order_by("id")
def __str__(self) -> str:
return f"{self.user_profile.email} / {self.message.id} / {self.emoji_name}"
| diff --git a/zerver/tests/test_reactions.py b/zerver/tests/test_reactions.py
--- a/zerver/tests/test_reactions.py
+++ b/zerver/tests/test_reactions.py
@@ -87,29 +87,38 @@ def test_cached_reaction_data(self) -> None:
"""
Formatted reactions data is saved in cache.
"""
- sender = self.example_user("hamlet")
- reaction_info = {
- "emoji_name": "smile",
- }
- result = self.api_post(sender, "/api/v1/messages/1/reactions", reaction_info)
+ senders = [self.example_user("hamlet"), self.example_user("cordelia")]
+ emojis = ["smile", "tada"]
+ expected_emoji_codes = ["1f642", "1f389"]
+
+ for sender, emoji in zip(senders, emojis):
+ reaction_info = {
+ "emoji_name": emoji,
+ }
+ result = self.api_post(sender, "/api/v1/messages/1/reactions", reaction_info)
+
+ self.assert_json_success(result)
+ self.assertEqual(200, result.status_code)
- self.assert_json_success(result)
- self.assertEqual(200, result.status_code)
key = to_dict_cache_key_id(1)
message = extract_message_dict(cache_get(key)[0])
expected_reaction_data = [
{
- "emoji_name": "smile",
- "emoji_code": "1f642",
+ "emoji_name": emoji,
+ "emoji_code": emoji_code,
"reaction_type": "unicode_emoji",
"user": {
"email": f"user{sender.id}@zulip.testserver",
"id": sender.id,
- "full_name": "King Hamlet",
+ "full_name": sender.full_name,
},
"user_id": sender.id,
}
+ # It's important that we preserve the loop order in this
+ # test, since this is our test to verify that we're
+ # returning reactions in chronological order.
+ for sender, emoji, emoji_code in zip(senders, emojis, expected_emoji_codes)
]
self.assertEqual(expected_reaction_data, message["reactions"])
| reactions: Preserve order of reactions
When users react to messages, they expect their reactions to be in the same order as they put them. What currently happens is that the reactions stay in the order they are placed, until the page is refreshed, at which point they are exactly reversed.
See the gif below for example:

The functions `set_clean_reactions(message)` in `reaction.js` and `_get_message_template(message_container)` in `message_list_view.js` are relevant.
See [# frontend > emoji reaction order](https://chat.zulip.org/#narrow/stream/6-frontend/topic/emoji.20reaction.20order).
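For reference, the server-side part of the fix (see the patch above) just gives the reactions query a deterministic, chronological order; a rough sketch of that query, not the exact method:
```
from zerver.models import Reaction

def raw_reaction_rows(needed_ids, fields):
    # Reaction ids increase in insertion order, so ordering by id returns
    # reactions in the order users added them, with no client-side sorting.
    return Reaction.objects.filter(message_id__in=needed_ids).values(*fields).order_by("id")
```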
| [Further discussion on CZO](https://chat.zulip.org/#narrow/stream/6-frontend/topic/emoji.20reaction.20order/near/1271375):
- incremental reactions are stable, and new emojis go to end (the desired behavior)
- on page load the newest emojis go to front (wrong), and "newness" is about the last click
Hello @zulip/server-emoji members, this issue was labeled with the "area: emoji" label, so you may want to check it out!
<!-- areaLabelAddition -->
| 2021-11-30T00:58:16 |
zulip/zulip | 20,409 | zulip__zulip-20409 | [
"19424"
] | 35960be510237513c9302011f1206b2260eca646 | diff --git a/zerver/lib/markdown/api_arguments_table_generator.py b/zerver/lib/markdown/api_arguments_table_generator.py
--- a/zerver/lib/markdown/api_arguments_table_generator.py
+++ b/zerver/lib/markdown/api_arguments_table_generator.py
@@ -17,6 +17,37 @@
REGEXP = re.compile(r"\{generate_api_arguments_table\|\s*(.+?)\s*\|\s*(.+)\s*\}")
+API_PARAMETER_TEMPLATE = """
+<div class="api-argument" id="parameter-{argument}">
+ <p class="api-argument-name"><strong>{argument}</strong> <span class="api-field-type">{type}</span> {required} {deprecated} <a href="#parameter-{argument}" class="api-argument-hover-link"><i class="fa fa-chain"></i></a></p>
+ <div class="api-example">
+ <span class="api-argument-example-label">Example</span>: <code>{example}</code>
+ </div>
+ <div class="api-description">{description}{object_details}</div>
+ <hr>
+</div>
+""".strip()
+
+OBJECT_DETAILS_TEMPLATE = """
+<p><strong>{argument}</strong> object details:</p>
+<ul>
+{values}
+</ul>
+""".strip()
+
+OBJECT_LIST_ITEM_TEMPLATE = """
+<li>
+<code>{value}</code>: <span class=api-field-type>{data_type}</span> {description}{object_details}
+</li>
+""".strip()
+
+OBJECT_DESCRIPTION_TEMPLATE = """
+{description}
+<p>{additional_information}</p>
+""".strip()
+
+OBJECT_CODE_TEMPLATE = "<code>{value}</code>".strip()
+
class MarkdownArgumentsTableGenerator(Extension):
def __init__(self, configs: Mapping[str, Any] = {}) -> None:
@@ -80,7 +111,7 @@ def run(self, lines: List[str]) -> List[str]:
arguments = json_obj[doc_name]
if arguments:
- text = self.render_table(arguments)
+ text = self.render_parameters(arguments)
# We want to show this message only if the parameters
# description doesn't say anything else.
elif is_openapi_format and get_parameters_description(endpoint, method) == "":
@@ -101,22 +132,13 @@ def run(self, lines: List[str]) -> List[str]:
done = True
return lines
- def render_table(self, arguments: Sequence[Mapping[str, Any]]) -> List[str]:
- # TODO: Fix naming now that this no longer renders a table.
- table = []
- argument_template = """
-<div class="api-argument" id="parameter-{argument}">
- <p class="api-argument-name"><strong>{argument}</strong> <span class="api-field-type">{type}</span> {required} {deprecated} <a href="#parameter-{argument}" class="api-argument-hover-link"><i class="fa fa-chain"></i></a></p>
- <div class="api-example">
- <span class="api-argument-example-label">Example</span>: <code>{example}</code>
- </div>
- <div class="api-description">{description}</div>
- <hr>
-</div>"""
+ def render_parameters(self, arguments: Sequence[Mapping[str, Any]]) -> List[str]:
+ parameters = []
md_engine = markdown.Markdown(extensions=[])
arguments = sorted(arguments, key=lambda argument: "deprecated" in argument)
for argument in arguments:
+ name = argument.get("argument") or argument.get("name")
description = argument["description"]
oneof = ["`" + str(item) + "`" for item in argument.get("schema", {}).get("enum", [])]
if oneof:
@@ -163,18 +185,100 @@ def render_table(self, arguments: Sequence[Mapping[str, Any]]) -> List[str]:
else:
deprecated_block = ""
- table.append(
- argument_template.format(
- argument=argument.get("argument") or argument.get("name"),
+ object_block = ""
+ # TODO: There are some endpoint parameters with object properties
+ # that are not defined in `zerver/openapi/zulip.yaml`
+ if "object" in data_type:
+ if "schema" in argument:
+ object_schema = argument["schema"]
+ else:
+ object_schema = argument["content"]["application/json"]["schema"]
+
+ if "items" in object_schema and "properties" in object_schema["items"]:
+ object_block = self.render_object_details(object_schema["items"], str(name))
+ elif "properties" in object_schema:
+ object_block = self.render_object_details(object_schema, str(name))
+
+ parameters.append(
+ API_PARAMETER_TEMPLATE.format(
+ argument=name,
example=escape_html(example),
required=required_block,
deprecated=deprecated_block,
description=md_engine.convert(description),
- type=data_type,
+ type=(data_type),
+ object_details=object_block,
+ )
+ )
+
+ return parameters
+
+ def render_object_details(self, schema: Mapping[str, Any], name: str) -> str:
+ md_engine = markdown.Markdown(extensions=[])
+ li_elements = []
+
+ object_values = schema.get("properties", {})
+ for value in object_values:
+
+ description = ""
+ if "description" in object_values[value]:
+ description = object_values[value]["description"]
+
+ # check for default, enum, required or example in documentation
+ additions: List[str] = []
+
+ default = object_values.get(value, {}).get("default")
+ if default is not None:
+ formatted_default = OBJECT_CODE_TEMPLATE.format(value=json.dumps(default))
+ additions += f"Defaults to {formatted_default}. "
+
+ enums = object_values.get(value, {}).get("enum")
+ if enums is not None:
+ formatted_enums = [
+ OBJECT_CODE_TEMPLATE.format(value=json.dumps(enum)) for enum in enums
+ ]
+ additions += "Must be one of: {}. ".format(", ".join(formatted_enums))
+
+ if "required" in schema:
+ if value in schema["required"]:
+ additions += "Required value. "
+ else:
+ additions += "Optional value. "
+
+ if "example" in object_values[value]:
+ example = json.dumps(object_values[value]["example"])
+ formatted_example = OBJECT_CODE_TEMPLATE.format(value=escape_html(example))
+ additions += f"Example: {formatted_example}"
+
+ if len(additions) > 0:
+ additional_information = "".join(additions).strip()
+ description_final = OBJECT_DESCRIPTION_TEMPLATE.format(
+ description=md_engine.convert(description),
+ additional_information=additional_information,
)
+ else:
+ description_final = md_engine.convert(description)
+
+ data_type = generate_data_type(object_values[value])
+
+ details = ""
+ if "object" in data_type and "properties" in object_values[value]:
+ details += self.render_object_details(object_values[value], str(value))
+
+ li = OBJECT_LIST_ITEM_TEMPLATE.format(
+ value=value,
+ data_type=data_type,
+ description=description_final,
+ object_details=details,
)
- return table
+ li_elements.append(li)
+
+ object_details = OBJECT_DETAILS_TEMPLATE.format(
+ argument=name,
+ values="\n".join(li_elements),
+ )
+ return object_details
def makeExtension(*args: Any, **kwargs: str) -> MarkdownArgumentsTableGenerator:
| API documentation doesn't display documentation for drafts parameters usefully
Following #18074, we have API documentation for the new drafts synchronization endpoints. However, while the `GET` endpoint renders the detailed documentation about individual fields within a `draft` object, the other endpoints do not, e.g.:

| Hello @zulip/server-api members, this issue was labeled with the "area: documentation (api and integrations)" label, so you may want to check it out!
<!-- areaLabelAddition -->
zulipbot claim
@timabbott - I think the issue is in the `render_table` function in `zerver/lib/markdown/api_arguments_table_generator.py`. Unlike the `render_table` function for return values (`zerver/lib/markdown/api_return_values_table_generator.py`), the parameters function doesn't recursively call on itself when it encounters an 'object' - like a draft in this issue. Generally, this isn't an issue because the majority of parameters in the currently documented endpoints are basic data types (boolean, string, int).
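A minimal sketch of the missing recursion (names here are illustrative; the eventual fix adds a `render_object_details` helper, as in the patch above):
```
def render_object_properties(schema):
    # Sketch: walk an object schema's properties and recurse into nested
    # objects, so the fields inside parameters like `draft` get rendered too.
    lines = []
    for prop, spec in schema.get("properties", {}).items():
        lines.append(f"{prop} ({spec.get('type', 'unknown')}): {spec.get('description', '')}")
        if spec.get("type") == "object":
            lines.append(render_object_properties(spec))
    return "\n".join(lines)
```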
@EmmalineLake - have you been looking into this too? I noticed you tried to put a claim on the issue last week. If so, let me know if I can help. If not, then I might try to see what a fix might look like.
I have been looking into it, and it is funny you commented today, because I've been working on it all day. It is more of a stretch project for me than I had anticipated, but I would love to work on it together, if you are up for working with somewhat of a newbie. I would definitely benefit from help. Let me know what you think.
Totally! I'm also a newbie, so no worries there. I'll send you a PM in chat.zulip so that we don't clutter up the github issue.
@timabbott
This is a clarification question. I want to make sure Lauryn and I are on the right track.
In the `GET` endpoint, the fields (items) within the draft are in the "Responses" section, and rendered on the webpage. (YAML line 4436 and https://chat.zulip.org/api/get-drafts)
In the `POST` and `PATCH` endpoints, the fields (item) within the draft are contained in the "Parameters" section, and aren't rendered at all. (YAML lines 4489 and https://chat.zulip.org/api/create-drafts ; 4964 and https://chat.zulip.org/api/edit-draft).
In the `DELETE` endpoint, the fields within the draft aren't in the YAML or the webapge. (https://chat.zulip.org/api/delete-draft).
The goal is to list the draft schemas in the parameter section of the respective webpages?
If so , should the formatting match the current parameter formatting or look more like the response formatting from the `GET` page?
If this is the case, would it make sense to do the following?
In the `api_arguments_table_generator` file:
1. Add and format a `<div>` above the example in the HTML in the `render_table` function to contain this information on the website? line 109
2. Add a line to the `table.append` function (wording pending)? line 166
3. Add a recursive call to the `data type` (?) section of the `arguments` for-loop to loop through the draft object? line 128
Thanks in advance!
I don't know this code super well; @orientor or @MSurfer20 is likely better positioned to help. I think adding a missing recursive call makes sense, but I haven't looked at the code myself.
@orientor or @MSurfer20
See this [CZO chat](https://chat.zulip.org/#narrow/stream/19-documentation/topic/rendering.20api.20documentation.20from.20YAML) for further questions. The above question isn't correct.
Thanks! | 2021-12-01T18:10:21 |
|
zulip/zulip | 20,422 | zulip__zulip-20422 | [
"18943"
] | b8a760b14e57f151bdf84634b55917667e3eaa96 | diff --git a/tools/linter_lib/custom_check.py b/tools/linter_lib/custom_check.py
--- a/tools/linter_lib/custom_check.py
+++ b/tools/linter_lib/custom_check.py
@@ -230,7 +230,7 @@
rules=[
{
"pattern": "subject|SUBJECT",
- "exclude_pattern": "subject to the|email|outbox",
+ "exclude_pattern": "subject to the|email|outbox|account deactivation",
"description": "avoid subject as a var",
"good_lines": ["topic_name"],
"bad_lines": ['subject="foo"', " MAX_SUBJECT_LEN"],
diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -33,7 +33,7 @@
# Changes should be accompanied by documentation explaining what the
# new level means in templates/zerver/api/changelog.md, as well as
# "**Changes**" entries in the endpoint's documentation in `zulip.yaml`.
-API_FEATURE_LEVEL = 134
+API_FEATURE_LEVEL = 135
# Bump the minor PROVISION_VERSION to indicate that folks should provision
# only when going from an old version of the code to a newer version. Bump
diff --git a/zerver/views/users.py b/zerver/views/users.py
--- a/zerver/views/users.py
+++ b/zerver/views/users.py
@@ -48,6 +48,7 @@
from zerver.lib.rate_limiter import rate_limit_spectator_attachment_access_by_file
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
+from zerver.lib.send_email import FromAddress, send_email
from zerver.lib.streams import access_stream_by_id, access_stream_by_name, subscribed_to_stream
from zerver.lib.types import ProfileDataElementUpdateDict, ProfileDataElementValue, Validator
from zerver.lib.upload import upload_avatar_image
@@ -71,6 +72,7 @@
from zerver.lib.utils import generate_api_key
from zerver.lib.validator import (
check_bool,
+ check_capped_string,
check_dict,
check_dict_only,
check_int,
@@ -104,15 +106,28 @@ def check_last_owner(user_profile: UserProfile) -> bool:
return user_profile.is_realm_owner and not user_profile.is_bot and len(owners) == 1
+@has_request_variables
def deactivate_user_backend(
- request: HttpRequest, user_profile: UserProfile, user_id: int
+ request: HttpRequest,
+ user_profile: UserProfile,
+ user_id: int,
+ deactivation_notification_comment: Optional[str] = REQ(
+ str_validator=check_capped_string(max_length=2000), default=None
+ ),
) -> HttpResponse:
target = access_user_by_id(user_profile, user_id, for_admin=True)
if target.is_realm_owner and not user_profile.is_realm_owner:
raise OrganizationOwnerRequired()
if check_last_owner(target):
raise JsonableError(_("Cannot deactivate the only organization owner"))
- return _deactivate_user_profile_backend(request, user_profile, target)
+ if deactivation_notification_comment is not None:
+ deactivation_notification_comment = deactivation_notification_comment.strip()
+ return _deactivate_user_profile_backend(
+ request,
+ user_profile,
+ target,
+ deactivation_notification_comment=deactivation_notification_comment,
+ )
def deactivate_user_own_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
@@ -129,13 +144,33 @@ def deactivate_bot_backend(
request: HttpRequest, user_profile: UserProfile, bot_id: int
) -> HttpResponse:
target = access_bot_by_id(user_profile, bot_id)
- return _deactivate_user_profile_backend(request, user_profile, target)
+ return _deactivate_user_profile_backend(
+ request, user_profile, target, deactivation_notification_comment=None
+ )
def _deactivate_user_profile_backend(
- request: HttpRequest, user_profile: UserProfile, target: UserProfile
+ request: HttpRequest,
+ user_profile: UserProfile,
+ target: UserProfile,
+ *,
+ deactivation_notification_comment: Optional[str],
) -> HttpResponse:
do_deactivate_user(target, acting_user=user_profile)
+
+ # It's important that we check for None explicitly here, since ""
+ # encodes sending an email without a custom administrator comment.
+ if deactivation_notification_comment is not None:
+ send_email(
+ "zerver/emails/deactivate",
+ to_user_ids=[target.id],
+ from_address=FromAddress.NOREPLY,
+ context={
+ "deactivation_notification_comment": deactivation_notification_comment,
+ "realm_uri": target.realm.uri,
+ "realm_name": target.realm.name,
+ },
+ )
return json_success(request)
| diff --git a/zerver/tests/test_users.py b/zerver/tests/test_users.py
--- a/zerver/tests/test_users.py
+++ b/zerver/tests/test_users.py
@@ -1399,6 +1399,42 @@ def test_api(self) -> None:
user = self.example_user("hamlet")
self.assertTrue(user.is_active)
+ def test_email_sent(self) -> None:
+ self.login("iago")
+ user = self.example_user("hamlet")
+
+ # Verify no email sent by default.
+ result = self.client_delete(f"/json/users/{user.id}", dict())
+ self.assert_json_success(result)
+ from django.core.mail import outbox
+
+ self.assert_length(outbox, 0)
+ user.refresh_from_db()
+ self.assertFalse(user.is_active)
+
+ # Reactivate user
+ do_reactivate_user(user, acting_user=None)
+ user.refresh_from_db()
+ self.assertTrue(user.is_active)
+
+ # Verify no email sent by default.
+ result = self.client_delete(
+ f"/json/users/{user.id}",
+ dict(
+ deactivation_notification_comment="Dear Hamlet,\nyou just got deactivated.",
+ ),
+ )
+ self.assert_json_success(result)
+ user.refresh_from_db()
+ self.assertFalse(user.is_active)
+
+ self.assert_length(outbox, 1)
+ msg = outbox[0]
+ self.assertEqual(msg.subject, "Notification of account deactivation on Zulip Dev")
+ self.assert_length(msg.reply_to, 1)
+ self.assertEqual(msg.reply_to[0], "noreply@testserver")
+ self.assertIn("Dear Hamlet,", msg.body)
+
def test_api_with_nonexistent_user(self) -> None:
self.login("iago")
| Option to send email to user when they are deactivated
We should add an option to send an email to a user when they are deactivated by an admin. We can add the following to the "Deactivate user" popover:
`E-mail {user name} ({user email})?` [ ]
If checkbox is checked, expand to show a text box where the admin can type in a message to the user.
This should be done as a follow-on to #18941. In the case of deactivating a spammer, it partially solves #16473, but it is useful more generally.
| Hello @zulip/server-settings members, this issue was labeled with the "area: settings (admin/org)" label, so you may want to check it out!
<!-- areaLabelAddition -->
@zulipbot claim
Hello @juliaBichler01, it looks like you've currently claimed 1 issue in this repository. We encourage new contributors to focus their efforts on at most 1 issue at a time, so please complete your work on your other claimed issues before trying to claim this issue again.
We look forward to your valuable contributions!
@alya could i work on this? I am currently waiting for review on my other two open pull requests and would like to look into another issue in the meantime :) | 2021-12-02T13:29:39 |
zulip/zulip | 20,446 | zulip__zulip-20446 | [
"20297"
] | 2b7cca96d2eabe1fa2d407cf8f283a88587c25dd | diff --git a/zerver/lib/events.py b/zerver/lib/events.py
--- a/zerver/lib/events.py
+++ b/zerver/lib/events.py
@@ -177,15 +177,18 @@ def fetch_initial_state_data(
state["max_message_id"] = -1
if want("drafts"):
- # Note: if a user ever disables syncing drafts then all of
- # their old drafts stored on the server will be deleted and
- # simply retained in local storage. In which case user_drafts
- # would just be an empty queryset.
- user_draft_objects = Draft.objects.filter(user_profile=user_profile).order_by(
- "-last_edit_time"
- )[: settings.MAX_DRAFTS_IN_REGISTER_RESPONSE]
- user_draft_dicts = [draft.to_dict() for draft in user_draft_objects]
- state["drafts"] = user_draft_dicts
+ if user_profile is None:
+ state["drafts"] = []
+ else:
+ # Note: if a user ever disables syncing drafts then all of
+ # their old drafts stored on the server will be deleted and
+ # simply retained in local storage. In which case user_drafts
+ # would just be an empty queryset.
+ user_draft_objects = Draft.objects.filter(user_profile=user_profile).order_by(
+ "-last_edit_time"
+ )[: settings.MAX_DRAFTS_IN_REGISTER_RESPONSE]
+ user_draft_dicts = [draft.to_dict() for draft in user_draft_objects]
+ state["drafts"] = user_draft_dicts
if want("muted_topics"):
state["muted_topics"] = [] if user_profile is None else get_topic_mutes(user_profile)
| Add explicit logic to disable drafts for spectators
This block of logic in `zerver/lib/events.py` happens to work because the `UserProfile=None` filter returns the empty set, but I think we should use the explicit model that we use for other personal data for readability reasons:
```
if want("drafts"):
    # Note: if a user ever disables syncing drafts then all of
    # their old drafts stored on the server will be deleted and
    # simply retained in local storage. In which case user_drafts
    # would just be an empty queryset.
    user_draft_objects = Draft.objects.filter(user_profile=user_profile).order_by(
        "-last_edit_time"
    )[: settings.MAX_DRAFTS_IN_REGISTER_RESPONSE]
    user_draft_dicts = [draft.to_dict() for draft in user_draft_objects]
    state["drafts"] = user_draft_dicts
```
Example more readable version that makes clear that we don't send any data for spectators:
```
if want("muted_topics"):
    state["muted_topics"] = [] if user_profile is None else get_topic_mutes(user_profile)
```
| 2021-12-03T11:32:57 |
||
zulip/zulip | 20,491 | zulip__zulip-20491 | [
"20482"
] | 0ca49bc93a8304777259aa038a0b2863fb3a355b | diff --git a/zerver/lib/hotspots.py b/zerver/lib/hotspots.py
--- a/zerver/lib/hotspots.py
+++ b/zerver/lib/hotspots.py
@@ -9,10 +9,6 @@
from zerver.models import UserHotspot, UserProfile
INTRO_HOTSPOTS: Dict[str, Dict[str, Promise]] = {
- "intro_reply": {
- "title": gettext_lazy("Reply to a message"),
- "description": gettext_lazy("Click anywhere on a message to reply."),
- },
"intro_streams": {
"title": gettext_lazy("Catch up on a stream"),
"description": gettext_lazy(
| diff --git a/zerver/tests/test_events.py b/zerver/tests/test_events.py
--- a/zerver/tests/test_events.py
+++ b/zerver/tests/test_events.py
@@ -1806,7 +1806,7 @@ def test_do_mark_hotspot_as_read(self) -> None:
self.user_profile.save(update_fields=["tutorial_status"])
events = self.verify_action(
- lambda: do_mark_hotspot_as_read(self.user_profile, "intro_reply")
+ lambda: do_mark_hotspot_as_read(self.user_profile, "intro_streams")
)
check_hotspots("events[0]", events[0])
diff --git a/zerver/tests/test_hotspots.py b/zerver/tests/test_hotspots.py
--- a/zerver/tests/test_hotspots.py
+++ b/zerver/tests/test_hotspots.py
@@ -16,14 +16,14 @@ def setUp(self) -> None:
def test_first_hotspot(self) -> None:
hotspots = get_next_hotspots(self.user)
self.assert_length(hotspots, 1)
- self.assertEqual(hotspots[0]["name"], "intro_reply")
+ self.assertEqual(hotspots[0]["name"], "intro_streams")
def test_some_done_some_not(self) -> None:
- do_mark_hotspot_as_read(self.user, "intro_reply")
+ do_mark_hotspot_as_read(self.user, "intro_streams")
do_mark_hotspot_as_read(self.user, "intro_compose")
hotspots = get_next_hotspots(self.user)
self.assert_length(hotspots, 1)
- self.assertEqual(hotspots[0]["name"], "intro_streams")
+ self.assertEqual(hotspots[0]["name"], "intro_topics")
def test_all_intro_hotspots_done(self) -> None:
with self.settings(TUTORIAL_ENABLED=True):
@@ -54,16 +54,16 @@ def test_do_mark_hotspot_as_read(self) -> None:
def test_hotspots_url_endpoint(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
- result = self.client_post("/json/users/me/hotspots", {"hotspot": "intro_reply"})
+ result = self.client_post("/json/users/me/hotspots", {"hotspot": "intro_streams"})
self.assert_json_success(result)
self.assertEqual(
list(UserHotspot.objects.filter(user=user).values_list("hotspot", flat=True)),
- ["intro_reply"],
+ ["intro_streams"],
)
result = self.client_post("/json/users/me/hotspots", {"hotspot": "invalid"})
self.assert_json_error(result, "Unknown hotspot: invalid")
self.assertEqual(
list(UserHotspot.objects.filter(user=user).values_list("hotspot", flat=True)),
- ["intro_reply"],
+ ["intro_streams"],
)
diff --git a/zerver/tests/test_users.py b/zerver/tests/test_users.py
--- a/zerver/tests/test_users.py
+++ b/zerver/tests/test_users.py
@@ -1178,7 +1178,7 @@ def test_copy_default_settings_from_another_user(self) -> None:
UserHotspot.objects.filter(user=cordelia).delete()
UserHotspot.objects.filter(user=iago).delete()
- hotspots_completed = {"intro_reply", "intro_streams", "intro_topics"}
+ hotspots_completed = {"intro_streams", "intro_topics"}
for hotspot in hotspots_completed:
UserHotspot.objects.create(user=cordelia, hotspot=hotspot)
| Remove "Send a reply" new user tip
After implementing #19900, there are two places where new users are told how to reply to a message: in the Welcome Bot text and in the "Send a reply" new user tip immediately below.
To simplify and avoid redundancy, we should remove the "Send a reply" new user tip.
<img width="909" alt="Screen_Shot_2021-12-06_at_10_08_14_AM" src="https://user-images.githubusercontent.com/2090066/144938995-080268ce-510d-4b76-b3c1-b691fbb814f4.png">
[CZO thread](https://chat.zulip.org/#narrow/stream/101-design/topic/.22click.20to.20reply.22.20whale)
| Hello @zulip/server-onboarding members, this issue was labeled with the "area: onboarding" label, so you may want to check it out!
<!-- areaLabelAddition -->
@zulipbot claim | 2021-12-07T10:10:28 |
zulip/zulip | 20,579 | zulip__zulip-20579 | [
"19732"
] | ad232c9b65b2fec95b74acb4a25e0828291376cd | diff --git a/zerver/lib/upload.py b/zerver/lib/upload.py
--- a/zerver/lib/upload.py
+++ b/zerver/lib/upload.py
@@ -12,6 +12,7 @@
from datetime import timedelta
from mimetypes import guess_extension, guess_type
from typing import IO, Any, Callable, Optional, Tuple
+from urllib.parse import urljoin
import boto3
import botocore
@@ -32,6 +33,7 @@
from zerver.lib.avatar_hash import user_avatar_path
from zerver.lib.exceptions import ErrorCode, JsonableError
+from zerver.lib.outgoing_http import OutgoingSession
from zerver.lib.utils import assert_is_not_none
from zerver.models import (
Attachment,
@@ -1143,3 +1145,52 @@ def upload_export_tarball(
def delete_export_tarball(export_path: str) -> Optional[str]:
return upload_backend.delete_export_tarball(export_path)
+
+
+def get_emoji_file_content(
+ session: OutgoingSession, emoji_url: str, emoji_id: int, logger: logging.Logger
+) -> bytes:
+ original_emoji_url = emoji_url + ".original"
+
+ logger.info("Downloading %s", original_emoji_url)
+ response = session.get(original_emoji_url)
+ if response.status_code == 200:
+ assert type(response.content) == bytes
+ return response.content
+
+ logger.info("Error fetching emoji from URL %s", original_emoji_url)
+ logger.info("Trying %s instead", emoji_url)
+ response = session.get(emoji_url)
+ if response.status_code == 200:
+ assert type(response.content) == bytes
+ return response.content
+ logger.info("Error fetching emoji from URL %s", emoji_url)
+ logger.error("Could not fetch emoji %s", emoji_id)
+ raise AssertionError(f"Could not fetch emoji {emoji_id}")
+
+
+def handle_reupload_emojis_event(realm: Realm, logger: logging.Logger) -> None:
+ from zerver.lib.emoji import get_emoji_url
+
+ session = OutgoingSession(role="reupload_emoji", timeout=3, max_retries=3)
+
+ query = RealmEmoji.objects.filter(realm=realm).order_by("id")
+
+ for realm_emoji in query:
+ logger.info("Processing emoji %s", realm_emoji.id)
+ emoji_filename = realm_emoji.file_name
+ emoji_url = get_emoji_url(emoji_filename, realm_emoji.realm_id)
+ if emoji_url.startswith("/"):
+ emoji_url = urljoin(realm_emoji.realm.uri, emoji_url)
+
+ emoji_file_content = get_emoji_file_content(session, emoji_url, realm_emoji.id, logger)
+
+ emoji_bytes_io = io.BytesIO(emoji_file_content)
+
+ user_profile = realm_emoji.author
+ # When this runs, emojis have already been migrated to always have .author set.
+ assert user_profile is not None
+
+ logger.info("Reuploading emoji %s", realm_emoji.id)
+ realm_emoji.is_animated = upload_emoji_image(emoji_bytes_io, emoji_filename, user_profile)
+ realm_emoji.save(update_fields=["is_animated"])
diff --git a/zerver/migrations/0376_set_realmemoji_author_and_reupload_realmemoji.py b/zerver/migrations/0376_set_realmemoji_author_and_reupload_realmemoji.py
new file mode 100644
--- /dev/null
+++ b/zerver/migrations/0376_set_realmemoji_author_and_reupload_realmemoji.py
@@ -0,0 +1,49 @@
+from django.db import migrations
+from django.db.backends.postgresql.schema import DatabaseSchemaEditor
+from django.db.migrations.state import StateApps
+
+from zerver.lib.queue import queue_json_publish
+
+
+def set_emoji_author(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
+ """
+ This migration establishes the invariant that all RealmEmoji objects have .author set
+ and queues events for reuploading all RealmEmoji.
+ """
+ RealmEmoji = apps.get_model("zerver", "RealmEmoji")
+ Realm = apps.get_model("zerver", "Realm")
+ UserProfile = apps.get_model("zerver", "UserProfile")
+ ROLE_REALM_OWNER = 100
+
+ realm_emoji_to_update = []
+ for realm_emoji in RealmEmoji.objects.all():
+ if realm_emoji.author_id is None:
+ user_profile = (
+ UserProfile.objects.filter(
+ realm_id=realm_emoji.realm_id, is_active=True, role=ROLE_REALM_OWNER
+ )
+ .order_by("id")
+ .first()
+ )
+ realm_emoji.author_id = user_profile.id
+ realm_emoji_to_update.append(realm_emoji)
+
+ RealmEmoji.objects.bulk_update(realm_emoji_to_update, ["author_id"])
+
+ for realm_id in Realm.objects.order_by("id").values_list("id", flat=True):
+ event = {
+ "type": "reupload_realm_emoji",
+ "realm_id": realm_id,
+ }
+ queue_json_publish("deferred_work", event)
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ("zerver", "0375_invalid_characters_in_stream_names"),
+ ]
+
+ operations = [
+ migrations.RunPython(set_emoji_author, reverse_code=migrations.RunPython.noop),
+ ]
diff --git a/zerver/worker/queue_processors.py b/zerver/worker/queue_processors.py
--- a/zerver/worker/queue_processors.py
+++ b/zerver/worker/queue_processors.py
@@ -88,6 +88,7 @@
send_future_email,
)
from zerver.lib.timestamp import timestamp_to_datetime
+from zerver.lib.upload import handle_reupload_emojis_event
from zerver.lib.url_preview import preview as url_preview
from zerver.models import (
Message,
@@ -1064,6 +1065,13 @@ def failure_processor(event: Dict[str, Any]) -> None:
user_profile.realm.string_id,
time.time() - start,
)
+ elif event["type"] == "reupload_realm_emoji":
+ # This is a special event queued by the migration for reuploading emojis.
+ # We don't want to run the necessary code in the actual migration, so it simply
+ # queues the necessary event, and the actual work is done here in the queue worker.
+ realm = Realm.objects.get(id=event["realm_id"])
+ logger.info("Processing reupload_realm_emoji event for realm %s", realm.id)
+ handle_reupload_emojis_event(realm, logger)
end = time.time()
logger.info("deferred_work processed %s event (%dms)", event["type"], (end - start) * 1000)
| Add migration to re-upload all animated custom emoji
Following https://github.com/zulip/zulip/pull/19563, we likely want to re-upload all custom emoji, in case they are animated emoji; reuploading animated emoji is required for us to get the benefits of having that emoji animate only on hover with the new feature in #19563.
The best way to do this is probably with a migration, since that'll ensure it gets run exactly once on every server. We'll likely want the migration to use `atomic=False`, since there could be many thousands of custom emoji on large servers like Zulip Cloud.
Essentially, the migration should just loop through all custom emoji configured on the server, download the emoji, call `upload_emoji_image` on each image. I think the code ends up something like this:
```
for realm_emoji in RealmEmoji.objects.all():
    emoji_filename, emoji_file = # new code to get the original file and filename
    user_profile = realm_emoji.author
    if user_profile is None:
        # We only use the user_profile to get the realm
        user_profile = UserProfile.objects.filter(realm=realm_emoji.realm, is_active=True, role=UserProfile.ROLE_REALM_OWNER).first()
        # Should we mark the first owner as the uploader of this custom emoji while we're at it?
        # This would establish for the future the invariant that all custom emoji have an owner.
        realm_emoji.author = user_profile
    realm_emoji.is_animated = upload_emoji_image(emoji_file, emoji_filename, user_profile)
    realm_emoji.save(update_fields=["author", "is_animated"])
```
@Riken-Shah @mateuszmandera FYI.
| Hello @zulip/server-emoji, @zulip/server-production members, this issue was labeled with the "area: emoji", "area: production" labels, so you may want to check it out!
<!-- areaLabelAddition -->
Hello @mateuszmandera, you have been unassigned from this issue because you have not updated this issue or any referenced pull requests for over 14 days.
You can reclaim this issue or claim any other issue by commenting `@zulipbot claim` on that issue.
Thanks for your contributions, and hope to see you again soon!
Hello @mateuszmandera, you have been unassigned from this issue because you have not updated this issue or any referenced pull requests for over 14 days.
You can reclaim this issue or claim any other issue by commenting `@zulipbot claim` on that issue.
Thanks for your contributions, and hope to see you again soon!
@zulipbot claim
Welcome to Zulip, @Fingel! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here's some tips to get you off to a good start:
- Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
- [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
@Fingel I already have a PR open for this at #20579
> @Fingel I already have a PR open for this at #20579
Hi mateuszmandera,
I found this issue by searching for "Needs Help" issues that were unclaimed and not in progress. I am just trying to get a feel for the codebase. I don't mean to step on your toes, but I was not aware it was currently being worked on.
I'm fine with my PR being ignored if it is redundant.
Hello @mateuszmandera, you have been unassigned from this issue because you have not updated this issue or any referenced pull requests for over 14 days.
You can reclaim this issue or claim any other issue by commenting `@zulipbot claim` on that issue.
Thanks for your contributions, and hope to see you again soon!
@mateuszmandera Mind taking over this issue? It sounds like we'll be going with your PR with maybe some elements of mine included. | 2021-12-15T21:19:35 |
|
zulip/zulip | 20,589 | zulip__zulip-20589 | [
"20127"
] | 0b454dda1207c24ba19d51d7cd5839ea3c349f67 | diff --git a/zerver/lib/streams.py b/zerver/lib/streams.py
--- a/zerver/lib/streams.py
+++ b/zerver/lib/streams.py
@@ -1,5 +1,6 @@
from typing import Collection, List, Optional, Set, Tuple, Union
+from django.db import transaction
from django.db.models.query import QuerySet
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
@@ -102,42 +103,44 @@ def create_stream_if_needed(
realm, invite_only, history_public_to_subscribers
)
- (stream, created) = Stream.objects.get_or_create(
- realm=realm,
- name__iexact=stream_name,
- defaults=dict(
- name=stream_name,
- description=stream_description,
- invite_only=invite_only,
- is_web_public=is_web_public,
- stream_post_policy=stream_post_policy,
- history_public_to_subscribers=history_public_to_subscribers,
- is_in_zephyr_realm=realm.is_zephyr_mirror_realm,
- message_retention_days=message_retention_days,
- ),
- )
+ with transaction.atomic():
+ (stream, created) = Stream.objects.get_or_create(
+ realm=realm,
+ name__iexact=stream_name,
+ defaults=dict(
+ name=stream_name,
+ description=stream_description,
+ invite_only=invite_only,
+ is_web_public=is_web_public,
+ stream_post_policy=stream_post_policy,
+ history_public_to_subscribers=history_public_to_subscribers,
+ is_in_zephyr_realm=realm.is_zephyr_mirror_realm,
+ message_retention_days=message_retention_days,
+ ),
+ )
+ if created:
+ recipient = Recipient.objects.create(type_id=stream.id, type=Recipient.STREAM)
+
+ stream.recipient = recipient
+ stream.rendered_description = render_stream_description(stream_description)
+ stream.save(update_fields=["recipient", "rendered_description"])
+
+ event_time = timezone_now()
+ RealmAuditLog.objects.create(
+ realm=realm,
+ acting_user=acting_user,
+ modified_stream=stream,
+ event_type=RealmAuditLog.STREAM_CREATED,
+ event_time=event_time,
+ )
if created:
- recipient = Recipient.objects.create(type_id=stream.id, type=Recipient.STREAM)
-
- stream.recipient = recipient
- stream.rendered_description = render_stream_description(stream_description)
- stream.save(update_fields=["recipient", "rendered_description"])
-
if stream.is_public():
send_stream_creation_event(stream, active_non_guest_user_ids(stream.realm_id))
else:
realm_admin_ids = [user.id for user in stream.realm.get_admin_users_and_bots()]
send_stream_creation_event(stream, realm_admin_ids)
- event_time = timezone_now()
- RealmAuditLog.objects.create(
- realm=realm,
- acting_user=acting_user,
- modified_stream=stream,
- event_type=RealmAuditLog.STREAM_CREATED,
- event_time=event_time,
- )
return stream, created
| Create stream in an atomic transaction
`create_stream_if_needed` does:
https://github.com/zulip/zulip/blob/862061fa53ad085e1a528a26cc89e636485fb0b9/zerver/lib/streams.py#L105-L125
This has a window when the newly-created stream has no `Recipient` object, which can lead to odd exceptions. We should create the interconnected Stream and Recipient objects in a single transaction, so that other reads can't find a stream with inconsistent state.
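As a simplified sketch of the shape of the fix (the committed version in the patch above also handles descriptions, permissions, audit-log rows, and events), the `Stream` and its `Recipient` get created inside one transaction:
```
from django.db import transaction

from zerver.models import Recipient, Stream

def create_stream_with_recipient(realm, stream_name):
    # Create the Stream and its Recipient atomically, so no concurrent
    # reader can observe a stream whose recipient is still unset.
    with transaction.atomic():
        stream, created = Stream.objects.get_or_create(
            realm=realm, name__iexact=stream_name, defaults=dict(name=stream_name)
        )
        if created:
            recipient = Recipient.objects.create(type_id=stream.id, type=Recipient.STREAM)
            stream.recipient = recipient
            stream.save(update_fields=["recipient"])
    return stream, created
```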
| Hello @zulip/server-production members, this issue was labeled with the "area: production" label, so you may want to check it out!
<!-- areaLabelAddition -->
@mateuszmandera @abhijeetbodas2001 FYI.
@zulipbot claim
Hello @aadityasinha-dotcom, you have been unassigned from this issue because you have not updated this issue or any referenced pull requests for over 14 days.
You can reclaim this issue or claim any other issue by commenting `@zulipbot claim` on that issue.
Thanks for your contributions, and hope to see you again soon!
| 2021-12-16T14:53:01 |
|
zulip/zulip | 20,678 | zulip__zulip-20678 | [
"2559"
] | fc13dd6f3d590566312392f9f10d0fd7644d485e | diff --git a/scripts/lib/zulip_tools.py b/scripts/lib/zulip_tools.py
--- a/scripts/lib/zulip_tools.py
+++ b/scripts/lib/zulip_tools.py
@@ -623,7 +623,7 @@ def list_supervisor_processes(*args: str) -> List[str]:
universal_newlines=True,
stdout=subprocess.PIPE,
)
- # `supercisorctl status` returns 3 if any are stopped, which is
+ # `supervisorctl status` returns 3 if any are stopped, which is
# fine here; and exit code 4 is for no such process, which is
# handled below.
if worker_status.returncode not in (0, 3, 4):
| Support restarting the server without rejecting any requests
In theory, it should be possible with uwsgi and its `master=true` setting to restart the server with 0 requests being rejected due to the service being down (the approach seems to be the obvious thing of queuing requests in the socket until the new processes are up). I tried this briefly with our supervisord and ran into problems where it would just fail to restart, so some investigation is required into how to do this properly.
| @zulipbot claim
@timabbott,
Supervisord doesn't support reloading applications; it only provides an interface for restarting programs via stop/start commands. This means that when we send a HUP signal to uWSGI through supervisord, uWSGI starts reloading its child processes, but the master process stays alive. Supervisord, which is waiting for the application to stop, then kills the master process after the `stopwaitsecs` period. This behavior is the cause of the "502 bad gateway" errors when restarting the `zulip-django` application with supervisord.
For graceful reloading, we can enable the master FIFO mode (http://uwsgi-docs.readthedocs.io/en/latest/MasterFIFO.html) and write `r` to the master FIFO file. This would let us reload the uWSGI workers without rejecting any requests.
There are a couple of additional ways to reload uWSGI workers through the FIFO file.
One of them is using `lazy-apps` mode. In this mode, each worker loads its own copy of the application (instead of sharing memory with the master), which makes it possible to wait for the running workers and then restart each of them. The downside is that it uses more memory than the default mode.
Another way is chain reloading, which also uses `lazy-apps` mode and is triggered by writing `c` to the FIFO file. In this mode, uWSGI restarts one worker at a time, and does not reload the next one until the previous one is ready to receive requests again.
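For illustration, triggering either reload is just a one-byte write to the master FIFO. This is an untested sketch; the FIFO path is made up and would need to match a `master-fifo` setting in the uWSGI configuration:

```python
# Sketch: ask a running uWSGI master to reload its workers via the master FIFO.
def reload_uwsgi(fifo_path: str = "/home/zulip/deployments/uwsgi-control", chain: bool = False) -> None:
    # "r" requests a graceful reload of all workers; "c" requests a chain
    # reload (one worker at a time), which needs `lazy-apps` to be enabled.
    command = "c" if chain else "r"
    with open(fifo_path, "w") as fifo:
        fifo.write(command)
```

With chain reloading, the workers that have not been recycled yet keep serving traffic, so in principle no requests need to be rejected during a restart.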
Hello @kkanahin, you claimed this issue to work on it, but this issue and any referenced pull requests haven't been updated for a week. Are you still working on this issue?
If so, please update this issue by leaving a comment on this issue to let me know that you're still working on it. Otherwise, I'll automatically remove you from this issue in 3 days.
If you've decided to work on something else, simply comment `@zulipbot abandon` so that someone else can claim it and continue from where you left off.
Thank you for your valuable contributions to Zulip!
@timabbott Could you please review my investigations.
Cool, I think my conclusion from that is that this is somewhat tricky to do with supervisord, and potentially something we should defer until later.
I think we need separate tools for reloading services, and should let supervisord handle just the stop/start commands.
Hello @kkanahin, you have been unassigned from this issue because you have not updated this issue or any referenced pull requests for over ten days.
You can reclaim this issue or claim any other issue by commenting `@zulipbot claim` on that issue.
Thanks for your contributions, and hope to see you again soon!
| 2022-01-03T19:30:54 |
|
zulip/zulip | 20,697 | zulip__zulip-20697 | [
"20595"
] | 9aa5082d63e9d2f36c1055c5a855ea2fc1f61123 | diff --git a/zerver/views/registration.py b/zerver/views/registration.py
--- a/zerver/views/registration.py
+++ b/zerver/views/registration.py
@@ -511,6 +511,7 @@ def accounts_register(
"MAX_NAME_LENGTH": str(UserProfile.MAX_NAME_LENGTH),
"MAX_PASSWORD_LENGTH": str(form.MAX_PASSWORD_LENGTH),
"MAX_REALM_SUBDOMAIN_LENGTH": str(Realm.MAX_REALM_SUBDOMAIN_LENGTH),
+ "corporate_enabled": settings.CORPORATE_ENABLED,
"sorted_realm_types": sorted(
Realm.ORG_TYPES.values(), key=lambda d: d["display_order"]
),
| diff --git a/zerver/lib/test_classes.py b/zerver/lib/test_classes.py
--- a/zerver/lib/test_classes.py
+++ b/zerver/lib/test_classes.py
@@ -653,7 +653,7 @@ def submit_reg_form_for_user(
source_realm_id: str = "",
key: Optional[str] = None,
realm_type: int = Realm.ORG_TYPES["business"]["id"],
- enable_marketing_emails: bool = True,
+ enable_marketing_emails: Optional[bool] = None,
is_demo_organization: bool = False,
**kwargs: ClientArg,
) -> HttpResponse:
@@ -678,9 +678,10 @@ def submit_reg_form_for_user(
"from_confirmation": from_confirmation,
"default_stream_group": default_stream_groups,
"source_realm_id": source_realm_id,
- "enable_marketing_emails": enable_marketing_emails,
"is_demo_organization": is_demo_organization,
}
+ if enable_marketing_emails is not None:
+ payload["enable_marketing_emails"] = enable_marketing_emails
if password is not None:
payload["password"] = password
if realm_in_root_domain is not None:
diff --git a/zerver/tests/test_signup.py b/zerver/tests/test_signup.py
--- a/zerver/tests/test_signup.py
+++ b/zerver/tests/test_signup.py
@@ -3232,6 +3232,60 @@ def test_create_realm_with_marketing_emails_enabled(self) -> None:
self.assertEqual(user.realm, realm)
self.assertTrue(user.enable_marketing_emails)
+ @override_settings(OPEN_REALM_CREATION=True, CORPORATE_ENABLED=False)
+ def test_create_realm_without_prompting_for_marketing_emails(self) -> None:
+ password = "test"
+ string_id = "zuliptest"
+ email = "[email protected]"
+ realm_name = "Test"
+
+ # Make sure the realm does not exist
+ with self.assertRaises(Realm.DoesNotExist):
+ get_realm(string_id)
+
+ # Create new realm with the email
+ result = self.client_post("/new/", {"email": email})
+ self.assertEqual(result.status_code, 302)
+ self.assertTrue(result["Location"].endswith(f"/accounts/new/send_confirm/{email}"))
+ result = self.client_get(result["Location"])
+ self.assert_in_response("Check your email so we can get started.", result)
+
+ # Visit the confirmation link.
+ confirmation_url = self.get_confirmation_url_from_outbox(email)
+ result = self.client_get(confirmation_url)
+ self.assertEqual(result.status_code, 200)
+
+ # Simulate the initial POST that is made by confirm-preregistration.js
+ # by triggering submit on confirm_preregistration.html.
+ payload = {
+ "full_name": "",
+ "key": find_key_by_email(email),
+ "from_confirmation": "1",
+ }
+ result = self.client_post("/accounts/register/", payload)
+ # Assert that the form did not prompt the user for enabling
+ # marketing emails.
+ self.assert_not_in_success_response(['input id="id_enable_marketing_emails"'], result)
+
+ result = self.submit_reg_form_for_user(
+ email,
+ password,
+ realm_subdomain=string_id,
+ realm_name=realm_name,
+ )
+ self.assertEqual(result.status_code, 302)
+
+ result = self.client_get(result.url, subdomain=string_id)
+ self.assertEqual(result.status_code, 302)
+ self.assertEqual(result.url, "http://zuliptest.testserver")
+
+ # Make sure the realm is created
+ realm = get_realm(string_id)
+ self.assertEqual(realm.string_id, string_id)
+ user = get_user(email, realm)
+ self.assertEqual(user.realm, realm)
+ self.assertFalse(user.enable_marketing_emails)
+
@override_settings(OPEN_REALM_CREATION=True)
def test_create_realm_with_marketing_emails_disabled(self) -> None:
password = "test"
| Self-hosted installation incorrectly prompts to "Subscribe me to Zulip's low-traffic newsletter"
The checkbox added by #19573 (tweaked by 191b1ac2be46fe4219c170ab318aa10c714cb49a) for "Subscribe me to Zulip's low-traffic newsletter (a few emails a year)" should not show up on self-hosted installations, where it doesn't in fact subscribe you to anything. It should be conditional on `settings.CORPORATE_ENABLED`.
Cc @eeshangarg
| Hello @zulip/server-misc members, this issue was labeled with the "area: portico" label, so you may want to check it out!
<!-- areaLabelAddition -->
@zulipbot claim
Hello @Biki-das!
Thanks for your interest in Zulip! You have attempted to claim an issue without the labels "help wanted", "good first issue". Since you're a new contributor, you can only claim and submit pull requests for issues with the [help wanted](https://github.com/zulip/zulip/issues?q=is%3Aopen+is%3Aissue+no%3Aassignee+label%3A%22help+wanted%22) or [good first issue](https://github.com/zulip/zulip/issues?q=is%3Aopen+is%3Aissue+no%3Aassignee+label%3A%22good+first+issue%22) labels.
If this is your first time here, we recommend reading our [guide for new contributors](https://zulip.readthedocs.io/en/latest/overview/contributing.html) before getting started.
| 2022-01-05T22:56:02 |
zulip/zulip | 20,763 | zulip__zulip-20763 | [
"20383"
] | 82f27072784f591a6bdae70b745f9d233b538d9b | diff --git a/zerver/lib/actions.py b/zerver/lib/actions.py
--- a/zerver/lib/actions.py
+++ b/zerver/lib/actions.py
@@ -813,6 +813,29 @@ def do_reactivate_user(user_profile: UserProfile, *, acting_user: Optional[UserP
if user_profile.is_bot:
notify_created_bot(user_profile)
+ subscribed_recipient_ids = Subscription.objects.filter(
+ user_profile_id=user_profile.id, active=True, recipient__type=Recipient.STREAM
+ ).values_list("recipient__type_id", flat=True)
+ subscribed_streams = Stream.objects.filter(id__in=subscribed_recipient_ids, deactivated=False)
+ subscriber_peer_info = bulk_get_subscriber_peer_info(
+ realm=user_profile.realm,
+ streams=subscribed_streams,
+ )
+
+ altered_user_dict: Dict[int, Set[int]] = defaultdict(set)
+ for stream in subscribed_streams:
+ altered_user_dict[stream.id] = {user_profile.id}
+
+ stream_dict = {stream.id: stream for stream in subscribed_streams}
+
+ send_peer_subscriber_events(
+ op="peer_add",
+ realm=user_profile.realm,
+ altered_user_dict=altered_user_dict,
+ stream_dict=stream_dict,
+ private_peer_dict=subscriber_peer_info.private_peer_dict,
+ )
+
def active_humans_in_realm(realm: Realm) -> Sequence[UserProfile]:
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)
| diff --git a/zerver/tests/test_events.py b/zerver/tests/test_events.py
--- a/zerver/tests/test_events.py
+++ b/zerver/tests/test_events.py
@@ -1848,10 +1848,23 @@ def test_do_deactivate_user(self) -> None:
def test_do_reactivate_user(self) -> None:
bot = self.create_bot("test")
+ self.subscribe(bot, "Denmark")
+ self.make_stream("Test private stream", invite_only=True)
+ self.subscribe(bot, "Test private stream")
do_deactivate_user(bot, acting_user=None)
action = lambda: do_reactivate_user(bot, acting_user=None)
- events = self.verify_action(action, num_events=2)
+ events = self.verify_action(action, num_events=3)
+ check_realm_bot_add("events[1]", events[1])
+ check_subscription_peer_add("events[2]", events[2])
+
+ # Test 'peer_add' event for private stream is received only if user is subscribed to it.
+ do_deactivate_user(bot, acting_user=None)
+ self.subscribe(self.example_user("hamlet"), "Test private stream")
+ action = lambda: do_reactivate_user(bot, acting_user=None)
+ events = self.verify_action(action, num_events=4)
check_realm_bot_add("events[1]", events[1])
+ check_subscription_peer_add("events[2]", events[2])
+ check_subscription_peer_add("events[3]", events[3])
def test_do_deactivate_realm(self) -> None:
realm = self.user_profile.realm
| Reactivated users don't appear in "Manage streams" until you reload the browser
If you deactivate a user, reload the browser, and then reactivate them, the "Manage streams" UI will incorrectly show the user as not subscribed to streams. This leads to the incorrect impression that Zulip does not preserve one's subscriptions when an account is deactivated.
This needs further debugging; I suspect we need `do_reactivate_user` to call `send_peer_subscriber_events`.
See https://chat.zulip.org/#narrow/stream/6-frontend/topic/Keep.20stream.20subscriptions.20if.20deactivated/near/1287667 for debugging details.
| Hello @zulip/server-misc members, this issue was labeled with the "area: real-time sync" label, so you may want to check it out!
<!-- areaLabelAddition -->
@zulipbot claim
@zulipbot claim
Welcome to Zulip, @thecalendar! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here's some tips to get you off to a good start:
- Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
- [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
Hey @timabbott, I tried reproducing the bug. I logged in as an administrator and deactivated the profile 'Aaron'. I checked Manage streams -> Subscribers to ensure that Aaron was deactivated, and rightly so. I then reactivated Aaron and refreshed the page, went to Subscribers, and found his name. I am unsure whether there is a bug here or whether it was resolved. Kindly guide me if I have misinterpreted the question in any way.
I am attaching screenshots for your reference.
Post Deactivating :

Post reactivating + refreshing:

@thecalendar The bug is that `Aaron` is not present in the list before **refreshing the page**.
@sahil839 Ah, I thought so. Thank you, will look into it.
Hello @sampriti026, you have been unassigned from this issue because you have not updated this issue or any referenced pull requests for over 14 days.
You can reclaim this issue or claim any other issue by commenting `@zulipbot claim` on that issue.
Thanks for your contributions, and hope to see you again soon!
| 2022-01-12T14:43:45 |
zulip/zulip | 20,788 | zulip__zulip-20788 | [
"20759"
] | d5a784a1cae37fd7cf18734376e7e69168c9cf3c | diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -48,4 +48,4 @@
# historical commits sharing the same major version, in which case a
# minor version bump suffices.
-PROVISION_VERSION = "173.3"
+PROVISION_VERSION = "173.4"
| diff --git a/frontend_tests/node_tests/lightbox.js b/frontend_tests/node_tests/lightbox.js
--- a/frontend_tests/node_tests/lightbox.js
+++ b/frontend_tests/node_tests/lightbox.js
@@ -54,8 +54,8 @@ test("pan_and_zoom", ({override_rewire}) => {
};
override_rewire(lightbox, "render_lightbox_list_images", () => {});
-
- lightbox.open(img);
+ const open_image = lightbox.build_open_image_function();
+ open_image(img);
assert.equal(fetched_zid, 1234);
});
@@ -88,6 +88,7 @@ test("youtube", ({override_rewire}) => {
override_rewire(lightbox, "render_lightbox_list_images", () => {});
- lightbox.open(img);
+ const open_image = lightbox.build_open_image_function();
+ open_image(img);
assert.equal($(".image-actions .open").attr("href"), href);
});
| "Pan and zoom" cuts off images instead of using the available space
If you have a tall image and a wide monitor (and wide browser viewport), and you try to zoom… the image stays trapped inside the same box it occupied before you even tried to zoom. If the image is super wide instead of tall, the same thing happens the other way around.
This leads to a lot of frustrating panning around, to look at the different parts of the image through this narrow keyhole, while tons of screen space next to it doesn't get used.
This is the biggest of the issues described by @vanclute in #18939. It was reported again by @alexanderglueck as #19837, and I just ran into it myself ([chat](https://chat.zulip.org/#narrow/stream/6-frontend/topic/pan.2Fzoom/near/1308717)). Here's a nice illustration from #19837:

Instead, when zooming we should use the full space available. This may be bigger than the area the image occupied when it was scaled down to fit completely in the space available, because the available box may have a different aspect ratio from the image.
| Hello @zulip/server-message-view members, this issue was labeled with the "area: message view" label, so you may want to check it out!
<!-- areaLabelAddition -->
It's also mentioned that when the pan and zoom mode is enabled the image becomes somewhat blurry.
@zulipbot claim
Hello @Fingel, it looks like you've currently claimed 1 issue in this repository. We encourage new contributors to focus their efforts on at most 1 issue at a time, so please complete your work on your other claimed issues before trying to claim this issue again.
We look forward to your valuable contributions!
| 2022-01-13T23:59:07 |
zulip/zulip | 20,807 | zulip__zulip-20807 | [
"20575",
"20575"
] | a1e71e8639095fc22d415baa67ec313bd3e02adf | diff --git a/zerver/lib/actions.py b/zerver/lib/actions.py
--- a/zerver/lib/actions.py
+++ b/zerver/lib/actions.py
@@ -105,6 +105,7 @@
MessageDict,
SendMessageRequest,
access_message,
+ bulk_access_messages,
get_last_message_id,
normalize_body,
render_markdown,
@@ -167,6 +168,7 @@
TOPIC_NAME,
filter_by_exact_message_topic,
filter_by_topic_name_via_message,
+ messages_for_topic,
save_message_for_edit_use_case,
update_edit_history,
update_messages_for_topic_edit,
@@ -6488,6 +6490,7 @@ def send_message_moved_breadcrumbs(
new_stream: Stream,
new_topic: Optional[str],
new_thread_notification_string: Optional[str],
+ changed_messages_count: int,
) -> None:
# Since moving content between streams is highly disruptive,
# it's worth adding a couple tombstone messages showing what
@@ -6510,6 +6513,7 @@ def send_message_moved_breadcrumbs(
new_thread_notification_string.format(
old_location=old_topic_link,
user=user_mention,
+ changed_messages_count=changed_messages_count,
),
)
@@ -6523,6 +6527,7 @@ def send_message_moved_breadcrumbs(
old_thread_notification_string.format(
user=user_mention,
new_location=new_topic_link,
+ changed_messages_count=changed_messages_count,
),
)
@@ -7017,17 +7022,65 @@ def user_info(um: UserMessage) -> Dict[str, Any]:
if len(changed_messages) > 0 and new_stream is not None and stream_being_edited is not None:
# Notify users that the topic was moved.
+ changed_messages_count = len(changed_messages)
+
+ if propagate_mode == "change_all":
+ moved_all_visible_messages = True
+ else:
+ # With other propagate modes, if the user in fact moved
+ # all messages in the stream, we want to explain it was a
+ # full-topic move.
+ #
+ # For security model reasons, we don't want to allow a
+ # user to take any action that would leak information
+ # about older messages they cannot access (E.g. the only
+ # remaining messages are in a stream without shared
+ # history). The bulk_access_messages call below addresses
+ # that concern.
+ #
+ # bulk_access_messages is inefficient for this task, since
+ # we just want to do the exists() version of this
+ # query. But it's nice to reuse code, and this bulk
+ # operation is likely cheaper than a `GET /messages`
+ # unless the topic has thousands of messages of history.
+ unmoved_messages = messages_for_topic(
+ stream_being_edited.recipient_id,
+ orig_topic_name,
+ )
+ visible_unmoved_messages = bulk_access_messages(
+ user_profile, unmoved_messages, stream=stream_being_edited
+ )
+ moved_all_visible_messages = len(visible_unmoved_messages) == 0
+
old_thread_notification_string = None
if send_notification_to_old_thread:
- old_thread_notification_string = gettext_lazy(
- "This topic was moved by {user} to {new_location}"
- )
+ if moved_all_visible_messages:
+ old_thread_notification_string = gettext_lazy(
+ "This topic was moved to {new_location} by {user}."
+ )
+ elif changed_messages_count == 1:
+ old_thread_notification_string = gettext_lazy(
+ "A message was moved from this topic to {new_location} by {user}."
+ )
+ else:
+ old_thread_notification_string = gettext_lazy(
+ "{changed_messages_count} messages were moved from this topic to {new_location} by {user}."
+ )
new_thread_notification_string = None
if send_notification_to_new_thread:
- new_thread_notification_string = gettext_lazy(
- "This topic was moved here from {old_location} by {user}"
- )
+ if moved_all_visible_messages:
+ new_thread_notification_string = gettext_lazy(
+ "This topic was moved here from {old_location} by {user}."
+ )
+ elif changed_messages_count == 1:
+ new_thread_notification_string = gettext_lazy(
+ "A message was moved here from {old_location} by {user}."
+ )
+ else:
+ new_thread_notification_string = gettext_lazy(
+ "{changed_messages_count} messages were moved here from {old_location} by {user}."
+ )
send_message_moved_breadcrumbs(
user_profile,
@@ -7037,6 +7090,7 @@ def user_info(um: UserMessage) -> Dict[str, Any]:
new_stream,
topic_name,
new_thread_notification_string,
+ changed_messages_count,
)
if (
| diff --git a/zerver/tests/test_message_edit.py b/zerver/tests/test_message_edit.py
--- a/zerver/tests/test_message_edit.py
+++ b/zerver/tests/test_message_edit.py
@@ -117,9 +117,7 @@ def test_edit_message_no_changes(self) -> None:
)
result = self.client_patch(
"/json/messages/" + str(msg_id),
- {
- "message_id": msg_id,
- },
+ {},
)
self.assert_json_error(result, "Nothing to change")
@@ -134,7 +132,6 @@ def test_move_message_cant_move_private_message(self) -> None:
result = self.client_patch(
"/json/messages/" + str(msg_id),
{
- "message_id": msg_id,
"stream_id": verona.id,
},
)
@@ -150,7 +147,6 @@ def test_private_message_edit_topic(self) -> None:
result = self.client_patch(
"/json/messages/" + str(msg_id),
{
- "message_id": msg_id,
"topic": "Should not exist",
},
)
@@ -189,7 +185,6 @@ def test_edit_message_no_topic(self) -> None:
result = self.client_patch(
"/json/messages/" + str(msg_id),
{
- "message_id": msg_id,
"topic": " ",
},
)
@@ -203,7 +198,6 @@ def test_edit_message_invalid_topic(self) -> None:
result = self.client_patch(
"/json/messages/" + str(msg_id),
{
- "message_id": msg_id,
"topic": "editing\nfun",
},
)
@@ -217,7 +211,6 @@ def test_move_message_to_stream_with_content(self) -> None:
result = self.client_patch(
"/json/messages/" + str(msg_id),
{
- "message_id": msg_id,
"stream_id": new_stream.id,
"propagate_mode": "change_all",
"content": "Not allowed",
@@ -243,7 +236,6 @@ def test_edit_submessage(self) -> None:
result = self.client_patch(
"/json/messages/" + str(msg_id),
{
- "message_id": msg_id,
"content": "/poll Games?\nYES\nNO\nMaybe",
},
)
@@ -297,7 +289,6 @@ def test_save_message(self) -> None:
result = self.client_patch(
f"/json/messages/{msg_id}",
{
- "message_id": msg_id,
"content": "after edit",
},
)
@@ -307,7 +298,6 @@ def test_save_message(self) -> None:
result = self.client_patch(
f"/json/messages/{msg_id}",
{
- "message_id": msg_id,
"topic": "edited",
},
)
@@ -459,7 +449,6 @@ def test_edit_message_no_permission(self) -> None:
result = self.client_patch(
f"/json/messages/{msg_id}",
{
- "message_id": msg_id,
"content": "content after edit",
},
)
@@ -473,7 +462,6 @@ def test_edit_message_no_content(self) -> None:
result = self.client_patch(
f"/json/messages/{msg_id}",
{
- "message_id": msg_id,
"content": " ",
},
)
@@ -498,7 +486,6 @@ def test_edit_message_history_disabled(self) -> None:
result_1 = self.client_patch(
f"/json/messages/{msg_id_1}",
{
- "message_id": msg_id_1,
"content": new_content_1,
},
)
@@ -531,7 +518,6 @@ def test_edit_message_history(self) -> None:
result_1 = self.client_patch(
f"/json/messages/{msg_id_1}",
{
- "message_id": msg_id_1,
"content": new_content_1,
},
)
@@ -573,7 +559,6 @@ def test_edit_message_history(self) -> None:
result_2 = self.client_patch(
f"/json/messages/{msg_id_2}",
{
- "message_id": msg_id_2,
"content": new_content_2,
},
)
@@ -625,7 +610,6 @@ def test_empty_message_edit(self) -> None:
self.client_patch(
"/json/messages/" + str(msg_id),
{
- "message_id": msg_id,
"content": "We will edit this to also render as empty.",
},
)
@@ -654,7 +638,6 @@ def test_edit_link(self) -> None:
result_1 = self.client_patch(
f"/json/messages/{msg_id_1}",
{
- "message_id": msg_id_1,
"content": new_content_1,
},
)
@@ -732,7 +715,6 @@ def test_edit_cases(self) -> None:
result = self.client_patch(
f"/json/messages/{msg_id}",
{
- "message_id": msg_id,
"content": "content 2",
},
)
@@ -754,7 +736,6 @@ def test_edit_cases(self) -> None:
result = self.client_patch(
f"/json/messages/{msg_id}",
{
- "message_id": msg_id,
"topic": "topic 2",
},
)
@@ -767,7 +748,6 @@ def test_edit_cases(self) -> None:
result = self.client_patch(
f"/json/messages/{msg_id}",
{
- "message_id": msg_id,
"content": "content 3",
"topic": "topic 3",
},
@@ -792,7 +772,6 @@ def test_edit_cases(self) -> None:
result = self.client_patch(
f"/json/messages/{msg_id}",
{
- "message_id": msg_id,
"content": "content 4",
},
)
@@ -805,7 +784,6 @@ def test_edit_cases(self) -> None:
result = self.client_patch(
f"/json/messages/{msg_id}",
{
- "message_id": msg_id,
"topic": "topic 4",
},
)
@@ -1168,7 +1146,6 @@ def notify(user_id: int) -> Dict[str, Any]:
result = self.client_patch(
f"/json/messages/{message_id}",
{
- "message_id": message_id,
"content": "Hello @**everyone**",
},
)
@@ -1210,7 +1187,6 @@ def test_wildcard_mention_restrictions_when_editing(self) -> None:
result = self.client_patch(
"/json/messages/" + str(message_id),
{
- "message_id": message_id,
"content": "Hello @**everyone**",
},
)
@@ -1222,7 +1198,6 @@ def test_wildcard_mention_restrictions_when_editing(self) -> None:
result = self.client_patch(
"/json/messages/" + str(message_id),
{
- "message_id": message_id,
"content": "Hello @**everyone**",
},
)
@@ -1234,7 +1209,6 @@ def test_wildcard_mention_restrictions_when_editing(self) -> None:
result = self.client_patch(
"/json/messages/" + str(message_id),
{
- "message_id": message_id,
"content": "Hello @**everyone**",
},
)
@@ -1269,7 +1243,6 @@ def verify_edit_history(new_topic: str, len_edit_history: int) -> None:
result = self.client_patch(
f"/json/messages/{id1}",
{
- "message_id": id1,
"topic": new_topic,
"propagate_mode": "change_later",
},
@@ -1282,7 +1255,6 @@ def verify_edit_history(new_topic: str, len_edit_history: int) -> None:
result = self.client_patch(
f"/json/messages/{id1}",
{
- "message_id": id1,
"topic": new_topic,
"propagate_mode": "change_later",
},
@@ -1301,7 +1273,6 @@ def test_topic_and_content_edit(self) -> None:
result = self.client_patch(
"/json/messages/" + str(id1),
{
- "message_id": id1,
"topic": new_topic,
"propagate_mode": "change_later",
"content": "edited message",
@@ -1340,7 +1311,6 @@ def test_propagate_topic_forward(self) -> None:
result = self.client_patch(
f"/json/messages/{id1}",
{
- "message_id": id1,
"topic": "edited",
"propagate_mode": "change_later",
},
@@ -1365,7 +1335,6 @@ def test_propagate_all_topics(self) -> None:
result = self.client_patch(
f"/json/messages/{id2}",
{
- "message_id": id2,
"topic": "edited",
"propagate_mode": "change_all",
},
@@ -1389,7 +1358,6 @@ def test_propagate_all_topics_with_different_uppercase_letters(self) -> None:
result = self.client_patch(
f"/json/messages/{id2}",
{
- "message_id": id2,
"topic": "edited",
"propagate_mode": "change_all",
},
@@ -1415,7 +1383,6 @@ def test_move_message_to_stream(self) -> None:
result = self.client_patch(
f"/json/messages/{msg_id}",
{
- "message_id": msg_id,
"stream_id": new_stream.id,
"propagate_mode": "change_all",
},
@@ -1428,14 +1395,14 @@ def test_move_message_to_stream(self) -> None:
self.assert_length(messages, 1)
self.assertEqual(
messages[0].content,
- f"This topic was moved by @_**Iago|{user_profile.id}** to #**new stream>test**",
+ f"This topic was moved to #**new stream>test** by @_**Iago|{user_profile.id}**.",
)
messages = get_topic_messages(user_profile, new_stream, "test")
self.assert_length(messages, 4)
self.assertEqual(
messages[3].content,
- f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**",
+ f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
)
def test_move_message_realm_admin_cant_move_to_another_realm(self) -> None:
@@ -1451,7 +1418,6 @@ def test_move_message_realm_admin_cant_move_to_another_realm(self) -> None:
result = self.client_patch(
"/json/messages/" + str(msg_id),
{
- "message_id": msg_id,
"stream_id": new_stream.id,
"propagate_mode": "change_all",
},
@@ -1472,7 +1438,6 @@ def test_move_message_realm_admin_cant_move_to_private_stream_without_subscripti
result = self.client_patch(
"/json/messages/" + str(msg_id),
{
- "message_id": msg_id,
"stream_id": new_stream.id,
"propagate_mode": "change_all",
},
@@ -1497,7 +1462,6 @@ def test_move_message_realm_admin_cant_move_from_private_stream_without_subscrip
result = self.client_patch(
"/json/messages/" + str(msg_id),
{
- "message_id": msg_id,
"stream_id": verona.id,
"propagate_mode": "change_all",
},
@@ -1541,7 +1505,6 @@ def test_move_message_from_private_stream_message_access_checks(
result = self.client_patch(
"/json/messages/" + str(new_msg_id),
{
- "message_id": new_msg_id,
"stream_id": verona.id,
"propagate_mode": "change_all",
},
@@ -1568,7 +1531,6 @@ def test_move_message_to_stream_change_later(self) -> None:
result = self.client_patch(
f"/json/messages/{msg_id_later}",
{
- "message_id": msg_id_later,
"stream_id": new_stream.id,
"propagate_mode": "change_later",
},
@@ -1580,7 +1542,7 @@ def test_move_message_to_stream_change_later(self) -> None:
self.assertEqual(messages[0].id, msg_id)
self.assertEqual(
messages[1].content,
- f"This topic was moved by @_**Iago|{user_profile.id}** to #**new stream>test**",
+ f"2 messages were moved from this topic to #**new stream>test** by @_**Iago|{user_profile.id}**.",
)
messages = get_topic_messages(user_profile, new_stream, "test")
@@ -1588,7 +1550,95 @@ def test_move_message_to_stream_change_later(self) -> None:
self.assertEqual(messages[0].id, msg_id_later)
self.assertEqual(
messages[2].content,
- f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**",
+ f"2 messages were moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
+ )
+
+ def test_move_message_to_stream_change_later_all_moved(self) -> None:
+ (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
+ "iago", "test move stream", "new stream", "test"
+ )
+
+ result = self.client_patch(
+ f"/json/messages/{msg_id}",
+ {
+ "stream_id": new_stream.id,
+ "propagate_mode": "change_later",
+ },
+ )
+ self.assert_json_success(result)
+
+ messages = get_topic_messages(user_profile, old_stream, "test")
+ self.assert_length(messages, 1)
+ self.assertEqual(
+ messages[0].content,
+ f"This topic was moved to #**new stream>test** by @_**Iago|{user_profile.id}**.",
+ )
+
+ messages = get_topic_messages(user_profile, new_stream, "test")
+ self.assert_length(messages, 4)
+ self.assertEqual(messages[0].id, msg_id)
+ self.assertEqual(
+ messages[3].content,
+ f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
+ )
+
+ def test_move_message_to_stream_change_one(self) -> None:
+ (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
+ "iago", "test move stream", "new stream", "test"
+ )
+
+ result = self.client_patch(
+ "/json/messages/" + str(msg_id_later),
+ {
+ "stream_id": new_stream.id,
+ "propagate_mode": "change_one",
+ },
+ )
+ self.assert_json_success(result)
+
+ messages = get_topic_messages(user_profile, old_stream, "test")
+ self.assert_length(messages, 3)
+ self.assertEqual(messages[0].id, msg_id)
+ self.assertEqual(
+ messages[2].content,
+ f"A message was moved from this topic to #**new stream>test** by @_**Iago|{user_profile.id}**.",
+ )
+
+ messages = get_topic_messages(user_profile, new_stream, "test")
+ self.assert_length(messages, 2)
+ self.assertEqual(messages[0].id, msg_id_later)
+ self.assertEqual(
+ messages[1].content,
+ f"A message was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
+ )
+
+ def test_move_message_to_stream_change_all(self) -> None:
+ (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
+ "iago", "test move stream", "new stream", "test"
+ )
+
+ result = self.client_patch(
+ "/json/messages/" + str(msg_id_later),
+ {
+ "stream_id": new_stream.id,
+ "propagate_mode": "change_all",
+ },
+ )
+ self.assert_json_success(result)
+
+ messages = get_topic_messages(user_profile, old_stream, "test")
+ self.assert_length(messages, 1)
+ self.assertEqual(
+ messages[0].content,
+ f"This topic was moved to #**new stream>test** by @_**Iago|{user_profile.id}**.",
+ )
+
+ messages = get_topic_messages(user_profile, new_stream, "test")
+ self.assert_length(messages, 4)
+ self.assertEqual(messages[0].id, msg_id)
+ self.assertEqual(
+ messages[3].content,
+ f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
)
def test_move_message_between_streams_policy_setting(self) -> None:
@@ -1602,7 +1652,6 @@ def check_move_message_according_to_policy(role: int, expect_fail: bool = False)
result = self.client_patch(
"/json/messages/" + str(msg_id),
{
- "message_id": msg_id,
"stream_id": new_stream.id,
"propagate_mode": "change_all",
},
@@ -1692,7 +1741,6 @@ def check_move_message_to_stream(role: int, error_msg: Optional[str] = None) ->
result = self.client_patch(
"/json/messages/" + str(msg_id),
{
- "message_id": msg_id,
"stream_id": new_stream.id,
"propagate_mode": "change_all",
},
@@ -1788,7 +1836,6 @@ def test_move_message_to_stream_with_topic_editing_not_allowed(self) -> None:
result = self.client_patch(
"/json/messages/" + str(msg_id),
{
- "message_id": msg_id,
"stream_id": new_stream.id,
"propagate_mode": "change_all",
"topic": "new topic",
@@ -1799,7 +1846,6 @@ def test_move_message_to_stream_with_topic_editing_not_allowed(self) -> None:
result = self.client_patch(
f"/json/messages/{msg_id}",
{
- "message_id": msg_id,
"stream_id": new_stream.id,
"propagate_mode": "change_all",
},
@@ -1819,7 +1865,6 @@ def test_move_message_to_stream_and_topic(self) -> None:
result = self.client_patch(
f"/json/messages/{msg_id}",
{
- "message_id": msg_id,
"stream_id": new_stream.id,
"propagate_mode": "change_all",
"topic": "new topic",
@@ -1832,14 +1877,14 @@ def test_move_message_to_stream_and_topic(self) -> None:
self.assert_length(messages, 1)
self.assertEqual(
messages[0].content,
- f"This topic was moved by @_**Iago|{user_profile.id}** to #**new stream>new topic**",
+ f"This topic was moved to #**new stream>new topic** by @_**Iago|{user_profile.id}**.",
)
messages = get_topic_messages(user_profile, new_stream, "new topic")
self.assert_length(messages, 4)
self.assertEqual(
messages[3].content,
- f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**",
+ f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
)
self.assert_json_success(result)
@@ -1885,7 +1930,6 @@ def test_inaccessible_msg_after_stream_change(self) -> None:
result = self.client_patch(
f"/json/messages/{msg_id}",
{
- "message_id": msg_id,
"stream_id": new_stream.id,
"propagate_mode": "change_all",
"topic": "new topic",
@@ -1965,7 +2009,6 @@ def test_no_notify_move_message_to_stream(self) -> None:
result = self.client_patch(
f"/json/messages/{msg_id}",
{
- "message_id": msg_id,
"stream_id": new_stream.id,
"propagate_mode": "change_all",
"send_notification_to_old_thread": "false",
@@ -1989,7 +2032,6 @@ def test_notify_new_thread_move_message_to_stream(self) -> None:
result = self.client_patch(
f"/json/messages/{msg_id}",
{
- "message_id": msg_id,
"stream_id": new_stream.id,
"propagate_mode": "change_all",
"send_notification_to_old_thread": "false",
@@ -2006,7 +2048,7 @@ def test_notify_new_thread_move_message_to_stream(self) -> None:
self.assert_length(messages, 4)
self.assertEqual(
messages[3].content,
- f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**",
+ f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
)
def test_notify_old_thread_move_message_to_stream(self) -> None:
@@ -2017,7 +2059,6 @@ def test_notify_old_thread_move_message_to_stream(self) -> None:
result = self.client_patch(
f"/json/messages/{msg_id}",
{
- "message_id": msg_id,
"stream_id": new_stream.id,
"propagate_mode": "change_all",
"send_notification_to_old_thread": "true",
@@ -2031,7 +2072,7 @@ def test_notify_old_thread_move_message_to_stream(self) -> None:
self.assert_length(messages, 1)
self.assertEqual(
messages[0].content,
- f"This topic was moved by @_**Iago|{user_profile.id}** to #**new stream>test**",
+ f"This topic was moved to #**new stream>test** by @_**Iago|{user_profile.id}**.",
)
messages = get_topic_messages(user_profile, new_stream, "test")
@@ -2085,7 +2126,6 @@ def parameterized_test_move_message_involving_private_stream(
result = self.client_patch(
f"/json/messages/{msg_id}",
{
- "message_id": msg_id,
"stream_id": new_stream.id,
"propagate_mode": "change_all",
},
@@ -2096,7 +2136,7 @@ def parameterized_test_move_message_involving_private_stream(
self.assert_length(messages, 1)
self.assertEqual(
messages[0].content,
- f"This topic was moved by @_**Iago|{admin_user.id}** to #**new stream>test**",
+ f"This topic was moved to #**new stream>test** by @_**Iago|{admin_user.id}**.",
)
messages = get_topic_messages(admin_user, new_stream, "test")
@@ -2191,7 +2231,6 @@ def test_mark_topic_as_resolved(self) -> None:
result = self.client_patch(
"/json/messages/" + str(id1),
{
- "message_id": id1,
"topic": original_topic,
"propagate_mode": "change_all",
},
@@ -2202,7 +2241,6 @@ def test_mark_topic_as_resolved(self) -> None:
result = self.client_patch(
"/json/messages/" + str(id1),
{
- "message_id": id1,
"topic": resolved_topic,
"propagate_mode": "change_all",
},
@@ -2246,7 +2284,6 @@ def test_mark_topic_as_resolved(self) -> None:
result = self.client_patch(
"/json/messages/" + str(id1),
{
- "message_id": id1,
"topic": weird_topic,
"propagate_mode": "change_all",
},
@@ -2271,7 +2308,6 @@ def test_mark_topic_as_resolved(self) -> None:
result = self.client_patch(
"/json/messages/" + str(id1),
{
- "message_id": id1,
"topic": unresolved_topic,
"propagate_mode": "change_all",
},
| Moving a topic partially
I am very grateful for the "Move topic" option. But there is a little interface issue:
Sometimes a conversation drifts off the original topic and a moderator takes the liberty to move that part out to another topic. Zulip then says "This topic was moved to ...", which is incorrect: The topic is still very much alive.
So, when moving only part of a topic, I would suggest either displaying "Part of this topic was moved to ..." or, better, "A conversation was moved to ...".
Furthermore (maybe this is already done though), when moving a whole topic, the original should be marked resolved.
| Thanks for the report!
It's a checkbox option for whether to send the "topic was moved" notification -- so the person doing the move gets to choose whether to trigger a "topic was moved" notice. I think it makes sense to change the text of the notice when moving only one message, and possibly in a different way when moving a subset of a topic, e.g. "3 messages from this topic were moved to ...". @alya thoughts? Might be a good chat.zulip.org discussion.
> Furthermore (maybe this is already done though), when moving a whole topic, the original should be marked resolved.
This doesn't make sense in our model; "resolved topics" are ones with a name starting with a checkmark.
Hello @zulip/server-message-view members, this issue was labeled with the "area: message-editing" label, so you may want to check it out!
<!-- areaLabelAddition -->
Thanks for the feedback! Yeah, I think something like "3 messages from this topic were moved to..." seems good; I can kick off a discussion.
Based on [discussion on CZO](https://chat.zulip.org/#narrow/stream/101-design/topic/part.20of.20a.20topic.20moved), to solve this issue we should change the text to the following format when only part of the topic is moved:
> 3 messages from this topic were moved to ... by ... .
When all messages are moved, we should reorder the message to be:
> This topic was moved to ... by ... .
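For example, the wording could be picked from how much of the topic actually moved -- a rough sketch (the helper name is made up; the real strings live in `zerver/lib/actions.py` per the patch above):

```python
# Sketch: choose the breadcrumb text based on how many messages were moved.
def moved_notification_text(moved_all_visible_messages: bool, changed_messages_count: int) -> str:
    if moved_all_visible_messages:
        return "This topic was moved to {new_location} by {user}."
    if changed_messages_count == 1:
        return "A message was moved from this topic to {new_location} by {user}."
    return "{changed_messages_count} messages were moved from this topic to {new_location} by {user}."
```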
@zulipbot claim
Welcome to Zulip, @victorknox! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here's some tips to get you off to a good start:
- Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
- [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
@victorknox I saw the questions you posted on chat.zulip.org. Please take a look at the [Zulip contributor guide](https://zulip.readthedocs.io/en/latest/overview/contributing.html) to get started, and keep in mind the following guideline:
> Before you claim an issue, you should be confident that you will be able to tackle it effectively.
I will go ahead and unassign this issue, and you should feel free to re-claim it once you have figured out how to approach it (or pick a different one if you prefer).
@zulipbot claim
Welcome to Zulip, @Shubh0405! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here's some tips to get you off to a good start:
- Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
- [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
| 2022-01-15T21:48:37 |
zulip/zulip | 20,905 | zulip__zulip-20905 | [
"20887"
] | ec86290e2a6991152e3e071f75eddab67d0c8de2 | diff --git a/zproject/urls.py b/zproject/urls.py
--- a/zproject/urls.py
+++ b/zproject/urls.py
@@ -226,7 +226,7 @@
# - runtornado.py has its own URL list for Tornado views. See the
# invocation of web.Application in that file.
#
-# - The Nginx config knows which URLs to route to Django or Tornado.
+# - The nginx config knows which URLs to route to Django or Tornado.
#
# - Likewise for the local dev server in tools/run-dev.py.
| docs: Some of the Nginx words are not capitalized.
E.g. https://github.com/zulip/zulip/blob/fd1d4e101b5d6baaa19ae3166e568244c2582f26/docs/production/authentication-methods.md?plain=1#L768
| Can I make changes in this?
Yes, sounds good.
@mudassir1231 wait, see the discussion at https://chat.zulip.org/#narrow/stream/18-tools/topic/grammar.20checker/near/1314277.
It should be the other way around, i.e. to lowercase "nginx" whenever possible. Read the discussion above. | 2022-01-25T06:40:36 |
|
zulip/zulip | 20,915 | zulip__zulip-20915 | [
"20911"
] | 157cbca1e043d938b74aaffad66e63a95315e3ca | diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -48,4 +48,4 @@
# historical commits sharing the same major version, in which case a
# minor version bump suffices.
-PROVISION_VERSION = "173.2"
+PROVISION_VERSION = "173.3"
| 4.9: dev failing provisioning due to orjson==3.6.6 requiring rust toolchain
crazy! orjson seems to have a hidden dependency?!
```
default: Preparing wheel metadata: finished with status 'error'
default: ERROR: Command errored out with exit status 1:
default: command: /srv/zulip-venv-cache/b4a767a2f298218a7feee0f52190b8ade6698b74/zulip-py3-venv/bin/python3 /srv/zulip-venv-cache/b4a767a2f298218a7feee0f52190b8ade6698b74/zulip-py3-venv/lib/python3.7/site-packages/pip/_vendor/pep517/_in_process.py prepare_metadata_for_build_wheel /tmp/tmp7f5pjtbs
default: cwd: /tmp/pip-install-__o230mo/orjson
default: Complete output (6 lines):
default: Checking for Rust toolchain....
default:
default: Cargo, the Rust package manager, is not installed or is not on PATH.
default: This package requires Rust and Cargo to compile extensions. Install it through
default: the system's package manager or via https://rustup.rs/
default:
default: ----------------------------------------
default: ERROR: Command errored out with exit status 1: /srv/zulip-venv-cache/b4a767a2f298218a7feee0f52190b8ade6698b74/zulip-py3-venv/bin/python3 /srv/zulip-venv-cache/b4a767a2f298218a7feee0f52190b8ade6698b74/zulip-py3-venv/lib/python3.7/site-packages/pip/_vendor/pep517/_in_process.py prepare_metadata_for_build_wheel /tmp/tmp7f5pjtbs Check the logs for full command output.
default: WARNING: You are using pip version 20.3.4; however, version 21.3.1 is available.
default: You should consider upgrading via the '/srv/zulip-venv-cache/b4a767a2f298218a7feee0f52190b8ade6698b74/zulip-py3-venv/bin/python3 -m pip install --upgrade pip' command.
```
running standalone I get the same error:
```
$ pip3 install orjson==3.6.6
Collecting orjson==3.6.6
Cache entry deserialization failed, entry ignored
Cache entry deserialization failed, entry ignored
Downloading https://files.pythonhosted.org/packages/99/6c/cb0dcbc2500c004764cfadb63a51701c328f45f8689ffecbc8906df0f0de/orjson-3.6.6.tar.gz (550kB)
100% |████████████████████████████████| 552kB 1.9MB/s
Installing build dependencies ... error
Complete output from command /usr/bin/python3 -m pip install --ignore-installed --no-user --prefix /tmp/pip-build-env-6jn9rmj7 --no-warn-script-location --no-binary :none: --only-binary :none: -i https://pypi.org/simple -- maturin>=0.12.6,<0.13:
Collecting maturin<0.13,>=0.12.6
Cache entry deserialization failed, entry ignored
Downloading https://files.pythonhosted.org/packages/fb/16/d401cb37d3adb33c6e56cb4fc3916626afd75f4a57a315895abea4837c95/maturin-0.12.6.tar.gz (139kB)
Installing build dependencies: started
Installing build dependencies: finished with status 'done'
Collecting toml~=0.10.0 (from maturin<0.13,>=0.12.6)
Using cached https://files.pythonhosted.org/packages/44/6f/7120676b6d73228c96e17f1f794d8ab046fc910d781c8d151120c3f1569e/toml-0.10.2-py2.py3-none-any.whl
Building wheels for collected packages: maturin
Running setup.py bdist_wheel for maturin: started
Running setup.py bdist_wheel for maturin: finished with status 'error'
Complete output from command /usr/bin/python3 -u -c "import setuptools, tokenize;__file__='/tmp/pip-install-of8zdmcq/maturin/setup.py';f=getattr(tokenize, 'open', open)(__file__);code=f.read().replace('\r\n', '\n');f.close();exec(compile(code, __file__, 'exec'))" bdist_wheel -d /tmp/pip-wheel-fxd1ov71 --python-tag cp37:
running bdist_wheel
running build
installing to build/bdist.linux-aarch64/wheel
running install
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/tmp/pip-install-of8zdmcq/maturin/setup.py", line 135, in <module>
zip_safe=False,
File "/tmp/pip-build-env-2xerxhnc/lib/python3.7/site-packages/setuptools/__init__.py", line 153, in setup
return distutils.core.setup(**attrs)
File "/usr/lib/python3.7/distutils/core.py", line 148, in setup
dist.run_commands()
File "/usr/lib/python3.7/distutils/dist.py", line 966, in run_commands
self.run_command(cmd)
File "/usr/lib/python3.7/distutils/dist.py", line 985, in run_command
cmd_obj.run()
File "/tmp/pip-build-env-2xerxhnc/lib/python3.7/site-packages/wheel/bdist_wheel.py", line 335, in run
self.run_command('install')
File "/usr/lib/python3.7/distutils/cmd.py", line 313, in run_command
self.distribution.run_command(command)
File "/usr/lib/python3.7/distutils/dist.py", line 985, in run_command
cmd_obj.run()
File "/tmp/pip-install-of8zdmcq/maturin/setup.py", line 58, in run
"cargo not found in PATH. Please install rust "
RuntimeError: cargo not found in PATH. Please install rust (https://www.rust-lang.org/tools/install) and try again
----------------------------------------
Failed building wheel for maturin
Running setup.py clean for maturin
Failed to build maturin
Installing collected packages: toml, maturin
Running setup.py install for maturin: started
Running setup.py install for maturin: finished with status 'error'
Complete output from command /usr/bin/python3 -u -c "import setuptools, tokenize;__file__='/tmp/pip-install-of8zdmcq/maturin/setup.py';f=getattr(tokenize, 'open', open)(__file__);code=f.read().replace('\r\n', '\n');f.close();exec(compile(code, __file__, 'exec'))" install --record /tmp/pip-record-cv6n90um/install-record.txt --single-version-externally-managed --prefix /tmp/pip-build-env-6jn9rmj7 --compile:
running install
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/tmp/pip-install-of8zdmcq/maturin/setup.py", line 135, in <module>
zip_safe=False,
File "/tmp/pip-build-env-2xerxhnc/lib/python3.7/site-packages/setuptools/__init__.py", line 153, in setup
return distutils.core.setup(**attrs)
File "/usr/lib/python3.7/distutils/core.py", line 148, in setup
dist.run_commands()
File "/usr/lib/python3.7/distutils/dist.py", line 966, in run_commands
self.run_command(cmd)
File "/usr/lib/python3.7/distutils/dist.py", line 985, in run_command
cmd_obj.run()
File "/tmp/pip-install-of8zdmcq/maturin/setup.py", line 58, in run
"cargo not found in PATH. Please install rust "
RuntimeError: cargo not found in PATH. Please install rust (https://www.rust-lang.org/tools/install) and try again
----------------------------------------
Command "/usr/bin/python3 -u -c "import setuptools, tokenize;__file__='/tmp/pip-install-of8zdmcq/maturin/setup.py';f=getattr(tokenize, 'open', open)(__file__);code=f.read().replace('\r\n', '\n');f.close();exec(compile(code, __file__, 'exec'))" install --record /tmp/pip-record-cv6n90um/install-record.txt --single-version-externally-managed --prefix /tmp/pip-build-env-6jn9rmj7 --compile" failed with error code 1 in /tmp/pip-install-of8zdmcq/maturin/
```
cc @alexmv
(vagrant/docker Engine 20.10.12 (desktop 4.4.2) - host OS is Mac OSX 12.1 Monterey)
| haha, not getting those hours back. tl;dr: there's something very screwy with OSX 12.1 and docker... anyway, I also wanted to dev on ubuntu 20.04 to match my prod instance, so I decided to nail both at once... below is the diff. Notes:
- curl crashed so I replaced with wget, which... also crashed! wget2 seems to work.
- `pip3 install orjson=3.6.6` fails on needing rust and `python-is-python3`, so I added those.
- `pip3 install --upgrade pip` might be unnecessary...
hope this helps!
cc @alexmv
```
$ git diff
diff --git a/scripts/lib/install-yarn b/scripts/lib/install-yarn
index c28dc12cf6..6c5cb2b211 100755
--- a/scripts/lib/install-yarn
+++ b/scripts/lib/install-yarn
@@ -18,7 +18,7 @@ if ! check_version; then
tmpdir="$(mktemp -d)"
trap 'rm -r "$tmpdir"' EXIT
cd "$tmpdir"
- curl -fLO "https://registry.npmjs.org/yarn/-/$tarball"
+ wget2 "https://registry.npmjs.org/yarn/-/$tarball"
sha256sum -c <<<"$sha256 $tarball"
rm -rf /srv/zulip-yarn
mkdir /srv/zulip-yarn
diff --git a/tools/setup/dev-vagrant-docker/Dockerfile b/tools/setup/dev-vagrant-docker/Dockerfile
index 98fa714888..a8cb403d8f 100644
--- a/tools/setup/dev-vagrant-docker/Dockerfile
+++ b/tools/setup/dev-vagrant-docker/Dockerfile
@@ -1,4 +1,4 @@
-FROM debian:10
+FROM ubuntu:20.04
ARG DEBIAN_MIRROR
@@ -10,17 +10,22 @@ RUN echo locales locales/default_environment_locale select C.UTF-8 | debconf-set
&& apt-get install --no-install-recommends -y \
ca-certificates \
curl \
+ wget2 \
+ rustc cargo \
locales \
lsb-release \
openssh-server \
python3 \
+ python-is-python3 \
sudo \
systemd \
&& rm -rf /var/lib/apt/lists/*
ARG VAGRANT_UID
-RUN \
+RUN pip3 install --upgrade pip \
# We use https://github.com/gdraheim/docker-systemctl-replacement
# to make services we install like PostgreSQL, Redis, etc. normally
# managed by systemd start within Docker, which breaks normal
```
@alexmv is there any follow-up we need to do here, or should we close the issue? | 2022-01-26T02:39:03 |
|
zulip/zulip | 20,937 | zulip__zulip-20937 | [
"20924"
] | 364139feec710663a489ed010c6038c2f535bf84 | diff --git a/zerver/views/auth.py b/zerver/views/auth.py
--- a/zerver/views/auth.py
+++ b/zerver/views/auth.py
@@ -977,7 +977,7 @@ def json_fetch_api_key(
if not authenticate(
request=request, username=user_profile.delivery_email, password=password, realm=realm
):
- raise JsonableError(_("Your username or password is incorrect."))
+ raise JsonableError(_("Password is incorrect."))
api_key = get_api_key(user_profile)
return json_success({"api_key": api_key, "email": user_profile.delivery_email})
| diff --git a/zerver/tests/test_auth_backends.py b/zerver/tests/test_auth_backends.py
--- a/zerver/tests/test_auth_backends.py
+++ b/zerver/tests/test_auth_backends.py
@@ -4345,7 +4345,7 @@ def test_wrong_password(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
result = self.client_post("/json/fetch_api_key", dict(password="wrong"))
- self.assert_json_error(result, "Your username or password is incorrect.", 400)
+ self.assert_json_error(result, "Password is incorrect.", 400)
def test_invalid_subdomain(self) -> None:
username = "hamlet"
diff --git a/zerver/tests/test_decorators.py b/zerver/tests/test_decorators.py
--- a/zerver/tests/test_decorators.py
+++ b/zerver/tests/test_decorators.py
@@ -1143,7 +1143,7 @@ def test_fetch_api_key_email_address_visibility(self) -> None:
def test_fetch_api_key_wrong_password(self) -> None:
self.login("cordelia")
result = self.client_post("/json/fetch_api_key", dict(password="wrong_password"))
- self.assert_json_error_contains(result, "password is incorrect")
+ self.assert_json_error_contains(result, "Password is incorrect")
class InactiveUserTest(ZulipTestCase):
| Show proper Error Message while generating API Key

On submitting a wrong password when generating an API key, we're shown an error saying 'Your username or password is incorrect.'
This could be made more precise by showing something like 'The password you entered is incorrect.'
We don't need to mention 'username' here, since there's no chance of the username being incorrect.
| @alya @timabbott let me know whether we can go ahead with this issue.
@timabbott @alya , I have solved this issue in https://github.com/zulip/zulip/pull/20937 , can you please review and merge? | 2022-01-27T02:32:17 |
zulip/zulip | 20,977 | zulip__zulip-20977 | [
"19709"
] | 38003408cbbfd197bc01d1681eec89a774563213 | diff --git a/zerver/lib/actions.py b/zerver/lib/actions.py
--- a/zerver/lib/actions.py
+++ b/zerver/lib/actions.py
@@ -1910,6 +1910,7 @@ def build_message_send_dict(
widget_content_dict: Optional[Dict[str, Any]] = None,
email_gateway: bool = False,
mention_backend: Optional[MentionBackend] = None,
+ limit_unread_user_ids: Optional[Set[int]] = None,
) -> SendMessageRequest:
"""Returns a dictionary that can be passed into do_send_messages. In
production, this is always called by check_message, but some
@@ -2015,6 +2016,7 @@ def build_message_send_dict(
wildcard_mention_user_ids=wildcard_mention_user_ids,
links_for_embed=links_for_embed,
widget_content=widget_content_dict,
+ limit_unread_user_ids=limit_unread_user_ids,
)
return message_send_dict
@@ -2069,6 +2071,7 @@ def do_send_messages(
stream_email_user_ids=send_request.stream_email_user_ids,
mentioned_user_ids=mentioned_user_ids,
mark_as_read_for_users=mark_as_read_for_users,
+ limit_unread_user_ids=send_request.limit_unread_user_ids,
)
for um in user_messages:
@@ -2280,6 +2283,7 @@ def create_user_messages(
stream_email_user_ids: AbstractSet[int],
mentioned_user_ids: AbstractSet[int],
mark_as_read_for_users: Set[int],
+ limit_unread_user_ids: Optional[Set[int]],
) -> List[UserMessageLite]:
# These properties on the Message are set via
# render_markdown by code in the Markdown inline patterns
@@ -2316,8 +2320,10 @@ def create_user_messages(
for user_profile_id in um_eligible_user_ids:
flags = base_flags
if (
- user_profile_id == sender_id and message.sent_by_human()
- ) or user_profile_id in mark_as_read_for_users:
+ (user_profile_id == sender_id and message.sent_by_human())
+ or user_profile_id in mark_as_read_for_users
+ or (limit_unread_user_ids is not None and user_profile_id not in limit_unread_user_ids)
+ ):
flags |= UserMessage.flags.read
if user_profile_id in mentioned_user_ids:
flags |= UserMessage.flags.mentioned
@@ -3364,6 +3370,7 @@ def check_message(
*,
skip_stream_access_check: bool = False,
mention_backend: Optional[MentionBackend] = None,
+ limit_unread_user_ids: Optional[Set[int]] = None,
) -> SendMessageRequest:
"""See
https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
@@ -3490,6 +3497,7 @@ def check_message(
widget_content_dict=widget_content_dict,
email_gateway=email_gateway,
mention_backend=mention_backend,
+ limit_unread_user_ids=limit_unread_user_ids,
)
if stream is not None and message_send_dict.rendering_result.mentions_wildcard:
@@ -3507,6 +3515,7 @@ def _internal_prep_message(
content: str,
email_gateway: bool = False,
mention_backend: Optional[MentionBackend] = None,
+ limit_unread_user_ids: Optional[Set[int]] = None,
) -> Optional[SendMessageRequest]:
"""
Create a message object and checks it, but doesn't send it or save it to the database.
@@ -3537,6 +3546,7 @@ def _internal_prep_message(
realm=realm,
email_gateway=email_gateway,
mention_backend=mention_backend,
+ limit_unread_user_ids=limit_unread_user_ids,
)
except JsonableError as e:
logging.exception(
@@ -3555,6 +3565,7 @@ def internal_prep_stream_message(
topic: str,
content: str,
email_gateway: bool = False,
+ limit_unread_user_ids: Optional[Set[int]] = None,
) -> Optional[SendMessageRequest]:
"""
See _internal_prep_message for details of how this works.
@@ -3568,6 +3579,7 @@ def internal_prep_stream_message(
addressee=addressee,
content=content,
email_gateway=email_gateway,
+ limit_unread_user_ids=limit_unread_user_ids,
)
@@ -3629,9 +3641,12 @@ def internal_send_stream_message(
topic: str,
content: str,
email_gateway: bool = False,
+ limit_unread_user_ids: Optional[Set[int]] = None,
) -> Optional[int]:
- message = internal_prep_stream_message(sender, stream, topic, content, email_gateway)
+ message = internal_prep_stream_message(
+ sender, stream, topic, content, email_gateway, limit_unread_user_ids=limit_unread_user_ids
+ )
if message is None:
return None
@@ -6403,6 +6418,7 @@ def maybe_send_resolve_topic_notifications(
stream: Stream,
old_topic: str,
new_topic: str,
+ changed_messages: List[Message],
) -> None:
# Note that topics will have already been stripped in check_update_message.
#
@@ -6434,6 +6450,14 @@ def maybe_send_resolve_topic_notifications(
# not a bug with the "resolve topics" feature.
return
+ # Compute the users who either sent or reacted to messages that
+ # were moved via the "resolve topic' action. Only those users
+ # should be eligible for this message being managed as unread.
+ affected_participant_ids = (set(message.sender_id for message in changed_messages)) | set(
+ Reaction.objects.filter(message__in=changed_messages).values_list(
+ "user_profile_id", flat=True
+ )
+ )
sender = get_system_bot(settings.NOTIFICATION_BOT, user_profile.realm_id)
user_mention = silent_mention_syntax_for_user(user_profile)
with override_language(stream.realm.default_language):
@@ -6449,6 +6473,7 @@ def maybe_send_resolve_topic_notifications(
notification_string.format(
user=user_mention,
),
+ limit_unread_user_ids=affected_participant_ids,
)
@@ -7023,6 +7048,7 @@ def user_info(um: UserMessage) -> Dict[str, Any]:
stream=stream_being_edited,
old_topic=orig_topic_name,
new_topic=topic_name,
+ changed_messages=changed_messages,
)
return len(changed_messages)
diff --git a/zerver/lib/message.py b/zerver/lib/message.py
--- a/zerver/lib/message.py
+++ b/zerver/lib/message.py
@@ -126,6 +126,7 @@ class SendMessageRequest:
submessages: List[Dict[str, Any]] = field(default_factory=list)
deliver_at: Optional[datetime.datetime] = None
delivery_type: Optional[str] = None
+ limit_unread_user_ids: Optional[Set[int]] = None
# We won't try to fetch more unread message IDs from the database than
| diff --git a/zerver/tests/test_message_edit.py b/zerver/tests/test_message_edit.py
--- a/zerver/tests/test_message_edit.py
+++ b/zerver/tests/test_message_edit.py
@@ -9,6 +9,7 @@
from django.utils.timezone import now as timezone_now
from zerver.lib.actions import (
+ do_add_reaction,
do_change_realm_plan_type,
do_change_stream_post_policy,
do_change_user_role,
@@ -2165,6 +2166,8 @@ def test_mark_topic_as_resolved(self) -> None:
self.login("iago")
admin_user = self.example_user("iago")
hamlet = self.example_user("hamlet")
+ cordelia = self.example_user("cordelia")
+ aaron = self.example_user("aaron")
# Set the user's translation language to German to test that
# it is overridden by the realm's default language.
@@ -2173,11 +2176,16 @@ def test_mark_topic_as_resolved(self) -> None:
stream = self.make_stream("new")
self.subscribe(admin_user, stream.name)
self.subscribe(hamlet, stream.name)
+ self.subscribe(cordelia, stream.name)
+ self.subscribe(aaron, stream.name)
original_topic = "topic 1"
id1 = self.send_stream_message(hamlet, "new", topic_name=original_topic)
id2 = self.send_stream_message(admin_user, "new", topic_name=original_topic)
+ msg1 = Message.objects.get(id=id1)
+ do_add_reaction(aaron, msg1, "tada", "1f389", "unicode_emoji")
+
# Check that we don't incorrectly send "unresolve topic"
# notifications when asking the preserve the current topic.
result = self.client_patch(
@@ -2216,6 +2224,23 @@ def test_mark_topic_as_resolved(self) -> None:
f"@_**Iago|{admin_user.id}** has marked this topic as resolved.",
)
+ # Check topic resolved notification message is only unread for participants.
+ assert (
+ UserMessage.objects.filter(
+ user_profile__in=[admin_user, hamlet, aaron], message__id=messages[2].id
+ )
+ .extra(where=[UserMessage.where_unread()])
+ .count()
+ == 3
+ )
+
+ assert (
+ UserMessage.objects.filter(user_profile=cordelia, message__id=messages[2].id)
+ .extra(where=[UserMessage.where_unread()])
+ .count()
+ == 0
+ )
+
# Now move to a weird state and confirm no new messages
weird_topic = "β ββ" + original_topic
result = self.client_patch(
@@ -2267,6 +2292,23 @@ def test_mark_topic_as_resolved(self) -> None:
f"@_**Iago|{admin_user.id}** has marked this topic as unresolved.",
)
+ # Check topic unresolved notification message is only unread for participants.
+ assert (
+ UserMessage.objects.filter(
+ user_profile__in=[admin_user, hamlet, aaron], message__id=messages[3].id
+ )
+ .extra(where=[UserMessage.where_unread()])
+ .count()
+ == 3
+ )
+
+ assert (
+ UserMessage.objects.filter(user_profile=cordelia, message__id=messages[3].id)
+ .extra(where=[UserMessage.where_unread()])
+ .count()
+ == 0
+ )
+
class DeleteMessageTest(ZulipTestCase):
def test_delete_message_invalid_request_format(self) -> None:
| Resolve topic should mark topic as unread only for participants
At present, resolving a topic marks it as unread. This can be annoying to users, as it unnecessarily bumps the topic, when in fact the intent is to clearly mark it as being "done". However, participants in a topic may want to be alerted when a topic is resolved, e.g. to make sure that they agree with the decision to resolve it.
To balance between these needs, **resolving** or **unresolving** a topic should create new unread messages only for users who have **participated in the topic**. For all others, the resolve/unresolve messages should automatically be marked as read.
When **Follow topic** is implemented in #6027, this behavior should be altered so that resolving/unresolving topics creates unreads for anyone following the topic.
This change is important for resolve topic workflows, so we should consider it a release goal.
[CZO discussion thread](https://chat.zulip.org/#narrow/stream/137-feedback/topic/Topic.20resolution.20-.3E.20bump.3F)
| Hello @zulip/server-message-view members, this issue was labeled with the "area: message-editing" label, so you may want to check it out!
@zulipbot claim
Hello @mitanshu001, it looks like you've currently claimed 1 issue in this repository. We encourage new contributors to focus their efforts on at most 1 issue at a time, so please complete your work on your other claimed issues before trying to claim this issue again.
We look forward to your valuable contributions!
**ERROR:** You have not claimed this issue to work on yet.
@zulipbot claim.
@alya are the required changes in Python (backend)? I am new to the community. This will be my 2nd issue. Please let me know if this is not a good issue for beginners.
@timabbott will have more insight into whether this is a good beginner issue.
This is probably accessible to someone good at using `git grep` and similar to trace call flows in a Python codebase. Structurally, you want to pass the `mark_as_read` parameter through to `do_send_messages` from the `maybe_send_resolve_topic_notifications` call to `internal_send_stream_message`.
Except that's probably awkward, since the participants are rare (So in a 10K user stream, you'll be passing 10K-7 users around). So probably we want a new `unread_for_participants_only` parameter that touches that same code.
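Roughly, the shape would be something like the sketch below -- this is just an illustration of the flag logic, not the actual Zulip code, and the helper/parameter names are invented for the example:
```python
from typing import Optional, Set

def initial_flags_for_user(
    user_id: int,
    sender_id: int,
    limit_unread_user_ids: Optional[Set[int]] = None,
) -> Set[str]:
    """Decide the starting flags for one recipient's copy of the notification.

    If limit_unread_user_ids is None, nothing changes; if it is a set,
    everyone outside the set gets the message pre-marked as read.
    """
    flags: Set[str] = set()
    if user_id == sender_id:
        flags.add("read")
    if limit_unread_user_ids is not None and user_id not in limit_unread_user_ids:
        flags.add("read")
    return flags

# Only the topic's participants stay unread:
participants = {11, 42}
assert "read" in initial_flags_for_user(user_id=7, sender_id=99, limit_unread_user_ids=participants)
assert "read" not in initial_flags_for_user(user_id=42, sender_id=99, limit_unread_user_ids=participants)
```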
I expect it won't merge quickly, as we'll need some iteration (e.g. we may refactor it 2-3 times after getting something basic working that has tests), but it's an OK thing to work on for someone experienced in development in general but not the Zulip codebase specifically.
Thanks, Tim for the explanation
Unassigning myself as I am not getting much time currently, so other people can pick it up. If no one starts working on it in 4-5 days, I will pick it up again.
@zulipbot abandon
@zulipbot claim
Welcome to Zulip, @collinwhitlow! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here's some tips to get you off to a good start:
- Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
- [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
**ERROR:** You have not claimed this issue to work on yet.
@zulipbot claim
Hello @collinwhitlow, it looks like we've already sent you a collaboration invite at https://github.com/zulip/zulip/invitations, but you haven't accepted it yet!
Please accept the invite and try to claim this issue again afterwards. We look forward to your contributions!
@alya my invitation expired before I had a chance to get started on it. Do you have any idea how I can go about getting a new one so I can successfully claim this issue?
We may need to manually cancel and resend it? I'm not sure. CC @timabbott
Sent a new one.
Thank you, @timabbott
@zulipbot claim
Hello @collinwhitlow, it looks like you've currently claimed 1 issue in this repository. We encourage new contributors to focus their efforts on at most 1 issue at a time, so please complete your work on your other claimed issues before trying to claim this issue again.
We look forward to your valuable contributions!
@timabbott Since I am new to contributing to Zulip, do you happen to know which files would be the most beneficial to start with for this issue?? Thank you!
@collinwhitlow please take a look at the (recently updated) [Zulip contributor guide](https://zulip.readthedocs.io/en/latest/overview/contributing.html) to learn more about how to get help. Also, keep in mind the following guideline:
> Before you claim an issue, you should be confident that you will be able to tackle it effectively.
I will go ahead and unassign this issue, and you should feel free to re-claim it once you have figured out how to approach it (or pick a different one if you prefer).
That's understandable. I spent some time reviewing that link, and I am now more confident I am in a position to succeed. I'll give this another shot.
@zulipbot claim
Cool, let me know when you have a draft PR ready for review; this is one of the issues I'm tracking as important to include in the upcoming 5.0 release (final likely in January, but we want to merge as many things like this as we can before then).
Hello @collinwhitlow, you have been unassigned from this issue because you have not updated this issue or any referenced pull requests for over 14 days.
You can reclaim this issue or claim any other issue by commenting `@zulipbot claim` on that issue.
Thanks for your contributions, and hope to see you again soon!
| 2022-01-29T00:30:57 |
zulip/zulip | 20,989 | zulip__zulip-20989 | [
"20969"
] | 924df5aaf55a08cad35ae16d9adca7d8df3aa9c3 | diff --git a/zerver/lib/actions.py b/zerver/lib/actions.py
--- a/zerver/lib/actions.py
+++ b/zerver/lib/actions.py
@@ -5429,25 +5429,29 @@ def send_change_stream_message_retention_days_notification(
with override_language(stream.realm.default_language):
if old_value == Stream.MESSAGE_RETENTION_SPECIAL_VALUES_MAP["unlimited"]:
- notification_string = _(
- "{user} has changed the [message retention period](/help/message-retention-policy) for this stream from "
- "**Forever** to **{new_value} days**. Messages will be automatically deleted after {new_value} days."
- )
- notification_string = notification_string.format(user=user_mention, new_value=new_value)
+ old_retention_period = _("Forever")
+ new_retention_period = f"{new_value} days"
+ summary_line = f"Messages in this stream will now be automatically deleted {new_value} days after they are sent."
elif new_value == Stream.MESSAGE_RETENTION_SPECIAL_VALUES_MAP["unlimited"]:
- notification_string = _(
- "{user} has changed the [message retention period](/help/message-retention-policy) for this stream from "
- "**{old_value} days** to **Forever**."
- )
- notification_string = notification_string.format(user=user_mention, old_value=old_value)
+ old_retention_period = f"{old_value} days"
+ new_retention_period = _("Forever")
+ summary_line = _("Messages in this stream will now be retained forever.")
else:
- notification_string = _(
- "{user} has changed the [message retention period](/help/message-retention-policy) for this stream from "
- "**{old_value} days** to **{new_value} days**. Messages will be automatically deleted after {new_value} days."
- )
- notification_string = notification_string.format(
- user=user_mention, old_value=old_value, new_value=new_value
- )
+ old_retention_period = f"{old_value} days"
+ new_retention_period = f"{new_value} days"
+ summary_line = f"Messages in this stream will now be automatically deleted {new_value} days after they are sent."
+ notification_string = _(
+ "{user} has changed the [message retention period](/help/message-retention-policy) for this stream:\n"
+ "* **Old retention period**: {old_retention_period}\n"
+ "* **New retention period**: {new_retention_period}\n\n"
+ "{summary_line}"
+ )
+ notification_string = notification_string.format(
+ user=user_mention,
+ old_retention_period=old_retention_period,
+ new_retention_period=new_retention_period,
+ summary_line=summary_line,
+ )
internal_send_stream_message(
sender, stream, Realm.STREAM_EVENTS_NOTIFICATION_TOPIC, notification_string
)
| diff --git a/zerver/tests/test_subs.py b/zerver/tests/test_subs.py
--- a/zerver/tests/test_subs.py
+++ b/zerver/tests/test_subs.py
@@ -1781,9 +1781,10 @@ def test_change_stream_message_retention_days_notifications(self) -> None:
messages = get_topic_messages(user_profile, stream, "stream events")
self.assert_length(messages, 1)
expected_notification = (
- f"@_**Desdemona|{user_profile.id}** has changed the [message retention period](/help/message-retention-policy) "
- "for this stream from **Forever** to **2 days**. Messages will be automatically "
- "deleted after 2 days."
+ f"@_**Desdemona|{user_profile.id}** has changed the [message retention period](/help/message-retention-policy) for this stream:\n"
+ "* **Old retention period**: Forever\n"
+ "* **New retention period**: 2 days\n\n"
+ "Messages in this stream will now be automatically deleted 2 days after they are sent."
)
self.assertEqual(messages[0].content, expected_notification)
realm_audit_log = RealmAuditLog.objects.filter(
@@ -1803,9 +1804,10 @@ def test_change_stream_message_retention_days_notifications(self) -> None:
messages = get_topic_messages(user_profile, stream, "stream events")
self.assert_length(messages, 2)
expected_notification = (
- f"@_**Desdemona|{user_profile.id}** has changed the [message retention period](/help/message-retention-policy) "
- "for this stream from **2 days** to **8 days**. Messages will be automatically "
- "deleted after 8 days."
+ f"@_**Desdemona|{user_profile.id}** has changed the [message retention period](/help/message-retention-policy) for this stream:\n"
+ "* **Old retention period**: 2 days\n"
+ "* **New retention period**: 8 days\n\n"
+ "Messages in this stream will now be automatically deleted 8 days after they are sent."
)
self.assertEqual(messages[1].content, expected_notification)
realm_audit_log = RealmAuditLog.objects.filter(
@@ -1826,8 +1828,10 @@ def test_change_stream_message_retention_days_notifications(self) -> None:
messages = get_topic_messages(user_profile, stream, "stream events")
self.assert_length(messages, 3)
expected_notification = (
- f"@_**Desdemona|{user_profile.id}** has changed the [message retention period](/help/message-retention-policy) "
- "for this stream from **8 days** to **Forever**."
+ f"@_**Desdemona|{user_profile.id}** has changed the [message retention period](/help/message-retention-policy) for this stream:\n"
+ "* **Old retention period**: 8 days\n"
+ "* **New retention period**: Forever\n\n"
+ "Messages in this stream will now be retained forever."
)
self.assertEqual(messages[2].content, expected_notification)
realm_audit_log = RealmAuditLog.objects.filter(
| Reformat stream retention policy change notification
We should change the format of the stream retention policy notification message to be consistent with other stream notifications added in #20289 (cf. #20723).
Current format:

Updated format:
@ Desdemona has changed the [message retention period](/help/message-retention-policy) for this stream:
* **Old retention period**: Forever
* **New retention period**: 15 days
Messages will be automatically deleted after 15 days.
| Hello @zulip/server-streams members, this issue was labeled with the "area: stream settings" label, so you may want to check it out!
| 2022-01-29T19:20:05 |
zulip/zulip | 20,990 | zulip__zulip-20990 | [
"18067"
] | 90e202cd38d00945c81da4730d39e3f5c5b1e8b1 | diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -33,7 +33,7 @@
# Changes should be accompanied by documentation explaining what the
# new level means in templates/zerver/api/changelog.md, as well as
# "**Changes**" entries in the endpoint's documentation in `zulip.yaml`.
-API_FEATURE_LEVEL = 114
+API_FEATURE_LEVEL = 115
# Bump the minor PROVISION_VERSION to indicate that folks should provision
# only when going from an old version of the code to a newer version. Bump
diff --git a/zerver/lib/push_notifications.py b/zerver/lib/push_notifications.py
--- a/zerver/lib/push_notifications.py
+++ b/zerver/lib/push_notifications.py
@@ -714,6 +714,7 @@ def get_message_payload(
if message.recipient.type == Recipient.STREAM:
data["recipient_type"] = "stream"
data["stream"] = get_display_recipient(message.recipient)
+ data["stream_id"] = message.recipient.type_id
data["topic"] = message.topic_name()
elif message.recipient.type == Recipient.HUDDLE:
data["recipient_type"] = "private"
| diff --git a/zerver/tests/test_push_notifications.py b/zerver/tests/test_push_notifications.py
--- a/zerver/tests/test_push_notifications.py
+++ b/zerver/tests/test_push_notifications.py
@@ -1717,6 +1717,7 @@ def test_get_message_payload_apns_stream_message(self) -> None:
"sender_email": self.sender.email,
"sender_id": self.sender.id,
"stream": get_display_recipient(message.recipient),
+ "stream_id": stream.id,
"topic": message.topic_name(),
"server": settings.EXTERNAL_HOST,
"realm_id": self.sender.realm.id,
@@ -1747,6 +1748,7 @@ def test_get_message_payload_apns_stream_mention(self) -> None:
"sender_email": self.sender.email,
"sender_id": self.sender.id,
"stream": get_display_recipient(message.recipient),
+ "stream_id": stream.id,
"topic": message.topic_name(),
"server": settings.EXTERNAL_HOST,
"realm_id": self.sender.realm.id,
@@ -1780,6 +1782,7 @@ def test_get_message_payload_apns_user_group_mention(self) -> None:
"sender_email": self.sender.email,
"sender_id": self.sender.id,
"stream": get_display_recipient(message.recipient),
+ "stream_id": stream.id,
"topic": message.topic_name(),
"server": settings.EXTERNAL_HOST,
"realm_id": self.sender.realm.id,
@@ -1814,6 +1817,7 @@ def test_get_message_payload_apns_stream_wildcard_mention(self) -> None:
"sender_email": self.sender.email,
"sender_id": self.sender.id,
"stream": get_display_recipient(message.recipient),
+ "stream_id": stream.id,
"topic": message.topic_name(),
"server": settings.EXTERNAL_HOST,
"realm_id": self.sender.realm.id,
@@ -1901,6 +1905,7 @@ def _test_get_message_payload_gcm_mentions(
"sender_avatar_url": absolute_avatar_url(message.sender),
"recipient_type": "stream",
"stream": get_display_recipient(message.recipient),
+ "stream_id": stream.id,
"topic": message.topic_name(),
}
@@ -1995,6 +2000,7 @@ def test_get_message_payload_gcm_stream_notifications(self) -> None:
"recipient_type": "stream",
"topic": "Test topic",
"stream": "Denmark",
+ "stream_id": stream.id,
},
)
self.assertDictEqual(
@@ -2032,6 +2038,7 @@ def test_get_message_payload_gcm_redacted_content(self) -> None:
"recipient_type": "stream",
"topic": "Test topic",
"stream": "Denmark",
+ "stream_id": stream.id,
},
)
self.assertDictEqual(
| Include stream ID in push notifications for stream messages
Currently our push notifications include user IDs for all the users involved, but for the stream of a stream message they only include the stream name -- not the stream ID:
https://github.com/zulip/zulip/blob/4dacbfdc823ce7347979683e917eabb390c8ae07/zerver/lib/push_notifications.py#L670-L674
We should add a field `stream_id` which has the numeric stream ID, so that it's possible for clients to interpret the data in a way that's robust to the stream's name changing.
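For illustration, the payload a mobile client receives would gain one field; the values below are hypothetical (the `recipient_type`, `stream`, and `topic` keys already exist, `stream_id` is the proposed addition):
```python
# Stream-message push payload, before and after the proposed change:
payload_before = {
    "recipient_type": "stream",
    "stream": "core team",  # display name only; breaks if the stream is renamed
    "topic": "launch checklist",
}

payload_after = {
    "recipient_type": "stream",
    "stream": "core team",
    "stream_id": 42,  # stable numeric ID, robust to renames
    "topic": "launch checklist",
}
```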
| @mateuszmandera want to just do this? It seems really quick.
Bump. It'd be nice to get this in, because if we have it in 5.0 then that will shorten the period that we have to have fallback code in the client for using the stream name instead (and hoping that e.g. it hasn't been renamed.)
| 2022-01-29T22:00:53 |
zulip/zulip | 20,992 | zulip__zulip-20992 | [
"20821"
] | c0f7158378c9218f804ffe759a066c48ec5309e0 | diff --git a/zproject/backends.py b/zproject/backends.py
--- a/zproject/backends.py
+++ b/zproject/backends.py
@@ -1557,14 +1557,17 @@ def social_associate_user_helper(
full_name = kwargs["details"].get("fullname")
first_name = kwargs["details"].get("first_name")
last_name = kwargs["details"].get("last_name")
+
if all(name is None for name in [full_name, first_name, last_name]) and backend.name not in [
"apple",
"saml",
+ "oidc",
]:
# (1) Apple authentication provides the user's name only the very first time a user tries to log in.
# So if the user aborts login or otherwise is doing this the second time,
# we won't have any name data.
- # (2) Some IdPs may not send any name value if the user doesn't have them set in the IdP's directory.
+ # (2) Some SAML or OIDC IdPs may not send any name value if the user doesn't
+ # have them set in the IdP's directory.
#
# The name will just default to the empty string in the code below.
| diff --git a/zerver/tests/test_auth_backends.py b/zerver/tests/test_auth_backends.py
--- a/zerver/tests/test_auth_backends.py
+++ b/zerver/tests/test_auth_backends.py
@@ -3336,13 +3336,21 @@ def generate_access_token_url_payload(self, account_data_dict: Dict[str, str]) -
}
)
- def get_account_data_dict(self, email: str, name: str) -> Dict[str, Any]:
+ def get_account_data_dict(self, email: str, name: Optional[str]) -> Dict[str, Any]:
+ if name is not None:
+ name_parts = name.split(" ")
+ given_name = name_parts[0]
+ family_name = name_parts[1]
+ else:
+ given_name = None
+ family_name = None
+
return dict(
email=email,
name=name,
nickname="somenickname",
- given_name=name.split(" ")[0],
- family_name=name.split(" ")[1],
+ given_name=given_name,
+ family_name=family_name,
)
@override_settings(TERMS_OF_SERVICE_VERSION=None)
@@ -3377,6 +3385,28 @@ def test_social_auth_registration_auto_signup(self) -> None:
expect_confirm_registration_page=False,
)
+ def test_auth_registration_with_no_name_provided(self) -> None:
+ """
+ The OIDC IdP may not send the name information. The
+ signup flow should proceed normally, without pre-filling the name in the
+ registration form.
+ """
+ email = "[email protected]"
+ subdomain = "zulip"
+ realm = get_realm("zulip")
+ account_data_dict = self.get_account_data_dict(email=email, name=None)
+ result = self.social_auth_test(account_data_dict, subdomain=subdomain, is_signup=True)
+ self.stage_two_of_registration(
+ result,
+ realm,
+ subdomain,
+ email,
+ "",
+ "Full Name",
+ skip_registration_form=False,
+ expect_full_name_prepopulated=False,
+ )
+
def test_social_auth_no_key(self) -> None:
"""
Requires overriding because client key/secret are configured
| zulip 5 oidc potential bug.
I've configured OpenID Connect in settings.py from the main git branch. I'm pretty sure I've configured it correctly, but I am getting an error when trying to log in:
500 Internal Server Error, with this URL in the browser bar:
https://site.tld/login/oidc/?subdomain=&is_signup=0&multiuse_object_key=&next=%2F
I'm using Keycloak and my oidc_url is "https://adiffsite.tld/auth/realms/MyRealm/"
| Upon further investigation it looks like zulip was flaking out because keycloak was passing back only a username instead of passing it a first & last name in the oidc session.
```
  File "/home/zulip/deployments/2022-01-18-07-14-28/./zproject/backends.py", line 1573, in social_associate_user_helper
    raise AssertionError("Social auth backend doesn't provide name")
```
Hello @zulip/server-authentication members, this issue was labeled with the "area: authentication" label, so you may want to check it out!
@ericbets can you stop by production help in chat.zulip.org to debug?
@mateuszmandera can you help investigate?
Closing, since requiring a first and last name in the OIDC session is a reasonable requirement.
@ericbet If some providers, like Keycloak, by default may not send the first/last name, then I think we want to allow that (and if we didn't, we should be documenting the requirement clearly), so I'll reopen. | 2022-01-30T11:53:23 |
zulip/zulip | 21,053 | zulip__zulip-21053 | [
"20132"
] | 197843af31e218b826b952ff91e24861311e1aca | diff --git a/zerver/lib/send_email.py b/zerver/lib/send_email.py
--- a/zerver/lib/send_email.py
+++ b/zerver/lib/send_email.py
@@ -249,6 +249,8 @@ def send_email(
)
template = template_prefix.split("/")[-1]
+ log_email_config_errors()
+
if dry_run:
print(mail.message().get_payload()[0])
return
@@ -588,3 +590,14 @@ def send_custom_email(
if options["dry_run"]:
break
+
+
+def log_email_config_errors() -> None:
+ """
+ The purpose of this function is to log (potential) config errors,
+ but without raising an exception.
+ """
+ if settings.EMAIL_HOST_USER and not settings.EMAIL_HOST_PASSWORD:
+ logger.error(
+ "An SMTP username was set (EMAIL_HOST_USER), but password is unset (EMAIL_HOST_PASSWORD)."
+ )
| diff --git a/zerver/tests/test_send_email.py b/zerver/tests/test_send_email.py
--- a/zerver/tests/test_send_email.py
+++ b/zerver/tests/test_send_email.py
@@ -147,3 +147,24 @@ def test_send_email_exceptions(self) -> None:
f"INFO:{logger.name}:Sending password_reset email to {mail.to}",
)
self.assertTrue(info_log.output[1].startswith(f"ERROR:zulip.send_email:{message}"))
+
+ def test_send_email_config_error_logging(self) -> None:
+ hamlet = self.example_user("hamlet")
+
+ with self.settings(EMAIL_HOST_USER="test", EMAIL_HOST_PASSWORD=None):
+ with self.assertLogs(logger=logger, level="ERROR") as error_log:
+ send_email(
+ "zerver/emails/password_reset",
+ to_emails=[hamlet],
+ from_name="From Name",
+ from_address=FromAddress.NOREPLY,
+ language="en",
+ )
+
+ self.assertEqual(
+ error_log.output,
+ [
+ "ERROR:zulip.send_email:"
+ "An SMTP username was set (EMAIL_HOST_USER), but password is unset (EMAIL_HOST_PASSWORD)."
+ ],
+ )
| Throw an error when EMAIL_HOST_USER is specified but not email_password
As a convenience (from someone who was misreading zulip/zulip-secrets.conf as zulip/secrets.conf), it would be nice to see an error message when EMAIL_HOST_USER is specified but the password is missing in the secrets file.
| Hello @namangirdhar16!
Thanks for your interest in Zulip! You have attempted to claim an issue without the labels "help wanted", "good first issue". Since you're a new contributor, you can only claim and submit pull requests for issues with the [help wanted](https://github.com/zulip/zulip/issues?q=is%3Aopen+is%3Aissue+no%3Aassignee+label%3A%22help+wanted%22) or [good first issue](https://github.com/zulip/zulip/issues?q=is%3Aopen+is%3Aissue+no%3Aassignee+label%3A%22good+first+issue%22) labels.
If this is your first time here, we recommend reading our [guide for new contributors](https://zulip.readthedocs.io/en/latest/overview/contributing.html) before getting started.
Hello @zulip/server-production members, this issue was labeled with the "area: production installer" label, so you may want to check it out!
This seems useful and likely easy. I think perhaps we'd want to change this function to add an additional check after the loop? Not sure. @mateuszmandera FYI.
```
def check_config() -> None:
for (setting_name, default) in settings.REQUIRED_SETTINGS:
# if required setting is the same as default OR is not found in settings,
# throw error to add/set that setting in config
try:
if settings.__getattr__(setting_name) != default:
continue
except AttributeError:
pass
raise CommandError(f"Error: You must set {setting_name} in /etc/zulip/settings.py.")
```
@timabbott Hmm, though this only gets called in `manage.py checkconfig` (and for some reason in `register_server`) - so I'm not sure how many admins actually utilize that command, especially since we don't particularly advertise it (only in `management-commands.md`). It's also called in `initialize-database`, but that runs only once, during installation, so it's not useful for this issue either.
I'm wondering if it wouldn't make sense to add some kind of `check_config()` function that's called in zproject/settings.py after the settings imports, and that could be used generally for all sorts of assertions validating that the settings make sense:
```
from .configured_settings import * # noqa: F401,F403 isort: skip
from .computed_settings import * # noqa: F401,F403 isort: skip
check_config()
# Do not add any code after these wildcard imports! Add it to
# computed_settings instead.
```
Checks like this are a little complicated because they can fire when the server process starts up. If we add anything like this, we need to make sure that we start running `check_config` in `restart_server` so that we can abort with a clear admin-visible error message before the restart, so they aren't left wondering and having to log-dive for why uwsgi isn't coming up cleanly.
@alexmv Would `logging.error` in the hypothetical `check_config` function solve this concern? (as opposed to hard `AssertionError`s)
That pushes it back into "is this visible enough" since it's only going to show up if folks check their logs. If you're running the check at startup, that also means it's going to show up in the logs at random intervals when uwsgi processes get (re)started.
We might be better off doing this sort of check, with a `logging.error`, when we're trying to send an email -- rather than trying to do it at "start time," which is not well-defined and not always an interactive process.
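A rough sketch of what that send-time check could look like (function and logger names here are placeholders, not a final implementation):
```python
import logging

from django.conf import settings

logger = logging.getLogger("zulip.send_email")


def log_email_config_errors() -> None:
    # Log loudly, but don't raise: a misconfigured SMTP password should
    # not turn every email-sending code path into a crash.
    if settings.EMAIL_HOST_USER and not settings.EMAIL_HOST_PASSWORD:
        logger.error(
            "An SMTP username was set (EMAIL_HOST_USER), but the password "
            "is unset (EMAIL_HOST_PASSWORD); check /etc/zulip/zulip-secrets.conf."
        )
```
Calling something like this at the top of the send-email path would surface the misconfiguration exactly when it matters, without affecting server startup.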
Logging an error on email send makes sense to me.
-Tim Abbott (mobile)
| 2022-02-06T20:01:47 |
zulip/zulip | 21,059 | zulip__zulip-21059 | [
"21011"
] | c81a8c03467993f6a8964c233aa9cea16b2e102f | diff --git a/zerver/views/muting.py b/zerver/views/muting.py
--- a/zerver/views/muting.py
+++ b/zerver/views/muting.py
@@ -1,6 +1,7 @@
import datetime
from typing import Optional
+from django.db import IntegrityError
from django.http import HttpRequest, HttpResponse
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
@@ -39,7 +40,10 @@ def mute_topic(
if topic_is_muted(user_profile, stream.id, topic_name):
raise JsonableError(_("Topic already muted"))
- do_mute_topic(user_profile, stream, topic_name, date_muted)
+ try:
+ do_mute_topic(user_profile, stream, topic_name, date_muted)
+ except IntegrityError:
+ raise JsonableError(_("Topic already muted"))
def unmute_topic(
| diff --git a/zerver/tests/test_muting_topics.py b/zerver/tests/test_muting_topics.py
--- a/zerver/tests/test_muting_topics.py
+++ b/zerver/tests/test_muting_topics.py
@@ -105,7 +105,6 @@ def test_add_muted_topic(self) -> None:
self.assert_json_success(result)
self.assertIn((stream.name, "Verona3", mock_date_muted), get_topic_mutes(user))
- self.assertTrue(topic_is_muted(user, stream.id, "Verona3"))
self.assertTrue(topic_is_muted(user, stream.id, "verona3"))
remove_topic_mute(
@@ -114,6 +113,23 @@ def test_add_muted_topic(self) -> None:
topic_name="Verona3",
)
+ # Verify the error handling for the database level
+ # IntegrityError we'll get with a race between two processes
+ # trying to mute the topic. To do this, we patch the
+ # topic_is_muted function to always return False when trying
+ # to mute a topic that is already muted.
+ add_topic_mute(
+ user_profile=user,
+ stream_id=stream.id,
+ recipient_id=stream.recipient.id,
+ topic_name="Verona3",
+ date_muted=datetime(2020, 1, 1, tzinfo=timezone.utc),
+ )
+
+ with mock.patch("zerver.views.muting.topic_is_muted", return_value=False):
+ result = self.api_patch(user, url, data)
+ self.assert_json_error(result, "Topic already muted")
+
def test_remove_muted_topic(self) -> None:
user = self.example_user("hamlet")
realm = user.realm
| Race conditions in muting topics and users
Our pattern in [muting topics](https://github.com/zulip/zulip/blob/b4075b78eb6e128bce7ef3d36b86d176ef2ecfa5/zerver/views/muting.py#L39-L42) is to check if the topic is muted, and if not then to add a row:
```py3
if topic_is_muted(user_profile, stream.id, topic_name):
raise JsonableError(_("Topic already muted"))
do_mute_topic(user_profile, stream, topic_name, date_muted)
return json_success()
```
This pattern is inherently prone to race conditions. Luckily, we catch those due to database constraints, in the form of `UserTopic.objects.create` raising an IntegrityError, but those bubble up as 500's, not 400's.
We should catch those IntegrityError's and re-raise them as `JsonableError(_("Topic already muted"))`. That applies to the mute-topic codepath, as well as the mute-user codepath.
Though it doesn't affect correctness in this case, since the duplicate row is the first database change operation, these actions should be done inside of transactions.
Un-muting is technically also subject to this race, though it doesn't matter -- both processes calling `.delete()` on the same object is a mostly-silent no-op for the second process.
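To make that concrete, here is a minimal sketch reusing the names from the snippet above (illustrative only; the real change may be structured differently):
```py3
from django.db import IntegrityError, transaction

def mute_topic(user_profile, stream, topic_name, date_muted):
    if topic_is_muted(user_profile, stream.id, topic_name):
        raise JsonableError(_("Topic already muted"))
    try:
        # Do the write inside a transaction, and treat the unique-constraint
        # violation from a concurrent request as "already muted".
        with transaction.atomic():
            do_mute_topic(user_profile, stream, topic_name, date_muted)
    except IntegrityError:
        raise JsonableError(_("Topic already muted"))
    return json_success()
```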
| @zulipbot claim
Hello @jai2201, it looks like you've currently claimed 1 issue in this repository. We encourage new contributors to focus their efforts on at most 1 issue at a time, so please complete your work on your other claimed issues before trying to claim this issue again.
We look forward to your valuable contributions!
@jai2201 are you working on this? If not I want to work on this.
@madrix01, I tried fixing this, but I was not able to reproduce the race condition in tests with mocking.
I would be grateful if you could solve this, and I'll learn how to reproduce a race condition with mocking from your PR.
@alexmv Can we test this by sending multiple API requests using a script?
@madrix01: Not unless you mock something like `topic_is_muted` to always return `False`, so that the test is reliable. See discussion in https://chat.zulip.org/#narrow/stream/49-development-help/topic/Race.20condition.20with.20mute.20users.20and.20topic.20.2321011
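For instance, something along these lines, assuming the same `user`/`url`/`data` setup the existing mute-topic tests use (a sketch, not final code):
```python
from unittest import mock

# Mute the topic once via the normal path first, then patch topic_is_muted
# so the second request skips the early "already muted" check and hits the
# database constraint instead.
with mock.patch("zerver.views.muting.topic_is_muted", return_value=False):
    result = self.api_patch(user, url, data)
    self.assert_json_error(result, "Topic already muted")
```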
I changed `topic_is_muted` to return `False`, and making the API request then always returns
```
{'result': 'error', 'msg': 'Internal server error'}
```
Error in console
```
django.db.utils.IntegrityError: duplicate key value violates unique constraint "zerver_mutedtopic_user_profile_id_stream_i_2cb30f72_uniq"
```
@jai2201 fyi
Correct, that's the symptom that you'll need to make the code catch and return as `JsonableError(_("Topic already muted"))`.
@madrix01, I too made that change to make `topic_is_muted` always return false, and then added the catch to return a JSON error, but it didn't pass the test. Maybe I'm making some other mistake somewhere else; I'll have a look at your PR once it's up.
@alexmv I have implemented a `try`/`except` block for catching the error, and it returns the desired results. Is the implementation correct?
@jai2201 which test failed for you?
@madrix01: That's fine (and presumably comparable to what @jai2201 wrote) but it needs to come with a test, which is what the discussion on chat.zulip.org is about. If you don't write a new test, you'll find that it fails tests because it doesn't have complete test coverage. | 2022-02-07T21:12:29 |
zulip/zulip | 21,152 | zulip__zulip-21152 | [
"20910"
] | 73266350efa3b6a39dc36744e91782010f974481 | diff --git a/tools/lib/provision.py b/tools/lib/provision.py
--- a/tools/lib/provision.py
+++ b/tools/lib/provision.py
@@ -398,6 +398,24 @@ def main(options: argparse.Namespace) -> "NoReturn":
else:
print("No changes to apt dependencies, so skipping apt operations.")
+ # Binary-patch ARM64 assembly bug in OpenSSL 1.1.1b through 1.1.1h.
+ # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=989604
+ # https://bugs.launchpad.net/ubuntu/+source/openssl/+bug/1951279
+ try:
+ with open("/usr/lib/aarch64-linux-gnu/libcrypto.so.1.1", "rb") as fb:
+ if b"\xbf#\x03\xd5\xfd\x07E\xf8" in fb.read():
+ run_as_root(
+ [
+ "sed",
+ "-i",
+ r"s/\(\xbf#\x03\xd5\)\(\xfd\x07E\xf8\)/\2\1/",
+ "/usr/lib/aarch64-linux-gnu/libcrypto.so.1.1",
+ ],
+ env={**os.environ, "LC_ALL": "C"},
+ )
+ except FileNotFoundError:
+ pass
+
# Here we install node.
proxy_env = [
"env",
| clean dev/vagrant setup: curl crashing (reported to curl)
On Mac OSX 12.1 Monterey with Docker Engine 20.10.12 (Desktop 4.4.2), synced to 4.9 (7ec2a2cde7a68de741b4bc179650e0ab0e26008e), I ran `vagrant destroy` and `vagrant up --provider=docker` and got the dreaded `Error running a subcommand of ./tools/lib/provision.py`, which I tracked down to curl crashing (!): `curl -fLO https://registry.npmjs.org/yarn/-/yarn-1.22.17.tgz` (install-yarn:21). I'm working around it with wget for now... I reported it to the curl folks: https://github.com/curl/curl/issues/8328 (see that issue for a standalone Dockerfile to repro).
(also posted to https://chat.zulip.org/#narrow/stream/21-provision-help/topic/curl.20crashing.20.28!.29.20on.20docker.2020.2FOSX.2012.2E1/near/1315641 )
cc @alexmv
| I traced this to an issue in the OpenSSL ARM64 assembly for Poly1305, introduced in openssl/openssl@2cf7fd698ec1375421f91338ff8a44e7da5238b6 (`OpenSSL_1_1_1b~37`) and already fixed in openssl/openssl@5795acffd8706e1cb584284ee5bb3a30986d0e75 (`OpenSSL_1_1_1i~21`). We could ask Debian if they want to backport this fix to oldstable.
Meanwhile, here's a workaround:
```sh
sudo perl -0777 -pe 's/(\xbf#\x03\xd5)(\xfd\x07E\xf8)/$2$1/' \
-i /usr/lib/aarch64-linux-gnu/libcrypto.so.1.1
```
I've sent a debdiff with the upstream patch to https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=989604 (might take a few minutes to show up in the web interface).
Hello @zulip/server-development members, this issue was labeled with the "area: provision" label, so you may want to check it out!
I think we can close this as now tracked in the Debian tracker; I don't think there's much else we can do here directly.
fyi I just did a fresh checkout and the curl segfault is still happening :-( i.e. new developers on Mac M1's will need to manually apply one of the above workarounds by hand.
```
default: + sudo -- env http_proxy= https_proxy= no_proxy= scripts/lib/install-yarn
default: % Total % Received % Xferd Average Speed Time Time Time Current
default: Dload Upload Total Spent Left Speed
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0scripts/lib/install-yarn: line 28: 10058 Segmentation fault curl -fLO "https://registry.npmjs.org/yarn/-/$tarball"
default:
default: Error running a subcommand of ./lib/provision.py: sudo -- env http_proxy= https_proxy= no_proxy= scripts/lib/install-yarn
default: Actual error output for the subcommand is just above this.
default:
default:
default: Provisioning failed (exit code 1)!
default:
default: * Look at the traceback(s) above to find more about the errors.
default: * Resolve the errors or get help on chat.
default: * If you can fix this yourself, you can re-run tools/provision at any time.
default: * Logs are here: zulip/var/log/provision.log
default:
The SSH command responded with a non-zero exit status. Vagrant
assumes that this means the command failed. The output for this command
should be in the log above. Please read the output to determine what
went wrong.
```
I also sent a debdiff to the corresponding Ubuntu 20.04 bug: https://bugs.launchpad.net/ubuntu/+source/openssl/+bug/1951279 | 2022-02-16T01:57:44 |
|
zulip/zulip | 21,229 | zulip__zulip-21229 | [
"2755",
"19994"
] | eb377a88726456c76c4f8c8de2fba1a0ab07171b | diff --git a/zerver/lib/email_mirror.py b/zerver/lib/email_mirror.py
--- a/zerver/lib/email_mirror.py
+++ b/zerver/lib/email_mirror.py
@@ -6,8 +6,6 @@
from typing import Dict, List, Match, Optional, Tuple
from django.conf import settings
-from django.utils.timezone import now as timezone_now
-from django.utils.timezone import timedelta
from zerver.actions.message_send import (
check_send_message,
@@ -134,18 +132,9 @@ def get_missed_message_token_from_address(address: str) -> str:
def get_usable_missed_message_address(address: str) -> MissedMessageEmailAddress:
token = get_missed_message_token_from_address(address)
try:
- mm_address = MissedMessageEmailAddress.objects.select_related().get(
- email_token=token,
- timestamp__gt=timezone_now()
- - timedelta(seconds=MissedMessageEmailAddress.EXPIRY_SECONDS),
- )
+ mm_address = MissedMessageEmailAddress.objects.select_related().get(email_token=token)
except MissedMessageEmailAddress.DoesNotExist:
- raise ZulipEmailForwardUserError("Missed message address expired or doesn't exist.")
-
- if not mm_address.is_usable():
- # Technical, this also checks whether the event is expired,
- # but that case is excluded by the logic above.
- raise ZulipEmailForwardUserError("Missed message address out of uses.")
+ raise ZulipEmailForwardError("Zulip notification reply address is invalid.")
return mm_address
diff --git a/zerver/models.py b/zerver/models.py
--- a/zerver/models.py
+++ b/zerver/models.py
@@ -4220,27 +4220,19 @@ def __str__(self) -> str:
class MissedMessageEmailAddress(models.Model):
- EXPIRY_SECONDS = 60 * 60 * 24 * 5
- ALLOWED_USES = 1
-
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
message: Message = models.ForeignKey(Message, on_delete=CASCADE)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
email_token: str = models.CharField(max_length=34, unique=True, db_index=True)
# Timestamp of when the missed message address generated.
- # The address is valid until timestamp + EXPIRY_SECONDS.
timestamp: datetime.datetime = models.DateTimeField(db_index=True, default=timezone_now)
+ # Number of times the missed message address has been used.
times_used: int = models.PositiveIntegerField(default=0, db_index=True)
def __str__(self) -> str:
return settings.EMAIL_GATEWAY_PATTERN % (self.email_token,)
- def is_usable(self) -> bool:
- not_expired = timezone_now() <= self.timestamp + timedelta(seconds=self.EXPIRY_SECONDS)
- has_uses_left = self.times_used < self.ALLOWED_USES
- return has_uses_left and not_expired
-
def increment_times_used(self) -> None:
self.times_used += 1
self.save(update_fields=["times_used"])
| diff --git a/zerver/tests/test_email_mirror.py b/zerver/tests/test_email_mirror.py
--- a/zerver/tests/test_email_mirror.py
+++ b/zerver/tests/test_email_mirror.py
@@ -39,7 +39,6 @@
from zerver.lib.test_helpers import mock_queue_publish, most_recent_message, most_recent_usermessage
from zerver.models import (
Attachment,
- MissedMessageEmailAddress,
Recipient,
Stream,
UserProfile,
@@ -1246,10 +1245,8 @@ def test_missed_message_email_multiple_responses(self) -> None:
incoming_valid_message["To"] = mm_address
incoming_valid_message["Reply-to"] = user_profile.delivery_email
- for i in range(0, MissedMessageEmailAddress.ALLOWED_USES):
- process_missed_message(mm_address, incoming_valid_message)
-
- with self.assertRaises(ZulipEmailForwardError):
+ # there is no longer a usage limit. Ensure we can send multiple times.
+ for i in range(0, 5):
process_missed_message(mm_address, incoming_valid_message)
@@ -1502,22 +1499,18 @@ def test_success_to_private(self) -> None:
def test_using_mm_address_multiple_times(self) -> None:
mm_address = self.send_private_message()
- for i in range(0, MissedMessageEmailAddress.ALLOWED_USES):
+ # there is no longer a usage limit. Ensure we can send multiple times.
+ for i in range(0, 5):
result = self.send_offline_message(mm_address, self.example_user("cordelia"))
self.assert_json_success(result)
- result = self.send_offline_message(mm_address, self.example_user("cordelia"))
- self.assert_json_error(
- result, "5.1.1 Bad destination mailbox address: Missed message address out of uses."
- )
-
def test_wrong_missed_email_private_message(self) -> None:
self.send_private_message()
mm_address = "mm" + ("x" * 32) + "@testserver"
result = self.send_offline_message(mm_address, self.example_user("cordelia"))
self.assert_json_error(
result,
- "5.1.1 Bad destination mailbox address: Missed message address expired or doesn't exist.",
+ "5.1.1 Bad destination mailbox address: Zulip notification reply address is invalid.",
)
| Replying to zulip email notification bounces
Today I replied to two Zulip notifications (recurse.zulipchat.com) from a cafe, using Gmail for iPhone, and both emails bounced back.
Here are the two bounce responses, with the message bodies stripped:
```
This is the mail system at host prod0.zulipchat.net.
I'm sorry to have to inform you that your message could not
be delivered to one or more recipients. It's attached below.
For further assistance, please send mail to postmaster.
If you do so, please include this problem report. You can
delete your own text from the attached returned message.
The mail system
<zulip@localhost> (expanded from
<[email protected]>): Bad destination
mailbox address: Bad or expired missed message address.
Final-Recipient: rfc822; zulip@localhost
Original-Recipient: rfc822;[email protected]
Action: failed
Status: 5.1.1
Diagnostic-Code: x-unix; Bad destination mailbox address: Bad or expired missed
message address.
---------- Forwarded message ----------
From: Kracekumar Ramaraj <[email protected]>
To: [email protected]
Cc:
Date: Fri, 16 Dec 2016 20:12:56 +0000
Subject: <Stripped>
I am on the move, zulip should also work.
```
The second one was a reply to an 8-day-old email.
```
This is the mail system at host prod0.zulipchat.net.
I'm sorry to have to inform you that your message could not
be delivered to one or more recipients. It's attached below.
For further assistance, please send mail to postmaster.
If you do so, please include this problem report. You can
delete your own text from the attached returned message.
The mail system
<zulip@localhost> (expanded from
<[email protected]>): Bad destination
mailbox address: Bad or expired missed message address.
Final-Recipient: rfc822; zulip@localhost
Original-Recipient: rfc822;[email protected]
Action: failed
Status: 5.1.1
Diagnostic-Code: x-unix; Bad destination mailbox address: Bad or expired missed
message address.
---------- Forwarded message ----------
From: Kracekumar Ramaraj <[email protected]>
To: [email protected]
Cc:
Date: Fri, 16 Dec 2016 20:08:12 +0000
Subject: <Stripped>
```
I am happy to furnish more information.
Replying more than once to a Zulip email notification bounces
One may reply via email to _Zulip notifications_ emails (but beware of #2755), but sending one reply seems to also invalidate the email address, and further replies (like you'd add more messages in the web UI) generate an email _Undelivered Mail Returned to Sender_ bounce message, because _Bad destination mailbox address: Missed message address out of uses_. That's annoying.
| Thanks for the bug report @kracekumar! I think what's going on is that the current security policy for missed-message reply-to email addresses means they expire after some period that's I think a week.
I wonder what the right strategy for these should be.
I expected it to work like standard e-mail reply for GH issues (ignoring locked conversation). I wouldn't expect an expiry since I can reply to any old private message from the web UI.
I am curious to know the reason for expiry and security concern.
I think a reasonable approach is to either remove the expiry for these or mention it in both the email and the bounce notification so users know what's up.
This issue is due to ALLOWED_USES defined in `zerver/models.py` line 3234:
```
class MissedMessageEmailAddress(models.Model):
EXPIRY_SECONDS = 60 * 60 * 24 * 5
ALLOWED_USES = 1
...
def is_usable(self) -> bool:
not_expired = timezone_now() <= self.timestamp + timedelta(seconds=self.EXPIRY_SECONDS)
has_uses_left = self.times_used < self.ALLOWED_USES
return has_uses_left and not_expired
```
This would be easy enough to change. I don't understand why this setting defaults to 1, or whether there is any risk/downside to increasing it. | 2022-02-24T03:38:02 |
zulip/zulip | 21,237 | zulip__zulip-21237 | [
"13264"
] | f3964673e7b11c32c14f2355c949a2c76178545f | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -36,6 +36,7 @@
"colon_fence",
"substitution",
]
+myst_heading_anchors = 6
myst_substitutions = {
"LATEST_RELEASE_VERSION": LATEST_RELEASE_VERSION,
}
diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -48,4 +48,4 @@
# historical commits sharing the same major version, in which case a
# minor version bump suffices.
-PROVISION_VERSION = "179.0"
+PROVISION_VERSION = "180.0"
| diff --git a/docs/testing/linters.md b/docs/testing/linters.md
--- a/docs/testing/linters.md
+++ b/docs/testing/linters.md
@@ -65,7 +65,7 @@ but it is good practice to run lint checks locally.
:::{important}
We provide a
-[Git pre-commit hook](../git/zulip-tools.html#set-up-git-repo-script)
+[Git pre-commit hook](../git/zulip-tools.md#set-up-git-repo-script)
that can automatically run `tools/lint` on just the files that
changed (in a few 100ms) whenever you make a commit. This can save
you a lot of time, by automatically detecting linter errors as you
diff --git a/docs/testing/testing-with-django.md b/docs/testing/testing-with-django.md
--- a/docs/testing/testing-with-django.md
+++ b/docs/testing/testing-with-django.md
@@ -330,7 +330,7 @@ with self.settings(RATE_LIMITING=True):
self.assertTrue(rate_limit_mock.called)
```
-Follow [this link](../subsystems/settings.html#testing-non-default-settings) for more
+Follow [this link](../subsystems/settings.md#testing-non-default-settings) for more
information on the "settings" context manager.
Zulip has several features, like outgoing webhooks or social
@@ -411,7 +411,7 @@ We use mocks and stubs for all the typical reasons:
- to stub out calls to third-party services
- to make it so that you can [run the Zulip tests on the airplane without wifi][no-internet]
-[no-internet]: testing.html#internet-access-inside-test-suites
+[no-internet]: testing.md#internet-access-inside-test-suites
A detailed description of mocks, along with useful coded snippets, can be found in the section
[Testing with mocks](#testing-with-mocks).
@@ -449,7 +449,7 @@ the same data structure as performing an action that generates said event.
This is a bit esoteric, but if you read the tests, you will see some of
the patterns. You can also learn more about our event system in the
-[new feature tutorial](../tutorials/new-feature-tutorial.html#handle-database-interactions).
+[new feature tutorial](../tutorials/new-feature-tutorial.md#handle-database-interactions).
### Negative tests
diff --git a/docs/testing/testing.md b/docs/testing/testing.md
--- a/docs/testing/testing.md
+++ b/docs/testing/testing.md
@@ -126,7 +126,7 @@ This is easy to do using test fixtures (a fancy word for fixed data
used in tests) and the `mock.patch` function to specify what HTTP
response should be used by the tests for every outgoing HTTP (or other
network) request. Consult
-[our guide on mocking](testing-with-django.html#zulip-mocking-practices) to
+[our guide on mocking](testing-with-django.md#zulip-mocking-practices) to
learn how to mock network requests easily; there are also a number of
examples throughout the codebase.
| docs: make links equally browsable on both GitHub and ReadTheDocs
Once upstream bug https://github.com/readthedocs/recommonmark/issues/179 is fixed, we can replace the `.html` part in links of the form `file_name.html#anchor` with `.md`.
This is a followup to https://github.com/zulip/zulip/pull/13232.
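In practice the rewrite is mechanical; for example, `[pre-commit hook](../git/zulip-tools.html#set-up-git-repo-script)` becomes `[pre-commit hook](../git/zulip-tools.md#set-up-git-repo-script)` (taken from the patch above), and the `myst_heading_anchors = 6` setting added to `docs/conf.py` in the same patch is what lets Sphinx resolve the `#anchor` fragments on those `.md` targets.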
| Hello @zulip/server-development members, this issue was labeled with the "area: documentation (developer)" label, so you may want to check it out!
<!-- areaLabelAddition --> | 2022-02-24T23:11:44 |
zulip/zulip | 21,420 | zulip__zulip-21420 | [
"21415",
"21318"
] | 4be79dba876522c7d8dcb59fc5a3e394a6f29deb | diff --git a/zerver/lib/markdown/tabbed_sections.py b/zerver/lib/markdown/tabbed_sections.py
--- a/zerver/lib/markdown/tabbed_sections.py
+++ b/zerver/lib/markdown/tabbed_sections.py
@@ -76,6 +76,8 @@
"instructions-for-all-platforms": "Instructions for all platforms",
"public-streams": "Public streams",
"private-streams": "Private streams",
+ "via-user-profile": "Via the user's profile",
+ "via-organization-settings": "Via organization settings",
}
| Document "Manage this user" profile link
Administrators now see a "Manage this user" link at the bottom of the user profile modal, which will often be handier than getting to the user management UI through the settings.
<img width="300" alt="Screen Shot 2022-03-14 at 3 46 02 PM" src="https://user-images.githubusercontent.com/2090066/158273279-acedbd78-a5b8-4a5a-ae48-cda2eb19e931.png">
We should probably document this link as the primary way to perform certain actions, with the settings approach being an alternative.
Affected pages I'm aware of:
- https://zulip.com/help/change-a-users-name
- https://zulip.com/help/change-a-users-role
- https://zulip.com/help/deactivate-or-reactivate-a-user
Document user management via profile
In #20373, we added the ability to:
1. Manage a user from their profile
2. Deactivate a user from the "manage user" modal
We should document this in the Help center. In particular, we should indicate that user management actions (e.g. Change a user's name, etc.) can be done from either the gear menu, or from their profile. (Maybe we can do this by using tabs in the instructions.)
For deactivating a user, I think the button inside the modal only needs to be described for the approach where you start from the profile.
This page might be a good related article: https://zulip.com/help/view-someones-profile
| Perhaps tabs would work well for this? I don't know if it's the best way, but we use it here: https://zulip.com/help/reading-strategies#reading-topics
> Perhaps tabs would work well for this? I don't know if it's the best way, but we use it here: https://zulip.com/help/reading-strategies#reading-topics
Yeah, since we know a minimum of 3 articles would use them, I can see using tabs being better in this case. Let me make a first draft that way and we can iterate from there.
Also, just checking, but this issue seems to be the same as #21318 (or at least overlap with it), right?
Hello @zulip/server-user-docs members, this issue was labeled with the "area: documentation (user)" label, so you may want to check it out!
<!-- areaLabelAddition -->
@laurynmm it would be great if you could add this to your todo list. Not super urgent, but we should take care of it to make sure the new functionality is documented.
Hello @laurynmm, you claimed this issue to work on it, but this issue and any referenced pull requests haven't been updated for 10 days. Are you still working on this issue?
If so, please update this issue by leaving a comment on this issue to let me know that you're still working on it. Otherwise, I'll automatically remove you from this issue in 4 days.
If you've decided to work on something else, simply comment `@zulipbot abandon` so that someone else can claim it and continue from where you left off.
Thank you for your valuable contributions to Zulip!
<!-- inactiveWarning -->
| 2022-03-15T14:52:59 |
|
zulip/zulip | 21,506 | zulip__zulip-21506 | [
"21502"
] | b9e428dd5d26b608c5ce9e836ed0e0c77bc58e2b | diff --git a/zerver/lib/markdown/__init__.py b/zerver/lib/markdown/__init__.py
--- a/zerver/lib/markdown/__init__.py
+++ b/zerver/lib/markdown/__init__.py
@@ -2365,9 +2365,21 @@ def topic_links(linkifiers_key: int, topic_name: str) -> List[Dict[str, str]]:
# here on an invalid regex would spam the logs with every
# message sent; simply move on.
continue
- for m in pattern.finditer(topic_name):
+ pos = 0
+ while pos < len(topic_name):
+ m = pattern.search(topic_name, pos)
+ if m is None:
+ break
+
match_details = m.groupdict()
match_text = match_details[OUTER_CAPTURE_GROUP]
+
+ # Adjust the start point of the match for the next
+ # iteration -- we rewind the non-word character at the
+ # end, if there was one, so a potential next match can
+ # also use it.
+ pos = m.end() - len(match_details[AFTER_CAPTURE_GROUP])
+
# We format the linkifier's url string using the matched text.
# Also, we include the matched text in the response, so that our clients
# don't have to implement any logic of their own to get back the text.
| diff --git a/zerver/tests/test_markdown.py b/zerver/tests/test_markdown.py
--- a/zerver/tests/test_markdown.py
+++ b/zerver/tests/test_markdown.py
@@ -1322,13 +1322,13 @@ def test_realm_patterns(self) -> None:
flush_per_request_caches()
- content = "We should fix #224 and #115, but not issue#124 or #1124z or [trac #15](https://trac.example.com/ticket/16) today."
+ content = "We should fix #224 #336 #446 and #115, but not issue#124 or #1124z or [trac #15](https://trac.example.com/ticket/16) today."
converted = markdown_convert(content, message_realm=realm, message=msg)
converted_topic = topic_links(realm.id, msg.topic_name())
self.assertEqual(
converted.rendered_content,
- '<p>We should fix <a href="https://trac.example.com/ticket/224">#224</a> and <a href="https://trac.example.com/ticket/115">#115</a>, but not issue#124 or #1124z or <a href="https://trac.example.com/ticket/16">trac #15</a> today.</p>',
+ '<p>We should fix <a href="https://trac.example.com/ticket/224">#224</a> <a href="https://trac.example.com/ticket/336">#336</a> <a href="https://trac.example.com/ticket/446">#446</a> and <a href="https://trac.example.com/ticket/115">#115</a>, but not issue#124 or #1124z or <a href="https://trac.example.com/ticket/16">trac #15</a> today.</p>',
)
self.assertEqual(
converted_topic, [{"url": "https://trac.example.com/ticket/444", "text": "#444"}]
@@ -1344,6 +1344,17 @@ def test_realm_patterns(self) -> None:
],
)
+ msg.set_topic_name("#444 #555 #666")
+ converted_topic = topic_links(realm.id, msg.topic_name())
+ self.assertEqual(
+ converted_topic,
+ [
+ {"url": "https://trac.example.com/ticket/444", "text": "#444"},
+ {"url": "https://trac.example.com/ticket/555", "text": "#555"},
+ {"url": "https://trac.example.com/ticket/666", "text": "#666"},
+ ],
+ )
+
RealmFilter(
realm=realm,
pattern=r"#(?P<id>[a-zA-Z]+-[0-9]+)",
@@ -1474,7 +1485,7 @@ def test_multiple_matching_realm_patterns(self) -> None:
converted.rendered_content,
'<p>We should fix <a href="https://trac.example.com/ticket/ABC-123">ABC-123</a> or <a href="https://trac.example.com/ticket/16">trac ABC-123</a> today.</p>',
)
- # Both the links should be generated in topics.
+ # But both the links should be generated in topics.
self.assertEqual(
converted_topic,
[
| linkifiers: Fix Markdown processor handling of topics that should have multiple linkifier topic_links.
As reported in https://chat.zulip.org/#narrow/stream/9-issues/topic/Multiple.20topic.20links.3F/near/1348089, it appears that we've regressed support for computing multiple topic_links generated by linkifiers in the server. Here's a test case:
```
diff --git a/zerver/tests/test_markdown.py b/zerver/tests/test_markdown.py
index 7b945f3187..4ab4e7a750 100644
--- a/zerver/tests/test_markdown.py
+++ b/zerver/tests/test_markdown.py
@@ -1344,6 +1344,16 @@ class MarkdownTest(ZulipTestCase):
],
)
+ # BUG: This should render topic_links for both issue IDs.
+ msg.set_topic_name("Two issues: #444 #555")
+ converted_topic = topic_links(realm.id, msg.topic_name())
+ self.assertEqual(
+ converted_topic,
+ [
+ {"url": "https://trac.example.com/ticket/444", "text": "#444"},
+ ],
+ )
+
RealmFilter(
realm=realm,
pattern=r"#(?P<id>[a-zA-Z]+-[0-9]+)",
```
| The prepared regex (by `prepare_linkifier_pattern`) matches `" #444 "` with spaces on both sides, after which it cannot also match `" #555"` because the space has already been used.
So yes, this is a regression from db934be0646e3002287c72ba2154a904a4dc42b5
Without using any sort of look-around, both regexes need to match that in-between space in order to know that the pattern is well-bounded. re2 doesn't support `\G`, which would be a way to work around the second regex trying to match it.
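One way to avoid look-around entirely (and what the patch above ends up doing) is to drive `pattern.search()` in a loop and rewind the next start position to just before the trailing boundary character, so it can bound the following match as well. A rough standalone sketch of that idea, using a toy stand-in for the real prepared linkifier pattern:

```python
import re
from typing import List

# Toy stand-in for a prepared linkifier pattern: the linkified text plus a
# captured trailing boundary (the real pattern is built by
# prepare_linkifier_pattern and run under re2).
pattern = re.compile(r"(?P<linkifier>#[0-9]+)(?P<linkifier_after>$|[^\w])")

def find_linkifier_matches(topic_name: str) -> List[str]:
    matches: List[str] = []
    pos = 0
    while pos < len(topic_name):
        m = pattern.search(topic_name, pos)
        if m is None:
            break
        matches.append(m.group("linkifier"))
        # Rewind past the trailing non-word character so it can also act
        # as the boundary of the next match.
        pos = m.end() - len(m.group("linkifier_after"))
    return matches

print(find_linkifier_matches("#444 #555 #666"))  # ['#444', '#555', '#666']
```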
We can try using `\b` at the end, which is effectively a cheating look-ahead? | 2022-03-22T00:51:32 |
zulip/zulip | 21,507 | zulip__zulip-21507 | [
"19238"
] | 4d4c320a075b21f81d2c0901424f1267b335b322 | diff --git a/zerver/lib/upload.py b/zerver/lib/upload.py
--- a/zerver/lib/upload.py
+++ b/zerver/lib/upload.py
@@ -385,7 +385,7 @@ def get_file_info(request: HttpRequest, user_file: File) -> Tuple[str, int, Opti
return uploaded_file_name, uploaded_file_size, content_type
-def get_signed_upload_url(path: str) -> str:
+def get_signed_upload_url(path: str, download: bool = False) -> str:
client = boto3.client(
"s3",
aws_access_key_id=settings.S3_KEY,
@@ -393,9 +393,16 @@ def get_signed_upload_url(path: str) -> str:
region_name=settings.S3_REGION,
endpoint_url=settings.S3_ENDPOINT_URL,
)
+ params = {
+ "Bucket": settings.S3_AUTH_UPLOADS_BUCKET,
+ "Key": path,
+ }
+ if download:
+ params["ResponseContentDisposition"] = "attachment"
+
return client.generate_presigned_url(
ClientMethod="get_object",
- Params={"Bucket": settings.S3_AUTH_UPLOADS_BUCKET, "Key": path},
+ Params=params,
ExpiresIn=SIGNED_UPLOAD_URL_DURATION,
HttpMethod="GET",
)
diff --git a/zerver/views/upload.py b/zerver/views/upload.py
--- a/zerver/views/upload.py
+++ b/zerver/views/upload.py
@@ -21,15 +21,19 @@
from zerver.models import UserProfile, validate_attachment_request
-def serve_s3(request: HttpRequest, url_path: str, url_only: bool) -> HttpResponse:
- url = get_signed_upload_url(url_path)
+def serve_s3(
+ request: HttpRequest, url_path: str, url_only: bool, download: bool = False
+) -> HttpResponse:
+ url = get_signed_upload_url(url_path, download=download)
if url_only:
return json_success(request, data=dict(url=url))
return redirect(url)
-def serve_local(request: HttpRequest, path_id: str, url_only: bool) -> HttpResponse:
+def serve_local(
+ request: HttpRequest, path_id: str, url_only: bool, download: bool = False
+) -> HttpResponse:
local_path = get_local_file_path(path_id)
if local_path is None:
return HttpResponseNotFound("<p>File not found</p>")
@@ -56,7 +60,7 @@ def serve_local(request: HttpRequest, path_id: str, url_only: bool) -> HttpRespo
# and filename, see the below docs:
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition
mimetype, encoding = guess_type(local_path)
- attachment = mimetype not in INLINE_MIME_TYPES
+ attachment = download or mimetype not in INLINE_MIME_TYPES
response = sendfile(
request, local_path, attachment=attachment, mimetype=mimetype, encoding=encoding
@@ -65,6 +69,12 @@ def serve_local(request: HttpRequest, path_id: str, url_only: bool) -> HttpRespo
return response
+def serve_file_download_backend(
+ request: HttpRequest, user_profile: UserProfile, realm_id_str: str, filename: str
+) -> HttpRequest:
+ return serve_file(request, user_profile, realm_id_str, filename, url_only=False, download=True)
+
+
def serve_file_backend(
request: HttpRequest, user_profile: UserProfile, realm_id_str: str, filename: str
) -> HttpResponse:
@@ -88,6 +98,7 @@ def serve_file(
realm_id_str: str,
filename: str,
url_only: bool = False,
+ download: bool = False,
) -> HttpResponse:
path_id = f"{realm_id_str}/{filename}"
is_authorized = validate_attachment_request(user_profile, path_id)
@@ -97,9 +108,9 @@ def serve_file(
if not is_authorized:
return HttpResponseForbidden(_("<p>You are not authorized to view this file.</p>"))
if settings.LOCAL_UPLOADS_DIR is not None:
- return serve_local(request, path_id, url_only)
+ return serve_local(request, path_id, url_only, download=download)
- return serve_s3(request, path_id, url_only)
+ return serve_s3(request, path_id, url_only, download=download)
def serve_local_file_unauthed(request: HttpRequest, token: str, filename: str) -> HttpResponse:
diff --git a/zproject/urls.py b/zproject/urls.py
--- a/zproject/urls.py
+++ b/zproject/urls.py
@@ -167,6 +167,7 @@
from zerver.views.unsubscribe import email_unsubscribe
from zerver.views.upload import (
serve_file_backend,
+ serve_file_download_backend,
serve_file_url_backend,
serve_local_file_unauthed,
upload_file_backend,
@@ -669,6 +670,10 @@
serve_local_file_unauthed,
name="local_file_unauthed",
),
+ rest_path(
+ "user_uploads/download/<realm_id_str>/<path:filename>",
+ GET=(serve_file_download_backend, {"override_api_url_scheme"}),
+ ),
rest_path(
"user_uploads/<realm_id_str>/<path:filename>",
GET=(serve_file_backend, {"override_api_url_scheme"}),
| diff --git a/zerver/tests/test_upload.py b/zerver/tests/test_upload.py
--- a/zerver/tests/test_upload.py
+++ b/zerver/tests/test_upload.py
@@ -210,6 +210,12 @@ def test_file_upload_authed(self) -> None:
# requests; they will be first authenticated and redirected
self.assert_streaming_content(self.client_get(uri), b"zulip!")
+ # Check the download endpoint
+ download_uri = uri.replace("/user_uploads/", "/user_uploads/download/")
+ result = self.client_get(download_uri)
+ self.assert_streaming_content(result, b"zulip!")
+ self.assertIn("attachment;", result.headers["Content-Disposition"])
+
# check if DB has attachment marked as unclaimed
entry = Attachment.objects.get(file_name="zulip.txt")
self.assertEqual(entry.is_claimed(), False)
@@ -815,7 +821,10 @@ def test_file_download_authorization_public(self) -> None:
def test_serve_local(self) -> None:
def check_xsend_links(
- name: str, name_str_for_test: str, content_disposition: str = ""
+ name: str,
+ name_str_for_test: str,
+ content_disposition: str = "",
+ download: bool = False,
) -> None:
with self.settings(SENDFILE_BACKEND="django_sendfile.backends.nginx"):
_get_sendfile.cache_clear() # To clearout cached version of backend from djangosendfile
@@ -826,6 +835,8 @@ def check_xsend_links(
uri = result.json()["uri"]
fp_path_id = re.sub("/user_uploads/", "", uri)
fp_path = os.path.split(fp_path_id)[0]
+ if download:
+ uri = uri.replace("/user_uploads/", "/user_uploads/download/")
response = self.client_get(uri)
_get_sendfile.cache_clear()
assert settings.LOCAL_UPLOADS_DIR is not None
@@ -852,6 +863,9 @@ def check_xsend_links(
check_xsend_links("zulip.html", "zulip.html", 'filename="zulip.html"')
check_xsend_links("zulip.sh", "zulip.sh", 'filename="zulip.sh"')
check_xsend_links("zulip.jpeg", "zulip.jpeg")
+ check_xsend_links(
+ "zulip.jpeg", "zulip.jpeg", download=True, content_disposition='filename="zulip.jpeg"'
+ )
check_xsend_links("ÑéΠΠ.pdf", "%C3%A1%C3%A9%D0%91%D0%94.pdf")
check_xsend_links("zulip", "zulip", 'filename="zulip"')
@@ -1935,6 +1949,15 @@ def test_file_upload_authed(self) -> None:
key = path[1:]
self.assertEqual(b"zulip!", bucket.Object(key).get()["Body"].read())
+ # Check the download endpoint
+ download_uri = uri.replace("/user_uploads/", "/user_uploads/download/")
+ response = self.client_get(download_uri)
+ redirect_url = response["Location"]
+ path = urllib.parse.urlparse(redirect_url).path
+ assert path.startswith("/")
+ key = path[1:]
+ self.assertEqual(b"zulip!", bucket.Object(key).get()["Body"].read())
+
# Now try the endpoint that's supposed to return a temporary URL for access
# to the file.
result = self.client_get("/json" + uri)
| Download button for an image in lightbox does not download the image
Regarding [this discussion in CZO](https://chat.zulip.org/#narrow/stream/16-desktop/topic/Downloading.20image.20problem.3F), when viewing an image in the lightbox of a current Zulip cloud instance, if a user clicks 'Download', the image displays rather than downloads.
The discussion at the link above suggests that a server-side setting is required to handle the MIME type so that the file is downloaded rather than displayed in this case. Perhaps this needs to be indicated in the URL?
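For context, the header that controls this is `Content-Disposition`, and (as the patch above does for the S3 backend) the override can indeed be pushed into the URL via a presigned-URL response parameter. A minimal sketch, assuming AWS credentials are configured and using placeholder region, bucket, and key values:

```python
import boto3

# A presigned GET URL that forces "save as" behavior regardless of the
# object's stored Content-Type. Region, bucket, and key are placeholders.
client = boto3.client("s3", region_name="us-east-1")
url = client.generate_presigned_url(
    ClientMethod="get_object",
    Params={
        "Bucket": "example-uploads-bucket",
        "Key": "2/ab/example.png",
        # The response override ends up encoded in the URL's query string,
        # so following the link downloads the file instead of rendering it.
        "ResponseContentDisposition": "attachment",
    },
    ExpiresIn=60,
)
print(url)
```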
| Earlier discussion here:
https://chat.zulip.org/#narrow/stream/16-desktop/topic/Download-image/near/858563
Reported again yesterday here:
https://chat.zulip.org/#narrow/stream/9-issues/topic/open.20and.20download.20mixed.20up/near/1263589
This issue still happens today. Please find screen recording and technical details (console logs, network requests) of this bug (unable to download image) by following the link https://app.birdeatsbug.com/sessions/lwC2BLaW4_hsjYjlE7nTm?networkPane=active
Hello @alexmv, you have been unassigned from this issue because you have not updated this issue or any referenced pull requests for over 14 days.
You can reclaim this issue or claim any other issue by commenting `@zulipbot claim` on that issue.
Thanks for your contributions, and hope to see you again soon!
Hello @alexmv, you have been unassigned from this issue because you have not updated this issue or any referenced pull requests for over 14 days.
You can reclaim this issue or claim any other issue by commenting `@zulipbot claim` on that issue.
Thanks for your contributions, and hope to see you again soon!
This is in progress; it requires https://github.com/cactus/go-camo/pull/57 to be able to land fully. | 2022-03-22T03:56:57 |
zulip/zulip | 21,532 | zulip__zulip-21532 | [
"20715"
] | cab37b4acaaae7e29cfdc1ed819f60173fd7ebfa | diff --git a/zerver/lib/upload.py b/zerver/lib/upload.py
--- a/zerver/lib/upload.py
+++ b/zerver/lib/upload.py
@@ -85,6 +85,14 @@
# through a sanitization function.
+# https://github.com/boto/botocore/issues/2644 means that the IMDS
+# request _always_ pulls from the environment. Monkey-patch the
+# `should_bypass_proxies` function if we need to skip them, based
+# on S3_SKIP_PROXY.
+if settings.S3_SKIP_PROXY is True: # nocoverage
+ botocore.utils.should_bypass_proxies = lambda url: True
+
+
class RealmUploadQuotaError(JsonableError):
code = ErrorCode.REALM_UPLOAD_QUOTA
diff --git a/zproject/default_settings.py b/zproject/default_settings.py
--- a/zproject/default_settings.py
+++ b/zproject/default_settings.py
@@ -137,6 +137,7 @@
S3_AUTH_UPLOADS_BUCKET = ""
S3_REGION: Optional[str] = None
S3_ENDPOINT_URL: Optional[str] = None
+S3_SKIP_PROXY = True
LOCAL_UPLOADS_DIR: Optional[str] = None
MAX_FILE_UPLOAD_SIZE = 25
diff --git a/zproject/prod_settings_template.py b/zproject/prod_settings_template.py
--- a/zproject/prod_settings_template.py
+++ b/zproject/prod_settings_template.py
@@ -721,6 +721,7 @@
# S3_AVATAR_BUCKET = ""
# S3_REGION = None
# S3_ENDPOINT_URL = None
+# S3_SKIP_PROXY = True
## Maximum allowed size of uploaded files, in megabytes. This value is
## capped at 80MB in the nginx configuration, because the file upload
| Add a configuration flag to skip proxy with boto3, for EC2 using IAM roles to upload to S3
Attempts to upload files to the S3 file upload backend can fail if the server uses IAM roles on the EC2 instance to authenticate the S3 upload. This is because boto3, when using IAM instance credentials, makes a request to the metadata IP address (169.254.169.254), which is precisely the kind of address that Smokescreen does _not_ allow.
The S3 storage backend should use a configuration option to allow [passing an empty set of proxies to `boto3`](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#specify-proxies). Enabling this by default is not correct, since non-EC2 hosts may _require_ the proxy if it is an exit proxy. Since we primarily document the access_key / secret_access_key authentication method, we should add an additional configuration option to support the corner case of IAM roles.
See [discussion on chat.zulip.org](https://chat.zulip.org/#narrow/stream/31-production-help/topic/Can't.20access.20AWS.20metadata.20service/near/1303045).
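For operators hitting this: the change that landed adds an `S3_SKIP_PROXY` setting (default `True`), so a configuration along these lines should work. A sketch of the relevant settings fragment, with placeholder bucket names:

```python
# /etc/zulip/settings.py (sketch): an EC2 host using an instance IAM role
# for S3 access, with Smokescreen as the only outgoing proxy.
S3_AUTH_UPLOADS_BUCKET = "example-zulip-uploads"
S3_AVATAR_BUCKET = "example-zulip-avatars"
# Added by this change; True (the default) lets boto3 reach the
# 169.254.169.254 metadata service directly instead of via the proxy.
S3_SKIP_PROXY = True
```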
| Hello @zulip/server-misc, @zulip/server-production members, this issue was labeled with the "area: production", "area: uploads" labels, so you may want to check it out!
<!-- areaLabelAddition -->
@zulipbot claim
Hello @Shubh0405!
Thanks for your interest in Zulip! You have attempted to claim an issue without the labels "help wanted", "good first issue". Since you're a new contributor, you can only claim and submit pull requests for issues with the [help wanted](https://github.com/zulip/zulip/issues?q=is%3Aopen+is%3Aissue+no%3Aassignee+label%3A%22help+wanted%22) or [good first issue](https://github.com/zulip/zulip/issues?q=is%3Aopen+is%3Aissue+no%3Aassignee+label%3A%22good+first+issue%22) labels.
If this is your first time here, we recommend reading our [guide for new contributors](https://zulip.readthedocs.io/en/latest/overview/contributing.html) before getting started.
@Shubh0405: Go ahead and give this a shot.
Hello @Shubh0405, you have been unassigned from this issue because you have not updated this issue or any referenced pull requests for over 14 days.
You can reclaim this issue or claim any other issue by commenting `@zulipbot claim` on that issue.
Thanks for your contributions, and hope to see you again soon!
| 2022-03-24T00:05:36 |
|
zulip/zulip | 21,577 | zulip__zulip-21577 | [
"19397"
] | 4e1befa209f3eaf21e69396bb4eab9b4aec40812 | diff --git a/zerver/actions/user_settings.py b/zerver/actions/user_settings.py
--- a/zerver/actions/user_settings.py
+++ b/zerver/actions/user_settings.py
@@ -1,5 +1,5 @@
import datetime
-from typing import Optional, Union
+from typing import List, Optional, Union
import orjson
from django.db import transaction
@@ -29,6 +29,7 @@
UserProfile,
active_user_ids,
bot_owner_user_ids,
+ get_user_profile_by_id,
)
from zerver.tornado.django_api import send_event
@@ -246,6 +247,12 @@ def do_regenerate_api_key(user_profile: UserProfile, acting_user: UserProfile) -
return new_api_key
+def bulk_regenerate_api_keys(user_profile_ids: List[int]) -> None:
+ for user_profile_id in user_profile_ids:
+ user_profile = get_user_profile_by_id(user_profile_id)
+ do_regenerate_api_key(user_profile, user_profile)
+
+
def notify_avatar_url_change(user_profile: UserProfile) -> None:
if user_profile.is_bot:
bot_event = dict(
diff --git a/zerver/management/commands/logout_all_users.py b/zerver/management/commands/logout_all_users.py
--- a/zerver/management/commands/logout_all_users.py
+++ b/zerver/management/commands/logout_all_users.py
@@ -1,12 +1,16 @@
from argparse import ArgumentParser
from typing import Any
+from django.db.models import Q
+
+from zerver.actions.user_settings import bulk_regenerate_api_keys
from zerver.lib.management import ZulipBaseCommand
from zerver.lib.sessions import (
delete_all_deactivated_user_sessions,
delete_all_user_sessions,
delete_realm_user_sessions,
)
+from zerver.models import UserProfile
class Command(ZulipBaseCommand):
@@ -23,13 +27,29 @@ def add_arguments(self, parser: ArgumentParser) -> None:
action="store_true",
help="Only log out all users who are deactivated",
)
+ parser.add_argument(
+ "--rotate-api-keys",
+ action="store_true",
+ help="Also rotate API keys of the affected users",
+ )
self.add_realm_args(parser, help="Only log out all users in a particular realm")
def handle(self, *args: Any, **options: Any) -> None:
realm = self.get_realm(options)
+ rotate_api_keys = options["rotate_api_keys"]
if realm:
delete_realm_user_sessions(realm)
+ regenerate_api_key_queryset = UserProfile.objects.filter(realm=realm).values_list(
+ "id", flat=True
+ )
elif options["deactivated_only"]:
delete_all_deactivated_user_sessions()
+ regenerate_api_key_queryset = UserProfile.objects.filter(
+ Q(is_active=False) | Q(realm__deactivated=True)
+ ).values_list("id", flat=True)
else:
delete_all_user_sessions()
+ regenerate_api_key_queryset = UserProfile.objects.values_list("id", flat=True)
+
+ if rotate_api_keys:
+ bulk_regenerate_api_keys(regenerate_api_key_queryset)
| diff --git a/zerver/tests/test_users.py b/zerver/tests/test_users.py
--- a/zerver/tests/test_users.py
+++ b/zerver/tests/test_users.py
@@ -17,6 +17,7 @@
from zerver.actions.message_send import get_recipient_info
from zerver.actions.muted_users import do_mute_user
from zerver.actions.realm_settings import do_set_realm_property
+from zerver.actions.user_settings import bulk_regenerate_api_keys
from zerver.actions.users import (
change_user_is_active,
do_change_can_create_users,
@@ -2271,3 +2272,25 @@ def test_invalid_fake_email_domain_ip(self) -> None:
with self.assertRaises(InvalidFakeEmailDomain):
realm = get_realm("zulip")
get_fake_email_domain(realm)
+
+
+class TestBulkRegenerateAPIKey(ZulipTestCase):
+ def test_bulk_regenerate_api_keys(self) -> None:
+ hamlet = self.example_user("hamlet")
+ cordelia = self.example_user("cordelia")
+ othello = self.example_user("othello")
+
+ hamlet_old_api_key = hamlet.api_key
+ cordelia_old_api_key = cordelia.api_key
+ othello_old_api_key = othello.api_key
+
+ bulk_regenerate_api_keys([hamlet.id, cordelia.id])
+
+ hamlet.refresh_from_db()
+ cordelia.refresh_from_db()
+ othello.refresh_from_db()
+
+ self.assertNotEqual(hamlet_old_api_key, hamlet.api_key)
+ self.assertNotEqual(cordelia_old_api_key, cordelia.api_key)
+
+ self.assertEqual(othello_old_api_key, othello.api_key)
| Extend and document logout_all_users management command
We currently don't have a documented/recommended option if a system administrator wants to log out all users.
The undocumented `./manage.py logout_all_users` management command will delete all browser sessions, but doesn't do anything with API keys (which it should probably do by default, with an option to skip).
See https://chat.zulip.org/#narrow/stream/9-issues/topic/force.20logout/near/1235628 for some background.
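With the `--rotate-api-keys` flag added here, a documented invocation could look roughly like this; shown via Django's `call_command` so it can be pasted into `./manage.py shell` (the equivalent CLI form is `./manage.py logout_all_users --rotate-api-keys`, and the realm subdomain below is a placeholder):

```python
from django.core.management import call_command

# Log out every user on the server and also rotate their API keys:
call_command("logout_all_users", "--rotate-api-keys")

# Or scope it to one organization (subdomain "example" is a placeholder):
# call_command("logout_all_users", "-r", "example", "--rotate-api-keys")
```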
| Hello @zulip/server-production, @zulip/server-tooling members, this issue was labeled with the "area: documentation (production)", "area: tooling" labels, so you may want to check it out!
<!-- areaLabelAddition --> | 2022-03-27T20:42:08 |
zulip/zulip | 21,579 | zulip__zulip-21579 | [
"21520"
] | cbfe2707f456ff44683fcf4f10514f0364dc758a | diff --git a/zerver/lib/markdown/tabbed_sections.py b/zerver/lib/markdown/tabbed_sections.py
--- a/zerver/lib/markdown/tabbed_sections.py
+++ b/zerver/lib/markdown/tabbed_sections.py
@@ -55,8 +55,6 @@
"mm-default": "Default installation",
"mm-docker": "Docker",
"mm-gitlab-omnibus": "GitLab Omnibus",
- "send-email-invitations": "Send email invitations",
- "share-an-invite-link": "Share an invite link",
"require-invitations": "Require invitations",
"allow-anyone-to-join": "Allow anyone to join",
"restrict-by-email-domain": "Restrict by email domain",
| Document custom expiration times for invitations
In #19680, we added the ability to set custom expiration times for invitations and invite links. This should be documented in the help center at https://zulip.com/help/invite-new-users.
Note that this feature will be further extended in #19681; we might be able to write the documentation so that it doesn't have to be updated when this happens.
| Hello @zulip/server-user-docs members, this issue was labeled with the "area: documentation (user)" label, so you may want to check it out!
<!-- areaLabelAddition -->
| 2022-03-28T12:26:17 |
|
zulip/zulip | 21,647 | zulip__zulip-21647 | [
"21608"
] | 0af00a3233fc8964b1ba7c32e672b74d80889b89 | diff --git a/zerver/migrations/0387_reupload_realmemoji_again.py b/zerver/migrations/0387_reupload_realmemoji_again.py
new file mode 100644
--- /dev/null
+++ b/zerver/migrations/0387_reupload_realmemoji_again.py
@@ -0,0 +1,46 @@
+from django.conf import settings
+from django.db import migrations
+from django.db.backends.postgresql.schema import DatabaseSchemaEditor
+from django.db.migrations.state import StateApps
+
+from zerver.lib.queue import queue_json_publish
+
+
+def reupload_realm_emoji(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
+ """As detailed in https://github.com/zulip/zulip/issues/21608, it is
+ possible for the deferred_work queue from Zulip 4.x to have been
+ started up by puppet during the deployment before migrations were
+ run on Zulip 5.0.
+
+ This means that the deferred_work events produced by migration
+ 0376 might have been processed and discarded without effect.
+
+ Since it's harmless to reupload a custom emoji a second time, we
+ fix this issue for the slice of servers that have already
+ installed 5.0 by repeating that part of the migration.
+ """
+
+ Realm = apps.get_model("zerver", "Realm")
+ if settings.TEST_SUITE:
+ # There are no custom emoji in the test suite data set, and
+ # the below code won't work because RabbitMQ isn't enabled for
+ # the test suite.
+ return
+
+ for realm_id in Realm.objects.order_by("id").values_list("id", flat=True):
+ event = {
+ "type": "reupload_realm_emoji",
+ "realm_id": realm_id,
+ }
+ queue_json_publish("deferred_work", event)
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ("zerver", "0386_fix_attachment_caches"),
+ ]
+
+ operations = [
+ migrations.RunPython(reupload_realm_emoji, reverse_code=migrations.RunPython.noop),
+ ]
| Animated status emojis uploaded before 5.x always animate, not only on hover
Animated emojis uploaded before version 5.x, when used as status emojis, currently animate at all times next to user names in the sidebar and message feed, rather than only on hover.
Newly uploaded, animated emojis have the desired behavior, only animating on hover.
| Thanks for the report @alfonsrv ! We had a migration that was supposed to address this; it's definitely something we'd like to take care of. Would you be willing to stop by the [#production help stream on chat.zulip.org](https://chat.zulip.org/#narrow/stream/31-production-help) so that we can debug interactively?
Hello @zulip/server-emoji members, this issue was labeled with the "area: emoji" label, so you may want to check it out!
<!-- areaLabelAddition -->
I'm puzzled by the report, since we added a migration to re-upload all existing custom emoji in 30ac291ebab5ead80de31f0ea07b4d845013fd8f.
That migration makes use of the `deferred_work` queue processor. @alfonsrv can you check `rabbitmqctl list_queues` (as `root`) to see whether your `deferred_work` queue processor just hasn't processed these jobs?
I'd expect logs to be in `/var/log/zulip/events_deferred_work.log` or if that doesn't exist, `/var/log/zulip/events.log`.
This is being discussed here: https://chat.zulip.org/#narrow/stream/31-production-help/topic/Status.20icons.20animate.20after.20upgrade.20.28issue.20.2321608.29/near/1357180 | 2022-04-01T00:28:42 |
|
zulip/zulip | 21,678 | zulip__zulip-21678 | [
"21646"
] | 9f8022de5ee222a05516a17bcb24787228170d14 | diff --git a/zilencer/management/commands/populate_db.py b/zilencer/management/commands/populate_db.py
--- a/zilencer/management/commands/populate_db.py
+++ b/zilencer/management/commands/populate_db.py
@@ -197,6 +197,14 @@ def add_arguments(self, parser: CommandParser) -> None:
"-n", "--num-messages", type=int, default=500, help="The number of messages to create."
)
+ parser.add_argument(
+ "-o",
+ "--oldest-message-days",
+ type=int,
+ default=5,
+ help="The start of the time range where messages could have been sent.",
+ )
+
parser.add_argument(
"-b",
"--batch-size",
@@ -1087,7 +1095,9 @@ def generate_and_send_messages(
message.subject = random.choice(possible_topics[message.recipient.id])
saved_data["subject"] = message.subject
- message.date_sent = choose_date_sent(num_messages, tot_messages, options["threads"])
+ message.date_sent = choose_date_sent(
+ num_messages, tot_messages, options["oldest_message_days"], options["threads"]
+ )
messages.append(message)
recipients[num_messages] = (message_type, message.recipient.id, saved_data)
@@ -1174,25 +1184,32 @@ def bulk_create_reactions(all_messages: List[Message]) -> None:
Reaction.objects.bulk_create(reactions)
-def choose_date_sent(num_messages: int, tot_messages: int, threads: int) -> datetime:
+def choose_date_sent(
+ num_messages: int, tot_messages: int, oldest_message_days: int, threads: int
+) -> datetime:
# Spoofing time not supported with threading
if threads != 1:
return timezone_now()
- # Distrubutes 80% of messages starting from 5 days ago, over a period
- # of 3 days. Then, distributes remaining messages over past 24 hours.
+ # We want to ensure that:
+ # (1) some messages are sent in the last 4 hours,
+ # (2) there are some >24hr gaps between adjacent messages, and
+ # (3) a decent bulk of messages in the last day so you see adjacent messages with the same date.
+ # So we distribute 80% of messages starting from oldest_message_days days ago, over a period
+ # of the first min(oldest_message_days-2, 1) of those days. Then, distributes remaining messages
+ # over the past 24 hours.
amount_in_first_chunk = int(tot_messages * 0.8)
amount_in_second_chunk = tot_messages - amount_in_first_chunk
+
if num_messages < amount_in_first_chunk:
- # Distribute starting from 5 days ago, over a period
- # of 3 days:
- spoofed_date = timezone_now() - timezone_timedelta(days=5)
- interval_size = 3 * 24 * 60 * 60 / amount_in_first_chunk
+ spoofed_date = timezone_now() - timezone_timedelta(days=oldest_message_days)
+ num_days_for_first_chunk = min(oldest_message_days - 2, 1)
+ interval_size = num_days_for_first_chunk * 24 * 60 * 60 / amount_in_first_chunk
lower_bound = interval_size * num_messages
upper_bound = interval_size * (num_messages + 1)
else:
- # We're in the last 20% of messages, distribute them over the last 24 hours:
+ # We're in the last 20% of messages, so distribute them over the last 24 hours:
spoofed_date = timezone_now() - timezone_timedelta(days=1)
interval_size = 24 * 60 * 60 / amount_in_second_chunk
lower_bound = interval_size * (num_messages - amount_in_first_chunk)
| diff --git a/zerver/tests/test_populate_db.py b/zerver/tests/test_populate_db.py
--- a/zerver/tests/test_populate_db.py
+++ b/zerver/tests/test_populate_db.py
@@ -13,7 +13,7 @@ def test_choose_date_sent_large_tot_messages(self) -> None:
"""
tot_messages = 1000000
datetimes_list = [
- choose_date_sent(i, tot_messages, 1)
+ choose_date_sent(i, tot_messages, 5, 1)
for i in range(1, tot_messages, tot_messages // 100)
]
| populate_db: Add support for creating messages older than 5 days ago
This function in `populate_db` hardcodes several parameters that would ideally be options one can pass into the management command (5 days, 3 days, 1 day). We should add some coherent set of options that can be used alongside `--num-messages` to generate older messages.
```
def choose_date_sent(num_messages: int, tot_messages: int, threads: int) -> datetime:
# Spoofing time not supported with threading
if threads != 1:
return timezone_now()
# Distrubutes 80% of messages starting from 5 days ago, over a period
# of 3 days. Then, distributes remaining messages over past 24 hours.
amount_in_first_chunk = int(tot_messages * 0.8)
amount_in_second_chunk = tot_messages - amount_in_first_chunk
if num_messages < amount_in_first_chunk:
# Distribute starting from 5 days ago, over a period
# of 3 days:
spoofed_date = timezone_now() - timezone_timedelta(days=5)
interval_size = 3 * 24 * 60 * 60 / amount_in_first_chunk
lower_bound = interval_size * num_messages
upper_bound = interval_size * (num_messages + 1)
else:
# We're in the last 20% of messages, distribute them over the last 24 hours:
spoofed_date = timezone_now() - timezone_timedelta(days=1)
interval_size = 24 * 60 * 60 / amount_in_second_chunk
```
| Hello @zulip/server-development members, this issue was labeled with the "area: provision" label, so you may want to check it out!
<!-- areaLabelAddition -->
@zulipbot claim
Hello @evykassirer, it looks like you've currently claimed 1 issue in this repository. We encourage new contributors to focus their efforts on at most 1 issue at a time, so please complete your work on your other claimed issues before trying to claim this issue again.
We look forward to your valuable contributions!
| 2022-04-04T22:19:39 |
zulip/zulip | 21,726 | zulip__zulip-21726 | [
"20870"
] | 53b18f819a93e5f1396d76261ea8813496bf424e | diff --git a/tools/lib/capitalization.py b/tools/lib/capitalization.py
--- a/tools/lib/capitalization.py
+++ b/tools/lib/capitalization.py
@@ -78,6 +78,9 @@
r"more topics",
# Used alone in a parenthetical where capitalized looks worse.
r"^deprecated$",
+ # We want the similar text in the Private Messages section to have the same capitalization.
+ r"more conversations",
+ r"back to streams",
# Capital 'i' looks weird in reminders popover
r"in 1 hour",
r"in 20 minutes",
| diff --git a/frontend_tests/node_tests/dispatch.js b/frontend_tests/node_tests/dispatch.js
--- a/frontend_tests/node_tests/dispatch.js
+++ b/frontend_tests/node_tests/dispatch.js
@@ -40,6 +40,7 @@ const message_lists = mock_esm("../../static/js/message_lists");
const muted_topics_ui = mock_esm("../../static/js/muted_topics_ui");
const muted_users_ui = mock_esm("../../static/js/muted_users_ui");
const notifications = mock_esm("../../static/js/notifications");
+const pm_list = mock_esm("../../static/js/pm_list");
const reactions = mock_esm("../../static/js/reactions");
const realm_icon = mock_esm("../../static/js/realm_icon");
const realm_logo = mock_esm("../../static/js/realm_logo");
@@ -1020,6 +1021,7 @@ run_test("user_status", ({override}) => {
{
const stub = make_stub();
override(activity, "redraw_user", stub.f);
+ override(pm_list, "update_private_messages", noop);
dispatch(event);
assert.equal(stub.num_calls, 1);
const args = stub.get_args("user_id");
diff --git a/frontend_tests/node_tests/pm_list.js b/frontend_tests/node_tests/pm_list.js
--- a/frontend_tests/node_tests/pm_list.js
+++ b/frontend_tests/node_tests/pm_list.js
@@ -17,14 +17,16 @@ run_test("update_dom_with_unread_counts", () => {
assert.equal(narrow_state.active(), true);
const $total_count = $.create("total-count-stub");
- const $private_li = $(".top_left_private_messages .private_messages_header");
+ const $private_li = $(
+ ".private_messages_container #private_messages_section #private_messages_section_header",
+ );
$private_li.set_find_results(".unread_count", $total_count);
counts = {
private_message_count: 10,
};
- pm_list.update_dom_with_unread_counts(counts);
+ pm_list.set_count(counts.private_message_count);
assert.equal($total_count.text(), "10");
assert.ok($total_count.visible());
@@ -32,7 +34,7 @@ run_test("update_dom_with_unread_counts", () => {
private_message_count: 0,
};
- pm_list.update_dom_with_unread_counts(counts);
+ pm_list.set_count(counts.private_message_count);
assert.equal($total_count.text(), "");
assert.ok(!$total_count.visible());
});
diff --git a/frontend_tests/node_tests/recent_topics.js b/frontend_tests/node_tests/recent_topics.js
--- a/frontend_tests/node_tests/recent_topics.js
+++ b/frontend_tests/node_tests/recent_topics.js
@@ -107,6 +107,10 @@ const narrow = mock_esm("../../static/js/narrow", {
handle_middle_pane_transition: noop,
has_shown_message_list_view: true,
});
+mock_esm("../../static/js/pm_list", {
+ update_private_messages: noop,
+ handle_narrow_deactivated: noop,
+});
mock_esm("../../static/js/popovers", {
any_active: () => false,
});
diff --git a/frontend_tests/node_tests/stream_list.js b/frontend_tests/node_tests/stream_list.js
--- a/frontend_tests/node_tests/stream_list.js
+++ b/frontend_tests/node_tests/stream_list.js
@@ -409,6 +409,7 @@ test_ui("narrowing", ({mock_template}) => {
topic_list.rebuild = noop;
topic_list.active_stream_id = noop;
topic_list.get_stream_li = noop;
+ $("#streams_header").outerHeight = () => 0;
assert.ok(!$("<devel-sidebar-row-stub>").hasClass("active-filter"));
@@ -700,6 +701,7 @@ test_ui("refresh_pin", ({override, override_rewire, mock_template}) => {
override_rewire(stream_list, "update_count_in_dom", noop);
$("#stream_filters").append = noop;
+ $("#streams_header").outerHeight = () => 0;
let scrolled;
override(scroll_util, "scroll_element_into_container", ($li) => {
diff --git a/frontend_tests/node_tests/vdom.js b/frontend_tests/node_tests/vdom.js
--- a/frontend_tests/node_tests/vdom.js
+++ b/frontend_tests/node_tests/vdom.js
@@ -26,7 +26,7 @@ run_test("basics", () => {
run_test("attribute escaping", () => {
// So far most of the time our attributes are
- // hard-coded classes like "expanded_private_messages",
+ // hard-coded classes like "pm-list",
// but we need to be defensive about future code
// that might use data from possibly malicious users.
const opts = {
diff --git a/frontend_tests/puppeteer_tests/compose.ts b/frontend_tests/puppeteer_tests/compose.ts
--- a/frontend_tests/puppeteer_tests/compose.ts
+++ b/frontend_tests/puppeteer_tests/compose.ts
@@ -113,7 +113,7 @@ async function test_narrow_to_private_messages_with_cordelia(page: Page): Promis
you_and_cordelia_selector,
);
const cordelia_user_id = await common.get_user_id_from_name(page, "Cordelia, Lear's daughter");
- const pm_list_selector = `li[data-user-ids-string="${cordelia_user_id}"].expanded_private_message.active-sub-filter`;
+ const pm_list_selector = `li[data-user-ids-string="${cordelia_user_id}"].pm-list-item.active-sub-filter`;
await page.waitForSelector(pm_list_selector, {visible: true});
await close_compose_box(page);
diff --git a/frontend_tests/puppeteer_tests/message-basics.ts b/frontend_tests/puppeteer_tests/message-basics.ts
--- a/frontend_tests/puppeteer_tests/message-basics.ts
+++ b/frontend_tests/puppeteer_tests/message-basics.ts
@@ -280,7 +280,9 @@ async function test_narrow_by_clicking_the_left_sidebar(page: Page): Promise<voi
await page.click(".top_left_all_messages a");
await expect_home(page);
- await page.click(".top_left_private_messages a");
+ const all_private_messages_icon = "#show_all_private_messages";
+ await page.waitForSelector(all_private_messages_icon, {visible: true});
+ await page.click(all_private_messages_icon);
await expect_all_pm(page);
await un_narrow(page);
diff --git a/frontend_tests/puppeteer_tests/navigation.ts b/frontend_tests/puppeteer_tests/navigation.ts
--- a/frontend_tests/puppeteer_tests/navigation.ts
+++ b/frontend_tests/puppeteer_tests/navigation.ts
@@ -64,6 +64,16 @@ async function navigate_to_subscriptions(page: Page): Promise<void> {
await page.waitForSelector("#subscription_overlay", {hidden: true});
}
+async function navigate_to_private_messages(page: Page): Promise<void> {
+ console.log("Navigate to private messages");
+
+ const all_private_messages_icon = "#show_all_private_messages";
+ await page.waitForSelector(all_private_messages_icon, {visible: true});
+ await page.click(all_private_messages_icon);
+
+ await page.waitForSelector("#message_view_header .fa-envelope", {visible: true});
+}
+
async function test_reload_hash(page: Page): Promise<void> {
const initial_page_load_time = await page.evaluate(
(): number => zulip_test.page_params.page_load_time,
@@ -99,7 +109,7 @@ async function navigation_tests(page: Page): Promise<void> {
await navigate_to_subscriptions(page);
await navigate_using_left_sidebar(page, "all_messages", "message_feed_container");
await navigate_to_settings(page);
- await navigate_using_left_sidebar(page, "narrow/is/private", "message_feed_container");
+ await navigate_to_private_messages(page);
await navigate_to_subscriptions(page);
await navigate_using_left_sidebar(page, verona_narrow, "message_feed_container");
| Create collapsible "Private messages" section in left sidebar
At present, private messages are collapsed in the left sidebar unless the user is in a private message narrow. This has a few downsides:
1. Getting to a PM conversation generally requires multiple clicks.
2. It's not immediately clear who sent you a new private message, which is important for determining whether one needs to read it right away.
3. It can be hard for new users to figure out how to view and send private messages.
In order to address this, we should try making a private messages section in the left sidebar that is open by default. Specifically:
1. Make a Private messages section just above STREAMS in the left sidebar that is open by default.
2. In the new PMs section, use the same algorithm we use for stream topics to decide how many conversations to show.
3. Make the PMs section collapsible, similar to the collapsible sections in #20072. The open/collapsed state should be sticky as the user navigates around Zulip, closes and reopens the window, logs out and in, etc.
Note that this will likely require experimentation for us to get it right. To avoid misdirected effort, please post screenshots in the #design stream on chat.zulip.org for feedback. Also, if (3) can't be implemented quickly, we can test the experience in chat.zulip.org without waiting for it to be completed.
[Prior discussion on CZO](https://chat.zulip.org/#narrow/stream/101-design/topic/private.20messages.20UI/near/1159032).
See also #11108.
| Hello @zulip/server-sidebars members, this issue was labeled with the "area: left-sidebar" label, so you may want to check it out!
<!-- areaLabelAddition -->
@zulipbot claim
Hello @jai2201, it looks like you've currently claimed 1 issue in this repository. We encourage new contributors to focus their efforts on at most 1 issue at a time, so please complete your work on your other claimed issues before trying to claim this issue again.
We look forward to your valuable contributions!
@zulipbot claim
Hello @jai2201, you have been unassigned from this issue because you have not updated this issue or any referenced pull requests for over 14 days.
You can reclaim this issue or claim any other issue by commenting `@zulipbot claim` on that issue.
Thanks for your contributions, and hope to see you again soon!
@zulipbot I'm still working on this issue.
@zulipbot claim
Hello @jai2201, you have been unassigned from this issue because you have not updated this issue or any referenced pull requests for over 14 days.
You can reclaim this issue or claim any other issue by commenting `@zulipbot claim` on that issue.
Thanks for your contributions, and hope to see you again soon!
@zulipbot claim
Hello @jai2201, you have been unassigned from this issue because you have not updated this issue or any referenced pull requests for over 14 days.
You can reclaim this issue or claim any other issue by commenting `@zulipbot claim` on that issue.
Thanks for your contributions, and hope to see you again soon!
@zulipbot claim
@jai2201 You have been unassigned from this issue because you have not made any updates for over 14 days. Please feel free to reclaim the issue if you decide to pick up again. Thanks!
| 2022-04-08T03:59:08 |
zulip/zulip | 21,740 | zulip__zulip-21740 | [
"21267"
] | 0df4d8eb172bee9988d0a33c251368d2c2aa08ae | diff --git a/zerver/lib/actions.py b/zerver/lib/actions.py
--- a/zerver/lib/actions.py
+++ b/zerver/lib/actions.py
@@ -487,6 +487,9 @@ def process_new_human_user(
if prereg_user is not None:
streams: List[Stream] = list(prereg_user.streams.all())
acting_user: Optional[UserProfile] = prereg_user.referred_by
+
+ # A PregistrationUser should not be used for another UserProfile
+ assert prereg_user.created_user is None, "PregistrationUser should not be reused"
else:
streams = []
acting_user = None
@@ -529,9 +532,34 @@ def process_new_human_user(
),
)
- revoke_preregistration_users(user_profile, prereg_user, realm_creation)
- if not realm_creation and prereg_user is not None and prereg_user.referred_by is not None:
- notify_invites_changed(user_profile.realm)
+ # Revoke all preregistration users except prereg_user, and link prereg_user to
+ # the created user
+ if prereg_user is None:
+ assert not realm_creation, "realm_creation should only happen with a PreregistrationUser"
+
+ if prereg_user is not None:
+ prereg_user.status = confirmation_settings.STATUS_ACTIVE
+ prereg_user.created_user = user_profile
+ prereg_user.save(update_fields=["status", "created_user"])
+
+ # In the special case of realm creation, there can be no additional PreregistrationUser
+ # for us to want to modify - because other realm_creation PreregistrationUsers should be
+ # left usable for creating different realms.
+ if not realm_creation:
+ # Mark any other PreregistrationUsers in the realm that are STATUS_ACTIVE as
+ # inactive so we can keep track of the PreregistrationUser we
+ # actually used for analytics.
+ if prereg_user is not None:
+ PreregistrationUser.objects.filter(
+ email__iexact=user_profile.delivery_email, realm=user_profile.realm
+ ).exclude(id=prereg_user.id).update(status=confirmation_settings.STATUS_REVOKED)
+ else:
+ PreregistrationUser.objects.filter(
+ email__iexact=user_profile.delivery_email, realm=user_profile.realm
+ ).update(status=confirmation_settings.STATUS_REVOKED)
+
+ if prereg_user is not None and prereg_user.referred_by is not None:
+ notify_invites_changed(user_profile.realm)
notify_new_user(user_profile)
# Clear any scheduled invitation emails to prevent them
@@ -547,39 +575,6 @@ def process_new_human_user(
send_initial_pms(user_profile)
-def revoke_preregistration_users(
- created_user_profile: UserProfile,
- used_preregistration_user: Optional[PreregistrationUser],
- realm_creation: bool,
-) -> None:
- if used_preregistration_user is None:
- assert not realm_creation, "realm_creation should only happen with a PreregistrationUser"
-
- if used_preregistration_user is not None:
- used_preregistration_user.status = confirmation_settings.STATUS_ACTIVE
- used_preregistration_user.save(update_fields=["status"])
-
- # In the special case of realm creation, there can be no additional PreregistrationUser
- # for us to want to modify - because other realm_creation PreregistrationUsers should be
- # left usable for creating different realms.
- if realm_creation:
- return
-
- # Mark any other PreregistrationUsers in the realm that are STATUS_ACTIVE as
- # inactive so we can keep track of the PreregistrationUser we
- # actually used for analytics.
- if used_preregistration_user is not None:
- PreregistrationUser.objects.filter(
- email__iexact=created_user_profile.delivery_email, realm=created_user_profile.realm
- ).exclude(id=used_preregistration_user.id).update(
- status=confirmation_settings.STATUS_REVOKED
- )
- else:
- PreregistrationUser.objects.filter(
- email__iexact=created_user_profile.delivery_email, realm=created_user_profile.realm
- ).update(status=confirmation_settings.STATUS_REVOKED)
-
-
def notify_created_user(user_profile: UserProfile) -> None:
user_row = user_profile_to_user_row(user_profile)
person = format_user_row(
diff --git a/zerver/migrations/0388_preregistrationuser_created_user.py b/zerver/migrations/0388_preregistrationuser_created_user.py
new file mode 100644
--- /dev/null
+++ b/zerver/migrations/0388_preregistrationuser_created_user.py
@@ -0,0 +1,25 @@
+# Generated by Django 3.2.12 on 2022-04-09 00:38
+
+import django.db.models.deletion
+from django.conf import settings
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ("zerver", "0387_reupload_realmemoji_again"),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name="preregistrationuser",
+ name="created_user",
+ field=models.ForeignKey(
+ null=True,
+ on_delete=django.db.models.deletion.SET_NULL,
+ related_name="+",
+ to=settings.AUTH_USER_MODEL,
+ ),
+ ),
+ ]
diff --git a/zerver/models.py b/zerver/models.py
--- a/zerver/models.py
+++ b/zerver/models.py
@@ -2226,6 +2226,12 @@ class PreregistrationUser(models.Model):
)
invited_as: int = models.PositiveSmallIntegerField(default=INVITE_AS["MEMBER"])
+ # The UserProfile created upon completion of the registration
+ # for this PregistrationUser
+ created_user: Optional[UserProfile] = models.ForeignKey(
+ UserProfile, null=True, related_name="+", on_delete=models.SET_NULL
+ )
+
class Meta:
indexes = [
models.Index(Upper("email"), name="upper_preregistration_email_idx"),
| diff --git a/zerver/tests/test_signup.py b/zerver/tests/test_signup.py
--- a/zerver/tests/test_signup.py
+++ b/zerver/tests/test_signup.py
@@ -46,6 +46,7 @@
do_set_realm_property,
do_set_realm_user_default_setting,
get_default_streams_for_realm,
+ process_new_human_user,
)
from zerver.lib.email_notifications import enqueue_welcome_emails, followup_day2_email_delay
from zerver.lib.initial_password import initial_password
@@ -2155,7 +2156,7 @@ def test_send_more_than_one_invite_to_same_user(self) -> None:
invites = PreregistrationUser.objects.filter(email__iexact="[email protected]")
self.assert_length(invites, 4)
- do_create_user(
+ created_user = do_create_user(
"[email protected]",
"password",
self.user_profile.realm,
@@ -2174,6 +2175,7 @@ def test_send_more_than_one_invite_to_same_user(self) -> None:
# the others must be canceled.
self.assert_length(accepted_invite, 1)
self.assertEqual(accepted_invite[0].id, prereg_user.id)
+ self.assertEqual(accepted_invite[0].created_user, created_user)
expected_revoked_invites = set(invites.exclude(id=prereg_user.id).exclude(realm=lear))
self.assertEqual(set(revoked_invites), expected_revoked_invites)
@@ -2182,6 +2184,9 @@ def test_send_more_than_one_invite_to_same_user(self) -> None:
PreregistrationUser.objects.get(email__iexact="[email protected]", realm=lear).status, 0
)
+ with self.assertRaises(AssertionError):
+ process_new_human_user(created_user, prereg_user)
+
def test_confirmation_obj_not_exist_error(self) -> None:
"""Since the key is a param input by the user to the registration endpoint,
if it inserts an invalid value, the confirmation object won't be found. This
| PreregistrationUser should link to the UserProfile it created
When a PreregistrationUser is marked as "used," there is no clear link between that object and the UserProfile it spawned, other than sharing an email address -- which can be later changed.
We should add a `created_user_id` column on PreregistrationUser which links to the UserProfile that it was used to create. We should also add an assert to ensure that a PreregistrationUser is only used to create a single UserProfile.
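Once the link exists (the patch above adds it as a nullable `created_user` foreign key plus an assertion in `process_new_human_user`), lookups no longer need to match on email. A couple of illustrative helpers, which are examples rather than part of the change:

```python
from typing import Optional

from zerver.models import PreregistrationUser, UserProfile

def invitation_that_created(user: UserProfile) -> Optional[PreregistrationUser]:
    # Which invitation (if any) produced this account? This stays correct
    # even if the user's delivery email has since been changed.
    return PreregistrationUser.objects.filter(created_user=user).first()

def count_invite_created_accounts(realm_id: int) -> int:
    # Rough analytics: how many accounts in a realm were created via invitations?
    return (
        PreregistrationUser.objects.filter(realm_id=realm_id, created_user__isnull=False)
        .values("created_user")
        .distinct()
        .count()
    )
```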
| Hello @zulip/server-authentication, @zulip/server-onboarding members, this issue was labeled with the "area: authentication", "area: invitations" labels, so you may want to check it out!
<!-- areaLabelAddition -->
Hello @RyanJHamby!
Thanks for your interest in Zulip! You have attempted to claim an issue without the labels "help wanted", "good first issue". Since you're a new contributor, you can only claim and submit pull requests for issues with the [help wanted](https://github.com/zulip/zulip/issues?q=is%3Aopen+is%3Aissue+no%3Aassignee+label%3A%22help+wanted%22) or [good first issue](https://github.com/zulip/zulip/issues?q=is%3Aopen+is%3Aissue+no%3Aassignee+label%3A%22good+first+issue%22) labels.
If this is your first time here, we recommend reading our [guide for new contributors](https://zulip.readthedocs.io/en/latest/overview/contributing.html) before getting started.
@zulipbot claim | 2022-04-09T01:51:55 |
zulip/zulip | 21,836 | zulip__zulip-21836 | [
"16022"
] | 1db591de54b9d573094bcc5307d0af279a69d370 | diff --git a/zerver/tornado/event_queue.py b/zerver/tornado/event_queue.py
--- a/zerver/tornado/event_queue.py
+++ b/zerver/tornado/event_queue.py
@@ -1129,50 +1129,62 @@ def process_message_update_event(
stream_name = event_template.get("stream_name")
message_id = event_template["message_id"]
+ # TODO/compatibility: Modern `update_message` events contain the
+ # rendering_only key, which indicates whether the update is a link
+ # preview rendering update (not a human action). However, because
+ # events may be in the notify_tornado queue at the time we
+ # upgrade, we need the below logic to compute rendering_only based
+ # on the `user_id` key not being present in legacy events that
+ # would have had rendering_only set. Remove this check when one
+ # can no longer directly update from 4.x to main.
+ if "rendering_only" in event_template:
+ rendering_only_update = event_template["rendering_only"]
+ else:
+ rendering_only_update = "user_id" not in event_template
+
for user_data in users:
user_profile_id = user_data["id"]
- if "user_id" in event_template:
- # The user we'll get here will be the sender if the message's
- # content was edited, and the editor for topic edits. That's
- # the correct "acting_user" for both cases.
- acting_user_id = event_template["user_id"]
- else:
- # Events without a `user_id` field come from the do_update_embedded_data
- # code path, and represent just rendering previews; there should be no
- # real content changes.
- # It doesn't really matter what we set `acting_user_id` in this case,
- # because we know this event isn't meant to send notifications.
- acting_user_id = user_profile_id
-
user_event = dict(event_template) # shallow copy, but deep enough for our needs
for key in user_data.keys():
if key != "id":
user_event[key] = user_data[key]
- flags: Collection[str] = user_event["flags"]
- user_notifications_data = UserMessageNotificationsData.from_user_id_sets(
- user_id=user_profile_id,
- flags=flags,
- private_message=(stream_name is None),
- online_push_user_ids=online_push_user_ids,
- pm_mention_push_disabled_user_ids=pm_mention_push_disabled_user_ids,
- pm_mention_email_disabled_user_ids=pm_mention_email_disabled_user_ids,
- stream_push_user_ids=stream_push_user_ids,
- stream_email_user_ids=stream_email_user_ids,
- wildcard_mention_user_ids=wildcard_mention_user_ids,
- muted_sender_user_ids=muted_sender_user_ids,
- all_bot_user_ids=all_bot_user_ids,
- )
+ # Events where `rendering_only_update` is True come from the
+ # do_update_embedded_data code path, and represent rendering
+ # previews; there should be no real content changes.
+ # Therefore, we know only events where `rendering_only_update`
+ # is False possibly send notifications.
+ if not rendering_only_update:
- maybe_enqueue_notifications_for_message_update(
- user_notifications_data=user_notifications_data,
- message_id=message_id,
- acting_user_id=acting_user_id,
- private_message=(stream_name is None),
- presence_idle=(user_profile_id in presence_idle_user_ids),
- prior_mentioned=(user_profile_id in prior_mention_user_ids),
- )
+ # The user we'll get here will be the sender if the message's
+ # content was edited, and the editor for topic edits. That's
+ # the correct "acting_user" for both cases.
+ acting_user_id = event_template["user_id"]
+
+ flags: Collection[str] = user_event["flags"]
+ user_notifications_data = UserMessageNotificationsData.from_user_id_sets(
+ user_id=user_profile_id,
+ flags=flags,
+ private_message=(stream_name is None),
+ online_push_user_ids=online_push_user_ids,
+ pm_mention_push_disabled_user_ids=pm_mention_push_disabled_user_ids,
+ pm_mention_email_disabled_user_ids=pm_mention_email_disabled_user_ids,
+ stream_push_user_ids=stream_push_user_ids,
+ stream_email_user_ids=stream_email_user_ids,
+ wildcard_mention_user_ids=wildcard_mention_user_ids,
+ muted_sender_user_ids=muted_sender_user_ids,
+ all_bot_user_ids=all_bot_user_ids,
+ )
+
+ maybe_enqueue_notifications_for_message_update(
+ user_notifications_data=user_notifications_data,
+ message_id=message_id,
+ acting_user_id=acting_user_id,
+ private_message=(stream_name is None),
+ presence_idle=(user_profile_id in presence_idle_user_ids),
+ prior_mentioned=(user_profile_id in prior_mention_user_ids),
+ )
for client in get_client_descriptors_for_user(user_profile_id):
if client.accepts_event(user_event):
| message embeds: Fix nonstandard update_message event for message embeds
In `do_update_embedded_data`, we generate a variant `update_message` event, which has not been properly maintained and doesn't actually match the normal format. I fixed one issue in 00fd9afad5147beb373caf3410ddc34b0c68a83f; but we should figure out how to make this event actually match the format of a normal `update_message` event, and then update the `GET /events` documentation with clear advice on handling it.
I'd like us to move that event to more clearly match the format of normal `update_message` events, with perhaps some clear marker for these variant events.
@amanagr @showell I'd be interested in your thoughts on this.
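For illustration, a hedged sketch of the two event shapes in question (the ids are invented example values; per the patch in this record, modern events carry an explicit `rendering_only` marker, while legacy embed-rendering events are recognizable only by the missing `user_id`):
```python
# Illustrative only -- the ids are invented example values.
modern_event = {
    "type": "update_message",
    "message_id": 12345,
    "user_id": 8,             # acting user; present for real (human) edits
    "rendering_only": False,  # explicit marker distinguishing variant events
}
legacy_embed_event = {
    "type": "update_message",
    "message_id": 12345,
    # no "user_id": this came from do_update_embedded_data (link-preview
    # rendering only), so clients should not treat it as a content edit
}
```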
| Hello @zulip/server-api members, this issue was labeled with the "area: api" label, so you may want to check it out!
<!-- areaLabelAddition -->
So, if I understand correctly, this event doesn't belong in the edit history but is an `update_message` event. If there are other events like these, we can definitely group them and create a new event for them.
This is also related to a case with topic edits for messages, where only one message among all those that are moved receives an "EDITED" tag. I remember you telling me to mark all of them as "Edited", which I wasn't so sure about. Sorry, this got lost in one of my WIPs.
We can create a "silently_update_message" event which is tracked by frontend too and no "Edited" label appears.
@zulipbot claim
Hello @MSurfer20, you claimed this issue to work on it, but this issue and any referenced pull requests haven't been updated for 10 days. Are you still working on this issue?
If so, please update this issue by leaving a comment on this issue to let me know that you're still working on it. Otherwise, I'll automatically remove you from this issue in 4 days.
If you've decided to work on something else, simply comment `@zulipbot abandon` so that someone else can claim it and continue from where you left off.
Thank you for your valuable contributions to Zulip!
<!-- inactiveWarning -->
@zulipbot claim
@MSurfer20 this would be a great issue for you to pick up again now that you have more experience.
@zulipbot claim
@MSurfer20 are you still planning to work on this?
Hey @shanukun! I'm still working on this (couldn't get the chance to make a PR due to a busy week; will open one soon) | 2022-04-18T14:00:04 |
|
zulip/zulip | 21,908 | zulip__zulip-21908 | [
"19056"
] | 299995bd3a348d77de770dc27a8ec4f4b1d42a8e | diff --git a/zerver/webhooks/slack_incoming/view.py b/zerver/webhooks/slack_incoming/view.py
--- a/zerver/webhooks/slack_incoming/view.py
+++ b/zerver/webhooks/slack_incoming/view.py
@@ -7,10 +7,9 @@
from django.utils.translation import gettext as _
from zerver.decorator import webhook_view
-from zerver.lib.exceptions import InvalidJSONError
-from zerver.lib.request import REQ, has_request_variables
+from zerver.lib.exceptions import InvalidJSONError, JsonableError
+from zerver.lib.request import REQ, RequestVariableMissingError, has_request_variables
from zerver.lib.response import json_success
-from zerver.lib.validator import check_dict
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
@@ -21,19 +20,28 @@ def api_slack_incoming_webhook(
request: HttpRequest,
user_profile: UserProfile,
user_specified_topic: Optional[str] = REQ("topic", default=None),
- payload: Optional[Dict[str, Any]] = REQ("payload", json_validator=check_dict(), default=None),
) -> HttpResponse:
# Slack accepts webhook payloads as payload="encoded json" as
# application/x-www-form-urlencoded, as well as in the body as
- # application/json. We use has_request_variables to try to get
- # the form encoded version, and parse the body out ourselves if
- # # we were given JSON.
- if payload is None:
+ # application/json.
+ if request.content_type == "application/json":
try:
- payload = orjson.loads(request.body)
- except orjson.JSONDecodeError: # nocoverage
- raise InvalidJSONError(_("Malformed JSON"))
+ val = request.body.decode(request.encoding or "utf-8")
+ except UnicodeDecodeError: # nocoverage
+ raise JsonableError(_("Malformed payload"))
+ else:
+ req_var = "payload"
+ if req_var in request.POST:
+ val = request.POST[req_var]
+ elif req_var in request.GET: # nocoverage
+ val = request.GET[req_var]
+ else:
+ raise RequestVariableMissingError(req_var)
+ try:
+ payload = orjson.loads(val)
+ except orjson.JSONDecodeError: # nocoverage
+ raise InvalidJSONError(_("Malformed JSON"))
if user_specified_topic is None and "channel" in payload:
user_specified_topic = re.sub("^[@#]", "", payload["channel"])
| diff --git a/zerver/webhooks/slack_incoming/tests.py b/zerver/webhooks/slack_incoming/tests.py
--- a/zerver/webhooks/slack_incoming/tests.py
+++ b/zerver/webhooks/slack_incoming/tests.py
@@ -37,6 +37,15 @@ def test_message_as_www_urlencoded(self) -> None:
content_type="application/x-www-form-urlencoded",
)
+ def test_message_without_payload(self) -> None:
+ self.url = self.build_webhook_url()
+ result = self.client_post(
+ self.url,
+ None, # type: ignore[arg-type] # we need to simulate a http request that doesn't send any payload
+ content_type="multipart/form-data; boundary=14537ad40cab4c77b6699c9c0fa9c82f",
+ )
+ self.assert_json_error(result, "Missing 'payload' argument")
+
def test_message_with_actions(self) -> None:
expected_topic = "C1H9RESGL"
expected_message = """
| Slack webhook: do not read body twice
The code in https://github.com/zulip/zulip/blob/c8849f8fe30bf96c227044b94ee5c269b166a1b9/zerver/webhooks/slack_incoming/view.py#L26-L35 unfortunately cannot work as documented, since `request.body` has already been parsed. We will need to not use `@has_request_variables` and instead look at the content-type to determine how to parse the body, since it can only be read once.
As is, this results in the following, if `payload` is not provided:
```
RawPostDataException: You cannot access body after reading from request's data stream
File "zerver/decorator.py", line 334, in _wrapped_func_arguments
return view_func(request, user_profile, *args, **kwargs)
File "zerver/lib/request.py", line 390, in _wrapped_view_func
return view_func(request, *args, **kwargs)
File "zerver/webhooks/slack_incoming/view.py", line 33, in api_slack_incoming_webhook
payload = orjson.loads(request.body)
File "django/http/request.py", line 328, in body
raise RawPostDataException("You cannot access body after reading from request's data stream")
You cannot access body after reading from request's data stream
```
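A minimal sketch of the suggested direction (it mirrors the patch above rather than being a drop-in fix):
```python
# Choose the parse strategy from Content-Type, and read the body stream only
# when Django has not already consumed it as form data.
import orjson


def parse_slack_payload(request):
    if request.content_type == "application/json":
        # The stream was not parsed as form data, so request.body is safe.
        return orjson.loads(request.body)
    # x-www-form-urlencoded / multipart: Django already parsed the stream
    # into request.POST, so request.body must not be touched again.
    return orjson.loads(request.POST["payload"])
```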
| Hello @zulip/server-integrations members, this issue was labeled with the "area: integrations" label, so you may want to check it out!
<!-- areaLabelAddition -->
I can't replicate the issue. What is the exact payload that causes the error?
Unfortunately, this was an error observed from an exception in production -- and I don't have the trace anymore. I suspect this is triggerable with an `application/x-www-form-urlencoded` body which _doesn't_ set the `payload` parameter. Did you give that a shot?
Ah, found the trace.
## Query string
Key | Value
-- | --
`api_key` | [redacted]
`stream` | [redacted]
`topic` | [redacted]
## Body
(this is presumably the parsed form of the `multipart/form-data`)
<table>
<tr>
<th> Key </th> <th> Value </th>
</tr>
<tr>
<td><code>content</code></td>
<td>
```
Message:
From:
```
</td></tr></table>
## Headers
Key | value
-- | --
`Accept-Encoding` | `gzip, deflate`
`Content-Length` | `178`
`Content-Type` | `multipart/form-data; boundary=--------------------------780381918857007658710229`
-----
Sadly, I don't have any more "raw" version of the request than that, but hopefully that helps.
I have tried sending requests using `application/x-www-form-urlencoded` and `multipart/form-data`, with and without files, but with no luck.
Given that this doesn't reproduce, the original traceback is likely caused by some non-Slack software being used to send a notification into Zulip with this webhook.
Yeah. I think we can close this unless it recurs.
This just recurred, also clearly from a (probably manual) test message.
I can replicate with:
```
https -v --multipart POST https://some-hostname.zulipchat.com/api/v1/external/slack_incoming api_key==redacted stream==general topic==foo
```
...which produces:
```
POST /api/v1/external/slack_incoming?api_key=redacted&stream=general&topic=foo HTTP/1.1
Accept: */*
Accept-Encoding: gzip, deflate
Connection: keep-alive
Content-Length: 38
Content-Type: multipart/form-data; boundary=14537ad40cab4c77b6699c9c0fa9c82f
Host: some-hostname.zulipchat.com
--14537ad40cab4c77b6699c9c0fa9c82f--
```
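(A rough `requests` equivalent of the httpie command above, for reproducing without httpie; the host and credentials are placeholders:)
```python
# Placeholder host/credentials; sends an empty multipart body with no
# "payload" field, matching the raw request captured above.
import requests

boundary = "14537ad40cab4c77b6699c9c0fa9c82f"
requests.post(
    "https://some-hostname.zulipchat.com/api/v1/external/slack_incoming",
    params={"api_key": "redacted", "stream": "general", "topic": "foo"},
    headers={"Content-Type": f"multipart/form-data; boundary={boundary}"},
    data=f"--{boundary}--",
)
```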
Hi guys, I am interested in picking this up; however, I don't know how to replicate this issue in my local Vagrant setup. Can someone please guide me on that?
I believe https://github.com/zulip/zulip/issues/19056#issuecomment-1040863023 contains a replication recipe. (You'll presumably want to adjust the hostname/etc. to target your development environment, look at http://localhost:9991/api for some useful details).
For reference, the `https` tool shown above can be installed via `pip install httpie`, [among other ways](https://httpie.io/docs/cli/installation). | 2022-04-24T07:36:46 |
zulip/zulip | 21,920 | zulip__zulip-21920 | [
"21941"
] | 127108c7d1eb8a3729ccbab95b2d6589cdf48378 | diff --git a/zproject/urls.py b/zproject/urls.py
--- a/zproject/urls.py
+++ b/zproject/urls.py
@@ -906,6 +906,10 @@
"help/night-mode",
RedirectView.as_view(url="/help/dark-theme", permanent=True),
),
+ path(
+ "help/web-public-streams",
+ RedirectView.as_view(url="/help/public-access-option", permanent=True),
+ ),
path("help/", help_documentation_view),
path("help/<path:article>", help_documentation_view),
path("api/", api_documentation_view),
| diff --git a/frontend_tests/node_tests/narrow.js b/frontend_tests/node_tests/narrow.js
--- a/frontend_tests/node_tests/narrow.js
+++ b/frontend_tests/node_tests/narrow.js
@@ -237,7 +237,7 @@ run_test("show_empty_narrow_message", ({mock_template}) => {
$(".empty_feed_notice_main").html(),
empty_narrow_html(
"",
- 'translated HTML: This stream does not exist or is not <a href="https://zulip.com/help/web-public-streams">web-public</a>.',
+ 'translated HTML: This stream does not exist or is not <a href="/help/public-access-option">publicly accessible</a>.',
),
);
@@ -251,7 +251,7 @@ run_test("show_empty_narrow_message", ({mock_template}) => {
$(".empty_feed_notice_main").html(),
empty_narrow_html(
"",
- 'translated HTML: This stream does not exist or is not <a href="https://zulip.com/help/web-public-streams">web-public</a>.',
+ 'translated HTML: This stream does not exist or is not <a href="/help/public-access-option">publicly accessible</a>.',
),
);
page_params.is_spectator = false;
diff --git a/zerver/lib/test_helpers.py b/zerver/lib/test_helpers.py
--- a/zerver/lib/test_helpers.py
+++ b/zerver/lib/test_helpers.py
@@ -487,6 +487,7 @@ def find_pattern(pattern: Any, prefixes: List[str]) -> None:
"help/disable-new-login-emails",
"help/test-mobile-notifications",
"help/troubleshooting-desktop-notifications",
+ "help/web-public-streams",
"for/working-groups-and-communities/",
"help/only-allow-admins-to-add-emoji",
"help/night-mode",
| Change wording on "Login required" modal for logged out users
Following up on #21920 (or @laurynmm 's replacement PR), whenever possible, we should use the term "public access" rather than "web-public stream" when communicating with logged out users. We should therefore change the text of the "Login required" modal.
- **Current**: Since you are not logged in, you can only view messages in [web-public streams](/help/stream-permissions).
- **New**: Since you are not logged in, you can only view messages in [publicly accessible conversations](/help/public-access-option).
@amanagr FYI
| 2022-04-26T23:24:36 |
|
zulip/zulip | 21,940 | zulip__zulip-21940 | [
"21690"
] | bd2dc7358b276e6aaf49dd3fbec79cf045a3f912 | diff --git a/zerver/views/development/dev_login.py b/zerver/views/development/dev_login.py
--- a/zerver/views/development/dev_login.py
+++ b/zerver/views/development/dev_login.py
@@ -79,7 +79,6 @@ def dev_direct_login(
realm = get_realm(subdomain)
if request.POST.get("prefers_web_public_view") == "Anonymous login":
- request.session["prefers_web_public_view"] = True
redirect_to = get_safe_redirect_to(next, realm.uri)
return HttpResponseRedirect(redirect_to)
diff --git a/zerver/views/home.py b/zerver/views/home.py
--- a/zerver/views/home.py
+++ b/zerver/views/home.py
@@ -8,7 +8,7 @@
from django.utils.cache import patch_cache_control
from zerver.actions.user_settings import do_change_tos_version
-from zerver.context_processors import get_valid_realm_from_request
+from zerver.context_processors import get_realm_from_request, get_valid_realm_from_request
from zerver.decorator import web_public_view, zulip_login_required
from zerver.forms import ToSForm
from zerver.lib.compatibility import is_outdated_desktop_app, is_unsupported_browser
@@ -19,7 +19,6 @@
from zerver.lib.user_counts import realm_user_count
from zerver.lib.utils import statsd
from zerver.models import PreregistrationUser, Realm, Stream, UserProfile
-from zerver.views.auth import get_safe_redirect_to
from zerver.views.portico import hello_view
@@ -117,14 +116,10 @@ def home(request: HttpRequest) -> HttpResponse:
if settings.ROOT_DOMAIN_LANDING_PAGE and subdomain == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
return hello_view(request)
- # TODO: The following logic is a bit hard to read. We save a
- # database query in the common case by avoiding the call to
- # `get_valid_realm_from_request` if user hasn't requested
- # web-public access.
- if (
- request.POST.get("prefers_web_public_view") == "true"
- or request.session.get("prefers_web_public_view")
- ) and get_valid_realm_from_request(request).allow_web_public_streams_access():
+ realm = get_realm_from_request(request)
+ if realm is None:
+ return render(request, "zerver/invalid_realm.html", status=404)
+ if realm.allow_web_public_streams_access():
return web_public_view(home_real)(request)
return zulip_login_required(home_real)(request)
@@ -161,32 +156,9 @@ def home_real(request: HttpRequest) -> HttpResponse:
if request.user.is_authenticated:
user_profile = request.user
realm = user_profile.realm
-
- # User is logged in and hence no longer `prefers_web_public_view`.
- if "prefers_web_public_view" in request.session.keys():
- del request.session["prefers_web_public_view"]
else:
realm = get_valid_realm_from_request(request)
-
- # TODO: Ideally, we'd open Zulip directly as a spectator if
- # the URL had clicked a link to content on a web-public
- # stream. We could maybe do this by parsing `next`, but it's
- # not super convenient with Zulip's hash-based URL scheme.
-
- # The "Access without an account" button on the login page
- # submits a POST to this page with this hidden field set.
- if request.POST.get("prefers_web_public_view") == "true":
- request.session["prefers_web_public_view"] = True
- # We serve a redirect here, rather than serving a page, to
- # avoid browser "Confirm form resubmission" prompts on reload.
- redirect_to = get_safe_redirect_to(request.POST.get("next"), realm.uri)
- return redirect(redirect_to)
-
- # See the assert in `home` above for why this must be true.
- assert request.session.get("prefers_web_public_view")
-
- # For users who have selected public access, we load the
- # spectator experience. We fall through to the shared code
+ # We load the spectator experience. We fall through to the shared code
# for loading the application, with user_profile=None encoding
# that we're a spectator, not a logged-in user.
user_profile = None
| diff --git a/zerver/tests/test_decorators.py b/zerver/tests/test_decorators.py
--- a/zerver/tests/test_decorators.py
+++ b/zerver/tests/test_decorators.py
@@ -2170,7 +2170,7 @@ def inner(request: HttpRequest) -> HttpResponse:
# no realm can be set on the request notes.
with mock.patch("zerver.views.home.zulip_login_required", lambda f: mock_home(None)):
result = self.client_get("/", subdomain="")
- self.assertEqual(result.status_code, 200)
+ self.assertEqual(result.status_code, 404)
root_subdomain_realm = do_create_realm("", "Root Domain")
# Now test that that realm does get set, if it exists, for requests
diff --git a/zerver/tests/test_home.py b/zerver/tests/test_home.py
--- a/zerver/tests/test_home.py
+++ b/zerver/tests/test_home.py
@@ -14,7 +14,7 @@
from corporate.models import Customer, CustomerPlan
from zerver.actions.create_user import do_create_user
-from zerver.actions.realm_settings import do_change_realm_plan_type
+from zerver.actions.realm_settings import do_change_realm_plan_type, do_set_realm_property
from zerver.actions.users import change_user_is_active
from zerver.lib.compatibility import LAST_SERVER_UPGRADE_TIME, is_outdated_server
from zerver.lib.home import (
@@ -248,7 +248,7 @@ def test_home(self) -> None:
set(result["Cache-Control"].split(", ")), {"must-revalidate", "no-store", "no-cache"}
)
- self.assert_length(queries, 45)
+ self.assert_length(queries, 46)
self.assert_length(cache_mock.call_args_list, 5)
html = result.content.decode()
@@ -311,46 +311,22 @@ def test_home_demo_organization(self) -> None:
self.assertEqual(set(actual_keys), set(expected_keys))
def test_logged_out_home(self) -> None:
- # Redirect to login on first request.
- result = self.client_get("/")
- self.assertEqual(result.status_code, 302)
- self.assertEqual(result.url, "/login/")
-
- # Tell server that user wants to log in anonymously
- # Redirects to load webapp.
realm = get_realm("zulip")
- result = self.client_post("/", {"prefers_web_public_view": "true"})
- self.assertEqual(self.client.session.get("prefers_web_public_view"), True)
- self.assertEqual(realm.enable_spectator_access, True)
- self.assertEqual(result.status_code, 302)
- self.assertEqual(result.url, "http://zulip.testserver")
-
- # Disable spectator login. Since Realm.enable_spectator_access
- # is False, the login should fail.
- realm.enable_spectator_access = False
- realm.save()
+ do_set_realm_property(realm, "enable_spectator_access", False, acting_user=None)
- result = self.client_post("/", {"prefers_web_public_view": "true"})
- self.assertEqual(self.client.session.get("prefers_web_public_view"), True)
- self.assertEqual(realm.enable_spectator_access, False)
+ # Redirect to login if spectator access is disabled.
+ result = self.client_get("/")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/login/")
- # Enable spectator login.
- realm.enable_spectator_access = True
- realm.save()
-
- result = self.client_post("/", {"prefers_web_public_view": "true"})
- self.assertEqual(self.client.session.get("prefers_web_public_view"), True)
- self.assertEqual(realm.enable_spectator_access, True)
- self.assertEqual(result.status_code, 302)
- self.assertEqual(result.url, "http://zulip.testserver")
-
- # Always load the web app from then on directly
+ # Load webapp directly if spectator access is enabled.
+ do_set_realm_property(realm, "enable_spectator_access", True, acting_user=None)
result = self.client_get("/")
self.assertEqual(result.status_code, 200)
+ # Check no unnecessary params are passed to spectators.
page_params = self._get_page_params(result)
+ self.assertEqual(page_params["is_spectator"], True)
actual_keys = sorted(str(k) for k in page_params.keys())
removed_keys = [
"custom_profile_field_types",
@@ -361,7 +337,6 @@ def test_logged_out_home(self) -> None:
]
expected_keys = [i for i in self.expected_page_params_keys if i not in removed_keys]
self.assertEqual(actual_keys, expected_keys)
- self.assertEqual(self.client.session.get("prefers_web_public_view"), True)
# Test information passed to client about users.
page_params = self._get_page_params(result)
@@ -384,11 +359,6 @@ def test_logged_out_home(self) -> None:
date_length = len("YYYY-MM-DD")
self.assert_length(page_params["realm_users"][0]["date_joined"], date_length)
- # Web-public session key should clear once user is logged in
- self.login("hamlet")
- self.client_get("/")
- self.assertEqual(self.client.session.get("prefers_web_public_view"), None)
-
def test_home_under_2fa_without_otp_device(self) -> None:
with self.settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True):
self.login("iago")
@@ -421,7 +391,7 @@ def test_num_queries_for_realm_admin(self) -> None:
result = self._get_home_page()
self.check_rendered_logged_in_app(result)
self.assert_length(cache_mock.call_args_list, 6)
- self.assert_length(queries, 42)
+ self.assert_length(queries, 43)
def test_num_queries_with_streams(self) -> None:
main_user = self.example_user("hamlet")
@@ -452,7 +422,7 @@ def test_num_queries_with_streams(self) -> None:
with queries_captured() as queries2:
result = self._get_home_page()
- self.assert_length(queries2, 40)
+ self.assert_length(queries2, 41)
# Do a sanity check that our new streams were in the payload.
html = result.content.decode()
diff --git a/zerver/tests/test_sessions.py b/zerver/tests/test_sessions.py
--- a/zerver/tests/test_sessions.py
+++ b/zerver/tests/test_sessions.py
@@ -4,6 +4,7 @@
from django.utils.timezone import now as timezone_now
+from zerver.actions.realm_settings import do_set_realm_property
from zerver.actions.users import change_user_is_active
from zerver.lib.sessions import (
delete_all_deactivated_user_sessions,
@@ -28,8 +29,8 @@ def do_test_session(
action()
if expected_result:
result = self.client_get("/", subdomain=realm.subdomain)
- self.assertEqual(302, result.status_code)
- self.assertEqual("/login/", result.url)
+ self.assertEqual(200, result.status_code)
+ self.assertTrue('is_spectator":true' in str(result.content))
else:
self.assertIn("_auth_user_id", self.client.session)
@@ -40,8 +41,8 @@ def test_delete_session(self) -> None:
for session in user_sessions(user_profile):
delete_session(session)
result = self.client_get("/")
- self.assertEqual(result.status_code, 302)
- self.assertEqual(result.url, "/login/")
+ self.assertEqual(result.status_code, 200)
+ self.assertTrue('is_spectator":true' in str(result.content))
def test_delete_user_sessions(self) -> None:
user_profile = self.example_user("hamlet")
@@ -77,8 +78,19 @@ def test_delete_all_user_sessions(self) -> None:
get_realm("zulip"),
True,
)
+
+ lear_realm = get_realm("lear")
+ do_set_realm_property(lear_realm, "enable_spectator_access", True, acting_user=None)
+ self.make_stream(
+ "web_public_stream",
+ realm=lear_realm,
+ is_web_public=True,
+ )
self.do_test_session(
- self.mit_user("sipbtest"), lambda: delete_all_user_sessions(), get_realm("zephyr"), True
+ self.lear_user("cordelia"),
+ lambda: delete_all_user_sessions(),
+ lear_realm,
+ True,
)
def test_delete_all_deactivated_user_sessions(self) -> None:
@@ -89,8 +101,8 @@ def test_delete_all_deactivated_user_sessions(self) -> None:
self.client_post("/accounts/logout/")
delete_all_deactivated_user_sessions()
result = self.client_get("/")
- self.assertEqual(result.status_code, 302)
- self.assertEqual(result.url, "/login/")
+ self.assertEqual(result.status_code, 200)
+ self.assertTrue('is_spectator":true' in str(result.content))
# Test nothing happens to an active user's session
self.login("othello")
@@ -110,8 +122,8 @@ def test_delete_all_deactivated_user_sessions(self) -> None:
[f"INFO:root:Deactivating session for deactivated user {user_profile_3.id}"],
)
result = self.client_get("/")
- self.assertEqual(result.status_code, 302)
- self.assertEqual(result.url, "/login/")
+ self.assertEqual(result.status_code, 200)
+ self.assertTrue('is_spectator":true' in str(result.content))
class TestExpirableSessionVars(ZulipTestCase):
diff --git a/zerver/tests/test_urls.py b/zerver/tests/test_urls.py
--- a/zerver/tests/test_urls.py
+++ b/zerver/tests/test_urls.py
@@ -43,9 +43,8 @@ def test_public_urls(self) -> None:
"/en/accounts/login/",
"/ru/accounts/login/",
"/help/",
- ],
- 302: [
- # These 302 because they redirect to the spectator experience.
+ # Since web-public streams are enabled in this `zulip`
+ # instance, the public access experience is loaded directly.
"/",
"/en/",
"/ru/",
| Skip login page for organizations with web-public streams
For organizations with web-public streams, we can minimize friction for visitors by taking the user directly to the organization, bypassing the `/login` page regardless of whether the user is logged in. Users who wish to log in can use one of the login buttons in the web-public view.
Details:
- It should work this way for links to a message/topic/stream that is web-public, as well as links to the organization homepage.
- Ideally, links to a stream/topic/message that is not web-public would still take the user to the login page.
- This should only apply to organizations that have web-public streams enabled and actually have at least one web-public stream.
[CZO thread](https://chat.zulip.org/#narrow/stream/2-general/topic/web-public.20.2F.20login.20sequencing)
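A hedged sketch of the routing this implies (simplified; the diff above is the authoritative version, and `home_real` is the existing view that loads the web app):
```python
# Simplified sketch of the "/" routing described above; the imported helpers
# are the same ones the diff above uses.
from django.shortcuts import render

from zerver.context_processors import get_realm_from_request
from zerver.decorator import web_public_view, zulip_login_required


def home(request):
    realm = get_realm_from_request(request)
    if realm is None:
        return render(request, "zerver/invalid_realm.html", status=404)
    if realm.allow_web_public_streams_access():
        # Load the spectator ("public access") experience directly, with no
        # /login detour; visitors can still log in from inside the app.
        return web_public_view(home_real)(request)
    return zulip_login_required(home_real)(request)
```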
| 2022-04-28T05:06:44 |
|
zulip/zulip | 21,972 | zulip__zulip-21972 | [
"21925"
] | e9ba9b0e0d3578c6c55c003fdb64e6cdb100128a | diff --git a/zproject/email_backends.py b/zproject/email_backends.py
--- a/zproject/email_backends.py
+++ b/zproject/email_backends.py
@@ -82,11 +82,19 @@ def prepare_email_messages_for_forwarding(email_messages: List[EmailMultiAlterna
email_message.to = [get_forward_address()]
+ # This wrapper function exists to allow tests easily to mock the
+ # step of trying to send the emails. Previously, we had mocked
+ # Django's connection.send_messages(), which caused unexplained
+ # test failures when running test-backend at very high
+ # concurrency.
+ def _do_send_messages(self, email_messages: List[EmailMultiAlternatives]) -> int:
+ return super().send_messages(email_messages) # nocoverage
+
def send_messages(self, email_messages: List[EmailMultiAlternatives]) -> int:
num_sent = len(email_messages)
if get_forward_address():
self.prepare_email_messages_for_forwarding(email_messages)
- num_sent = super().send_messages(email_messages)
+ num_sent = self._do_send_messages(email_messages)
if settings.DEVELOPMENT_LOG_EMAILS:
for email in email_messages:
| diff --git a/zerver/tests/test_email_log.py b/zerver/tests/test_email_log.py
--- a/zerver/tests/test_email_log.py
+++ b/zerver/tests/test_email_log.py
@@ -10,7 +10,7 @@
class EmailLogTest(ZulipTestCase):
def test_generate_and_clear_email_log(self) -> None:
with self.settings(EMAIL_BACKEND="zproject.email_backends.EmailLogBackEnd"), mock.patch(
- "zproject.email_backends.EmailBackend.send_messages"
+ "zproject.email_backends.EmailLogBackEnd._do_send_messages", lambda *args: 1
), self.assertLogs(level="INFO") as m, self.settings(DEVELOPMENT_LOG_EMAILS=True):
result = self.client_get("/emails/generate/")
self.assertEqual(result.status_code, 302)
@@ -35,11 +35,12 @@ def test_forward_address_details(self) -> None:
self.assertEqual(get_forward_address(), forward_address)
- with self.settings(EMAIL_BACKEND="zproject.email_backends.EmailLogBackEnd"):
- with mock.patch("zproject.email_backends.EmailBackend.send_messages"):
- result = self.client_get("/emails/generate/")
- self.assertEqual(result.status_code, 302)
- self.assertIn("emails", result["Location"])
- result = self.client_get(result["Location"])
- self.assert_in_success_response([forward_address], result)
+ with self.settings(EMAIL_BACKEND="zproject.email_backends.EmailLogBackEnd"), mock.patch(
+ "zproject.email_backends.EmailLogBackEnd._do_send_messages", lambda *args: 1
+ ):
+ result = self.client_get("/emails/generate/")
+ self.assertEqual(result.status_code, 302)
+ self.assertIn("emails", result["Location"])
+ result = self.client_get(result["Location"])
+ self.assert_in_success_response([forward_address], result)
os.remove(settings.FORWARD_ADDRESS_CONFIG_FILE)
diff --git a/zerver/tests/test_example.py b/zerver/tests/test_example.py
--- a/zerver/tests/test_example.py
+++ b/zerver/tests/test_example.py
@@ -401,8 +401,9 @@ def test_generate_emails(self) -> None:
# https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertLogs
with self.settings(EMAIL_BACKEND="zproject.email_backends.EmailLogBackEnd"), self.settings(
DEVELOPMENT_LOG_EMAILS=True
- ), self.assertLogs(level="INFO") as logger:
-
+ ), self.assertLogs(level="INFO") as logger, mock.patch(
+ "zproject.email_backends.EmailLogBackEnd._do_send_messages", lambda *args: 1
+ ):
result = self.client_get(
"/emails/generate/"
) # Generates emails and redirects to /emails/
| support concurrent backend testing for generate_emails
A couple of backend unit tests are failing when executed concurrently, and it seems to be related to generating emails.
The [CZO thread](https://chat.zulip.org/#narrow/stream/43-automated-testing/topic/parallel.20testing.20on.2064.20core.20node) has a reasonable hypothesis by @timabbott about what might be happening. @andersk reduced the symptoms to a very simple test case:
```
tools/test-backend --parallel=2 \
zerver.tests.test_example.TestDevelopmentEmailsLog.test_generate_emails \
zerver.tests.test_email_log.EmailLogTest.test_forward_address_details
```
As I investigate/fix, I'll post updates here, so they're searchable in the future.
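For reference, the fix reflected in the patch above gives the development email backend a thin wrapper that tests can mock, instead of mocking Django's connection-level `send_messages()`; roughly (trimmed, and assuming the module's existing `EmailBackend` base class and helpers):
```python
# Trimmed sketch of zproject/email_backends.py after the change.
class EmailLogBackEnd(EmailBackend):
    def _do_send_messages(self, email_messages):
        # Thin wrapper so tests can mock this method directly.
        return super().send_messages(email_messages)

    def send_messages(self, email_messages):
        num_sent = len(email_messages)
        if get_forward_address():
            self.prepare_email_messages_for_forwarding(email_messages)
            num_sent = self._do_send_messages(email_messages)
        # (development email logging elided)
        return num_sent


# In tests:
#   mock.patch("zproject.email_backends.EmailLogBackEnd._do_send_messages",
#              lambda *args: 1)
```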
| @zulipbot claim
Hello @asah!
Thanks for your interest in Zulip! You have attempted to claim an issue without the label "help wanted". You can only claim and submit pull requests for issues with the [help wanted](https://github.com/zulip/zulip/issues?q=is%3Aopen+is%3Aissue+no%3Aassignee+label%3A%22help+wanted%22) label.
If this is your first time here, we recommend reading our [guide for new contributors](https://zulip.readthedocs.io/en/latest/overview/contributing.html) before getting started.
This is perfectly reproducible for me, which means it's only a matter of time - PR coming. thx @andersk | 2022-05-01T14:30:26 |
zulip/zulip | 21,977 | zulip__zulip-21977 | [
"21948"
] | 214b1a5eba29c9af01cf98010ad7e818b259c034 | diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -48,4 +48,4 @@
# historical commits sharing the same major version, in which case a
# minor version bump suffices.
-PROVISION_VERSION = "190.0"
+PROVISION_VERSION = "190.1"
diff --git a/zerver/lib/home.py b/zerver/lib/home.py
--- a/zerver/lib/home.py
+++ b/zerver/lib/home.py
@@ -163,14 +163,16 @@ def build_page_params_for_home_page_load(
}
default_language = realm.default_language
- furthest_read_time = get_furthest_read_time(user_profile)
-
- request_language = get_and_set_request_language(
- request,
- default_language,
- translation.get_language_from_path(request.path_info),
- )
+ if user_profile is None:
+ request_language = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME, default_language)
+ else:
+ request_language = get_and_set_request_language(
+ request,
+ default_language,
+ translation.get_language_from_path(request.path_info),
+ )
+ furthest_read_time = get_furthest_read_time(user_profile)
two_fa_enabled = settings.TWO_FACTOR_AUTHENTICATION_ENABLED and user_profile is not None
billing_info = get_billing_info(user_profile)
user_permission_info = get_user_permission_info(user_profile)
@@ -234,5 +236,6 @@ def build_page_params_for_home_page_load(
# Get rendered version of realm description which is displayed in right
# sidebar for spectator.
page_params["realm_rendered_description"] = get_realm_rendered_description(realm)
+ page_params["language_cookie_name"] = settings.LANGUAGE_COOKIE_NAME
return register_ret["queue_id"], page_params
| diff --git a/frontend_tests/puppeteer_tests/admin.ts b/frontend_tests/puppeteer_tests/admin.ts
--- a/frontend_tests/puppeteer_tests/admin.ts
+++ b/frontend_tests/puppeteer_tests/admin.ts
@@ -312,25 +312,6 @@ async function test_organization_profile(page: Page): Promise<void> {
await page.waitForSelector(gravatar_selctor, {visible: true});
}
-async function submit_default_user_settings(page: Page): Promise<void> {
- assert.strictEqual(
- await common.get_text_from_selector(page, "#org-submit-notifications"),
- "Save changes",
- );
- await page.click("#org-submit-notifications");
- const saved_status = '#org-submit-notifications[data-status="saved"]';
- await page.waitForSelector(saved_status, {hidden: true});
-}
-
-async function test_change_organization_default_language(page: Page): Promise<void> {
- console.log("Changing realm default language");
- await page.click("li[data-section='organization-settings']");
- await page.waitForSelector("#id_realm_default_language", {visible: true});
-
- await page.evaluate(() => $("#id_realm_default_language").val("de").trigger("change"));
- await submit_default_user_settings(page);
-}
-
async function test_authentication_methods(page: Page): Promise<void> {
await page.click("li[data-section='auth-methods']");
await page.waitForSelector(".method_row[data-method='Google'] input[type='checkbox'] + span", {
@@ -366,7 +347,6 @@ async function admin_test(page: Page): Promise<void> {
await common.manage_organization(page);
await test_change_new_stream_notifications_setting(page);
await test_change_signup_notifications_stream(page);
- await test_change_organization_default_language(page);
await test_organization_permissions(page);
// Currently, Firefox (with puppeteer) does not support file upload:
diff --git a/frontend_tests/puppeteer_tests/settings.ts b/frontend_tests/puppeteer_tests/settings.ts
--- a/frontend_tests/puppeteer_tests/settings.ts
+++ b/frontend_tests/puppeteer_tests/settings.ts
@@ -303,8 +303,10 @@ async function test_alert_words_section(page: Page): Promise<void> {
}
async function change_language(page: Page, language_data_code: string): Promise<void> {
- await page.waitForSelector("#user-display-settings .setting_default_language", {visible: true});
- await page.click("#user-display-settings .setting_default_language");
+ await page.waitForSelector("#user-display-settings .language_selection_button", {
+ visible: true,
+ });
+ await page.click("#user-display-settings .language_selection_button");
await common.wait_for_micromodal_to_open(page);
const language_selector = `a[data-code="${CSS.escape(language_data_code)}"]`;
await page.click(language_selector);
@@ -317,10 +319,12 @@ async function check_language_setting_status(page: Page): Promise<void> {
}
async function assert_language_changed_to_chinese(page: Page): Promise<void> {
- await page.waitForSelector("#user-display-settings .setting_default_language", {visible: true});
+ await page.waitForSelector("#user-display-settings .language_selection_button", {
+ visible: true,
+ });
const default_language = await common.get_text_from_selector(
page,
- "#user-display-settings .setting_default_language",
+ "#user-display-settings .language_selection_button",
);
assert.strictEqual(
default_language,
@@ -346,7 +350,9 @@ async function test_default_language_setting(page: Page): Promise<void> {
// Check that the saved indicator appears
await check_language_setting_status(page);
await page.click(".reload_link");
- await page.waitForSelector("#user-display-settings .setting_default_language", {visible: true});
+ await page.waitForSelector("#user-display-settings .language_selection_button", {
+ visible: true,
+ });
await assert_language_changed_to_chinese(page);
await test_i18n_language_precedence(page);
await page.waitForSelector(display_settings_section, {visible: true});
@@ -363,7 +369,9 @@ async function test_default_language_setting(page: Page): Promise<void> {
await page.waitForSelector("#user-display-settings .lang-time-settings-status", {
visible: true,
});
- await page.waitForSelector("#user-display-settings .setting_default_language", {visible: true});
+ await page.waitForSelector("#user-display-settings .language_selection_button", {
+ visible: true,
+ });
}
async function test_notifications_section(page: Page): Promise<void> {
diff --git a/zerver/tests/test_home.py b/zerver/tests/test_home.py
--- a/zerver/tests/test_home.py
+++ b/zerver/tests/test_home.py
@@ -338,6 +338,7 @@ def test_logged_out_home(self) -> None:
"furthest_read_time",
"insecure_desktop_app",
"is_spectator",
+ "language_cookie_name",
"language_list",
"login_page",
"needs_tutorial",
| Update "notifications language" setting to use the "Default language" picker modal
The settings UI for picking the "notifications language" (previously "Default language for new users"; see #20866) should use the much nicer language picker component that we have for an individual user's language setting (i.e. this, rather than the simple dropdown).

I haven't looked at how complex this is, but it seems clearly better to reuse that component.
| Hello @zulip/server-settings members, this issue was labeled with the "area: settings (admin/org)", "area: settings UI" labels, so you may want to check it out!
<!-- areaLabelAddition -->
| 2022-05-02T09:21:16 |
zulip/zulip | 22,081 | zulip__zulip-22081 | [
"22020"
] | 0d9b1547ceada2d270c4f04d19d528077a3762a2 | diff --git a/corporate/urls.py b/corporate/urls.py
--- a/corporate/urls.py
+++ b/corporate/urls.py
@@ -9,6 +9,7 @@
from corporate.views.portico import (
app_download_link_redirect,
apps_view,
+ communities_view,
hello_view,
landing_view,
plans_view,
@@ -125,6 +126,7 @@
landing_view,
{"template_name": "corporate/case-studies/recurse-center-case-study.html"},
),
+ path("communities/", communities_view),
]
i18n_urlpatterns += landing_page_urls
diff --git a/corporate/views/portico.py b/corporate/views/portico.py
--- a/corporate/views/portico.py
+++ b/corporate/views/portico.py
@@ -10,6 +10,8 @@
from zerver.context_processors import get_realm_from_request, latest_info_context
from zerver.decorator import add_google_analytics
from zerver.lib.github import InvalidPlatform, get_latest_github_release_download_link_for_platform
+from zerver.lib.realm_description import get_realm_text_description
+from zerver.lib.realm_icon import get_realm_icon_url
from zerver.lib.subdomains import is_subdomain_root_or_alias
from zerver.models import Realm
@@ -103,3 +105,47 @@ def landing_view(request: HttpRequest, template_name: str) -> HttpResponse:
@add_google_analytics
def hello_view(request: HttpRequest) -> HttpResponse:
return TemplateResponse(request, "corporate/hello.html", latest_info_context())
+
+
+@add_google_analytics
+def communities_view(request: HttpRequest) -> HttpResponse:
+ eligible_realms = []
+ unique_org_type_ids = set()
+ want_to_be_advertised_realms = Realm.objects.filter(
+ want_advertise_in_communities_directory=True
+ ).order_by("name")
+ for realm in want_to_be_advertised_realms:
+ if realm.allow_web_public_streams_access():
+ eligible_realms.append(
+ {
+ "id": realm.id,
+ "name": realm.name,
+ "realm_url": realm.uri,
+ "logo_url": get_realm_icon_url(realm),
+ "description": get_realm_text_description(realm),
+ "org_type_key": [
+ org_type
+ for org_type in Realm.ORG_TYPES
+ if Realm.ORG_TYPES[org_type]["id"] == realm.org_type
+ ][0],
+ }
+ )
+ unique_org_type_ids.add(realm.org_type)
+
+ # Remove org_types for which there are no open organizations.
+ org_types = dict()
+ for org_type in Realm.ORG_TYPES:
+ if Realm.ORG_TYPES[org_type]["id"] in unique_org_type_ids:
+ org_types[org_type] = Realm.ORG_TYPES[org_type]
+
+ # Remove `Unspecified` ORG_TYPE
+ org_types.pop("unspecified", None)
+
+ return TemplateResponse(
+ request,
+ "corporate/communities.html",
+ context={
+ "eligible_realms": eligible_realms,
+ "org_types": org_types,
+ },
+ )
diff --git a/zerver/lib/templates.py b/zerver/lib/templates.py
--- a/zerver/lib/templates.py
+++ b/zerver/lib/templates.py
@@ -192,8 +192,15 @@ def webpack_entry(entrypoint: str) -> List[str]:
if status != "done":
raise RuntimeError("Webpack compilation was not successful")
- return [
- staticfiles_storage.url(settings.WEBPACK_BUNDLES + filename)
- for filename in stats["chunks"][entrypoint]
- if filename.endswith((".css", ".js")) and not filename.endswith(".hot-update.js")
- ]
+ try:
+ files_from_entrypoints = [
+ staticfiles_storage.url(settings.WEBPACK_BUNDLES + filename)
+ for filename in stats["chunks"][entrypoint]
+ if filename.endswith((".css", ".js")) and not filename.endswith(".hot-update.js")
+ ]
+ except KeyError:
+ raise KeyError(
+ f"'{entrypoint}' entrypoint could not be found. Please define it in tools/webpack.assets.json."
+ )
+
+ return files_from_entrypoints
| diff --git a/zerver/tests/test_docs.py b/zerver/tests/test_docs.py
--- a/zerver/tests/test_docs.py
+++ b/zerver/tests/test_docs.py
@@ -199,6 +199,13 @@ def test_doc_endpoints(self) -> None:
result = self.client_get("/for/companies/", follow=True)
self.assert_in_success_response(["Communication efficiency represents"], result)
+ def test_open_organizations_endpoint(self) -> None:
+ realm = get_realm("zulip")
+ realm.want_advertise_in_communities_directory = True
+ realm.save()
+
+ self._test("/communities/", "Open communities directory")
+
def test_portico_pages_open_graph_metadata(self) -> None:
# Why Zulip
url = "/why-zulip/"
| Open Organizations landing page
@alya asked me to provide a design for the Open Organizations landing page for the Zulip website. She also told me that @amanagr is probably the right person to implement this (if that's not the case, please reassign the task).
We aren't introducing any new design to the Zulip website this time and would like to reuse already available styles and elements.
So let's take the [integrations](https://zulip.com/integrations/) page and tune/add some styles.
[Figma frame with the mock](https://www.figma.com/file/0MHqR2QphLTfJxFLeWIIDC/Web-site?node-id=3%3A24)

The text itself isn't final; please talk to @alya about content acceptance.
## Design clarifications
I suggest rounding the corners of the white background:
<img width="744" alt="image" src="https://user-images.githubusercontent.com/1903309/167272542-09423950-c978-41e7-b45e-96f584b2273a.png">
There might be no description, or a 1-line or 2-line description of the organization.
<img width="675" alt="image" src="https://user-images.githubusercontent.com/1903309/167272582-277dea85-bead-46ef-a1dd-1ebc0bff6817.png">. Vertically align with 60x60 square of the image
The image/logo is a fixed rectangle in which we fit the actual logotype of the company:
<img width="241" alt="image" src="https://user-images.githubusercontent.com/1903309/167272609-84cc2db9-7efb-4a7f-bc9d-ea459073cfea.png">
I suggest a max-width of 700px for the list item (or something around it, if it's convenient in the current column system).
List items on hover should have a green border:
<img width="777" alt="image" src="https://user-images.githubusercontent.com/1903309/167272706-7664b6f9-ba7c-4df9-9748-dad4f291a717.png">, but there is no border in the normal state

## Extra improvement - TBD whether it should be implemented
I noticed that there is a problem with the background on the Zulip website:
<img width="509" alt="image" src="https://user-images.githubusercontent.com/1903309/167272944-0f95206b-8d41-41dd-a64d-4005615fd881.png">
And I suggest a correction for the background gradients

In my suggestion I exclude the shadow from the white container, so the bottom part will look like this:
<img width="1453" alt="image" src="https://user-images.githubusercontent.com/1903309/167272988-7fd41472-6320-49f8-9eac-203e573e5352.png"> instead of this
<img width="1459" alt="image" src="https://user-images.githubusercontent.com/1903309/167272993-25ce07c3-1f12-4c02-a26e-311e9f6a7d7d.png">
So there is a white background and a fixed-height gradient (it can be of any desired size); in my example it is 1100px tall. It consists of 4 layers of gradients:
1. Corner green to white. Layer with 80% opacity
<img width="992" alt="image" src="https://user-images.githubusercontent.com/1903309/167273035-78760cc0-d5dd-4746-84a4-93ecb864c9ac.png">
```css
background: radial-gradient(100% 100% at 0% 0%, #61AD86 0%, rgba(97, 173, 134, 0) 100%);
opacity: 0.8;
```
2. Centered blue
<img width="1014" alt="image" src="https://user-images.githubusercontent.com/1903309/167273077-c37372df-d12e-4ae6-8e6f-7508b613b082.png">
```css
background: radial-gradient(100% 100% at 50.05% 0%, #5298B1 0%, rgba(82, 152, 177, 0) 100%);
```
3. Corner blue
<img width="1023" alt="image" src="https://user-images.githubusercontent.com/1903309/167273110-04f7c26d-92f7-42db-a982-94b72cebae9a.png">
```css
background: radial-gradient(100% 100% at 100% 0%, #5298B1 0%, rgba(82, 152, 177, 0) 100%);
```
4. Yellow linear
<img width="1041" alt="image" src="https://user-images.githubusercontent.com/1903309/167273129-fc24c111-eeef-48ee-800d-1827e4bf12bd.png">
```css
background: linear-gradient(180deg, #FFED9D 0%, rgba(255, 237, 157, 0) 100%);
```
A cool aspect of this gradient approach is that it works with any background color. Here it is on a black background:
<img width="820" alt="image" src="https://user-images.githubusercontent.com/1903309/167273193-b19e8078-80e9-4f66-b81c-1f43d3dd870f.png">
| Hello @zulip/server-misc members, this issue was labeled with the "area: portico" label, so you may want to check it out!
<!-- areaLabelAddition -->
I see we removed the search bar that is in integrations. It would still be useful here, right?
@amanagr as soon as there are more than 20 organizations.
@amanagr You have been unassigned from this issue because you have not made any updates for over 14 days. Please feel free to reclaim the issue if you decide to pick up again. Thanks!
| 2022-05-17T20:41:27 |
zulip/zulip | 22,086 | zulip__zulip-22086 | [
"22082"
] | 6337f179235b4ca4451d02fd96942aca3531b15d | diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -33,7 +33,7 @@
# Changes should be accompanied by documentation explaining what the
# new level means in templates/zerver/api/changelog.md, as well as
# "**Changes**" entries in the endpoint's documentation in `zulip.yaml`.
-API_FEATURE_LEVEL = 130
+API_FEATURE_LEVEL = 132
# Bump the minor PROVISION_VERSION to indicate that folks should provision
# only when going from an old version of the code to a newer version. Bump
diff --git a/zerver/views/streams.py b/zerver/views/streams.py
--- a/zerver/views/streams.py
+++ b/zerver/views/streams.py
@@ -754,6 +754,16 @@ def get_streams_backend(
return json_success(request, data={"streams": streams})
+@has_request_variables
+def get_stream_backend(
+ request: HttpRequest,
+ user_profile: UserProfile,
+ stream_id: int,
+) -> HttpResponse:
+ (stream, sub) = access_stream_by_id(user_profile, stream_id, allow_realm_admin=True)
+ return json_success(request, data={"stream": stream.to_dict()})
+
+
@has_request_variables
def get_topics_backend(
request: HttpRequest,
diff --git a/zproject/urls.py b/zproject/urls.py
--- a/zproject/urls.py
+++ b/zproject/urls.py
@@ -145,6 +145,7 @@
create_default_stream_group,
deactivate_stream_backend,
delete_in_topic,
+ get_stream_backend,
get_streams_backend,
get_subscribers_backend,
get_topics_backend,
@@ -443,7 +444,10 @@
# GET returns "stream info" (undefined currently?), HEAD returns whether stream exists (200 or 404)
rest_path("streams/<int:stream_id>/members", GET=get_subscribers_backend),
rest_path(
- "streams/<int:stream_id>", PATCH=update_stream_backend, DELETE=deactivate_stream_backend
+ "streams/<int:stream_id>",
+ GET=get_stream_backend,
+ PATCH=update_stream_backend,
+ DELETE=deactivate_stream_backend,
),
# Delete topic in stream
rest_path("streams/<int:stream_id>/delete_topic", POST=delete_in_topic),
| diff --git a/zerver/tests/test_decorators.py b/zerver/tests/test_decorators.py
--- a/zerver/tests/test_decorators.py
+++ b/zerver/tests/test_decorators.py
@@ -1983,7 +1983,7 @@ def test_options_method(self) -> None:
result = self.client_options("/json/streams/15")
self.assertEqual(result.status_code, 204)
- self.assertEqual(str(result["Allow"]), "DELETE, PATCH")
+ self.assertEqual(str(result["Allow"]), "DELETE, GET, HEAD, PATCH")
def test_http_accept_redirect(self) -> None:
result = self.client_get("/json/users", HTTP_ACCEPT="text/html")
diff --git a/zerver/tests/test_subs.py b/zerver/tests/test_subs.py
--- a/zerver/tests/test_subs.py
+++ b/zerver/tests/test_subs.py
@@ -5457,6 +5457,40 @@ def test_public_streams_api(self) -> None:
]
self.assertEqual(sorted(s["name"] for s in json["streams"]), sorted(all_streams))
+ def test_get_single_stream_api(self) -> None:
+ self.login("hamlet")
+ realm = get_realm("zulip")
+ denmark_stream = get_stream("Denmark", realm)
+ result = self.client_get(f"/json/streams/{denmark_stream.id}")
+ self.assert_json_success(result)
+ json = result.json()
+ self.assertEqual(json["stream"]["name"], "Denmark")
+ self.assertEqual(json["stream"]["stream_id"], denmark_stream.id)
+
+ result = self.client_get("/json/streams/9999")
+ self.assert_json_error(result, "Invalid stream id")
+
+ private_stream = self.make_stream("private_stream", invite_only=True)
+ self.subscribe(self.example_user("cordelia"), "private_stream")
+
+ # Non-admins cannot access unsubscribed private streams.
+ result = self.client_get(f"/json/streams/{private_stream.id}")
+ self.assert_json_error(result, "Invalid stream id")
+
+ self.login("iago")
+ result = self.client_get(f"/json/streams/{private_stream.id}")
+ self.assert_json_success(result)
+ json = result.json()
+ self.assertEqual(json["stream"]["name"], "private_stream")
+ self.assertEqual(json["stream"]["stream_id"], private_stream.id)
+
+ self.login("cordelia")
+ result = self.client_get(f"/json/streams/{private_stream.id}")
+ self.assert_json_success(result)
+ json = result.json()
+ self.assertEqual(json["stream"]["name"], "private_stream")
+ self.assertEqual(json["stream"]["stream_id"], private_stream.id)
+
class StreamIdTest(ZulipTestCase):
def test_get_stream_id(self) -> None:
| Add endpoint to get a stream by ID
Apparently, we've never added an endpoint to get the details of a single stream by ID -- probably because most clients tend to want all the streams in a given category.
This should be `GET /streams/{stream_id}` and implemented using a combination of `Stream.get_client_data` and `access_stream_by_id`.
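A hedged sketch of the view (note that the patch in this record ended up using `stream.to_dict()` rather than `Stream.get_client_data`):
```python
# Essentially what the diff above adds in zerver/views/streams.py.
@has_request_variables
def get_stream_backend(request, user_profile, stream_id):
    (stream, sub) = access_stream_by_id(user_profile, stream_id, allow_realm_admin=True)
    return json_success(request, data={"stream": stream.to_dict()})
```
Registering it as `GET /streams/<int:stream_id>` alongside the existing `PATCH`/`DELETE` handlers then makes the endpoint available to clients.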
| Hello @zulip/server-api, @zulip/server-streams members, this issue was labeled with the "area: stream settings", "area: api" labels, so you may want to check it out!
<!-- areaLabelAddition -->
@zulipbot claim | 2022-05-18T11:59:23 |
zulip/zulip | 22,124 | zulip__zulip-22124 | [
"22066"
] | bb6bd900cdfc473caed6c1795649e94d9070a7e1 | diff --git a/zerver/forms.py b/zerver/forms.py
--- a/zerver/forms.py
+++ b/zerver/forms.py
@@ -29,6 +29,7 @@
from zerver.lib.name_restrictions import is_disposable_domain, is_reserved_subdomain
from zerver.lib.rate_limiter import RateLimitedObject
from zerver.lib.send_email import FromAddress, send_email
+from zerver.lib.soft_deactivation import queue_soft_reactivation
from zerver.lib.subdomains import get_subdomain, is_root_domain_available
from zerver.lib.users import check_full_name
from zerver.models import (
@@ -360,6 +361,7 @@ def save(
if user is not None:
context["active_account_in_realm"] = True
context["reset_url"] = generate_password_reset_url(user, token_generator)
+ queue_soft_reactivation(user.id)
send_email(
"zerver/emails/password_reset",
to_user_ids=[user.id],
diff --git a/zerver/lib/soft_deactivation.py b/zerver/lib/soft_deactivation.py
--- a/zerver/lib/soft_deactivation.py
+++ b/zerver/lib/soft_deactivation.py
@@ -377,6 +377,14 @@ def get_soft_deactivated_users_for_catch_up(filter_kwargs: Any) -> List[UserProf
return users_to_catch_up
+def queue_soft_reactivation(user_profile_id: int) -> None:
+ event = {
+ "type": "soft_reactivate",
+ "user_profile_id": user_profile_id,
+ }
+ queue_json_publish("deferred_work", event)
+
+
def soft_reactivate_if_personal_notification(
user_profile: UserProfile, unique_triggers: Set[str], mentioned_user_group_name: Optional[str]
) -> None:
@@ -400,8 +408,4 @@ def soft_reactivate_if_personal_notification(
if not private_message and not personal_mention:
return
- event = {
- "type": "soft_reactivate",
- "user_profile_id": user_profile.id,
- }
- queue_json_publish("deferred_work", event)
+ queue_soft_reactivation(user_profile.id)
| diff --git a/zerver/tests/test_signup.py b/zerver/tests/test_signup.py
--- a/zerver/tests/test_signup.py
+++ b/zerver/tests/test_signup.py
@@ -731,6 +731,17 @@ def test_redirect_endpoints(self) -> None:
result = self.client_get("/accounts/new/send_confirm/[email protected]")
self.assert_in_success_response(["/new/"], result)
+ def test_password_reset_for_soft_deactivated_user(self) -> None:
+ user_profile = self.example_user("hamlet")
+ email = user_profile.delivery_email
+ with self.soft_deactivate_and_check_long_term_idle(user_profile, False):
+ # start the password reset process by supplying an email address
+ result = self.client_post("/accounts/password/reset/", {"email": email})
+
+ # check the redirect link telling you to check mail for password reset link
+ self.assertEqual(result.status_code, 302)
+ self.assertTrue(result["Location"].endswith("/accounts/password/reset/done/"))
+
class LoginTest(ZulipTestCase):
"""
| Soft-reactivate users on password reset emails or failed logins
b4feb673f1da503583899a7ee1a41952240c70d2 and a8fd9eb7010d572be8129fe244729865a730a87d started soft-reactivating users when we sent them PMs, which is great for the user experience.
We should expand this to soft-reactivate users on password reset requests and "find my accounts" emails, which are other good signals that the user is about to log back in.
We could potentially consider a stronger form of this, where we soft-reactivate on _failed_ password auth attempts, which generally come before the password reset request. I'm a little worried that would be overzealous, though.
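For reference, the "stronger form" floated above is not part of the patch; a minimal sketch of what it could look like, reusing the `queue_soft_reactivation` helper that the diff introduces (the hook function below is hypothetical, not an existing Zulip API):
```python
# Hypothetical sketch: the hook itself is invented for illustration; only
# queue_soft_reactivation corresponds to code added in the patch above.
from typing import Optional

from zerver.lib.soft_deactivation import queue_soft_reactivation
from zerver.models import UserProfile


def on_failed_password_attempt(user: Optional[UserProfile]) -> None:
    if user is None or not user.long_term_idle:
        return
    # A failed password attempt usually precedes a password reset request, so
    # start catching up the user's UserMessage rows via the deferred_work queue.
    queue_soft_reactivation(user.id)
```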
| cc @PIG208, if you're so inclined
Hello @zulip/server-authentication members, this issue was labeled with the "area: authentication" label, so you may want to check it out!
@zulipbot claim | 2022-05-26T00:57:34 |
zulip/zulip | 22,172 | zulip__zulip-22172 | [
"22161"
] | 690420ffa0ee2ed4930c569288add77681c7ee40 | diff --git a/zerver/models.py b/zerver/models.py
--- a/zerver/models.py
+++ b/zerver/models.py
@@ -514,7 +514,6 @@ class Realm(models.Model):
"name": "Unspecified",
"id": 0,
"hidden": True,
- "hidden_for_sponsorship": True,
"display_order": 0,
},
"business": {
| Upgrade banner should mention Zulip Standard sponsorship for non-business orgs
In organization settings, we show an upgrade banner next to settings that are not available on Zulip Cloud Free:

Non-business organizations are often eligible for sponsorship for Zulip Standard, so it would be very helpful if this banner mentioned sponsorship as an option for those categories of organizations.
* Banner change: "Upgrade to access." -> "Upgrade to access or [request sponsorship](/accounts/go/?next=/upgrade%23sponsorship)."
* Organization types: Any type other than "Business", including unspecified.
[Related CZO thread](https://chat.zulip.org/#narrow/stream/9-issues/topic/Web-public.20streams/near/1384705)
| Hello @zulip/server-settings members, this issue was labeled with the "area: settings (admin/org)" label, so you may want to check it out!
@zulipbot claim | 2022-06-01T18:06:24 |
|
zulip/zulip | 22,176 | zulip__zulip-22176 | [
"22164"
] | 68c4b708a07d1050644773114e9782de616ee135 | diff --git a/zerver/actions/message_flags.py b/zerver/actions/message_flags.py
--- a/zerver/actions/message_flags.py
+++ b/zerver/actions/message_flags.py
@@ -1,7 +1,8 @@
from collections import defaultdict
from dataclasses import asdict, dataclass, field
-from typing import List, Optional, Set
+from typing import List, Optional, Set, Tuple
+from django.db import transaction
from django.db.models import F
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
@@ -229,7 +230,7 @@ def do_clear_mobile_push_notifications_for_ids(
def do_update_message_flags(
user_profile: UserProfile, operation: str, flag: str, messages: List[int]
-) -> int:
+) -> Tuple[int, List[int]]:
valid_flags = [item for item in UserMessage.flags if item not in UserMessage.NON_API_FLAGS]
if flag not in valid_flags:
raise JsonableError(_("Invalid flag: '{}'").format(flag))
@@ -250,18 +251,33 @@ def do_update_message_flags(
# And then create historical UserMessage records. See the called function for more context.
create_historical_user_messages(user_id=user_profile.id, message_ids=historical_message_ids)
-
- if operation == "add":
- count = msgs.update(flags=F("flags").bitor(flagattr))
- elif operation == "remove":
- count = msgs.update(flags=F("flags").bitand(~flagattr))
-
+ with transaction.atomic():
+ if operation == "add":
+ msgs = (
+ msgs.select_for_update()
+ .order_by("message_id")
+ .extra(where=[UserMessage.where_flag_is_absent(flagattr)])
+ )
+ updated_message_ids = [um.message_id for um in msgs]
+ msgs.filter(message_id__in=updated_message_ids).update(flags=F("flags").bitor(flagattr))
+ elif operation == "remove":
+ msgs = (
+ msgs.select_for_update()
+ .order_by("message_id")
+ .extra(where=[UserMessage.where_flag_is_present(flagattr)])
+ )
+ updated_message_ids = [um.message_id for um in msgs]
+ msgs.filter(message_id__in=updated_message_ids).update(
+ flags=F("flags").bitand(~flagattr)
+ )
+
+ count = len(updated_message_ids)
event = {
"type": "update_message_flags",
"op": operation,
"operation": operation,
"flag": flag,
- "messages": messages,
+ "messages": updated_message_ids,
"all": False,
}
@@ -270,14 +286,14 @@ def do_update_message_flags(
# unread), extend the event with an additional object with
# details on the messages required to update the client's
# `unread_msgs` data structure.
- raw_unread_data = get_raw_unread_data(user_profile, messages)
+ raw_unread_data = get_raw_unread_data(user_profile, updated_message_ids)
event["message_details"] = format_unread_message_details(user_profile.id, raw_unread_data)
send_event(user_profile.realm, event, [user_profile.id])
if flag == "read" and operation == "add":
event_time = timezone_now()
- do_clear_mobile_push_notifications_for_ids([user_profile.id], messages)
+ do_clear_mobile_push_notifications_for_ids([user_profile.id], updated_message_ids)
do_increment_logging_stat(
user_profile, COUNT_STATS["messages_read::hour"], None, event_time, increment=count
@@ -290,4 +306,4 @@ def do_update_message_flags(
increment=min(1, count),
)
- return count
+ return count, updated_message_ids
diff --git a/zerver/models.py b/zerver/models.py
--- a/zerver/models.py
+++ b/zerver/models.py
@@ -25,7 +25,7 @@
import orjson
import re2
from bitfield import BitField
-from bitfield.types import BitHandler
+from bitfield.types import Bit, BitHandler
from django.conf import settings
from django.contrib.auth.models import (
AbstractBaseUser,
@@ -3181,14 +3181,7 @@ class Meta:
unique_together = ("user_profile", "message")
@staticmethod
- def where_unread() -> str:
- # Use this for Django ORM queries to access unread message.
- # This custom SQL plays nice with our partial indexes. Grep
- # the code for example usage.
- return "flags & 1 = 0"
-
- @staticmethod
- def where_starred() -> str:
+ def where_flag_is_present(flagattr: Bit) -> str:
# Use this for Django ORM queries to access starred messages.
# This custom SQL plays nice with our partial indexes. Grep
# the code for example usage.
@@ -3196,12 +3189,27 @@ def where_starred() -> str:
# The key detail is that e.g.
# UserMessage.objects.filter(user_profile=user_profile, flags=UserMessage.flags.starred)
# will generate a query involving `flags & 2 = 2`, which doesn't match our index.
- return "flags & 2 <> 0"
+ return f"flags & {1 << flagattr.number} <> 0"
+
+ @staticmethod
+ def where_flag_is_absent(flagattr: Bit) -> str:
+ return f"flags & {1 << flagattr.number} = 0"
+
+ @staticmethod
+ def where_unread() -> str:
+ return AbstractUserMessage.where_flag_is_absent(getattr(AbstractUserMessage.flags, "read"))
+
+ @staticmethod
+ def where_starred() -> str:
+ return AbstractUserMessage.where_flag_is_present(
+ getattr(AbstractUserMessage.flags, "starred")
+ )
@staticmethod
def where_active_push_notification() -> str:
- # See where_starred for documentation.
- return "flags & 4096 <> 0"
+ return AbstractUserMessage.where_flag_is_present(
+ getattr(AbstractUserMessage.flags, "active_mobile_push_notification")
+ )
def flags_list(self) -> List[str]:
flags = int(self.flags)
diff --git a/zerver/views/message_flags.py b/zerver/views/message_flags.py
--- a/zerver/views/message_flags.py
+++ b/zerver/views/message_flags.py
@@ -38,13 +38,13 @@ def update_message_flags(
request_notes = RequestNotes.get_notes(request)
assert request_notes.log_data is not None
- count = do_update_message_flags(user_profile, operation, flag, messages)
+ count, updated_message_ids = do_update_message_flags(user_profile, operation, flag, messages)
target_count_str = str(len(messages))
log_data_str = f"[{operation} {flag}/{target_count_str}] actually {count}"
request_notes.log_data["extra"] = log_data_str
- return json_success(request, data={"messages": messages})
+ return json_success(request, data={"messages": updated_message_ids})
@has_request_variables
| diff --git a/zerver/tests/test_events.py b/zerver/tests/test_events.py
--- a/zerver/tests/test_events.py
+++ b/zerver/tests/test_events.py
@@ -651,12 +651,27 @@ def test_update_message_flags(self) -> None:
state_change_expected=True,
)
check_update_message_flags_add("events[0]", events[0])
+ self.assert_length(events[0]["messages"], 1)
+ # No message_id is returned from the server if the flag is already preset.
+ events = self.verify_action(
+ lambda: do_update_message_flags(user_profile, "add", "starred", [message]),
+ state_change_expected=False,
+ )
+ self.assert_length(events[0]["messages"], 0)
events = self.verify_action(
lambda: do_update_message_flags(user_profile, "remove", "starred", [message]),
state_change_expected=True,
)
check_update_message_flags_remove("events[0]", events[0])
+ self.assert_length(events[0]["messages"], 1)
+
+ # No message_id is returned from the server if the flag is already absent.
+ events = self.verify_action(
+ lambda: do_update_message_flags(user_profile, "remove", "starred", [message]),
+ state_change_expected=False,
+ )
+ self.assert_length(events[0]["messages"], 0)
def test_update_read_flag_removes_unread_msg_ids(self) -> None:
| Server sending mark-unread for already-unread messages
To reproduce:
* Open up the web app in a dev server running https://github.com/zulip/zulip/pull/21477 .
* (That provides handy UI for exercising the issue; but the behavior is something the server itself shouldn't allow, and that PR doesn't touch the server, so it's already a bug in main.)
* Watch the events the server emits.
* One way to do this is in the "Network" tab of Chrome DevTools, while viewing the web app; look at "Preview" for each "events" request.
* Another is to run a dev build of the mobile app, and use the debug console there.
* Go to some conversation with several messages.
* Mark the first message as unread, with `m` or the three-dots menu.
* Mark that message as unread again.
Expected:
* Each of the `update_message_flags`/`remove`/`read` events should list just the messages for which the `read` flag was removed. As [the API docs](https://zulip.com/api/get-events#update_message_flags-remove) say:
> `messages`: (integer)[]
> Array containing the IDs of the messages from which the flag was removed.
Actual:
* Each of those events lists all of the messages in the conversation, even though most of them are already unread and therefore cannot have the `read` flag removed.
* (In my experiments, typically one of the messages will be promptly marked as read after marking the whole thread as unread -- either the one at the top where my cursor is, or the next one just after it. That seems like a bug in #21477; certainly the inconsistency where it's sometimes one and sometimes another seems like a bug. Given that web client behavior, that one message has the `read` flag and so that one message is available to participate in a later mark-as-unread event.
But regardless of the client's behavior, the server is responsible for its event stream: three messages get marked as unread, then just one gets marked as read, then all three get marked again as unread.)
---
With the current version of https://github.com/zulip/zulip-mobile/pull/4790 -- which I was just about to merge -- this behavior causes the unread counter for the conversation (in the inbox view) to grow larger and larger if you keep doing it, because we accumulate more and more duplicate references to the same message. I might add a workaround in the client before merging that. But we really shouldn't have to work around broken server behavior; it should be fixed in the server.
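A condensed sketch of the server-side semantics the "Expected" section above implies; the shipped fix (in the patch above) additionally takes row locks with `select_for_update()` inside a transaction, which this simplified version omits:
```python
# Simplified sketch: report only the message IDs whose flag actually changed,
# so an update_message_flags event never lists messages that were already in
# the requested state.
from typing import List

from bitfield.types import Bit
from django.db.models import F

from zerver.models import UserMessage


def add_flag_and_report(
    user_profile_id: int, message_ids: List[int], flagattr: Bit
) -> List[int]:
    rows = UserMessage.objects.filter(
        user_profile_id=user_profile_id, message_id__in=message_ids
    ).extra(where=[UserMessage.where_flag_is_absent(flagattr)])
    changed_ids = list(rows.values_list("message_id", flat=True))
    UserMessage.objects.filter(
        user_profile_id=user_profile_id, message_id__in=changed_ids
    ).update(flags=F("flags").bitor(flagattr))
    # Only changed_ids belong in the event's `messages` field.
    return changed_ids
```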
| Can I work on this @gnprice? If yes, please assign this to me.
I think probably this should be claimed by someone more experienced with the codebase; @amanagr can you debug this one?
I think the client will likely want to add a workaround in any case -- since probably an appropriate workaround would also help generate correct behavior in bugs/corner cases involving races, event reordering (say, an unread, read, unread sequence for a given message ID gets somehow reordered to read, unread, unread).
Reasonable. Can we agree that we'll fix this bug before merging any UI to exercise the feature?
The difference is:
* If we fix the bug before there's UI to exercise the feature, then I can go ahead and merge https://github.com/zulip/zulip-mobile/pull/4790 . It's been open a long time, is all ready as far as its code is concerned, and would be great to get in.
Then adding that workaround can be a polish followup, alongside adding an implementation for group PMs (as https://github.com/zulip/zulip-mobile/pull/4790 covers stream messages and 1:1 PMs.) As I mention at https://github.com/zulip/zulip-mobile/pull/4790#issuecomment-1143009113 , the natural way to fix it should also make the code more efficient, too.
In principle the client would be susceptible to getting these buggy events from a server that doesn't have the fix -- but with no UI to exercise the feature, it'll be rare to nonexistent.
* If we end up with UI to exercise the feature while it still has this bug, then there will be a contingent of servers that could be sending these buggy events for a long time. The client will have to deal with it as a blocker, and we can't really merge https://github.com/zulip/zulip-mobile/pull/4790 until we first write the workaround.
> event reordering (say, an unread, read, unread sequence for a given message ID gets somehow reordered to read, unread, unread).
This sort of bug could easily cause broken behavior regardless of any workaround, though: just reorder it to unread, unread, read instead. In general it's an important invariant of the event system that the events come in order, so that the resulting state they describe is consistent with the eventual state in the database.
| 2022-06-02T11:51:07 |
zulip/zulip | 22,184 | zulip__zulip-22184 | [
"21776"
] | 443b974b3e73ca31541c173c6f92889830846b5e | diff --git a/zerver/context_processors.py b/zerver/context_processors.py
--- a/zerver/context_processors.py
+++ b/zerver/context_processors.py
@@ -131,6 +131,9 @@ def zulip_default_context(request: HttpRequest) -> Dict[str, Any]:
settings_path = "/etc/zulip/settings.py"
settings_comments_path = "/etc/zulip/settings.py"
+ # Used to remove links to Zulip docs and landing page from footer of self-hosted pages.
+ corporate_enabled = settings.CORPORATE_ENABLED
+
support_email = FromAddress.SUPPORT
support_email_html_tag = SafeString(
f'<a href="mailto:{escape(support_email)}">{escape(support_email)}</a>'
@@ -174,6 +177,7 @@ def zulip_default_context(request: HttpRequest) -> Dict[str, Any]:
"landing_page_navbar_message": settings.LANDING_PAGE_NAVBAR_MESSAGE,
"is_isolated_page": is_isolated_page(request),
"default_page_params": default_page_params,
+ "corporate_enabled": corporate_enabled,
}
context["OPEN_GRAPH_URL"] = f"{realm_uri}{request.path}"
| Replace footer on registration/login pages for self-hosted servers
On registration and login pages on self-hosted Zulip servers, it is unhelpful and confusing to show the full navigation footer for the Zulip website. Instead, we should show a minimal footer containing just:
- Powered by [Zulip](https://zulip.com/).
- Help center (local /help link)
- Policies (local /policies link)
Please post screenshots of the proposed layout for discussion in the [CZO thread for this issue](https://chat.zulip.org/#narrow/stream/137-feedback/topic/footer.20for.20self-hosted.20zulip/near/1364147).
Note that the following footers should *not* be affected by this change:
* Footers on the Zulip landing pages (self-hosted or not)
* Footers on Zulip Cloud registration and login pages
Screenshot of the footer in question:
<img width="1125" alt="Screen Shot 2022-04-12 at 4 03 52 PM" src="https://user-images.githubusercontent.com/2090066/163068529-46db6feb-aec2-49b5-b5bf-37c4f427d031.png">
| Hello @zulip/server-misc, @zulip/server-onboarding members, this issue was labeled with the "area: onboarding", "area: portico" labels, so you may want to check it out!
@zulipbot claim
Welcome to Zulip, @akanakis! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here's some tips to get you off to a good start:
- Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
- [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
Once we complete this, we'll likely be in a position to do a follow-up migration of moving all the landing page content currently in `templates/zerver/` to `templates/corporate`.
@akanakis You have been unassigned from this issue because you have not made any updates for over 14 days. Please feel free to reclaim the issue if you decide to pick up again. Thanks!
I'm still working on this.
@zulipbot claim
@akanakis You have been unassigned from this issue because you have not made any updates for over 14 days. Please feel free to reclaim the issue if you decide to pick up again. Thanks!
@amanagr You have been unassigned from this issue because you have not made any updates for over 14 days. Please feel free to reclaim the issue if you decide to pick up again. Thanks!
| 2022-06-03T13:57:59 |
|
zulip/zulip | 22,203 | zulip__zulip-22203 | [
"22194"
] | 4129a8f8bc48603f077ecf2222334f3a2034d3b3 | diff --git a/zerver/actions/streams.py b/zerver/actions/streams.py
--- a/zerver/actions/streams.py
+++ b/zerver/actions/streams.py
@@ -944,6 +944,43 @@ def do_change_stream_permission(
).decode(),
)
+ notify_stream_creation_ids = set()
+ if old_invite_only_value and not stream.invite_only:
+ # We need to send stream creation event to users who can access the
+ # stream now but were not able to do so previously. So, we can exclude
+ # subscribers, users who were previously subscribed to the stream and
+ # realm admins from the non-guest user list.
+ previously_subscribed_user_ids = Subscription.objects.filter(
+ recipient_id=stream.recipient_id, active=False, is_user_active=True
+ ).values_list("user_profile_id", flat=True)
+ stream_subscriber_user_ids = get_active_subscriptions_for_stream_id(
+ stream.id, include_deactivated_users=False
+ ).values_list("user_profile_id", flat=True)
+
+ old_can_access_stream_user_ids = (
+ set(stream_subscriber_user_ids)
+ | set(previously_subscribed_user_ids)
+ | {user.id for user in stream.realm.get_admin_users_and_bots()}
+ )
+ non_guest_user_ids = set(active_non_guest_user_ids(stream.realm_id))
+ notify_stream_creation_ids = non_guest_user_ids - old_can_access_stream_user_ids
+ send_stream_creation_event(stream, list(notify_stream_creation_ids))
+
+ # Add subscribers info to the stream object. We need to send peer_add
+ # events to users who were previously subscribed to the streams as
+ # they did not had subscribers data.
+ old_subscribers_access_user_ids = set(stream_subscriber_user_ids) | {
+ user.id for user in stream.realm.get_admin_users_and_bots()
+ }
+ peer_notify_user_ids = non_guest_user_ids - old_subscribers_access_user_ids
+ peer_add_event = dict(
+ type="subscription",
+ op="peer_add",
+ stream_ids=[stream.id],
+ user_ids=sorted(stream_subscriber_user_ids),
+ )
+ send_event(stream.realm, peer_add_event, peer_notify_user_ids)
+
event = dict(
op="update",
type="stream",
@@ -954,7 +991,10 @@ def do_change_stream_permission(
stream_id=stream.id,
name=stream.name,
)
- send_event(stream.realm, event, can_access_stream_user_ids(stream))
+ # we do not need to send update events to the users who received creation event
+ # since they already have the updated stream info.
+ notify_stream_update_ids = can_access_stream_user_ids(stream) - notify_stream_creation_ids
+ send_event(stream.realm, event, notify_stream_update_ids)
old_policy_name = get_stream_permission_policy_name(
invite_only=old_invite_only_value,
diff --git a/zerver/lib/events.py b/zerver/lib/events.py
--- a/zerver/lib/events.py
+++ b/zerver/lib/events.py
@@ -923,17 +923,22 @@ def _draft_update_action(i: int) -> None:
if event["op"] == "update":
# For legacy reasons, we call stream data 'subscriptions' in
# the state var here, for the benefit of the JS code.
- for obj in state["subscriptions"]:
- if obj["name"].lower() == event["name"].lower():
- obj[event["property"]] = event["value"]
- if event["property"] == "description":
- obj["rendered_description"] = event["rendered_description"]
- if event.get("history_public_to_subscribers") is not None:
- obj["history_public_to_subscribers"] = event[
- "history_public_to_subscribers"
- ]
- if event.get("is_web_public") is not None:
- obj["is_web_public"] = event["is_web_public"]
+ for sub_list in [
+ state["subscriptions"],
+ state["unsubscribed"],
+ state["never_subscribed"],
+ ]:
+ for obj in sub_list:
+ if obj["name"].lower() == event["name"].lower():
+ obj[event["property"]] = event["value"]
+ if event["property"] == "description":
+ obj["rendered_description"] = event["rendered_description"]
+ if event.get("history_public_to_subscribers") is not None:
+ obj["history_public_to_subscribers"] = event[
+ "history_public_to_subscribers"
+ ]
+ if event.get("is_web_public") is not None:
+ obj["is_web_public"] = event["is_web_public"]
# Also update the pure streams data
if "streams" in state:
for stream in state["streams"]:
| diff --git a/zerver/tests/test_events.py b/zerver/tests/test_events.py
--- a/zerver/tests/test_events.py
+++ b/zerver/tests/test_events.py
@@ -2744,6 +2744,40 @@ def do_test_subscribe_events(self, include_subscribers: bool) -> None:
check_stream_update("events[0]", events[0])
check_message("events[1]", events[1])
+ # Update stream privacy - make stream public
+ self.user_profile = self.example_user("cordelia")
+ action = lambda: do_change_stream_permission(
+ stream,
+ invite_only=False,
+ history_public_to_subscribers=True,
+ is_web_public=False,
+ acting_user=self.example_user("hamlet"),
+ )
+ events = self.verify_action(action, include_subscribers=include_subscribers, num_events=2)
+ check_stream_create("events[0]", events[0])
+ check_subscription_peer_add("events[1]", events[1])
+
+ do_change_stream_permission(
+ stream,
+ invite_only=True,
+ history_public_to_subscribers=True,
+ is_web_public=False,
+ acting_user=self.example_user("hamlet"),
+ )
+ self.subscribe(self.example_user("cordelia"), stream.name)
+ self.unsubscribe(self.example_user("cordelia"), stream.name)
+ action = lambda: do_change_stream_permission(
+ stream,
+ invite_only=False,
+ history_public_to_subscribers=True,
+ is_web_public=False,
+ acting_user=self.example_user("hamlet"),
+ )
+ events = self.verify_action(
+ action, include_subscribers=include_subscribers, num_events=2, include_streams=False
+ )
+
+ self.user_profile = self.example_user("hamlet")
# Update stream stream_post_policy property
action = lambda: do_change_stream_post_policy(
stream, Stream.STREAM_POST_POLICY_ADMINS, acting_user=self.example_user("hamlet")
| Changing a stream from private to public requires a client reload to view
When a stream is changed from private to public, users who were not invited to the private stream need to reload the client in order to view the now-public stream.
If the user does not refresh the client, they get a `This stream does not exist or is private.` error when trying to access the channel via hashtag reference (`#name-of-channel`) in chat. If they attempt to navigate to it manually, it shows as `deactivated`.
| Hello @zulip/server-streams members, this issue was labeled with the "area: stream settings" label, so you may want to check it out!
Thanks for the report @agbaber !
CZO discussion thread: https://chat.zulip.org/#narrow/stream/9-issues/topic/live.20update.20for.20stream.20access.20issue.20.20.2322194
@zulipbot claim
Hello @sahil839!
Thanks for your interest in Zulip! You have attempted to claim an issue without the label "help wanted". You can only claim and submit pull requests for issues with the [help wanted](https://github.com/zulip/zulip/issues?q=is%3Aopen+is%3Aissue+no%3Aassignee+label%3A%22help+wanted%22) label.
If this is your first time here, we recommend reading our [guide for new contributors](https://zulip.readthedocs.io/en/latest/overview/contributing.html) before getting started.
| 2022-06-07T14:29:20 |
zulip/zulip | 22,267 | zulip__zulip-22267 | [
"22145"
] | 2fcd8d5e2115c9d27c60531bb71546f721ada67f | diff --git a/zerver/actions/custom_profile_fields.py b/zerver/actions/custom_profile_fields.py
--- a/zerver/actions/custom_profile_fields.py
+++ b/zerver/actions/custom_profile_fields.py
@@ -78,6 +78,17 @@ def do_remove_realm_custom_profile_fields(realm: Realm) -> None:
CustomProfileField.objects.filter(realm=realm).delete()
+def remove_custom_profile_field_value_if_required(
+ field: CustomProfileField, field_data: ProfileFieldData
+) -> None:
+ old_values = set(orjson.loads(field.field_data).keys())
+ new_values = set(field_data.keys())
+ removed_values = old_values - new_values
+
+ if removed_values:
+ CustomProfileFieldValue.objects.filter(field=field, value__in=removed_values).delete()
+
+
def try_update_realm_custom_profile_field(
realm: Realm,
field: CustomProfileField,
@@ -91,6 +102,9 @@ def try_update_realm_custom_profile_field(
field.field_type == CustomProfileField.SELECT
or field.field_type == CustomProfileField.EXTERNAL_ACCOUNT
):
+ if field.field_type == CustomProfileField.SELECT:
+ assert field_data is not None
+ remove_custom_profile_field_value_if_required(field, field_data)
field.field_data = orjson.dumps(field_data or {}).decode()
field.save()
notify_realm_custom_profile_fields(realm)
diff --git a/zerver/migrations/0397_remove_custom_field_values_for_deleted_options.py b/zerver/migrations/0397_remove_custom_field_values_for_deleted_options.py
new file mode 100644
--- /dev/null
+++ b/zerver/migrations/0397_remove_custom_field_values_for_deleted_options.py
@@ -0,0 +1,35 @@
+# Generated by Django 3.2.13 on 2022-06-17 17:39
+import orjson
+from django.db import migrations
+from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
+from django.db.migrations.state import StateApps
+
+
+def remove_custom_field_values_for_deleted_options(
+ apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
+) -> None:
+ SELECT_TYPE = 3
+ CustomProfileField = apps.get_model("zerver", "CustomProfileField")
+ CustomProfileFieldValue = apps.get_model("zerver", "CustomProfileFieldValue")
+
+ select_type_fields = CustomProfileField.objects.filter(field_type=SELECT_TYPE)
+ for field in select_type_fields:
+ field_data = orjson.loads(field.field_data)
+ current_options = list(field_data.keys())
+ CustomProfileFieldValue.objects.filter(field=field).exclude(
+ value__in=current_options
+ ).delete()
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ("zerver", "0396_remove_subscription_role"),
+ ]
+
+ operations = [
+ migrations.RunPython(
+ remove_custom_field_values_for_deleted_options,
+ elidable=True,
+ ),
+ ]
diff --git a/zilencer/management/commands/populate_db.py b/zilencer/management/commands/populate_db.py
--- a/zilencer/management/commands/populate_db.py
+++ b/zilencer/management/commands/populate_db.py
@@ -726,8 +726,8 @@ def assign_time_zone_by_delivery_email(delivery_email: str, new_time_zone: str)
hint="Or drink, if you'd prefer",
)
field_data: ProfileFieldData = {
- "vim": {"text": "Vim", "order": "1"},
- "emacs": {"text": "Emacs", "order": "2"},
+ "0": {"text": "Vim", "order": "1"},
+ "1": {"text": "Emacs", "order": "2"},
}
favorite_editor = try_add_realm_custom_profile_field(
zulip_realm, "Favorite editor", CustomProfileField.SELECT, field_data=field_data
@@ -754,7 +754,7 @@ def assign_time_zone_by_delivery_email(delivery_email: str, new_time_zone: str)
{"id": phone_number.id, "value": "+1-234-567-8901"},
{"id": biography.id, "value": "Betrayer of Othello."},
{"id": favorite_food.id, "value": "Apples"},
- {"id": favorite_editor.id, "value": "emacs"},
+ {"id": favorite_editor.id, "value": "1"},
{"id": birthday.id, "value": "2000-01-01"},
{"id": favorite_website.id, "value": "https://zulip.readthedocs.io/en/latest/"},
{"id": mentor.id, "value": [hamlet.id]},
@@ -770,7 +770,7 @@ def assign_time_zone_by_delivery_email(delivery_email: str, new_time_zone: str)
"value": "I am:\n* The prince of Denmark\n* Nephew to the usurping Claudius",
},
{"id": favorite_food.id, "value": "Dark chocolate"},
- {"id": favorite_editor.id, "value": "vim"},
+ {"id": favorite_editor.id, "value": "0"},
{"id": birthday.id, "value": "1900-01-01"},
{"id": favorite_website.id, "value": "https://blog.zulig.org"},
{"id": mentor.id, "value": [iago.id]},
| diff --git a/zerver/tests/test_custom_profile_data.py b/zerver/tests/test_custom_profile_data.py
--- a/zerver/tests/test_custom_profile_data.py
+++ b/zerver/tests/test_custom_profile_data.py
@@ -106,8 +106,8 @@ def test_create_select_field(self) -> None:
data["field_data"] = orjson.dumps(
{
- "python": {"text": "Python"},
- "java": {"text": "Java"},
+ "0": {"text": "Python"},
+ "1": {"text": "Java"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
@@ -115,8 +115,8 @@ def test_create_select_field(self) -> None:
data["field_data"] = orjson.dumps(
{
- "python": {"text": "Python", "order": ""},
- "java": {"text": "Java", "order": "2"},
+ "0": {"text": "Python", "order": ""},
+ "1": {"text": "Java", "order": "2"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
@@ -125,7 +125,7 @@ def test_create_select_field(self) -> None:
data["field_data"] = orjson.dumps(
{
"": {"text": "Python", "order": "1"},
- "java": {"text": "Java", "order": "2"},
+ "1": {"text": "Java", "order": "2"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
@@ -133,8 +133,8 @@ def test_create_select_field(self) -> None:
data["field_data"] = orjson.dumps(
{
- "python": {"text": "Python", "order": 1},
- "java": {"text": "Java", "order": "2"},
+ "0": {"text": "Python", "order": 1},
+ "1": {"text": "Java", "order": "2"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
@@ -146,8 +146,8 @@ def test_create_select_field(self) -> None:
data["field_data"] = orjson.dumps(
{
- "python": {"text": "Duplicate", "order": "1"},
- "java": {"text": "Duplicate", "order": "2"},
+ "0": {"text": "Duplicate", "order": "1"},
+ "1": {"text": "Duplicate", "order": "2"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
@@ -155,8 +155,8 @@ def test_create_select_field(self) -> None:
data["field_data"] = orjson.dumps(
{
- "python": {"text": "Python", "order": "1"},
- "java": {"text": "Java", "order": "2"},
+ "0": {"text": "Python", "order": "1"},
+ "1": {"text": "Java", "order": "2"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
@@ -497,8 +497,8 @@ def test_update(self) -> None:
field_data = orjson.dumps(
{
- "vim": "Vim",
- "emacs": {"order": "2", "text": "Emacs"},
+ "0": "Vim",
+ "1": {"order": "2", "text": "Emacs"},
}
).decode()
result = self.client_patch(
@@ -509,9 +509,9 @@ def test_update(self) -> None:
field_data = orjson.dumps(
{
- "vim": {"order": "1", "text": "Vim"},
- "emacs": {"order": "2", "text": "Emacs"},
- "notepad": {"order": "3", "text": "Notepad"},
+ "0": {"order": "1", "text": "Vim"},
+ "1": {"order": "2", "text": "Emacs"},
+ "2": {"order": "3", "text": "Notepad"},
}
).decode()
result = self.client_patch(
@@ -594,7 +594,7 @@ def test_update_profile_data_successfully(self) -> None:
("Phone number", "*short* text data"),
("Biography", "~~short~~ **long** text data"),
("Favorite food", "long short text data"),
- ("Favorite editor", "vim"),
+ ("Favorite editor", "0"),
("Birthday", "1909-03-05"),
("Favorite website", "https://zulip.com"),
("Mentor", [self.example_user("cordelia").id]),
@@ -667,7 +667,7 @@ def test_update_select_field_successfully(self) -> None:
data = [
{
"id": field.id,
- "value": "emacs",
+ "value": "1",
}
]
@@ -727,6 +727,30 @@ def test_do_update_value_not_changed(self) -> None:
do_update_user_custom_profile_data_if_changed(iago, data)
mock_notify.assert_not_called()
+ def test_removing_option_from_select_field(self) -> None:
+ self.login("iago")
+ realm = get_realm("zulip")
+ field = CustomProfileField.objects.get(name="Favorite editor", realm=realm)
+ self.assertTrue(
+ CustomProfileFieldValue.objects.filter(field_id=field.id, value="0").exists()
+ )
+ self.assertTrue(
+ CustomProfileFieldValue.objects.filter(field_id=field.id, value="1").exists()
+ )
+
+ new_options = {"1": {"text": "Emacs", "order": "1"}}
+ result = self.client_patch(
+ f"/json/realm/profile_fields/{field.id}",
+ info={"name": "Favorite editor", "field_data": orjson.dumps(new_options).decode()},
+ )
+ self.assert_json_success(result)
+ self.assertFalse(
+ CustomProfileFieldValue.objects.filter(field_id=field.id, value="0").exists()
+ )
+ self.assertTrue(
+ CustomProfileFieldValue.objects.filter(field_id=field.id, value="1").exists()
+ )
+
class ListCustomProfileFieldTest(CustomProfileFieldTestCase):
def test_list(self) -> None:
diff --git a/zerver/tests/test_users.py b/zerver/tests/test_users.py
--- a/zerver/tests/test_users.py
+++ b/zerver/tests/test_users.py
@@ -608,7 +608,7 @@ def test_admin_user_can_change_profile_data(self) -> None:
"Phone number": "short text data",
"Biography": "long text data",
"Favorite food": "short text data",
- "Favorite editor": "vim",
+ "Favorite editor": "0",
"Birthday": "1909-03-05",
"Favorite website": "https://zulip.com",
"Mentor": [cordelia.id],
| Improve behavior of deleting an option for a "List of options" custom profile field
As [discussed on CZO](https://chat.zulip.org/#narrow/stream/378-api-design/topic/custom.20profile.20fields.20option.20deletion/near/1384923), when an administrator deletes an option for a "List of options" custom profile field, users who had chosen that option keep the now-invalid value in the database, while the UI shows the empty option as selected. Also, we do not show any warning in the UI to the admin user deleting the option. We should:
- [ ] When an option gets deleted, we should delete the field from all the users that had that option chosen.
- [ ] When an admin goes to delete an option, we should first give them a confirm/cancel modal saying that this will delete the field for N users. We should show the modal even when zero users have the option selected.
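For the first item in the list above, a minimal sketch of the server-side cleanup, together with the count a confirm/cancel modal could display; this mirrors the idea described here, not necessarily the merged code:
```python
# Sketch only: delete stored values for removed options, and expose the
# affected-user count for a "this will delete the field for N users" modal.
from typing import Set

from zerver.models import CustomProfileField, CustomProfileFieldValue


def delete_values_for_removed_options(
    field: CustomProfileField, removed_option_ids: Set[str]
) -> int:
    affected = CustomProfileFieldValue.objects.filter(
        field=field, value__in=removed_option_ids
    )
    affected_users = affected.count()
    affected.delete()
    return affected_users
```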
| Hello @zulip/server-settings members, this issue was labeled with the "area: settings (admin/org)" label, so you may want to check it out!
Hi,
Can I take up this issue if nobody is working on it?
Yeah, go for it. The New Application Feature tutorial should give you the background needed to find the code for this.
@ritikBhandari are you working on this?
Actually, no. I am working on another issue and have opened a Draft PR for the same. But I can work on this one if you want.
I was asking because, if you are not working on it, then I can take this one; it should be quick to do.
Sure. | 2022-06-20T16:56:44 |
zulip/zulip | 22,270 | zulip__zulip-22270 | [
"22244"
] | 028c2e4ec93fd97c08b5f950d21b1f3f171daca9 | diff --git a/zerver/management/commands/rename_stream.py b/zerver/management/commands/rename_stream.py
deleted file mode 100644
--- a/zerver/management/commands/rename_stream.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from argparse import ArgumentParser
-from typing import Any
-
-from zerver.actions.streams import do_rename_stream
-from zerver.lib.management import ZulipBaseCommand
-from zerver.models import get_stream
-
-
-class Command(ZulipBaseCommand):
- help = """Change the stream name for a realm."""
-
- def add_arguments(self, parser: ArgumentParser) -> None:
- parser.add_argument("old_name", metavar="<old name>", help="name of stream to be renamed")
- parser.add_argument(
- "new_name", metavar="<new name>", help="new name to rename the stream to"
- )
- self.add_realm_args(parser, required=True)
-
- def handle(self, *args: Any, **options: str) -> None:
- realm = self.get_realm(options)
- assert realm is not None # Should be ensured by parser
- old_name = options["old_name"]
- new_name = options["new_name"]
-
- stream = get_stream(old_name, realm)
- do_rename_stream(stream, new_name, self.user_profile)
diff --git a/zilencer/management/commands/migrate_stream_notifications.py b/zilencer/management/commands/migrate_stream_notifications.py
deleted file mode 100644
--- a/zilencer/management/commands/migrate_stream_notifications.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from typing import Any
-
-from django.core.management.base import BaseCommand
-
-from zerver.models import Subscription
-
-
-class Command(BaseCommand):
- help = """One-off script to migration users' stream notification settings."""
-
- def handle(self, *args: Any, **options: Any) -> None:
- for subscription in Subscription.objects.all():
- subscription.desktop_notifications = subscription.notifications
- subscription.audible_notifications = subscription.notifications
- subscription.save(update_fields=["desktop_notifications", "audible_notifications"])
| management: `rename_stream` management command does not work
`rename_stream` uses the `do_rename_stream` function to rename the stream. However, it accesses a non-existent attribute when calling it.
```
do_rename_stream(stream, new_name, self.user_profile) # self.user_profile does not exist
```
To replicate this, run:
```
python manage.py rename_stream Denmark bar -r zulip
```
and you should see:
```
AttributeError: 'Command' object has no attribute 'user_profile'
```
You might want to look at `zerver/management/commands/rename_stream.py` and `zerver/actions/streams.py`.
The fix should refactor `do_rename_stream` to accept `user_profile: Optional[UserProfile]` with the `None` default, and correctly handle what should happen for the notification message that might be sent when the stream is renamed (which currently mentions the name of the acting user that renames it).
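A rough sketch of the signature change described above; note that the merged fix (the patch shown above in this entry) instead removed the management command entirely, so this is only an illustration of the suggested refactor:
```python
# Illustration of the suggested refactor, not the merged change.
from typing import Optional

from zerver.models import Stream, UserProfile


def do_rename_stream(
    stream: Stream, new_name: str, user_profile: Optional[UserProfile] = None
) -> None:
    old_name = stream.name
    stream.name = new_name
    stream.save(update_fields=["name"])
    # Only mention an acting user in the notification when one exists; a
    # management-command caller could now pass None here.
    if user_profile is not None:
        acting_user = f"@_**{user_profile.full_name}|{user_profile.id}**"
        notification = f"{acting_user} renamed stream **{old_name}** to **{new_name}**."
    else:
        notification = f"Stream **{old_name}** was renamed to **{new_name}**."
    # ... the real function also updates caches and recipients and notifies
    # clients before posting `notification` to the stream.
```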
| @zulipbot add "help wanted" "area: tooling" "bug"
Hello @zulip/server-tooling members, this issue was labeled with the "area: tooling" label, so you may want to check it out!
Looks like this came in during the work to add the message, in 4dcccf32f842eddf0238f5a252ca23c682f69324 / #11197.
There's also potentially an argument for removing this management command entirely, since it's been broken for 3.5 years and nobody has noticed, and it's trivial to do via the UI. | 2022-06-20T23:09:52 |
|
zulip/zulip | 22,312 | zulip__zulip-22312 | [
"21712"
] | c1dfa1fd1198a6ada178561831e2a75af4eeddf5 | diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -33,7 +33,7 @@
# Changes should be accompanied by documentation explaining what the
# new level means in templates/zerver/api/changelog.md, as well as
# "**Changes**" entries in the endpoint's documentation in `zulip.yaml`.
-API_FEATURE_LEVEL = 151
+API_FEATURE_LEVEL = 152
# Bump the minor PROVISION_VERSION to indicate that folks should provision
# only when going from an old version of the code to a newer version. Bump
diff --git a/zerver/actions/message_edit.py b/zerver/actions/message_edit.py
--- a/zerver/actions/message_edit.py
+++ b/zerver/actions/message_edit.py
@@ -816,6 +816,7 @@ def user_info(um: UserMessage) -> Dict[str, Any]:
send_event(user_profile.realm, event, users_to_be_notified)
+ sent_resolve_topic_notification = False
if (
topic_name is not None
and new_stream is None
@@ -823,7 +824,7 @@ def user_info(um: UserMessage) -> Dict[str, Any]:
and len(changed_messages) > 0
):
assert stream_being_edited is not None
- maybe_send_resolve_topic_notifications(
+ sent_resolve_topic_notification = maybe_send_resolve_topic_notifications(
user_profile=user_profile,
stream=stream_being_edited,
old_topic=orig_topic_name,
@@ -831,7 +832,11 @@ def user_info(um: UserMessage) -> Dict[str, Any]:
changed_messages=changed_messages,
)
- if len(changed_messages) > 0 and new_stream is not None and stream_being_edited is not None:
+ if (
+ len(changed_messages) > 0
+ and (new_stream is not None or topic_name is not None)
+ and stream_being_edited is not None
+ ):
# Notify users that the topic was moved.
changed_messages_count = len(changed_messages)
@@ -850,8 +855,25 @@ def user_info(um: UserMessage) -> Dict[str, Any]:
"{changed_messages_count} messages were moved from this topic to {new_location} by {user}."
)
+ # The new thread notification code path is a bit subtle. We
+ # don't want every resolve-topic action to also annoyingly
+ # send an extra notification that the topic was moved!
+ #
+ # Since one can resolve/unresolve a topic at the same time
+ # you're moving it, we need to carefully treat the resolve
+ # topic notification as satisfying our obligation to send a
+ # notification to the new topic only if the only thing this
+ # request did is mark the topic as resolved.
new_thread_notification_string = None
- if send_notification_to_new_thread:
+ if send_notification_to_new_thread and (
+ new_stream is not None
+ or not sent_resolve_topic_notification
+ or (
+ topic_name is not None
+ and orig_topic_name.lstrip(RESOLVED_TOPIC_PREFIX)
+ != topic_name.lstrip(RESOLVED_TOPIC_PREFIX)
+ )
+ ):
if moved_all_visible_messages:
new_thread_notification_string = gettext_lazy(
"This topic was moved here from {old_location} by {user}."
@@ -870,7 +892,7 @@ def user_info(um: UserMessage) -> Dict[str, Any]:
stream_being_edited,
orig_topic_name,
old_thread_notification_string,
- new_stream,
+ new_stream if new_stream is not None else stream_being_edited,
topic_name,
new_thread_notification_string,
changed_messages_count,
diff --git a/zerver/views/message_edit.py b/zerver/views/message_edit.py
--- a/zerver/views/message_edit.py
+++ b/zerver/views/message_edit.py
@@ -122,7 +122,7 @@ def update_message_backend(
propagate_mode: str = REQ(
default="change_one", str_validator=check_string_in(PROPAGATE_MODE_VALUES)
),
- send_notification_to_old_thread: bool = REQ(default=True, json_validator=check_bool),
+ send_notification_to_old_thread: bool = REQ(default=False, json_validator=check_bool),
send_notification_to_new_thread: bool = REQ(default=True, json_validator=check_bool),
content: Optional[str] = REQ(default=None),
) -> HttpResponse:
| diff --git a/frontend_tests/puppeteer_tests/edit.ts b/frontend_tests/puppeteer_tests/edit.ts
--- a/frontend_tests/puppeteer_tests/edit.ts
+++ b/frontend_tests/puppeteer_tests/edit.ts
@@ -37,9 +37,9 @@ async function test_stream_message_edit(page: Page): Promise<void> {
content: "test editing",
});
- await edit_stream_message(page, "edited", "test edited");
+ await edit_stream_message(page, "edits", "test edited");
- await common.check_messages_sent(page, "zhome", [["Verona > edited", ["test edited"]]]);
+ await common.check_messages_sent(page, "zhome", [["Verona > edits", ["test edited"]]]);
}
async function test_edit_message_with_slash_me(page: Page): Promise<void> {
@@ -61,7 +61,7 @@ async function test_edit_message_with_slash_me(page: Page): Promise<void> {
)} and normalize-space()="Desdemona"]`,
);
- await edit_stream_message(page, "edited", "/me test edited a message with me");
+ await edit_stream_message(page, "edits", "/me test edited a message with me");
await page.waitForSelector(
`xpath/${last_message_xpath}//*[${common.has_class_x(
diff --git a/zerver/tests/test_message_edit.py b/zerver/tests/test_message_edit.py
--- a/zerver/tests/test_message_edit.py
+++ b/zerver/tests/test_message_edit.py
@@ -1305,9 +1305,9 @@ def notify(user_id: int) -> Dict[str, Any]:
send_notification_to_new_thread=False,
content=None,
)
- # This code path adds 9 (1 + 4/user with muted topics) to
+ # This code path adds 9 (1 + 4/user with muted topics) + 1 to
# the number of database queries for moving a topic.
- self.assert_length(queries, 18)
+ self.assert_length(queries, 19)
for muting_user in get_users_muting_topic(stream.id, change_all_topic_name):
for user in users_to_be_notified:
@@ -1755,6 +1755,7 @@ def test_move_message_to_stream(self) -> None:
{
"stream_id": new_stream.id,
"propagate_mode": "change_all",
+ "send_notification_to_old_thread": "true",
},
HTTP_ACCEPT_LANGUAGE="de",
)
@@ -1903,6 +1904,7 @@ def test_move_message_to_stream_change_later(self) -> None:
{
"stream_id": new_stream.id,
"propagate_mode": "change_later",
+ "send_notification_to_old_thread": "true",
},
)
self.assert_json_success(result)
@@ -1933,6 +1935,7 @@ def test_move_message_to_stream_change_later_all_moved(self) -> None:
{
"stream_id": new_stream.id,
"propagate_mode": "change_later",
+ "send_notification_to_old_thread": "true",
},
)
self.assert_json_success(result)
@@ -1962,6 +1965,7 @@ def test_move_message_to_stream_change_one(self) -> None:
{
"stream_id": new_stream.id,
"propagate_mode": "change_one",
+ "send_notification_to_old_thread": "true",
},
)
self.assert_json_success(result)
@@ -1992,6 +1996,7 @@ def test_move_message_to_stream_change_all(self) -> None:
{
"stream_id": new_stream.id,
"propagate_mode": "change_all",
+ "send_notification_to_old_thread": "true",
},
)
self.assert_json_success(result)
@@ -2036,7 +2041,7 @@ def check_move_message_according_to_policy(role: int, expect_fail: bool = False)
else:
self.assert_json_success(result)
messages = get_topic_messages(user_profile, old_stream, "test")
- self.assert_length(messages, 1)
+ self.assert_length(messages, 0)
messages = get_topic_messages(user_profile, new_stream, "test")
self.assert_length(messages, 4)
@@ -2125,7 +2130,7 @@ def check_move_message_to_stream(role: int, error_msg: Optional[str] = None) ->
else:
self.assert_json_success(result)
messages = get_topic_messages(user_profile, old_stream, "test")
- self.assert_length(messages, 1)
+ self.assert_length(messages, 0)
messages = get_topic_messages(user_profile, new_stream, "test")
self.assert_length(messages, 4)
@@ -2222,7 +2227,7 @@ def test_move_message_to_stream_with_topic_editing_not_allowed(self) -> None:
)
self.assert_json_success(result)
messages = get_topic_messages(user_profile, old_stream, "test")
- self.assert_length(messages, 1)
+ self.assert_length(messages, 0)
messages = get_topic_messages(user_profile, new_stream, "test")
self.assert_length(messages, 4)
@@ -2235,8 +2240,9 @@ def test_move_message_to_stream_and_topic(self) -> None:
result = self.client_patch(
f"/json/messages/{msg_id}",
{
- "stream_id": new_stream.id,
"propagate_mode": "change_all",
+ "send_notification_to_old_thread": "true",
+ "stream_id": new_stream.id,
"topic": "new topic",
},
)
@@ -2448,6 +2454,290 @@ def test_notify_old_thread_move_message_to_stream(self) -> None:
messages = get_topic_messages(user_profile, new_stream, "test")
self.assert_length(messages, 3)
+ def test_notify_new_topic(self) -> None:
+ user_profile = self.example_user("iago")
+ self.login("iago")
+ stream = self.make_stream("public stream")
+ self.subscribe(user_profile, stream.name)
+ msg_id = self.send_stream_message(
+ user_profile, stream.name, topic_name="test", content="First"
+ )
+ self.send_stream_message(user_profile, stream.name, topic_name="test", content="Second")
+ self.send_stream_message(user_profile, stream.name, topic_name="test", content="third")
+
+ result = self.client_patch(
+ "/json/messages/" + str(msg_id),
+ {
+ "message_id": msg_id,
+ "topic": "edited",
+ "propagate_mode": "change_all",
+ "send_notification_to_old_thread": "false",
+ "send_notification_to_new_thread": "true",
+ },
+ )
+
+ self.assert_json_success(result)
+
+ messages = get_topic_messages(user_profile, stream, "test")
+ self.assert_length(messages, 0)
+
+ messages = get_topic_messages(user_profile, stream, "edited")
+ self.assert_length(messages, 4)
+ self.assertEqual(
+ messages[3].content,
+ f"This topic was moved here from #**public stream>test** by @_**Iago|{user_profile.id}**.",
+ )
+
+ def test_notify_old_topic(self) -> None:
+ user_profile = self.example_user("iago")
+ self.login("iago")
+ stream = self.make_stream("public stream")
+ self.subscribe(user_profile, stream.name)
+ msg_id = self.send_stream_message(
+ user_profile, stream.name, topic_name="test", content="First"
+ )
+ self.send_stream_message(user_profile, stream.name, topic_name="test", content="Second")
+ self.send_stream_message(user_profile, stream.name, topic_name="test", content="third")
+
+ result = self.client_patch(
+ "/json/messages/" + str(msg_id),
+ {
+ "message_id": msg_id,
+ "topic": "edited",
+ "propagate_mode": "change_all",
+ "send_notification_to_old_thread": "true",
+ "send_notification_to_new_thread": "false",
+ },
+ )
+
+ self.assert_json_success(result)
+
+ messages = get_topic_messages(user_profile, stream, "test")
+ self.assert_length(messages, 1)
+ self.assertEqual(
+ messages[0].content,
+ f"This topic was moved to #**public stream>edited** by @_**Iago|{user_profile.id}**.",
+ )
+
+ messages = get_topic_messages(user_profile, stream, "edited")
+ self.assert_length(messages, 3)
+
+ def test_notify_both_topics(self) -> None:
+ user_profile = self.example_user("iago")
+ self.login("iago")
+ stream = self.make_stream("public stream")
+ self.subscribe(user_profile, stream.name)
+ msg_id = self.send_stream_message(
+ user_profile, stream.name, topic_name="test", content="First"
+ )
+ self.send_stream_message(user_profile, stream.name, topic_name="test", content="Second")
+ self.send_stream_message(user_profile, stream.name, topic_name="test", content="third")
+
+ result = self.client_patch(
+ "/json/messages/" + str(msg_id),
+ {
+ "message_id": msg_id,
+ "topic": "edited",
+ "propagate_mode": "change_all",
+ "send_notification_to_old_thread": "true",
+ "send_notification_to_new_thread": "true",
+ },
+ )
+
+ self.assert_json_success(result)
+
+ messages = get_topic_messages(user_profile, stream, "test")
+ self.assert_length(messages, 1)
+ self.assertEqual(
+ messages[0].content,
+ f"This topic was moved to #**public stream>edited** by @_**Iago|{user_profile.id}**.",
+ )
+
+ messages = get_topic_messages(user_profile, stream, "edited")
+ self.assert_length(messages, 4)
+ self.assertEqual(
+ messages[3].content,
+ f"This topic was moved here from #**public stream>test** by @_**Iago|{user_profile.id}**.",
+ )
+
+ def test_notify_no_topic(self) -> None:
+ user_profile = self.example_user("iago")
+ self.login("iago")
+ stream = self.make_stream("public stream")
+ self.subscribe(user_profile, stream.name)
+ msg_id = self.send_stream_message(
+ user_profile, stream.name, topic_name="test", content="First"
+ )
+ self.send_stream_message(user_profile, stream.name, topic_name="test", content="Second")
+ self.send_stream_message(user_profile, stream.name, topic_name="test", content="third")
+
+ result = self.client_patch(
+ "/json/messages/" + str(msg_id),
+ {
+ "message_id": msg_id,
+ "topic": "edited",
+ "propagate_mode": "change_all",
+ "send_notification_to_old_thread": "false",
+ "send_notification_to_new_thread": "false",
+ },
+ )
+
+ self.assert_json_success(result)
+
+ messages = get_topic_messages(user_profile, stream, "test")
+ self.assert_length(messages, 0)
+
+ messages = get_topic_messages(user_profile, stream, "edited")
+ self.assert_length(messages, 3)
+
+ def test_notify_new_topics_after_message_move(self) -> None:
+ user_profile = self.example_user("iago")
+ self.login("iago")
+ stream = self.make_stream("public stream")
+ self.subscribe(user_profile, stream.name)
+ msg_id = self.send_stream_message(
+ user_profile, stream.name, topic_name="test", content="First"
+ )
+ self.send_stream_message(user_profile, stream.name, topic_name="test", content="Second")
+ self.send_stream_message(user_profile, stream.name, topic_name="test", content="Third")
+
+ result = self.client_patch(
+ "/json/messages/" + str(msg_id),
+ {
+ "message_id": msg_id,
+ "topic": "edited",
+ "propagate_mode": "change_one",
+ "send_notification_to_old_thread": "false",
+ "send_notification_to_new_thread": "true",
+ },
+ )
+
+ self.assert_json_success(result)
+
+ messages = get_topic_messages(user_profile, stream, "test")
+ self.assert_length(messages, 2)
+ self.assertEqual(messages[0].content, "Second")
+ self.assertEqual(messages[1].content, "Third")
+
+ messages = get_topic_messages(user_profile, stream, "edited")
+ self.assert_length(messages, 2)
+ self.assertEqual(messages[0].content, "First")
+ self.assertEqual(
+ messages[1].content,
+ f"A message was moved here from #**public stream>test** by @_**Iago|{user_profile.id}**.",
+ )
+
+ def test_notify_old_topics_after_message_move(self) -> None:
+ user_profile = self.example_user("iago")
+ self.login("iago")
+ stream = self.make_stream("public stream")
+ self.subscribe(user_profile, stream.name)
+ msg_id = self.send_stream_message(
+ user_profile, stream.name, topic_name="test", content="First"
+ )
+ self.send_stream_message(user_profile, stream.name, topic_name="test", content="Second")
+ self.send_stream_message(user_profile, stream.name, topic_name="test", content="Third")
+
+ result = self.client_patch(
+ "/json/messages/" + str(msg_id),
+ {
+ "message_id": msg_id,
+ "topic": "edited",
+ "propagate_mode": "change_one",
+ "send_notification_to_old_thread": "true",
+ "send_notification_to_new_thread": "false",
+ },
+ )
+
+ self.assert_json_success(result)
+
+ messages = get_topic_messages(user_profile, stream, "test")
+ self.assert_length(messages, 3)
+ self.assertEqual(messages[0].content, "Second")
+ self.assertEqual(messages[1].content, "Third")
+ self.assertEqual(
+ messages[2].content,
+ f"A message was moved from this topic to #**public stream>edited** by @_**Iago|{user_profile.id}**.",
+ )
+
+ messages = get_topic_messages(user_profile, stream, "edited")
+ self.assert_length(messages, 1)
+ self.assertEqual(messages[0].content, "First")
+
+ def test_notify_both_topics_after_message_move(self) -> None:
+ user_profile = self.example_user("iago")
+ self.login("iago")
+ stream = self.make_stream("public stream")
+ self.subscribe(user_profile, stream.name)
+ msg_id = self.send_stream_message(
+ user_profile, stream.name, topic_name="test", content="First"
+ )
+ self.send_stream_message(user_profile, stream.name, topic_name="test", content="Second")
+ self.send_stream_message(user_profile, stream.name, topic_name="test", content="Third")
+
+ result = self.client_patch(
+ "/json/messages/" + str(msg_id),
+ {
+ "message_id": msg_id,
+ "topic": "edited",
+ "propagate_mode": "change_one",
+ "send_notification_to_old_thread": "true",
+ "send_notification_to_new_thread": "true",
+ },
+ )
+
+ self.assert_json_success(result)
+
+ messages = get_topic_messages(user_profile, stream, "test")
+ self.assert_length(messages, 3)
+ self.assertEqual(messages[0].content, "Second")
+ self.assertEqual(messages[1].content, "Third")
+ self.assertEqual(
+ messages[2].content,
+ f"A message was moved from this topic to #**public stream>edited** by @_**Iago|{user_profile.id}**.",
+ )
+
+ messages = get_topic_messages(user_profile, stream, "edited")
+ self.assert_length(messages, 2)
+ self.assertEqual(messages[0].content, "First")
+ self.assertEqual(
+ messages[1].content,
+ f"A message was moved here from #**public stream>test** by @_**Iago|{user_profile.id}**.",
+ )
+
+ def test_notify_no_topic_after_message_move(self) -> None:
+ user_profile = self.example_user("iago")
+ self.login("iago")
+ stream = self.make_stream("public stream")
+ self.subscribe(user_profile, stream.name)
+ msg_id = self.send_stream_message(
+ user_profile, stream.name, topic_name="test", content="First"
+ )
+ self.send_stream_message(user_profile, stream.name, topic_name="test", content="Second")
+ self.send_stream_message(user_profile, stream.name, topic_name="test", content="Third")
+
+ result = self.client_patch(
+ "/json/messages/" + str(msg_id),
+ {
+ "message_id": msg_id,
+ "topic": "edited",
+ "propagate_mode": "change_one",
+ "send_notification_to_old_thread": "false",
+ "send_notification_to_new_thread": "false",
+ },
+ )
+
+ self.assert_json_success(result)
+
+ messages = get_topic_messages(user_profile, stream, "test")
+ self.assert_length(messages, 2)
+ self.assertEqual(messages[0].content, "Second")
+ self.assertEqual(messages[1].content, "Third")
+
+ messages = get_topic_messages(user_profile, stream, "edited")
+ self.assert_length(messages, 1)
+ self.assertEqual(messages[0].content, "First")
+
def parameterized_test_move_message_involving_private_stream(
self,
from_invite_only: bool,
@@ -2503,11 +2793,7 @@ def parameterized_test_move_message_involving_private_stream(
self.assert_json_success(result)
messages = get_topic_messages(admin_user, old_stream, "test")
- self.assert_length(messages, 1)
- self.assertEqual(
- messages[0].content,
- f"This topic was moved to #**new stream>test** by @_**Iago|{admin_user.id}**.",
- )
+ self.assert_length(messages, 0)
messages = get_topic_messages(admin_user, new_stream, "test")
self.assert_length(messages, 3)
@@ -2649,7 +2935,7 @@ def test_mark_topic_as_resolved(self) -> None:
== 0
)
- # Now move to a weird state and confirm no new messages
+ # Now move to a weird state and confirm we get the normal topic moved message.
weird_topic = "✔ ✔✔" + original_topic
result = self.client_patch(
"/json/messages/" + str(id1),
@@ -2668,11 +2954,15 @@ def test_mark_topic_as_resolved(self) -> None:
)
messages = get_topic_messages(admin_user, stream, weird_topic)
- self.assert_length(messages, 3)
+ self.assert_length(messages, 4)
self.assertEqual(
messages[2].content,
f"@_**Iago|{admin_user.id}** has marked this topic as resolved.",
)
+ self.assertEqual(
+ messages[3].content,
+ f"This topic was moved here from #**new>✔ topic 1** by @_**Iago|{admin_user.id}**.",
+ )
unresolved_topic = original_topic
result = self.client_patch(
@@ -2692,16 +2982,19 @@ def test_mark_topic_as_resolved(self) -> None:
)
messages = get_topic_messages(admin_user, stream, unresolved_topic)
- self.assert_length(messages, 4)
+ self.assert_length(messages, 5)
self.assertEqual(
- messages[3].content,
+ messages[2].content, f"@_**Iago|{admin_user.id}** has marked this topic as resolved."
+ )
+ self.assertEqual(
+ messages[4].content,
f"@_**Iago|{admin_user.id}** has marked this topic as unresolved.",
)
# Check topic unresolved notification message is only unread for participants.
assert (
UserMessage.objects.filter(
- user_profile__in=[admin_user, hamlet, aaron], message__id=messages[3].id
+ user_profile__in=[admin_user, hamlet, aaron], message__id=messages[4].id
)
.extra(where=[UserMessage.where_unread()])
.count()
@@ -2709,12 +3002,42 @@ def test_mark_topic_as_resolved(self) -> None:
)
assert (
- UserMessage.objects.filter(user_profile=cordelia, message__id=messages[3].id)
+ UserMessage.objects.filter(user_profile=cordelia, message__id=messages[4].id)
.extra(where=[UserMessage.where_unread()])
.count()
== 0
)
+ # Now move to another stream while resolving the topic and
+ # check the notifications.
+ final_stream = self.make_stream("final")
+ self.subscribe(admin_user, final_stream.name)
+ result = self.client_patch(
+ "/json/messages/" + str(id1),
+ {
+ "topic": resolved_topic,
+ "stream_id": final_stream.id,
+ "propagate_mode": "change_all",
+ },
+ )
+ self.assert_json_success(result)
+ for msg_id in [id1, id2]:
+ msg = Message.objects.get(id=msg_id)
+ self.assertEqual(
+ resolved_topic,
+ msg.topic_name(),
+ )
+
+ messages = get_topic_messages(admin_user, final_stream, resolved_topic)
+ # TODO: This should be 7 -- but currently we never trigger
+ # resolve-topic notifications when moving the stream, even if
+ # the resolve-topic state is changed at that time.
+ self.assert_length(messages, 6)
+ self.assertEqual(
+ messages[5].content,
+ f"This topic was moved here from #**new>topic 1** by @_**Iago|{admin_user.id}**.",
+ )
+
class DeleteMessageTest(ZulipTestCase):
def test_delete_message_invalid_request_format(self) -> None:
| Show "Send notification to new/old topic" options for moves within stream
At present, when moving messages via the three-dot message menu, we only show the option to Send notification to new/old topic when messages are being moved to a new stream.
Instead, we should show these options for within-stream moves as well, as it can help avoid confusion about moved messages. Note that we already allow this when moving messages via the three-dot topic menu in the left sidebar.
| Hello @zulip/server-message-view members, this issue was labeled with the "area: message-editing" label, so you may want to check it out!
<!-- areaLabelAddition -->
@zulipbot claim
Hello @mariosmantzaris, it looks like you've currently claimed 1 issue in this repository. We encourage new contributors to focus their efforts on at most 1 issue at a time, so please complete your work on your other claimed issues before trying to claim this issue again.
We look forward to your valuable contributions!
@zulipbot claim
@mariosmantzaris You have been unassigned from this issue because you have not made any updates for over 14 days. Please feel free to reclaim the issue if you decide to pick up again. Thanks!
@zulipbot claim
@mariosmantzaris We noticed that you have not made any updates to this issue or linked PRs for 10 days. Please comment here if you are still actively working on it. Otherwise, we'd appreciate a quick `@zulipbot abandon` comment so that someone else can claim this issue and continue from where you left off.
If we don't hear back, you will be automatically unassigned in 4 days. Thanks!
<!-- inactiveWarning -->
@zulipbot abandon
@juliaBichler01 would you be up for picking this one up, since you worked on #19196 recently?
@alya yes, I already started working on it.
I am not finished working on it yet, but I created a draft PR, so that you can have a look at it if you want to: [#22312](https://github.com/zulip/zulip/pull/22312)
Thanks! I'll review the PR once it's been approved by your code review buddies and is ready for me to take a look. | 2022-06-25T16:21:12 |
zulip/zulip | 22,387 | zulip__zulip-22387 | [
"21180"
] | 95dfde121ca0b8c181a34c2935f329f7a2a0070b | diff --git a/zerver/actions/presence.py b/zerver/actions/presence.py
--- a/zerver/actions/presence.py
+++ b/zerver/actions/presence.py
@@ -3,6 +3,7 @@
from typing import Optional
from django.conf import settings
+from django.db import transaction
from zerver.actions.user_activity import update_user_activity_interval
from zerver.decorator import statsd_increment
@@ -13,7 +14,9 @@
from zerver.tornado.django_api import send_event
-def send_presence_changed(user_profile: UserProfile, presence: UserPresence) -> None:
+def send_presence_changed(
+ user_profile: UserProfile, presence: UserPresence, *, force_send_update: bool = False
+) -> None:
# Most presence data is sent to clients in the main presence
# endpoint in response to the user's own presence; this results
# data that is 1-2 minutes stale for who is online. The flaw with
@@ -24,7 +27,10 @@ def send_presence_changed(user_profile: UserProfile, presence: UserPresence) ->
# See https://zulip.readthedocs.io/en/latest/subsystems/presence.html for
# internals documentation on presence.
user_ids = active_user_ids(user_profile.realm_id)
- if len(user_ids) > settings.USER_LIMIT_FOR_SENDING_PRESENCE_UPDATE_EVENTS:
+ if (
+ len(user_ids) > settings.USER_LIMIT_FOR_SENDING_PRESENCE_UPDATE_EVENTS
+ and not force_send_update
+ ):
# These immediate presence generate quadratic work for Tornado
# (linear number of users in each event and the frequency of
# users coming online grows linearly with userbase too). In
@@ -64,7 +70,12 @@ def consolidate_client(client: Client) -> Client:
@statsd_increment("user_presence")
def do_update_user_presence(
- user_profile: UserProfile, client: Client, log_time: datetime.datetime, status: int
+ user_profile: UserProfile,
+ client: Client,
+ log_time: datetime.datetime,
+ status: int,
+ *,
+ force_send_update: bool = False,
) -> None:
client = consolidate_client(client)
@@ -103,8 +114,18 @@ def do_update_user_presence(
update_fields.append("status")
presence.save(update_fields=update_fields)
- if not user_profile.realm.presence_disabled and (created or became_online):
- send_presence_changed(user_profile, presence)
+ if force_send_update or (
+ not user_profile.realm.presence_disabled and (created or became_online)
+ ):
+ # We do a the transaction.on_commit here, rather than inside
+ # send_presence_changed, to help keep presence transactions
+ # brief; the active_user_ids call there is more expensive than
+ # this whole function.
+ transaction.on_commit(
+ lambda: send_presence_changed(
+ user_profile, presence, force_send_update=force_send_update
+ )
+ )
def update_user_presence(
diff --git a/zerver/actions/user_settings.py b/zerver/actions/user_settings.py
--- a/zerver/actions/user_settings.py
+++ b/zerver/actions/user_settings.py
@@ -2,11 +2,13 @@
from typing import List, Optional, Union
import orjson
+from django.conf import settings
from django.db import transaction
from django.db.models import F
from django.utils.timezone import now as timezone_now
from confirmation.models import Confirmation, create_confirmation_link
+from zerver.actions.presence import do_update_user_presence
from zerver.lib.avatar import avatar_url
from zerver.lib.cache import (
cache_delete,
@@ -26,9 +28,11 @@
RealmAuditLog,
ScheduledEmail,
ScheduledMessageNotificationEmail,
+ UserPresence,
UserProfile,
active_user_ids,
bot_owner_user_ids,
+ get_client,
get_user_profile_by_id,
)
from zerver.tornado.django_api import send_event
@@ -454,3 +458,43 @@ def do_change_user_setting(
# not deleted every previously synced draft - to do that use the DELETE
# endpoint.
Draft.objects.filter(user_profile=user_profile).delete()
+
+ if setting_name == "presence_enabled":
+ # The presence_enabled setting's primary function is to stop
+ # doing presence updates for the user altogether.
+ #
+ # When a user toggles the presence_enabled setting, we
+ # immediately trigger a presence update, so all users see the
+ # user's current presence state as consistent with the new
+ # setting; not doing so can make it look like the settings
+ # change didn't have any effect.
+ if setting_value:
+ status = UserPresence.ACTIVE
+ presence_time = timezone_now()
+ else:
+ # HACK: Remove existing presence data for the current user
+ # when disabling presence. This hack will go away when we
+ # replace our presence data structure with a simpler model
+ # that doesn't separate individual clients.
+ UserPresence.objects.filter(user_profile_id=user_profile.id).delete()
+
+ # We create a single presence entry for the user, old
+ # enough to be guaranteed to be treated as offline by
+ # correct clients, such that the user will, for as long as
+ # presence remains disabled, appear to have been last
+ # online a few minutes before they disabled presence.
+ #
+ # We add a small additional offset as a fudge factor in
+ # case of clock skew.
+ status = UserPresence.IDLE
+ presence_time = timezone_now() - datetime.timedelta(
+ seconds=settings.OFFLINE_THRESHOLD_SECS + 120
+ )
+
+ do_update_user_presence(
+ user_profile,
+ get_client("website"),
+ presence_time,
+ status,
+ force_send_update=True,
+ )
| diff --git a/zerver/tests/test_event_system.py b/zerver/tests/test_event_system.py
--- a/zerver/tests/test_event_system.py
+++ b/zerver/tests/test_event_system.py
@@ -5,6 +5,7 @@
import orjson
from django.conf import settings
from django.http import HttpRequest, HttpResponse
+from django.test import override_settings
from django.utils.timezone import now as timezone_now
from version import API_FEATURE_LEVEL, ZULIP_MERGE_BASE, ZULIP_VERSION
@@ -1200,8 +1201,12 @@ def test_get_raw_user_data_on_system_bot_realm(self) -> None:
class TestUserPresenceUpdatesDisabled(ZulipTestCase):
+ # For this test, we verify do_update_user_presence doesn't send
+ # events for organizations with more than
+ # USER_LIMIT_FOR_SENDING_PRESENCE_UPDATE_EVENTS users, unless
+ # force_send_update is passed.
+ @override_settings(USER_LIMIT_FOR_SENDING_PRESENCE_UPDATE_EVENTS=3)
def test_presence_events_disabled_on_larger_realm(self) -> None:
- # First check that normally the mocked function gets called.
events: List[Mapping[str, Any]] = []
with self.tornado_redirected_to_list(events, expected_num_events=1):
do_update_user_presence(
@@ -1209,15 +1214,14 @@ def test_presence_events_disabled_on_larger_realm(self) -> None:
get_client("website"),
timezone_now(),
UserPresence.ACTIVE,
+ force_send_update=True,
)
- # Now check that if the realm has more than the USER_LIMIT_FOR_SENDING_PRESENCE_UPDATE_EVENTS
- # amount of active users, send_event doesn't get called.
with self.tornado_redirected_to_list(events, expected_num_events=0):
- with self.settings(USER_LIMIT_FOR_SENDING_PRESENCE_UPDATE_EVENTS=1):
- do_update_user_presence(
- self.example_user("hamlet"),
- get_client("website"),
- timezone_now(),
- UserPresence.ACTIVE,
- )
+ do_update_user_presence(
+ self.example_user("hamlet"),
+ get_client("website"),
+ timezone_now(),
+ UserPresence.ACTIVE,
+ force_send_update=False,
+ )
diff --git a/zerver/tests/test_events.py b/zerver/tests/test_events.py
--- a/zerver/tests/test_events.py
+++ b/zerver/tests/test_events.py
@@ -1733,7 +1733,11 @@ def test_change_is_guest(self) -> None:
def test_change_notification_settings(self) -> None:
for notification_setting, v in self.user_profile.notification_setting_types.items():
- if notification_setting in ["notification_sound", "desktop_icon_count_display"]:
+ if notification_setting in [
+ "notification_sound",
+ "desktop_icon_count_display",
+ "presence_enabled",
+ ]:
# These settings are tested in their own tests.
continue
@@ -1769,6 +1773,26 @@ def test_change_notification_settings(self) -> None:
check_user_settings_update("events[0]", events[0])
check_update_global_notifications("events[1]", events[1], setting_value)
+ def test_change_presence_enabled(self) -> None:
+ presence_enabled_setting = "presence_enabled"
+
+ for val in [True, False]:
+ events = self.verify_action(
+ lambda: do_change_user_setting(
+ self.user_profile, presence_enabled_setting, val, acting_user=self.user_profile
+ ),
+ num_events=3,
+ )
+ check_user_settings_update("events[0]", events[0])
+ check_update_global_notifications("events[1]", events[1], val)
+ check_presence(
+ "events[2]",
+ events[2],
+ has_email=True,
+ presence_key="website",
+ status="active" if val else "idle",
+ )
+
def test_change_notification_sound(self) -> None:
notification_setting = "notification_sound"
| Presence dot should be in offline mode when not sharing availability
We have an Account & privacy setting to turn off "Display my availability to other users when online". When a user selects this setting, they are indicating that they don't want others to know that they are online.
We should therefore *immediately* update their presence indicator dot to show them as offline. Users will still be able to see "Active now" in the tooltip for the first couple of minutes, but this is much less salient than the presence dot. Based on a brief investigation, other chat apps take this approach as well.
This issue might address the user impact of #18846.
We should probably treat it as a blocker for #21178, which will make the hiding availability feature much more prominent.
| Hello @zulip/server-sidebars members, this issue was labeled with the "area: right-sidebar" label, so you may want to check it out!
<!-- areaLabelAddition -->
@zulipbot claim
Hello @raghavluthra20, it looks like you've currently claimed 1 issue in this repository. We encourage new contributors to focus their efforts on at most 1 issue at a time, so please complete your work on your other claimed issues before trying to claim this issue again.
We look forward to your valuable contributions!
@alya Can you assign this to me?
Hello @raghavluthra20, you claimed this issue to work on it, but this issue and any referenced pull requests haven't been updated for 10 days. Are you still working on this issue?
If so, please update this issue by leaving a comment on this issue to let me know that you're still working on it. Otherwise, I'll automatically remove you from this issue in 4 days.
If you've decided to work on something else, simply comment `@zulipbot abandon` so that someone else can claim it and continue from where you left off.
Thank you for your valuable contributions to Zulip!
<!-- inactiveWarning -->
@zulipbot abandon
@alya I need some clarification on this issue. I am unclear about the actual change that needs to be implemented, because the presence dot gets changed to offline as soon as the user unchecks "Display my availability to other users when online" in the privacy settings. Are you suggesting a change in the text within the tooltip? If yes, what would be more ideal?
Hey @nafiudanlawal, the problem is that it doesn't update immediately for _other users_ (takes around 3-5 minutes). I clarified this in the related [CZO discussion thread](https://chat.zulip.org/#narrow/stream/2-general/topic/.22unavailable.22.20status).
@alya WIP [PR - pls read notes there](https://github.com/zulip/zulip/pull/22387).
| 2022-07-05T22:47:20 |
zulip/zulip | 22,444 | zulip__zulip-22444 | [
"17897"
] | de275da70a5fa5eaa50a909b648ac7276a4f68ef | diff --git a/corporate/urls.py b/corporate/urls.py
--- a/corporate/urls.py
+++ b/corporate/urls.py
@@ -22,6 +22,7 @@
from corporate.views.upgrade import initial_upgrade, sponsorship, upgrade
from corporate.views.webhook import stripe_webhook
from zerver.lib.rest import rest_path
+from zerver.lib.url_redirects import LANDING_PAGE_REDIRECTS
i18n_urlpatterns: Any = [
# Zephyr/MIT
@@ -51,15 +52,11 @@
landing_page_urls = [
# Landing page, features pages, signup form, etc.
path("hello/", hello_view),
- path("new-user/", RedirectView.as_view(url="/hello", permanent=True)),
path("features/", landing_view, {"template_name": "corporate/features.html"}),
path("plans/", plans_view, name="plans"),
path("apps/", apps_view),
path("apps/download/<platform>", app_download_link_redirect),
path("apps/<platform>", apps_view),
- path(
- "developer-community/", RedirectView.as_view(url="/development-community/", permanent=True)
- ),
path(
"development-community/",
landing_view,
@@ -78,17 +75,11 @@
landing_view,
{"template_name": "corporate/for/communities.html"},
),
- # We merged this into /for/communities.
- path(
- "for/working-groups-and-communities/",
- RedirectView.as_view(url="/for/communities/", permanent=True),
- ),
path("for/education/", landing_view, {"template_name": "corporate/for/education.html"}),
path("for/events/", landing_view, {"template_name": "corporate/for/events.html"}),
path("for/open-source/", landing_view, {"template_name": "corporate/for/open-source.html"}),
path("for/research/", landing_view, {"template_name": "corporate/for/research.html"}),
path("for/business/", landing_view, {"template_name": "corporate/for/business.html"}),
- path("for/companies/", RedirectView.as_view(url="/for/business/", permanent=True)),
# case-studies
path(
"case-studies/idrift/",
@@ -126,6 +117,12 @@
{"template_name": "corporate/case-studies/recurse-center-case-study.html"},
),
]
+
+# Redirects due to us having moved or combined landing pages:
+for redirect in LANDING_PAGE_REDIRECTS:
+ old_url = redirect.old_url.lstrip("/")
+ landing_page_urls += [path(old_url, RedirectView.as_view(url=redirect.new_url, permanent=True))]
+
i18n_urlpatterns += landing_page_urls
# Make a copy of i18n_urlpatterns so that they appear without prefix for English
diff --git a/zerver/lib/url_redirects.py b/zerver/lib/url_redirects.py
new file mode 100644
--- /dev/null
+++ b/zerver/lib/url_redirects.py
@@ -0,0 +1,64 @@
+from dataclasses import dataclass
+from typing import List
+
+
+@dataclass
+class URLRedirect:
+ old_url: str
+ new_url: str
+
+
+API_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
+ # Add URL redirects for REST API documentation here:
+ URLRedirect("/api/delete-stream", "/api/archive-stream"),
+]
+
+POLICY_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
+ # Add URL redirects for policy documentation here:
+ URLRedirect("/privacy/", "/policies/privacy"),
+ URLRedirect("/terms/", "/policies/terms"),
+]
+
+HELP_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
+ # Add URL redirects for help center documentation here:
+ URLRedirect("/help/delete-a-stream", "/help/archive-a-stream"),
+ URLRedirect("/help/change-the-topic-of-a-message", "/help/rename-a-topic"),
+ URLRedirect("/help/configure-missed-message-emails", "/help/email-notifications"),
+ URLRedirect("/help/add-an-alert-word", "/help/pm-mention-alert-notifications#alert-words"),
+ URLRedirect("/help/test-mobile-notifications", "/help/mobile-notifications"),
+ URLRedirect(
+ "/help/troubleshooting-desktop-notifications",
+ "/help/desktop-notifications#troubleshooting-desktop-notifications",
+ ),
+ URLRedirect(
+ "/help/change-notification-sound", "/help/desktop-notifications#change-notification-sound"
+ ),
+ URLRedirect("/help/configure-message-notification-emails", "/help/email-notifications"),
+ URLRedirect("/help/disable-new-login-emails", "/help/email-notifications#new-login-emails"),
+ # The `help/about-streams-and-topics` redirect is particularly important,
+ # because the old URL appears in links from Welcome Bot messages.
+ URLRedirect("/help/about-streams-and-topics", "/help/streams-and-topics"),
+ URLRedirect("/help/community-topic-edits", "/help/configure-who-can-edit-topics"),
+ URLRedirect(
+ "/help/only-allow-admins-to-add-emoji", "/help/custom-emoji#change-who-can-add-custom-emoji"
+ ),
+ URLRedirect(
+ "/help/configure-who-can-add-custom-emoji",
+ "/help/custom-emoji#change-who-can-add-custom-emoji",
+ ),
+ URLRedirect("/help/add-custom-emoji", "/help/custom-emoji"),
+ URLRedirect("/help/night-mode", "/help/dark-theme"),
+ URLRedirect("/help/web-public-streams", "/help/public-access-option"),
+]
+
+LANDING_PAGE_REDIRECTS = [
+ # Add URL redirects for corporate landing pages here.
+ URLRedirect("/new-user/", "/hello"),
+ URLRedirect("/developer-community/", "/development-community"),
+ URLRedirect("/for/companies/", "/for/business"),
+ URLRedirect("/for/working-groups-and-communities/", "/for/communities"),
+]
+
+DOCUMENTATION_REDIRECTS = (
+ API_DOCUMENTATION_REDIRECTS + POLICY_DOCUMENTATION_REDIRECTS + HELP_DOCUMENTATION_REDIRECTS
+)
diff --git a/zproject/urls.py b/zproject/urls.py
--- a/zproject/urls.py
+++ b/zproject/urls.py
@@ -18,6 +18,7 @@
from zerver.forms import LoggingSetPasswordForm
from zerver.lib.integrations import WEBHOOK_INTEGRATIONS
from zerver.lib.rest import rest_path
+from zerver.lib.url_redirects import DOCUMENTATION_REDIRECTS
from zerver.tornado.views import cleanup_event_queue, get_events, get_events_internal, notify
from zerver.views.alert_words import add_alert_words, list_alert_words, remove_alert_words
from zerver.views.attachments import list_by_user, remove
@@ -780,98 +781,19 @@
template_name="zerver/documentation_main.html",
policies_view=True,
)
+
+# Redirects due to us having moved help center, API or policy documentation pages:
+for redirect in DOCUMENTATION_REDIRECTS:
+ old_url = redirect.old_url.lstrip("/")
+ urls += [path(old_url, RedirectView.as_view(url=redirect.new_url, permanent=True))]
+
urls += [
- # Redirects due to us having moved the docs:
- path(
- "help/delete-a-stream", RedirectView.as_view(url="/help/archive-a-stream", permanent=True)
- ),
- path("api/delete-stream", RedirectView.as_view(url="/api/archive-stream", permanent=True)),
- path(
- "help/change-the-topic-of-a-message",
- RedirectView.as_view(url="/help/rename-a-topic", permanent=True),
- ),
- path(
- "help/configure-missed-message-emails",
- RedirectView.as_view(url="/help/email-notifications", permanent=True),
- ),
- path(
- "help/add-an-alert-word",
- RedirectView.as_view(
- url="/help/pm-mention-alert-notifications#alert-words", permanent=True
- ),
- ),
- path(
- "help/test-mobile-notifications",
- RedirectView.as_view(url="/help/mobile-notifications", permanent=True),
- ),
- path(
- "help/troubleshooting-desktop-notifications",
- RedirectView.as_view(
- url="/help/desktop-notifications#troubleshooting-desktop-notifications", permanent=True
- ),
- ),
- path(
- "help/change-notification-sound",
- RedirectView.as_view(
- url="/help/desktop-notifications#change-notification-sound", permanent=True
- ),
- ),
- path(
- "help/configure-message-notification-emails",
- RedirectView.as_view(url="/help/email-notifications", permanent=True),
- ),
- path(
- "help/disable-new-login-emails",
- RedirectView.as_view(url="/help/email-notifications#new-login-emails", permanent=True),
- ),
- # This redirect is particularly important, because the old URL
- # appears in links from Welcome Bot messages.
- path(
- "help/about-streams-and-topics",
- RedirectView.as_view(url="/help/streams-and-topics", permanent=True),
- ),
- path(
- "help/community-topic-edits",
- RedirectView.as_view(url="/help/configure-who-can-edit-topics", permanent=True),
- ),
- path(
- "help/only-allow-admins-to-add-emoji",
- RedirectView.as_view(
- url="/help/custom-emoji#change-who-can-add-custom-emoji", permanent=True
- ),
- ),
- path(
- "help/configure-who-can-add-custom-emoji",
- RedirectView.as_view(
- url="/help/custom-emoji#change-who-can-add-custom-emoji", permanent=True
- ),
- ),
- path(
- "help/add-custom-emoji",
- RedirectView.as_view(url="/help/custom-emoji", permanent=True),
- ),
- path(
- "help/night-mode",
- RedirectView.as_view(url="/help/dark-theme", permanent=True),
- ),
- path(
- "help/web-public-streams",
- RedirectView.as_view(url="/help/public-access-option", permanent=True),
- ),
path("help/", help_documentation_view),
path("help/<path:article>", help_documentation_view),
path("api/", api_documentation_view),
path("api/<slug:article>", api_documentation_view),
path("policies/", policy_documentation_view),
path("policies/<slug:article>", policy_documentation_view),
- path(
- "privacy/",
- RedirectView.as_view(url="/policies/privacy"),
- ),
- path(
- "terms/",
- RedirectView.as_view(url="/policies/terms"),
- ),
]
# Two-factor URLs
| diff --git a/zerver/lib/test_helpers.py b/zerver/lib/test_helpers.py
--- a/zerver/lib/test_helpers.py
+++ b/zerver/lib/test_helpers.py
@@ -481,24 +481,6 @@ def find_pattern(pattern: Any, prefixes: List[str]) -> None:
"confirmation_key/",
"node-coverage/(?P<path>.+)",
"docs/(?P<path>.+)",
- "help/add-custom-emoji",
- "help/configure-who-can-add-custom-emoji",
- "help/change-the-topic-of-a-message",
- "help/configure-missed-message-emails",
- "help/community-topic-edits",
- "help/about-streams-and-topics",
- "help/delete-a-stream",
- "help/add-an-alert-word",
- "help/change-notification-sound",
- "help/configure-message-notification-emails",
- "help/disable-new-login-emails",
- "help/test-mobile-notifications",
- "help/troubleshooting-desktop-notifications",
- "help/web-public-streams",
- "for/working-groups-and-communities/",
- "help/only-allow-admins-to-add-emoji",
- "help/night-mode",
- "api/delete-stream",
"casper/(?P<path>.+)",
"static/(?P<path>.+)",
"flush_caches",
diff --git a/zerver/tests/test_urls.py b/zerver/tests/test_urls.py
--- a/zerver/tests/test_urls.py
+++ b/zerver/tests/test_urls.py
@@ -6,6 +6,12 @@
from django.test import Client
from zerver.lib.test_classes import ZulipTestCase
+from zerver.lib.url_redirects import (
+ API_DOCUMENTATION_REDIRECTS,
+ HELP_DOCUMENTATION_REDIRECTS,
+ LANDING_PAGE_REDIRECTS,
+ POLICY_DOCUMENTATION_REDIRECTS,
+)
from zerver.models import Realm, Stream
from zproject import urls
@@ -154,3 +160,25 @@ def test_bogus_http_host(self) -> None:
"/json/users", secure=True, HTTP_REFERER="https://somewhere", HTTP_HOST="$nonsense"
)
self.assertEqual(result.status_code, 400)
+
+
+class RedirectURLTest(ZulipTestCase):
+ def test_api_redirects(self) -> None:
+ for redirect in API_DOCUMENTATION_REDIRECTS:
+ result = self.client_get(redirect.old_url, follow=True)
+ self.assert_in_success_response(["Zulip homepage", "API documentation home"], result)
+
+ def test_help_redirects(self) -> None:
+ for redirect in HELP_DOCUMENTATION_REDIRECTS:
+ result = self.client_get(redirect.old_url, follow=True)
+ self.assert_in_success_response(["Zulip homepage", "Help center home"], result)
+
+ def test_policy_redirects(self) -> None:
+ for redirect in POLICY_DOCUMENTATION_REDIRECTS:
+ result = self.client_get(redirect.old_url, follow=True)
+ self.assert_in_success_response(["Policies", "Archive"], result)
+
+ def test_landing_page_redirects(self) -> None:
+ for redirect in LANDING_PAGE_REDIRECTS:
+ result = self.client_get(redirect.old_url, follow=True)
+ self.assert_in_success_response(["Download"], result)
| Add system for redirecting a renamed /help or /api article
From time to time we rename articles in the /help or /api world. This breaks incoming links, including from other branches of zulip/zulip that haven't been rebased, which is annoying, but more importantly from previous versions of Zulip that might be out there.
When we rename things in ReadTheDocs, we configure a redirect from the old URL to the new one. We should figure out a convenient way to do this with our Markdown system (it's easy with `urls.py` entries, but it would be nicer to just have a table to edit somewhere in `zerver/views/documentation.py`), and document the process in our documentation guides (https://zulip.readthedocs.io/en/latest/documentation/index.html).
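For illustration, here is one possible shape of the table-driven approach suggested above — a rough sketch rather than a drop-in implementation; the `URLRedirect` name mirrors the dataclass introduced in the patch earlier in this entry, while the module layout and the single sample entry are just examples:
```python
from dataclasses import dataclass
from typing import List

from django.urls import path
from django.views.generic import RedirectView


@dataclass
class URLRedirect:
    old_url: str
    new_url: str


# Single table to edit whenever a /help or /api article is renamed.
HELP_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
    URLRedirect("/help/delete-a-stream", "/help/archive-a-stream"),
]

# Expand the table into ordinary urls.py entries.
urlpatterns = [
    path(
        redirect.old_url.lstrip("/"),
        RedirectView.as_view(url=redirect.new_url, permanent=True),
    )
    for redirect in HELP_DOCUMENTATION_REDIRECTS
]
```
With something like this in place, renaming an article only requires adding one table entry instead of hand-writing a `RedirectView` route each time.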
| Hello @zulip/server-api, @zulip/server-user-docs members, this issue was labeled with the "area: documentation (api and integrations)", "area: documentation (user)" labels, so you may want to check it out!
<!-- areaLabelAddition -->
@zulipbot claim
Welcome to Zulip, @KakoozaJerry! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here's some tips to get you off to a good start:
* Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
* [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
https://github.com/zulip/zulip/pull/17906 has an example of doing such a redirect manually.
Hello @KakoozaJerry, you claimed this issue to work on it, but this issue and any referenced pull requests haven't been updated for 10 days. Are you still working on this issue?
If so, please update this issue by leaving a comment on this issue to let me know that you're still working on it. Otherwise, I'll automatically remove you from this issue in 4 days.
If you've decided to work on something else, simply comment `@zulipbot abandon` so that someone else can claim it and continue from where you left off.
Thank you for your valuable contributions to Zulip!
<!-- inactiveWarning -->
@zulipbot abandon | 2022-07-11T11:51:16 |
zulip/zulip | 22,453 | zulip__zulip-22453 | [
"22338"
] | 9ce900f2b4ff687c567e637443374a54f348f5c7 | diff --git a/zerver/webhooks/newrelic/view.py b/zerver/webhooks/newrelic/view.py
--- a/zerver/webhooks/newrelic/view.py
+++ b/zerver/webhooks/newrelic/view.py
@@ -11,6 +11,12 @@
from zerver.lib.webhooks.common import check_send_webhook_message, unix_milliseconds_to_timestamp
from zerver.models import UserProfile
+# Newrelic planned to upgrade Alert Notification Channels to Workflows and Destinations
+# https://discuss.newrelic.com/t/plan-to-upgrade-alert-notification-channels-to-workflows-and-destinations/188205
+# This view will handle both old and new format but will keep it easy to delete the old code
+# once it is EOLed by the end of June, 2023
+
+# Once old is EOLed, delete the OPEN_TEMPLATE
OPEN_TEMPLATE = """
[Incident]({incident_url}) **opened** for condition: **{condition_name}** at <time:{iso_timestamp}>
``` quote
@@ -18,13 +24,23 @@
```
""".strip()
+ACTIVE_TEMPLATE = """
+[Incident]({incident_url}) **active** for condition: **{condition_name}** at <time:{iso_timestamp}>
+``` quote
+{details}
+```
+""".strip()
+
DEFAULT_TEMPLATE = (
"""[Incident]({incident_url}) **{status}** {owner}for condition: **{condition_name}**""".strip()
)
TOPIC_TEMPLATE = """{policy_name} ({incident_id})""".strip()
-ALL_EVENT_TYPES = ["closed", "acknowledged", "open"]
+# Once old is EOLed, delete old and keep new
+OLD_EVENT_TYPES = ["closed", "acknowledged", "open"]
+NEW_EVENT_TYPES = ["created", "activated", "acknowledged", "closed"]
+ALL_EVENT_TYPES = list(set(OLD_EVENT_TYPES).union(set(NEW_EVENT_TYPES)))
@webhook_view("NewRelic", all_event_types=ALL_EVENT_TYPES)
@@ -35,45 +51,102 @@ def api_newrelic_webhook(
payload: Dict[str, Any] = REQ(argument_type="body"),
) -> HttpResponse:
- info = {
- "condition_name": payload.get("condition_name", "Unknown condition"),
- "details": payload.get("details", "No details."),
- "incident_url": payload.get("incident_url", "https://alerts.newrelic.com"),
- "incident_acknowledge_url": payload.get(
- "incident_acknowledge_url", "https://alerts.newrelic.com"
- ),
- "status": payload.get("current_state", "None"),
- "iso_timestamp": "",
- "owner": payload.get("owner", ""),
- }
-
- unix_time = payload.get("timestamp", None)
- if unix_time is None:
- raise JsonableError(_("The newrelic webhook requires timestamp in milliseconds"))
-
- info["iso_timestamp"] = unix_milliseconds_to_timestamp(unix_time, "newrelic")
-
- # Add formatting to the owner field if owner is present
- if info["owner"] != "":
- info["owner"] = "by **{}** ".format(info["owner"])
-
- # These are the three promised current_state values
- if "open" in info["status"]:
- content = OPEN_TEMPLATE.format(**info)
- elif "acknowledged" in info["status"]:
- content = DEFAULT_TEMPLATE.format(**info)
- elif "closed" in info["status"]:
- content = DEFAULT_TEMPLATE.format(**info)
+ # Handle old format
+ # Once old is EOLed, delete if block and keep else block
+ if not payload.get("id"):
+ info = {
+ "condition_name": payload.get("condition_name", "Unknown condition"),
+ "details": payload.get("details", "No details."),
+ "incident_url": payload.get("incident_url", "https://alerts.newrelic.com"),
+ "incident_acknowledge_url": payload.get(
+ "incident_acknowledge_url", "https://alerts.newrelic.com"
+ ),
+ "status": payload.get("current_state", "None"),
+ "iso_timestamp": "",
+ "owner": payload.get("owner", ""),
+ }
+
+ unix_time = payload.get("timestamp", None)
+ if unix_time is None:
+ raise JsonableError(_("The newrelic webhook requires timestamp in milliseconds"))
+
+ info["iso_timestamp"] = unix_milliseconds_to_timestamp(unix_time, "newrelic")
+
+ # Add formatting to the owner field if owner is present
+ if info["owner"] != "":
+ info["owner"] = "by **{}** ".format(info["owner"])
+
+ # These are the three promised current_state values
+ if info["status"].lower() == "open":
+ content = OPEN_TEMPLATE.format(**info)
+ elif info["status"].lower() == "acknowledged":
+ content = DEFAULT_TEMPLATE.format(**info)
+ elif info["status"].lower() == "closed":
+ content = DEFAULT_TEMPLATE.format(**info)
+ else:
+ raise JsonableError(
+ _("The newrelic webhook requires current_state be in [open|acknowledged|closed]")
+ )
+
+ topic_info = {
+ "policy_name": payload.get("policy_name", "Unknown Policy"),
+ "incident_id": payload.get("incident_id", "Unknown ID"),
+ }
+ topic = TOPIC_TEMPLATE.format(**topic_info)
+
+ check_send_webhook_message(request, user_profile, topic, content, info["status"])
+ return json_success(request)
+
+ # Handle new format
else:
- raise JsonableError(
- _("The newrelic webhook requires current_state be in [open|acknowledged|closed]")
- )
-
- topic_info = {
- "policy_name": payload.get("policy_name", "Unknown Policy"),
- "incident_id": payload.get("incident_id", "Unknown ID"),
- }
- topic = TOPIC_TEMPLATE.format(**topic_info)
-
- check_send_webhook_message(request, user_profile, topic, content, info["status"])
- return json_success(request)
+ info = {
+ "condition_name": payload.get("condition_name", "Unknown condition"),
+ "details": payload.get("details", "No details."),
+ "incident_url": payload.get("issueUrl", "https://alerts.newrelic.com"),
+ "incident_acknowledge_url": payload.get(
+ "incident_acknowledge_url", "https://alerts.newrelic.com"
+ ),
+ "status": payload.get("state", "None"),
+ "iso_timestamp": "",
+ "owner": payload.get("owner", ""),
+ }
+
+ unix_time = payload.get("createdAt", None)
+ if unix_time is None:
+ raise JsonableError(_("The newrelic webhook requires timestamp in milliseconds"))
+
+ info["iso_timestamp"] = unix_milliseconds_to_timestamp(unix_time, "newrelic")
+
+ # Add formatting to the owner field if owner is present
+ if info["owner"] != "":
+ info["owner"] = "by **{}** ".format(info["owner"])
+
+ # These are the three promised state values
+ if info["status"].lower() == "activated":
+ content = ACTIVE_TEMPLATE.format(**info)
+ elif info["status"].lower() == "acknowledged":
+ content = DEFAULT_TEMPLATE.format(**info)
+ elif info["status"].lower() == "closed":
+ content = DEFAULT_TEMPLATE.format(**info)
+ elif info["status"].lower() == "created":
+ content = DEFAULT_TEMPLATE.format(**info)
+ else:
+ raise JsonableError(
+ _(
+ "The newrelic webhook requires state be in [created|activated|acknowledged|closed]"
+ )
+ )
+
+ policy_names_list = payload.get("alertPolicyNames", [])
+ if policy_names_list:
+ policy_names_str = ",".join(policy_names_list)
+ else:
+ policy_names_str = "Unknown Policy"
+ topic_info = {
+ "policy_name": policy_names_str,
+ "incident_id": payload.get("id", "Unknown ID"),
+ }
+ topic = TOPIC_TEMPLATE.format(**topic_info)
+
+ check_send_webhook_message(request, user_profile, topic, content, info["status"])
+ return json_success(request)
| diff --git a/zerver/webhooks/newrelic/tests.py b/zerver/webhooks/newrelic/tests.py
--- a/zerver/webhooks/newrelic/tests.py
+++ b/zerver/webhooks/newrelic/tests.py
@@ -6,7 +6,11 @@ class NewRelicHookTests(WebhookTestCase):
URL_TEMPLATE = "/api/v1/external/newrelic?stream={stream}&api_key={api_key}"
WEBHOOK_DIR_NAME = "newrelic"
- def test_open(self) -> None:
+ # The following 9 unit tests are for the old format
+ # corresponding json fixtures were renamed to have the "_old" trailing
+ # These tests and fixtures are to be deleted when old notifications EOLed
+
+ def test_open_old(self) -> None:
expected_topic = "Test policy name (1234)"
expected_message = """
[Incident](https://alerts.newrelic.com/accounts/2941966/incidents/1234) **opened** for condition: **Server Down** at <time:2020-11-11 22:32:11.151000+00:00>
@@ -16,42 +20,42 @@ def test_open(self) -> None:
""".strip()
self.check_webhook(
- "incident_opened",
+ "incident_opened_old",
expected_topic,
expected_message,
content_type="application/json",
)
- def test_closed(self) -> None:
+ def test_closed_old(self) -> None:
expected_topic = "Test policy name (1234)"
expected_message = """
[Incident](https://alerts.newrelic.com/accounts/2941966/incidents/1234) **closed** for condition: **Server Down**
""".strip()
self.check_webhook(
- "incident_closed",
+ "incident_closed_old",
expected_topic,
expected_message,
content_type="application/json",
)
- def test_acknowledged(self) -> None:
+ def test_acknowledged_old(self) -> None:
expected_topic = "Test policy name (1234)"
expected_message = """
[Incident](https://alerts.newrelic.com/accounts/2941966/incidents/1234) **acknowledged** by **Alice** for condition: **Server Down**
""".strip()
self.check_webhook(
- "incident_acknowledged",
+ "incident_acknowledged_old",
expected_topic,
expected_message,
content_type="application/json",
)
- def test_not_recognized(self) -> None:
+ def test_not_recognized_old(self) -> None:
with self.assertRaises(AssertionError) as e:
self.check_webhook(
- "incident_state_not_recognized",
+ "incident_state_not_recognized_old",
"",
"",
content_type="application/json",
@@ -61,7 +65,7 @@ def test_not_recognized(self) -> None:
e.exception.args[0],
)
- def test_missing_fields(self) -> None:
+ def test_missing_fields_old(self) -> None:
expected_topic = "Unknown Policy (Unknown ID)"
expected_message = """
[Incident](https://alerts.newrelic.com) **opened** for condition: **Unknown condition** at <time:2020-11-11 22:32:11.151000+00:00>
@@ -71,16 +75,16 @@ def test_missing_fields(self) -> None:
""".strip()
self.check_webhook(
- "incident_default_fields",
+ "incident_default_fields_old",
expected_topic,
expected_message,
content_type="application/json",
)
- def test_missing_current_state(self) -> None:
+ def test_missing_current_state_old(self) -> None:
with self.assertRaises(AssertionError) as e:
self.check_webhook(
- "incident_missing_current_state",
+ "incident_missing_current_state_old",
"",
"",
content_type="application/json",
@@ -90,10 +94,143 @@ def test_missing_current_state(self) -> None:
e.exception.args[0],
)
- def test_missing_timestamp(self) -> None:
+ def test_missing_timestamp_old(self) -> None:
+ with self.assertRaises(AssertionError) as e:
+ self.check_webhook(
+ "incident_missing_timestamp_old",
+ "",
+ "",
+ content_type="application/json",
+ )
+ self.assertIn(
+ "The newrelic webhook requires timestamp in milliseconds", e.exception.args[0]
+ )
+
+ def test_malformatted_time_old(self) -> None:
+ with self.assertRaises(AssertionError) as e:
+ self.check_webhook(
+ "incident_malformatted_time_old",
+ "",
+ "",
+ content_type="application/json",
+ )
+ self.assertIn("The newrelic webhook expects time in milliseconds.", e.exception.args[0])
+
+ def test_time_too_large_old(self) -> None:
+ with self.assertRaises(AssertionError) as e:
+ self.check_webhook(
+ "incident_time_too_large_old",
+ "",
+ "",
+ content_type="application/json",
+ )
+ self.assertIn("The newrelic webhook expects time in milliseconds.", e.exception.args[0])
+
+ # The following 10 unit tests are for the new format
+ # One more test than the old format as we have 4 states instead of 3 in the old
+ # corresponding json fixtures have "_new" trailing in the name
+
+ def test_activated_new(self) -> None:
+ expected_topic = "Test policy name (1234)"
+ expected_message = """
+[Incident](https://alerts.newrelic.com/accounts/2941966/incidents/1234) **active** for condition: **Server Down** at <time:2020-11-11 22:32:11.151000+00:00>
+``` quote
+Violation description test.
+```
+""".strip()
+
+ self.check_webhook(
+ "incident_active_new",
+ expected_topic,
+ expected_message,
+ content_type="application/json",
+ )
+
+ def test_created_new(self) -> None:
+ expected_topic = "Test policy name (1234)"
+ expected_message = """
+[Incident](https://alerts.newrelic.com/accounts/2941966/incidents/1234) **created** for condition: **Server Down**
+""".strip()
+
+ self.check_webhook(
+ "incident_created_new",
+ expected_topic,
+ expected_message,
+ content_type="application/json",
+ )
+
+ def test_closed_new(self) -> None:
+ expected_topic = "Test policy name (1234)"
+ expected_message = """
+[Incident](https://alerts.newrelic.com/accounts/2941966/incidents/1234) **closed** for condition: **Server Down**
+""".strip()
+
+ self.check_webhook(
+ "incident_closed_new",
+ expected_topic,
+ expected_message,
+ content_type="application/json",
+ )
+
+ def test_acknowledged_new(self) -> None:
+ expected_topic = "Test policy name (1234)"
+ expected_message = """
+[Incident](https://alerts.newrelic.com/accounts/2941966/incidents/1234) **acknowledged** by **Alice** for condition: **Server Down**
+""".strip()
+
+ self.check_webhook(
+ "incident_acknowledged_new",
+ expected_topic,
+ expected_message,
+ content_type="application/json",
+ )
+
+ def test_not_recognized_new(self) -> None:
+ with self.assertRaises(AssertionError) as e:
+ self.check_webhook(
+ "incident_state_not_recognized_new",
+ "",
+ "",
+ content_type="application/json",
+ )
+ self.assertIn(
+ "The newrelic webhook requires state be in [created|activated|acknowledged|closed]",
+ e.exception.args[0],
+ )
+
+ def test_missing_fields_new(self) -> None:
+ expected_topic = "Unknown Policy (1234)"
+ expected_message = """
+[Incident](https://alerts.newrelic.com) **active** for condition: **Unknown condition** at <time:2020-11-11 22:32:11.151000+00:00>
+``` quote
+No details.
+```
+""".strip()
+
+ self.check_webhook(
+ "incident_default_fields_new",
+ expected_topic,
+ expected_message,
+ content_type="application/json",
+ )
+
+ def test_missing_state_new(self) -> None:
+ with self.assertRaises(AssertionError) as e:
+ self.check_webhook(
+ "incident_missing_state_new",
+ "",
+ "",
+ content_type="application/json",
+ )
+ self.assertIn(
+ "The newrelic webhook requires state be in [created|activated|acknowledged|closed]",
+ e.exception.args[0],
+ )
+
+ def test_missing_timestamp_new(self) -> None:
with self.assertRaises(AssertionError) as e:
self.check_webhook(
- "incident_missing_timestamp",
+ "incident_missing_timestamp_new",
"",
"",
content_type="application/json",
@@ -102,20 +239,20 @@ def test_missing_timestamp(self) -> None:
"The newrelic webhook requires timestamp in milliseconds", e.exception.args[0]
)
- def test_malformatted_time(self) -> None:
+ def test_malformatted_time_new(self) -> None:
with self.assertRaises(AssertionError) as e:
self.check_webhook(
- "incident_malformatted_time",
+ "incident_malformatted_time_new",
"",
"",
content_type="application/json",
)
self.assertIn("The newrelic webhook expects time in milliseconds.", e.exception.args[0])
- def test_time_too_large(self) -> None:
+ def test_time_too_large_new(self) -> None:
with self.assertRaises(AssertionError) as e:
self.check_webhook(
- "incident_time_too_large",
+ "incident_time_too_large_new",
"",
"",
content_type="application/json",
| Error: The newrelic webhook requires timestamp in milliseconds
New Relic is changing their notification system from "Classic" "incidents" and "channels" to "policies", "workflows" and "destinations" and it appears that the new format is not recognised by Zulip.
Payload template:
```mustache
{
"id": {{ json issueId }},
"issueUrl": {{ json issuePageUrl }},
"title": {{ json annotations.title.[0] }},
"priority": {{ json priority }},
"impactedEntities": {{json entitiesData.names}},
"totalIncidents": {{json totalIncidents}},
"state": {{ json state }},
"trigger": {{ json triggerEvent }},
"isCorrelated": {{ json isCorrelated }},
"createdAt": {{ createdAt }},
"updatedAt": {{ updatedAt }},
"sources": {{ json accumulations.source }},
"alertPolicyNames": {{ json accumulations.policyName }},
"alertConditionNames": {{ json accumulations.conditionName }},
"workflowName": {{ json workflowName }}
}
```
Payload preview:
```json
{
"id": "d1b1f3fd-995a-4066-88ab-8ce4f6960654",
"issueUrl": "https://one.newrelic.com/launcher/nrai.launcher?pane=eyJpc1Bob3RvbiI6dHJ1ZSwiaWQiOiJhYWFlMGRiMS1mMDI2LTRiNTMtYWU0Ni0yMTI0MTZmYzFhMjYiLCJuZXJkbGV0SWQiOiJucmFpLmlzc3VlLXJlZGlyZWN0In0=",
"title": "Memory Used % > 90 for at least 2 minutes on 'Some-Entity'",
"priority": "CRITICAL",
"impactedEntities": ["logs.itg.cloud","MonitorTTFB query"],
"totalIncidents": 42,
"state": "ACTIVATED",
"trigger": "INCIDENT_ADDED",
"isCorrelated": "FALSE",
"createdAt": 1617881246260,
"updatedAt": 1617881246260,
"sources": ["newrelic"],
"alertPolicyNames": ["Policy1","Policy2"],
"alertConditionNames": ["condition1","condition2"],
"workflowName": "DBA Team workflow"
}
```
Response from Zulip (shown by New Relic after clicking the "send test notification" button):
```
Client request(https://[REDACTED].zulipchat.com/api/v1/external/newrelic?api_key=[REDACTED]&stream=[REDACTED]) invalid: 400 Bad Request. Text: "{"result":"error","msg":"The newrelic webhook requires timestamp in milliseconds","code":"BAD_REQUEST"}
"
```
| Hello @zulip/server-integrations members, this issue was labeled with the "area: integrations" label, so you may want to check it out!
<!-- areaLabelAddition -->
@yuroitaki this could be another good one for you.
@zulipbot claim
Welcome to Zulip, @Rodwan-Bakkar! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here's some tips to get you off to a good start:
- Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
- [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
| 2022-07-12T13:28:04 |
zulip/zulip | 22,470 | zulip__zulip-22470 | [
"22461"
] | af01107b5986802b0106e664b1b4b73e4dd7a9eb | diff --git a/zerver/lib/events.py b/zerver/lib/events.py
--- a/zerver/lib/events.py
+++ b/zerver/lib/events.py
@@ -111,6 +111,7 @@ def fetch_initial_state_data(
slim_presence: bool = False,
include_subscribers: bool = True,
include_streams: bool = True,
+ spectator_requested_language: Optional[str] = None,
) -> Dict[str, Any]:
"""When `event_types` is None, fetches the core data powering the
web app's `page_params` and `/api/v1/register` (for mobile/terminal
@@ -372,6 +373,7 @@ def fetch_initial_state_data(
if user_profile is not None:
settings_user = user_profile
else:
+ assert spectator_requested_language is not None
# When UserProfile=None, we want to serve the values for various
# settings as the defaults. Instead of copying the default values
# from models.py here, we access these default values from a
@@ -392,6 +394,7 @@ def fetch_initial_state_data(
avatar_source=UserProfile.AVATAR_FROM_GRAVATAR,
# ID=0 is not used in real Zulip databases, ensuring this is unique.
id=0,
+ default_language=spectator_requested_language,
)
if want("realm_user"):
state["raw_users"] = get_raw_user_data(
@@ -1332,6 +1335,7 @@ def do_events_register(
client_capabilities: Dict[str, bool] = {},
narrow: Collection[Sequence[str]] = [],
fetch_event_types: Optional[Collection[str]] = None,
+ spectator_requested_language: Optional[str] = None,
) -> Dict[str, Any]:
# Technically we don't need to check this here because
# build_narrow_filter will check it, but it's nicer from an error
@@ -1380,6 +1384,7 @@ def do_events_register(
include_subscribers=False,
# Force include_streams=False for security reasons.
include_streams=False,
+ spectator_requested_language=spectator_requested_language,
)
post_process_state(user_profile, ret, notification_settings_null=False)
diff --git a/zerver/views/events_register.py b/zerver/views/events_register.py
--- a/zerver/views/events_register.py
+++ b/zerver/views/events_register.py
@@ -1,5 +1,6 @@
from typing import Dict, Optional, Sequence, Union
+from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.http import HttpRequest, HttpResponse
from django.utils.translation import gettext as _
@@ -75,6 +76,7 @@ def events_register_backend(
) -> HttpResponse:
if maybe_user_profile.is_authenticated:
user_profile = maybe_user_profile
+ spectator_requested_language = None
assert isinstance(user_profile, UserProfile)
realm = user_profile.realm
@@ -86,6 +88,10 @@ def events_register_backend(
else:
user_profile = None
realm = get_valid_realm_from_request(request)
+ # Language set by spectator to be passed down to clients as user_settings.
+ spectator_requested_language = request.COOKIES.get(
+ settings.LANGUAGE_COOKIE_NAME, realm.default_language
+ )
if not realm.allow_web_public_streams_access():
raise MissingAuthenticationError()
@@ -112,5 +118,6 @@ def events_register_backend(
include_subscribers=include_subscribers,
client_capabilities=client_capabilities,
fetch_event_types=fetch_event_types,
+ spectator_requested_language=spectator_requested_language,
)
return json_success(request, data=ret)
| Spectator language picker always shows realm default language as current language.
The `default_language` parameter used for the logged-out view is always the realm's default language, not the language you picked in the UI. Having done a bit of investigation, I believe the problem is that while we read from the browser's cookies inside home.py:
```
if user_profile is None:
request_language = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME, default_language)
```
The `page_params` for spectators come from a separate request to `GET /api/v1/register`, and thus don't inspect the cookies at all. I'm not sure I see a clean way to fix this, but probably the easiest approach would be for `events_register_backend` to read the cookie inside the spectator code path, and pass it through as `request_language` to `do_events_register` and then `fetch_initial_state_data`, which can in turn initialize `settings_user` with `default_language=request_language or realm.default_language`, or something along those lines.
@amanagr FYI.
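As a rough sketch of that suggestion (the helper name below is hypothetical; in the patch earlier in this entry the value is threaded through as `spectator_requested_language` and used as the placeholder spectator user's `default_language`):
```python
from django.conf import settings
from django.http import HttpRequest


def spectator_language(request: HttpRequest, realm_default_language: str) -> str:
    # Spectators have no UserProfile, so read the language cookie set by the
    # language picker, falling back to the realm-wide default language.
    return request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME, realm_default_language)
```
The authenticated code path is unaffected, since logged-in users already get their own `default_language` setting.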
| 2022-07-13T16:50:25 |
||
zulip/zulip | 22,477 | zulip__zulip-22477 | [
"22377"
] | af01107b5986802b0106e664b1b4b73e4dd7a9eb | diff --git a/zproject/urls.py b/zproject/urls.py
--- a/zproject/urls.py
+++ b/zproject/urls.py
@@ -258,7 +258,7 @@
# realm/emoji -> zerver.views.realm_emoji
rest_path("realm/emoji", GET=list_emoji),
rest_path(
- "realm/emoji/<emoji_name>",
+ "realm/emoji/<path:emoji_name>",
POST=upload_emoji,
DELETE=(delete_emoji, {"intentionally_undocumented"}),
),
| diff --git a/zerver/tests/test_realm_emoji.py b/zerver/tests/test_realm_emoji.py
--- a/zerver/tests/test_realm_emoji.py
+++ b/zerver/tests/test_realm_emoji.py
@@ -123,6 +123,18 @@ def test_upload_exception(self) -> None:
"Emoji names must contain only numbers, lowercase English letters, spaces, dashes, underscores, and periods.",
)
+ def test_forward_slash_exception(self) -> None:
+ self.login("iago")
+ with get_test_image_file("img.png") as fp1:
+ emoji_data = {"f1": fp1}
+ result = self.client_post(
+ "/json/realm/emoji/my/emoji/with/forward/slash/", info=emoji_data
+ )
+ self.assert_json_error(
+ result,
+ "Emoji names must contain only numbers, lowercase English letters, spaces, dashes, underscores, and periods.",
+ )
+
def test_upload_uppercase_exception(self) -> None:
self.login("iago")
with get_test_image_file("img.png") as fp1:
| Error handling for emoji-names with a slash at the end
Reported [here](https://github.com/zulip/zulip/pull/22367#issuecomment-1174087030) by @sov-j
Tested this on current Firefox. There is a bug where handling of the forward slash symbol (`/`) is incorrect. Details are below.
### Reproduction steps
1. Open **Custom emoji** form
2. Enter name with `/` (e.g. `test/`)
3. Press **Add emoji** button
### Expected result
"Emoji names must contain [...]" error message appears.
### Actual result
A stack trace appears with a JSON parsing error. The **Add emoji** button becomes inactive until the form is reopened. See screenshots below.


| Hello @zulip/server-emoji members, this issue was labeled with the "area: emoji" label, so you may want to check it out!
<!-- areaLabelAddition -->
@ganpa3 want to pick this one up? | 2022-07-14T07:10:29 |
zulip/zulip | 22,505 | zulip__zulip-22505 | [
"22464",
"22464"
] | a8d640a5d6f6f616afdd7eb1c710c7766d42c553 | diff --git a/zerver/lib/integrations.py b/zerver/lib/integrations.py
--- a/zerver/lib/integrations.py
+++ b/zerver/lib/integrations.py
@@ -509,7 +509,7 @@ def __init__(self, name: str, *args: Any, **kwargs: Any) -> None:
display_name="GIPHY",
categories=["misc"],
doc="zerver/integrations/giphy.md",
- logo="images/GIPHY_big_logo.png",
+ logo="images/integrations/giphy/GIPHY_big_logo.png",
),
"git": Integration(
"git", "git", ["version-control"], stream_name="commits", doc="zerver/integrations/git.md"
| Clean up /static/images/ directory
To clean up the `/static/images/` directory and therefore make it easier to understand what images we have, we should:
- [ ] Move 400art.svg, 500art.svg and timehout_hourglass.png into `/static/images/errors`
- [ ] Move loader-black.svg, loader-white.svg, loading-ellipsis.svg and tail-spin.svg into `/static/images/loading`
- [ ] Move GIPHY_zulip.png, GIPHY_big_logo.png into `/static/images/integrations/giphy` (cf. [discussion on CZO](https://chat.zulip.org/#narrow/stream/6-frontend/topic/GIPHY.20image.20cleanup))
- [ ] Move GIPHY_attribution.png into `/static/images/giphy` (cf. [discussion on CZO](https://chat.zulip.org/#narrow/stream/6-frontend/topic/GIPHY.20image.20cleanup))
| Hello @zulip/server-refactoring members, this issue was labeled with the "area: refactoring" label, so you may want to check it out!
<!-- areaLabelAddition -->
@zulipbot claim
Welcome to Zulip, @Khushiyant! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here's some tips to get you off to a good start:
- Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
- [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
@Khushiyant As a heads up, I added a couple more points to the issue description.
| 2022-07-16T18:44:26 |
|
zulip/zulip | 22,513 | zulip__zulip-22513 | [
"22274"
] | 7d8be670a58016ef7258deaee19451e92dd31023 | diff --git a/zerver/actions/create_user.py b/zerver/actions/create_user.py
--- a/zerver/actions/create_user.py
+++ b/zerver/actions/create_user.py
@@ -235,11 +235,9 @@ def process_new_human_user(
),
)
- # Revoke all preregistration users except prereg_user, and link prereg_user to
- # the created user
- if prereg_user is None:
- assert not realm_creation, "realm_creation should only happen with a PreregistrationUser"
-
+ # For the sake of tracking the history of UserProfiles,
+ # we want to tie the newly created user to the PreregistrationUser
+ # it was created from.
if prereg_user is not None:
prereg_user.status = confirmation_settings.STATUS_ACTIVE
prereg_user.created_user = user_profile
| `create_realm` management command generates an AssertionError
On Zulip 5.2, creating a realm with the `create_realm` management command leads to an assertion error.
```
$ /home/zulip/deployments/current/manage.py create_realm Test [email protected] "General Admin" --password "..."
2022-06-21 19:29:44.443 WARN [] Passing password on the command line is insecure; prefer --password-file.
2022-06-21 19:29:44.448 INFO [] Server not yet initialized. Creating the internal realm first.
Traceback (most recent call last):
File "/home/zulip/deployments/current/manage.py", line 157, in <module>
execute_from_command_line(sys.argv)
File "/home/zulip/deployments/current/manage.py", line 122, in execute_from_command_line
utility.execute()
File "/home/zulip/deployments/2022-06-21-15-28-45/zulip-py3-venv/lib/python3.7/site-packages/django/core/management/__init__.py", line 413, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/zulip/deployments/2022-06-21-15-28-45/zulip-py3-venv/lib/python3.7/site-packages/django/core/management/base.py", line 354, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/zulip/deployments/2022-06-21-15-28-45/zulip-py3-venv/lib/python3.7/site-packages/django/core/management/base.py", line 398, in execute
output = self.handle(*args, **options)
File "/home/zulip/deployments/2022-06-21-15-28-45/zerver/management/commands/create_realm.py", line 63, in handle
acting_user=None,
File "/home/zulip/deployments/2022-06-21-15-28-45/zerver/actions/create_user.py", line 445, in do_create_user
realm_creation=realm_creation,
File "/home/zulip/deployments/2022-06-21-15-28-45/zerver/actions/create_user.py", line 231, in process_new_human_user
revoke_preregistration_users(user_profile, prereg_user, realm_creation)
File "/home/zulip/deployments/2022-06-21-15-28-45/zerver/actions/create_user.py", line 255, in revoke_preregistration_users
assert not realm_creation, "realm_creation should only happen with a PreregistrationUser"
AssertionError: realm_creation should only happen with a PreregistrationUser
```
Related discussion about this issue: https://chat.zulip.org/#narrow/stream/9-issues/topic/create_realm.20does.20not.20work
| Hello @zulip/server-production members, this issue was labeled with the "area: production" label, so you may want to check it out!
<!-- areaLabelAddition -->
| 2022-07-18T09:21:54 |
|
zulip/zulip | 22,536 | zulip__zulip-22536 | [
"22510"
] | d8ae270899016eb5a6a0953de04632da3a6a5950 | diff --git a/zerver/actions/create_user.py b/zerver/actions/create_user.py
--- a/zerver/actions/create_user.py
+++ b/zerver/actions/create_user.py
@@ -77,14 +77,16 @@ def create_historical_user_messages(*, user_id: int, message_ids: List[int]) ->
def send_message_to_signup_notification_stream(
- sender: UserProfile, realm: Realm, message: str, topic_name: str = _("signups")
+ sender: UserProfile, realm: Realm, message: str
) -> None:
signup_notifications_stream = realm.get_signup_notifications_stream()
if signup_notifications_stream is None:
return
with override_language(realm.default_language):
- internal_send_stream_message(sender, signup_notifications_stream, topic_name, message)
+ topic_name = _("signups")
+
+ internal_send_stream_message(sender, signup_notifications_stream, topic_name, message)
def notify_new_user(user_profile: UserProfile) -> None:
@@ -94,9 +96,10 @@ def notify_new_user(user_profile: UserProfile) -> None:
is_first_user = user_count == 1
if not is_first_user:
- message = _("{user} just signed up for Zulip. (total: {user_count})").format(
- user=silent_mention_syntax_for_user(user_profile), user_count=user_count
- )
+ with override_language(user_profile.realm.default_language):
+ message = _("{user} just signed up for Zulip. (total: {user_count})").format(
+ user=silent_mention_syntax_for_user(user_profile), user_count=user_count
+ )
if settings.BILLING_ENABLED:
from corporate.lib.registration import generate_licenses_low_warning_message_if_required
@@ -117,9 +120,10 @@ def notify_new_user(user_profile: UserProfile) -> None:
# Check whether the stream exists
signups_stream = get_signups_stream(admin_realm)
# We intentionally use the same strings as above to avoid translation burden.
- message = _("{user} just signed up for Zulip. (total: {user_count})").format(
- user=f"{user_profile.full_name} <`{user_profile.email}`>", user_count=user_count
- )
+ with override_language(admin_realm.default_language):
+ message = _("{user} just signed up for Zulip. (total: {user_count})").format(
+ user=f"{user_profile.full_name} <`{user_profile.email}`>", user_count=user_count
+ )
internal_send_stream_message(
admin_realm_sender, signups_stream, user_profile.realm.display_subdomain, message
)
| Notification bot uses the language of the user that triggered the notification
Notification bot uses the language of the user that triggered the notification.
See image:
<img width="404" alt="Screen Shot 2022-07-17 at 15 28 30" src="https://user-images.githubusercontent.com/322822/179425453-2d38f403-d633-41a2-a281-fa2055cdb050.png">
I find this odd, but perhaps hard to prevent, as I assume these are messages written to the server, and thus it is unrealistic that there would be a different framework just to handle Notification Bot messages.
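The fix in the diff above hinges on rendering these strings under the realm's default language instead of the acting user's. A minimal sketch of that pattern, assuming a configured Django project (the function and variable names here are illustrative, not Zulip's actual code):
```python
from django.utils.translation import gettext as _
from django.utils.translation import override as override_language

def build_signup_notification(user_name: str, realm_default_language: str) -> str:
    # Render the translated string in the realm's configured language,
    # independent of the language of the user whose action triggered it.
    with override_language(realm_default_language):
        return _("{user} just signed up for Zulip.").format(user=user_name)
```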
| Hello @zulip/server-i18n members, this issue was labeled with the "area: internationalization" label, so you may want to check it out!
<!-- areaLabelAddition -->
| 2022-07-20T11:01:56 |
|
zulip/zulip | 22,637 | zulip__zulip-22637 | [
"22205"
] | 966e63cc1449fdfb947a7edbe5ea6781d158e713 | diff --git a/tools/lib/capitalization.py b/tools/lib/capitalization.py
--- a/tools/lib/capitalization.py
+++ b/tools/lib/capitalization.py
@@ -42,6 +42,7 @@
r"Zephyr",
r"Zoom",
r"Zulip",
+ r"Zulip Server",
r"Zulip Account Security",
r"Zulip Security",
r"Zulip Cloud Standard",
| diff --git a/frontend_tests/node_tests/gear_menu.js b/frontend_tests/node_tests/gear_menu.js
new file mode 100644
--- /dev/null
+++ b/frontend_tests/node_tests/gear_menu.js
@@ -0,0 +1,67 @@
+"use strict";
+
+const {strict: assert} = require("assert");
+
+const {zrequire} = require("../zjsunit/namespace");
+const {run_test} = require("../zjsunit/test");
+const {page_params} = require("../zjsunit/zpage_params");
+
+const gear_menu = zrequire("gear_menu");
+
+run_test("version_display_string", () => {
+ let expected_version_display_string;
+
+ // An official release
+ page_params.zulip_version = "5.6";
+ page_params.zulip_merge_base = "5.6";
+ expected_version_display_string = "translated: Zulip Server 5.6";
+ assert.equal(gear_menu.version_display_string(), expected_version_display_string);
+
+ // An official beta
+ page_params.zulip_version = "6.0-beta1";
+ page_params.zulip_merge_base = "6.0-beta1";
+ expected_version_display_string = "translated: Zulip Server 6.0-beta1";
+ assert.equal(gear_menu.version_display_string(), expected_version_display_string);
+
+ // An official release candidate
+ page_params.zulip_version = "6.0-rc1";
+ page_params.zulip_merge_base = "6.0-rc1";
+ expected_version_display_string = "translated: Zulip Server 6.0-rc1";
+ assert.equal(gear_menu.version_display_string(), expected_version_display_string);
+
+ // The Zulip development environment
+ page_params.zulip_version = "6.0-dev+git";
+ page_params.zulip_merge_base = "6.0-dev+git";
+ expected_version_display_string = "translated: Zulip Server dev environment";
+ assert.equal(gear_menu.version_display_string(), expected_version_display_string);
+
+ // A commit on Zulip's main branch.
+ page_params.zulip_version = "6.0-dev-1976-g4bb381fc80";
+ page_params.zulip_merge_base = "6.0-dev-1976-g4bb381fc80";
+ expected_version_display_string = "translated: Zulip Server 6.0-dev";
+ assert.equal(gear_menu.version_display_string(), expected_version_display_string);
+
+ // A fork with 18 commits beyond Zulip's main branch.
+ page_params.zulip_version = "6.0-dev-1994-g93730766b0";
+ page_params.zulip_merge_base = "6.0-dev-1976-g4bb381fc80";
+ expected_version_display_string = "translated: Zulip Server 6.0-dev (modified)";
+ assert.equal(gear_menu.version_display_string(), expected_version_display_string);
+
+ // A commit from the Zulip 5.x branch
+ page_params.zulip_version = "5.6+git-4-g385a408be5";
+ page_params.zulip_merge_base = "5.6+git-4-g385a408be5";
+ expected_version_display_string = "translated: Zulip Server 5.6 (patched)";
+ assert.equal(gear_menu.version_display_string(), expected_version_display_string);
+
+ // A fork with 3 commits beyond the Zulip 5.x branch.
+ page_params.zulip_version = "5.6+git-4-g385a408be5";
+ page_params.zulip_merge_base = "5.6+git-7-abcda4235c2";
+ expected_version_display_string = "translated: Zulip Server 5.6 (modified)";
+ assert.equal(gear_menu.version_display_string(), expected_version_display_string);
+
+ // A fork of a Zulip release commit, not on 5.x branch.
+ page_params.zulip_version = "5.3-1-g7ed896c0db";
+ page_params.zulip_merge_base = "5.3";
+ expected_version_display_string = "translated: Zulip Server 5.3 (modified)";
+ assert.equal(gear_menu.version_display_string(), expected_version_display_string);
+});
| Show info about organization and Zulip in gear menu
At present, it requires some digging to find a few key pieces of information about the Zulip organization one is viewing:
- Organization name
- Organization URL (if using the Desktop app)
- For Zulip Cloud, the plan that the organization is on.
- For self-hosted Zulip, does the server need to be upgraded? What's the version of the server, and what's the current release version?
We should address this by adding this information at the top of the gear settings menu.
- For all users: Show organization name and URL
- For all Zulip Cloud users: Show plan name with a link to `/plans`, e.g. "Zulip Cloud Free"
- For owners of Zulip Cloud Free orgs: Show "Upgrade to Zulip Cloud Standard" link to `/upgrade`
- For all self-hosted users:
- Show Zulip server version (same as in the "About Zulip" widget); we'll need to test to make sure it looks reasonable for non-standard versions (e.g. forks, installs running off `main`).
- If the server version is old, we should show an "Upgrade to the latest release (x.y)" linking to https://zulip.readthedocs.io/en/stable/production/upgrade-or-modify.html. We should probably show this link to all users, as server admins might not be owners/admins of the organization.
## Mockups
<img width="1552" alt="popover-menu" src="https://user-images.githubusercontent.com/2090066/172440944-5dc8ee48-908f-4642-beb7-9ec141128a29.png">
<img width="1552" alt="dark-inbox-01" src="https://user-images.githubusercontent.com/2090066/172440973-12639e2a-3f42-408d-b976-27b01498ecda.png">
<img width="1608" alt="selfhosted-upgrade" src="https://user-images.githubusercontent.com/2090066/172441028-c0ce417f-e3db-4542-845f-10ba3fab98df.png">
**CZO discussion threads:**
- [Design proposal (Zulip Cloud)](https://chat.zulip.org/#narrow/stream/101-design/topic/UI.20redesign.3A.20popover.20menu/near/1388585)
- [Server upgrade notice](https://chat.zulip.org/#narrow/stream/101-design/topic/server.20upgrade.20notice)
| 2022-07-31T17:28:05 |
|
zulip/zulip | 22,663 | zulip__zulip-22663 | [
"22496"
] | c4388e66e53676d471743d2f92c3890514ba4fe6 | diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -48,4 +48,4 @@
# historical commits sharing the same major version, in which case a
# minor version bump suffices.
-PROVISION_VERSION = (196, 0)
+PROVISION_VERSION = (197, 0)
| diff --git a/frontend_tests/puppeteer_lib/common.ts b/frontend_tests/puppeteer_lib/common.ts
--- a/frontend_tests/puppeteer_lib/common.ts
+++ b/frontend_tests/puppeteer_lib/common.ts
@@ -502,7 +502,7 @@ class CommonUtils {
{visible: true},
);
assert.ok(entry);
- await entry.hover();
+ await (entry as ElementHandle<Element>).hover();
await page.evaluate((entry) => {
if (!(entry instanceof HTMLElement)) {
throw new TypeError("expected HTMLElement");
diff --git a/frontend_tests/puppeteer_tests/admin.ts b/frontend_tests/puppeteer_tests/admin.ts
--- a/frontend_tests/puppeteer_tests/admin.ts
+++ b/frontend_tests/puppeteer_tests/admin.ts
@@ -1,6 +1,6 @@
import {strict as assert} from "assert";
-import type {Page} from "puppeteer";
+import type {ElementHandle, Page} from "puppeteer";
import common from "../puppeteer_lib/common";
@@ -40,7 +40,8 @@ async function test_change_new_stream_notifications_setting(page: Page): Promise
'//*[@id="realm_notifications_stream_id_widget"]//*[@class="dropdown-list-body"]/li[1]',
{visible: true},
);
- await verona_in_dropdown!.click();
+ assert.ok(verona_in_dropdown);
+ await (verona_in_dropdown as ElementHandle<Element>).click();
await submit_notifications_stream_settings(page);
@@ -183,7 +184,10 @@ async function test_add_emoji(page: Page): Promise<void> {
await common.fill_form(page, "form.admin-emoji-form", {name: "zulip logo"});
const emoji_upload_handle = await page.$("#emoji_file_input");
- await emoji_upload_handle!.uploadFile("static/images/logo/zulip-icon-128x128.png");
+ assert.ok(emoji_upload_handle);
+ await (emoji_upload_handle as ElementHandle<HTMLInputElement>).uploadFile(
+ "static/images/logo/zulip-icon-128x128.png",
+ );
await page.click("#admin_emoji_submit");
const emoji_status = "div#admin-emoji-status";
@@ -276,7 +280,10 @@ async function test_default_streams(page: Page): Promise<void> {
async function test_upload_realm_icon_image(page: Page): Promise<void> {
const upload_handle = await page.$("#realm-icon-upload-widget .image_file_input");
- await upload_handle!.uploadFile("static/images/logo/zulip-icon-128x128.png");
+ assert.ok(upload_handle);
+ await (upload_handle as ElementHandle<HTMLInputElement>).uploadFile(
+ "static/images/logo/zulip-icon-128x128.png",
+ );
await page.waitForSelector("#realm-icon-upload-widget .upload-spinner-background", {
visible: true,
diff --git a/frontend_tests/puppeteer_tests/compose.ts b/frontend_tests/puppeteer_tests/compose.ts
--- a/frontend_tests/puppeteer_tests/compose.ts
+++ b/frontend_tests/puppeteer_tests/compose.ts
@@ -1,6 +1,6 @@
import {strict as assert} from "assert";
-import type {Page} from "puppeteer";
+import type {ElementHandle, Page} from "puppeteer";
import common from "../puppeteer_lib/common";
@@ -63,7 +63,7 @@ async function test_reply_by_click_prepopulates_stream_topic_names(page: Page):
await page.waitForXPath(stream_message_xpath, {visible: true});
const stream_message = get_last_element(await page.$x(stream_message_xpath));
// we chose only the last element make sure we don't click on any duplicates.
- await stream_message.click();
+ await (stream_message as ElementHandle<Element>).click();
await common.check_form_contents(page, "#send_message_form", {
stream_message_recipient_stream: "Verona",
stream_message_recipient_topic: "Reply test",
@@ -78,7 +78,7 @@ async function test_reply_by_click_prepopulates_private_message_recipient(
const private_message = get_last_element(
await page.$x(get_message_xpath("Compose private message reply test")),
);
- await private_message.click();
+ await (private_message as ElementHandle<Element>).click();
await page.waitForSelector("#private_message_recipient", {visible: true});
await common.pm_recipient.expect(
page,
| dependencies: Switch commit hash for postcss-media-minmax.git.
The commit was re-pushed to the PR it was on, making the old hash
invalid. See 64b78ad992ea6d0e5a7cc03869d9b71fc9006117.
**Self-review checklist**
<!-- Prior to submitting a PR, follow our step-by-step guide to review your own code:
https://zulip.readthedocs.io/en/latest/contributing/code-reviewing.html#how-to-review-code -->
<!-- Once you create the PR, check off all the steps below that you have completed.
If any of these steps are not relevant or you have not completed, leave them unchecked.-->
- [ ] [Self-reviewed](https://zulip.readthedocs.io/en/latest/contributing/code-reviewing.html#how-to-review-code) the changes for clarity and maintainability
(variable names, code reuse, readability, etc.).
Communicate decisions, questions, and potential concerns.
- [ ] Explains differences from previous plans (e.g., issue description).
- [ ] Highlights technical choices and bugs encountered.
- [ ] Calls out remaining decisions and concerns.
- [ ] Automated tests verify logic where appropriate.
Individual commits are ready for review (see [commit discipline](https://zulip.readthedocs.io/en/latest/contributing/version-control.html)).
- [ ] Each commit is a coherent idea.
- [ ] Commit message(s) explain reasoning and motivation for changes.
Completed manual review and testing of the following:
- [ ] Visual appearance of the changes.
- [ ] Responsiveness and internationalization.
- [ ] Strings and tooltips.
- [ ] End-to-end functionality of buttons, interactions and flows.
- [ ] Corner cases, error conditions, and easily imagined bugs.
| Missing a `PROVISION_VERSION` bump. Let's hold off on this a bit to see what upstream does in postcss/postcss-media-minmax#28; I've repushed the old commit to a tag to fix installs.
Repushed with the `PROVISION_VERSION` bump, just in case we need it.
👍 on holding off pending the results of that other PR.
Heads up @alexmv, we just merged some commits that conflict with the changes you made in this pull request! You can review this repository's [recent commits](https://github.com/zulip/zulip/commits/main) to see where the conflicts occur. Please rebase your feature branch against the `upstream/main` branch and [resolve](https://zulip.readthedocs.io/en/latest/git/troubleshooting.html#recover-from-a-git-rebase-failure) your pull request's merge conflicts accordingly.
<!-- mergeConflictWarning -->
Has upstream dealt with this yet? | 2022-08-04T20:53:39 |
zulip/zulip | 22,669 | zulip__zulip-22669 | [
"22552"
] | e653bb2733db729480c09bb36dfacf5a5fb8ae08 | diff --git a/zerver/models.py b/zerver/models.py
--- a/zerver/models.py
+++ b/zerver/models.py
@@ -1111,7 +1111,17 @@ class Meta:
def get_realm_emoji_dicts(realm: Realm, only_active_emojis: bool = False) -> Dict[str, EmojiInfo]:
- query = RealmEmoji.objects.filter(realm=realm).select_related("author")
+ # RealmEmoji objects with file_name=None are still in the process
+ # of being uploaded, and we expect to be cleaned up by a
+ # try/finally block if the upload fails, so it's correct to
+ # exclude them.
+ query = (
+ RealmEmoji.objects.filter(realm=realm)
+ .exclude(
+ file_name=None,
+ )
+ .select_related("author")
+ )
if only_active_emojis:
query = query.filter(deactivated=False)
d = {}
@@ -1121,6 +1131,7 @@ def get_realm_emoji_dicts(realm: Realm, only_active_emojis: bool = False) -> Dic
author_id = None
if realm_emoji.author:
author_id = realm_emoji.author_id
+ assert realm_emoji.file_name is not None
emoji_url = get_emoji_url(realm_emoji.file_name, realm_emoji.realm_id)
emoji_dict: EmojiInfo = dict(
@@ -1159,6 +1170,16 @@ def get_active_realm_emoji_uncached(realm: Realm) -> Dict[str, EmojiInfo]:
def flush_realm_emoji(*, instance: RealmEmoji, **kwargs: object) -> None:
+ if instance.file_name is None:
+ # Because we construct RealmEmoji.file_name using the ID for
+ # the RealmEmoji object, it will always have file_name=None,
+ # and then it'll be updated with the actual filename as soon
+ # as the upload completes successfully.
+ #
+ # Doing nothing when file_name=None is the best option, since
+ # such an object shouldn't have been cached yet, and this
+ # function will be called again when file_name is set.
+ return
realm = instance.realm
cache_set(
get_realm_emoji_cache_key(realm), get_realm_emoji_uncached(realm), timeout=3600 * 24 * 7
| diff --git a/zerver/tests/test_realm_emoji.py b/zerver/tests/test_realm_emoji.py
--- a/zerver/tests/test_realm_emoji.py
+++ b/zerver/tests/test_realm_emoji.py
@@ -23,7 +23,7 @@ def create_test_emoji(self, name: str, author: UserProfile) -> RealmEmoji:
return realm_emoji
def create_test_emoji_with_no_author(self, name: str, realm: Realm) -> RealmEmoji:
- realm_emoji = RealmEmoji.objects.create(realm=realm, name=name)
+ realm_emoji = RealmEmoji.objects.create(realm=realm, name=name, file_name=name)
return realm_emoji
def test_list(self) -> None:
| cache: `post_save` signal race condition on `RealmEmoji`
We use the `post_save` signal to flush the realm emoji cache whenever we call `realm_emoji.save()`. The signal handler `zerver.models.flush_realm_emoji` flushes the cache whenever this signal is triggered. However, the value of the cache can be invalid when the file name is `None`.
To replicate, add an assertion `assert realm_emoji.file_name is not None` in `get_realm_emoji_dicts` before `emoji_url = get_emoji_url(realm_emoji.file_name, realm_emoji.realm_id)`:
```diff
diff --git a/zerver/models.py b/zerver/models.py
index 973f54758a..9dfadbe279 100644
--- a/zerver/models.py
+++ b/zerver/models.py
@@ -1147,6 +1147,7 @@ def get_realm_emoji_dicts(realm: Realm, only_active_emojis: bool = False) -> Dic
author_id = None
if realm_emoji.author:
author_id = realm_emoji.author_id
+ assert realm_emoji.file_name is not None
emoji_url = get_emoji_url(realm_emoji.file_name, realm_emoji.realm_id)
emoji_dict: EmojiInfo = dict(
```
and then run `./manage.py populate_db`.
You should see an error like this:
```python
2022-07-21 19:15:23.615 INFO [bmemcached.protocol] Flushing memcached
Traceback (most recent call last):
File "./manage.py", line 152, in <module>
execute_from_command_line(sys.argv)
File "./manage.py", line 117, in execute_from_command_line
utility.execute()
File "/srv/zulip-py3-venv/lib/python3.8/site-packages/django/core/management/__init__.py", line 440, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/srv/zulip-py3-venv/lib/python3.8/site-packages/django/core/management/base.py", line 414, in run_from_argv
self.execute(*args, **cmd_options)
File "/srv/zulip-py3-venv/lib/python3.8/site-packages/django/core/management/base.py", line 460, in execute
output = self.handle(*args, **options)
File "/home/pig208/zulip/zilencer/management/commands/populate_db.py", line 792, in handle
check_add_realm_emoji(zulip_realm, "green_tick", iago, File(fp))
File "/home/pig208/zulip/zerver/actions/realm_emoji.py", line 25, in check_add_realm_emoji
realm_emoji.save()
File "/srv/zulip-py3-venv/lib/python3.8/site-packages/django/db/models/base.py", line 806, in save
self.save_base(
File "/srv/zulip-py3-venv/lib/python3.8/site-packages/django/db/models/base.py", line 872, in save_base
post_save.send(
File "/srv/zulip-py3-venv/lib/python3.8/site-packages/django/dispatch/dispatcher.py", line 176, in send
return [
File "/srv/zulip-py3-venv/lib/python3.8/site-packages/django/dispatch/dispatcher.py", line 177, in <listcomp>
(receiver, receiver(signal=self, sender=sender, **named))
File "/home/pig208/zulip/zerver/models.py", line 1181, in flush_realm_emoji
get_realm_emoji_cache_key(realm), get_realm_emoji_uncached(realm), timeout=3600 * 24 * 7
File "/home/pig208/zulip/zerver/models.py", line 1167, in get_realm_emoji_uncached
return get_realm_emoji_dicts(realm)
File "/home/pig208/zulip/zerver/models.py", line 1139, in get_realm_emoji_dicts
assert realm_emoji.file_name is not None
AssertionError
```
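To make the race easier to see in isolation, here is a small self-contained sketch of the two-phase save pattern that produces it; the class and function names are made up for illustration and this is not Zulip's actual code:
```python
from typing import Callable, List, Optional

class FakeRealmEmoji:
    """Stand-in for RealmEmoji: file_name is unknown until after the first save."""
    def __init__(self) -> None:
        self.id: Optional[int] = None
        self.file_name: Optional[str] = None

def flush_cache(emoji: FakeRealmEmoji) -> None:
    # Mirrors the fix above: a half-created row (file_name=None) must be
    # skipped; this hook runs again once file_name has been filled in.
    if emoji.file_name is None:
        return
    print(f"cache refreshed with {emoji.file_name}")

post_save_hooks: List[Callable[[FakeRealmEmoji], None]] = [flush_cache]

def save(emoji: FakeRealmEmoji) -> None:
    if emoji.id is None:
        emoji.id = 1  # the database assigns the id on the first save
    for hook in post_save_hooks:
        hook(emoji)

emoji = FakeRealmEmoji()
save(emoji)                           # first save: file_name is still None, hook must skip
emoji.file_name = f"{emoji.id}.png"   # the file name depends on the id, so it is set afterwards
save(emoji)                           # second save: the cache can now be refreshed safely
```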
| Hello @zulip/server-emoji members, this issue was labeled with the "area: emoji" label, so you may want to check it out!
<!-- areaLabelAddition -->
| 2022-08-05T21:15:32 |
zulip/zulip | 22,834 | zulip__zulip-22834 | [
"19596"
] | b6a4e38c9d877bac8d87c8f853fb8e894f8df51b | diff --git a/zerver/lib/remote_server.py b/zerver/lib/remote_server.py
--- a/zerver/lib/remote_server.py
+++ b/zerver/lib/remote_server.py
@@ -161,7 +161,7 @@ def send_analytics_to_remote_server() -> None:
try:
result = send_to_push_bouncer("GET", "server/analytics/status", {})
except PushNotificationBouncerRetryLaterError as e:
- logging.warning(e.msg)
+ logging.warning(e.msg, exc_info=True)
return
last_acked_realm_count_id = result["last_realm_count_id"]
diff --git a/zerver/middleware.py b/zerver/middleware.py
--- a/zerver/middleware.py
+++ b/zerver/middleware.py
@@ -16,6 +16,7 @@
from django.utils import translation
from django.utils.cache import patch_vary_headers
from django.utils.deprecation import MiddlewareMixin
+from django.utils.log import log_response
from django.utils.translation import gettext as _
from django.views.csrf import csrf_failure as html_csrf_failure
from django_scim.middleware import SCIMAuthCheckMiddleware
@@ -465,7 +466,22 @@ def process_exception(
return json_unauthorized(www_authenticate="session")
if isinstance(exception, JsonableError):
- return json_response_from_error(exception)
+ response = json_response_from_error(exception)
+ if response.status_code >= 500:
+ # Here we use Django's log_response the way Django uses
+ # it normally to log error responses. However, we make the small
+ # modification of including the traceback to make the log message
+ # more helpful. log_response takes care of knowing not to duplicate
+ # the logging, so Django won't generate a second log message.
+ log_response(
+ "%s: %s",
+ response.reason_phrase,
+ request.path,
+ response=response,
+ request=request,
+ exc_info=True,
+ )
+ return response
if RequestNotes.get_notes(request).error_format == "JSON" and not settings.TEST_SUITE:
capture_exception(exception)
json_error_logger = logging.getLogger("zerver.middleware.json_error_handler")
| diff --git a/zerver/tests/test_push_notifications.py b/zerver/tests/test_push_notifications.py
--- a/zerver/tests/test_push_notifications.py
+++ b/zerver/tests/test_push_notifications.py
@@ -460,11 +460,9 @@ def test_push_bouncer_api(self) -> None:
"ConnectionError while trying to connect to push notification bouncer",
502,
)
- self.assertEqual(
- error_log.output,
- [
- f"ERROR:django.request:Bad Gateway: {endpoint}",
- ],
+ self.assertIn(
+ f"ERROR:django.request:Bad Gateway: {endpoint}\nTraceback",
+ error_log.output[0],
)
with responses.RequestsMock() as resp, self.assertLogs(level="WARNING") as warn_log:
@@ -472,11 +470,11 @@ def test_push_bouncer_api(self) -> None:
result = self.client_post(endpoint, {"token": token}, subdomain="zulip")
self.assert_json_error(result, "Received 500 from push notification bouncer", 502)
self.assertEqual(
- warn_log.output,
- [
- "WARNING:root:Received 500 from push notification bouncer",
- f"ERROR:django.request:Bad Gateway: {endpoint}",
- ],
+ warn_log.output[0],
+ "WARNING:root:Received 500 from push notification bouncer",
+ )
+ self.assertIn(
+ f"ERROR:django.request:Bad Gateway: {endpoint}\nTraceback", warn_log.output[1]
)
# Add tokens
@@ -552,13 +550,12 @@ def test_analytics_api(self) -> None:
user = self.example_user("hamlet")
end_time = self.TIME_ZERO
- with responses.RequestsMock() as resp, mock.patch(
- "zerver.lib.remote_server.logging.warning"
- ) as mock_warning:
+ with responses.RequestsMock() as resp, self.assertLogs(level="WARNING") as mock_warning:
resp.add(responses.GET, ANALYTICS_STATUS_URL, body=ConnectionError())
send_analytics_to_remote_server()
- mock_warning.assert_called_once_with(
- "ConnectionError while trying to connect to push notification bouncer"
+ self.assertIn(
+ "WARNING:root:ConnectionError while trying to connect to push notification bouncer\nTraceback ",
+ mock_warning.output[0],
)
self.assertTrue(resp.assert_call_count(ANALYTICS_STATUS_URL, 1))
| obscure error messages when zulip push notification gateway is unreachable
Our Zulip server is hosted in a DMZ.
Accidentally, the firewall rule allowing outgoing connections to push.zulipchat.com was not active. We block all outgoing connections by default, as we do not want to be some "hop" or source of spam and illegal activity if some machine inside the DMZ gets hacked.
We did not take any notice of this, and users didn't complain because push notifications are not that important to them.
But we started to wonder why we got weird and very intermittent notification mails from the Django module, like this:
```
Subject: [Django] chat:Bad Gateway:/api/v1/users/me/apns_device_token
Logger django.request, from module django.utils.log line 230:
Error generated by <**username**> <**user email**> (Member) on <**servername**> deployment
No stack trace available
Deployed code:
- git: None
- ZULIP_VERSION: 4.3
Request info:
- path: /api/v1/users/me/apns_device_token
- POST: {'token': ['<**token string**>'], 'appid': ['org.zulip.Zulip']}
- REMOTE_ADDR: "<**IP**>"
- QUERY_STRING: ""
- SERVER_NAME: ""
```
In the server log, there is proper logging of the gateway being unreachable:
```
2021-08-17 00:14:59.285 WARN [] ConnectionError while trying to connect to push notification bouncer
2021-08-17 01:06:19.380 WARN [] ConnectionError while trying to connect to push notification bouncer
2021-08-17 02:05:03.906 WARN [] ConnectionError while trying to connect to push notification bouncer
```
It looks like there is some room for improvement:
1. Don't send the weird Django error message.
2. Send a proper notification to the admin and/or to the end user if the push notification message could not be sent.
Also see https://chat.zulip.org/#narrow/stream/31-production-help/topic/.5BDjango.5D.20chat.3ABad.20Gateway.3A.2Fapi.2Fv1.2Fusers.2Fme.2Fapns_device_token/near/1246328
| Hello @zulip/server-api, @zulip/server-production members, this issue was labeled with the "area: production", "area: api" labels, so you may want to check it out!
<!-- areaLabelAddition -->
That error email does indeed seem pretty messy for what is fundamentally a networking issue connecting to the [mobile push notifications service](https://zulip.readthedocs.io/en/latest/production/mobile-push-notifications.html). The code path is here:
```
$ git grep 'while trying to connect'
zerver/lib/remote_server.py: f"{e.__class__.__name__} while trying to connect to push notification bouncer"
zerver/tests/test_push_notifications.py: "ConnectionError while trying to connect to push notification bouncer",
zerver/tests/test_push_notifications.py: "ConnectionError while trying to connect to push notification bouncer"
```
And my guess is that we're somewhere raising an exception or doing a `logging.error` type call without passing a proper stack trace. We do have a test that suggests that's the case:
```
$ git grep 'Bad Gateway'
zerver/tests/test_push_notifications.py: f"ERROR:django.request:Bad Gateway: {endpoint}",
zerver/tests/test_push_notifications.py: f"ERROR:django.request:Bad Gateway: {endpoint}",
```
It should be possible to reproduce and debug in a Zulip development environment after registering for the push notifications service, though I don't think Django will try to actually send those error reporting emails in that setting.
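For what it's worth, the improvement here mostly comes down to attaching the exception to the log record. A tiny standalone illustration of the difference (not the actual Zulip code path):
```python
import logging

logging.basicConfig(level=logging.WARNING)

try:
    raise ConnectionError("connection refused")
except ConnectionError:
    # This logs only the one-line message, matching the unhelpful reports above.
    logging.warning("ConnectionError while trying to connect to push notification bouncer")
    # Passing exc_info=True makes the same call also record the full traceback,
    # which is what turns the report into something actionable.
    logging.warning(
        "ConnectionError while trying to connect to push notification bouncer",
        exc_info=True,
    )
```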
Any update on this error? We are having the same problem; the only difference is we are running Zulip 6.0 instead.
As noted above, the error itself is a networking problem -- you likely have a firewall preventing your server from connecting to the push notifications service.
I'll try to make sure improving the traceback to provide more insight into the networking issue happens; it should be easy to do and backport. | 2022-08-31T17:56:12 |
zulip/zulip | 22,847 | zulip__zulip-22847 | [
"21949"
] | 28173cafc86c531fe52b5403cba3e756181b4517 | diff --git a/zerver/lib/url_redirects.py b/zerver/lib/url_redirects.py
--- a/zerver/lib/url_redirects.py
+++ b/zerver/lib/url_redirects.py
@@ -21,6 +21,10 @@ class URLRedirect:
HELP_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
# Add URL redirects for help center documentation here:
+ URLRedirect(
+ "/help/change-the-default-language-for-your-organization",
+ "/help/configure-organization-language",
+ ),
URLRedirect("/help/delete-a-stream", "/help/archive-a-stream"),
URLRedirect("/help/change-the-topic-of-a-message", "/help/rename-a-topic"),
URLRedirect("/help/configure-missed-message-emails", "/help/email-notifications"),
| Clean up documentation around organization notifications language
After merging #20866, we have a bunch of follow-up cleanup needed:
* [x] We should fix the /help/configure-notification-bot article to cover all the notifications that it sends, maybe not exhaustively but at least in terms of their categories (e.g., notifications when stream settings are changed). Also, we should think about whether it'd be better to make the "organization notifications language" article instead be a section within that, since 80% of what this does is configure Notification bot.
* [ ] We should rename the page from `/help/change-the-default-language-for-your-organization` to something reflecting the new name as a "notifications language", with a redirect.
* [ ] We should open an issue for changing how invitations work to allow explicitly setting a language to be used in outgoing notification emails, with this being the default (I'm not sure it's urgent, but that seems more logical). Or maybe start with a #design discussion to confirm we think this is a good idea.
* [ ] Both language articles advertise contributing `support for new languages on **[Transifex](https://www.transifex.com/zulip/zulip/)**.`; we should probably adjust that language to match the nicer language we have in the language picker itself. (In particular, it's a bad experience to start out on Transifex, since it has no instructions). (I also opened https://github.com/zulip/zulip/issues/21948 for a tangent off this).
* [ ] We should check whether there's anything I missed in updating the documentation for #20866; e.g. maybe the "pick your language" page should mention that we detect your initial language from your browser's `Accept-language` header (likely in a less technical framing).
I'll also note I started https://chat.zulip.org/#narrow/stream/101-design/topic/language.20in.20invitation.20emails/near/1375528 for a question that could result in some further tweaks to the documentation here.
@laurynmm assigned this to you, since you have a ton of context for this.
---
Update by @alya :
Related issues, which will require additional changes to be made to the same help page(s): https://github.com/zulip/zulip/issues/22188 and https://github.com/zulip/zulip/issues/22136.
| Hello @zulip/server-user-docs members, this issue was labeled with the "area: documentation (user)" label, so you may want to check it out!
<!-- areaLabelAddition -->
| 2022-09-02T09:57:12 |
|
zulip/zulip | 22,908 | zulip__zulip-22908 | [
"22905"
] | ce9ceb7f9f88d8bc05d0f4b80ac76639fd8e06eb | diff --git a/tools/lib/capitalization.py b/tools/lib/capitalization.py
--- a/tools/lib/capitalization.py
+++ b/tools/lib/capitalization.py
@@ -109,6 +109,8 @@
# Use in compose box.
r"to send",
r"to add a new line",
+ # Used in showing Notification Bot read receipts message
+ "Notification Bot",
]
# Sort regexes in descending order of their lengths. As a result, the
| Do not show read receipts for Notification Bot messages
As [discussed on CZO](https://chat.zulip.org/#narrow/stream/101-design/topic/read-receipts.20UI.20.2318935/near/1429360), Notification Bot messages about resolved/unresolved topics have confusing read receipts. Because those messages are immediately marked as read for all non-participants in the thread, it looks like many people have immediately read the message.
To fix this, we should disable showing read receipts for messages sent by Notification Bot. Instead, "This message has been read by X people:" should be replaced with:
> Read receipts are not available for Notification Bot messages.
No names should be shown.
| @zulipbot claim | 2022-09-09T18:27:01 |
|
zulip/zulip | 22,988 | zulip__zulip-22988 | [
"22821"
] | 09c6ee6468e302b5c9d09fb40e6955c1d928e408 | diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -33,7 +33,7 @@
# Changes should be accompanied by documentation explaining what the
# new level means in templates/zerver/api/changelog.md, as well as
# "**Changes**" entries in the endpoint's documentation in `zulip.yaml`.
-API_FEATURE_LEVEL = 146
+API_FEATURE_LEVEL = 147
# Bump the minor PROVISION_VERSION to indicate that folks should provision
# only when going from an old version of the code to a newer version. Bump
diff --git a/zerver/views/streams.py b/zerver/views/streams.py
--- a/zerver/views/streams.py
+++ b/zerver/views/streams.py
@@ -55,6 +55,7 @@
from zerver.lib.mention import MentionBackend, silent_mention_syntax_for_user
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
+from zerver.lib.retention import STREAM_MESSAGE_BATCH_SIZE as RETENTION_STREAM_MESSAGE_BATCH_SIZE
from zerver.lib.retention import parse_message_retention_days
from zerver.lib.streams import (
StreamDict,
@@ -848,7 +849,6 @@ def get_topics_backend(
return json_success(request, data=dict(topics=result))
[email protected]
@require_realm_admin
@has_request_variables
def delete_in_topic(
@@ -857,7 +857,7 @@ def delete_in_topic(
stream_id: int = REQ(converter=to_non_negative_int, path_only=True),
topic_name: str = REQ("topic_name"),
) -> HttpResponse:
- (stream, sub) = access_stream_by_id(user_profile, stream_id)
+ stream, ignored_sub = access_stream_by_id(user_profile, stream_id)
messages = messages_for_topic(assert_is_not_none(stream.recipient_id), topic_name)
if not stream.is_history_public_to_subscribers():
@@ -867,9 +867,20 @@ def delete_in_topic(
).values_list("message_id", flat=True)
messages = messages.filter(id__in=deletable_message_ids)
- messages = messages.select_for_update(of=("self",))
-
- do_delete_messages(user_profile.realm, messages)
+ # Topics can be large enough that this request will inevitably time out.
+ # In such a case, it's good for some progress to be accomplished, so that
+ # full deletion can be achieved by repeating the request. For that purpose,
+ # we delete messages in atomic batches, committing after each batch.
+ # TODO: Ideally this should be moved to the deferred_work queue.
+ batch_size = RETENTION_STREAM_MESSAGE_BATCH_SIZE
+ while True:
+ with transaction.atomic(durable=True):
+ messages_to_delete = messages.order_by("-id")[0:batch_size].select_for_update(
+ of=("self",)
+ )
+ if not messages_to_delete:
+ break
+ do_delete_messages(user_profile.realm, messages_to_delete)
return json_success(request)
| Unable to delete a topic
Hi! I'm attempting to delete a large (over 50k messages) topic that contains alerts and log quotations sent by elastalert. When deleting it from the web GUI, the delete request gets stuck for about a minute and then I get a 502 with no entry in the Zulip logs. Messages from other topics, and entire other topics, can still be deleted; only this one remains untouched.
Is there any way to delete such large topics? Can I delete messages from the database directly?
| Hello @zulip/server-message-view members, this issue was labeled with the "area: message-editing" label, so you may want to check it out!
<!-- areaLabelAddition -->
Yikes, 50k messages in a topic is a lot of log spam. You can likely delete it using `do_delete_messages` via a management command shell (https://zulip.readthedocs.io/en/latest/production/management-commands.html), which would avoid the time limit for requests that we have configured.
The other option is to modify `/etc/zulip/uwsgi.conf` to dramatically increase the timeout (the setting is called `harakiri` in that file; you'll then need to use `scripts/restart-server` to restart the server) and see if that's good enough; might be easier.
@timabbott Thanks for your response! I'm a bit confused; manage.py doesn't show such a command:
```
zulip@f866a8398bf6:~/deployments/current$ ./manage.py help do_delete_messages
Unknown command: 'do_delete_messages'. Did you mean restore_messages?
```
And yes, I'm running Zulip in Docker, if it's relevant.
@timabbott I increased harakiri to about 3 hours as you suggested, but it still fails after the time is up; it looks like it takes much more... Could you point out how that `do_delete_messages` can be used? Or is it possible to delete messages from the database directly?
To make it easier to address follow-up questions, I [started a thread in the development community](https://chat.zulip.org/#narrow/stream/31-production-help/topic/Unable.20to.20delete.20a.20topic.20.2322821/near/1432762).
I said "management command shell" - I meant `manage.py shell`, which is documented on that page. `do_delete_messages` is a function you need to call there; it's a Python shell. If you need help scripting that, I recommend getting a support contract.
In terms of a bug fix for Zulip, we'll likely just want to paginate the queries to do blocks of 1k messages in the topic as individual transactions. @mateuszmandera perhaps you can look at whether this is easy to do. I think it'd be a significant improvement even if it still times out, if it just manages to make clear progress before doing so, since that at least would offer a UI level workaround of doing it repeatedly. Eventually we'll need to move to the `deferred_work` queue processor. | 2022-09-17T21:13:09 |
|
zulip/zulip | 23,039 | zulip__zulip-23039 | [
"9957",
"9957"
] | 62015a0b482ecd416b9d9cbbb54960af8ddaf70b | diff --git a/zproject/backends.py b/zproject/backends.py
--- a/zproject/backends.py
+++ b/zproject/backends.py
@@ -80,6 +80,10 @@
from zerver.actions.create_user import do_create_user, do_reactivate_user
from zerver.actions.custom_profile_fields import do_update_user_custom_profile_data_if_changed
+from zerver.actions.user_groups import (
+ bulk_add_members_to_user_groups,
+ bulk_remove_members_from_user_groups,
+)
from zerver.actions.user_settings import do_regenerate_api_key
from zerver.actions.users import do_deactivate_user
from zerver.lib.avatar import avatar_url, is_avatar_new
@@ -105,6 +109,8 @@
PreregistrationRealm,
PreregistrationUser,
Realm,
+ UserGroup,
+ UserGroupMembership,
UserProfile,
custom_profile_fields_for_realm,
get_realm,
@@ -910,6 +916,78 @@ def sync_custom_profile_fields_from_ldap(
except SyncUserError as e:
raise ZulipLDAPError(str(e)) from e
+ def sync_groups_from_ldap(self, user_profile: UserProfile, ldap_user: _LDAPUser) -> None:
+ """
+ For the groups set up for syncing for the realm in LDAP_SYNCHRONIZED_GROUPS_BY_REALM:
+
+ (1) Makes sure the user has membership in the Zulip UserGroups corresponding
+ to the LDAP groups ldap_user belongs to.
+ (2) Makes sure the user doesn't have membership in the Zulip UserGroups corresponding
+ to the LDAP groups ldap_user doesn't belong to.
+ """
+
+ if user_profile.realm.string_id not in settings.LDAP_SYNCHRONIZED_GROUPS_BY_REALM:
+ # no groups to sync for this realm
+ return
+
+ configured_ldap_group_names_for_sync = set(
+ settings.LDAP_SYNCHRONIZED_GROUPS_BY_REALM[user_profile.realm.string_id]
+ )
+
+ try:
+ ldap_logger.debug("Syncing groups for user: %s", user_profile.id)
+ intended_group_name_set_for_user = set(ldap_user.group_names).intersection(
+ configured_ldap_group_names_for_sync
+ )
+
+ existing_group_name_set_for_user = set(
+ UserGroupMembership.objects.filter(
+ user_group__realm=user_profile.realm,
+ user_group__name__in=set(
+ settings.LDAP_SYNCHRONIZED_GROUPS_BY_REALM[user_profile.realm.string_id]
+ ),
+ user_profile=user_profile,
+ ).values_list("user_group__name", flat=True)
+ )
+
+ ldap_logger.debug(
+ "intended groups: %s; zulip groups: %s",
+ repr(intended_group_name_set_for_user),
+ repr(existing_group_name_set_for_user),
+ )
+
+ new_groups = UserGroup.objects.filter(
+ name__in=intended_group_name_set_for_user.difference(
+ existing_group_name_set_for_user
+ ),
+ realm=user_profile.realm,
+ )
+ if new_groups:
+ ldap_logger.debug(
+ "add %s to %s", user_profile.id, [group.name for group in new_groups]
+ )
+ bulk_add_members_to_user_groups(new_groups, [user_profile.id], acting_user=None)
+
+ group_names_for_membership_deletion = existing_group_name_set_for_user.difference(
+ intended_group_name_set_for_user
+ )
+ groups_for_membership_deletion = UserGroup.objects.filter(
+ name__in=group_names_for_membership_deletion, realm=user_profile.realm
+ )
+
+ if group_names_for_membership_deletion:
+ ldap_logger.debug(
+ "removing groups %s from %s",
+ group_names_for_membership_deletion,
+ user_profile.id,
+ )
+ bulk_remove_members_from_user_groups(
+ groups_for_membership_deletion, [user_profile.id], acting_user=None
+ )
+
+ except Exception as e:
+ raise ZulipLDAPError(str(e)) from e
+
class ZulipLDAPAuthBackend(ZulipLDAPAuthBackendBase):
REALM_IS_NONE_ERROR = 1
@@ -1136,6 +1214,7 @@ def get_or_build_user(
self.sync_avatar_from_ldap(user, ldap_user)
self.sync_full_name_from_ldap(user, ldap_user)
self.sync_custom_profile_fields_from_ldap(user, ldap_user)
+ self.sync_groups_from_ldap(user, ldap_user)
return (user, built)
diff --git a/zproject/default_settings.py b/zproject/default_settings.py
--- a/zproject/default_settings.py
+++ b/zproject/default_settings.py
@@ -2,6 +2,8 @@
from email.headerregistry import Address
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple
+from django_auth_ldap.config import GroupOfUniqueNamesType, LDAPGroupType
+
from scripts.lib.zulip_tools import deport
from zproject.settings_types import JwtAuthKey, OIDCIdPConfigDict, SAMLIdPConfigDict
@@ -69,6 +71,8 @@
FAKE_LDAP_MODE: Optional[str] = None
FAKE_LDAP_NUM_USERS = 8
AUTH_LDAP_ADVANCED_REALM_ACCESS_CONTROL: Optional[Dict[str, Any]] = None
+LDAP_SYNCHRONIZED_GROUPS_BY_REALM: Dict[str, List[str]] = {}
+AUTH_LDAP_GROUP_TYPE: LDAPGroupType = GroupOfUniqueNamesType()
# Social auth; we support providing values for some of these
# settings in zulip-secrets.conf instead of settings.py in development.
diff --git a/zproject/prod_settings_template.py b/zproject/prod_settings_template.py
--- a/zproject/prod_settings_template.py
+++ b/zproject/prod_settings_template.py
@@ -266,6 +266,20 @@
# ]
# }
+
+## LDAP group sync configuration.
+## See: https://zulip.readthedocs.io/en/latest/production/authentication-methods.html#synchronizing-groups
+# AUTH_LDAP_GROUP_TYPE = GroupOfUniqueNamesType()
+# AUTH_LDAP_GROUP_SEARCH = LDAPSearch(
+# "ou=groups,dc=www,dc=example,dc=com", ldap.SCOPE_SUBTREE, "(objectClass=groupOfUniqueNames)"
+# )
+# LDAP_SYNCHRONIZED_GROUPS_BY_REALM = {
+# "subdomain1" : [
+# "group1",
+# "group2",
+# ]
+# }
+
########
## Google OAuth.
##
| diff --git a/zerver/tests/fixtures/ldap/directory.json b/zerver/tests/fixtures/ldap/directory.json
--- a/zerver/tests/fixtures/ldap/directory.json
+++ b/zerver/tests/fixtures/ldap/directory.json
@@ -77,5 +77,24 @@
"uid": ["user2_with_shared_email"],
"sn": ["shortname"],
"mail": ["[email protected]"]
+ },
+
+ "ou=groups,dc=zulip,dc=com": {
+ "ou": "groups"
+ },
+ "cn=cool_test_group,ou=groups,dc=zulip,dc=com": {
+ "objectClass": ["groupOfUniqueNames"],
+ "cn": ["cool_test_group"],
+ "uniqueMember": [
+ "uid=hamlet,ou=users,dc=zulip,dc=com"
+ ]
+ },
+ "cn=another_test_group,ou=groups,dc=zulip,dc=com": {
+ "objectClass": ["groupOfUniqueNames"],
+ "cn": ["another_test_group"],
+ "uniqueMember": [
+ "uid=hamlet,ou=users,dc=zulip,dc=com",
+ "uid=cordelia,ou=users,dc=zulip,dc=com"
+ ]
}
}
diff --git a/zerver/tests/test_auth_backends.py b/zerver/tests/test_auth_backends.py
--- a/zerver/tests/test_auth_backends.py
+++ b/zerver/tests/test_auth_backends.py
@@ -63,6 +63,10 @@
do_set_realm_authentication_methods,
do_set_realm_property,
)
+from zerver.actions.user_groups import (
+ bulk_add_members_to_user_groups,
+ create_user_group_in_database,
+)
from zerver.actions.user_settings import do_change_password, do_change_user_setting
from zerver.actions.users import change_user_is_active, do_deactivate_user
from zerver.lib.avatar import avatar_url
@@ -89,6 +93,7 @@
)
from zerver.lib.types import Validator
from zerver.lib.upload.base import DEFAULT_AVATAR_SIZE, MEDIUM_AVATAR_SIZE, resize_avatar
+from zerver.lib.user_groups import is_user_in_group
from zerver.lib.users import get_all_api_keys, get_api_key, get_raw_user_data
from zerver.lib.utils import assert_is_not_none
from zerver.lib.validator import (
@@ -109,6 +114,7 @@
Realm,
RealmDomain,
Stream,
+ UserGroup,
UserProfile,
clear_supported_auth_backends_cache,
get_realm,
@@ -7248,5 +7254,136 @@ def test_invalid_realm_for_user_failure(self) -> None:
self.assert_json_error_contains(result, "Invalid subdomain", 404)
+class LDAPGroupSyncTest(ZulipTestCase):
+ @override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
+ def test_ldap_group_sync(self) -> None:
+ self.init_default_ldap_database()
+
+ hamlet = self.example_user("hamlet")
+ with self.settings(LDAP_APPEND_DOMAIN="zulip.com"):
+ result = sync_user_from_ldap(hamlet, mock.Mock())
+ self.assertTrue(result)
+ self.assertTrue(hamlet.is_active)
+
+ realm = get_realm("zulip")
+
+ with self.settings(
+ AUTH_LDAP_GROUP_SEARCH=LDAPSearch(
+ "ou=groups,dc=zulip,dc=com",
+ ldap.SCOPE_ONELEVEL,
+ "(objectClass=groupOfUniqueNames)",
+ ),
+ LDAP_SYNCHRONIZED_GROUPS_BY_REALM={
+ "zulip": [
+ "cool_test_group",
+ ]
+ },
+ LDAP_APPEND_DOMAIN="zulip.com",
+ ), self.assertLogs("zulip.ldap", "DEBUG") as zulip_ldap_log:
+ self.assertFalse(UserGroup.objects.filter(realm=realm, name="cool_test_group").exists())
+
+ create_user_group_in_database(
+ "cool_test_group", [], realm, acting_user=None, description="Created by LDAP sync"
+ )
+
+ self.assertTrue(UserGroup.objects.filter(realm=realm, name="cool_test_group").exists())
+
+ user_group = UserGroup.objects.get(realm=realm, name="cool_test_group")
+
+ self.assertFalse(
+ is_user_in_group(
+ user_group,
+ hamlet,
+ direct_member_only=True,
+ )
+ )
+
+ sync_user_from_ldap(hamlet, mock.Mock())
+ self.assertTrue(
+ is_user_in_group(
+ user_group,
+ hamlet,
+ direct_member_only=True,
+ )
+ )
+
+ # Add a user to a Zulip group that they are not member of in ldap.
+ # This implies that they should be deleted from the Zulip group
+ # upon the next sync.
+ cordelia = self.example_user("cordelia")
+ bulk_add_members_to_user_groups(
+ [user_group],
+ [cordelia.id],
+ acting_user=None,
+ )
+
+ self.assertTrue(
+ is_user_in_group(
+ UserGroup.objects.get(realm=realm, name="cool_test_group"),
+ cordelia,
+ direct_member_only=True,
+ )
+ )
+
+ # This should remove cordelia from cool_test_group
+ sync_user_from_ldap(cordelia, mock.Mock())
+
+ self.assertFalse(
+ is_user_in_group(
+ UserGroup.objects.get(realm=realm, name="cool_test_group"),
+ cordelia,
+ direct_member_only=True,
+ )
+ )
+
+ hamlet = self.example_user("hamlet")
+ cordelia = self.example_user("cordelia")
+
+ self.assertEqual(
+ zulip_ldap_log.output,
+ [
+ f"DEBUG:zulip.ldap:Syncing groups for user: {hamlet.id}",
+ "DEBUG:zulip.ldap:intended groups: {'cool_test_group'}; zulip groups: set()",
+ f"DEBUG:zulip.ldap:add {hamlet.id} to ['cool_test_group']",
+ f"DEBUG:zulip.ldap:Syncing groups for user: {cordelia.id}",
+ "DEBUG:zulip.ldap:intended groups: set(); zulip groups: {'cool_test_group'}",
+ f"DEBUG:zulip.ldap:removing groups {{'cool_test_group'}} from {cordelia.id}",
+ ],
+ )
+
+ # Test an exception using a malformed ldap group search setting.
+ with self.settings(
+ AUTH_LDAP_GROUP_SEARCH=LDAPSearch(
+ "ou=groups,dc=zulip,dc=com",
+ ldap.SCOPE_ONELEVEL,
+ "(objectClass=groupOfUniqueNames", # this is malformed, missing ")"
+ ),
+ LDAP_SYNCHRONIZED_GROUPS_BY_REALM={
+ "zulip": [
+ "cool_test_group",
+ ]
+ },
+ LDAP_APPEND_DOMAIN="zulip.com",
+ ), self.assertLogs("django_auth_ldap", "WARN") as django_ldap_log, self.assertLogs(
+ "zulip.ldap", "DEBUG"
+ ) as zulip_ldap_log:
+ with self.assertRaisesRegex(
+ ZulipLDAPError,
+ "search_s.*",
+ ):
+ sync_user_from_ldap(cordelia, mock.Mock())
+
+ self.assertEqual(
+ zulip_ldap_log.output,
+ [f"DEBUG:zulip.ldap:Syncing groups for user: {cordelia.id}"],
+ )
+ self.assertEqual(
+ django_ldap_log.output,
+ [
+ 'WARNING:django_auth_ldap:search_s("ou=groups,dc=zulip,dc=com", 1, "(&(objectClass=groupOfUniqueNames(uniqueMember=uid=cordelia,ou=users,dc=zulip,dc=com))", "None", 0) while authenticating cordelia',
+ ],
+ )
+
+
# Don't load the base class as a test: https://bugs.python.org/issue17519.
del SocialAuthBase
| Sync LDAP e-Mail distributor with user groups
Hi all,
We are currently evaluating Zulip. While setting up our test instance, I noticed that there is no way to sync our LDAP groups with Zulip user groups.
In our LDAP we have some e-mail groups which work as distribution lists; I'd like to use these as the basis for Zulip groups. For example, when someone is in the e-mail distribution list "[email protected]", I'd like to have them in the Zulip group "it".
| @H3npi thanks for the suggestion. This should be doable with the Zulip API, though it's probably worth adding a quick little management command to do this; it shouldn't be hard.
@timabbott, how can this be done via the API? I do not see an API call for setting/managing user groups
@H3npi there are API calls similar to https://zulipchat.com/api/get-user-groups for editing them; they just aren't documented yet.
(If you want to spelunk how to do it, `zerver/views/user_groups.py` in this project has the code)
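In case it helps anyone who wants to script this in the meantime, a rough sketch of driving those endpoints through the generic `call_endpoint` helper in the `zulip` Python bindings. The group id and user ids are placeholders, and the exact parameter encoding for the membership endpoint should be verified against the current API documentation before relying on it:
```python
import json
import zulip

client = zulip.Client(config_file="~/zuliprc")

group_id = 42            # placeholder: id of the target user group
users_to_add = [7, 8]    # placeholder: Zulip user ids pulled from your LDAP group

# List parameters are sent as JSON-encoded strings in the form data.
result = client.call_endpoint(
    url=f"user_groups/{group_id}/members",
    method="POST",
    request={"add": json.dumps(users_to_add)},
)
print(result)
```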
Hi,
Is the *group syncing with LDAP* actively being worked on or planned to be included in a future release? I believe that together with https://github.com/zulip/zulip/issues/9480 this is a very much desired feature. A scenario could be that a new user is added on the LDAP backend, can log in to Zulip, and automatically joins the streams which fit their LDAP groups.
@lreiher would a better solution for your use case involve just syncing initial stream subscriptions from some LDAP groups?
I am not so sure if I understand your comment right. Ideally, every LDAP group in a specific search scope would create a new user group in Zulip. Specific streams could then be made accessible to this user group by #9480. I guess that whenever Zulip syncs user data, it should then also sync group data to reflect the most recent changes to those groups.
Hello @zulip/server-authentication, @zulip/server-settings members, this issue was labeled with the "area: authentication", "area: settings (admin/org)" labels, so you may want to check it out!
<!-- areaLabelAddition -->
>(If you want to spelunk how to do it, zerver/views/user_groups.py in this project has the code)
@timabbott How would I go about using the API calls/functions in this file? They don't seem to be included in the `zulip` python package.
@jjblack just to check, is what you're looking for syncing Zulip streams with LDAP, or user groups (which are just groups of people you can mention) with LDAP? This issue is just for the "user groups" feature.
@timabbott I am looking for a way to sync Zulip user groups with LDAP groups.
Hi,
I am also looking for this feature - currently we add users to groups manually in my org, LDAP group integration would be really useful.
@timabbott I suggest it might be best to:
a) sync LDAP groups to Zulip groups (so they can be @mentioned),
and
b) allow adding entire Zulip groups to streams (this way the feature will work with other auth methods, not only with LDAP)
We are also looking for this feature.
It would be great if an LDAP group were synced with a stream and/or mapped to groups. In any case it would be necessary to synchronize stream users and LDAP groups.
Furthermore, support for group sync for SSO via SAML or OAuth would then be the perfect solution.
@mateuszmandera we should discuss the right model for this after the 2.1 release work is complete.
Please also include the options to map ldap groups to zulip admins. :-)
For the rest: zulip LDAP implementation is really nice, and specifically: so much better than the one from rocket! It's the reason we prefer zulip over it! It just needs these group features, to eliminate the need to configure things locally in zulip.
Thanks for making zulip available! :-)
> Please also include the options to map ldap groups to zulip admins. :-)
That feature is already available. In settings.py you can do this:
```
AUTH_LDAP_USER_FLAGS_BY_GROUP = {
"is_realm_admin": "cn=YourAdminGroup,ou=groups,dc=example,dc=com",
"is_staff": "cn=YourStaffGroup,ou=groups,dc=example,dc=com",
}
```
But I am still missing the possibility to map LDAP groups to user groups in Zulip.
I need this functionality as well! Is there any news on when this could get implemented? It kind of feels like the first thing you would want after LDAP login.
We also need this functionality and it would go hand in hand with #9480 in my opinion - LDAP sync of the groups and members, then assign those groups to streams. This would greatly simplify group and stream management in Zulip.
Any update on a target fix for the groups? We can manually update these, however it will make it harder to push for Zulip to be our production product without group changes in LDAP changing what streams people can see.
This is now a priority for the project, since it will be an important part of the overall vision for using user groups for all permissions in Zulip detailed in #19525.
@mateuszmandera can I add this to your TODO list?
Retagged this as a Zulip 6.0 release goal, since that's the release we're targeting #19525 for.
I know it's already moving, but just another +1 for this feature :) Glad to see it's right around the corner.
Any update? Apparently it has not been included in Zulip 6.0 :/
> Any update? Apparently it has not been included in Zulip 6.0 :/
I am working on partly implementing this feature, but it might take a while until it is merged. Maybe one or two more months.
The PR is almost done.
Just FYI, there is a follow-up PR to complete Rocketchat's sync feature:
https://github.com/zulip/zulip/pull/24490
(for anyone interested) | 2022-09-23T22:35:56 |
zulip/zulip | 23,094 | zulip__zulip-23094 | [
"22885"
] | c99a40803b4d1b85cf296b6012e9fc27ac48df05 | diff --git a/zerver/lib/email_notifications.py b/zerver/lib/email_notifications.py
--- a/zerver/lib/email_notifications.py
+++ b/zerver/lib/email_notifications.py
@@ -408,8 +408,15 @@ def do_send_missedmessage_events_reply_in_zulip(
triggers = [message["trigger"] for message in missed_messages]
unique_triggers = set(triggers)
+ personal_mentioned = any(
+ message["trigger"] == "mentioned" and message["mentioned_user_group_id"] is None
+ for message in missed_messages
+ )
+
context.update(
mention="mentioned" in unique_triggers or "wildcard_mentioned" in unique_triggers,
+ personal_mentioned=personal_mentioned,
+ wildcard_mentioned="wildcard_mentioned" in unique_triggers,
stream_email_notify="stream_email_notify" in unique_triggers,
mention_count=triggers.count("mentioned") + triggers.count("wildcard_mentioned"),
mentioned_user_group_name=mentioned_user_group_name,
@@ -476,6 +483,7 @@ def do_send_missedmessage_events_reply_in_zulip(
stream = Stream.objects.only("id", "name").get(id=message.recipient.type_id)
stream_header = f"{stream.name} > {message.topic_name()}"
context.update(
+ stream_name=stream.name,
stream_header=stream_header,
)
else:
| diff --git a/zerver/tests/test_email_notifications.py b/zerver/tests/test_email_notifications.py
--- a/zerver/tests/test_email_notifications.py
+++ b/zerver/tests/test_email_notifications.py
@@ -518,7 +518,7 @@ def _extra_context_in_missed_stream_messages_mention(
if show_message_content:
verify_body_include = [
"Othello, the Moor of Venice: > 1 > 2 > 3 > 4 > 5 > 6 > 7 > 8 > 9 > 10 > @**King Hamlet** -- ",
- "You are receiving this because you were mentioned in Zulip Dev.",
+ "You are receiving this because you were personally mentioned.",
]
email_subject = "#Denmark > test"
verify_body_does_not_include: List[str] = []
@@ -527,7 +527,7 @@ def _extra_context_in_missed_stream_messages_mention(
verify_body_include = [
"This email does not include message content because you have disabled message ",
"http://zulip.testserver/help/pm-mention-alert-notifications ",
- "View or reply in Zulip",
+ "View or reply in Zulip Dev Zulip",
" Manage email preferences: http://zulip.testserver/#settings/notifications",
]
@@ -538,7 +538,7 @@ def _extra_context_in_missed_stream_messages_mention(
"1 2 3 4 5 6 7 8 9 10 @**King Hamlet**",
"private",
"group",
- "Reply to this email directly, or view it in Zulip",
+ "Reply to this email directly, or view it in Zulip Dev Zulip",
]
self._test_cases(
msg_id,
@@ -561,7 +561,7 @@ def _extra_context_in_missed_stream_messages_wildcard_mention(
if show_message_content:
verify_body_include = [
"Othello, the Moor of Venice: > 1 > 2 > 3 > 4 > 5 > @**all** -- ",
- "You are receiving this because you were mentioned in Zulip Dev.",
+ "You are receiving this because everyone was mentioned in #Denmark.",
]
email_subject = "#Denmark > test"
verify_body_does_not_include: List[str] = []
@@ -570,7 +570,7 @@ def _extra_context_in_missed_stream_messages_wildcard_mention(
verify_body_include = [
"This email does not include message content because you have disabled message ",
"http://zulip.testserver/help/pm-mention-alert-notifications ",
- "View or reply in Zulip",
+ "View or reply in Zulip Dev Zulip",
" Manage email preferences: http://zulip.testserver/#settings/notifications",
]
email_subject = "New messages"
@@ -580,7 +580,7 @@ def _extra_context_in_missed_stream_messages_wildcard_mention(
"1 2 3 4 5 @**all**",
"private",
"group",
- "Reply to this email directly, or view it in Zulip",
+ "Reply to this email directly, or view it in Zulip Dev Zulip",
]
self._test_cases(
msg_id,
@@ -599,7 +599,7 @@ def _extra_context_in_missed_stream_messages_email_notify(self, send_as_user: bo
msg_id = self.send_stream_message(self.example_user("othello"), "denmark", "12")
verify_body_include = [
"Othello, the Moor of Venice: > 1 > 2 > 3 > 4 > 5 > 6 > 7 > 8 > 9 > 10 > 12 -- ",
- "You are receiving this because you have email notifications enabled for this stream.",
+ "You are receiving this because you have email notifications enabled for #Denmark.",
]
email_subject = "#Denmark > test"
self._test_cases(
@@ -619,7 +619,7 @@ def _extra_context_in_missed_stream_messages_mention_two_senders(
)
verify_body_include = [
"Cordelia, Lear's daughter: > 0 > 1 > 2 Othello, the Moor of Venice: > @**King Hamlet** -- ",
- "You are receiving this because you were mentioned in Zulip Dev.",
+ "You are receiving this because you were personally mentioned.",
]
email_subject = "#Denmark > test"
self._test_cases(
@@ -648,14 +648,14 @@ def _extra_context_in_missed_personal_messages(
verify_body_include = [
"This email does not include message content because your organization has disabled",
"http://zulip.testserver/help/hide-message-content-in-emails",
- "View or reply in Zulip",
+ "View or reply in Zulip Dev Zulip",
" Manage email preferences: http://zulip.testserver/#settings/notifications",
]
elif message_content_disabled_by_user:
verify_body_include = [
"This email does not include message content because you have disabled message ",
"http://zulip.testserver/help/pm-mention-alert-notifications ",
- "View or reply in Zulip",
+ "View or reply in Zulip Dev Zulip",
" Manage email preferences: http://zulip.testserver/#settings/notifications",
]
email_subject = "New messages"
@@ -664,7 +664,7 @@ def _extra_context_in_missed_personal_messages(
"Extremely personal message!",
"mentioned",
"group",
- "Reply to this email directly, or view it in Zulip",
+ "Reply to this email directly, or view it in Zulip Dev Zulip",
]
self._test_cases(
msg_id,
@@ -681,7 +681,7 @@ def _reply_to_email_in_missed_personal_messages(self, send_as_user: bool) -> Non
self.example_user("hamlet"),
"Extremely personal message!",
)
- verify_body_include = ["Reply to this email directly, or view it in Zulip"]
+ verify_body_include = ["Reply to this email directly, or view it in Zulip Dev Zulip"]
email_subject = "PMs with Othello, the Moor of Venice"
self._test_cases(msg_id, verify_body_include, email_subject, send_as_user)
@@ -717,7 +717,7 @@ def _extra_context_in_missed_huddle_messages_two_others(
verify_body_include = [
"This email does not include message content because you have disabled message ",
"http://zulip.testserver/help/pm-mention-alert-notifications ",
- "View or reply in Zulip",
+ "View or reply in Zulip Dev Zulip",
" Manage email preferences: http://zulip.testserver/#settings/notifications",
]
email_subject = "New messages"
@@ -726,7 +726,7 @@ def _extra_context_in_missed_huddle_messages_two_others(
"Othello, the Moor of Venice Othello, the Moor of Venice",
"Group personal message!",
"mentioned",
- "Reply to this email directly, or view it in Zulip",
+ "Reply to this email directly, or view it in Zulip Dev Zulip",
]
self._test_cases(
msg_id,
@@ -849,7 +849,7 @@ def test_smaller_user_group_mention_priority(self) -> None:
expected_email_include = [
"Othello, the Moor of Venice: > @*hamlet_only* > @*hamlet_and_cordelia* -- ",
- "You are receiving this because @hamlet_only was mentioned in Zulip Dev.",
+ "You are receiving this because @hamlet_only was mentioned.",
]
for text in expected_email_include:
@@ -889,7 +889,76 @@ def test_personal_over_user_group_mention_priority(self) -> None:
expected_email_include = [
"Othello, the Moor of Venice: > @*hamlet_and_cordelia* > @**King Hamlet** -- ",
- "You are receiving this because you were mentioned in Zulip Dev.",
+ "You are receiving this because you were personally mentioned.",
+ ]
+
+ for text in expected_email_include:
+ self.assertIn(text, self.normalize_string(mail.outbox[0].body))
+
+ def test_user_group_over_wildcard_mention_priority(self) -> None:
+ hamlet = self.example_user("hamlet")
+ cordelia = self.example_user("cordelia")
+ othello = self.example_user("othello")
+
+ hamlet_and_cordelia = create_user_group(
+ "hamlet_and_cordelia", [hamlet, cordelia], get_realm("zulip")
+ )
+
+ wildcard_mentioned_message_id = self.send_stream_message(othello, "Denmark", "@**all**")
+ user_group_mentioned_message_id = self.send_stream_message(
+ othello, "Denmark", "@*hamlet_and_cordelia*"
+ )
+
+ handle_missedmessage_emails(
+ hamlet.id,
+ [
+ {
+ "message_id": wildcard_mentioned_message_id,
+ "trigger": "wildcard_mentioned",
+ "mentioned_user_group_id": None,
+ },
+ {
+ "message_id": user_group_mentioned_message_id,
+ "trigger": "mentioned",
+ "mentioned_user_group_id": hamlet_and_cordelia.id,
+ },
+ ],
+ )
+
+ expected_email_include = [
+ "Othello, the Moor of Venice: > @**all** > @*hamlet_and_cordelia* -- ",
+ "You are receiving this because @hamlet_and_cordelia was mentioned.",
+ ]
+
+ for text in expected_email_include:
+ self.assertIn(text, self.normalize_string(mail.outbox[0].body))
+
+ def test_wildcard_over_stream_mention_priority(self) -> None:
+ hamlet = self.example_user("hamlet")
+ othello = self.example_user("othello")
+
+ stream_mentioned_message_id = self.send_stream_message(othello, "Denmark", "1")
+ wildcard_mentioned_message_id = self.send_stream_message(othello, "Denmark", "@**all**")
+
+ handle_missedmessage_emails(
+ hamlet.id,
+ [
+ {
+ "message_id": stream_mentioned_message_id,
+ "trigger": "stream_email_notify",
+ "mentioned_user_group_id": None,
+ },
+ {
+ "message_id": wildcard_mentioned_message_id,
+ "trigger": "wildcard_mentioned",
+ "mentioned_user_group_id": None,
+ },
+ ],
+ )
+
+ expected_email_include = [
+ "Othello, the Moor of Venice: > 1 > @**all** -- ",
+ "You are receiving this because everyone was mentioned in #Denmark.",
]
for text in expected_email_include:
@@ -1450,7 +1519,7 @@ def test_empty_backticks_in_missed_message(self) -> None:
self.example_user("hamlet"),
"```\n```",
)
- verify_body_include = ["view it in Zulip"]
+ verify_body_include = ["view it in Zulip Dev Zulip"]
email_subject = "PMs with Othello, the Moor of Venice"
self._test_cases(
msg_id, verify_body_include, email_subject, send_as_user=False, verify_html_body=True
| Improve missed message email notifications
As [discussed on CZO](https://chat.zulip.org/#narrow/stream/101-design/topic/notification.20emails/near/1428483), we should improve missed message notification emails to be more specific as to why the user is receiving the notification, as well as cleaning up some phrasing. Specifically, we should use the following "You are receiving this because..." lines, in priority order:
1. If the message(s) in the notification include a personal mention: "You are receiving this because you were personally mentioned."
2. If the message(s) in the notification include a group mention: "You are receiving this because @{{ mentioned_user_group_name }} was mentioned." [probably doesn't matter too much which group is listed, if there are multiple ones]
3. If the message(s) in the notification include a wildcard mention: "You are receiving this because everyone was mentioned."
4. Stream message notifications: "You are receiving this because you have email notifications enabled for `#<stream name>`."
In all the notification emails, we should also replace the "Reply to this email directly..." sentence with: "Reply to this email directly, [view it in {{ realm_name }} Zulip](), or [manage email preferences]()."
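A rough, purely illustrative sketch of the priority ordering above, expressed with the context flags the patch adds (`personal_mentioned`, `mentioned_user_group_name`, `wildcard_mentioned`, `stream_email_notify`); the real selection happens in the email templates, and the helper name here is made up:

```python
from typing import Optional


def notification_reason(
    personal_mentioned: bool,
    mentioned_user_group_name: Optional[str],
    wildcard_mentioned: bool,
    stream_email_notify: bool,
    stream_name: Optional[str],
) -> str:
    # The highest-priority reason wins, matching the ordering listed above.
    if personal_mentioned:
        return "You are receiving this because you were personally mentioned."
    if mentioned_user_group_name is not None:
        return f"You are receiving this because @{mentioned_user_group_name} was mentioned."
    if wildcard_mentioned:
        return f"You are receiving this because everyone was mentioned in #{stream_name}."
    if stream_email_notify:
        return f"You are receiving this because you have email notifications enabled for #{stream_name}."
    return "You are receiving this because of your notification settings."
```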
| Hello @zulip/server-development members, this issue was labeled with the "area: emails" label, so you may want to check it out!
<!-- areaLabelAddition -->
@zulipbot claim
@Rixant You have been unassigned from this issue because you have not made any updates for over 14 days. Please feel free to reclaim the issue if you decide to pick up again. Thanks!
@zulipbot claim
Welcome to Zulip, @dqkqd! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here's some tips to get you off to a good start:
- Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
- [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
| 2022-09-29T05:43:58 |
zulip/zulip | 23,159 | zulip__zulip-23159 | [
"22636"
] | ad2795698bdf2d7a30bad8a5bc354f665b9cc36f | diff --git a/zerver/lib/markdown/__init__.py b/zerver/lib/markdown/__init__.py
--- a/zerver/lib/markdown/__init__.py
+++ b/zerver/lib/markdown/__init__.py
@@ -589,13 +589,17 @@ class InlineImageProcessor(markdown.treeprocessors.Treeprocessor):
view.
"""
+ def __init__(self, zmd: "ZulipMarkdown") -> None:
+ super().__init__(zmd)
+ self.zmd = zmd
+
def run(self, root: Element) -> None:
# Get all URLs from the blob
found_imgs = walk_tree(root, lambda e: e if e.tag == "img" else None)
for img in found_imgs:
url = img.get("src")
assert url is not None
- if is_static_or_current_realm_url(url, self.md.zulip_realm):
+ if is_static_or_current_realm_url(url, self.zmd.zulip_realm):
# Don't rewrite images on our own site (e.g. emoji, user uploads).
continue
img.set("src", get_camo_url(url))
@@ -627,6 +631,10 @@ class InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor):
TWITTER_MAX_TO_PREVIEW = 3
INLINE_PREVIEW_LIMIT_PER_MESSAGE = 10
+ def __init__(self, zmd: "ZulipMarkdown") -> None:
+ super().__init__(zmd)
+ self.zmd = zmd
+
def add_a(
self,
root: Element,
@@ -642,8 +650,8 @@ def add_a(
desc = desc if desc is not None else ""
# Update message.has_image attribute.
- if "message_inline_image" in class_attr and self.md.zulip_message:
- self.md.zulip_message.has_image = True
+ if "message_inline_image" in class_attr and self.zmd.zulip_message:
+ self.zmd.zulip_message.has_image = True
if insertion_index is not None:
div = Element("div")
@@ -756,7 +764,7 @@ def get_actual_image_url(self, url: str) -> str:
return url
def is_image(self, url: str) -> bool:
- if not self.md.image_preview_enabled:
+ if not self.zmd.image_preview_enabled:
return False
parsed_url = urllib.parse.urlparse(url)
# remove HTML URLs which end with image extensions that can not be shorted
@@ -831,7 +839,7 @@ def dropbox_image(self, url: str) -> Optional[Dict[str, Any]]:
return None
def youtube_id(self, url: str) -> Optional[str]:
- if not self.md.image_preview_enabled:
+ if not self.zmd.image_preview_enabled:
return None
# YouTube video id extraction regular expression from https://pastebin.com/KyKAFv1s
# Slightly modified to support URLs of the forms
@@ -869,7 +877,7 @@ def youtube_image(self, url: str) -> Optional[str]:
return None
def vimeo_id(self, url: str) -> Optional[str]:
- if not self.md.image_preview_enabled:
+ if not self.zmd.image_preview_enabled:
return None
# (http|https)?:\/\/(www\.)?vimeo.com\/(?:channels\/(?:\w+\/)?|groups\/([^\/]*)\/videos\/|)(\d+)(?:|\/\?)
# If it matches, match.group('id') is the video id.
@@ -988,7 +996,7 @@ def set_text(text: str) -> None:
else:
current_node.tail = text
- db_data: Optional[DbData] = self.md.zulip_db_data
+ db_data: Optional[DbData] = self.zmd.zulip_db_data
current_index = 0
for item in to_process:
# The text we want to link starts in already linked text skip it
@@ -1229,9 +1237,9 @@ def run(self, root: Element) -> None:
}
# Set has_link and similar flags whenever a message is processed by Markdown
- if self.md.zulip_message:
- self.md.zulip_message.has_link = len(found_urls) > 0
- self.md.zulip_message.has_image = False # This is updated in self.add_a
+ if self.zmd.zulip_message:
+ self.zmd.zulip_message.has_link = len(found_urls) > 0
+ self.zmd.zulip_message.has_image = False # This is updated in self.add_a
for url in unique_urls:
# Due to rewrite_local_links_to_relative, we need to
@@ -1242,14 +1250,16 @@ def run(self, root: Element) -> None:
parsed_url = urllib.parse.urlsplit(urllib.parse.urljoin("/", url))
host = parsed_url.netloc
- if host != "" and host != self.md.zulip_realm.host:
+ if host != "" and (
+ self.zmd.zulip_realm is None or host != self.zmd.zulip_realm.host
+ ):
continue
if not parsed_url.path.startswith("/user_uploads/"):
continue
path_id = parsed_url.path[len("/user_uploads/") :]
- self.md.zulip_rendering_result.potential_attachment_path_ids.append(path_id)
+ self.zmd.zulip_rendering_result.potential_attachment_path_ids.append(path_id)
if len(found_urls) == 0:
return
@@ -1298,7 +1308,7 @@ def run(self, root: Element) -> None:
netloc = urlsplit(url).netloc
if netloc == "" or (
- self.md.zulip_realm is not None and netloc == self.md.zulip_realm.host
+ self.zmd.zulip_realm is not None and netloc == self.zmd.zulip_realm.host
):
# We don't have a strong use case for doing URL preview for relative links.
continue
@@ -1323,20 +1333,20 @@ def run(self, root: Element) -> None:
# is enabled, but URL previews are a beta feature and YouTube
# previews are pretty stable.
- db_data: Optional[DbData] = self.md.zulip_db_data
+ db_data: Optional[DbData] = self.zmd.zulip_db_data
if db_data and db_data.sent_by_bot:
continue
- if not self.md.url_embed_preview_enabled:
+ if not self.zmd.url_embed_preview_enabled:
continue
- if self.md.url_embed_data is None or url not in self.md.url_embed_data:
- self.md.zulip_rendering_result.links_for_preview.add(url)
+ if self.zmd.url_embed_data is None or url not in self.zmd.url_embed_data:
+ self.zmd.zulip_rendering_result.links_for_preview.add(url)
continue
# Existing but being None means that we did process the
# URL, but it was not valid to preview.
- extracted_data = self.md.url_embed_data[url]
+ extracted_data = self.zmd.url_embed_data[url]
if extracted_data is None:
continue
@@ -1359,12 +1369,13 @@ def run(self, root: Element) -> None:
class CompiledInlineProcessor(markdown.inlinepatterns.InlineProcessor):
- def __init__(self, compiled_re: Pattern[str], md: markdown.Markdown) -> None:
+ def __init__(self, compiled_re: Pattern[str], zmd: "ZulipMarkdown") -> None:
# This is similar to the superclass's small __init__ function,
# but we skip the compilation step and let the caller give us
# a compiled regex.
self.compiled_re = compiled_re
- self.md = md
+ self.md = zmd
+ self.zmd = zmd
class Timestamp(markdown.inlinepatterns.Pattern):
@@ -1479,8 +1490,12 @@ def unicode_emoji_to_codepoint(unicode_emoji: str) -> str:
class EmoticonTranslation(markdown.inlinepatterns.Pattern):
"""Translates emoticons like `:)` into emoji like `:smile:`."""
+ def __init__(self, pattern: str, zmd: "ZulipMarkdown") -> None:
+ super().__init__(pattern, zmd)
+ self.zmd = zmd
+
def handleMatch(self, match: Match[str]) -> Optional[Element]:
- db_data: Optional[DbData] = self.md.zulip_db_data
+ db_data: Optional[DbData] = self.zmd.zulip_db_data
if db_data is None or not db_data.translate_emoticons:
return None
@@ -1502,12 +1517,16 @@ def handleMatch(self, match: Match[str]) -> Optional[Element]:
class Emoji(markdown.inlinepatterns.Pattern):
+ def __init__(self, pattern: str, zmd: "ZulipMarkdown") -> None:
+ super().__init__(pattern, zmd)
+ self.zmd = zmd
+
def handleMatch(self, match: Match[str]) -> Optional[Union[str, Element]]:
orig_syntax = match.group("syntax")
name = orig_syntax[1:-1]
active_realm_emoji: Dict[str, EmojiInfo] = {}
- db_data: Optional[DbData] = self.md.zulip_db_data
+ db_data: Optional[DbData] = self.zmd.zulip_db_data
if db_data is not None:
active_realm_emoji = db_data.active_realm_emoji
@@ -1528,7 +1547,7 @@ def content_has_emoji_syntax(content: str) -> bool:
class Tex(markdown.inlinepatterns.Pattern):
- def handleMatch(self, match: Match[str]) -> Element:
+ def handleMatch(self, match: Match[str]) -> Union[str, Element]:
rendered = render_tex(match.group("body"), is_inline=True)
if rendered is not None:
return self.md.htmlStash.store(rendered)
@@ -1608,18 +1627,19 @@ def url_to_a(
class CompiledPattern(markdown.inlinepatterns.Pattern):
- def __init__(self, compiled_re: Pattern[str], md: markdown.Markdown) -> None:
+ def __init__(self, compiled_re: Pattern[str], zmd: "ZulipMarkdown") -> None:
# This is similar to the superclass's small __init__ function,
# but we skip the compilation step and let the caller give us
# a compiled regex.
self.compiled_re = compiled_re
- self.md = md
+ self.md = zmd
+ self.zmd = zmd
class AutoLink(CompiledPattern):
def handleMatch(self, match: Match[str]) -> ElementStringNone:
url = match.group("url")
- db_data: Optional[DbData] = self.md.zulip_db_data
+ db_data: Optional[DbData] = self.zmd.zulip_db_data
return url_to_a(db_data, url)
@@ -1793,7 +1813,7 @@ def __init__(
self,
source_pattern: str,
format_string: str,
- md: markdown.Markdown,
+ zmd: "ZulipMarkdown",
) -> None:
# Do not write errors to stderr (this still raises exceptions)
options = re2.Options()
@@ -1816,12 +1836,12 @@ def __init__(
r"(?<!%)(%%)*%([a-fA-F0-9][a-fA-F0-9])", r"\1%%\2", format_string
)
- super().__init__(compiled_re2, md)
+ super().__init__(compiled_re2, zmd)
def handleMatch( # type: ignore[override] # https://github.com/python/mypy/issues/10197
self, m: Match[str], data: str
) -> Union[Tuple[Element, int, int], Tuple[None, None, None]]:
- db_data: Optional[DbData] = self.md.zulip_db_data
+ db_data: Optional[DbData] = self.zmd.zulip_db_data
url = url_to_a(
db_data,
self.format_string % m.groupdict(),
@@ -1843,7 +1863,7 @@ def handleMatch( # type: ignore[override] # https://github.com/python/mypy/issu
) -> Union[Tuple[None, None, None], Tuple[Element, int, int]]:
name = m.group("match")
silent = m.group("silent") == "_"
- db_data: Optional[DbData] = self.md.zulip_db_data
+ db_data: Optional[DbData] = self.zmd.zulip_db_data
if db_data is not None:
wildcard = mention.user_mention_matches_wildcard(name)
@@ -1867,13 +1887,13 @@ def handleMatch( # type: ignore[override] # https://github.com/python/mypy/issu
if wildcard:
if not silent:
- self.md.zulip_rendering_result.mentions_wildcard = True
+ self.zmd.zulip_rendering_result.mentions_wildcard = True
user_id = "*"
elif user is not None:
assert isinstance(user, FullNameInfo)
if not silent:
- self.md.zulip_rendering_result.mentions_user_ids.add(user.id)
+ self.zmd.zulip_rendering_result.mentions_user_ids.add(user.id)
name = user.full_name
user_id = str(user.id)
else:
@@ -1899,13 +1919,13 @@ def handleMatch( # type: ignore[override] # https://github.com/python/mypy/issu
) -> Union[Tuple[None, None, None], Tuple[Element, int, int]]:
name = m.group("match")
silent = m.group("silent") == "_"
- db_data: Optional[DbData] = self.md.zulip_db_data
+ db_data: Optional[DbData] = self.zmd.zulip_db_data
if db_data is not None:
user_group = db_data.mention_data.get_user_group(name)
if user_group:
if not silent:
- self.md.zulip_rendering_result.mentions_user_group_ids.add(user_group.id)
+ self.zmd.zulip_rendering_result.mentions_user_group_ids.add(user_group.id)
name = user_group.name
user_group_id = str(user_group.id)
else:
@@ -1928,7 +1948,7 @@ def handleMatch( # type: ignore[override] # https://github.com/python/mypy/issu
class StreamPattern(CompiledInlineProcessor):
def find_stream_id(self, name: str) -> Optional[int]:
- db_data: Optional[DbData] = self.md.zulip_db_data
+ db_data: Optional[DbData] = self.zmd.zulip_db_data
if db_data is None:
return None
stream_id = db_data.stream_names.get(name)
@@ -1959,7 +1979,7 @@ def handleMatch( # type: ignore[override] # https://github.com/python/mypy/issu
class StreamTopicPattern(CompiledInlineProcessor):
def find_stream_id(self, name: str) -> Optional[int]:
- db_data: Optional[DbData] = self.md.zulip_db_data
+ db_data: Optional[DbData] = self.zmd.zulip_db_data
if db_data is None:
return None
stream_id = db_data.stream_names.get(name)
@@ -2013,6 +2033,10 @@ class AlertWordNotificationProcessor(markdown.preprocessors.Preprocessor):
"`",
}
+ def __init__(self, zmd: "ZulipMarkdown") -> None:
+ super().__init__(zmd)
+ self.zmd = zmd
+
def check_valid_start_position(self, content: str, index: int) -> bool:
if index <= 0 or content[index] in self.allowed_before_punctuation:
return True
@@ -2024,14 +2048,14 @@ def check_valid_end_position(self, content: str, index: int) -> bool:
return False
def run(self, lines: List[str]) -> List[str]:
- db_data: Optional[DbData] = self.md.zulip_db_data
+ db_data: Optional[DbData] = self.zmd.zulip_db_data
if db_data is not None:
# We check for alert words here, the set of which are
# dependent on which users may see this message.
#
# Our caller passes in the list of possible_words. We
# don't do any special rendering; we just append the alert words
- # we find to the set self.md.zulip_rendering_result.user_ids_with_alert_words.
+ # we find to the set self.zmd.zulip_rendering_result.user_ids_with_alert_words.
realm_alert_words_automaton = db_data.realm_alert_words_automaton
@@ -2043,11 +2067,15 @@ def run(self, lines: List[str]) -> List[str]:
if self.check_valid_start_position(
content, end_index - len(original_value)
) and self.check_valid_end_position(content, end_index + 1):
- self.md.zulip_rendering_result.user_ids_with_alert_words.update(user_ids)
+ self.zmd.zulip_rendering_result.user_ids_with_alert_words.update(user_ids)
return lines
class LinkInlineProcessor(markdown.inlinepatterns.LinkInlineProcessor):
+ def __init__(self, pattern: str, zmd: "ZulipMarkdown") -> None:
+ super().__init__(pattern, zmd)
+ self.zmd = zmd
+
def zulip_specific_link_changes(self, el: Element) -> Union[None, Element]:
href = el.get("href")
assert href is not None
@@ -2058,7 +2086,7 @@ def zulip_specific_link_changes(self, el: Element) -> Union[None, Element]:
return None # no-op; the link is not processed.
# Rewrite local links to be relative
- db_data: Optional[DbData] = self.md.zulip_db_data
+ db_data: Optional[DbData] = self.zmd.zulip_db_data
href = rewrite_local_links_to_relative(db_data, href)
# Make changes to <a> tag attributes
@@ -2103,11 +2131,11 @@ def get_sub_registry(r: markdown.util.Registry, keys: List[str]) -> markdown.uti
ZEPHYR_MIRROR_MARKDOWN_KEY = -2
-class Markdown(markdown.Markdown):
+class ZulipMarkdown(markdown.Markdown):
zulip_message: Optional[Message]
zulip_realm: Optional[Realm]
zulip_db_data: Optional[DbData]
- zulip_rendering_result: Optional[MessageRenderingResult]
+ zulip_rendering_result: MessageRenderingResult
image_preview_enabled: bool
url_embed_preview_enabled: bool
url_embed_data: Optional[Dict[str, Optional[UrlEmbedData]]]
@@ -2322,7 +2350,7 @@ def handle_zephyr_mirror(self) -> None:
)
-md_engines: Dict[Tuple[int, bool], Markdown] = {}
+md_engines: Dict[Tuple[int, bool], ZulipMarkdown] = {}
linkifier_data: Dict[int, List[LinkifierDict]] = {}
@@ -2332,7 +2360,7 @@ def make_md_engine(linkifiers_key: int, email_gateway: bool) -> None:
del md_engines[md_engine_key]
linkifiers = linkifier_data[linkifiers_key]
- md_engines[md_engine_key] = Markdown(
+ md_engines[md_engine_key] = ZulipMarkdown(
linkifiers=linkifiers,
linkifiers_key=linkifiers_key,
email_gateway=email_gateway,
diff --git a/zerver/lib/markdown/fenced_code.py b/zerver/lib/markdown/fenced_code.py
--- a/zerver/lib/markdown/fenced_code.py
+++ b/zerver/lib/markdown/fenced_code.py
@@ -415,16 +415,16 @@ def pop(self) -> None:
def run(self, lines: Iterable[str]) -> List[str]:
"""Match and store Fenced Code Blocks in the HtmlStash."""
+ from zerver.lib.markdown import ZulipMarkdown
+
output: List[str] = []
processor = self
self.handlers: List[ZulipBaseHandler] = []
default_language = None
- try:
+ if isinstance(self.md, ZulipMarkdown) and self.md.zulip_realm is not None:
default_language = self.md.zulip_realm.default_code_block_language
- except AttributeError:
- pass
handler = OuterHandler(processor, output, self.run_content_validators, default_language)
self.push(handler)
diff --git a/zerver/lib/subdomains.py b/zerver/lib/subdomains.py
--- a/zerver/lib/subdomains.py
+++ b/zerver/lib/subdomains.py
@@ -1,5 +1,6 @@
import re
import urllib
+from typing import Optional
from django.conf import settings
from django.http import HttpRequest
@@ -55,7 +56,7 @@ def is_root_domain_available() -> bool:
return not Realm.objects.filter(string_id=Realm.SUBDOMAIN_FOR_ROOT_DOMAIN).exists()
-def is_static_or_current_realm_url(url: str, realm: Realm) -> bool:
+def is_static_or_current_realm_url(url: str, realm: Optional[Realm]) -> bool:
split_url = urllib.parse.urlsplit(url)
split_static_url = urllib.parse.urlsplit(settings.STATIC_URL)
@@ -67,7 +68,11 @@ def is_static_or_current_realm_url(url: str, realm: Realm) -> bool:
# HTTPS access to this Zulip organization's domain; our existing
# HTTPS protects this request, and there's no privacy benefit to
# using camo in front of the Zulip server itself.
- if split_url.netloc == realm.host and f"{split_url.scheme}://" == settings.EXTERNAL_URI_SCHEME:
+ if (
+ realm is not None
+ and split_url.netloc == realm.host
+ and f"{split_url.scheme}://" == settings.EXTERNAL_URI_SCHEME
+ ):
return True
# Relative URLs will be processed by the browser the same way as the above.
| Realm is not always passed
This conditional assumes the realm is valid, and it is not always.
https://github.com/zulip/zulip/blob/58d1be8085c5278de1f94087c1a8c848968cd58f/zerver/lib/subdomains.py#L73
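The fix in the patch above amounts to treating a missing realm as "not the current realm". A simplified sketch of just that guarded check (not the full `is_static_or_current_realm_url` function):

```python
import urllib.parse
from typing import Optional

from django.conf import settings

from zerver.models import Realm


def is_current_realm_url(url: str, realm: Optional[Realm]) -> bool:
    # With realm=None (e.g. Markdown rendered outside a realm context), the URL
    # can no longer be assumed to belong to "our own" host.
    split_url = urllib.parse.urlsplit(url)
    return (
        realm is not None
        and split_url.netloc == realm.host
        and f"{split_url.scheme}://" == settings.EXTERNAL_URI_SCHEME
    )
```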
| Hello @zulip/server-production members, this issue was labeled with the "area: production" label, so you may want to check it out!
<!-- areaLabelAddition -->
What's the case which gets here without a realm set?
The caller is here:
https://github.com/zulip/zulip/blob/10a0bf4de4f3594ee5f72ef5b6e873bb894e8d9d/zerver/lib/markdown/__init__.py#L598
Although `Markdown.zulip_realm` is [typed](https://github.com/zulip/zulip/blob/10a0bf4de4f3594ee5f72ef5b6e873bb894e8d9d/zerver/lib/markdown/__init__.py#L2108) `Optional[Realm]`, the problem is that `Processor.md` is [stubbed](https://github.com/python/typeshed/blob/55cddc7ac779c8c678f12e63993a0a7ee346d090/stubs/Markdown/markdown/util.pyi#L24) as `Any`. | 2022-10-06T21:02:11 |
|
zulip/zulip | 23,181 | zulip__zulip-23181 | [
"21709"
] | c1dfa1fd1198a6ada178561831e2a75af4eeddf5 | diff --git a/zerver/actions/users.py b/zerver/actions/users.py
--- a/zerver/actions/users.py
+++ b/zerver/actions/users.py
@@ -163,21 +163,30 @@ def do_deactivate_user(
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
- delete_user_sessions(user_profile)
- event = dict(
- type="realm_user",
- op="remove",
- person=dict(user_id=user_profile.id, full_name=user_profile.full_name),
- )
- send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
+ transaction.on_commit(lambda: delete_user_sessions(user_profile))
- if user_profile.is_bot:
- event = dict(
- type="realm_bot",
+ event_remove_user = dict(
+ type="realm_user",
op="remove",
- bot=dict(user_id=user_profile.id, full_name=user_profile.full_name),
+ person=dict(user_id=user_profile.id, full_name=user_profile.full_name),
)
- send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
+ transaction.on_commit(
+ lambda: send_event(
+ user_profile.realm, event_remove_user, active_user_ids(user_profile.realm_id)
+ )
+ )
+
+ if user_profile.is_bot:
+ event_remove_bot = dict(
+ type="realm_bot",
+ op="remove",
+ bot=dict(user_id=user_profile.id, full_name=user_profile.full_name),
+ )
+ transaction.on_commit(
+ lambda: send_event(
+ user_profile.realm, event_remove_bot, bot_owner_user_ids(user_profile)
+ )
+ )
@transaction.atomic(durable=True)
| diff --git a/zerver/tests/test_decorators.py b/zerver/tests/test_decorators.py
--- a/zerver/tests/test_decorators.py
+++ b/zerver/tests/test_decorators.py
@@ -1266,7 +1266,8 @@ def test_send_deactivated_user(self) -> None:
"""
user_profile = self.example_user("hamlet")
self.login_user(user_profile)
- do_deactivate_user(user_profile, acting_user=None)
+ with self.captureOnCommitCallbacks(execute=True):
+ do_deactivate_user(user_profile, acting_user=None)
result = self.client_post(
"/json/messages",
diff --git a/zerver/tests/test_users.py b/zerver/tests/test_users.py
--- a/zerver/tests/test_users.py
+++ b/zerver/tests/test_users.py
@@ -1625,7 +1625,8 @@ def test_clear_sessions(self) -> None:
self.assert_json_success(result)
self.assertEqual(Session.objects.filter(pk=session_key).count(), 1)
- do_deactivate_user(user, acting_user=None)
+ with self.captureOnCommitCallbacks(execute=True):
+ do_deactivate_user(user, acting_user=None)
self.assertEqual(Session.objects.filter(pk=session_key).count(), 0)
result = self.client_get("/json/users")
| sync_ldap_user_data error
It seems that after switching to version 5.1, the `LDAP_DEACTIVATE_NON_MATCHING_USERS` setting stopped working correctly for me.
```
root@zserver:/home/zulip/deployments/current# ./scripts/get-django-setting LDAP_DEACTIVATE_NON_MATCHING_USERS
True
```
Sync command
`root@zserver:/home/zulip/deployments/current# su zulip -c './manage.py sync_ldap_user_data'`
ends with an error. I replaced the real username and domain for privacy reasons.
```
2022-04-06 17:11:01.106 DEBG [django_auth_ldap] search_s('OU=MyOU,DC=MyDC', 2, '(sAMAccountName=%(user)s)') returned 0 objects:
2022-04-06 17:11:01.317 ERR [zulip.sync_ldap_user_data] LDAP sync failed
Traceback (most recent call last):
File "/home/zulip/deployments/2022-04-03-15-35-19/zproject/backends.py", line 1099, in sync_user_from_ldap
ldap_username = backend.django_to_ldap_username(user_profile.delivery_email)
File "/home/zulip/deployments/2022-04-03-15-35-19/zproject/backends.py", line 668, in django_to_ldap_username
raise ZulipLDAPExceptionNoMatchingLDAPUser(
zproject.backends.ZulipLDAPExceptionNoMatchingLDAPUser: No LDAP user matching django_to_ldap_username result: myusername. Input username: [email protected]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/zulip/deployments/2022-04-03-15-35-19/zerver/management/commands/sync_ldap_user_data.py", line 30, in sync_ldap_user_data
sync_user_from_ldap(u, logger)
File "/home/zulip/deployments/2022-04-03-15-35-19/zproject/backends.py", line 1106, in sync_user_from_ldap
do_deactivate_user(user_profile, acting_user=None)
File "/home/zulip/deployments/2022-04-03-15-35-19/zerver/lib/actions.py", line 1463, in do_deactivate_user
delete_user_sessions(user_profile)
File "/home/zulip/deployments/2022-04-03-15-35-19/zerver/lib/sessions.py", line 47, in delete_user_sessions
delete_session(session)
File "/home/zulip/deployments/2022-04-03-15-35-19/zerver/lib/sessions.py", line 41, in delete_session
session_engine.SessionStore(session.session_key).delete()
File "/home/zulip/deployments/2022-04-03-15-35-19/zerver/lib/safe_session_cached_db.py", line 24, in delete
assert not get_connection().in_atomic_block
AssertionError
Traceback (most recent call last):
File "/home/zulip/deployments/2022-04-03-15-35-19/zproject/backends.py", line 1099, in sync_user_from_ldap
ldap_username = backend.django_to_ldap_username(user_profile.delivery_email)
File "/home/zulip/deployments/2022-04-03-15-35-19/zproject/backends.py", line 668, in django_to_ldap_username
raise ZulipLDAPExceptionNoMatchingLDAPUser(
zproject.backends.ZulipLDAPExceptionNoMatchingLDAPUser: No LDAP user matching django_to_ldap_username result: myusername. Input username: [email protected]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "./manage.py", line 157, in <module>
execute_from_command_line(sys.argv)
File "./manage.py", line 122, in execute_from_command_line
utility.execute()
File "/srv/zulip-venv-cache/726c7d1d35d9cd61bb4b70e98167a1db7b0c524d/zulip-py3-venv/lib/python3.8/site-packages/django/core/management/__init__.py", line 413, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/srv/zulip-venv-cache/726c7d1d35d9cd61bb4b70e98167a1db7b0c524d/zulip-py3-venv/lib/python3.8/site-packages/django/core/management/base.py", line 354, in run_from_argv
self.execute(*args, **cmd_options)
File "/srv/zulip-venv-cache/726c7d1d35d9cd61bb4b70e98167a1db7b0c524d/zulip-py3-venv/lib/python3.8/site-packages/django/core/management/base.py", line 398, in execute
output = self.handle(*args, **options)
File "/home/zulip/deployments/2022-04-03-15-35-19/zerver/management/commands/sync_ldap_user_data.py", line 80, in handle
sync_ldap_user_data(user_profiles, not options["force"])
File "/usr/lib/python3.8/contextlib.py", line 75, in inner
return func(*args, **kwds)
File "/home/zulip/deployments/2022-04-03-15-35-19/zerver/management/commands/sync_ldap_user_data.py", line 30, in sync_ldap_user_data
sync_user_from_ldap(u, logger)
File "/home/zulip/deployments/2022-04-03-15-35-19/zproject/backends.py", line 1106, in sync_user_from_ldap
do_deactivate_user(user_profile, acting_user=None)
File "/home/zulip/deployments/2022-04-03-15-35-19/zerver/lib/actions.py", line 1463, in do_deactivate_user
delete_user_sessions(user_profile)
File "/home/zulip/deployments/2022-04-03-15-35-19/zerver/lib/sessions.py", line 47, in delete_user_sessions
delete_session(session)
File "/home/zulip/deployments/2022-04-03-15-35-19/zerver/lib/sessions.py", line 41, in delete_session
session_engine.SessionStore(session.session_key).delete()
File "/home/zulip/deployments/2022-04-03-15-35-19/zerver/lib/safe_session_cached_db.py", line 24, in delete
assert not get_connection().in_atomic_block
AssertionError
```
As a result, the user remains active.
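For reference, the patch above resolves this by deferring the session deletion (and the removal events) with `transaction.on_commit`, so they only run once the surrounding transaction has committed. A heavily simplified sketch of that pattern; the real `do_deactivate_user` does much more:

```python
from django.db import transaction

from zerver.lib.sessions import delete_user_sessions
from zerver.models import UserProfile


def deactivate_user_sketch(user_profile: UserProfile) -> None:
    with transaction.atomic():
        user_profile.is_active = False
        user_profile.save(update_fields=["is_active"])

        # delete_session() asserts it is *not* inside an atomic block, so instead of
        # calling delete_user_sessions() directly here (which is what raised the
        # AssertionError above), the cleanup is queued to run after commit.
        transaction.on_commit(lambda: delete_user_sessions(user_profile))
```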
| Thanks for the report! I [posted this on chat.zulip.org](https://chat.zulip.org/#narrow/stream/31-production-help/topic/sync_ldap_user_data.20error.20.2321709/near/1361750) for more interactive discussion.
We're hitting this as well on our production instance.
Happened on 4.11 and now on 5.1 as well.
sync_ldap_user_data never completes, same traceback in ``safe_session_cached_db.py``, line 24.
Happens regardless of ``LDAP_DEACTIVATE_NON_MATCHING_USERS`` being true or false; both config settings trigger the same traceback.
Any ideas on a fix/workaround?
Zulip chat has a workaround; documenting it here so it's discoverable (Zulip chats are seemingly not indexed by search engines).
Carrying this patch in our local branch for now and testing.
```quote
For a short-term fix, @Aggr, you can remove the @transaction.atomic line in zerver/management/commands/sync_ldap_user_data.py and re-run the import.
```
Hello @zulip/server-authentication members, this issue was labeled with the "area: authentication" label, so you may want to check it out!
<!-- areaLabelAddition -->
| 2022-10-09T19:37:11 |
zulip/zulip | 23,196 | zulip__zulip-23196 | [
"20162"
] | f37ac8038475a8d30f28e59e5f5210ff15033c3e | diff --git a/analytics/management/commands/populate_analytics_db.py b/analytics/management/commands/populate_analytics_db.py
--- a/analytics/management/commands/populate_analytics_db.py
+++ b/analytics/management/commands/populate_analytics_db.py
@@ -92,6 +92,16 @@ def handle(self, *args: Any, **options: Any) -> None:
)
do_change_user_role(shylock, UserProfile.ROLE_REALM_OWNER, acting_user=None)
+ # Create guest user for set_guest_users_statistic.
+ create_user(
+ "[email protected]",
+ "Bassanio",
+ realm,
+ full_name="Bassanio",
+ role=UserProfile.ROLE_GUEST,
+ force_date_joined=installation_time,
+ )
+
administrators_user_group = UserGroup.objects.get(
name=UserGroup.ADMINISTRATORS_GROUP_NAME, realm=realm, is_system_group=True
)
diff --git a/analytics/views/stats.py b/analytics/views/stats.py
--- a/analytics/views/stats.py
+++ b/analytics/views/stats.py
@@ -55,11 +55,18 @@ def render_stats(
analytics_ready: bool = True,
) -> HttpResponse:
assert request.user.is_authenticated
+
+ # Same query to get guest user count as in get_seat_count in corporate/lib/stripe.py.
+ guest_users = UserProfile.objects.filter(
+ realm=request.user.realm, is_active=True, is_bot=False, role=UserProfile.ROLE_GUEST
+ ).count()
+
page_params = dict(
data_url_suffix=data_url_suffix,
for_installation=for_installation,
remote=remote,
upload_space_used=request.user.realm.currently_used_upload_space_bytes(),
+ guest_users=guest_users,
)
request_language = get_and_set_request_language(
diff --git a/corporate/lib/stripe.py b/corporate/lib/stripe.py
--- a/corporate/lib/stripe.py
+++ b/corporate/lib/stripe.py
@@ -69,6 +69,8 @@ def get_seat_count(
.exclude(role=UserProfile.ROLE_GUEST)
.count()
) + extra_non_guests_count
+
+ # This guest count calculation should match the similar query in render_stats().
guests = (
UserProfile.objects.filter(
realm=realm, is_active=True, is_bot=False, role=UserProfile.ROLE_GUEST
| Add summary statistics to /stats page
At present, it can be hard to find basic statistics about a Zulip organization. They may be hidden deep in the menus, or require careful parsing of graphs.
To address this, we should display summary statistics at the top of the organization's `/stats` page that answer the most common questions an administrator might have about their organization. In particular, we should show:
1) Number of users: Current number of non-deactivated users
2) Number of guests: This is relevant because of the details of [how Zulip Cloud orgs are billed](https://zulip.com/help/zulip-cloud-billing#how-are-guest-accounts-billed-is-there-special-pricing).
2) Total number of messages
4) Number of messages in the last 30 days
5) Storage space in use (currently shown under Personal settings > Uploaded files, e.g. "Organization using 0.0% of 5 GB."
[CZO discussion thread](https://chat.zulip.org/#narrow/stream/2-general/topic/showing.20org.20storage.20use)
| Hello @zulip/server-analytics members, this issue was labeled with the "area: analytics" label, so you may want to check it out!
<!-- areaLabelAddition -->
@zulipbot claim
Hello @Parth-Mittal-NITK, it looks like you've currently claimed 1 issue in this repository. We encourage new contributors to focus their efforts on at most 1 issue at a time, so please complete your work on your other claimed issues before trying to claim this issue again.
We look forward to your valuable contributions!
@zulipbot claim
Hi @mittal-parth, any progress on this one?
I am so sorry @alya, I got caught up with college work. I had understood the existing code to some extent and was building the layout. Since I will be able to take it up only after a week, I'll leave it for someone else to take up if interested. If not, I'll resume after a week.
@zulipbot abandon
Hi @alya! This seems like an interesting feature. Do you have any design for the summary stats in mind? It would be wonderful if you can suggest some CSS or existing templates from which we can get some ideas.
@oliver-pham Have you explored the code for the `/stats` page?
Once you have done so, you can post your thoughts and get feedback on chat.zulip.org (in #frontend for more technical discussion, or #design to discuss what it would look like).
Hi @oliver-pham! Since I had gotten some idea about the code around stats, it would be great if you can do the designing and I can fetch the data from the backend. Let me know if that sounds good! π
I'm not a designer myself, but I can try to help you with the design. I've created a topic for this in #frontend on (on chat.zulip.org) and suggested some ideas. Before claiming the issue, can we discuss your approaches in #frontend, @mittal-parth? | 2022-10-11T12:23:27 |
|
zulip/zulip | 23,200 | zulip__zulip-23200 | [
"19596"
] | 385a408be5daaefd930b1ac82154b6dbcf7d67e8 | diff --git a/zerver/lib/remote_server.py b/zerver/lib/remote_server.py
--- a/zerver/lib/remote_server.py
+++ b/zerver/lib/remote_server.py
@@ -161,7 +161,7 @@ def send_analytics_to_remote_server() -> None:
try:
result = send_to_push_bouncer("GET", "server/analytics/status", {})
except PushNotificationBouncerRetryLaterError as e:
- logging.warning(e.msg)
+ logging.warning(e.msg, exc_info=True)
return
last_acked_realm_count_id = result["last_realm_count_id"]
diff --git a/zerver/middleware.py b/zerver/middleware.py
--- a/zerver/middleware.py
+++ b/zerver/middleware.py
@@ -26,6 +26,7 @@
from django.utils import translation
from django.utils.cache import patch_vary_headers
from django.utils.deprecation import MiddlewareMixin
+from django.utils.log import log_response
from django.utils.translation import gettext as _
from django.views.csrf import csrf_failure as html_csrf_failure
from django_scim.middleware import SCIMAuthCheckMiddleware
@@ -456,8 +457,23 @@ def process_exception(
return json_unauthorized(www_authenticate="session")
if isinstance(exception, JsonableError):
- return json_response_from_error(exception)
- if RequestNotes.get_notes(request).error_format == "JSON":
+ response = json_response_from_error(exception)
+ if response.status_code >= 500:
+ # Here we use Django's log_response the way Django uses
+ # it normally to log error responses. However, we make the small
+ # modification of including the traceback to make the log message
+ # more helpful. log_response takes care of knowing not to duplicate
+ # the logging, so Django won't generate a second log message.
+ log_response(
+ "%s: %s",
+ response.reason_phrase,
+ request.path,
+ response=response,
+ request=request,
+ exc_info=True,
+ )
+ return response
+ if RequestNotes.get_notes(request).error_format == "JSON" and not settings.TEST_SUITE:
capture_exception(exception)
json_error_logger = logging.getLogger("zerver.middleware.json_error_handler")
json_error_logger.error(traceback.format_exc(), extra=dict(request=request))
| diff --git a/zerver/lib/test_classes.py b/zerver/lib/test_classes.py
--- a/zerver/lib/test_classes.py
+++ b/zerver/lib/test_classes.py
@@ -1590,7 +1590,7 @@ def side_effect(*args: Any, **kwargs: Any) -> None:
complete_event_type is not None
and all_event_types is not None
and complete_event_type not in all_event_types
- ):
+ ): # nocoverage
raise Exception(
f"""
Error: This test triggered a message using the event "{complete_event_type}", which was not properly
diff --git a/zerver/tests/test_integrations_dev_panel.py b/zerver/tests/test_integrations_dev_panel.py
--- a/zerver/tests/test_integrations_dev_panel.py
+++ b/zerver/tests/test_integrations_dev_panel.py
@@ -22,7 +22,7 @@ def test_check_send_webhook_fixture_message_for_error(self) -> None:
"custom_headers": "{}",
"is_json": "true",
}
- with self.assertLogs(level="ERROR") as logs:
+ with self.assertLogs(level="ERROR") as logs, self.settings(TEST_SUITE=False):
response = self.client_post(target_url, data)
self.assertEqual(response.status_code, 500) # Since the response would be forwarded.
diff --git a/zerver/tests/test_logging_handlers.py b/zerver/tests/test_logging_handlers.py
--- a/zerver/tests/test_logging_handlers.py
+++ b/zerver/tests/test_logging_handlers.py
@@ -81,7 +81,9 @@ def simulate_error(self) -> logging.LogRecord:
"django.request", level="ERROR"
) as request_error_log, self.assertLogs(
"zerver.middleware.json_error_handler", level="ERROR"
- ) as json_error_handler_log:
+ ) as json_error_handler_log, self.settings(
+ TEST_SUITE=False
+ ):
rate_limit_patch.side_effect = capture_and_throw
result = self.client_get("/json/users")
self.assert_json_error(result, "Internal server error", status_code=500)
diff --git a/zerver/tests/test_push_notifications.py b/zerver/tests/test_push_notifications.py
--- a/zerver/tests/test_push_notifications.py
+++ b/zerver/tests/test_push_notifications.py
@@ -477,11 +477,9 @@ def test_push_bouncer_api(self) -> None:
"ConnectionError while trying to connect to push notification bouncer",
502,
)
- self.assertEqual(
- error_log.output,
- [
- f"ERROR:django.request:Bad Gateway: {endpoint}",
- ],
+ self.assertIn(
+ f"ERROR:django.request:Bad Gateway: {endpoint}\nTraceback",
+ error_log.output[0],
)
with responses.RequestsMock() as resp, self.assertLogs(level="WARNING") as warn_log:
@@ -489,11 +487,11 @@ def test_push_bouncer_api(self) -> None:
result = self.client_post(endpoint, {"token": token}, subdomain="zulip")
self.assert_json_error(result, "Received 500 from push notification bouncer", 502)
self.assertEqual(
- warn_log.output,
- [
- "WARNING:root:Received 500 from push notification bouncer",
- f"ERROR:django.request:Bad Gateway: {endpoint}",
- ],
+ warn_log.output[0],
+ "WARNING:root:Received 500 from push notification bouncer",
+ )
+ self.assertIn(
+ f"ERROR:django.request:Bad Gateway: {endpoint}\nTraceback", warn_log.output[1]
)
# Add tokens
@@ -571,13 +569,12 @@ def test_analytics_api(self) -> None:
user = self.example_user("hamlet")
end_time = self.TIME_ZERO
- with responses.RequestsMock() as resp, mock.patch(
- "zerver.lib.remote_server.logging.warning"
- ) as mock_warning:
+ with responses.RequestsMock() as resp, self.assertLogs(level="WARNING") as mock_warning:
resp.add(responses.GET, ANALYTICS_STATUS_URL, body=ConnectionError())
send_analytics_to_remote_server()
- mock_warning.assert_called_once_with(
- "ConnectionError while trying to connect to push notification bouncer"
+ self.assertIn(
+ "WARNING:root:ConnectionError while trying to connect to push notification bouncer\nTraceback ",
+ mock_warning.output[0],
)
self.assertTrue(resp.assert_call_count(ANALYTICS_STATUS_URL, 1))
diff --git a/zerver/webhooks/travis/tests.py b/zerver/webhooks/travis/tests.py
--- a/zerver/webhooks/travis/tests.py
+++ b/zerver/webhooks/travis/tests.py
@@ -114,25 +114,6 @@ def test_travis_exclude_glob_events(self) -> None:
expect_noop=True,
)
- def test_travis_invalid_event(self) -> None:
- payload = self.get_body("build")
- payload = payload.replace("push", "invalid_event")
- expected_error_messsage = """
-Error: This test triggered a message using the event "invalid_event", which was not properly
-registered via the @webhook_view(..., event_types=[...]). These registrations are important for Zulip
-self-documenting the supported event types for this integration.
-
-You can fix this by adding "invalid_event" to ALL_EVENT_TYPES for this webhook.
-""".strip()
- with self.assertLogs("django.request"):
- with self.assertLogs("zerver.middleware.json_error_handler", level="ERROR") as m:
- self.client_post(
- self.url,
- payload,
- content_type="application/x-www-form-urlencoded",
- )
- self.assertIn(expected_error_messsage, m.output[0])
-
def test_travis_noop(self) -> None:
expected_error_message = """
While no message is expected given expect_noop=True,
| obscure error messages when zulip push notification gateway is unreachable
Our Zulip server is hosted in a DMZ.
Accidentally, the firewall rule allowing outgoing connections to push.zulipchat.com was not active. We block all outgoing connections by default, as we do not want to be some "hop" or source of spam and illegal activity if some machine inside the DMZ gets hacked.
We did not take any notice of this, and users didn't complain because push notifications are not that important to them.
But we started to wonder why we got weird and very intermittent notification emails from the Django module, like this:
```
Subject: [Django] chat:Bad Gateway:/api/v1/users/me/apns_device_token
Logger django.request, from module django.utils.log line 230:
Error generated by <**username**> <**user email**> (Member) on <**servername**> deployment
No stack trace available
Deployed code:
- git: None
- ZULIP_VERSION: 4.3
Request info:
- path: /api/v1/users/me/apns_device_token
- POST: {'token': ['<**token string**>'], 'appid': ['org.zulip.Zulip']}
- REMOTE_ADDR: "<**IP**>"
- QUERY_STRING: ""
- SERVER_NAME: ""
```
In the server log, there is proper logging of the gateway unreachability:
```
2021-08-17 00:14:59.285 WARN [] ConnectionError while trying to connect to push notification bouncer
2021-08-17 01:06:19.380 WARN [] ConnectionError while trying to connect to push notification bouncer
2021-08-17 02:05:03.906 WARN [] ConnectionError while trying to connect to push notification bouncer
```
It looks like there is some room for improvement:
1. Don't send the weird Django error message.
2. Send a proper notification to the admin and/or to the end user if the push notification message could not be sent.
also see https://chat.zulip.org/#narrow/stream/31-production-help/topic/.5BDjango.5D.20chat.3ABad.20Gateway.3A.2Fapi.2Fv1.2Fusers.2Fme.2Fapns_device_token/near/1246328
| Hello @zulip/server-api, @zulip/server-production members, this issue was labeled with the "area: production", "area: api" labels, so you may want to check it out!
<!-- areaLabelAddition -->
That error email does indeed seem pretty messy for what is fundamentally a networking issue connecting to the [mobile push notifications service](https://zulip.readthedocs.io/en/latest/production/mobile-push-notifications.html). The code path is here:
```
$ git grep 'while trying to connect'
zerver/lib/remote_server.py: f"{e.__class__.__name__} while trying to connect to push notification bouncer"
zerver/tests/test_push_notifications.py: "ConnectionError while trying to connect to push notification bouncer",
zerver/tests/test_push_notifications.py: "ConnectionError while trying to connect to push notification bouncer"
```
And my guess is that we're somewhere raising an exception or doing a `logging.error` type call without passing a proper stack trace. We do have a test that suggests that's the case:
```
$ git grep 'Bad Gateway'
zerver/tests/test_push_notifications.py: f"ERROR:django.request:Bad Gateway: {endpoint}",
zerver/tests/test_push_notifications.py: f"ERROR:django.request:Bad Gateway: {endpoint}",
```
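To illustrate that guess with a generic sketch (not the actual Zulip code path; the URL and call site here are invented): a `logging.warning` or `logging.error` call only carries a traceback if the caller passes `exc_info=True` or uses `logging.exception`, which is the difference between a "No stack trace available" report and a debuggable one.
```python
import logging

import requests

logging.basicConfig(level=logging.WARNING)

try:
    # Stand-in for an unreachable push bouncer; this hostname is invented.
    requests.get("https://push.example.invalid/api/v1/remotes/push/register", timeout=2)
except requests.exceptions.RequestException:
    # Message only: matches the server log lines above, but any error report
    # built from this record has no stack trace attached.
    logging.warning("ConnectionError while trying to connect to push notification bouncer")
    # With exc_info=True, the active exception's traceback is attached to the
    # log record, so the resulting report shows where the failure came from.
    logging.warning(
        "ConnectionError while trying to connect to push notification bouncer",
        exc_info=True,
    )
```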
It should be possible to reproduce and debug in a Zulip development environment after registering for the push notifications service, though I don't think Django will try to actually send those error reporting emails in that setting. | 2022-10-11T18:27:31 |
zulip/zulip | 23,264 | zulip__zulip-23264 | [
"23170"
] | abccc483f4298fbaed2b4623d20bb43f5c185f8f | diff --git a/zerver/lib/url_redirects.py b/zerver/lib/url_redirects.py
--- a/zerver/lib/url_redirects.py
+++ b/zerver/lib/url_redirects.py
@@ -21,6 +21,10 @@ class URLRedirect:
HELP_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
# Add URL redirects for help center documentation here:
+ URLRedirect(
+ "/help/add-custom-profile-fields",
+ "/help/custom-profile-fields",
+ ),
URLRedirect(
"/help/enable-enter-to-send",
"/help/mastering-the-compose-box#toggle-between-ctrl-enter-and-enter-to-send-a-message",
| Migrate "Add custom profile fields" -> "Custom profile fields"
Since the "Add custom profile fields" is not just about _adding_ fields, we should:
- [ ] Rename it to "Custom profile fields", including updating the URL accordingly.
- [ ] Update links to it, linking to #add-a-custom-profile-field where appropriate.
- [ ] Add a section on editing custom profile fields.
For simplicity, this can be done after #23169 is merged.
@drrosa I'm marking this as a release goal, but you can treat it as the lowest priority among the release goals assigned to you. Thanks!
| Hello @zulip/server-user-docs members, this issue was labeled with the "area: documentation (user)" label, so you may want to check it out!
<!-- areaLabelAddition -->
Hey @alya, I would like to work on this issue. Can you please assign it to me?
@akashthedeveloper This issue has already been assigned. See https://zulip.readthedocs.io/en/latest/overview/contributing.html for advice on picking an issue to work on. | 2022-10-17T22:05:59 |
|
zulip/zulip | 23,318 | zulip__zulip-23318 | [
"23267"
] | a2d68e90cc335d51812c77c30d0206c0c76cee3d | diff --git a/zerver/actions/message_edit.py b/zerver/actions/message_edit.py
--- a/zerver/actions/message_edit.py
+++ b/zerver/actions/message_edit.py
@@ -127,8 +127,8 @@ def maybe_send_resolve_topic_notifications(
old_topic: str,
new_topic: str,
changed_messages: List[Message],
-) -> bool:
- """Returns True if resolve topic notifications were in fact sent."""
+) -> Optional[int]:
+ """Returns resolved_topic_message_id if resolve topic notifications were in fact sent."""
# Note that topics will have already been stripped in check_update_message.
#
# This logic is designed to treat removing a weird "β ββ "
@@ -154,7 +154,7 @@ def maybe_send_resolve_topic_notifications(
# administrator can the messages in between. We consider this
# to be a fundamental risk of irresponsible message deletion,
# not a bug with the "resolve topics" feature.
- return False
+ return None
# Compute the users who either sent or reacted to messages that
# were moved via the "resolve topic' action. Only those users
@@ -172,7 +172,7 @@ def maybe_send_resolve_topic_notifications(
elif topic_unresolved:
notification_string = _("{user} has marked this topic as unresolved.")
- internal_send_stream_message(
+ resolved_topic_message_id = internal_send_stream_message(
sender,
stream,
new_topic,
@@ -182,7 +182,7 @@ def maybe_send_resolve_topic_notifications(
limit_unread_user_ids=affected_participant_ids,
)
- return True
+ return resolved_topic_message_id
def send_message_moved_breadcrumbs(
@@ -813,7 +813,7 @@ def user_info(um: UserMessage) -> Dict[str, Any]:
send_event(user_profile.realm, event, users_to_be_notified)
- sent_resolve_topic_notification = False
+ resolved_topic_message_id = None
if topic_name is not None and content is None and len(changed_messages) > 0:
# When stream is changed and topic is marked as resolved or unresolved
# in the same API request, resolved or unresolved notification should
@@ -824,7 +824,7 @@ def user_info(um: UserMessage) -> Dict[str, Any]:
stream_to_send_resolve_topic_notification = new_stream
assert stream_to_send_resolve_topic_notification is not None
- sent_resolve_topic_notification = maybe_send_resolve_topic_notifications(
+ resolved_topic_message_id = maybe_send_resolve_topic_notifications(
user_profile=user_profile,
stream=stream_to_send_resolve_topic_notification,
old_topic=orig_topic_name,
@@ -867,25 +867,57 @@ def user_info(um: UserMessage) -> Dict[str, Any]:
new_thread_notification_string = None
if send_notification_to_new_thread and (
new_stream is not None
- or not sent_resolve_topic_notification
+ or not resolved_topic_message_id
or (
pre_truncation_topic_name is not None
and orig_topic_name.lstrip(RESOLVED_TOPIC_PREFIX)
!= pre_truncation_topic_name.lstrip(RESOLVED_TOPIC_PREFIX)
)
):
- if moved_all_visible_messages:
+ stream_for_new_topic = new_stream if new_stream is not None else stream_being_edited
+ assert stream_for_new_topic.recipient_id is not None
+
+ new_topic = topic_name if topic_name is not None else orig_topic_name
+
+ changed_message_ids = [changed_message.id for changed_message in changed_messages]
+
+ # We calculate whether the user moved the entire topic
+ # using that user's own permissions, which is important to
+ # avoid leaking information about whether there are
+ # messages in the destination topic's deeper history that
+ # the acting user does not have permission to access.
+ #
+ # TODO: These queries are quite inefficient, in that we're
+ # fetching full copies of all the messages in the
+ # destination topic to answer the question of whether the
+ # current user has access to at least one such message.
+ #
+ # The main strength of the current implementation is that
+ # it reuses existing logic, which is good for keeping it
+ # correct as we maintain the codebase.
+ preexisting_topic_messages = messages_for_topic(
+ stream_for_new_topic.recipient_id, new_topic
+ ).exclude(id__in=[*changed_message_ids, resolved_topic_message_id])
+
+ visible_preexisting_messages = bulk_access_messages(
+ user_profile, preexisting_topic_messages, stream=stream_for_new_topic
+ )
+
+ no_visible_preexisting_messages = len(visible_preexisting_messages) == 0
+
+ if no_visible_preexisting_messages and moved_all_visible_messages:
new_thread_notification_string = gettext_lazy(
"This topic was moved here from {old_location} by {user}."
)
- elif changed_messages_count == 1:
- new_thread_notification_string = gettext_lazy(
- "A message was moved here from {old_location} by {user}."
- )
else:
- new_thread_notification_string = gettext_lazy(
- "{changed_messages_count} messages were moved here from {old_location} by {user}."
- )
+ if changed_messages_count == 1:
+ new_thread_notification_string = gettext_lazy(
+ "A message was moved here from {old_location} by {user}."
+ )
+ else:
+ new_thread_notification_string = gettext_lazy(
+ "{changed_messages_count} messages were moved here from {old_location} by {user}."
+ )
send_message_moved_breadcrumbs(
user_profile,
| diff --git a/zerver/tests/test_message_edit.py b/zerver/tests/test_message_edit.py
--- a/zerver/tests/test_message_edit.py
+++ b/zerver/tests/test_message_edit.py
@@ -1822,6 +1822,50 @@ def test_move_message_to_stream(self) -> None:
f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
)
+ def test_move_message_to_preexisting_topic(self) -> None:
+ (user_profile, old_stream, new_stream, msg_id, msg_id_lt) = self.prepare_move_topics(
+ "iago",
+ "test move stream",
+ "new stream",
+ "test",
+ # Set the user's translation language to German to test that
+ # it is overridden by the realm's default language.
+ "de",
+ )
+
+ self.send_stream_message(
+ sender=self.example_user("iago"),
+ stream_name="new stream",
+ topic_name="test",
+ content="Always here",
+ )
+
+ result = self.client_patch(
+ f"/json/messages/{msg_id}",
+ {
+ "stream_id": new_stream.id,
+ "propagate_mode": "change_all",
+ "send_notification_to_old_thread": "true",
+ },
+ HTTP_ACCEPT_LANGUAGE="de",
+ )
+
+ self.assert_json_success(result)
+
+ messages = get_topic_messages(user_profile, old_stream, "test")
+ self.assert_length(messages, 1)
+ self.assertEqual(
+ messages[0].content,
+ f"This topic was moved to #**new stream>test** by @_**Iago|{user_profile.id}**.",
+ )
+
+ messages = get_topic_messages(user_profile, new_stream, "test")
+ self.assert_length(messages, 5)
+ self.assertEqual(
+ messages[4].content,
+ f"3 messages were moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
+ )
+
def test_move_message_realm_admin_cant_move_to_another_realm(self) -> None:
user_profile = self.example_user("iago")
self.assertEqual(user_profile.role, UserProfile.ROLE_REALM_ADMINISTRATOR)
@@ -1971,6 +2015,44 @@ def test_move_message_to_stream_change_later(self) -> None:
f"2 messages were moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
)
+ def test_move_message_to_preexisting_topic_change_later(self) -> None:
+ (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
+ "iago", "test move stream", "new stream", "test"
+ )
+
+ self.send_stream_message(
+ sender=self.example_user("iago"),
+ stream_name="new stream",
+ topic_name="test",
+ content="Always here",
+ )
+
+ result = self.client_patch(
+ f"/json/messages/{msg_id_later}",
+ {
+ "stream_id": new_stream.id,
+ "propagate_mode": "change_later",
+ "send_notification_to_old_thread": "true",
+ },
+ )
+ self.assert_json_success(result)
+
+ messages = get_topic_messages(user_profile, old_stream, "test")
+ self.assert_length(messages, 2)
+ self.assertEqual(messages[0].id, msg_id)
+ self.assertEqual(
+ messages[1].content,
+ f"2 messages were moved from this topic to #**new stream>test** by @_**Iago|{user_profile.id}**.",
+ )
+
+ messages = get_topic_messages(user_profile, new_stream, "test")
+ self.assert_length(messages, 4)
+ self.assertEqual(messages[0].id, msg_id_later)
+ self.assertEqual(
+ messages[3].content,
+ f"2 messages were moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
+ )
+
def test_move_message_to_stream_change_later_all_moved(self) -> None:
(user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
"iago", "test move stream", "new stream", "test"
@@ -2001,6 +2083,43 @@ def test_move_message_to_stream_change_later_all_moved(self) -> None:
f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
)
+ def test_move_message_to_preexisting_topic_change_later_all_moved(self) -> None:
+ (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
+ "iago", "test move stream", "new stream", "test"
+ )
+
+ self.send_stream_message(
+ sender=self.example_user("iago"),
+ stream_name="new stream",
+ topic_name="test",
+ content="Always here",
+ )
+
+ result = self.client_patch(
+ f"/json/messages/{msg_id}",
+ {
+ "stream_id": new_stream.id,
+ "propagate_mode": "change_later",
+ "send_notification_to_old_thread": "true",
+ },
+ )
+ self.assert_json_success(result)
+
+ messages = get_topic_messages(user_profile, old_stream, "test")
+ self.assert_length(messages, 1)
+ self.assertEqual(
+ messages[0].content,
+ f"This topic was moved to #**new stream>test** by @_**Iago|{user_profile.id}**.",
+ )
+
+ messages = get_topic_messages(user_profile, new_stream, "test")
+ self.assert_length(messages, 5)
+ self.assertEqual(messages[0].id, msg_id)
+ self.assertEqual(
+ messages[4].content,
+ f"3 messages were moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
+ )
+
def test_move_message_to_stream_change_one(self) -> None:
(user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
"iago", "test move stream", "new stream", "test"
@@ -2032,6 +2151,44 @@ def test_move_message_to_stream_change_one(self) -> None:
f"A message was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
)
+ def test_move_message_to_preexisting_topic_change_one(self) -> None:
+ (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
+ "iago", "test move stream", "new stream", "test"
+ )
+
+ self.send_stream_message(
+ sender=self.example_user("iago"),
+ stream_name="new stream",
+ topic_name="test",
+ content="Always here",
+ )
+
+ result = self.client_patch(
+ "/json/messages/" + str(msg_id_later),
+ {
+ "stream_id": new_stream.id,
+ "propagate_mode": "change_one",
+ "send_notification_to_old_thread": "true",
+ },
+ )
+ self.assert_json_success(result)
+
+ messages = get_topic_messages(user_profile, old_stream, "test")
+ self.assert_length(messages, 3)
+ self.assertEqual(messages[0].id, msg_id)
+ self.assertEqual(
+ messages[2].content,
+ f"A message was moved from this topic to #**new stream>test** by @_**Iago|{user_profile.id}**.",
+ )
+
+ messages = get_topic_messages(user_profile, new_stream, "test")
+ self.assert_length(messages, 3)
+ self.assertEqual(messages[0].id, msg_id_later)
+ self.assertEqual(
+ messages[2].content,
+ f"A message was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
+ )
+
def test_move_message_to_stream_change_all(self) -> None:
(user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
"iago", "test move stream", "new stream", "test"
@@ -2062,6 +2219,43 @@ def test_move_message_to_stream_change_all(self) -> None:
f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
)
+ def test_move_message_to_preexisting_topic_change_all(self) -> None:
+ (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
+ "iago", "test move stream", "new stream", "test"
+ )
+
+ self.send_stream_message(
+ sender=self.example_user("iago"),
+ stream_name="new stream",
+ topic_name="test",
+ content="Always here",
+ )
+
+ result = self.client_patch(
+ "/json/messages/" + str(msg_id_later),
+ {
+ "stream_id": new_stream.id,
+ "propagate_mode": "change_all",
+ "send_notification_to_old_thread": "true",
+ },
+ )
+ self.assert_json_success(result)
+
+ messages = get_topic_messages(user_profile, old_stream, "test")
+ self.assert_length(messages, 1)
+ self.assertEqual(
+ messages[0].content,
+ f"This topic was moved to #**new stream>test** by @_**Iago|{user_profile.id}**.",
+ )
+
+ messages = get_topic_messages(user_profile, new_stream, "test")
+ self.assert_length(messages, 5)
+ self.assertEqual(messages[0].id, msg_id)
+ self.assertEqual(
+ messages[4].content,
+ f"3 messages were moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**.",
+ )
+
def test_move_message_between_streams_policy_setting(self) -> None:
(user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
"othello", "old_stream_1", "new_stream_1", "test"
@@ -2380,7 +2574,7 @@ def test_move_message_to_stream_and_topic(self) -> None:
"iago", "test move stream", "new stream", "test"
)
- with self.assert_database_query_count(53), cache_tries_captured() as cache_tries:
+ with self.assert_database_query_count(55), cache_tries_captured() as cache_tries:
result = self.client_patch(
f"/json/messages/{msg_id}",
{
| Change logic for Notification Bot messages
At present, whenever all the messages in a topic are moved, the Notification Bot posts a message that says: "This topic was moved here from (somewhere) by (someone)."
This message is confusing when the topic the message(s) are being moved _to_ already has other messages in it. We should therefore update the logic so that the "This topic was moved here from (somewhere) by (someone)." message is only used when (in addition to the current rules) there are no messages in the destination topic.
When there _are_ messages in the destination topic, we should use the other type of notification messages we use for moves, i.e. "A message was moved here from..." or "N messages were moved here from...", as appropriate.
No changes need to be made to the notifications on the original topic.
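A rough sketch of the intended decision, with made-up names (pseudo-logic to restate the rules above, not the actual implementation):
```python
def moved_here_notification(
    moved_all_visible_messages: bool,
    destination_had_messages: bool,
    moved_count: int,
) -> str:
    # Only claim the whole topic "was moved here" when the destination topic
    # had no messages before the move; otherwise use the per-message wording.
    if moved_all_visible_messages and not destination_had_messages:
        return "This topic was moved here from {old_location} by {user}."
    if moved_count == 1:
        return "A message was moved here from {old_location} by {user}."
    return "{changed_messages_count} messages were moved here from {old_location} by {user}."
```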
| 2022-10-22T03:37:30 |
|
zulip/zulip | 23,329 | zulip__zulip-23329 | [
"23132"
] | 530406e2ef4d98e14d7094389a9bff88cacc25f7 | diff --git a/zerver/lib/onboarding.py b/zerver/lib/onboarding.py
--- a/zerver/lib/onboarding.py
+++ b/zerver/lib/onboarding.py
@@ -154,7 +154,7 @@ def select_welcome_bot_response(human_response_lower: str) -> str:
)
+ "\n\n",
_(
- "Check out [Recent conversations](#recent_topics) to see what's happening! "
+ "Check out [Recent conversations](#recent) to see what's happening! "
'You can return to this conversation by clicking "Private messages" in the upper left.'
),
]
| diff --git a/frontend_tests/node_tests/hashchange.js b/frontend_tests/node_tests/hashchange.js
--- a/frontend_tests/node_tests/hashchange.js
+++ b/frontend_tests/node_tests/hashchange.js
@@ -205,6 +205,19 @@ run_test("hash_interactions", ({override}) => {
[floating_recipient_bar, "update"],
]);
+ // Test old "#recent_topics" hash redirects to "#recent".
+ recent_topics_ui_shown = false;
+ window.location.hash = "#recent_topics";
+
+ helper.clear_events();
+ $window_stub.trigger("hashchange");
+ assert.equal(recent_topics_ui_shown, true);
+ helper.assert_events([
+ [overlays, "close_for_hash_change"],
+ [message_viewport, "stop_auto_scrolling"],
+ ]);
+ assert.equal(window.location.hash, "#recent");
+
window.location.hash = "#narrow/stream/Denmark";
helper.clear_events();
diff --git a/zerver/tests/test_tutorial.py b/zerver/tests/test_tutorial.py
--- a/zerver/tests/test_tutorial.py
+++ b/zerver/tests/test_tutorial.py
@@ -98,7 +98,7 @@ def test_response_to_pm_for_topic(self) -> None:
expected_response = (
"In Zulip, topics [tell you what a message is about](/help/streams-and-topics). "
"They are light-weight subjects, very similar to the subject line of an email.\n\n"
- "Check out [Recent conversations](#recent_topics) to see what's happening! "
+ "Check out [Recent conversations](#recent) to see what's happening! "
'You can return to this conversation by clicking "Private messages" in the upper left.'
)
self.assertEqual(most_recent_message(user).content, expected_response)
| Rename Recent topics to Recent conversations
Once #19449 has been resolved, we should rename Recent topics to Recent conversations across the board. This includes the left sidebar, documentation, settings menus, marketing pages, and anywhere else this term may be used.
| Hello @zulip/server-misc, @zulip/server-sidebars, @zulip/server-user-docs members, this issue was labeled with the "area: left-sidebar", "area: documentation (user)", "area: portico" labels, so you may want to check it out!
<!-- areaLabelAddition -->
@zulipbot claim
Hello @akarsh-jain-790!
Thanks for your interest in Zulip! You have attempted to claim an issue without the label "help wanted". You can only claim and submit pull requests for issues with the [help wanted](https://github.com/zulip/zulip/issues?q=is%3Aopen+is%3Aissue+no%3Aassignee+label%3A%22help+wanted%22) label.
If this is your first time here, we recommend reading our [guide for new contributors](https://zulip.readthedocs.io/en/latest/overview/contributing.html) before getting started.
@laurynmm adding this one to your list.
Just to clarify as I start prep work for these updates: is the URL hash of `#recent_topics` part of this task/issue (see screenshot below)? Or is this just updating the text instances of "Recent topics" as user-facing text in the web-app and documentation?
I feel like doing the URL hash change will be more involved than just the text updates after reading through [this part](https://zulip.readthedocs.io/en/latest/subsystems/hashchange-system.html) of the subsystems documentation.
---
**Recent topics URL screenshot**:

I think we probably do want to change that. My proposal would be to change it to just `#recent`, with semi-permanent `hashchange.js` code (with a TODO/compatibility comment noting that making the `reload.js` logic work after upgrades for self-hosted systems means we need to keep this logic for a couple years) for treating the old `#recent_topics` as `#recent` and then rewriting the URL. And then we'll likely need a database migration to change `#recent_topics` to `#recent` in the "default view" columns in both UserProfile and RealmUserDefault, since that stores the default views as strings.
That hashchange logic change probably makes sense to be its own commit.
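For the database part of that proposal, the migration would presumably look something like the sketch below (illustrative only: the dependency name is a placeholder, and this assumes the "default view" columns store the bare strings `recent_topics`/`recent` rather than URL hashes):
```python
from django.db import migrations


def rename_recent_topics_default_view(apps, schema_editor):
    # Assumed field name; both UserProfile and RealmUserDefault carry the
    # chosen default view as a string column.
    for model_name in ("UserProfile", "RealmUserDefault"):
        model = apps.get_model("zerver", model_name)
        model.objects.filter(default_view="recent_topics").update(default_view="recent")


class Migration(migrations.Migration):
    dependencies = [
        ("zerver", "XXXX_previous_migration"),  # placeholder
    ]

    operations = [
        migrations.RunPython(
            rename_recent_topics_default_view, reverse_code=migrations.RunPython.noop
        ),
    ]
```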
@laurynmm one thought I had is that I think we don't want to deploy the Recent topics addition of private messages to Zulip Cloud or include in a beta before we change the "Recent topics" strings to "Recent conversations"; but changing the URL is probably not a blocker. So maybe it makes sense to do this in the form of two PRs, first one to just change the strings and a second to change the less visible (but more complex to change) details.
Sounds great to me! | 2022-10-24T13:32:22 |
zulip/zulip | 23,362 | zulip__zulip-23362 | [
"23344"
] | 9ed1c79f1b94d4ef7d7432c9c3058bb60f3c7b4e | diff --git a/zerver/views/auth.py b/zerver/views/auth.py
--- a/zerver/views/auth.py
+++ b/zerver/views/auth.py
@@ -796,9 +796,11 @@ def login_page(
is_preview = "preview" in request.GET
if settings.TWO_FACTOR_AUTHENTICATION_ENABLED:
if request.user.is_authenticated and is_2fa_verified(request.user):
- return HttpResponseRedirect(request.user.realm.uri)
+ redirect_to = get_safe_redirect_to(next, request.user.realm.uri)
+ return HttpResponseRedirect(redirect_to)
elif request.user.is_authenticated and not is_preview:
- return HttpResponseRedirect(request.user.realm.uri)
+ redirect_to = get_safe_redirect_to(next, request.user.realm.uri)
+ return HttpResponseRedirect(redirect_to)
if is_subdomain_root_or_alias(request) and settings.ROOT_DOMAIN_LANDING_PAGE:
redirect_url = reverse("realm_redirect")
if request.GET:
diff --git a/zerver/views/registration.py b/zerver/views/registration.py
--- a/zerver/views/registration.py
+++ b/zerver/views/registration.py
@@ -2,10 +2,10 @@
import urllib
from contextlib import suppress
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
-from urllib.parse import urlencode
+from urllib.parse import urlencode, urljoin
from django.conf import settings
-from django.contrib.auth import authenticate, get_backends
+from django.contrib.auth import REDIRECT_FIELD_NAME, authenticate, get_backends
from django.contrib.sessions.backends.base import SessionBase
from django.core import validators
from django.core.exceptions import ValidationError
@@ -83,7 +83,6 @@
create_preregistration_user,
finish_desktop_flow,
finish_mobile_flow,
- get_safe_redirect_to,
redirect_and_log_into_subdomain,
redirect_to_deactivation_notice,
)
@@ -988,7 +987,13 @@ def realm_redirect(request: HttpRequest, next: str = REQ(default="")) -> HttpRes
if form.is_valid():
subdomain = form.cleaned_data["subdomain"]
realm = get_realm(subdomain)
- redirect_to = get_safe_redirect_to(next, realm.uri)
+ redirect_to = urljoin(realm.uri, settings.HOME_NOT_LOGGED_IN)
+
+ if next:
+ redirect_to = append_url_query_string(
+ redirect_to, urlencode({REDIRECT_FIELD_NAME: next})
+ )
+
return HttpResponseRedirect(redirect_to)
else:
form = RealmRedirectForm()
| diff --git a/zerver/tests/test_auth_backends.py b/zerver/tests/test_auth_backends.py
--- a/zerver/tests/test_auth_backends.py
+++ b/zerver/tests/test_auth_backends.py
@@ -420,7 +420,7 @@ def test_login_preview(self) -> None:
result = self.client_get("/login/")
self.assertEqual(result.status_code, 302)
- self.assertEqual(result["Location"], "http://zulip.testserver")
+ self.assertEqual(result["Location"], "http://zulip.testserver/")
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipDummyBackend",))
def test_no_backend_enabled(self) -> None:
@@ -4988,7 +4988,7 @@ def totp(*args: Any, **kwargs: Any) -> int:
# already logged in.
result = self.client_get("/accounts/login/")
self.assertEqual(result.status_code, 302)
- self.assertEqual(result["Location"], "http://zulip.testserver")
+ self.assertEqual(result["Location"], "http://zulip.testserver/")
class TestDevAuthBackend(ZulipTestCase):
diff --git a/zerver/tests/test_signup.py b/zerver/tests/test_signup.py
--- a/zerver/tests/test_signup.py
+++ b/zerver/tests/test_signup.py
@@ -1049,7 +1049,7 @@ def test_login_page_redirects_logged_in_user(self) -> None:
"""
self.login("cordelia")
response = self.client_get("/login/")
- self.assertEqual(response["Location"], "http://zulip.testserver")
+ self.assertEqual(response["Location"], "http://zulip.testserver/")
def test_options_request_to_login_page(self) -> None:
response = self.client_options("/login/")
@@ -1067,7 +1067,7 @@ def test_login_page_redirects_logged_in_user_under_2fa(self) -> None:
self.login_2fa(user_profile)
response = self.client_get("/login/")
- self.assertEqual(response["Location"], "http://zulip.testserver")
+ self.assertEqual(response["Location"], "http://zulip.testserver/")
def test_start_two_factor_auth(self) -> None:
request = HostRequestMock()
@@ -3962,6 +3962,14 @@ def test_login_page_redirects_for_root_domain(self, mock_get_host: MagicMock) ->
self.assertEqual(result.status_code, 302)
self.assertEqual(result["Location"], "/accounts/go/?next=%2Fupgrade%2F")
+ def test_login_page_redirects_using_next_when_already_authenticated(self) -> None:
+ hamlet = self.example_user("hamlet")
+ self.login("hamlet")
+
+ result = self.client_get("/login/", {"next": "/upgrade/"})
+ self.assertEqual(result.status_code, 302)
+ self.assertEqual(result["Location"], f"{hamlet.realm.uri}/upgrade/")
+
@patch("django.http.HttpRequest.get_host")
def test_login_page_works_without_subdomains(self, mock_get_host: MagicMock) -> None:
mock_get_host.return_value = "www.testserver"
@@ -4227,7 +4235,7 @@ def totp(*args: Any, **kwargs: Any) -> int:
# Going to login page should redirect to '/' if user is already
# logged in.
result = self.client_get("/accounts/login/")
- self.assertEqual(result["Location"], "http://zulip.testserver")
+ self.assertEqual(result["Location"], "http://zulip.testserver/")
class NameRestrictionsTest(ZulipTestCase):
@@ -4242,7 +4250,7 @@ def test_realm_redirect_without_next_param(self) -> None:
result = self.client_post("/accounts/go/", {"subdomain": "zephyr"})
self.assertEqual(result.status_code, 302)
- self.assertEqual(result["Location"], "http://zephyr.testserver")
+ self.assertEqual(result["Location"], "http://zephyr.testserver/login/")
result = self.client_post("/accounts/go/", {"subdomain": "invalid"})
self.assert_in_success_response(["We couldn't find that Zulip organization."], result)
@@ -4255,4 +4263,4 @@ def test_realm_redirect_with_next_param(self) -> None:
result = self.client_post("/accounts/go/?next=billing", {"subdomain": "lear"})
self.assertEqual(result.status_code, 302)
- self.assertEqual(result["Location"], "http://lear.testserver/billing")
+ self.assertEqual(result["Location"], "http://lear.testserver/login/?next=billing")
| "Log in to your organization" page should always take user to login page
At present, entering an organization with [public access](https://zulip.com/help/public-access-option) enabled into https://zulip.com/accounts/go/ takes the user to the logged out view of that page.
However, it's much more likely that the user in this situation wants to log in, not to see the logged-out view. Thus, clicking "Next" should always go to the login page (`/login`), never the public view.
[CZO discussion](https://chat.zulip.org/#narrow/stream/101-design/topic/log.20in.20flow.20w.2F.20web-public/near/1454922)
| My [previous PR](https://github.com/zulip/zulip/pull/23225) is awaiting **Integration Review**. In the meantime, I am working on this issue. Thanks.
Sure, assigning this issue to you -- thanks @prakhar1144 ! | 2022-10-27T17:18:19 |
zulip/zulip | 23,366 | zulip__zulip-23366 | [
"22538"
] | 1176f1e674663c6d13acad843edc6adf1d083804 | diff --git a/zerver/lib/email_notifications.py b/zerver/lib/email_notifications.py
--- a/zerver/lib/email_notifications.py
+++ b/zerver/lib/email_notifications.py
@@ -27,6 +27,7 @@
from zerver.lib.queue import queue_json_publish
from zerver.lib.send_email import FromAddress, send_future_email
from zerver.lib.soft_deactivation import soft_reactivate_if_personal_notification
+from zerver.lib.topic import get_topic_resolution_and_bare_name
from zerver.lib.types import DisplayRecipientT
from zerver.lib.url_encoding import (
huddle_narrow_url,
@@ -481,10 +482,11 @@ def do_send_missedmessage_events_reply_in_zulip(
)
message = missed_messages[0]["message"]
stream = Stream.objects.only("id", "name").get(id=message.recipient.type_id)
- stream_header = f"{stream.name} > {message.topic_name()}"
+ topic_resolved, topic_name = get_topic_resolution_and_bare_name(message.topic_name())
context.update(
stream_name=stream.name,
- stream_header=stream_header,
+ topic_name=topic_name,
+ topic_resolved=topic_resolved,
)
else:
raise AssertionError("Invalid messages!")
diff --git a/zerver/lib/topic.py b/zerver/lib/topic.py
--- a/zerver/lib/topic.py
+++ b/zerver/lib/topic.py
@@ -270,3 +270,17 @@ def get_topic_history_for_stream(
cursor.close()
return generate_topic_history_from_db_rows(rows)
+
+
+def get_topic_resolution_and_bare_name(stored_name: str) -> Tuple[bool, str]:
+ """
+ Resolved topics are denoted only by a title change, not by a boolean toggle in a database column. This
+ method inspects the topic name and returns a tuple of:
+
+ - Whether the topic has been resolved
+ - The topic name with the resolution prefix, if present in stored_name, removed
+ """
+ if stored_name.startswith(RESOLVED_TOPIC_PREFIX):
+ return (True, stored_name[len(RESOLVED_TOPIC_PREFIX) :])
+
+ return (False, stored_name)
| diff --git a/zerver/lib/test_classes.py b/zerver/lib/test_classes.py
--- a/zerver/lib/test_classes.py
+++ b/zerver/lib/test_classes.py
@@ -54,6 +54,7 @@
from zerver.decorator import do_two_factor_login
from zerver.lib.cache import bounce_key_prefix_for_testing
from zerver.lib.initial_password import initial_password
+from zerver.lib.message import access_message
from zerver.lib.notification_data import UserMessageNotificationsData
from zerver.lib.rate_limiter import bounce_redis_key_prefix_for_testing
from zerver.lib.sessions import get_session_dict_user
@@ -71,7 +72,7 @@
tee_stdout_and_find_extra_console_output,
)
from zerver.lib.test_helpers import find_key_by_email, instrument_url, queries_captured
-from zerver.lib.topic import filter_by_topic_name_via_message
+from zerver.lib.topic import RESOLVED_TOPIC_PREFIX, filter_by_topic_name_via_message
from zerver.lib.user_groups import get_system_user_group_for_user
from zerver.lib.users import get_api_key
from zerver.lib.validator import check_string
@@ -1315,6 +1316,26 @@ def check_user_subscribed_only_to_streams(self, user_name: str, streams: List[St
for x, y in zip(subscribed_streams, streams):
self.assertEqual(x["name"], y.name)
+ def resolve_topic_containing_message(
+ self,
+ acting_user: UserProfile,
+ target_message_id: int,
+ **extra: str,
+ ) -> "TestHttpResponse":
+ """
+ Mark all messages within the topic associated with message `target_message_id` as resolved.
+ """
+ message, _ = access_message(acting_user, target_message_id)
+ return self.api_patch(
+ acting_user,
+ f"/api/v1/messages/{target_message_id}",
+ {
+ "topic": RESOLVED_TOPIC_PREFIX + message.topic_name(),
+ "propagate_mode": "change_all",
+ },
+ **extra,
+ )
+
def send_webhook_payload(
self,
user_profile: UserProfile,
diff --git a/zerver/tests/test_email_notifications.py b/zerver/tests/test_email_notifications.py
--- a/zerver/tests/test_email_notifications.py
+++ b/zerver/tests/test_email_notifications.py
@@ -626,6 +626,29 @@ def _extra_context_in_missed_stream_messages_mention_two_senders(
msg_id, verify_body_include, email_subject, send_as_user, trigger="mentioned"
)
+ def _resolved_topic_missed_stream_messages_thread_friendly(self, send_as_user: bool) -> None:
+ topic_name = "threading and so forth"
+ othello_user = self.example_user("othello")
+ msg_id = -1
+ for i in range(0, 3):
+ msg_id = self.send_stream_message(
+ othello_user,
+ "Denmark",
+ content=str(i),
+ topic_name=topic_name,
+ )
+
+ self.assert_json_success(self.resolve_topic_containing_message(othello_user, msg_id))
+
+ verify_body_include = [
+ "Othello, the Moor of Venice: > 0 > 1 > 2 -- ",
+ "You are receiving this because you have email notifications enabled for #Denmark.",
+ ]
+ email_subject = "[resolved] #Denmark > threading and so forth"
+ self._test_cases(
+ msg_id, verify_body_include, email_subject, send_as_user, trigger="stream_email_notify"
+ )
+
def _extra_context_in_missed_personal_messages(
self,
send_as_user: bool,
@@ -1028,6 +1051,13 @@ def test_extra_context_in_missed_stream_messages_email_notify_as_user(self) -> N
def test_extra_context_in_missed_stream_messages_email_notify(self) -> None:
self._extra_context_in_missed_stream_messages_email_notify(False)
+ @override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
+ def test_resolved_topic_missed_stream_messages_thread_friendly_as_user(self) -> None:
+ self._resolved_topic_missed_stream_messages_thread_friendly(True)
+
+ def test_resolved_topic_missed_stream_messages_thread_friendly(self) -> None:
+ self._resolved_topic_missed_stream_messages_thread_friendly(False)
+
@override_settings(EMAIL_GATEWAY_PATTERN="")
def test_reply_warning_in_missed_personal_messages(self) -> None:
self._reply_warning_in_missed_personal_messages(False)
diff --git a/zerver/tests/test_message_edit.py b/zerver/tests/test_message_edit.py
--- a/zerver/tests/test_message_edit.py
+++ b/zerver/tests/test_message_edit.py
@@ -3006,12 +3006,9 @@ def test_mark_topic_as_resolved(self) -> None:
self.assert_json_error(result, "Nothing to change")
resolved_topic = RESOLVED_TOPIC_PREFIX + original_topic
- result = self.client_patch(
- "/json/messages/" + str(id1),
- {
- "topic": resolved_topic,
- "propagate_mode": "change_all",
- },
+ result = self.resolve_topic_containing_message(
+ admin_user,
+ id1,
HTTP_ACCEPT_LANGUAGE="de",
)
| Group email notifications for resolved/unresolved topics into the same thread
Resolving topics changes which thread messages are grouped into in email notifications, which is annoying for users. We should figure out how to fix this.
As [discussed on CZO](https://chat.zulip.org/#narrow/stream/137-feedback/topic/resolved.20topics.20in.20email.20digests/near/1407208), the first step is to investigate whether or not emails with the subject `[resolved] {original topic}` get grouped together with emails with the subject `{original topic}` by email clients. The results of this investigation should be posted on the [CZO discussion thread](https://chat.zulip.org/#narrow/stream/137-feedback/topic/resolved.20topics.20in.20email.20digests).
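For context on why the thread changes at all, here is a minimal sketch, assuming (as today) that the notification subject is built from the stored topic name, which gains the resolved-topic prefix when a topic is resolved:
```python
RESOLVED_TOPIC_PREFIX = "\u2714 "  # the check-mark marker prepended to resolved topic names


def email_subject(stream_name: str, stored_topic: str) -> str:
    # Simplified stand-in for how missed-message email subjects are formed.
    return f"#{stream_name} > {stored_topic}"


print(email_subject("Denmark", "budget planning"))
print(email_subject("Denmark", RESOLVED_TOPIC_PREFIX + "budget planning"))
# The second subject differs from the first, so subject-threading email
# clients put the post-resolution messages into a brand-new thread.
```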
| Hello @zulip/server-development members, this issue was labeled with the "area: emails" label, so you may want to check it out!
<!-- areaLabelAddition -->
Updated the CZO thread over the past couple evenings with some investigations; at this point I have a test case written so I think it's sane to
@zulipbot claim
Welcome to Zulip, @klardotsh! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here's some tips to get you off to a good start:
- Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
- [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
| 2022-10-27T22:38:42 |
zulip/zulip | 23,388 | zulip__zulip-23388 | [
"23368"
] | 925fa4ca50215e7d4ba0e7d72b1b77ad3f0b5a38 | diff --git a/zerver/views/custom_profile_fields.py b/zerver/views/custom_profile_fields.py
--- a/zerver/views/custom_profile_fields.py
+++ b/zerver/views/custom_profile_fields.py
@@ -33,7 +33,7 @@
check_union,
validate_select_field_data,
)
-from zerver.models import CustomProfileField, UserProfile, custom_profile_fields_for_realm
+from zerver.models import CustomProfileField, Realm, UserProfile, custom_profile_fields_for_realm
def list_realm_custom_profile_fields(
@@ -135,8 +135,10 @@ def update_only_display_in_profile_summary(
return True
-def display_in_profile_summary_limit_reached(profile_field_id: Optional[int] = None) -> bool:
- query = CustomProfileField.objects.filter(display_in_profile_summary=True)
+def display_in_profile_summary_limit_reached(
+ realm: Realm, profile_field_id: Optional[int] = None
+) -> bool:
+ query = CustomProfileField.objects.filter(realm=realm, display_in_profile_summary=True)
if profile_field_id is not None:
query = query.exclude(id=profile_field_id)
return query.count() >= CustomProfileField.MAX_DISPLAY_IN_PROFILE_SUMMARY_FIELDS
@@ -153,7 +155,7 @@ def create_realm_custom_profile_field(
field_type: int = REQ(json_validator=check_int),
display_in_profile_summary: bool = REQ(default=False, json_validator=check_bool),
) -> HttpResponse:
- if display_in_profile_summary and display_in_profile_summary_limit_reached():
+ if display_in_profile_summary and display_in_profile_summary_limit_reached(user_profile.realm):
raise JsonableError(
_("Only 2 custom profile fields can be displayed in the profile summary.")
)
@@ -213,7 +215,9 @@ def update_realm_custom_profile_field(
except CustomProfileField.DoesNotExist:
raise JsonableError(_("Field id {id} not found.").format(id=field_id))
- if display_in_profile_summary and display_in_profile_summary_limit_reached(field.id):
+ if display_in_profile_summary and display_in_profile_summary_limit_reached(
+ user_profile.realm, field.id
+ ):
raise JsonableError(
_("Only 2 custom profile fields can be displayed in the profile summary.")
)
| Save failed: Only 2 custom profile fields can be displayed in the profile summary.
I tried to set a custom profile field to be displayed in the profile summary. However, it fails with the error message "Save failed: Only 2 custom profile fields can be displayed in the profile summary." even though no custom profile field has been set to be displayed.
https://user-images.githubusercontent.com/2891235/198561250-59519f45-85a4-4490-8f72-9fae343930be.mov
Server: hosted Zulip server
```console
Zulip Server
Version 6.0-dev-2021-gd461383c1f
Forked from upstream at 6.0-dev-2009-g0affc7ac6f
```
Client: Zulip Desktop v5.9.3
| @yogesh-sirsat , @sahil839 Any ideas on what might be going on here / how we can debug it?
Hello @zulip/server-settings members, this issue was labeled with the "area: settings (admin/org)" label, so you may want to check it out!
<!-- areaLabelAddition -->
Hey @caizixian, is it the first time you are setting custom profile fields to display in profile summary?
I can't reproduce this error on main, but it is reproducible on domainname.zulipchat.com
P.S. Not a frontend bug I think because I tried sending an API request and got the same error.
It is not even reproducible on branch 6.0-beta1, @alya @sahil839 which branch do we use for subdomain.zulipchat.com?
@yogesh-sirsat
> is it the first time you are setting custom profile fields to display in profile summary?
Yes
> I can't reproduce this error on main, but it is reproducible on domainname.zulipchat.com
Yes, I had the error on one of the *domainname*.zulipchat.com | 2022-10-30T10:30:16 |
|
zulip/zulip | 23,473 | zulip__zulip-23473 | [
"21876"
] | 63355c2a34e1315be6498ebc9d02d4b7f1a4f4be | diff --git a/zerver/views/documentation.py b/zerver/views/documentation.py
--- a/zerver/views/documentation.py
+++ b/zerver/views/documentation.py
@@ -110,6 +110,15 @@ def get_path(self, article: str) -> DocumentationArticle:
endpoint_method=None,
)
+ if path == "/zerver/api/api-doc-template.md":
+ # This template shouldn't be accessed directly.
+ return DocumentationArticle(
+ article_path=self.path_template % ("missing",),
+ article_http_status=404,
+ endpoint_path=None,
+ endpoint_method=None,
+ )
+
# The following is a somewhat hacky approach to extract titles from articles.
# Hack: `context["article"] has a leading `/`, so we use + to add directories.
article_path = os.path.join(settings.DEPLOY_ROOT, "templates") + path
| diff --git a/zerver/tests/test_docs.py b/zerver/tests/test_docs.py
--- a/zerver/tests/test_docs.py
+++ b/zerver/tests/test_docs.py
@@ -137,6 +137,14 @@ def test_api_doc_endpoints(self) -> None:
)
self.assertEqual(result.status_code, 404)
+ result = self.client_get(
+ # This template shouldn't be accessed directly.
+ "/api/api-doc-template",
+ follow=True,
+ HTTP_X_REQUESTED_WITH="XMLHttpRequest",
+ )
+ self.assertEqual(result.status_code, 404)
+
# Test some API doc endpoints for specific content and metadata.
self._test("/api/", "The Zulip API")
self._test("/api/api-keys", "be careful with it")
| We should 404 on /api/api-doc-template, not 500
Requesting `/api/api-doc-template` directly hits this code:
https://github.com/zulip/zulip/blob/c2148dc4d9e2ba1ea96d8ee0c972fcf77af43e8d/zerver/views/documentation.py#L190-L196
Those `assert`s cause the request to 500, since `/zerver/api/api-doc-template.md` is an internal URL which is not meant to be accessed directly.
Right above these lines:
https://github.com/zulip/zulip/blob/c2148dc4d9e2ba1ea96d8ee0c972fcf77af43e8d/zerver/views/documentation.py#L107-L110
...we should add a check for:
```python3
if path == "/zerver/api/api-doc-template.md":
# This shouldn't be accessible directly
return DocumentationArticle(
article_path=self.path_template % ("missing",),
article_http_status=404,
endpoint_path=None,
endpoint_method=None,
)
| ha! @alexmv did you see this in the CZO logs today? I ran a test today while working on [PR 21835](https://github.com/zulip/zulip/pull/21835) to provide 100% test coverage for a dozen files, including the API docs.
Anyway, if you want to play reviewer on that PR, I can trivially include this (and the test case...) :-)
Hello @zulip/server-api members, this issue was labeled with the "area: documentation (api and integrations)" label, so you may want to check it out!
<!-- areaLabelAddition -->
Yeah feel free to submit a PR for this @asah!
And yes, feel free to poke me on #21835 once it's ready for review.
Hey can I contribute on this issue too?
@zulipbot claim
@zulipbot unclaim
@zulipbot claim
Hello @asah, it looks like you've currently claimed 1 issue in this repository. We encourage new contributors to focus their efforts on at most 1 issue at a time, so please complete your work on your other claimed issues before trying to claim this issue again.
We look forward to your valuable contributions!
@zulipbot claim
@alexmv weird - I can't reproduce this, e.g. these both return 404 today:
https://chat.zulip.org/api/api-doc-template.md
https://chat.zulip.org/zerver/api/api-doc-template.md
https://zulip.com/api/api-doc-template replicates, as does https://chat.zulip.org/api/api-doc-template
@karanka You have been unassigned from this issue because you have not made any updates for over 14 days. Please feel free to reclaim the issue if you decide to pick up again. Thanks!
Got it
On Fri, Apr 22, 2022, 8:36 PM Alex Vandiver ***@***.***>
wrote:
> https://zulip.com/api/api-doc-template replicates
>
> β
> Reply to this email directly, view it on GitHub
> <https://github.com/zulip/zulip/issues/21876#issuecomment-1107072759>, or
> unsubscribe
> <https://github.com/notifications/unsubscribe-auth/AABCFYVNRAKRJIHOPVC7AG3VGNAZZANCNFSM5UADOAZA>
> .
> You are receiving this because you were mentioned.Message ID:
> ***@***.***>
>
| 2022-11-07T19:41:21 |
zulip/zulip | 23,522 | zulip__zulip-23522 | [
"23517"
] | 2b0a4aad50365e9653bb0c154b35a68405195e5e | diff --git a/zerver/models.py b/zerver/models.py
--- a/zerver/models.py
+++ b/zerver/models.py
@@ -1485,10 +1485,13 @@ class UserBaseSettings(models.Model):
created after the change.
"""
- # UI settings
+ ### Generic UI settings
enter_sends = models.BooleanField(default=False)
- # display settings
+ ### Display settings. ###
+ # left_side_userlist was removed from the UI in Zulip 6.0; the
+ # database model is being temporarily preserved in case we want to
+ # restore a version of the setting, preserving who had it enabled.
left_side_userlist = models.BooleanField(default=False)
default_language = models.CharField(default="en", max_length=MAX_LANGUAGE_ID_LENGTH)
# This setting controls which view is rendered first when Zulip loads.
| diff --git a/docs/testing/manual-testing.md b/docs/testing/manual-testing.md
--- a/docs/testing/manual-testing.md
+++ b/docs/testing/manual-testing.md
@@ -486,7 +486,6 @@ Do these tasks as Cordelia.
- Display settings
- Right now, these unfortunately require reloads to take effect.
- Default language (change to Spanish)
- - Show user list on left sidebar in narrow windows (verify by making window thinner)
- 24-hour time (and then test going back to AM/PM)
- Notifications
- Stream message
diff --git a/frontend_tests/node_tests/dispatch.js b/frontend_tests/node_tests/dispatch.js
--- a/frontend_tests/node_tests/dispatch.js
+++ b/frontend_tests/node_tests/dispatch.js
@@ -743,11 +743,6 @@ run_test("user_settings", ({override}) => {
dispatch(event);
assert_same(user_settings.default_language, "fr");
- event = event_fixtures.user_settings__left_side_userlist;
- user_settings.left_side_userlist = false;
- dispatch(event);
- assert_same(user_settings.left_side_userlist, true);
-
event = event_fixtures.user_settings__escape_navigates_to_default_view;
user_settings.escape_navigates_to_default_view = false;
let toggled = [];
diff --git a/frontend_tests/node_tests/i18n.js b/frontend_tests/node_tests/i18n.js
--- a/frontend_tests/node_tests/i18n.js
+++ b/frontend_tests/node_tests/i18n.js
@@ -100,7 +100,6 @@ run_test("tr_tag", ({mock_template}) => {
avatar_url: "http://example.com",
},
user_settings: {
- left_side_userlist: false,
twenty_four_hour_time: false,
enable_stream_desktop_notifications: false,
enable_stream_push_notifications: false,
diff --git a/frontend_tests/node_tests/lib/events.js b/frontend_tests/node_tests/lib/events.js
--- a/frontend_tests/node_tests/lib/events.js
+++ b/frontend_tests/node_tests/lib/events.js
@@ -898,13 +898,6 @@ exports.fixtures = {
value: true,
},
- user_settings__left_side_userlist: {
- type: "user_settings",
- op: "update",
- property: "left_side_userlist",
- value: true,
- },
-
user_settings__presence_disabled: {
type: "user_settings",
op: "update",
| Remove the "Show user list on left sidebar in narrow windows" display option
As [discussed on CZO](https://chat.zulip.org/#narrow/stream/101-design/topic/user.20list.20in.20left.20sidebar.20option/near/1460450), we have a "Show user list on left sidebar in narrow windows" display option, which puts the user list at the bottom of the left sidebar when the window is too narrow to display it on the right.
This option is not commonly used, and does not work very well with the addition of the new PMs section in the left sidebar. (Having two scrollbars in the sidebar looks quite awkward.) The new PMs section also addresses some of the use cases for always keeping the Users list visible, even in narrow windows.
We should therefore remove the "Show user list on left sidebar in narrow windows" display option (including all the code for implementing that setting). The help center page for this feature (https://zulip.com/help/move-the-users-list-to-the-left-sidebar) and any links to it should be dropped.
However, to make this decision reversible, we should keep (for now) the data for which users have turned on this feature.
| Hello @zulip/server-sidebars members, this issue was labeled with the "area: left-sidebar" label, so you may want to check it out!
<!-- areaLabelAddition -->
@amanagr It would be great if you could take this one on! | 2022-11-10T11:41:01 |
zulip/zulip | 23,544 | zulip__zulip-23544 | [
"23482"
] | e215015e8026a0e1d5116e1adad217d5568017d7 | diff --git a/zerver/actions/message_edit.py b/zerver/actions/message_edit.py
--- a/zerver/actions/message_edit.py
+++ b/zerver/actions/message_edit.py
@@ -134,14 +134,10 @@ def maybe_send_resolve_topic_notifications(
changed_messages: List[Message],
) -> bool:
"""Returns True if resolve topic notifications were in fact sent."""
-
# Note that topics will have already been stripped in check_update_message.
#
# This logic is designed to treat removing a weird "β ββ "
# prefix as unresolving the topic.
- if old_topic.lstrip(RESOLVED_TOPIC_PREFIX) != new_topic.lstrip(RESOLVED_TOPIC_PREFIX):
- return False
-
topic_resolved: bool = new_topic.startswith(RESOLVED_TOPIC_PREFIX) and not old_topic.startswith(
RESOLVED_TOPIC_PREFIX
)
@@ -542,6 +538,10 @@ def do_update_message(
user_id for user_id in new_stream_sub_ids if user_id not in old_stream_sub_ids
]
+ # We save the full topic name so that checks that require comparison
+ # between the original topic and the topic name passed into this function
+ # will not be affected by the potential truncation of topic_name below.
+ pre_truncation_topic_name = topic_name
if topic_name is not None:
topic_name = truncate_topic(topic_name)
target_message.set_topic_name(topic_name)
@@ -869,9 +869,9 @@ def user_info(um: UserMessage) -> Dict[str, Any]:
new_stream is not None
or not sent_resolve_topic_notification
or (
- topic_name is not None
+ pre_truncation_topic_name is not None
and orig_topic_name.lstrip(RESOLVED_TOPIC_PREFIX)
- != topic_name.lstrip(RESOLVED_TOPIC_PREFIX)
+ != pre_truncation_topic_name.lstrip(RESOLVED_TOPIC_PREFIX)
)
):
if moved_all_visible_messages:
| diff --git a/zerver/tests/test_message_edit.py b/zerver/tests/test_message_edit.py
--- a/zerver/tests/test_message_edit.py
+++ b/zerver/tests/test_message_edit.py
@@ -17,7 +17,7 @@
from zerver.actions.realm_settings import do_change_realm_plan_type, do_set_realm_property
from zerver.actions.streams import do_change_stream_post_policy, do_deactivate_stream
from zerver.actions.users import do_change_user_role
-from zerver.lib.message import MessageDict, has_message_access, messages_for_ids
+from zerver.lib.message import MessageDict, has_message_access, messages_for_ids, truncate_topic
from zerver.lib.test_classes import ZulipTestCase, get_topic_messages
from zerver.lib.test_helpers import cache_tries_captured, queries_captured
from zerver.lib.topic import RESOLVED_TOPIC_PREFIX, TOPIC_NAME
@@ -28,7 +28,16 @@
topic_is_muted,
)
from zerver.lib.utils import assert_is_not_none
-from zerver.models import Message, Realm, Stream, UserMessage, UserProfile, get_realm, get_stream
+from zerver.models import (
+ MAX_TOPIC_NAME_LENGTH,
+ Message,
+ Realm,
+ Stream,
+ UserMessage,
+ UserProfile,
+ get_realm,
+ get_stream,
+)
if TYPE_CHECKING:
from django.test.client import _MonkeyPatchedWSGIResponse as TestHttpResponse
@@ -2722,6 +2731,125 @@ def test_notify_no_topic_after_message_move(self) -> None:
self.assert_length(messages, 1)
self.assertEqual(messages[0].content, "First")
+ def test_notify_resolve_topic_long_name(self) -> None:
+ user_profile = self.example_user("hamlet")
+ self.login("hamlet")
+ stream = self.make_stream("public stream")
+ self.subscribe(user_profile, stream.name)
+ # Marking topics with a long name as resolved causes the new topic name to be truncated.
+ # We want to avoid having code paths believing that the topic is "moved" instead of
+ # "resolved" in this edge case.
+ topic_name = "a" * MAX_TOPIC_NAME_LENGTH
+ msg_id = self.send_stream_message(
+ user_profile, stream.name, topic_name=topic_name, content="First"
+ )
+
+ resolved_topic = RESOLVED_TOPIC_PREFIX + topic_name
+ result = self.client_patch(
+ "/json/messages/" + str(msg_id),
+ {
+ "topic": resolved_topic,
+ "propagate_mode": "change_all",
+ },
+ )
+ self.assert_json_success(result)
+
+ new_topic_name = truncate_topic(resolved_topic)
+ messages = get_topic_messages(user_profile, stream, new_topic_name)
+ self.assert_length(messages, 2)
+ self.assertEqual(messages[0].content, "First")
+ self.assertEqual(
+ messages[1].content,
+ f"@_**{user_profile.full_name}|{user_profile.id}** has marked this topic as resolved.",
+ )
+
+ # Note that we are removing the prefix from the already truncated topic,
+ # so unresolved_topic_name will not be the same as the original topic_name
+ unresolved_topic_name = new_topic_name.replace(RESOLVED_TOPIC_PREFIX, "")
+ result = self.client_patch(
+ "/json/messages/" + str(msg_id),
+ {
+ "topic": unresolved_topic_name,
+ "propagate_mode": "change_all",
+ },
+ )
+ self.assert_json_success(result)
+
+ messages = get_topic_messages(user_profile, stream, unresolved_topic_name)
+ self.assert_length(messages, 3)
+ self.assertEqual(
+ messages[2].content,
+ f"@_**{user_profile.full_name}|{user_profile.id}** has marked this topic as unresolved.",
+ )
+
+ def test_notify_resolve_and_move_topic(self) -> None:
+ user_profile = self.example_user("hamlet")
+ self.login("hamlet")
+ stream = self.make_stream("public stream")
+ topic = "test"
+ self.subscribe(user_profile, stream.name)
+
+ # Resolve a topic normally first
+ msg_id = self.send_stream_message(user_profile, stream.name, "foo", topic_name=topic)
+ resolved_topic = RESOLVED_TOPIC_PREFIX + topic
+ result = self.client_patch(
+ "/json/messages/" + str(msg_id),
+ {
+ "topic": resolved_topic,
+ "propagate_mode": "change_all",
+ },
+ )
+ self.assert_json_success(result)
+
+ messages = get_topic_messages(user_profile, stream, resolved_topic)
+ self.assert_length(messages, 2)
+ self.assertEqual(
+ messages[1].content,
+ f"@_**{user_profile.full_name}|{user_profile.id}** has marked this topic as resolved.",
+ )
+
+        # Test unresolving a topic while moving it (✔ test -> bar)
+ new_topic = "bar"
+ result = self.client_patch(
+ "/json/messages/" + str(msg_id),
+ {
+ "topic": new_topic,
+ "propagate_mode": "change_all",
+ },
+ )
+ self.assert_json_success(result)
+ messages = get_topic_messages(user_profile, stream, new_topic)
+ self.assert_length(messages, 4)
+ self.assertEqual(
+ messages[2].content,
+ f"@_**{user_profile.full_name}|{user_profile.id}** has marked this topic as unresolved.",
+ )
+ self.assertEqual(
+ messages[3].content,
+ f"This topic was moved here from #**public stream>β test** by @_**{user_profile.full_name}|{user_profile.id}**.",
+ )
+
+        # Now test moving the topic while also resolving it (bar -> ✔ baz)
+ new_resolved_topic = RESOLVED_TOPIC_PREFIX + "baz"
+ result = self.client_patch(
+ "/json/messages/" + str(msg_id),
+ {
+ "topic": new_resolved_topic,
+ "propagate_mode": "change_all",
+ },
+ )
+ self.assert_json_success(result)
+ messages = get_topic_messages(user_profile, stream, new_resolved_topic)
+ self.assert_length(messages, 6)
+ self.assertEqual(
+ messages[4].content,
+ f"@_**{user_profile.full_name}|{user_profile.id}** has marked this topic as resolved.",
+ )
+ self.assertEqual(
+ messages[5].content,
+ f"This topic was moved here from #**public stream>{new_topic}** by @_**{user_profile.full_name}|{user_profile.id}**.",
+ )
+
def parameterized_test_move_message_involving_private_stream(
self,
from_invite_only: bool,
| Resolving topics that are longer than 59 characters doesn't generate expected notices
I'm not sure how best to fix this, but if you try to mark as resolved a topic whose name is already 59-60 characters long, then we end up truncating the topic name (because there's no room to add the 2 extra characters of `RESOLVED_TOPIC_PREFIX`); this in turn means logic like the following can no longer tell a plain resolve/unresolve apart from a genuine topic rename:
```
new_thread_notification_string = None
if send_notification_to_new_thread and (
new_stream is not None
or not sent_resolve_topic_notification
or (
topic_name is not None
and orig_topic_name.lstrip(RESOLVED_TOPIC_PREFIX)
!= topic_name.lstrip(RESOLVED_TOPIC_PREFIX)
)
):
```
I'm not sure how best to fix this. I think it's possible we want to (1) change the topic truncation logic to use a single character `…` symbol rather than `...`, which might help some, and then do some sort of special logic that saves the raw topic name passed into the `message_edit` code path so that we can check whether it's a topic resolution pre-truncation.
```
if topic_name is not None:
topic_name = truncate_topic(topic_name)
```
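
To make the failure mode concrete, here is a minimal, self-contained sketch (not the real server code: the 60-character limit and the `...` truncation suffix are assumptions mirroring `truncate_topic`, and `RESOLVED_TOPIC_PREFIX` stands in for the `✔ ` prefix):
```
# Hypothetical stand-ins for the server's truncate_topic and the
# resolve-detection comparison quoted above; constants are assumptions.
MAX_TOPIC_NAME_LENGTH = 60
RESOLVED_TOPIC_PREFIX = "✔ "

def truncate_topic(topic: str) -> str:
    # Simplified truncation: keep room for a "..." suffix.
    if len(topic) > MAX_TOPIC_NAME_LENGTH:
        return topic[: MAX_TOPIC_NAME_LENGTH - 3] + "..."
    return topic

orig_topic_name = "a" * 60
# Resolving prepends the prefix, pushing the name over the limit.
topic_name = truncate_topic(RESOLVED_TOPIC_PREFIX + orig_topic_name)

# The comparison above is meant to be False for a pure resolve,
# but truncation makes the stripped names differ:
print(
    orig_topic_name.lstrip(RESOLVED_TOPIC_PREFIX)
    != topic_name.lstrip(RESOLVED_TOPIC_PREFIX)
)  # True -> misread as a topic move
```
With the truncated name, the stripped comparison reports a rename, so the "moved here" notification is sent even though the user only resolved the topic.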
| Hello @zulip/server-message-view members, this issue was labeled with the "area: message-editing" label, so you may want to check it out!
<!-- areaLabelAddition -->
| 2022-11-12T22:53:11 |