repo (stringclasses, 856 values) | pull_number (int64, 3-127k) | instance_id (stringlengths 12-58) | issue_numbers (sequencelengths 1-5) | base_commit (stringlengths 40) | patch (stringlengths 67-1.54M) | test_patch (stringlengths 0-107M) | problem_statement (stringlengths 3-307k) | hints_text (stringlengths 0-908k) | created_at (timestamp[s])
---|---|---|---|---|---|---|---|---|---|
zulip/zulip | 29,561 | zulip__zulip-29561 | [
"29350"
] | 5e671de1e75f837cce7280e474fe20c108bb49bc | diff --git a/zerver/lib/import_realm.py b/zerver/lib/import_realm.py
--- a/zerver/lib/import_realm.py
+++ b/zerver/lib/import_realm.py
@@ -1,3 +1,4 @@
+import collections
import logging
import os
import shutil
@@ -155,6 +156,22 @@
"attachment_path": {},
}
+message_id_to_attachments: Dict[str, Dict[int, List[str]]] = {
+ "zerver_message": collections.defaultdict(list),
+ "zerver_scheduledmessage": collections.defaultdict(list),
+}
+
+
+def map_messages_to_attachments(data: TableData) -> None:
+ for attachment in data["zerver_attachment"]:
+ for message_id in attachment["messages"]:
+ message_id_to_attachments["zerver_message"][message_id].append(attachment["path_id"])
+
+ for scheduled_message_id in attachment["scheduled_messages"]:
+ message_id_to_attachments["zerver_scheduledmessage"][scheduled_message_id].append(
+ attachment["path_id"]
+ )
+
def update_id_map(table: TableName, old_id: int, new_id: int) -> None:
if table not in ID_MAP:
@@ -181,16 +198,20 @@ def fix_upload_links(data: TableData, message_table: TableName) -> None:
organization being imported (which is only determined at import
time), we need to rewrite the URLs of links to uploaded files
during the import process.
+
+ Applied to attachments path_id found in messages of zerver_message and zerver_scheduledmessage tables.
"""
for message in data[message_table]:
if message["has_attachment"] is True:
- for key, value in path_maps["attachment_path"].items():
- if key in message["content"]:
- message["content"] = message["content"].replace(key, value)
- if message["rendered_content"]:
- message["rendered_content"] = message["rendered_content"].replace(
- key, value
- )
+ for attachment_path in message_id_to_attachments[message_table][message["id"]]:
+ message["content"] = message["content"].replace(
+ attachment_path, path_maps["attachment_path"][attachment_path]
+ )
+
+ if message["rendered_content"]:
+ message["rendered_content"] = message["rendered_content"].replace(
+ attachment_path, path_maps["attachment_path"][attachment_path]
+ )
def fix_streams_can_remove_subscribers_group_column(data: TableData, realm: Realm) -> None:
@@ -1424,6 +1445,18 @@ def do_import_realm(import_dir: Path, subdomain: str, processes: int = 1) -> Rea
sender_map = {user["id"]: user for user in data["zerver_userprofile"]}
+ # TODO: de-dup how we read these json files.
+ attachments_file = os.path.join(import_dir, "attachment.json")
+ if not os.path.exists(attachments_file):
+ raise Exception("Missing attachment.json file!")
+
+ # Important: map_messages_to_attachments should be called before fix_upload_links
+ # which is called by import_message_data and another for zerver_scheduledmessage.
+ with open(attachments_file, "rb") as f:
+ attachment_data = orjson.loads(f.read())
+
+ map_messages_to_attachments(attachment_data)
+
# Import zerver_message and zerver_usermessage
import_message_data(realm=realm, sender_map=sender_map, import_dir=import_dir)
@@ -1486,15 +1519,7 @@ def do_import_realm(import_dir: Path, subdomain: str, processes: int = 1) -> Rea
bulk_import_model(data, UserStatus)
# Do attachments AFTER message data is loaded.
- # TODO: de-dup how we read these json files.
- fn = os.path.join(import_dir, "attachment.json")
- if not os.path.exists(fn):
- raise Exception("Missing attachment.json file!")
-
- logging.info("Importing attachment data from %s", fn)
- with open(fn, "rb") as f:
- attachment_data = orjson.loads(f.read())
-
+ logging.info("Importing attachment data from %s", attachments_file)
import_attachments(attachment_data)
# Import the analytics file.
 | Performance of rewriting attachment URLs is O(M × N)
During import, we check every message for every potential attachment rename -- which is probably extremely inefficient, especially for large imports with many attachments. We should ideally rewrite that logic to be more performant.
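For readers skimming this record: the patch above removes the quadratic behaviour by indexing attachments by message id up front, so each message only rewrites the paths it actually references. Below is a minimal, runnable sketch of that idea — the variable names follow the diff, but the sample data is invented for illustration and is not taken from the repository.
```python
import collections

attachment_data = {
    "zerver_attachment": [
        {"path_id": "old/a.png", "messages": [1], "scheduled_messages": []},
        {"path_id": "old/b.pdf", "messages": [2], "scheduled_messages": []},
    ]
}
path_maps = {"attachment_path": {"old/a.png": "new/a.png", "old/b.pdf": "new/b.pdf"}}
messages = [
    {"id": 1, "has_attachment": True, "content": "see old/a.png"},
    {"id": 2, "has_attachment": True, "content": "see old/b.pdf"},
]

# One pass over the attachments builds the index (the real patch builds a second
# index for scheduled messages the same way)...
message_id_to_attachments = collections.defaultdict(list)
for attachment in attachment_data["zerver_attachment"]:
    for message_id in attachment["messages"]:
        message_id_to_attachments[message_id].append(attachment["path_id"])

# ...and each message then rewrites only the paths it actually references,
# instead of scanning every known attachment rename (the old O(M x N) loop).
for message in messages:
    if message["has_attachment"]:
        for old_path in message_id_to_attachments[message["id"]]:
            message["content"] = message["content"].replace(
                old_path, path_maps["attachment_path"][old_path]
            )

print(messages)  # contents now point at new/a.png and new/b.pdf
```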
| Hello @zulip/server-misc members, this issue was labeled with the "area: export/import" label, so you may want to check it out!
<!-- areaLabelAddition -->
working on this
Hi @alexmv
I would like to contribute. Could you provide more details? | 2024-04-02T05:46:07 |
|
zulip/zulip | 29,567 | zulip__zulip-29567 | [
"19658"
] | aa31f9ae8db5d7dfd112def6c943c4bf78cfa9e1 | diff --git a/zerver/lib/markdown/__init__.py b/zerver/lib/markdown/__init__.py
--- a/zerver/lib/markdown/__init__.py
+++ b/zerver/lib/markdown/__init__.py
@@ -1417,7 +1417,15 @@ def handleMatch(self, match: Match[str]) -> Optional[Element]:
# Use HTML5 <time> element for valid timestamps.
time_element = Element("time")
if timestamp.tzinfo:
- timestamp = timestamp.astimezone(timezone.utc)
+ try:
+ timestamp = timestamp.astimezone(timezone.utc)
+ except ValueError:
+ error_element = Element("span")
+ error_element.set("class", "timestamp-error")
+ error_element.text = markdown.util.AtomicString(
+ f"Invalid time format: {time_input_string}"
+ )
+ return error_element
else:
timestamp = timestamp.replace(tzinfo=timezone.utc)
time_element.set("datetime", timestamp.isoformat().replace("+00:00", "Z"))
| diff --git a/zerver/tests/fixtures/markdown_test_cases.json b/zerver/tests/fixtures/markdown_test_cases.json
--- a/zerver/tests/fixtures/markdown_test_cases.json
+++ b/zerver/tests/fixtures/markdown_test_cases.json
@@ -838,6 +838,13 @@
"marked_expected_output": "<p><span>**hello world**</span></p>",
"text_content": "Invalid time format: **hello world**"
},
+ {
+ "name": "timestamp_invalid_timezone",
+ "input": "<time:1969-12-31T00:00:00+32:00>",
+ "expected_output": "<p><span class=\"timestamp-error\">Invalid time format: 1969-12-31T00:00:00+32:00</span></p>",
+ "marked_expected_output": "<p><span>1969-12-31T00:00:00+32:00</span></p>",
+ "text_content": "Invalid time format: 1969-12-31T00:00:00+32:00"
+ },
{
"name": "timestamp_unix",
"input": "Let's meet at <time:1496701800>.",
| Invalid timezone offset in `<time>` picker leads to 500
A message containing:
```
<time:1969-12-31T00:00:00+32:00>
```
..fails to send:
```
2021-09-01 20:41:56.250 ERR [] Exception in Markdown parser; input (sanitized) was: '<xxxx:xxxx-xx-xxxxx:xx:xx+xx:xx>'
(message unknown)
Traceback (most recent call last):
File "/srv/zulip/zerver/lib/markdown/__init__.py", line 2466, in do_convert
rendering_result.rendered_content = timeout(5, lambda: _md_engine.convert(content))
File "/srv/zulip/zerver/lib/timeout.py", line 94, in timeout
raise thread.exc_info[1].with_traceback(thread.exc_info[2])
File "/srv/zulip/zerver/lib/timeout.py", line 53, in run
self.result = func()
File "/srv/zulip/zerver/lib/markdown/__init__.py", line 2466, in <lambda>
rendering_result.rendered_content = timeout(5, lambda: _md_engine.convert(content))
File "/srv/zulip-py3-venv/lib/python3.6/site-packages/markdown/core.py", line 268, in convert
newRoot = treeprocessor.run(root)
File "/srv/zulip-py3-venv/lib/python3.6/site-packages/markdown/treeprocessors.py", line 371, in run
self.__handleInline(text), child
File "/srv/zulip-py3-venv/lib/python3.6/site-packages/markdown/treeprocessors.py", line 132, in __handleInline
self.inlinePatterns[patternIndex], data, patternIndex, startIndex
File "/srv/zulip-py3-venv/lib/python3.6/site-packages/markdown/treeprocessors.py", line 286, in __applyPattern
node = pattern.handleMatch(match)
File "/srv/zulip/zerver/lib/markdown/__init__.py", line 1361, in handleMatch
timestamp = timestamp.astimezone(datetime.timezone.utc)
ValueError: offset must be a timedelta strictly between -timedelta(hours=24) and timedelta(hours=24).
```
We should handle this more gracefully than a 500 error -- ideally, by not treating it as a smart timepicker, but just text.
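A minimal sketch of the failure mode and of the graceful fallback the patch above adopts. It uses dateutil's `tzoffset`, which the discussion below identifies as the class that accepts the out-of-range offset without complaint (the ValueError only surfaces when `astimezone` is called); the exact parsing path inside Zulip is not shown here and should be treated as an assumption.
```python
from datetime import datetime, timezone

from dateutil.tz import tzoffset

# tzoffset historically performed no range validation (see the dateutil issue
# referenced in the comments below), so building a datetime with a +32:00
# offset succeeds without error...
timestamp = datetime(1969, 12, 31, tzinfo=tzoffset(None, 32 * 3600))

try:
    # ...and the error only appears here, which is where the 500 came from.
    timestamp = timestamp.astimezone(timezone.utc)
except ValueError:
    # The merged patch catches this and renders the input as plain text instead.
    print("Invalid time format: 1969-12-31T00:00:00+32:00")
```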
| Hello @zulip/server-dependencies, @zulip/server-markdown members, this issue was labeled with the "area: markdown", "area: dependencies" labels, so you may want to check it out!
<!-- areaLabelAddition -->
@chdinesh1089 @akshatdalton would either of you be interested in working on this?
Tagging as a priority because the markdown processor should never 500.
I might not be able to start working on this for a few days due to some other immediately pressing tasks. Will claim the issue when I get time to start working on this. I think it is fine if @akshatdalton or others interested work on this before I get a chance.
@zulipbot claim
Hello @jaiongit!
Thanks for your interest in Zulip! You have attempted to claim an issue without the labels "help wanted", "good first issue". Since you're a new contributor, you can only claim and submit pull requests for issues with the [help wanted](https://github.com/zulip/zulip/issues?q=is%3Aopen+is%3Aissue+no%3Aassignee+label%3A%22help+wanted%22) or [good first issue](https://github.com/zulip/zulip/issues?q=is%3Aopen+is%3Aissue+no%3Aassignee+label%3A%22good+first+issue%22) labels.
If this is your first time here, we recommend reading our [guide for new contributors](https://zulip.readthedocs.io/en/latest/overview/contributing.html) before getting started.
@zulipbot claim
Welcome to Zulip, @jaiongit! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here's some tips to get you off to a good start:
- Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
- [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
Hello @jaiongit, you have been unassigned from this issue because you have not updated this issue or any referenced pull requests for over 14 days.
You can reclaim this issue or claim any other issue by commenting `@zulipbot claim` on that issue.
Thanks for your contributions, and hope to see you again soon!
I am working on it.
@timabbott We can't really do anything in this case (except putting in a try/except block or calling the `__str__` method, which won't look good). The problem is in the dateutil library, which doesn't raise an error in the constructor. I have opened the issue here: https://github.com/dateutil/dateutil/issues/1231.
What would the try/except block option look like?
@zulipbot claim
Welcome to Zulip, @tagyieh! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here's some tips to get you off to a good start:
- Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
- [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
@tagyieh You have been unassigned from this issue because you have not made any updates for over 14 days. Please feel free to reclaim the issue if you decide to pick up again. Thanks!
| 2024-04-02T14:52:38 |
zulip/zulip | 29,590 | zulip__zulip-29590 | [
"29575"
] | e4cbca698d5222a5e52fd11a01398b582f980838 | diff --git a/zerver/lib/onboarding_steps.py b/zerver/lib/onboarding_steps.py
--- a/zerver/lib/onboarding_steps.py
+++ b/zerver/lib/onboarding_steps.py
@@ -32,6 +32,9 @@ def to_dict(self) -> Dict[str, str]:
OneTimeNotice(
name="first_stream_created_banner",
),
+ OneTimeNotice(
+ name="jump_to_conversation_banner",
+ ),
]
# We may introduce onboarding step of types other than 'one time notice'
| diff --git a/zerver/tests/test_onboarding_steps.py b/zerver/tests/test_onboarding_steps.py
--- a/zerver/tests/test_onboarding_steps.py
+++ b/zerver/tests/test_onboarding_steps.py
@@ -22,9 +22,10 @@ def test_some_done_some_not(self) -> None:
do_mark_onboarding_step_as_read(self.user, "visibility_policy_banner")
do_mark_onboarding_step_as_read(self.user, "intro_inbox_view_modal")
onboarding_steps = get_next_onboarding_steps(self.user)
- self.assert_length(onboarding_steps, 2)
+ self.assert_length(onboarding_steps, 3)
self.assertEqual(onboarding_steps[0]["name"], "intro_recent_view_modal")
self.assertEqual(onboarding_steps[1]["name"], "first_stream_created_banner")
+ self.assertEqual(onboarding_steps[2]["name"], "jump_to_conversation_banner")
with self.settings(TUTORIAL_ENABLED=False):
onboarding_steps = get_next_onboarding_steps(self.user)
| Add a first-time banner for jumping to sent message conversation
<!-- Issue description -->
In #29186, we are making it the case that when you send a message, your view jumps to the conversation you sent it to. We should add a compose banner **the first time** this happens.
- Color: green
- Text (we can play with it on the PR):
> Viewing the conversation where you sent your message. To go back, use the **back** button in your browser or desktop app.
<!-- Link to a message in the chat.zulip.org discussion. Message links will still work even if the topic is renamed or resolved. Link back to this issue from the chat.zulip.org thread. -->
[CZO thread](https://chat.zulip.org/#narrow/stream/101-design/topic/jumping.20to.20sent.20message.20conversation.20.28experiment.29.20.2329186/near/1772725)
| Hello @zulip/server-onboarding members, this issue was labeled with the "area: onboarding" label, so you may want to check it out!
<!-- areaLabelAddition -->
| 2024-04-03T10:10:52 |
zulip/zulip | 29,641 | zulip__zulip-29641 | [
"28877"
] | c5027e6bca6e28d62efe006235668d288aef7862 | diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -33,7 +33,7 @@
# Changes should be accompanied by documentation explaining what the
# new level means in api_docs/changelog.md, as well as "**Changes**"
# entries in the endpoint's documentation in `zulip.yaml`.
-API_FEATURE_LEVEL = 255
+API_FEATURE_LEVEL = 256
# Bump the minor PROVISION_VERSION to indicate that folks should provision
# only when going from an old version of the code to a newer version. Bump
diff --git a/zerver/actions/message_delete.py b/zerver/actions/message_delete.py
--- a/zerver/actions/message_delete.py
+++ b/zerver/actions/message_delete.py
@@ -3,7 +3,7 @@
from zerver.lib import retention
from zerver.lib.retention import move_messages_to_archive
from zerver.lib.stream_subscription import get_active_subscriptions_for_stream_id
-from zerver.models import Message, Realm, UserMessage, UserProfile
+from zerver.models import Message, Realm, Stream, UserMessage, UserProfile
from zerver.tornado.django_api import send_event_on_commit
@@ -15,6 +15,34 @@ class DeleteMessagesEvent(TypedDict, total=False):
stream_id: int
+def check_update_first_message_id(
+ realm: Realm, stream: Stream, message_ids: List[int], users_to_notify: Iterable[int]
+) -> None:
+ # This will not update the `first_message_id` of streams where the
+ # first message was deleted prior to the implementation of this function.
+ assert stream.recipient_id is not None
+ if stream.first_message_id not in message_ids:
+ return
+ current_first_message_id = (
+ Message.objects.filter(realm_id=realm.id, recipient_id=stream.recipient_id)
+ .values_list("id", flat=True)
+ .order_by("id")
+ .first()
+ )
+
+ stream.first_message_id = current_first_message_id
+ stream.save(update_fields=["first_message_id"])
+
+ stream_event = dict(
+ type="stream",
+ op="update",
+ property="first_message_id",
+ value=stream.first_message_id,
+ stream_id=stream.id,
+ )
+ send_event_on_commit(realm, stream_event, users_to_notify)
+
+
def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:
# messages in delete_message event belong to the same topic
# or is a single direct message, as any other behaviour is not possible with
@@ -52,6 +80,9 @@ def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:
archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE
move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)
+ if message_type == "stream":
+ stream = Stream.objects.get(id=sample_message.recipient.type_id)
+ check_update_first_message_id(realm, stream, message_ids, users_to_notify)
event["message_type"] = message_type
send_event_on_commit(realm, event, users_to_notify)
| diff --git a/zerver/tests/test_message_delete.py b/zerver/tests/test_message_delete.py
--- a/zerver/tests/test_message_delete.py
+++ b/zerver/tests/test_message_delete.py
@@ -11,6 +11,7 @@
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import Message, Realm, UserProfile
from zerver.models.realms import get_realm
+from zerver.models.streams import get_stream
if TYPE_CHECKING:
from django.test.client import _MonkeyPatchedWSGIResponse as TestHttpResponse
@@ -321,3 +322,28 @@ def test_delete_message_in_unsubscribed_private_stream(self) -> None:
result = self.client_delete(f"/json/messages/{msg_id}")
self.assert_json_success(result)
self.assertFalse(Message.objects.filter(id=msg_id).exists())
+
+ def test_update_first_message_id_on_stream_message_deletion(self) -> None:
+ realm = get_realm("zulip")
+ stream_name = "test"
+ cordelia = self.example_user("cordelia")
+ self.make_stream(stream_name)
+ self.subscribe(cordelia, stream_name)
+ message_ids = [self.send_stream_message(cordelia, stream_name) for _ in range(5)]
+ first_message_id = message_ids[0]
+
+ message = Message.objects.get(id=message_ids[3])
+ do_delete_messages(realm, [message])
+ stream = get_stream(stream_name, realm)
+ self.assertEqual(stream.first_message_id, first_message_id)
+
+ first_message = Message.objects.get(id=first_message_id)
+ do_delete_messages(realm, [first_message])
+ stream = get_stream(stream_name, realm)
+ self.assertEqual(stream.first_message_id, message_ids[1])
+
+ all_messages = Message.objects.filter(id__in=message_ids)
+ with self.assert_database_query_count(23):
+ do_delete_messages(realm, all_messages)
+ stream = get_stream(stream_name, realm)
+ self.assertEqual(stream.first_message_id, None)
diff --git a/zerver/tests/test_retention.py b/zerver/tests/test_retention.py
--- a/zerver/tests/test_retention.py
+++ b/zerver/tests/test_retention.py
@@ -1138,7 +1138,7 @@ def test_do_delete_messages_multiple(self) -> None:
message_ids = [self.send_stream_message(cordelia, "Verona", str(i)) for i in range(10)]
messages = Message.objects.filter(id__in=message_ids)
- with self.assert_database_query_count(20):
+ with self.assert_database_query_count(21):
do_delete_messages(realm, messages)
self.assertFalse(Message.objects.filter(id__in=message_ids).exists())
| Update first message ID when first message is deleted
When a message is deleted, we should update the stored ID of the first message in the stream. Because we currently do not, deleting the first message may result in an extraneous "more topics" link in the left sidebar, with no additional topics shown when you click it.
Note: The symptom may be hard to replicate; we should focus on fixing the technical issue, as described in @timabbott 's comment below.
| Hello @zulip/server-message-view members, this issue was labeled with the "area: message-editing" label, so you may want to check it out!
<!-- areaLabelAddition -->
@zulipbot claim
This may be tricky; I'm not sure. `do_delete_messages` probably just needs to check the stream containing the deleted messages, if any, and just do a quick query to check if its `first_message_id` is stale, and if so, update it.
It is reasonably likely that making that change will cause a test in `test_events` for deleting messages to fail -- because we're not sending an event to notify existing clients of the new value. Whether or not that happens, we ideally would send an `op="update"` event for the `first_message_id` property to live-update clients.
The manual test case to verify this is working is that "more topics" should no longer incorrectly appear if you create a new stream, send a message, and then delete the first message (to "stream events") that had been sent to that stream.
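For reference, the patch in this record implements exactly this suggestion; the client-facing event it sends has the following shape. The field names are copied from the diff above, while the id values here are invented for illustration.
```python
stream_event = {
    "type": "stream",
    "op": "update",
    "property": "first_message_id",
    "value": 4208,   # the new oldest remaining message id, or None if none remain
    "stream_id": 31,
}
```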
I couldn't reproduce the issue. Here's how I tried:
1. Create new stream.
2. Send a message to "stream events".
3. Delete the first message that I had sent to "stream events", or delete the first message sent by the bot in "stream events".
[Screencast from 10-02-24 10:41:32 PM IST.webm](https://github.com/zulip/zulip/assets/90370535/638e9c57-a477-42f3-918c-832cbc8c32f0)
While working on #28941, I noticed that "more topics" appears after the following steps:
1. Mute any topic of a particular stream.
2. Click on any other topic; "more topics" appears.
I just want to confirm whether this is the desired behaviour?
[Screencast from 16-02-24 12:16:29 AM IST.webm](https://github.com/zulip/zulip/assets/90370535/0d19550f-4bfc-4d5f-bc88-9ded40bf6c7e)
@zulipbot abandon
Yes, "more topics" is expected when you have muted topics (i.e., there are more topics to show).
@zulipbot claim
@alya I want to work on this
Welcome to Zulip, @rohit-sharma-1802! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here's some tips to get you off to a good start:
- Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
- [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
@zulipbot abandon
@zulipbot claim
Welcome to Zulip, @UdaySagar-Git! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here's some tips to get you off to a good start:
- Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
- [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
Hey @alya , I'm currently having trouble reproducing this issue. Could you please provide detailed steps on how to replicate it?
@UdaySagar-Git please take a look at the [Zulip contributor guide](https://zulip.readthedocs.io/en/latest/overview/contributing.html), and keep in mind the following guideline:
> Before you claim an issue, you should be confident that you will be able to tackle it effectively.
I will go ahead and unassign this issue, and you should feel free to re-claim it once you have figured out how to approach it (or pick a different one if you prefer).
I have updated the issue description to hopefully be more clear.
@zulipbot claim
Welcome to Zulip, @Epik-Whale463! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here's some tips to get you off to a good start:
- Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
- [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
@Epik-Whale463 You have been unassigned from this issue because you have not made any updates for over 14 days. Please feel free to reclaim the issue if you decide to pick up again. Thanks!
hey @alya , if the issue is not resolved, i can fix this....
@zulipbot claim
Welcome to Zulip, @Thanush19! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here's some tips to get you off to a good start:
- Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
- [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
@Thanush19 You have been unassigned from this issue because you have not made any updates for over 14 days. Please feel free to reclaim the issue if you decide to pick up again. Thanks!
@zulipbot claim
| 2024-04-05T17:02:23 |
zulip/zulip | 29,645 | zulip__zulip-29645 | [
"29594"
] | f4d109c289f614273b43b411cbd8d1fad128842e | diff --git a/zerver/webhooks/github/view.py b/zerver/webhooks/github/view.py
--- a/zerver/webhooks/github/view.py
+++ b/zerver/webhooks/github/view.py
@@ -829,7 +829,8 @@ def get_topic_based_on_type(payload: WildValue, event: str) -> str:
"issues": get_issue_body,
"member": get_member_body,
"membership": get_membership_body,
- "opened_or_update_pull_request": get_opened_or_update_pull_request_body,
+ "opened_pull_request": get_opened_or_update_pull_request_body,
+ "updated_pull_request": get_opened_or_update_pull_request_body,
"assigned_or_unassigned_pull_request": get_assigned_or_unassigned_pull_request_body,
"page_build": get_page_build_body,
"ping": get_ping_body,
@@ -951,8 +952,10 @@ def get_zulip_event_name(
"""
if header_event == "pull_request":
action = payload["action"].tame(check_string)
- if action in ("opened", "synchronize", "reopened", "edited"):
- return "opened_or_update_pull_request"
+ if action in ("opened", "reopened"):
+ return "opened_pull_request"
+ elif action in ("synchronize", "edited"):
+ return "updated_pull_request"
if action in ("assigned", "unassigned"):
return "assigned_or_unassigned_pull_request"
if action == "closed":
| Github integration: Filtering Pull request update
We are currently evaluating Zulip for our open-source organization. One thing we would like to do is bridge GitHub notifications such as created issues, comments, or created pull requests. We set up the bot and the webhook; however, it seems like the webhook only has `opened_or_update_pull_request` as a topic filter. Unfortunately, pull request updates create way too much noise because people do this frequently, which renders the GitHub integration almost useless for us.
Is there a way to only receive notifications on opened pull requests or comments?
[Zulip Link](https://chat.zulip.org/#narrow/stream/127-integrations/topic/github.20bot.20filtering)
CZO thread
| Hmm, filtering the event types beyond the (better than just about every other webhook, but still incomplete) set of things that GitHub itself lets you filter on has been supported for a while but we've only just added the documentation for it (`https://chat.zulip.org/integrations/doc/github` has a partial version)
Let's discuss on chat.zulip.org adding more fine-grained GitHub event types though.
So I saw something of the sort `{event_type}/{action}`. Is this implemented? I tried `pull_request/opened` and `opened_or_update_pull_request/opened` but that did not seem to work.
Are the events listed [here](https://chat.zulip.org/integrations/doc/github) the only ones supported? | 2024-04-06T12:14:13 |
|
zulip/zulip | 29,762 | zulip__zulip-29762 | [
"29041"
] | cc793612f096afdf42a12974f4abc533389166a3 | diff --git a/zerver/lib/import_realm.py b/zerver/lib/import_realm.py
--- a/zerver/lib/import_realm.py
+++ b/zerver/lib/import_realm.py
@@ -39,6 +39,7 @@
from zerver.lib.user_groups import create_system_user_groups_for_realm
from zerver.lib.user_message import UserMessageLite, bulk_insert_ums
from zerver.lib.utils import generate_api_key, process_list_in_batches
+from zerver.lib.zulip_update_announcements import send_zulip_update_announcements
from zerver.models import (
AlertWord,
Attachment,
@@ -1508,6 +1509,15 @@ def do_import_realm(import_dir: Path, subdomain: str, processes: int = 1) -> Rea
# Realm object is reactivated.
maybe_enqueue_audit_log_upload(realm)
+ # If the export was NOT generated by another zulip server, the
+ # 'zulip_update_announcements_level' is set to None by default.
+ # Set it to the latest level to avoid receiving older update messages.
+ is_realm_imported_from_other_zulip_server = RealmAuditLog.objects.filter(
+ realm=realm, event_type=RealmAuditLog.REALM_EXPORTED, acting_user=None
+ ).exists()
+ if not is_realm_imported_from_other_zulip_server:
+ send_zulip_update_announcements(skip_delay=False, realm_imported_from_other_product=realm)
+
return realm
diff --git a/zerver/lib/zulip_update_announcements.py b/zerver/lib/zulip_update_announcements.py
--- a/zerver/lib/zulip_update_announcements.py
+++ b/zerver/lib/zulip_update_announcements.py
@@ -145,8 +145,11 @@ def is_group_direct_message_sent_to_admins_within_days(realm: Realm, days: int)
realm=realm,
event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
extra_data__contains={
+ # Note: We're looking for the transition away from None,
+ # which usually will be to level 0, but can be to a higher
+ # initial level if the organization was imported from
+ # another chat tool.
RealmAuditLog.OLD_VALUE: None,
- RealmAuditLog.NEW_VALUE: 0,
"property": "zulip_update_announcements_level",
},
).first()
@@ -204,12 +207,19 @@ def send_messages_and_update_level(
realm.save(update_fields=["zulip_update_announcements_level"])
-def send_zulip_update_announcements(skip_delay: bool) -> None:
+def send_zulip_update_announcements(
+ skip_delay: bool, realm_imported_from_other_product: Optional[Realm] = None
+) -> None:
latest_zulip_update_announcements_level = get_latest_zulip_update_announcements_level()
- realms = get_realms_behind_zulip_update_announcements_level(
- level=latest_zulip_update_announcements_level
- )
+ if realm_imported_from_other_product:
+ realms = [realm_imported_from_other_product]
+ else:
+ realms = list(
+ get_realms_behind_zulip_update_announcements_level(
+ level=latest_zulip_update_announcements_level
+ )
+ )
for realm in realms:
# Refresh the realm from the database and check its
@@ -228,16 +238,17 @@ def send_zulip_update_announcements(skip_delay: bool) -> None:
new_zulip_update_announcements_level = None
if realm_zulip_update_announcements_level is None:
- # realm predates the zulip update announcements feature.
+ # This realm predates the zulip update announcements feature, or
+ # was imported from another product (Slack, Mattermost, etc.).
# Group DM the administrators to set or verify the stream for
# zulip update announcements.
group_direct_message = internal_prep_group_direct_message_for_old_realm(realm, sender)
messages = [group_direct_message]
- new_zulip_update_announcements_level = 0
- elif (
- realm_zulip_update_announcements_level == 0
- and realm.zulip_update_announcements_stream is None
- ):
+ if realm_imported_from_other_product:
+ new_zulip_update_announcements_level = latest_zulip_update_announcements_level
+ else:
+ new_zulip_update_announcements_level = 0
+ elif realm.zulip_update_announcements_stream is None:
# We wait for a week after sending group DMs to let admins configure
# stream for zulip update announcements. After that, they miss updates
# until they don't configure.
@@ -253,13 +264,12 @@ def send_zulip_update_announcements(skip_delay: bool) -> None:
):
continue
- if realm.zulip_update_announcements_stream is not None:
- messages = internal_prep_zulip_update_announcements_stream_messages(
- current_level=realm_zulip_update_announcements_level,
- latest_level=latest_zulip_update_announcements_level,
- sender=sender,
- realm=realm,
- )
+ messages = internal_prep_zulip_update_announcements_stream_messages(
+ current_level=realm_zulip_update_announcements_level,
+ latest_level=latest_zulip_update_announcements_level,
+ sender=sender,
+ realm=realm,
+ )
new_zulip_update_announcements_level = latest_zulip_update_announcements_level
| diff --git a/zerver/tests/test_import_export.py b/zerver/tests/test_import_export.py
--- a/zerver/tests/test_import_export.py
+++ b/zerver/tests/test_import_export.py
@@ -346,12 +346,24 @@ def export_realm(
consent_message_id=consent_message_id,
)
+ def export_realm_and_create_auditlog(
+ self,
+ original_realm: Realm,
+ exportable_user_ids: Optional[Set[int]] = None,
+ consent_message_id: Optional[int] = None,
+ public_only: bool = False,
+ ) -> None:
+ RealmAuditLog.objects.create(
+ realm=original_realm, event_type=RealmAuditLog.REALM_EXPORTED, event_time=timezone_now()
+ )
+ self.export_realm(original_realm, exportable_user_ids, consent_message_id, public_only)
+
def test_export_files_from_local(self) -> None:
user = self.example_user("hamlet")
realm = user.realm
self.upload_files_for_user(user)
self.upload_files_for_realm(user)
- self.export_realm(realm)
+ self.export_realm_and_create_auditlog(realm)
self.verify_attachment_json(user)
self.verify_uploads(user, is_s3=False)
@@ -382,7 +394,7 @@ def test_public_only_export_files_private_uploads_not_included(self) -> None:
is_message_realm_public=True,
)
- self.export_realm(realm, public_only=True)
+ self.export_realm_and_create_auditlog(realm, public_only=True)
# The attachment row shouldn't have been exported:
self.assertEqual(read_json("attachment.json")["zerver_attachment"], [])
@@ -401,7 +413,7 @@ def test_export_files_from_s3(self) -> None:
self.upload_files_for_user(user)
self.upload_files_for_realm(user)
- self.export_realm(realm)
+ self.export_realm_and_create_auditlog(realm)
self.verify_attachment_json(user)
self.verify_uploads(user, is_s3=True)
@@ -423,7 +435,7 @@ def test_zulip_realm(self) -> None:
realm_user_default.default_language = "de"
realm_user_default.save()
- self.export_realm(realm)
+ self.export_realm_and_create_auditlog(realm)
data = read_json("realm.json")
self.assert_length(data["zerver_userprofile_crossrealm"], 3)
@@ -500,7 +512,7 @@ def test_export_realm_with_exportable_user_ids(self) -> None:
self.example_user("iago"), self.example_user("hamlet")
)
- self.export_realm(realm, exportable_user_ids=user_ids)
+ self.export_realm_and_create_auditlog(realm, exportable_user_ids=user_ids)
data = read_json("realm.json")
@@ -612,7 +624,7 @@ def test_export_realm_with_member_consent(self) -> None:
)
assert message is not None
- self.export_realm(realm, consent_message_id=message.id)
+ self.export_realm_and_create_auditlog(realm, consent_message_id=message.id)
data = read_json("realm.json")
@@ -895,6 +907,10 @@ def test_import_realm(self) -> None:
new_realm_emoji.author = None
new_realm_emoji.save()
+ RealmAuditLog.objects.create(
+ realm=original_realm, event_type=RealmAuditLog.REALM_EXPORTED, event_time=timezone_now()
+ )
+
getters = self.get_realm_getters()
snapshots: Dict[str, object] = {}
@@ -1361,7 +1377,7 @@ def get_realm_user_default_values(r: Realm) -> Dict[str, object]:
def test_import_realm_with_invalid_email_addresses_fails_validation(self) -> None:
realm = get_realm("zulip")
- self.export_realm(realm)
+ self.export_realm_and_create_auditlog(realm)
data = read_json("realm.json")
data["zerver_userprofile"][0]["delivery_email"] = "invalid_email_address"
@@ -1378,7 +1394,7 @@ def test_import_realm_with_invalid_email_addresses_fails_validation(self) -> Non
# Such data should never reasonably get generated, but we should still
# be defensive against it (since it can still happen due to bugs or manual edition
# of export files in an attempt to get us to import malformed data).
- self.export_realm(realm)
+ self.export_realm_and_create_auditlog(realm)
data = read_json("realm.json")
data["zerver_userprofile"][0]["email"] = "invalid_email_address"
@@ -1394,7 +1410,7 @@ def test_import_realm_with_no_realm_user_default_table(self) -> None:
original_realm = Realm.objects.get(string_id="zulip")
RealmUserDefault.objects.get(realm=original_realm).delete()
- self.export_realm(original_realm)
+ self.export_realm_and_create_auditlog(original_realm)
with self.settings(BILLING_ENABLED=False), self.assertLogs(level="INFO"):
do_import_realm(get_output_dir(), "test-zulip")
@@ -1414,7 +1430,7 @@ def test_import_realm_with_no_realm_user_default_table(self) -> None:
def test_import_realm_notify_bouncer(self) -> None:
original_realm = Realm.objects.get(string_id="zulip")
- self.export_realm(original_realm)
+ self.export_realm_and_create_auditlog(original_realm)
with self.settings(BILLING_ENABLED=False), self.assertLogs(level="INFO"), patch(
"zerver.lib.remote_server.send_to_push_bouncer"
@@ -1451,7 +1467,7 @@ def test_import_files_from_local(self) -> None:
self.upload_files_for_user(user)
self.upload_files_for_realm(user)
- self.export_realm(realm)
+ self.export_realm_and_create_auditlog(realm)
with self.settings(BILLING_ENABLED=False), self.assertLogs(level="INFO"):
do_import_realm(get_output_dir(), "test-zulip")
@@ -1516,7 +1532,7 @@ def test_import_files_from_s3(self) -> None:
self.upload_files_for_realm(user)
self.upload_files_for_user(user)
- self.export_realm(realm)
+ self.export_realm_and_create_auditlog(realm)
with self.settings(BILLING_ENABLED=False), self.assertLogs(level="INFO"):
do_import_realm(get_output_dir(), "test-zulip")
@@ -1610,7 +1626,7 @@ def test_import_of_authentication_methods(self) -> None:
realm, authentication_methods_dict, acting_user=None
)
- self.export_realm(realm)
+ self.export_realm_and_create_auditlog(realm)
with self.settings(BILLING_ENABLED=False), self.assertLogs(level="INFO"):
do_import_realm(get_output_dir(), "test-zulip")
@@ -1621,7 +1637,7 @@ def test_import_of_authentication_methods(self) -> None:
imported_realm.authentication_methods_dict(),
)
- self.export_realm(realm)
+ self.export_realm_and_create_auditlog(realm)
with self.settings(BILLING_ENABLED=True), self.assertLogs(level="WARN") as mock_warn:
do_import_realm(get_output_dir(), "test-zulip2")
@@ -1646,7 +1662,7 @@ def test_plan_type(self) -> None:
do_change_realm_plan_type(realm, Realm.PLAN_TYPE_LIMITED, acting_user=None)
self.upload_files_for_user(user)
- self.export_realm(realm)
+ self.export_realm_and_create_auditlog(realm)
with self.settings(BILLING_ENABLED=True), self.assertLogs(level="INFO"):
imported_realm = do_import_realm(get_output_dir(), "test-zulip-1")
@@ -1663,7 +1679,7 @@ def test_plan_type(self) -> None:
# Importing the same export data twice would cause conflict on unique fields,
# so instead re-export the original realm via self.export_realm, which handles
# this issue.
- self.export_realm(realm)
+ self.export_realm_and_create_auditlog(realm)
with self.settings(BILLING_ENABLED=False), self.assertLogs(level="INFO"):
imported_realm = do_import_realm(get_output_dir(), "test-zulip-2")
@@ -1679,13 +1695,15 @@ def test_plan_type(self) -> None:
def test_system_usergroup_audit_logs(self) -> None:
realm = get_realm("zulip")
- self.export_realm(realm)
+ self.export_realm_and_create_auditlog(realm)
# Simulate an external export where user groups are missing.
data = read_json("realm.json")
data.pop("zerver_usergroup")
data.pop("zerver_namedusergroup")
data.pop("zerver_realmauditlog")
+ data["zerver_realm"][0]["zulip_update_announcements_level"] = None
+ data["zerver_realm"][0]["zulip_update_announcements_stream"] = None
# User groups data is missing. So, all the realm group based settings
# should be None.
diff --git a/zerver/tests/test_mattermost_importer.py b/zerver/tests/test_mattermost_importer.py
--- a/zerver/tests/test_mattermost_importer.py
+++ b/zerver/tests/test_mattermost_importer.py
@@ -849,7 +849,7 @@ def test_do_convert_data_with_direct_messages(self) -> None:
messages = Message.objects.filter(realm=realm)
for message in messages:
self.assertIsNotNone(message.rendered_content)
- self.assert_length(messages, 11)
+ self.assert_length(messages, 12)
stream_messages = messages.filter(recipient__type=Recipient.STREAM).order_by("date_sent")
stream_recipients = stream_messages.values_list("recipient", flat=True)
@@ -880,7 +880,7 @@ def test_do_convert_data_with_direct_messages(self) -> None:
"date_sent"
)
personal_recipients = personal_messages.values_list("recipient", flat=True)
- self.assert_length(personal_messages, 4)
+ self.assert_length(personal_messages, 5)
self.assert_length(set(personal_recipients), 3)
self.assertEqual(personal_messages[0].sender.email, "[email protected]")
self.assertRegex(personal_messages[0].content, "hey harry\n\n\\[harry-ron.jpg\\]\\(.*\\)")
diff --git a/zerver/tests/test_rocketchat_importer.py b/zerver/tests/test_rocketchat_importer.py
--- a/zerver/tests/test_rocketchat_importer.py
+++ b/zerver/tests/test_rocketchat_importer.py
@@ -1003,7 +1003,7 @@ def test_do_convert_data(self) -> None:
self.assertIsNotNone(message.rendered_content)
# After removing user_joined, added_user, discussion_created, etc.
# messages. (Total messages were 66.)
- self.assert_length(messages, 43)
+ self.assert_length(messages, 44)
stream_messages = messages.filter(recipient__type=Recipient.STREAM).order_by("date_sent")
stream_recipients = stream_messages.values_list("recipient", flat=True)
@@ -1025,8 +1025,8 @@ def test_do_convert_data(self) -> None:
"date_sent"
)
huddle_recipients = huddle_messages.values_list("recipient", flat=True)
- self.assert_length(huddle_messages, 4)
- self.assert_length(set(huddle_recipients), 1)
+ self.assert_length(huddle_messages, 5)
+ self.assert_length(set(huddle_recipients), 2)
self.assertEqual(huddle_messages[0].sender.email, "[email protected]")
self.assertEqual(huddle_messages[0].content, "Hey people!")
diff --git a/zerver/tests/test_slack_importer.py b/zerver/tests/test_slack_importer.py
--- a/zerver/tests/test_slack_importer.py
+++ b/zerver/tests/test_slack_importer.py
@@ -1354,6 +1354,7 @@ def test_slack_import_to_existing_database(
{
RealmAuditLog.SUBSCRIPTION_CREATED,
RealmAuditLog.REALM_PLAN_TYPE_CHANGED,
+ RealmAuditLog.REALM_PROPERTY_CHANGED,
RealmAuditLog.REALM_CREATED,
RealmAuditLog.REALM_IMPORTED,
RealmAuditLog.USER_GROUP_CREATED,
@@ -1363,7 +1364,7 @@ def test_slack_import_to_existing_database(
},
)
- self.assertEqual(Message.objects.filter(realm=realm).count(), 82)
+ self.assertEqual(Message.objects.filter(realm=realm).count(), 83)
# All auth backends are enabled initially.
self.assertTrue(all(realm.authentication_methods_dict().values()))
diff --git a/zerver/tests/test_zulip_update_announcements.py b/zerver/tests/test_zulip_update_announcements.py
--- a/zerver/tests/test_zulip_update_announcements.py
+++ b/zerver/tests/test_zulip_update_announcements.py
@@ -1,11 +1,15 @@
+import os
from datetime import timedelta
from unittest import mock
+from unittest.mock import call, patch
import time_machine
from django.conf import settings
from django.utils.timezone import now as timezone_now
from typing_extensions import override
+from zerver.data_import.mattermost import do_convert_data
+from zerver.lib.import_realm import do_import_realm
from zerver.lib.message import remove_single_newlines
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.zulip_update_announcements import (
@@ -306,3 +310,108 @@ def test_remove_single_newlines(self) -> None:
input_text = "- This is a bullet.\n- This is another bullet.\n\n1. This is a list\n1. This is more list."
expected_output = "- This is a bullet.\n- This is another bullet.\n\n1. This is a list\n1. This is more list."
self.assertEqual(remove_single_newlines(input_text), expected_output)
+
+ def test_zulip_updates_for_realm_imported_from_other_product(self) -> None:
+ with mock.patch(
+ "zerver.lib.zulip_update_announcements.zulip_update_announcements",
+ self.zulip_update_announcements,
+ ):
+ mattermost_data_dir = self.fixture_file_name("", "mattermost_fixtures")
+ output_dir = self.make_import_output_dir("mattermost")
+
+ with patch("builtins.print") as mock_print, self.assertLogs(level="WARNING"):
+ do_convert_data(
+ mattermost_data_dir=mattermost_data_dir,
+ output_dir=output_dir,
+ masking_content=True,
+ )
+ self.assertEqual(
+ mock_print.mock_calls,
+ [
+ call("Generating data for", "gryffindor"),
+ call("Generating data for", "slytherin"),
+ ],
+ )
+
+ gryffindor_output_dir = os.path.join(output_dir, "gryffindor")
+
+ with self.assertLogs(level="INFO"):
+ do_import_realm(
+ import_dir=gryffindor_output_dir,
+ subdomain="gryffindor",
+ )
+
+ imported_realm = get_realm("gryffindor")
+ notification_bot = get_system_bot(settings.NOTIFICATION_BOT, imported_realm.id)
+
+ # Verify for realm imported from other product:
+ # * zulip_update_announcements_level = latest level
+ # * zulip_update_announcements_stream = None
+ # * group DM sent to admins suggesting to set the stream.
+ self.assertEqual(imported_realm.zulip_update_announcements_level, 2)
+ self.assertIsNone(imported_realm.zulip_update_announcements_stream)
+ group_direct_message = Message.objects.filter(
+ realm=imported_realm, sender=notification_bot
+ ).first()
+ assert group_direct_message is not None
+ self.assertIn(
+ "These notifications are currently turned off in your organization. "
+ "If you configure a stream within one week, your organization will not miss any update messages.",
+ group_direct_message.content,
+ )
+
+ # Two new updates added.
+ new_updates = [
+ ZulipUpdateAnnouncement(
+ level=3,
+ message="Announcement message 3.",
+ ),
+ ZulipUpdateAnnouncement(
+ level=4,
+ message="Announcement message 4.",
+ ),
+ ]
+ self.zulip_update_announcements.extend(new_updates)
+
+ # Wait for one week before starting to skip sending updates.
+ now = timezone_now()
+ with time_machine.travel(now + timedelta(days=6), tick=False):
+ send_zulip_update_announcements(skip_delay=False)
+ imported_realm.refresh_from_db()
+ self.assertEqual(imported_realm.zulip_update_announcements_level, 2)
+
+ # No stream configured. Skip updates.
+ with time_machine.travel(now + timedelta(days=8), tick=False):
+ send_zulip_update_announcements(skip_delay=False)
+ imported_realm.refresh_from_db()
+ self.assertEqual(imported_realm.zulip_update_announcements_level, 4)
+ zulip_updates_message_query = Message.objects.filter(
+ realm=imported_realm,
+ sender=notification_bot,
+ recipient__type=Recipient.STREAM,
+ )
+ self.assertFalse(zulip_updates_message_query.exists())
+
+ new_updates = [
+ ZulipUpdateAnnouncement(
+ level=5,
+ message="Announcement message 5.",
+ ),
+ ZulipUpdateAnnouncement(
+ level=6,
+ message="Announcement message 6.",
+ ),
+ ]
+ self.zulip_update_announcements.extend(new_updates)
+
+ # Stream configured, send update messages.
+ imported_realm.zulip_update_announcements_stream = get_stream(
+ "Gryffindor common room", imported_realm
+ )
+ imported_realm.save()
+
+ with time_machine.travel(now + timedelta(days=10), tick=False):
+ send_zulip_update_announcements(skip_delay=False)
+ imported_realm.refresh_from_db()
+ self.assertEqual(imported_realm.zulip_update_announcements_level, 6)
+ self.assertTrue(zulip_updates_message_query.exists())
| Set announcement streams when an organisation gets imported.
https://github.com/zulip/zulip/pull/28720#discussion_r1486598760
> So this probably is something we should fork to its own follow-up issue, but when an organization gets imported from another product, do any of these announcement stream fields get set? I assume the answer is no.
> We probably want to initialize them as we would for a new realm somehow when doing data imports from non-Zulip servers; probably a little prep PR we could do for that is make sure the export tarballs have a clear "what product was this exported from" field -- and then we can build on top of that.
Currently, the streams are set to None.
| Hello @zulip/server-misc members, this issue was labeled with the "area: export/import" label, so you may want to check it out!
<!-- areaLabelAddition -->
I think a fine solution here is not to configure any streams, and to send administrators the same initial group DM that we send when no stream is configured for Zulip updates.
Yeah I like the plan of sending the initial DMs. | 2024-04-17T11:56:19 |
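To summarize the technical decision in this record's patch: at the end of `do_import_realm`, the importer checks whether the export carries a `REALM_EXPORTED` audit-log entry (only exports generated by a Zulip server have one) and, if not, treats the realm as imported from another product. A condensed sketch of that flow is below; the names are taken from the diff, the helper function name is invented, and the `RealmAuditLog` import path is assumed to match the one used in the codebase.
```python
from zerver.lib.zulip_update_announcements import send_zulip_update_announcements
from zerver.models import RealmAuditLog  # import path assumed; see the diff above


def handle_announcements_after_import(realm) -> None:
    exported_by_zulip = RealmAuditLog.objects.filter(
        realm=realm, event_type=RealmAuditLog.REALM_EXPORTED, acting_user=None
    ).exists()
    if not exported_by_zulip:
        # Slack/Mattermost/Rocket.Chat import: start at the latest announcements
        # level (so old update messages are skipped) and group-DM the admins so
        # they can configure a stream for future updates.
        send_zulip_update_announcements(
            skip_delay=False, realm_imported_from_other_product=realm
        )
```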
zulip/zulip | 29,770 | zulip__zulip-29770 | [
"27802"
] | 10f0d5dce3e3b8743d59c034dcfc22f6c5736bc3 | diff --git a/zerver/lib/markdown/help_relative_links.py b/zerver/lib/markdown/help_relative_links.py
--- a/zerver/lib/markdown/help_relative_links.py
+++ b/zerver/lib/markdown/help_relative_links.py
@@ -181,7 +181,7 @@ def group_handle_match(key: str) -> str:
"drafts": ["Drafts", "/#drafts", draft_instructions],
"scheduled": ["Scheduled messages", "/#scheduled", scheduled_instructions],
"recent": ["Recent conversations", "/#recent", recent_instructions],
- "all": ["Combined feed", "/#all_messages", all_instructions],
+ "all": ["Combined feed", "/#feed", all_instructions],
"starred": ["Starred messages", "/#narrow/is/starred", starred_instructions],
"direct": ["Direct message feed", "/#narrow/is/dm", direct_instructions],
"inbox": ["Inbox", "/#inbox", inbox_instructions],
diff --git a/zerver/lib/url_redirects.py b/zerver/lib/url_redirects.py
--- a/zerver/lib/url_redirects.py
+++ b/zerver/lib/url_redirects.py
@@ -83,6 +83,7 @@ class URLRedirect:
URLRedirect("/help/view-and-browse-images", "/help/view-images-and-videos"),
URLRedirect("/help/bots-and-integrations", "/help/bots-overview"),
URLRedirect("/help/configure-notification-bot", "/help/configure-automated-notices"),
+ URLRedirect("/help/all-messages", "/help/combined-feed"),
]
LANDING_PAGE_REDIRECTS = [
| diff --git a/web/tests/hashchange.test.js b/web/tests/hashchange.test.js
--- a/web/tests/hashchange.test.js
+++ b/web/tests/hashchange.test.js
@@ -228,7 +228,7 @@ run_test("hash_interactions", ({override, override_rewire}) => {
[message_viewport, "stop_auto_scrolling"],
]);
- window.location.hash = "#all_messages";
+ window.location.hash = "#feed";
hide_all_called = false;
helper.clear_events();
diff --git a/web/tests/hotkey.test.js b/web/tests/hotkey.test.js
--- a/web/tests/hotkey.test.js
+++ b/web/tests/hotkey.test.js
@@ -21,9 +21,8 @@ const $ = require("./lib/zjquery");
// it calls any external module other than `ui.foo`, it'll crash.
// Future work includes making sure it actually does call `ui.foo()`.
-// Since all the tests here are based on narrow starting with all_messages.
-// We set our default narrow to the combined feed here.
-window.location.hash = "#all_messages";
+// All tests use the combined feed as the default narrow.
+window.location.hash = "#feed";
set_global("navigator", {
platform: "",
| Rename "All messages" to "Combined feed"
<!-- Issue description -->
We should rename "All messages" to "Combined feed", and update documentation accordingly.
This issue is just for changing user-facing strings. Backend changes can happen once we are confident that the new name has been finalized.
<!-- Link to a message in the chat.zulip.org discussion. Message links will still work even if the topic is renamed or resolved. Link back to this issue from the chat.zulip.org thread. -->
[CZO thread](https://chat.zulip.org/#narrow/stream/101-design/topic/All.20messages.20view.20label.20and.20icon/near/1668230)
| Hello @zulip/server-message-view, @zulip/server-user-docs members, this issue was labeled with the "area: message view", "area: documentation (user)" labels, so you may want to check it out!
<!-- areaLabelAddition -->
Please assign this issue to me @alya
@alya Can I get assigned to this issue? I've previously worked on renaming `display-settings` to preferences in pr [26110](https://github.com/zulip/zulip/pull/26110).
@nimishmedatwal Sure, thanks!
@zulipbot claim
@darksapien23151 This issue cannot be claimed, as someone else is already working on it. Please see our [contributor guide](https://zulip.readthedocs.io/en/latest/overview/contributing.html#your-first-codebase-contribution) for advice on finding an issue to work on. Thanks!
@nimishmedatwal You have been unassigned from this issue because you have not made any updates for over 14 days. Please feel free to reclaim the issue if you decide to pick up again. Thanks!
@zulipbot claim
@ayush4345 This issue cannot be claimed, as someone else is already working on it. Please see our [contributor guide](https://zulip.readthedocs.io/en/latest/overview/contributing.html#your-first-codebase-contribution) for advice on finding an issue to work on. Thanks!
@zulipbot claim
@ayush4345 This issue cannot be claimed, as someone else is already working on it. Please see our [contributor guide](https://zulip.readthedocs.io/en/latest/overview/contributing.html#your-first-codebase-contribution) for advice on finding an issue to work on. Thanks!
@zulipbot claim
@bisoladebiyi This issue cannot be claimed, as someone else is already working on it. Please see our [contributor guide](https://zulip.readthedocs.io/en/latest/overview/contributing.html#your-first-codebase-contribution) for advice on finding an issue to work on. Thanks!
@zulipbot claim
Welcome to Zulip, @harshsbhat! We just sent you an invite to collaborate on this repository at https://github.com/zulip/zulip/invitations. Please accept this invite in order to claim this issue and begin a fun, rewarding experience contributing to Zulip!
Here's some tips to get you off to a good start:
- Join me on the [Zulip developers' server](https://chat.zulip.org), to get help, chat about this issue, and meet the other developers.
- [Unwatch this repository](https://help.github.com/articles/unwatching-repositories/), so that you don't get 100 emails a day.
As you work on this issue, you'll also want to refer to the [Zulip code contribution guide](https://zulip.readthedocs.io/en/latest/contributing/index.html), as well as the rest of the developer documentation on that site.
See you on the other side (that is, the pull request side)!
@zulipbot claim
@nupurkale78 This issue cannot be claimed, as someone else is already working on it. Please see our [contributor guide](https://zulip.readthedocs.io/en/latest/overview/contributing.html#your-first-codebase-contribution) for advice on finding an issue to work on. Thanks!
@harshsbhat You have been unassigned from this issue because you have not made any updates for over 14 days. Please feel free to reclaim the issue if you decide to pick up again. Thanks!
| 2024-04-17T20:15:22 |
zulip/zulip | 29,964 | zulip__zulip-29964 | [
"28947"
] | acf13e49cffade5b763c7639cae342eeb5817c86 | diff --git a/analytics/lib/counts.py b/analytics/lib/counts.py
--- a/analytics/lib/counts.py
+++ b/analytics/lib/counts.py
@@ -2,11 +2,10 @@
import time
from collections import OrderedDict, defaultdict
from datetime import datetime, timedelta
-from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
+from typing import Callable, Dict, List, Optional, Sequence, Tuple, Type, Union
from django.conf import settings
from django.db import connection, models
-from django.db.models import F
from psycopg2.sql import SQL, Composable, Identifier, Literal
from typing_extensions import TypeAlias, override
@@ -312,27 +311,47 @@ def do_increment_logging_stat(
return
table = stat.data_collector.output_table
+ id_args: Dict[str, Union[int, None]] = {}
+ conflict_args: List[str] = []
if table == RealmCount:
assert isinstance(model_object_for_bucket, Realm)
- id_args: Dict[
- str, Optional[Union[Realm, UserProfile, Stream, "RemoteRealm", "RemoteZulipServer"]]
- ] = {"realm": model_object_for_bucket}
+ id_args = {"realm_id": model_object_for_bucket.id}
+ conflict_args = ["realm_id"]
elif table == UserCount:
assert isinstance(model_object_for_bucket, UserProfile)
- id_args = {"realm": model_object_for_bucket.realm, "user": model_object_for_bucket}
+ id_args = {
+ "realm_id": model_object_for_bucket.realm_id,
+ "user_id": model_object_for_bucket.id,
+ }
+ conflict_args = ["user_id"]
elif table == StreamCount:
assert isinstance(model_object_for_bucket, Stream)
- id_args = {"realm": model_object_for_bucket.realm, "stream": model_object_for_bucket}
+ id_args = {
+ "realm_id": model_object_for_bucket.realm_id,
+ "stream_id": model_object_for_bucket.id,
+ }
+ conflict_args = ["stream_id"]
elif table == RemoteInstallationCount:
assert isinstance(model_object_for_bucket, RemoteZulipServer)
- id_args = {"server": model_object_for_bucket, "remote_id": None}
+ id_args = {"server_id": model_object_for_bucket.id, "remote_id": None}
+ conflict_args = ["server_id"]
elif table == RemoteRealmCount:
assert isinstance(model_object_for_bucket, RemoteRealm)
+ # For RemoteRealmCount (e.g. `mobile_pushes_forwarded::day`),
+ # we have no `remote_id` nor `realm_id`, since they are not
+ # imported from the remote server, which is the source of
+ # truth of those two columns. Their "ON CONFLICT" is thus the
+ # only unique key we have, which is `remote_realm_id`, and not
+ # `server_id` / `realm_id`.
id_args = {
- "server": model_object_for_bucket.server,
- "remote_realm": model_object_for_bucket,
+ "server_id": model_object_for_bucket.server_id,
+ "remote_realm_id": model_object_for_bucket.id,
"remote_id": None,
+ "realm_id": None,
}
+ conflict_args = [
+ "remote_realm_id",
+ ]
else:
raise AssertionError("Unsupported CountStat output_table")
@@ -343,16 +362,49 @@ def do_increment_logging_stat(
else:
raise AssertionError("Unsupported CountStat frequency")
- row, created = table._default_manager.get_or_create(
- property=stat.property,
- subgroup=subgroup,
- end_time=end_time,
- defaults={"value": increment},
- **id_args,
+ is_subgroup: SQL = SQL("NULL")
+ if subgroup is not None:
+ is_subgroup = SQL("NOT NULL")
+ # For backwards consistency, we cast the subgroup to a string
+ # in Python; this emulates the behaviour of `get_or_create`,
+ # which was previously used in this function, and performed
+ # this cast because the `subgroup` column is defined as a
+ # `CharField`. Omitting this explicit cast causes a subgroup
+ # of the boolean False to be passed as the PostgreSQL false,
+ # which it stringifies as the lower-case `'false'`, not the
+ # initial-case `'False'` if Python stringifies it.
+ #
+ # Other parts of the system (e.g. count_message_by_user_query)
+ # already use PostgreSQL to cast bools to strings, resulting
+ # in `subgroup` values of lower-case `'false'` -- for example
+ # in `messages_sent:is_bot:hour`. Fixing this inconsistency
+ # via a migration is complicated by these records being
+ # exchanged over the wire from remote servers.
+ subgroup = str(subgroup)
+ conflict_args.append("subgroup")
+
+ id_column_names = SQL(", ").join(map(Identifier, id_args.keys()))
+ id_values = SQL(", ").join(map(Literal, id_args.values()))
+ conflict_columns = SQL(", ").join(map(Identifier, conflict_args))
+
+ sql_query = SQL(
+ """
+ INSERT INTO {table_name}(property, subgroup, end_time, value, {id_column_names})
+ VALUES (%s, %s, %s, %s, {id_values})
+ ON CONFLICT (property, end_time, {conflict_columns})
+ WHERE subgroup IS {is_subgroup}
+ DO UPDATE SET
+ value = {table_name}.value + EXCLUDED.value
+ """
+ ).format(
+ table_name=Identifier(table._meta.db_table),
+ id_column_names=id_column_names,
+ id_values=id_values,
+ conflict_columns=conflict_columns,
+ is_subgroup=is_subgroup,
)
- if not created:
- row.value = F("value") + increment
- row.save(update_fields=["value"])
+ with connection.cursor() as cursor:
+ cursor.execute(sql_query, [stat.property, subgroup, end_time, increment])
def do_drop_all_analytics_tables() -> None:
diff --git a/zilencer/migrations/0060_remove_remoterealmcount_unique_remote_realm_installation_count_and_more.py b/zilencer/migrations/0060_remove_remoterealmcount_unique_remote_realm_installation_count_and_more.py
new file mode 100644
--- /dev/null
+++ b/zilencer/migrations/0060_remove_remoterealmcount_unique_remote_realm_installation_count_and_more.py
@@ -0,0 +1,81 @@
+# Generated by Django 5.0.5 on 2024-05-06 15:17
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ("zilencer", "0059_remoterealmauditlog_add_synced_billing_event_type_index"),
+ ]
+
+ operations = [
+ migrations.SeparateDatabaseAndState(
+ # Django does not provide a RenameConstraint operation.
+ # "Constraints" are created as indexes in PostgreSQL, so
+ # we rename the underlying indexes.
+ database_operations=[
+ migrations.RunSQL(
+ sql=(
+ "ALTER INDEX unique_remote_realm_installation_count "
+ "RENAME TO unique_server_realm_installation_count"
+ ),
+ reverse_sql=(
+ "ALTER INDEX unique_server_realm_installation_count "
+ "RENAME TO unique_remote_realm_installation_count"
+ ),
+ ),
+ migrations.RunSQL(
+ sql=(
+ "ALTER INDEX unique_remote_realm_installation_count_null_subgroup "
+ "RENAME TO unique_server_realm_installation_count_null_subgroup"
+ ),
+ reverse_sql=(
+ "ALTER INDEX unique_server_realm_installation_count_null_subgroup "
+ "RENAME TO unique_remote_realm_installation_count_null_subgroup"
+ ),
+ ),
+ ],
+ state_operations=[
+ migrations.RemoveConstraint(
+ model_name="remoterealmcount",
+ name="unique_remote_realm_installation_count",
+ ),
+ migrations.RemoveConstraint(
+ model_name="remoterealmcount",
+ name="unique_remote_realm_installation_count_null_subgroup",
+ ),
+ migrations.AddConstraint(
+ model_name="remoterealmcount",
+ constraint=models.UniqueConstraint(
+ condition=models.Q(("subgroup__isnull", False)),
+ fields=("server", "realm_id", "property", "subgroup", "end_time"),
+ name="unique_server_realm_installation_count",
+ ),
+ ),
+ migrations.AddConstraint(
+ model_name="remoterealmcount",
+ constraint=models.UniqueConstraint(
+ condition=models.Q(("subgroup__isnull", True)),
+ fields=("server", "realm_id", "property", "end_time"),
+ name="unique_server_realm_installation_count_null_subgroup",
+ ),
+ ),
+ ],
+ ),
+ migrations.AddConstraint(
+ model_name="remoterealmcount",
+ constraint=models.UniqueConstraint(
+ condition=models.Q(("subgroup__isnull", False)),
+ fields=("remote_realm_id", "property", "subgroup", "end_time"),
+ name="unique_remote_realm_installation_count",
+ ),
+ ),
+ migrations.AddConstraint(
+ model_name="remoterealmcount",
+ constraint=models.UniqueConstraint(
+ condition=models.Q(("subgroup__isnull", True)),
+ fields=("remote_realm_id", "property", "end_time"),
+ name="unique_remote_realm_installation_count_null_subgroup",
+ ),
+ ),
+ ]
diff --git a/zilencer/models.py b/zilencer/models.py
--- a/zilencer/models.py
+++ b/zilencer/models.py
@@ -416,14 +416,28 @@ class RemoteRealmCount(BaseRemoteCount):
class Meta:
constraints = [
+ # These two constraints come from the information as
+ # provided by the remote server, for rows they provide.
UniqueConstraint(
fields=["server", "realm_id", "property", "subgroup", "end_time"],
condition=Q(subgroup__isnull=False),
- name="unique_remote_realm_installation_count",
+ name="unique_server_realm_installation_count",
),
UniqueConstraint(
fields=["server", "realm_id", "property", "end_time"],
condition=Q(subgroup__isnull=True),
+ name="unique_server_realm_installation_count_null_subgroup",
+ ),
+ # These two constraints come from our internal
+ # record-keeping, which has a RemoteRealm object.
+ UniqueConstraint(
+ fields=["remote_realm_id", "property", "subgroup", "end_time"],
+ condition=Q(subgroup__isnull=False),
+ name="unique_remote_realm_installation_count",
+ ),
+ UniqueConstraint(
+ fields=["remote_realm_id", "property", "end_time"],
+ condition=Q(subgroup__isnull=True),
name="unique_remote_realm_installation_count_null_subgroup",
),
UniqueConstraint(
| diff --git a/analytics/tests/test_counts.py b/analytics/tests/test_counts.py
--- a/analytics/tests/test_counts.py
+++ b/analytics/tests/test_counts.py
@@ -1324,6 +1324,11 @@ def test_increment(self) -> None:
do_increment_logging_stat(self.default_realm, stat, None, self.TIME_ZERO)
self.assertTableState(RealmCount, ["value"], [[3]])
+ def test_do_increment_logging_start_query_count(self) -> None:
+ stat = LoggingCountStat("test", RealmCount, CountStat.DAY)
+ with self.assert_database_query_count(1):
+ do_increment_logging_stat(self.default_realm, stat, None, self.TIME_ZERO)
+
class TestLoggingCountStats(AnalyticsTestCase):
def test_aggregation(self) -> None:
diff --git a/zerver/tests/test_signup.py b/zerver/tests/test_signup.py
--- a/zerver/tests/test_signup.py
+++ b/zerver/tests/test_signup.py
@@ -932,7 +932,7 @@ def test_register(self) -> None:
# seem to be any O(N) behavior. Some of the cache hits are related
# to sending messages, such as getting the welcome bot, looking up
# the alert words for a realm, etc.
- with self.assert_database_query_count(105), self.assert_memcached_count(18):
+ with self.assert_database_query_count(104), self.assert_memcached_count(18):
with self.captureOnCommitCallbacks(execute=True):
self.register(self.nonreg_email("test"), "test")
diff --git a/zerver/tests/test_users.py b/zerver/tests/test_users.py
--- a/zerver/tests/test_users.py
+++ b/zerver/tests/test_users.py
@@ -907,7 +907,7 @@ def test_create_user_with_multiple_streams(self) -> None:
prereg_user = PreregistrationUser.objects.get(email="[email protected]")
- with self.assert_database_query_count(94):
+ with self.assert_database_query_count(93):
with self.assert_memcached_count(23):
with self.capture_send_event_calls(expected_num_events=11) as events:
fred = do_create_user(
 | Use single database query in `do_increment_logging_stat`
We use an inefficient database query construction for `do_increment_logging_stat`:
```
row, created = table._default_manager.get_or_create(
property=stat.property,
subgroup=subgroup,
end_time=end_time,
defaults={"value": increment},
**id_args,
)
if not created:
row.value = F("value") + increment
row.save(update_fields=["value"])
```
Django (and postgres) support doing this operation in a single SQL query, perhaps using https://docs.djangoproject.com/en/5.0/ref/models/expressions/#f-expressions. I'm not sure whether https://docs.djangoproject.com/en/5.0/ref/models/querysets/#update-or-create will do the right thing in Django 5, or if it'll do multiple queries.
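For illustration, here is a minimal sketch (an assumption about one possible shape, not the merged implementation) of how a PostgreSQL `INSERT ... ON CONFLICT` upsert collapses the read-then-write into a single statement. The hard-coded `analytics_realmcount` table and the `(realm_id, property, end_time) WHERE subgroup IS NULL` conflict target are chosen just for the example; the real function also has to cover subgroups and the other count tables.
```
# Hedged sketch only: single-statement increment of a RealmCount-style row.
# Assumes a partial unique index on (realm_id, property, end_time) where
# subgroup IS NULL exists, so ON CONFLICT has a target to arbitrate on.
from datetime import datetime

from django.db import connection


def increment_realm_count(
    realm_id: int, stat_property: str, end_time: datetime, increment: int
) -> None:
    with connection.cursor() as cursor:
        cursor.execute(
            """
            INSERT INTO analytics_realmcount (realm_id, property, subgroup, end_time, value)
            VALUES (%s, %s, NULL, %s, %s)
            ON CONFLICT (realm_id, property, end_time) WHERE subgroup IS NULL
            DO UPDATE SET value = analytics_realmcount.value + EXCLUDED.value
            """,
            [realm_id, stat_property, end_time, increment],
        )
```
The merged change above takes essentially this route, composing the statement with `psycopg2.sql` so that the table name, id columns, and conflict target can vary per count table.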
You can use this settings tweak:
```
diff --git a/zproject/computed_settings.py b/zproject/computed_settings.py
index 8d691753da..813a896956 100644
--- a/zproject/computed_settings.py
+++ b/zproject/computed_settings.py
@@ -864,11 +864,11 @@ LOGGING: Dict[str, Any] = {
"propagate": False,
},
## Uncomment the following to get all database queries logged to the console
- # 'django.db': {
- # 'level': 'DEBUG',
- # 'handlers': ['console'],
- # 'propagate': False,
- # },
+ "django.db": {
+ "level": "DEBUG",
+ "handlers": ["console"],
+ "propagate": False,
+ },
# other libraries, alphabetized
"django_auth_ldap": {
"level": "DEBUG",
```
to make Django print all database queries to verify your work.
See [this chat.zulip.org thread](https://chat.zulip.org/#narrow/stream/3-backend/topic/mark-as-unread.20performance/near/1738515) for details.
| Hello @zulip/server-analytics members, this issue was labeled with the "area: analytics" label, so you may want to check it out!
<!-- areaLabelAddition -->
@zulipbot claim
@Mahhheshh You have been unassigned from this issue because you have not made any updates for over 14 days. Please feel free to reclaim the issue if you decide to pick up again. Thanks!
yes
Can I work on this issue?
@zulipbot claim
Hello @ritikraj26, it looks like you've currently claimed 1 issue in this repository. We encourage new contributors to focus their efforts on at most 1 issue at a time, so please complete your work on your other claimed issues before trying to claim this issue again.
We look forward to your valuable contributions!
| 2024-05-06T16:20:19 |
zulip/zulip | 29,992 | zulip__zulip-29992 | [
"28412"
] | 64189bfb7f072fbfdb578e588c91b646fd11bc46 | diff --git a/zerver/actions/message_send.py b/zerver/actions/message_send.py
--- a/zerver/actions/message_send.py
+++ b/zerver/actions/message_send.py
@@ -1531,8 +1531,10 @@ def check_private_message_policy(
realm: Realm, sender: UserProfile, user_profiles: Sequence[UserProfile]
) -> None:
if realm.private_message_policy == Realm.PRIVATE_MESSAGE_POLICY_DISABLED:
- if sender.is_bot or (len(user_profiles) == 1 and user_profiles[0].is_bot):
- # We allow direct messages only between users and bots,
+ if sender.is_bot or (
+ len(user_profiles) == 1 and (user_profiles[0].is_bot or user_profiles[0] == sender)
+ ):
+ # We allow direct messages only between users and bots or to oneself,
# to avoid breaking the tutorial as well as automated
# notifications from system bots to users.
return
| diff --git a/zerver/tests/test_message_send.py b/zerver/tests/test_message_send.py
--- a/zerver/tests/test_message_send.py
+++ b/zerver/tests/test_message_send.py
@@ -2609,6 +2609,73 @@ def test_ensure_stream_gets_called(self) -> None:
# wasn't automatically created.
Stream.objects.get(name=stream_name, realm_id=realm.id)
+ def test_direct_message_to_self_and_bot_in_dm_disabled_org(self) -> None:
+ """
+ Test that a user can send a direct message to themselves and to a bot in a DM disabled organization
+ """
+ sender = self.example_user("hamlet")
+ sender.realm.private_message_policy = Realm.PRIVATE_MESSAGE_POLICY_DISABLED
+ sender.realm.save()
+
+ # Create a non-bot user
+ recipient_user = self.example_user("othello")
+ recipient_user.realm = sender.realm
+
+ # Create a new bot user
+ bot = do_create_user(
+ email="[email protected]",
+ password="",
+ realm=sender.realm,
+ full_name="Test Bot",
+ bot_type=UserProfile.DEFAULT_BOT,
+ bot_owner=sender,
+ acting_user=None,
+ )
+
+ # Test sending a message to self
+ result = self.api_post(
+ sender,
+ "/api/v1/messages",
+ {
+ "type": "private",
+ "to": orjson.dumps([sender.id]).decode(),
+ "content": "Test message to self",
+ },
+ )
+ self.assert_json_success(result)
+
+ msg = self.get_last_message()
+ expected = "Test message to self"
+ self.assertEqual(msg.content, expected)
+
+ # Test sending a message to non-bot user
+ result = self.api_post(
+ sender,
+ "/api/v1/messages",
+ {
+ "type": "private",
+ "to": orjson.dumps([recipient_user.id]).decode(),
+ "content": "Test message",
+ },
+ )
+ self.assert_json_error(result, "Direct messages are disabled in this organization.")
+
+ # Test sending a message to the bot
+ result = self.api_post(
+ sender,
+ "/api/v1/messages",
+ {
+ "type": "private",
+ "to": orjson.dumps([bot.id]).decode(),
+ "content": "Test message to bot",
+ },
+ )
+ self.assert_json_success(result)
+
+ msg = self.get_last_message()
+ expected = "Test message to bot"
+ self.assertEqual(msg.content, expected)
+
class TestCrossRealmPMs(ZulipTestCase):
def make_realm(self, domain: str) -> Realm:
| Make it possible to send a DM to bots and to yourself when DMs are disabled
<!-- Issue description -->
Even when DMs are disabled, you should still be able to send a DM to bots and to yourself. We broke something about this at some point, though. Currently, with a bot:
- Replying to a message works fine.
- Starting a new conversation looks like it won't work, but you can actually do it if you send with the keyboard (the button is disabled).
The tooltip on the disabled "Send" button also looks broken specifically when you're composing to a bot, probably because that button is actually supposed to be enabled, and we're in some weird intermediate state:
<img width="888" alt="Screenshot-2023-12-18-at-1 35 05AM" src="https://github.com/zulip/zulip/assets/2090066/1469d6fe-9b1f-4c7f-9e72-fe621cf40715">
This bug is mainly due to the `check_posting_policy_for_compose_box` method being called inside `on_compose_select_recipient_update` rather than `update_on_recipient_change`, which causes the compose box's posting policy to be updated only when you change the recipient dropdown, and not when you update the sender.
<!-- Link to a message in the chat.zulip.org discussion. Message links will still work even if the topic is renamed or resolved. Link back to this issue from the chat.zulip.org thread. -->
[CZO thread](https://chat.zulip.org/#narrow/stream/9-issues/topic/compose.20box.20tooltips.20lag.20.2327158/near/1705825)
| @zulipbot claim | 2024-05-07T19:13:08 |
zulip/zulip | 30,033 | zulip__zulip-30033 | [
"30014"
] | 9a3263ec5decd4603519671bb9e193a5c662510b | diff --git a/tools/lib/capitalization.py b/tools/lib/capitalization.py
--- a/tools/lib/capitalization.py
+++ b/tools/lib/capitalization.py
@@ -91,7 +91,7 @@
r"find accounts for another email address",
# SPECIAL CASES
# Because topics usually are lower-case, this would look weird if it were capitalized
- r"more topics",
+ r"show all topics",
# Used alone in a parenthetical where capitalized looks worse.
r"^deprecated$",
# We want the similar text in the Private Messages section to have the same capitalization.
diff --git a/zerver/models/streams.py b/zerver/models/streams.py
--- a/zerver/models/streams.py
+++ b/zerver/models/streams.py
@@ -137,7 +137,7 @@ class Stream(models.Model):
can_remove_subscribers_group = models.ForeignKey(UserGroup, on_delete=models.RESTRICT)
# The very first message ID in the stream. Used to help clients
- # determine whether they might need to display "more topics" for a
+ # determine whether they might need to display "show all topics" for a
# stream based on what messages they have cached.
first_message_id = models.IntegerField(null=True, db_index=True)
diff --git a/zilencer/management/commands/populate_db.py b/zilencer/management/commands/populate_db.py
--- a/zilencer/management/commands/populate_db.py
+++ b/zilencer/management/commands/populate_db.py
@@ -318,7 +318,7 @@ def handle(self, *args: Any, **options: Any) -> None:
if options["max_topics"] is None:
# If max_topics is not set, we use a default that's big
- # enough "more topics" should appear, and scales slowly
+ # enough "show all topics" should appear, and scales slowly
# with the number of messages.
options["max_topics"] = 8 + options["num_messages"] // 1000
| diff --git a/web/tests/topic_list_data.test.js b/web/tests/topic_list_data.test.js
--- a/web/tests/topic_list_data.test.js
+++ b/web/tests/topic_list_data.test.js
@@ -326,7 +326,7 @@ test("get_list_info unreads", ({override}) => {
list_info = get_list_info();
assert.equal(list_info.items.length, 12);
assert.equal(list_info.more_topics_unreads, 3);
- // Topic 14 now makes it above the "more topics" fold.
+ // Topic 14 now makes it above the "show all topics" fold.
assert.equal(list_info.more_topics_have_unread_mention_messages, false);
assert.equal(list_info.num_possible_topics, 16);
assert.equal(list_info.more_topics_unread_count_muted, true);
| Rename "more topics" to "all topics" in left sidebar
<!-- Issue description -->
The "more topics" links in the left sidebar show all the topics in the channel, so we should relabel them for clarity. All the documentation should be updated accordingly (but not the changelog).
The "more conversations" link doesn't necessarily show all DMs, so we should leave it as-is for now.
This is a tiny piece of the planned [left sidebar redesign](https://terpimost.github.io/zulip-sidebar/#stream).
| 2024-05-09T16:20:03 |
|
zulip/zulip | 30,061 | zulip__zulip-30061 | [
"30016"
] | 160076fdfa19e3852b61bcec943d9417273a2841 | diff --git a/zerver/actions/message_send.py b/zerver/actions/message_send.py
--- a/zerver/actions/message_send.py
+++ b/zerver/actions/message_send.py
@@ -1794,6 +1794,8 @@ def _internal_prep_message(
mention_backend: Optional[MentionBackend] = None,
limit_unread_user_ids: Optional[Set[int]] = None,
disable_external_notifications: bool = False,
+ forged: bool = False,
+ forged_timestamp: Optional[float] = None,
) -> Optional[SendMessageRequest]:
"""
Create a message object and checks it, but doesn't send it or save it to the database.
@@ -1822,6 +1824,8 @@ def _internal_prep_message(
mention_backend=mention_backend,
limit_unread_user_ids=limit_unread_user_ids,
disable_external_notifications=disable_external_notifications,
+ forged=forged,
+ forged_timestamp=forged_timestamp,
)
except JsonableError as e:
logging.exception(
@@ -1842,6 +1846,8 @@ def internal_prep_stream_message(
*,
email_gateway: bool = False,
limit_unread_user_ids: Optional[Set[int]] = None,
+ forged: bool = False,
+ forged_timestamp: Optional[float] = None,
) -> Optional[SendMessageRequest]:
"""
See _internal_prep_message for details of how this works.
@@ -1856,6 +1862,8 @@ def internal_prep_stream_message(
content=content,
email_gateway=email_gateway,
limit_unread_user_ids=limit_unread_user_ids,
+ forged=forged,
+ forged_timestamp=forged_timestamp,
)
| diff --git a/zerver/tests/test_integrations.py b/zerver/tests/test_integrations.py
--- a/zerver/tests/test_integrations.py
+++ b/zerver/tests/test_integrations.py
@@ -62,7 +62,7 @@ def test_no_missing_screenshot_path(self) -> None:
message = (
'"{path}" does not exist for webhook {webhook_name}.\n'
"Consider updating zerver.lib.integrations.DOC_SCREENSHOT_CONFIG\n"
- 'and running "tools/generate-integration-docs-screenshot" to keep the screenshots up-to-date.'
+ 'and running "tools/screenshots/generate-integration-docs-screenshot" to keep the screenshots up-to-date.'
)
for integration_name in DOC_SCREENSHOT_CONFIG:
configs = DOC_SCREENSHOT_CONFIG[integration_name]
| Script to auto-generate message screenshots on /for/research
<!-- Issue description -->
At present, we manually generate screenshots on /for/research (and other landing pages), which makes them difficult to maintain. The UI in these screenshots is currently quite out of date. We should write a script to automate how the message screenshots are generated.
The script should be structured so that it's easy to:
- Extend it to screenshots on other pages. (Some will require a content pass, so let's get one page done first.)
- Update character names, channel names, topic names, and message content without having to think much about the code (one possible data-driven shape is sketched after the notes below).
Notes:
- Use `tools/generate-integration-docs-screenshot` as a starting point. `zilencer/management/commands/add_mock_conversation.py` may also be helpful, but does not currently run.
- Keep the message content, topics and channels the same as we have in the screenshots now. You can use placeholders for the profile images; I expect we'll update those. The senders should not be bots, of course.
- This is just for the message screenshots, not the streams (a.k.a. channels) and topics screenshot, which we'll be redoing separately in Figma.
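As a purely hypothetical sketch of that data-driven structure (none of these names, channels, or messages come from the existing screenshots or the Zulip codebase, and `send_message` below is a placeholder rather than a real helper):
```
# Hypothetical layout: all screenshot content lives in one data structure,
# so updating names, channels, topics, or message text never touches logic.
from typing import Callable, List, TypedDict


class ScreenshotMessage(TypedDict):
    sender: str  # full name of an example (non-bot) user
    content: str  # Markdown body of the message


class ScreenshotThread(TypedDict):
    channel: str
    topic: str
    messages: List[ScreenshotMessage]


MESSAGE_THREADS: List[ScreenshotThread] = [
    {
        "channel": "example channel",
        "topic": "example topic",
        "messages": [
            {"sender": "Example User", "content": "Placeholder message content."},
        ],
    },
]


def populate_threads(send_message: Callable[[str, str, str, str], None]) -> None:
    # send_message(sender, channel, topic, content) stands in for whatever
    # internal Zulip helper the real script ends up calling.
    for thread in MESSAGE_THREADS:
        for message in thread["messages"]:
            send_message(message["sender"], thread["channel"], thread["topic"], message["content"])
```
The screenshot itself could then be captured against a development server populated from this data, reusing whatever capture mechanism `tools/generate-integration-docs-screenshot` already has.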
<details>
<summary>
Screenshots to update
</summary>


</details>
| Hello @zulip/server-misc members, this issue was labeled with the "area: portico" label, so you may want to check it out!
<!-- areaLabelAddition -->
@roanster007 Are you interested in picking up this one? Assigning it to you for now, but feel free to unassign yourself if it doesn't seem like a good fit. | 2024-05-11T11:48:17 |
zulip/zulip | 30,089 | zulip__zulip-30089 | [
"30053"
] | 7ec6f5296e25a01320c590fc9823f520d3ef5eb9 | diff --git a/zerver/lib/onboarding.py b/zerver/lib/onboarding.py
--- a/zerver/lib/onboarding.py
+++ b/zerver/lib/onboarding.py
@@ -359,21 +359,6 @@ def send_initial_realm_messages(realm: Realm) -> None:
:point_right: Click on this message to start a new message in the same conversation.
""")
- content_of_zulip_update_announcements_topic_name = (
- _("""
-Welcome! To help you learn about new features and configuration options,
-this topic will receive messages about important changes in Zulip.
-
-You can read these update messages whenever it's convenient, or
-[mute]({mute_topic_help_url}) this topic if you are not interested.
-If your organization does not want to receive these announcements,
-they can be disabled. [Learn more]({zulip_update_announcements_help_url}).
- """)
- ).format(
- zulip_update_announcements_help_url="/help/configure-automated-notices#zulip-update-announcements",
- mute_topic_help_url="/help/mute-a-topic",
- )
-
welcome_messages: List[Dict[str, str]] = []
# Messages added to the "welcome messages" list last will be most
@@ -383,15 +368,6 @@ def send_initial_realm_messages(realm: Realm) -> None:
#
# Initial messages are configured below.
- # Zulip updates system advertisement.
- welcome_messages += [
- {
- "channel_name": str(Realm.DEFAULT_NOTIFICATION_STREAM_NAME),
- "topic_name": str(Realm.ZULIP_UPDATE_ANNOUNCEMENTS_TOPIC_NAME),
- "content": content_of_zulip_update_announcements_topic_name,
- },
- ]
-
# Advertising moving messages.
welcome_messages += [
{
diff --git a/zerver/lib/zulip_update_announcements.py b/zerver/lib/zulip_update_announcements.py
--- a/zerver/lib/zulip_update_announcements.py
+++ b/zerver/lib/zulip_update_announcements.py
@@ -14,6 +14,7 @@
internal_prep_stream_message,
)
from zerver.lib.message import SendMessageRequest, remove_single_newlines
+from zerver.lib.topic import messages_for_topic
from zerver.models.realm_audit_logs import RealmAuditLog
from zerver.models.realms import Realm
from zerver.models.users import UserProfile, get_system_bot
@@ -264,11 +265,47 @@ def send_zulip_update_announcements(
):
continue
- messages = internal_prep_zulip_update_announcements_stream_messages(
- current_level=realm_zulip_update_announcements_level,
- latest_level=latest_zulip_update_announcements_level,
- sender=sender,
- realm=realm,
+ # Send an introductory message just before the first update message.
+ with override_language(realm.default_language):
+ topic_name = str(realm.ZULIP_UPDATE_ANNOUNCEMENTS_TOPIC_NAME)
+
+ stream = realm.zulip_update_announcements_stream
+ assert stream.recipient_id is not None
+ topic_has_messages = messages_for_topic(
+ realm.id, stream.recipient_id, topic_name
+ ).exists()
+
+ if not topic_has_messages:
+ content_of_introductory_message = (
+ """
+To help you learn about new features and configuration options,
+this topic will receive messages about important changes in Zulip.
+
+You can read these update messages whenever it's convenient, or
+[mute]({mute_topic_help_url}) this topic if you are not interested.
+If your organization does not want to receive these announcements,
+they can be disabled. [Learn more]({zulip_update_announcements_help_url}).
+"""
+ ).format(
+ zulip_update_announcements_help_url="/help/configure-automated-notices#zulip-update-announcements",
+ mute_topic_help_url="/help/mute-a-topic",
+ )
+ messages = [
+ internal_prep_stream_message(
+ sender,
+ stream,
+ topic_name,
+ remove_single_newlines(content_of_introductory_message),
+ )
+ ]
+
+ messages.extend(
+ internal_prep_zulip_update_announcements_stream_messages(
+ current_level=realm_zulip_update_announcements_level,
+ latest_level=latest_zulip_update_announcements_level,
+ sender=sender,
+ realm=realm,
+ )
)
new_zulip_update_announcements_level = latest_zulip_update_announcements_level
| diff --git a/zerver/tests/test_signup.py b/zerver/tests/test_signup.py
--- a/zerver/tests/test_signup.py
+++ b/zerver/tests/test_signup.py
@@ -1305,7 +1305,7 @@ def check_able_to_create_realm(self, email: str, password: str = "test") -> None
# Check welcome messages
for stream_name, text, message_count in [
- (str(Realm.DEFAULT_NOTIFICATION_STREAM_NAME), "learn about new features", 3),
+            (str(Realm.DEFAULT_NOTIFICATION_STREAM_NAME), "a great place to say “hi”", 2),
(str(Realm.ZULIP_SANDBOX_CHANNEL_NAME), "Use this topic to try out", 5),
]:
stream = get_stream(stream_name, realm)
diff --git a/zerver/tests/test_zulip_update_announcements.py b/zerver/tests/test_zulip_update_announcements.py
--- a/zerver/tests/test_zulip_update_announcements.py
+++ b/zerver/tests/test_zulip_update_announcements.py
@@ -122,9 +122,10 @@ def test_send_zulip_update_announcements(self) -> None:
recipient__type_id=verona.id,
date_sent__gte=now + timedelta(days=10),
).order_by("id")
- self.assert_length(stream_messages, 2)
- self.assertEqual(stream_messages[0].content, "Announcement message 3.")
- self.assertEqual(stream_messages[1].content, "Announcement message 4.")
+ self.assert_length(stream_messages, 3)
+ self.assertIn("To help you learn about new features", stream_messages[0].content)
+ self.assertEqual(stream_messages[1].content, "Announcement message 3.")
+ self.assertEqual(stream_messages[2].content, "Announcement message 4.")
self.assertEqual(realm.zulip_update_announcements_level, 4)
def test_send_zulip_update_announcements_with_stream_configured(self) -> None:
| Do not send first Zulip Updates message with onboarding messages in new organization
<!-- Issue description -->
To help users focus on the onboarding experience, we should not send the introductory "Zulip updates" message ("Welcome! To help you learn about new features and configuration options, ...") as part of onboarding in a new organization.
It probably makes the most sense to just wait until we are sending the first update, and send it right before the first update message. We should drop "Welcome!" from the start of the message.
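For what it's worth, a rough sketch of that "send it lazily" idea (this simplifies and mirrors the merged patch above; `build_intro_message` is a hypothetical placeholder, not an existing helper):
```
# Rough sketch: prepend the introduction only the first time we ever post
# to the "Zulip updates" topic, i.e. right before the first real update.
from zerver.lib.topic import messages_for_topic


def prepare_update_batch(realm, sender, update_messages, build_intro_message):
    # build_intro_message(sender, stream, topic_name) is a placeholder for
    # preparing the (no longer "Welcome!"-prefixed) introductory message.
    topic_name = str(realm.ZULIP_UPDATE_ANNOUNCEMENTS_TOPIC_NAME)
    stream = realm.zulip_update_announcements_stream
    batch = []
    if not messages_for_topic(realm.id, stream.recipient_id, topic_name).exists():
        batch.append(build_intro_message(sender, stream, topic_name))
    batch.extend(update_messages)
    return batch
```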
| Hello @zulip/server-bots, @zulip/server-onboarding members, this issue was labeled with the "area: onboarding", "area: bots" labels, so you may want to check it out!
<!-- areaLabelAddition -->
@prakhar1144 Would you be up for taking care of this one? Please flag if this is tricky; I think from a product perspective, there are other reasonable options, as long as we delay the message.
I'm not sure if there are product decisions specific to self-hosters that need to be considered here. | 2024-05-14T17:47:50 |
zulip/zulip | 30,224 | zulip__zulip-30224 | [
"28220"
] | 05513c90f0909610e599f0313ad31f6eb117940c | diff --git a/analytics/management/commands/populate_analytics_db.py b/analytics/management/commands/populate_analytics_db.py
--- a/analytics/management/commands/populate_analytics_db.py
+++ b/analytics/management/commands/populate_analytics_db.py
@@ -285,6 +285,7 @@ def insert_fixture_data(
android, created = Client.objects.get_or_create(name="ZulipAndroid")
iOS, created = Client.objects.get_or_create(name="ZulipiOS")
react_native, created = Client.objects.get_or_create(name="ZulipMobile")
+ flutter, created = Client.objects.get_or_create(name="ZulipFlutter")
API, created = Client.objects.get_or_create(name="API: Python")
zephyr_mirror, created = Client.objects.get_or_create(name="zephyr_mirror")
unused, created = Client.objects.get_or_create(name="unused")
@@ -302,6 +303,7 @@ def insert_fixture_data(
android.id: self.generate_fixture_data(stat, 5, 5, 2, 0.6, 3),
iOS.id: self.generate_fixture_data(stat, 5, 5, 2, 0.6, 3),
react_native.id: self.generate_fixture_data(stat, 5, 5, 10, 0.6, 3),
+ flutter.id: self.generate_fixture_data(stat, 5, 5, 10, 0.6, 3),
API.id: self.generate_fixture_data(stat, 5, 5, 5, 0.6, 3),
zephyr_mirror.id: self.generate_fixture_data(stat, 1, 1, 3, 0.6, 3),
unused.id: self.generate_fixture_data(stat, 0, 0, 0, 0, 0),
@@ -313,6 +315,7 @@ def insert_fixture_data(
old_desktop.id: self.generate_fixture_data(stat, 50, 30, 8, 0.6, 3),
android.id: self.generate_fixture_data(stat, 50, 50, 2, 0.6, 3),
iOS.id: self.generate_fixture_data(stat, 50, 50, 2, 0.6, 3),
+ flutter.id: self.generate_fixture_data(stat, 5, 5, 10, 0.6, 3),
react_native.id: self.generate_fixture_data(stat, 5, 5, 10, 0.6, 3),
API.id: self.generate_fixture_data(stat, 50, 50, 5, 0.6, 3),
zephyr_mirror.id: self.generate_fixture_data(stat, 10, 10, 3, 0.6, 3),
diff --git a/analytics/views/stats.py b/analytics/views/stats.py
--- a/analytics/views/stats.py
+++ b/analytics/views/stats.py
@@ -525,7 +525,9 @@ def client_label_map(name: str) -> str:
if name == "ZulipiOS":
return "Old iOS app"
if name == "ZulipMobile":
- return "Mobile app"
+ return "Mobile app (React Native)"
+ if name in ["ZulipFlutter", "ZulipMobile/flutter"]:
+ return "Mobile app beta (Flutter)"
if name in ["ZulipPython", "API: Python"]:
return "Python API"
if name.startswith("Zulip") and name.endswith("Webhook"):
| diff --git a/analytics/tests/test_stats_views.py b/analytics/tests/test_stats_views.py
--- a/analytics/tests/test_stats_views.py
+++ b/analytics/tests/test_stats_views.py
@@ -661,7 +661,9 @@ def test_map_arrays(self) -> None:
"website": [1, 2, 3],
"ZulipiOS": [1, 2, 3],
"ZulipElectron": [2, 5, 7],
- "ZulipMobile": [1, 5, 7],
+ "ZulipMobile": [1, 2, 3],
+ "ZulipMobile/flutter": [1, 1, 1],
+ "ZulipFlutter": [1, 1, 1],
"ZulipPython": [1, 2, 3],
"API: Python": [1, 2, 3],
"SomethingRandom": [4, 5, 6],
@@ -676,7 +678,8 @@ def test_map_arrays(self) -> None:
"Old desktop app": [32, 36, 39],
"Old iOS app": [1, 2, 3],
"Desktop app": [2, 5, 7],
- "Mobile app": [1, 5, 7],
+ "Mobile app (React Native)": [1, 2, 3],
+ "Mobile app beta (Flutter)": [2, 2, 2],
"Web app": [1, 2, 3],
"Python API": [2, 4, 6],
"SomethingRandom": [4, 5, 6],
| Make sure we're showing flutter app correctly in /stats graphs and the like
The discussion in https://github.com/zulip/zulip-flutter/issues/453 reminded me that we should audit the codebase for where we check for `ZulipMobile` use and make sure all of that code works well with the Flutter app. Most significant is likely to be figuring out how we want to display it on the /stats graphs.
| Hello @zulip/server-analytics members, this issue was labeled with the "area: analytics" label, so you may want to check it out!
<!-- areaLabelAddition -->
| 2024-05-27T15:47:38 |
zulip/zulip | 30,247 | zulip__zulip-30247 | [
"26369"
] | ec199082fd9317171d2ed26a514aee56566dfb60 | diff --git a/zerver/lib/import_realm.py b/zerver/lib/import_realm.py
--- a/zerver/lib/import_realm.py
+++ b/zerver/lib/import_realm.py
@@ -324,14 +324,17 @@ def fix_customprofilefield(data: TableData) -> None:
def fix_message_rendered_content(
- realm: Realm, sender_map: Dict[int, Record], messages: List[Record]
+ realm: Realm,
+ sender_map: Dict[int, Record],
+ messages: List[Record],
+ content_key: str = "content",
+ rendered_content_key: str = "rendered_content",
) -> None:
"""
- This function sets the rendered_content of all the messages
- after the messages have been imported from a non-Zulip platform.
+ This function sets the rendered_content of the messages we're importing.
"""
for message in messages:
- if message["rendered_content"] is not None:
+ if message[rendered_content_key] is not None:
# For Zulip->Zulip imports, we use the original rendered
# Markdown; this avoids issues where e.g. a mention can no
# longer render properly because a user has changed their
@@ -340,7 +343,7 @@ def fix_message_rendered_content(
# However, we still need to update the data-user-id and
# similar values stored on mentions, stream mentions, and
# similar syntax in the rendered HTML.
- soup = BeautifulSoup(message["rendered_content"], "html.parser")
+ soup = BeautifulSoup(message[rendered_content_key], "html.parser")
user_mentions = soup.findAll("span", {"class": "user-mention"})
if len(user_mentions) != 0:
@@ -357,7 +360,7 @@ def fix_message_rendered_content(
old_user_id = int(mention["data-user-id"])
if old_user_id in user_id_map:
mention["data-user-id"] = str(user_id_map[old_user_id])
- message["rendered_content"] = str(soup)
+ message[rendered_content_key] = str(soup)
stream_mentions = soup.findAll("a", {"class": "stream"})
if len(stream_mentions) != 0:
@@ -366,7 +369,7 @@ def fix_message_rendered_content(
old_stream_id = int(mention["data-stream-id"])
if old_stream_id in stream_id_map:
mention["data-stream-id"] = str(stream_id_map[old_stream_id])
- message["rendered_content"] = str(soup)
+ message[rendered_content_key] = str(soup)
user_group_mentions = soup.findAll("span", {"class": "user-group-mention"})
if len(user_group_mentions) != 0:
@@ -375,11 +378,11 @@ def fix_message_rendered_content(
old_user_group_id = int(mention["data-user-group-id"])
if old_user_group_id in user_group_id_map:
mention["data-user-group-id"] = str(user_group_id_map[old_user_group_id])
- message["rendered_content"] = str(soup)
+ message[rendered_content_key] = str(soup)
continue
try:
- content = message["content"]
+ content = message[content_key]
sender_id = message["sender_id"]
sender = sender_map[sender_id]
@@ -399,7 +402,7 @@ def fix_message_rendered_content(
translate_emoticons=translate_emoticons,
).rendered_content
- message["rendered_content"] = rendered_content
+ message[rendered_content_key] = rendered_content
if "scheduled_timestamp" not in message:
# This logic runs also for ScheduledMessage, which doesn't use
# the rendered_content_version field.
@@ -415,6 +418,30 @@ def fix_message_rendered_content(
)
+def fix_message_edit_history(
+ realm: Realm, sender_map: Dict[int, Record], messages: List[Record]
+) -> None:
+ user_id_map = ID_MAP["user_profile"]
+ for message in messages:
+ edit_history_json = message.get("edit_history")
+ if not edit_history_json:
+ continue
+
+ edit_history = orjson.loads(edit_history_json)
+ for edit_history_message_dict in edit_history:
+ edit_history_message_dict["user_id"] = user_id_map[edit_history_message_dict["user_id"]]
+
+ fix_message_rendered_content(
+ realm,
+ sender_map,
+ messages=edit_history,
+ content_key="prev_content",
+ rendered_content_key="prev_rendered_content",
+ )
+
+ message["edit_history"] = orjson.dumps(edit_history).decode()
+
+
def current_table_ids(data: TableData, table: TableName) -> List[int]:
"""
Returns the ids present in the current table
@@ -1683,6 +1710,9 @@ def import_message_data(realm: Realm, sender_map: Dict[int, Record], import_dir:
)
logging.info("Successfully rendered Markdown for message batch")
+ fix_message_edit_history(
+ realm=realm, sender_map=sender_map, messages=data["zerver_message"]
+ )
# A LOT HAPPENS HERE.
# This is where we actually import the message data.
bulk_import_model(data, Message)
| diff --git a/zerver/tests/test_import_export.py b/zerver/tests/test_import_export.py
--- a/zerver/tests/test_import_export.py
+++ b/zerver/tests/test_import_export.py
@@ -1086,6 +1086,54 @@ def assert_realm_values(f: Callable[[Realm], object]) -> None:
Message.objects.filter(realm=imported_realm).count(),
)
+ def test_import_message_edit_history(self) -> None:
+ realm = get_realm("zulip")
+ iago = self.example_user("iago")
+ hamlet = self.example_user("hamlet")
+ user_mention_message = f"@**King Hamlet|{hamlet.id}** Hello"
+
+ self.login_user(iago)
+ message_id = self.send_stream_message(
+ self.example_user("iago"), "Verona", user_mention_message
+ )
+
+ new_content = "new content"
+ result = self.client_patch(
+ f"/json/messages/{message_id}",
+ {
+ "content": new_content,
+ },
+ )
+ self.assert_json_success(result)
+
+ self.export_realm_and_create_auditlog(realm)
+ with self.settings(BILLING_ENABLED=False), self.assertLogs(level="INFO"):
+ do_import_realm(get_output_dir(), "test-zulip")
+ imported_realm = Realm.objects.get(string_id="test-zulip")
+
+ imported_message = Message.objects.filter(realm=imported_realm).latest("id")
+ imported_hamlet_id = UserProfile.objects.get(
+ delivery_email=hamlet.delivery_email, realm=imported_realm
+ ).id
+ imported_iago_id = UserProfile.objects.get(
+ delivery_email=iago.delivery_email, realm=imported_realm
+ ).id
+
+ edit_history_json = imported_message.edit_history
+ assert edit_history_json is not None
+ edit_history = orjson.loads(edit_history_json)
+ self.assert_length(edit_history, 1)
+
+ prev_version_of_message = edit_history[0]
+ # Ensure the "user_id" (of the sender) was updated correctly
+ # to the imported id in the data.
+ self.assertEqual(prev_version_of_message["user_id"], imported_iago_id)
+
+ # The mention metadata in the rendered content should be updated.
+ self.assertIn(
+ f'data-user-id="{imported_hamlet_id}"', prev_version_of_message["prev_rendered_content"]
+ )
+
def get_realm_getters(self) -> List[Callable[[Realm], object]]:
names = set()
getters: List[Callable[[Realm], object]] = []
| Message edit history is missing from the data that is being exported from zulip cloud
<!-- Describe what you were expecting to see, what you saw instead, and steps to take in order to reproduce the buggy behavior. Screenshots can be helpful. -->
After importing data into a Zulip server built from the `zulip-cloud-current` branch, every message that has been edited or moved still shows the edited/moved marker in the message view, but the popup after clicking the marker shows nothing:

<!-- Check the box for the version of Zulip you are using (see https://zulip.com/help/view-zulip-version).-->
**Zulip Server and web app version:**
- [x] Zulip Cloud (`*.zulipchat.com`)
- [x] Zulip Server 7.0+
- [ ] Zulip Server 6.0+
- [ ] Zulip Server 5.0 or older
- [ ] Other or not sure
| Hello @zulip/server-misc members, this issue was labeled with the "area: export/import" label, so you may want to check it out!
<!-- areaLabelAddition -->
In fact, the edit history in the export gets preserved in the `zerver_message` table, but it refers to the old user id from before renumbering: 
So it's related to https://github.com/zulip/zulip/issues/11293
Thanks for the report @n0099! I agree with your assessment and appreciate the analysis and example. Tagging this as a priority. I think the fix will be to call `fix_message_rendered_content` on message edit history entries, with a bit of refactoring to make that possible. | 2024-05-30T01:01:51 |
zulip/zulip | 30,275 | zulip__zulip-30275 | [
"30255"
] | 8aea76448fba42f0f66bd8e3cfe446276a772c53 | diff --git a/zerver/lib/queue.py b/zerver/lib/queue.py
--- a/zerver/lib/queue.py
+++ b/zerver/lib/queue.py
@@ -89,6 +89,7 @@ def _get_parameters(self) -> pika.ConnectionParameters:
return pika.ConnectionParameters(
settings.RABBITMQ_HOST,
port=settings.RABBITMQ_PORT,
+ virtual_host=settings.RABBITMQ_VHOST,
heartbeat=self.rabbitmq_heartbeat,
tcp_options=tcp_options,
ssl_options=ssl_options,
diff --git a/zproject/default_settings.py b/zproject/default_settings.py
--- a/zproject/default_settings.py
+++ b/zproject/default_settings.py
@@ -191,6 +191,7 @@
MEMCACHED_USERNAME = None if get_secret("memcached_password") is None else "zulip@localhost"
RABBITMQ_HOST = "127.0.0.1"
RABBITMQ_PORT = 5672
+RABBITMQ_VHOST = "/"
RABBITMQ_USERNAME = "zulip"
RABBITMQ_USE_TLS = False
REDIS_HOST = "127.0.0.1"
diff --git a/zproject/prod_settings_template.py b/zproject/prod_settings_template.py
--- a/zproject/prod_settings_template.py
+++ b/zproject/prod_settings_template.py
@@ -606,6 +606,8 @@
## optionally RABBITMQ_PORT, to the hostname and port here.
# RABBITMQ_HOST = "127.0.0.1"
# RABBITMQ_PORT = 5672
+## To use a different RabbitMQ "virtual host", adjust this.
+# RABBITMQ_VHOST = "/"
## To use another RabbitMQ user than the default "zulip", set RABBITMQ_USERNAME here.
# RABBITMQ_USERNAME = "zulip"
## To access the RabbitMQ server over TLS, set this to True; this is
| Make RabbitMQ vHost configurable
In our setup we have a shared RabbitMQ service for the whole installation, including our other services, and we separate these apps with vhosts. Unfortunately, Zulip exposes neither pika's URL parameters nor its vhost setting, so it can only use the `/` vhost.
I propose that the Parameters here: https://github.com/zulip/zulip/blob/7377d3fab941bf137ecd70f3f4fa88731be5e768/zerver/lib/queue.py#L89-L96 expose a `vhost=` setting in the same way as the environment variables, or alternatively allow specifying the RabbitMQ URL via pika's URLParameters.
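For reference, a small sketch of what both options look like on the pika side (the host name, credentials, and the `zulip` vhost are made-up example values):
```
# Illustrative only; "rabbitmq.internal", the credentials, and the "zulip"
# vhost below are example values, not anything Zulip ships.
import pika

# Option 1: keep pika.ConnectionParameters (as zerver/lib/queue.py does
# today) and pass the additional virtual_host argument:
explicit_params = pika.ConnectionParameters(
    "rabbitmq.internal",
    port=5672,
    virtual_host="zulip",
)

# Option 2: accept a single AMQP URL; the path component is the vhost
# (URL-encoded, so the default "/" vhost would be written as "%2F"):
url_params = pika.URLParameters("amqp://zulip:secret@rabbitmq.internal:5672/zulip")
```
The patch above goes with the first option, adding a `RABBITMQ_VHOST` setting that defaults to `"/"`.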
| 2024-05-31T15:48:10 |
||
gpodder/mygpo | 57 | gpodder__mygpo-57 | [
"51"
] | f2a425e74f812dd64349392342832cd7cc73ec31 | diff --git a/mygpo/api/advanced/updates.py b/mygpo/api/advanced/updates.py
--- a/mygpo/api/advanced/updates.py
+++ b/mygpo/api/advanced/updates.py
@@ -30,7 +30,7 @@
class DeviceUpdates(View):
""" returns various updates for a device
- http://wiki.gpodder.org/wiki/Web_Services/API_2/Devices#Get_Updates """
+ https://gpoddernet.readthedocs.io/en/latest/api//Devices#Get_Updates """
@method_decorator(csrf_exempt)
@method_decorator(require_valid_user)
diff --git a/mygpo/users/settings.py b/mygpo/users/settings.py
--- a/mygpo/users/settings.py
+++ b/mygpo/users/settings.py
@@ -5,7 +5,7 @@
## Well-known settings
# this should be documented at
-# http://wiki.gpodder.org/wiki/Web_Services/API_2/Settings#Known_Settings
+# https://gpoddernet.readthedocs.io/en/latest/api//Settings#Known_Settings
# Flag to allow storing of user-agents
STORE_UA = WellKnownSetting('store_user_agent', True)
| Feedback link on left hand side is broken
There are a few issues with the Feedback "tab" on the left hand side:
* The styling is not rendering properly in FF 54 (Linux). I see no background behind the text that comes up; instead it dims the page and I see the text overlaid on top of the page text.
* The text indicates I am being redirected, but the redirect does not seem to execute.
* The redirect link goes to [getsatisfaction.com](http://retired.getsatisfaction.com/) which is not active. It should probably go to this issue tracker.
| 2017-08-02T10:19:03 |
||
gpodder/mygpo | 259 | gpodder__mygpo-259 | [
"241"
] | ad9dd1547ae46e570c75b3163099afb13468af8b | diff --git a/mygpo/data/feeddownloader.py b/mygpo/data/feeddownloader.py
--- a/mygpo/data/feeddownloader.py
+++ b/mygpo/data/feeddownloader.py
@@ -77,7 +77,9 @@ class PodcastUpdater(object):
""" Updates the podcast specified by the podcast_url """
def __init__(self, podcast_url):
- self.podcast_url = podcast_url
+ self.podcast_url = (
+ (podcast_url[:2046] + '..') if len(podcast_url) > 2048 else podcast_url
+ )
def update_podcast(self):
""" Update the podcast """
| Searching for long URL causes server error
Error report
````
Internal Server Error: /search.json
DataError at /search.json
value too long for type character varying(2048)
Request Method: GET
Request URL: https://www.gpodder.net/search.json?q=http://www.masterads.info/instagram/campanha.php?id=lNmb19mbuFmZyUSO2kjNhNTJnJ3buIXZrNWYyRXLuJ3bjB3bw5yYpxmY1BnZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJwEzMzE2Ml02bj5CdzMDau02bkNzMyZmZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJwgTYzUSZk5yYjNmLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUSbvNmLnJmchJnLxEjZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJ3MzMxE2Mlcmcv5icrNWYyRnblB3buIXZrNWYyRnZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJwgTYzUSbvNmL0J2YpxmY1BnLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUCM4E2MlUWbucmYyFmcuITMmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlADOhNTJ1xmL5JnchhmLyV2ajFmc05CN2BXamJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlADOhNTJhVnL4VmLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUCM0cjMhNTJt92YucmYyFmcukjZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJwgTYzUyZy9mL4dzMzEjLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUyb05ScyBnLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUSbvNmLzRnblJncvRXL5ZWa55iclt2YhJHdmJTJmJTJhNTJwRHdo1jc0tDctFmJlNmb19mbuFmZyUCM4E2Ml02bj5CduVmcy9Gd0lmYuVGcv5iclt2YhJHdmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlUGZucmbppXYsJmLyV2ajFmc0ZmMlYmMlE2MlAHd0hWPyR3Ow1WYmU2YuV3bu5WYmJTJwgTYzUCdp5SZs9GdzlmLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUCM4E2Ml02bj5CdiNWasJWdw5iclt2YhJHdmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlkjN5YTYzUSbvNmLj5WezVGZuMXdk9GelZmMlYmMlE2MlAHd0hWPyR3Ow1WYmU2YuV3bu5WYmJTJ5YTO2E2Ml42Yu02bj5CZuFGblJXYj5CdiZmMlYmMlE2MlAHd0hWPyR3Ow1WYmU2YuV3bu5WYmJTJ5YTO2E2MlsGduIXZmJXdzJXZwB3bj5iclt2YhJHdmJTJmJTJhNTJwRHdo1jc0tDctFmJlNmb19mbuFmZyUSbvNmLj5WezVGZuMXdk9GelZmMlYmMlE2MlAHd0hWPyR3Ow1WYmU2YuV3bu5WYmJTJwEzMzE2Ml02bj5CdzMDauc3d3ZmMlYmMlE2MlAHd0hWPyR3Ow1WYmU2YuV3bu5WYmJTJwgTYzUSbvNmLzRnblJncvRXL5ZWa55iclt2YhJHdmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMl02bj5yN39GcuIDdmJTJmJTJhNTJwRHdo1jc0tDctFmJlNmb19mbuFmZyUSO2kjNhNTJrRnLyVmZyV3cyVGcw92YmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlkjN5YTYzUSbvNmL4VHdhJXaw5CNyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUSO2kjNhNTJ3BnLzRnblJncvR3bsdmZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJ5YTO2E2MlsGduIXZmJXdzJXZwB3bj5iclt2YhJHdmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlczMzETYzUyZy9mLyt2YhJHduVGcv5iclt2YhJHdmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlADOhNTJt92YuQnblJncvRHdpJmblB3buIXZrNWYyRnZyUiZyUSYzUCckVXPyR3Ow1WYmQWNlwUQVRkY1UCMyUCZ1UCcpJFRCJWNlAjMlkTMwIDMyUyb0FWbpRHbVBjMl0CMyUyclJ3bkF2ZulmVwITJkVTJN90QuMFVOVkUS9EVPRkTB10TDBjMlU0UTV0QBJWNl0jbktDctFmJ1MWZ3Q2NykDZ1QjM1UWZzAzYwMjYyImMlFjZzIGOhVGOjZzN3I2YjpDapRnY64mc11Dd49jO0VmbnFWb
Django Version: 2.0.8
Python Executable: /srv/mygpo/venv/bin/python3
Python Version: 3.6.5
Python Path: ['/srv/mygpo', '/srv/mygpo/venv/bin', '/srv/mygpo/venv/lib/python36.zip', '/srv/mygpo/venv/lib/python3.6', '/srv/mygpo/venv/lib/python3.6/lib-dynload', '/usr/lib/python3.6', '/srv/mygpo/venv/lib/python3.6/site-packages', '/srv/mygpo']
Server time: Tue, 6 Aug 2019 00:29:26 +0000
Installed Applications:
['django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.humanize',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.postgres',
'django_celery_results',
'django_celery_beat',
'mygpo.core',
'mygpo.podcasts',
'mygpo.chapters',
'mygpo.search',
'mygpo.users',
'mygpo.api',
'mygpo.web',
'mygpo.publisher',
'mygpo.subscriptions',
'mygpo.history',
'mygpo.favorites',
'mygpo.usersettings',
'mygpo.data',
'mygpo.userfeeds',
'mygpo.suggestions',
'mygpo.directory',
'mygpo.categories',
'mygpo.episodestates',
'mygpo.maintenance',
'mygpo.share',
'mygpo.administration',
'mygpo.pubsub',
'mygpo.podcastlists',
'mygpo.votes']
Installed Middleware:
['django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.messages.middleware.MessageMiddleware']
Traceback:
File "/srv/mygpo/mygpo/data/feeddownloader.py" in parse_feed
122. self._validate_parsed(parsed)
File "/srv/mygpo/mygpo/data/feeddownloader.py" in _validate_parsed
175. raise NoEpisodesException('no episodes found')
During handling of the above exception (no episodes found), another exception occurred:
File "/srv/mygpo/mygpo/data/feeddownloader.py" in parse_feed
131. p = Podcast.objects.get(urls__url=self.podcast_url)
File "/srv/mygpo/venv/lib/python3.6/site-packages/django/db/models/manager.py" in manager_method
82. return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/srv/mygpo/venv/lib/python3.6/site-packages/django/db/models/query.py" in get
403. self.model._meta.object_name
The above exception (Podcast matching query does not exist.) was the direct cause of the following exception:
File "/srv/mygpo/mygpo/data/feeddownloader.py" in update_podcast
84. parsed, podcast, created = self.parse_feed()
File "/srv/mygpo/mygpo/data/feeddownloader.py" in parse_feed
135. raise NoPodcastCreated(ex) from pdne
During handling of the above exception (no episodes found), another exception occurred:
File "/srv/mygpo/venv/lib/python3.6/site-packages/django/db/backends/utils.py" in _execute
85. return self.cursor.execute(sql, params)
File "/srv/mygpo/venv/lib/python3.6/site-packages/psycopg2cffi/_impl/cursor.py" in check_closed_
30. return func(self, *args, **kwargs)
File "/srv/mygpo/venv/lib/python3.6/site-packages/psycopg2cffi/_impl/cursor.py" in execute
263. self._pq_execute(self._query, conn._async)
File "/srv/mygpo/venv/lib/python3.6/site-packages/psycopg2cffi/_impl/cursor.py" in _pq_execute
696. self._pq_fetch()
File "/srv/mygpo/venv/lib/python3.6/site-packages/psycopg2cffi/_impl/cursor.py" in _pq_fetch
757. raise self._conn._create_exception(cursor=self)
The above exception (value too long for type character varying(2048)
) was the direct cause of the following exception:
File "/srv/mygpo/venv/lib/python3.6/site-packages/django/core/handlers/exception.py" in inner
35. response = get_response(request)
File "/srv/mygpo/venv/lib/python3.6/site-packages/django/core/handlers/base.py" in _get_response
128. response = self.process_exception_by_middleware(e, request)
File "/srv/mygpo/venv/lib/python3.6/site-packages/django/core/handlers/base.py" in _get_response
126. response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/srv/mygpo/mygpo/api/simple.py" in tmp
39. return fn(request, *args, format=format, **kwargs)
File "/srv/mygpo/venv/lib/python3.6/site-packages/django/utils/decorators.py" in _wrapped_view
142. response = view_func(request, *args, **kwargs)
File "/srv/mygpo/mygpo/decorators.py" in tmp
76. return fn(request, *args, **kwargs)
File "/srv/mygpo/mygpo/decorators.py" in wrapper
111. resp = f(*args, **kwargs)
File "/srv/mygpo/mygpo/api/simple.py" in search
273. results = search_podcasts(query)[:NUM_RESULTS]
File "/srv/mygpo/mygpo/directory/search.py" in search_podcasts
21. updater.update_podcast()
File "/srv/mygpo/mygpo/data/feeddownloader.py" in update_podcast
115. self._update_podcast(podcast, parsed, episode_updater, res)
File "/srv/mygpo/mygpo/data/models.py" in __exit__
76. self.save()
File "/srv/mygpo/venv/lib/python3.6/site-packages/django/db/models/base.py" in save
729. force_update=force_update, update_fields=update_fields)
File "/srv/mygpo/venv/lib/python3.6/site-packages/django/db/models/base.py" in save_base
759. updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
File "/srv/mygpo/venv/lib/python3.6/site-packages/django/db/models/base.py" in _save_table
823. forced_update)
File "/srv/mygpo/venv/lib/python3.6/site-packages/django/db/models/base.py" in _do_update
872. return filtered._update(values) > 0
File "/srv/mygpo/venv/lib/python3.6/site-packages/django/db/models/query.py" in _update
709. return query.get_compiler(self.db).execute_sql(CURSOR)
File "/srv/mygpo/venv/lib/python3.6/site-packages/django/db/models/sql/compiler.py" in execute_sql
1379. cursor = super().execute_sql(result_type)
File "/srv/mygpo/venv/lib/python3.6/site-packages/django/db/models/sql/compiler.py" in execute_sql
1068. cursor.execute(sql, params)
File "/srv/mygpo/venv/lib/python3.6/site-packages/django/db/backends/utils.py" in execute
68. return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/srv/mygpo/venv/lib/python3.6/site-packages/django/db/backends/utils.py" in _execute_with_wrappers
77. return executor(sql, params, many, context)
File "/srv/mygpo/venv/lib/python3.6/site-packages/django/db/backends/utils.py" in _execute
85. return self.cursor.execute(sql, params)
File "/srv/mygpo/venv/lib/python3.6/site-packages/django/db/utils.py" in __exit__
89. raise dj_exc_value.with_traceback(traceback) from exc_value
File "/srv/mygpo/venv/lib/python3.6/site-packages/django/db/backends/utils.py" in _execute
85. return self.cursor.execute(sql, params)
File "/srv/mygpo/venv/lib/python3.6/site-packages/psycopg2cffi/_impl/cursor.py" in check_closed_
30. return func(self, *args, **kwargs)
File "/srv/mygpo/venv/lib/python3.6/site-packages/psycopg2cffi/_impl/cursor.py" in execute
263. self._pq_execute(self._query, conn._async)
File "/srv/mygpo/venv/lib/python3.6/site-packages/psycopg2cffi/_impl/cursor.py" in _pq_execute
696. self._pq_fetch()
File "/srv/mygpo/venv/lib/python3.6/site-packages/psycopg2cffi/_impl/cursor.py" in _pq_fetch
757. raise self._conn._create_exception(cursor=self)
Exception Type: DataError at /search.json
Exception Value: value too long for type character varying(2048)
Request information:
USER: AnonymousUser
GET:
q = 'http://www.masterads.info/instagram/campanha.php?id=lNmb19mbuFmZyUSO2kjNhNTJnJ3buIXZrNWYyRXLuJ3bjB3bw5yYpxmY1BnZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJwEzMzE2Ml02bj5CdzMDau02bkNzMyZmZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJwgTYzUSZk5yYjNmLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUSbvNmLnJmchJnLxEjZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJ3MzMxE2Mlcmcv5icrNWYyRnblB3buIXZrNWYyRnZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJwgTYzUSbvNmL0J2YpxmY1BnLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUCM4E2MlUWbucmYyFmcuITMmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlADOhNTJ1xmL5JnchhmLyV2ajFmc05CN2BXamJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlADOhNTJhVnL4VmLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUCM0cjMhNTJt92YucmYyFmcukjZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJwgTYzUyZy9mL4dzMzEjLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUyb05ScyBnLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUSbvNmLzRnblJncvRXL5ZWa55iclt2YhJHdmJTJmJTJhNTJwRHdo1jc0tDctFmJlNmb19mbuFmZyUCM4E2Ml02bj5CduVmcy9Gd0lmYuVGcv5iclt2YhJHdmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlUGZucmbppXYsJmLyV2ajFmc0ZmMlYmMlE2MlAHd0hWPyR3Ow1WYmU2YuV3bu5WYmJTJwgTYzUCdp5SZs9GdzlmLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUCM4E2Ml02bj5CdiNWasJWdw5iclt2YhJHdmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlkjN5YTYzUSbvNmLj5WezVGZuMXdk9GelZmMlYmMlE2MlAHd0hWPyR3Ow1WYmU2YuV3bu5WYmJTJ5YTO2E2Ml42Yu02bj5CZuFGblJXYj5CdiZmMlYmMlE2MlAHd0hWPyR3Ow1WYmU2YuV3bu5WYmJTJ5YTO2E2MlsGduIXZmJXdzJXZwB3bj5iclt2YhJHdmJTJmJTJhNTJwRHdo1jc0tDctFmJlNmb19mbuFmZyUSbvNmLj5WezVGZuMXdk9GelZmMlYmMlE2MlAHd0hWPyR3Ow1WYmU2YuV3bu5WYmJTJwEzMzE2Ml02bj5CdzMDauc3d3ZmMlYmMlE2MlAHd0hWPyR3Ow1WYmU2YuV3bu5WYmJTJwgTYzUSbvNmLzRnblJncvRXL5ZWa55iclt2YhJHdmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMl02bj5yN39GcuIDdmJTJmJTJhNTJwRHdo1jc0tDctFmJlNmb19mbuFmZyUSO2kjNhNTJrRnLyVmZyV3cyVGcw92YmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlkjN5YTYzUSbvNmL4VHdhJXaw5CNyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUSO2kjNhNTJ3BnLzRnblJncvR3bsdmZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJ5YTO2E2MlsGduIXZmJXdzJXZwB3bj5iclt2YhJHdmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlczMzETYzUyZy9mLyt2YhJHduVGcv5iclt2YhJHdmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlADOhNTJt92YuQnblJncvRHdpJmblB3buIXZrNWYyRnZyUiZyUSYzUCckVXPyR3Ow1WYmQWNlwUQVRkY1UCMyUCZ1UCcpJFRCJWNlAjMlkTMwIDMyUyb0FWbpRHbVBjMl0CMyUyclJ3bkF2ZulmVwITJkVTJN90QuMFVOVkUS9EVPRkTB10TDBjMlU0UTV0QBJWNl0jbktDctFmJ1MWZ3Q2NykDZ1QjM1UWZzAzYwMjYyImMlFjZzIGOhVGOjZzN3I2YjpDapRnY64mc11Dd49jO0VmbnFWb'
POST: No POST data
FILES: No FILES data
COOKIES: No cookie data
META:
HTTP_ACCEPT = 'application/json'
HTTP_CONNECTION = 'close'
HTTP_HOST = 'www.gpodder.net'
HTTP_USER_AGENT = 'Stremio podcasts addon'
HTTP_X_FORWARDED_FOR = '18.184.213.147'
HTTP_X_FORWARDED_PROTO = 'https'
HTTP_X_REAL_IP = '18.184.213.147'
HTTP_X_REQUEST_ID = '81ba942caff03f13c3168ae7994ae70c'
PATH_INFO = '/search.json'
QUERY_STRING = 'q=http://www.masterads.info/instagram/campanha.php?id=lNmb19mbuFmZyUSO2kjNhNTJnJ3buIXZrNWYyRXLuJ3bjB3bw5yYpxmY1BnZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJwEzMzE2Ml02bj5CdzMDau02bkNzMyZmZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJwgTYzUSZk5yYjNmLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUSbvNmLnJmchJnLxEjZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJ3MzMxE2Mlcmcv5icrNWYyRnblB3buIXZrNWYyRnZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJwgTYzUSbvNmL0J2YpxmY1BnLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUCM4E2MlUWbucmYyFmcuITMmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlADOhNTJ1xmL5JnchhmLyV2ajFmc05CN2BXamJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlADOhNTJhVnL4VmLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUCM0cjMhNTJt92YucmYyFmcukjZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJwgTYzUyZy9mL4dzMzEjLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUyb05ScyBnLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUSbvNmLzRnblJncvRXL5ZWa55iclt2YhJHdmJTJmJTJhNTJwRHdo1jc0tDctFmJlNmb19mbuFmZyUCM4E2Ml02bj5CduVmcy9Gd0lmYuVGcv5iclt2YhJHdmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlUGZucmbppXYsJmLyV2ajFmc0ZmMlYmMlE2MlAHd0hWPyR3Ow1WYmU2YuV3bu5WYmJTJwgTYzUCdp5SZs9GdzlmLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUCM4E2Ml02bj5CdiNWasJWdw5iclt2YhJHdmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlkjN5YTYzUSbvNmLj5WezVGZuMXdk9GelZmMlYmMlE2MlAHd0hWPyR3Ow1WYmU2YuV3bu5WYmJTJ5YTO2E2Ml42Yu02bj5CZuFGblJXYj5CdiZmMlYmMlE2MlAHd0hWPyR3Ow1WYmU2YuV3bu5WYmJTJ5YTO2E2MlsGduIXZmJXdzJXZwB3bj5iclt2YhJHdmJTJmJTJhNTJwRHdo1jc0tDctFmJlNmb19mbuFmZyUSbvNmLj5WezVGZuMXdk9GelZmMlYmMlE2MlAHd0hWPyR3Ow1WYmU2YuV3bu5WYmJTJwEzMzE2Ml02bj5CdzMDauc3d3ZmMlYmMlE2MlAHd0hWPyR3Ow1WYmU2YuV3bu5WYmJTJwgTYzUSbvNmLzRnblJncvRXL5ZWa55iclt2YhJHdmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMl02bj5yN39GcuIDdmJTJmJTJhNTJwRHdo1jc0tDctFmJlNmb19mbuFmZyUSO2kjNhNTJrRnLyVmZyV3cyVGcw92YmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlkjN5YTYzUSbvNmL4VHdhJXaw5CNyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUSO2kjNhNTJ3BnLzRnblJncvR3bsdmZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJ5YTO2E2MlsGduIXZmJXdzJXZwB3bj5iclt2YhJHdmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlczMzETYzUyZy9mLyt2YhJHduVGcv5iclt2YhJHdmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlADOhNTJt92YuQnblJncvRHdpJmblB3buIXZrNWYyRnZyUiZyUSYzUCckVXPyR3Ow1WYmQWNlwUQVRkY1UCMyUCZ1UCcpJFRCJWNlAjMlkTMwIDMyUyb0FWbpRHbVBjMl0CMyUyclJ3bkF2ZulmVwITJkVTJN90QuMFVOVkUS9EVPRkTB10TDBjMlU0UTV0QBJWNl0jbktDctFmJ1MWZ3Q2NykDZ1QjM1UWZzAzYwMjYyImMlFjZzIGOhVGOjZzN3I2YjpDapRnY64mc11Dd49jO0VmbnFWb'
RAW_URI = '/search.json?q=http://www.masterads.info/instagram/campanha.php?id=lNmb19mbuFmZyUSO2kjNhNTJnJ3buIXZrNWYyRXLuJ3bjB3bw5yYpxmY1BnZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJwEzMzE2Ml02bj5CdzMDau02bkNzMyZmZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJwgTYzUSZk5yYjNmLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUSbvNmLnJmchJnLxEjZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJ3MzMxE2Mlcmcv5icrNWYyRnblB3buIXZrNWYyRnZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJwgTYzUSbvNmL0J2YpxmY1BnLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUCM4E2MlUWbucmYyFmcuITMmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlADOhNTJ1xmL5JnchhmLyV2ajFmc05CN2BXamJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlADOhNTJhVnL4VmLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUCM0cjMhNTJt92YucmYyFmcukjZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJwgTYzUyZy9mL4dzMzEjLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUyb05ScyBnLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUSbvNmLzRnblJncvRXL5ZWa55iclt2YhJHdmJTJmJTJhNTJwRHdo1jc0tDctFmJlNmb19mbuFmZyUCM4E2Ml02bj5CduVmcy9Gd0lmYuVGcv5iclt2YhJHdmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlUGZucmbppXYsJmLyV2ajFmc0ZmMlYmMlE2MlAHd0hWPyR3Ow1WYmU2YuV3bu5WYmJTJwgTYzUCdp5SZs9GdzlmLyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUCM4E2Ml02bj5CdiNWasJWdw5iclt2YhJHdmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlkjN5YTYzUSbvNmLj5WezVGZuMXdk9GelZmMlYmMlE2MlAHd0hWPyR3Ow1WYmU2YuV3bu5WYmJTJ5YTO2E2Ml42Yu02bj5CZuFGblJXYj5CdiZmMlYmMlE2MlAHd0hWPyR3Ow1WYmU2YuV3bu5WYmJTJ5YTO2E2MlsGduIXZmJXdzJXZwB3bj5iclt2YhJHdmJTJmJTJhNTJwRHdo1jc0tDctFmJlNmb19mbuFmZyUSbvNmLj5WezVGZuMXdk9GelZmMlYmMlE2MlAHd0hWPyR3Ow1WYmU2YuV3bu5WYmJTJwEzMzE2Ml02bj5CdzMDauc3d3ZmMlYmMlE2MlAHd0hWPyR3Ow1WYmU2YuV3bu5WYmJTJwgTYzUSbvNmLzRnblJncvRXL5ZWa55iclt2YhJHdmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMl02bj5yN39GcuIDdmJTJmJTJhNTJwRHdo1jc0tDctFmJlNmb19mbuFmZyUSO2kjNhNTJrRnLyVmZyV3cyVGcw92YmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlkjN5YTYzUSbvNmL4VHdhJXaw5CNyV2ajFmc0ZmMlYmMlE2MlAHZ11jc0tDctFmJlNmb19mbuFmZyUSO2kjNhNTJ3BnLzRnblJncvR3bsdmZyUiZyUSYzUCckVXPyR3Ow1WYmU2YuV3bu5WYmJTJ5YTO2E2MlsGduIXZmJXdzJXZwB3bj5iclt2YhJHdmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlczMzETYzUyZy9mLyt2YhJHduVGcv5iclt2YhJHdmJTJmJTJhNTJwRWd9IHd7AXbhZSZj5Wdv5mbhZmMlADOhNTJt92YuQnblJncvRHdpJmblB3buIXZrNWYyRnZyUiZyUSYzUCckVXPyR3Ow1WYmQWNlwUQVRkY1UCMyUCZ1UCcpJFRCJWNlAjMlkTMwIDMyUyb0FWbpRHbVBjMl0CMyUyclJ3bkF2ZulmVwITJkVTJN90QuMFVOVkUS9EVPRkTB10TDBjMlU0UTV0QBJWNl0jbktDctFmJ1MWZ3Q2NykDZ1QjM1UWZzAzYwMjYyImMlFjZzIGOhVGOjZzN3I2YjpDapRnY64mc11Dd49jO0VmbnFWb'
REMOTE_ADDR = ''
REQUEST_METHOD = 'GET'
SCRIPT_NAME = ''
SERVER_NAME = 'www.gpodder.net'
SERVER_PORT = '443'
SERVER_PROTOCOL = 'HTTP/1.0'
SERVER_SOFTWARE = 'gunicorn/19.9.0'
gunicorn.socket = <socket.socket fd=12, family=AddressFamily.AF_UNIX, type=SocketKind.SOCK_STREAM, proto=0, laddr=/tmp/mygpo.sock>
wsgi.errors = <gunicorn.http.wsgi.WSGIErrorsWrapper object at 0x7fa95cdec940>
wsgi.file_wrapper = ''
wsgi.input = <gunicorn.http.body.Body object at 0x7fa95cdec208>
wsgi.multiprocess = True
wsgi.multithread = False
wsgi.run_once = False
wsgi.url_scheme = 'https'
wsgi.version = '(1, 0)'
````
| hey @stefankoegl, I made the changes for this issue, can you give me access so that I can open a PR?
Great!
Actually you don't need any specific access to open a pull request. The standard workflow is to [fork the repository](https://help.github.com/en/articles/fork-a-repo), push your code there and then [create a pull request](https://help.github.com/en/articles/creating-a-pull-request-from-a-fork). | 2019-10-02T07:11:51 |
|
gpodder/mygpo | 274 | gpodder__mygpo-274 | [
"268"
] | 4741009b998a5e1a37dea1059079b903a3f81b5c | diff --git a/mygpo/search/index.py b/mygpo/search/index.py
--- a/mygpo/search/index.py
+++ b/mygpo/search/index.py
@@ -23,6 +23,9 @@
def search_podcasts(query):
""" Search for podcasts according to 'query' """
+ if is_query_too_short(query):
+ logger.debug('Found no podcasts for "{query}". Query is too short', query=query)
+ return Podcast.objects.none()
logger.debug('Searching for "{query}" podcasts"', query=query)
@@ -44,3 +47,7 @@ def search_podcasts(query):
)
return results
+
+
+def is_query_too_short(query):
+ return len(query.replace(" ", "")) <= settings.QUERY_LENGTH_CUTOFF
diff --git a/mygpo/settings.py b/mygpo/settings.py
--- a/mygpo/settings.py
+++ b/mygpo/settings.py
@@ -369,6 +369,11 @@ def get_intOrNone(name, default):
SEARCH_CUTOFF = float(os.getenv('SEARCH_CUTOFF', 0.3))
+# Maximum non-whitespace length of search query
+# If length of query is shorter than QUERY_LENGTH_CUTOFF, no results
+# will be returned to avoid a server timeout due to too many possible
+# responses
+QUERY_LENGTH_CUTOFF = int(os.getenv('QUERY_LENGTH_CUTOFF', 3))
### Sentry
| diff --git a/mygpo/search/tests.py b/mygpo/search/tests.py
--- a/mygpo/search/tests.py
+++ b/mygpo/search/tests.py
@@ -3,6 +3,7 @@
from mygpo.podcasts.models import Podcast
from django.contrib.postgres.search import SearchVector
+from django.test.utils import override_settings
from .index import search_podcasts
from .tasks import update_search_index
@@ -28,3 +29,28 @@ def test_search_podcast(self):
# search for the podcast
results = search_podcasts('awesome')
self.assertEqual(results[0].id, podcast.id)
+
+ @override_settings(QUERY_LENGTH_CUTOFF=3)
+ def test_shortest_search_podcast(self):
+ """
+ Search for a podcast with query length smaller than 3
+ With QUERY_LENGTH_CUTOFF = 3
+ Server would normally time out, however Podcasts exist for the given
+ search term.
+ """
+ # create a podcast
+ podcast = Podcast(
+ id=uuid.uuid1(),
+ title='The Tricky Podcast',
+ description='The only podcast containing tricky messages.',
+ )
+ podcast.save()
+
+ # explicitly trigger a search index update
+ update_search_index()
+
+ results = search_podcasts('The')
+ self.assertEqual(len(results), 0)
+
+ results = search_podcasts('The Tricky')
+ self.assertEqual(results[0].id, podcast.id)
| Slow podcast search with short query string
When searching for podcasts with a very short query string, the search takes a very long time, sometimes even times out, and most likely doesn't return any meaningful result.
Example: https://gpodder.net/search/?q=s
The search should return an empty result (and possibly an error message) if the query string is too short.
| This could be implemented in `mygpo.search.index.search_podcasts()`, by checking the length of the (non-whitespace parts of the) query string, and returning an [empty result set](https://docs.djangoproject.com/en/dev/ref/models/querysets/#none) or raising an exception.
https://github.com/gpodder/mygpo/blob/52e7af4021d64a07bb26899aba8303abccd6767b/mygpo/search/index.py#L24-L46 | 2019-10-16T20:34:31 |
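A minimal sketch of the guard described in that hint, mirroring the approach the patch above takes (the `QUERY_LENGTH_CUTOFF` value is the setting added in `mygpo/settings.py`; treat this as an illustration rather than the exact repository code):

```python
from django.conf import settings


def is_query_too_short(query):
    # Whitespace does not count towards the effective query length.
    return len(query.replace(" ", "")) <= settings.QUERY_LENGTH_CUTOFF
```

When this returns `True`, `search_podcasts()` can simply return `Podcast.objects.none()` instead of running the expensive full-text search.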
gpodder/mygpo | 279 | gpodder__mygpo-279 | [
"232"
] | aad69a1450edf77a0cb73c9f62a56938b8617c9b | diff --git a/mygpo/web/utils.py b/mygpo/web/utils.py
--- a/mygpo/web/utils.py
+++ b/mygpo/web/utils.py
@@ -193,10 +193,10 @@ def normalize_twitter(s):
CCLICENSE = re.compile(
- r'http://(www\.)?creativecommons.org/licenses/([a-z-]+)/([0-9.]+)?/?'
+ r'https?://(www\.)?creativecommons.org/licenses/([a-z-]+)/([0-9.]+)?/?'
)
CCPUBLICDOMAIN = re.compile(
- r'http://(www\.)?creativecommons.org/licenses/publicdomain/?'
+ r'https?://(www\.)?creativecommons.org/licenses/publicdomain/?'
)
LicenseInfo = collections.namedtuple('LicenseInfo', 'name version url')
@@ -212,16 +212,28 @@ def license_info(license_url):
>>> i.url
'http://creativecommons.org/licenses/by/3.0/'
+ >>> ihttps = license_info('https://creativecommons.org/licenses/by/3.0/')
+ >>> i.name == ihttps.name and i.version == ihttps.version
+ True
+
>>> iwww = license_info('http://www.creativecommons.org/licenses/by/3.0/')
>>> i.name == iwww.name and i.version == iwww.version
True
+ >>> iwww = license_info('https://www.creativecommons.org/licenses/by/3.0/')
+ >>> i.name == iwww.name and i.version == iwww.version
+ True
+
>>> i = license_info('http://www.creativecommons.org/licenses/publicdomain')
>>> i.name
'Public Domain'
>>> i.version is None
True
+ >>> ihttps = license_info('https://www.creativecommons.org/licenses/publicdomain')
+ >>> i.name == ihttps.name and i.version == ihttps.version
+ True
+
>>> i = license_info('http://example.com/my-own-license')
>>> i.name is None
True
| Recognize CC licenses with https URLs
https://gpodder.net/directory/+license contains a list of licenses. When selecting a license, the podcasts that use this particular license are shown.
In
https://github.com/gpodder/mygpo/blob/1950aae95d79c87bf703ec7f457fd84c2c9613f4/mygpo/web/utils.py#L204
the license URLs are matched against known patterns to show "nice" names instead of URLs. For creative commons licenses, only http URLs are recognized. http and https URLs should be recognized as identical.
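A quick standalone check (not repository code) that switching the pattern to `https?://` accepts both schemes, using the exact regex from the patch above:

```python
import re

CCLICENSE = re.compile(
    r'https?://(www\.)?creativecommons.org/licenses/([a-z-]+)/([0-9.]+)?/?'
)

for url in (
    "http://creativecommons.org/licenses/by/3.0/",
    "https://www.creativecommons.org/licenses/by-sa/4.0/",
):
    assert CCLICENSE.match(url) is not None
```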
| 2019-10-22T16:38:42 |
||
gpodder/mygpo | 493 | gpodder__mygpo-493 | [
"128"
] | e44fd058e07dfe54c3c8ef26511db26352567249 | diff --git a/mygpo/api/advanced/sync.py b/mygpo/api/advanced/sync.py
--- a/mygpo/api/advanced/sync.py
+++ b/mygpo/api/advanced/sync.py
@@ -38,7 +38,7 @@ def main(request, username):
except Client.DoesNotExist as e:
return HttpResponseNotFound(str(e))
- return JsonResponse(get_sync_status(user))
+ return JsonResponse(get_sync_status(request.user))
def get_sync_status(user):
@@ -77,11 +77,11 @@ def update_sync_status(user, synclist, stopsync):
dev = user.client_set.get(uid=uid)
for other_uid in devlist[1:]:
- other = user.get_device_by_uid(other_uid)
+ other = user.client_set.get(uid=other_uid)
dev.sync_with(other)
for uid in stopsync:
- dev = user.get_device_by_uid(uid)
+ dev = user.client_set.get(uid=uid)
try:
dev.stop_sync()
except ValueError:
| API: Device Synchronization API - Start / Stop Sync returning HTTP status 500
During my work on PR https://github.com/gpodder/mygpo/pull/122 I was testing the Device Synchronization API - Start / Stop Sync (https://gpoddernet.readthedocs.io/en/latest/api/reference/sync.html#post--api-2-sync-devices-(username).json)
I sent the following request
```json
{
"synchronize": [
[
"my-desktop", "cellphone"
]
]
}
```
and it is returning HTTP 500
```html
<html>
<head>
<title>500 Internal server error (gpodder.net)</title>
<link rel="stylesheet" type="text/css" href="/static/css/fail.css" />
</head>
<body>
<div id="c">
<div id="fail">
<h1>500 - Internal server error.</h1>
<p>
The service is currently overloaded.
Please try again later or contact us.
</p>
</div>
</div>
<img id="icon" src="/static/failpodder.png">
</body>
</html>
```
As a reference, a previous call to https://gpoddernet.readthedocs.io/en/latest/api/reference/sync.html#get--api-2-sync-devices-(username).json was returning:
```json
{
"synchronized": [],
"not-synchronized": [
"cellphone",
"my-desktop"
]
}
```
I'm able to sync these devices on the web UI, though.
| The cause for this is:
```
File "mygpo/api/advanced/sync.py", line 35, in main
update_sync_status(request.user, synclist, stopsync)
File "mygpo/api/advanced/sync.py", line 80, in update_sync_status
other = user.get_device_by_uid(other_uid)
File "venv/lib/python3.8/site-packages/django/utils/functional.py", line 241, in inner
return func(self._wrapped, *args)
AttributeError: 'User' object has no attribute 'get_device_by_uid'
```
Replacing `user.get_device_by_uid(other_uid)` with `Client.objects.get(user=user, uid=other_uid)` leads to more crashes.
@SiqingYu I do not know much about Python. Could you please point me in the right direction to fix this? Synchronizing devices using the API will make AntennaPod's sync feature a lot more useful. Our users are currently pretty confused that they need to log into the website to make sync work. | 2020-10-03T14:53:33 |
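For reference, a minimal sketch of the direction the merged patch takes: the Django reverse relation `client_set` replaces the removed `get_device_by_uid` helper (illustrative code, not an exact copy of the repository):

```python
def get_client(user, uid):
    # Raises Client.DoesNotExist when the uid is unknown for this user,
    # which the API view can translate into a 404 instead of a 500.
    return user.client_set.get(uid=uid)
```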
|
gpodder/mygpo | 546 | gpodder__mygpo-546 | [
"178"
] | d21ddb5e31e0c24f2a056bc165eef2c842123716 | diff --git a/mygpo/directory/management/commands/update-toplist.py b/mygpo/directory/management/commands/update-toplist.py
--- a/mygpo/directory/management/commands/update-toplist.py
+++ b/mygpo/directory/management/commands/update-toplist.py
@@ -24,7 +24,7 @@ def handle(self, *args, **options):
silent = options.get("silent")
podcasts = Podcast.objects.all()
- total = podcasts.count_fast()
+ total = podcasts.count()
for n, podcast in enumerate(podcasts):
update_podcast_subscribers.delay(podcast.get_id())
| Error when trying to run manage.py update-toplist
```
Traceback (most recent call last):
File "manage.py", line 8, in <module>
execute_from_command_line(sys.argv)
File "/home/mike/mygpo/venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 381, in execute_from_command_line
utility.execute()
File "/home/mike/mygpo/venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 375, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/mike/mygpo/venv/lib/python3.6/site-packages/django/core/management/base.py", line 316, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/mike/mygpo/venv/lib/python3.6/site-packages/django/core/management/base.py", line 353, in execute
output = self.handle(*args, **options)
File "/home/mike/mygpo/mygpo/directory/management/commands/update-toplist.py", line 27, in handle
total = podcasts.count_fast()
AttributeError: 'PodcastQuerySet' object has no attribute 'count_fast'
```
| 2020-12-29T16:35:26 |
||
huggingface/text-generation-inference | 13 | huggingface__text-generation-inference-13 | [
"12"
] | 3efa5bbbfd5868695da4d5d9ad23d81f48f1e5a8 | diff --git a/server/text_generation/models/causal_lm.py b/server/text_generation/models/causal_lm.py
--- a/server/text_generation/models/causal_lm.py
+++ b/server/text_generation/models/causal_lm.py
@@ -354,7 +354,8 @@ def generate_token(
if stop:
# Decode all tokens
output_text = self.tokenizer.decode(
- all_input_ids.squeeze(-1), skip_special_tokens=True
+ all_input_ids.squeeze(-1), skip_special_tokens=True,
+ cleanup_tokenization_spaces=False
)
# Slice with input_length to remove padding
token_ids = all_input_ids[-new_input_length:]
| Causal LM modifies the input when returning text
One of the issues is that Causal LM returns the entire `input + generated_text`. And `input` is actually `tokenizer.decode(tokenizer.encode(input))`. The issue comes when that process is lossy, and causes weird behaviours such as this https://huggingface.co/bigscience/bloom/discussions/153#6397907b71eb2455d898e0a4
We can instead either:
- actually use the input instead of going through a potentially lossy mechanism.
- change the API to always return "added" text. **BREAKING**
| One hot fix concerning the specific BLOOM issue is to set `clean_up_tokenization_spaces=False`, but it's only because BLOOM tokenizer is lossless. | 2022-12-20T14:25:29 |
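To make the lossiness concrete, a small illustration (the checkpoint name is only an example and not taken from this issue; behaviour may vary by tokenizer):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("bigscience/bloom-560m")  # example checkpoint
text = "Hello , world !"  # spacing that the cleanup step normalizes away
roundtrip = tok.decode(tok.encode(text), clean_up_tokenization_spaces=True)
print(roundtrip)          # typically "Hello, world!" -- no longer identical to the input
print(roundtrip == text)  # typically False
```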
|
huggingface/text-generation-inference | 82 | huggingface__text-generation-inference-82 | [
"80"
] | 17bc841b1be18436b32c533c8eaeacade1f45381 | diff --git a/server/text_generation/models/galactica.py b/server/text_generation/models/galactica.py
--- a/server/text_generation/models/galactica.py
+++ b/server/text_generation/models/galactica.py
@@ -2,7 +2,7 @@
import torch
import torch.distributed
-from typing import List, Optional, Type
+from typing import List, Optional, Type, Tuple
from accelerate import init_empty_weights
from safetensors import safe_open
@@ -145,6 +145,20 @@ def decode(self, generated_ids: List[int]) -> str:
generated_ids, skip_special_tokens=False, cleanup_tokenization_spaces=False
)
+ def forward(
+ self, input_ids, attention_mask, position_ids, past_key_values: Optional = None
+ ) -> Tuple[torch.Tensor, List[Tuple[torch.Tensor, torch.Tensor]]]:
+ """Overwrite forward to ignore position_ids"""
+
+ # Model Forward
+ outputs = self.model.forward(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ past_key_values=past_key_values,
+ use_cache=True,
+ )
+ return outputs.logits, outputs.past_key_values
+
class GalacticaSharded(Galactica):
def __init__(
@@ -322,7 +336,6 @@ def forward(
outputs = self.model.forward(
input_ids=input_ids,
attention_mask=attention_mask,
- position_ids=position_ids,
past_key_values=past_key_values,
use_cache=True,
)
| TypeError: forward() got an unexpected keyword argument 'position_ids'
I started a server with
```
text-generation-launcher --model-id facebook/galactica-30b --num-shard 1
```
However, when I now send a request like
```
curl localhost:3000/generate -H 'Content-Type: application/json' -d '{"inputs":"Hi my name is","parameters":{"max_new_tokens":60}}'
```
it consistently returns `TypeError: forward() got an unexpected keyword argument 'position_ids'`, with the following traceback:
```
2023-02-20T14:41:54.081514Z ERROR shard-manager: text_generation_launcher: "Method Prefill encountered an error.
Traceback (most recent call last):
File \"/home/user/miniconda3/envs/text_generation/bin/text-generation-server\", line 8, in <module>
sys.exit(app())
File \"/home/user/miniconda3/envs/text_generation/lib/python3.9/site-packages/typer/main.py\", line 311, in __call__
return get_command(self)(*args, **kwargs)
File \"/home/user/miniconda3/envs/text_generation/lib/python3.9/site-packages/click/core.py\", line 1130, in __call__
return self.main(*args, **kwargs)
File \"/home/user/miniconda3/envs/text_generation/lib/python3.9/site-packages/typer/core.py\", line 778, in main
return _main(
File \"/home/user/miniconda3/envs/text_generation/lib/python3.9/site-packages/typer/core.py\", line 216, in _main
rv = self.invoke(ctx)
File \"/home/user/miniconda3/envs/text_generation/lib/python3.9/site-packages/click/core.py\", line 1657, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File \"/home/user/miniconda3/envs/text_generation/lib/python3.9/site-packages/click/core.py\", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
File \"/home/user/miniconda3/envs/text_generation/lib/python3.9/site-packages/click/core.py\", line 760, in invoke
return __callback(*args, **kwargs)
File \"/home/user/miniconda3/envs/text_generation/lib/python3.9/site-packages/typer/main.py\", line 683, in wrapper
return callback(**use_params) # type: ignore
File \"/home/user/text-generation-inference-new/server/text_generation/cli.py\", line 55, in serve
server.serve(model_id, revision, sharded, quantize, uds_path)
File \"/home/user/text-generation-inference-new/server/text_generation/server.py\", line 130, in serve
asyncio.run(serve_inner(model_id, revision, sharded, quantize))
File \"/home/user/miniconda3/envs/text_generation/lib/python3.9/asyncio/runners.py\", line 44, in run
return loop.run_until_complete(main)
File \"/home/user/miniconda3/envs/text_generation/lib/python3.9/asyncio/base_events.py\", line 634, in run_until_complete
self.run_forever()
File \"/home/user/miniconda3/envs/text_generation/lib/python3.9/asyncio/base_events.py\", line 601, in run_forever
self._run_once()
File \"/home/user/miniconda3/envs/text_generation/lib/python3.9/asyncio/base_events.py\", line 1905, in _run_once
handle._run()
File \"/home/user/miniconda3/envs/text_generation/lib/python3.9/asyncio/events.py\", line 80, in _run
self._context.run(self._callback, *self._args)
File \"/home/user/miniconda3/envs/text_generation/lib/python3.9/site-packages/grpc_interceptor/server.py\", line 153, in invoke_intercept_method
return await self.intercept(
> File \"/home/user/text-generation-inference-new/server/text_generation/interceptor.py\", line 20, in intercept
return await response
File \"/home/user/miniconda3/envs/text_generation/lib/python3.9/site-packages/opentelemetry/instrumentation/grpc/_aio_server.py\", line 82, in _unary_interceptor
raise error
File \"/home/user/miniconda3/envs/text_generation/lib/python3.9/site-packages/opentelemetry/instrumentation/grpc/_aio_server.py\", line 73, in _unary_interceptor
return await behavior(request_or_iterator, context)
File \"/home/user/text-generation-inference-new/server/text_generation/server.py\", line 41, in Prefill
generations, next_batch = self.model.generate_token(batch)
File \"/home/user/miniconda3/envs/text_generation/lib/python3.9/contextlib.py\", line 79, in inner
return func(*args, **kwds)
File \"/home/user/text-generation-inference-new/server/text_generation/models/causal_lm.py\", line 297, in generate_token
logits, past = self.forward(
File \"/home/user/text-generation-inference-new/server/text_generation/models/causal_lm.py\", line 284, in forward
outputs = self.model.forward(
File \"/home/user/miniconda3/envs/text_generation/lib/python3.9/site-packages/accelerate/hooks.py\", line 156, in new_forward
output = old_forward(*args, **kwargs)
TypeError: forward() got an unexpected keyword argument 'position_ids'
" rank=0
2023-02-20T14:41:54.081879Z ERROR batch{batch_size=1}:prefill:prefill{id=0 size=1}:prefill{id=0 size=1}: text_generation_client: router/client/src/lib.rs:29: Server error: forward() got an unexpected keyword argument 'position_ids'
2023-02-20T14:41:54.081947Z ERROR HTTP request{otel.name=POST /generate http.client_ip= http.flavor=1.1 http.host=localhost:3000 http.method=POST http.route=/generate http.scheme=HTTP http.target=/generate http.user_agent=curl/7.82.0 otel.kind=server trace_id=c742e54c2eddc1bfcc788ed10b3e0c52}:generate{req=Json(GenerateRequest { inputs: "Hi my name is", parameters: GenerateParameters { temperature: None, repetition_penalty: None, top_k: None, top_p: None, do_sample: false, max_new_tokens: 60, stop: [], details: false, seed: None } })}:generate{request=GenerateRequest { inputs: "Hi my name is", parameters: GenerateParameters { temperature: None, repetition_penalty: None, top_k: None, top_p: None, do_sample: false, max_new_tokens: 60, stop: [], details: false, seed: None } }}:generate_stream{request=GenerateRequest { inputs: "Hi my name is", parameters: GenerateParameters { temperature: None, repetition_penalty: None, top_k: None, top_p: None, do_sample: false, max_new_tokens: 60, stop: [], details: false, seed: None } }}:infer{batch_size=1}:send_error: text_generation_router::infer: router/src/infer.rs:338: Request failed during generation: Server error: forward() got an unexpected keyword argument 'position_ids'
```
| 2023-02-20T18:05:09 |
||
huggingface/text-generation-inference | 106 | huggingface__text-generation-inference-106 | [
"105"
] | 3fef90d50fe6248abe320904e51dbf98972fe25b | diff --git a/server/text_generation_server/models/galactica.py b/server/text_generation_server/models/galactica.py
--- a/server/text_generation_server/models/galactica.py
+++ b/server/text_generation_server/models/galactica.py
@@ -96,6 +96,8 @@ def from_pb(
input_lengths = []
# Parse batch
+ max_sequence_length = 0
+ padding_right_offset = 0
for r in pb.requests:
# Add escape_custom_split_sequence to the CausalLMBatch logic
inputs.append(escape_custom_split_sequence(r.inputs))
@@ -103,8 +105,13 @@ def from_pb(
next_token_choosers.append(
NextTokenChooser.from_pb(r.parameters, len(tokenizer), device)
)
- stopping_criterias.append(
- StoppingCriteria.from_pb(r.stopping_parameters, tokenizer)
+ stopping_criteria = StoppingCriteria.from_pb(
+ r.stopping_parameters, tokenizer
+ )
+ stopping_criterias.append(stopping_criteria)
+ max_sequence_length = max(max_sequence_length, r.input_length)
+ padding_right_offset = max(
+ padding_right_offset, stopping_criteria.max_new_tokens
)
# Tokenize batch
@@ -114,6 +121,14 @@ def from_pb(
padding=True,
return_token_type_ids=False,
).to(device)
+ input_ids = tokenized_inputs["input_ids"]
+ # Allocate maximum attention_mask
+ attention_mask = input_ids.new_zeros(
+ (pb.size, max_sequence_length + padding_right_offset)
+ )
+ # Copy tokenizer attention_mask into fully allocated attention_mask
+ attention_mask[:, :max_sequence_length] = tokenized_inputs["attention_mask"]
+
position_ids = tokenized_inputs["attention_mask"].long().cumsum(-1) - 1
position_ids.masked_fill_(tokenized_inputs["attention_mask"] == 0, 1)
all_input_ids = tokenized_inputs["input_ids"].unsqueeze(-1)
@@ -121,8 +136,8 @@ def from_pb(
return cls(
batch_id=pb.id,
requests=pb.requests,
- input_ids=tokenized_inputs["input_ids"],
- attention_mask=tokenized_inputs["attention_mask"],
+ input_ids=input_ids,
+ attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=None,
all_input_ids=all_input_ids,
@@ -130,7 +145,8 @@ def from_pb(
next_token_choosers=next_token_choosers,
stopping_criterias=stopping_criterias,
size=pb.size,
- max_sequence_length=max(input_lengths),
+ max_sequence_length=max_sequence_length,
+ padding_right_offset=padding_right_offset,
)
| TypeError: __init__() missing 1 required positional argument: 'padding_right_offset'
There's a TypeError when doing a request to a Galactica model (in this case `facebook/galactica-30b`):
```
File \"/home/user/miniconda3/envs/text_gen_inference/lib/python3.9/site-packages/grpc_interceptor/server.py\", line 153, in invoke_intercept_method
return await self.intercept(
> File \"/home/user/userusertext-generation-inference/server/text_generation/interceptor.py\", line 20, in intercept
return await response
File \"/home/user/miniconda3/envs/text_gen_inference/lib/python3.9/site-packages/opentelemetry/instrumentation/grpc/_aio_server.py\", line 82, in _unary_interceptor
raise error
File \"/home/user/miniconda3/envs/text_gen_inference/lib/python3.9/site-packages/opentelemetry/instrumentation/grpc/_aio_server.py\", line 73, in _unary_interceptor
return await behavior(request_or_iterator, context)
File \"/home/user/userusertext-generation-inference/server/text_generation/server.py\", line 37, in Prefill
batch = self.model.batch_type.from_pb(
File \"/home/user/usertext-generation-inference/server/text_generation/models/galactica.py\", line 121, in from_pb
return cls(
TypeError: __init__() missing 1 required positional argument: 'padding_right_offset'
```
E.g.:
```
curl localhost:3000/generate_stream -H 'Content-Type: application/json' -d '{"inputs":"The Transformer architecture [START_REF]","parameters":{"max_new_tokens":100, "do_sample":true, "temperature":0.8}}'
```
Model was started with
```
text-generation-launcher --model-id facebook/galactica-30b --num-shard 1 --quantize
```
| 2023-03-07T19:05:11 |
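The core of the fix above is to allocate the attention mask once, sized for the prompt plus the maximum number of new tokens; a hedged, standalone illustration of that pattern (not the repository's exact code):

```python
import torch

batch_size, max_sequence_length, padding_right_offset = 2, 5, 3
input_ids = torch.ones(batch_size, max_sequence_length, dtype=torch.long)

# Reserve room on the right for up to `padding_right_offset` future tokens.
attention_mask = input_ids.new_zeros((batch_size, max_sequence_length + padding_right_offset))
attention_mask[:, :max_sequence_length] = 1  # positions already filled by the prompt
print(attention_mask.shape)  # torch.Size([2, 8])
```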
||
huggingface/text-generation-inference | 114 | huggingface__text-generation-inference-114 | [
"112"
] | 941cd42e0cd2d51bb37a6f84572ceda2976b890d | diff --git a/server/text_generation_server/utils/tokens.py b/server/text_generation_server/utils/tokens.py
--- a/server/text_generation_server/utils/tokens.py
+++ b/server/text_generation_server/utils/tokens.py
@@ -6,6 +6,7 @@
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
+ TypicalLogitsWarper,
RepetitionPenaltyLogitsProcessor,
PreTrainedTokenizerBase,
)
@@ -41,6 +42,7 @@ def __init__(
repetition_penalty=1.0,
top_k=None,
top_p=None,
+ typical_p=None,
do_sample=False,
seed=0,
device="cpu",
@@ -64,6 +66,9 @@ def __init__(
if top_p is not None and top_p < 1.0:
warpers.append(TopPLogitsWarper(top_p=top_p))
sampling = True
+ if typical_p is not None and typical_p < 1.0:
+ warpers.append(TypicalLogitsWarper(mass=typical_p))
+ sampling = True
self.warpers = warpers
self.choice = Sampling(seed, device) if sampling else Greedy()
@@ -92,6 +97,7 @@ def from_pb(
repetition_penalty=pb.repetition_penalty,
top_k=pb.top_k,
top_p=pb.top_p,
+ typical_p=pb.typical_p,
do_sample=pb.do_sample,
seed=pb.seed,
device=device,
| diff --git a/server/tests/conftest.py b/server/tests/conftest.py
--- a/server/tests/conftest.py
+++ b/server/tests/conftest.py
@@ -10,6 +10,7 @@ def default_pb_parameters():
repetition_penalty=1.0,
top_k=0,
top_p=1.0,
+ typical_p=1.0,
do_sample=False,
)
| Add typical sampling
HF has TypicalWarper and `typical_p`. Would be nice to be able to pass that.
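A rough sketch (not the repository's exact code) of how the `typical_p` parameter plugs into a transformers warper chain, mirroring the patch above:

```python
from transformers import TypicalLogitsWarper

typical_p = 0.95  # example value; 1.0 effectively disables the warper
warpers = []
if typical_p is not None and typical_p < 1.0:
    warpers.append(TypicalLogitsWarper(mass=typical_p))
```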
| 2023-03-09T09:18:19 |
|
huggingface/text-generation-inference | 201 | huggingface__text-generation-inference-201 | [
"198"
] | 2475aede619c0c6d2ba8440303432d505c77f6d3 | diff --git a/server/text_generation_server/models/__init__.py b/server/text_generation_server/models/__init__.py
--- a/server/text_generation_server/models/__init__.py
+++ b/server/text_generation_server/models/__init__.py
@@ -24,7 +24,18 @@
FlashSantacoderSharded,
)
- FLASH_ATTENTION = torch.cuda.is_available()
+ if torch.cuda.is_available():
+ major, minor = torch.cuda.get_device_capability()
+ is_sm75 = major == 7 and minor == 5
+ is_sm8x = major == 8 and minor >= 0
+ is_sm90 = major == 9 and minor == 0
+
+ supported = is_sm75 or is_sm8x or is_sm90
+ if not supported:
+ raise ImportError(f"GPU with CUDA capability {major} {minor} is not supported")
+ FLASH_ATTENTION = True
+ else:
+ FLASH_ATTENTION = False
except ImportError:
logger.opt(exception=True).warning("Could not import Flash Attention enabled models")
FLASH_ATTENTION = False
| Request failed during generation: Server error: Expected is_sm90 || is_sm8x || is_sm75 to be true, but got false. (Could this error message be improved? If so, please report an enhancement request to PyTorch.)
Using the Docker container following these instructions:
https://github.com/huggingface/text-generation-inference#docker
in order to run the server locally. I'm using an app very similar to the one here:
https://huggingface.co/spaces/olivierdehaene/chat-llm-streaming to hit that local server.
I'm seeing this error in the server logs:
```
send_error: text_generation_router::infer: router/src/infer.rs:390: Request failed during generation: Server error: Expected is_sm90 || is_sm8x || is_sm75 to be true, but got false. (Could this error message be improved? If so, please report an enhancement request to PyTorch.)
```
Any ideas?
| Full traceback:
```
2023-04-18T19:49:59.869268Z ERROR shard-manager: text_generation_launcher: Method Prefill encountered an error.
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 311, in __call__
return get_command(self)(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1130, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 778, in main
return _main(
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 216, in _main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1657, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 760, in invoke
return __callback(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 683, in wrapper
return callback(**use_params) # type: ignore
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 58, in serve
server.serve(model_id, revision, sharded, quantize, uds_path)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 135, in serve
asyncio.run(serve_inner(model_id, revision, sharded, quantize))
File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 634, in run_until_complete
self.run_forever()
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 601, in run_forever
self._run_once()
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 1905, in _run_once
handle._run()
File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/opt/conda/lib/python3.9/site-packages/grpc_interceptor/server.py", line 159, in invoke_intercept_method
return await self.intercept(
> File "/opt/conda/lib/python3.9/site-packages/text_generation_server/interceptor.py", line 20, in intercept
return await response
File "/opt/conda/lib/python3.9/site-packages/opentelemetry/instrumentation/grpc/_aio_server.py", line 82, in _unary_interceptor
raise error
File "/opt/conda/lib/python3.9/site-packages/opentelemetry/instrumentation/grpc/_aio_server.py", line 73, in _unary_interceptor
return await behavior(request_or_iterator, context)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 46, in Prefill
generations, next_batch = self.model.generate_token(batch)
File "/opt/conda/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_causal_lm.py", line 278, in generate_token
out, present = self.forward(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_causal_lm.py", line 262, in forward
return self.model.forward(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_neox_modeling.py", line 676, in forward
hidden_states, present = self.gpt_neox(
File "/opt/conda/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_neox_modeling.py", line 614, in forward
hidden_states, residual = layer(
File "/opt/conda/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_neox_modeling.py", line 460, in forward
attn_output = self.attention(
File "/opt/conda/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_neox_modeling.py", line 324, in forward
flash_attn_cuda.fwd(
RuntimeError: Expected is_sm90 || is_sm8x || is_sm75 to be true, but got false. (Could this error message be improved? If so, please report an enhancement request to PyTorch.)
```
Because LLaMA uses FlashAttention by default, and your device is not an sm75/8x/90 GPU architecture. In my experience, the 2080 Ti and A100 work, but the V100 can't.
@Wen1163204547, thanks for your help!
@Jblauvs, I will add a check to see if the GPU architecture is supported before importing flash attention. | 2023-04-19T10:48:23 |
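A hedged sketch of the capability probe described above, matching the patch (flash attention needs an sm75, sm8x or sm90 GPU):

```python
import torch

if torch.cuda.is_available():
    major, minor = torch.cuda.get_device_capability()
    supported = (major == 7 and minor == 5) or major == 8 or (major == 9 and minor == 0)
    print(f"Compute capability {major}.{minor}; flash attention supported: {supported}")
else:
    print("No CUDA device available; flash attention disabled")
```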
|
huggingface/text-generation-inference | 252 | huggingface__text-generation-inference-252 | [
"249"
] | db2b4e07544a238428999ab4a2509758eeee422c | diff --git a/server/text_generation_server/models/causal_lm.py b/server/text_generation_server/models/causal_lm.py
--- a/server/text_generation_server/models/causal_lm.py
+++ b/server/text_generation_server/models/causal_lm.py
@@ -335,7 +335,7 @@ def concatenate(cls, batches: List["CausalLMBatch"]) -> "CausalLMBatch":
[t.view(len(batch), -1, *t.shape[-2:]) for t in layer]
for layer in batch.past_key_values
]
- elif batch.past_key_values[0][0].shape == 3:
+ elif len(batch.past_key_values[0][0].shape) == 3:
for layer in batch.past_key_values:
for k, t in enumerate(layer):
layer[k] = t.view(len(batch), -1, *t.shape[-2:])
| Concurrent issue - batch size can not be greater than 1
1. When I use the ab tool to test with concurrency c=2:
```
ab -n 10 -c 2 -p post_data.json -T application/json -H 'accept: application/json' http://localhost:8080/generate
```
a runtime error is raised when batch_size > 1:
```
data:{"error":"Request failed during generation: Server error: not enough values to unpack (expected 4, got 3)","error_type":"generation"}
```

| @njhill
@paulcx what model are you using here?
> @paulcx what model are you using here?
bigscience/bloom-7b1
@paulcx @OlivierDehaene OK I see it, my bad, fix incoming. | 2023-04-27T07:23:08 |
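The one-character root cause is easy to reproduce in isolation (illustration only, not repository code): comparing a `torch.Size` to an int is always `False`, so the reshape branch was never taken.

```python
import torch

t = torch.zeros(2, 3, 4)
print(t.shape == 3)       # False: a torch.Size is never equal to an int
print(len(t.shape) == 3)  # True: the dimensionality check the concatenate logic needs
```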
|
huggingface/text-generation-inference | 356 | huggingface__text-generation-inference-356 | [
"349"
] | 91d9beec90fba479a6751a4c8efae25adc28b001 | diff --git a/server/text_generation_server/models/t5.py b/server/text_generation_server/models/t5.py
--- a/server/text_generation_server/models/t5.py
+++ b/server/text_generation_server/models/t5.py
@@ -40,7 +40,7 @@ def __init__(
self.process_group, rank, world_size = initialize_torch_distributed()
if torch.cuda.is_available():
device = torch.device(f"cuda:{rank}")
- dtype = torch.float16
+ dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float32
else:
device = torch.device("cpu")
dtype = torch.float32
| Question about sharding / TP
@OlivierDehaene @Narsil is it expected that the output should be the same (or very close) when using the TP implementation for a given model vs non-sharded/single GPU?
Am seeing quite different output, this is for example with flan-ul2 or flan-t5-xxl with 2 GPUs, using float16 for both single and double GPU cases.
This is using a different fork of the code - I'm still investigating and will also try with the latest from the main branch of this repo as-is, but would be very helpful to know generally what you observe / what's expected.
| > @OlivierDehaene @Narsil is it expected that the output should be the same (or very close) when using the TP implementation for a given model vs non-sharded/single GPU?
Yes everything should be very close.
Very close is hard to quantify though. The PyTorch version can screw things up, and dtype can mess up (some models are more susceptible to the `bfloat16` to `float16` change).
https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py#LL316C15-L316C71
https://github.com/huggingface/transformers/issues/20287
Those might be relevant to your issue too. (Maybe the sharding accentuates some precision issue)
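One concrete reason `float16` is more fragile than `bfloat16` is its much smaller exponent range; a tiny, self-contained illustration (numbers are approximate):

```python
import torch

x = torch.tensor(3000.0)           # float32
print((x * x).to(torch.float16))   # inf: float16 overflows above ~65504
print((x * x).to(torch.bfloat16))  # ~9.0e6: bfloat16 keeps the float32 exponent range
```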
Thanks @Narsil! | 2023-05-23T10:15:40 |
|
huggingface/text-generation-inference | 360 | huggingface__text-generation-inference-360 | [
"349"
] | 0a6494785c35ac1c0178ac03d36acd0b8c21de6f | diff --git a/server/text_generation_server/models/bloom.py b/server/text_generation_server/models/bloom.py
--- a/server/text_generation_server/models/bloom.py
+++ b/server/text_generation_server/models/bloom.py
@@ -160,7 +160,10 @@ def load_weights(
# XXX: Hack for Rowlinear to add the bias only once.
if rank != 0:
tensor = torch.zeros_like(tensor)
- elif isinstance(module, TensorParallelEmbedding) or name == "lm_head.weight":
+ elif (
+ isinstance(module, TensorParallelEmbedding)
+ or name == "lm_head.weight"
+ ):
size = slice_.get_shape()[0]
block_size = size // world_size
start = rank * block_size
diff --git a/server/text_generation_server/models/t5.py b/server/text_generation_server/models/t5.py
--- a/server/text_generation_server/models/t5.py
+++ b/server/text_generation_server/models/t5.py
@@ -40,7 +40,7 @@ def __init__(
self.process_group, rank, world_size = initialize_torch_distributed()
if torch.cuda.is_available():
device = torch.device(f"cuda:{rank}")
- dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float32
+ dtype = torch.float16
else:
device = torch.device("cpu")
dtype = torch.float32
@@ -154,9 +154,15 @@ def load_weights(
f"Name {name} -- Current {current_parameter_tensor.shape} and got {tensor.shape}"
)
- tensor = tensor.contiguous().to(dtype)
+ tensor = tensor.contiguous()
- if quantize == "bitsandbytes":
+ # See: https://github.com/huggingface/transformers/blob/1fe1e3caa44617047f149bcc0c0b566343b714a7/src/transformers/models/t5/modeling_t5.py#LL316C15-L316C71
+ if module_name.endswith("wo"):
+ tensor = tensor.to(torch.float32)
+ else:
+ tensor = tensor.to(dtype)
+
+ if quantize == "bitsandbytes" and not module_name.endswith("wo"):
if not HAS_BITS_AND_BYTES:
raise ImportError(
"bitsandbytes is not available on your machine either because it is not installed "
@@ -207,7 +213,7 @@ def linear(input, weight, bias):
module.linear = replace_linear(state)
- elif quantize == "gptq":
+ elif quantize == "gptq" and not module_name.endswith("wo"):
raise NotImplementedError(
"`gptq` is not implemented for now"
)
| diff --git a/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded.json b/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded.json
new file mode 100644
--- /dev/null
+++ b/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded.json
@@ -0,0 +1,60 @@
+{
+ "details": {
+ "best_of_sequences": null,
+ "finish_reason": "eos_token",
+ "generated_tokens": 7,
+ "prefill": [
+ {
+ "id": 0,
+ "logprob": null,
+ "text": "<pad>"
+ }
+ ],
+ "seed": null,
+ "tokens": [
+ {
+ "id": 3,
+ "logprob": -0.7001953,
+ "special": false,
+ "text": " "
+ },
+ {
+ "id": 18,
+ "logprob": -1.1943359,
+ "special": false,
+ "text": "-"
+ },
+ {
+ "id": 26937,
+ "logprob": -1.2099609,
+ "special": false,
+ "text": "196"
+ },
+ {
+ "id": 3,
+ "logprob": -1.2451172,
+ "special": false,
+ "text": " "
+ },
+ {
+ "id": 1956,
+ "logprob": -0.3322754,
+ "special": false,
+        "text": "°"
+ },
+ {
+ "id": 254,
+ "logprob": -0.19213867,
+ "special": false,
+ "text": "C"
+ },
+ {
+ "id": 1,
+ "logprob": -0.030151367,
+ "special": true,
+ "text": "</s>"
+ }
+ ]
+ },
+  "generated_text": "-196 °C"
+}
diff --git a/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded_load.json b/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded_load.json
new file mode 100644
--- /dev/null
+++ b/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded_load.json
@@ -0,0 +1,242 @@
+[
+ {
+ "details": {
+ "best_of_sequences": null,
+ "finish_reason": "eos_token",
+ "generated_tokens": 7,
+ "prefill": [
+ {
+ "id": 0,
+ "logprob": null,
+ "text": "<pad>"
+ }
+ ],
+ "seed": null,
+ "tokens": [
+ {
+ "id": 3,
+ "logprob": -0.7001953,
+ "special": false,
+ "text": " "
+ },
+ {
+ "id": 18,
+ "logprob": -1.1943359,
+ "special": false,
+ "text": "-"
+ },
+ {
+ "id": 26937,
+ "logprob": -1.2119141,
+ "special": false,
+ "text": "196"
+ },
+ {
+ "id": 3,
+ "logprob": -1.2480469,
+ "special": false,
+ "text": " "
+ },
+ {
+ "id": 1956,
+ "logprob": -0.33203125,
+ "special": false,
+          "text": "°"
+ },
+ {
+ "id": 254,
+ "logprob": -0.19250488,
+ "special": false,
+ "text": "C"
+ },
+ {
+ "id": 1,
+ "logprob": -0.030166626,
+ "special": true,
+ "text": "</s>"
+ }
+ ]
+ },
+    "generated_text": "-196 °C"
+ },
+ {
+ "details": {
+ "best_of_sequences": null,
+ "finish_reason": "eos_token",
+ "generated_tokens": 7,
+ "prefill": [
+ {
+ "id": 0,
+ "logprob": null,
+ "text": "<pad>"
+ }
+ ],
+ "seed": null,
+ "tokens": [
+ {
+ "id": 3,
+ "logprob": -0.7001953,
+ "special": false,
+ "text": " "
+ },
+ {
+ "id": 18,
+ "logprob": -1.1943359,
+ "special": false,
+ "text": "-"
+ },
+ {
+ "id": 26937,
+ "logprob": -1.2119141,
+ "special": false,
+ "text": "196"
+ },
+ {
+ "id": 3,
+ "logprob": -1.2480469,
+ "special": false,
+ "text": " "
+ },
+ {
+ "id": 1956,
+ "logprob": -0.33203125,
+ "special": false,
+          "text": "°"
+ },
+ {
+ "id": 254,
+ "logprob": -0.19250488,
+ "special": false,
+ "text": "C"
+ },
+ {
+ "id": 1,
+ "logprob": -0.030166626,
+ "special": true,
+ "text": "</s>"
+ }
+ ]
+ },
+    "generated_text": "-196 °C"
+ },
+ {
+ "details": {
+ "best_of_sequences": null,
+ "finish_reason": "eos_token",
+ "generated_tokens": 7,
+ "prefill": [
+ {
+ "id": 0,
+ "logprob": null,
+ "text": "<pad>"
+ }
+ ],
+ "seed": null,
+ "tokens": [
+ {
+ "id": 3,
+ "logprob": -0.7001953,
+ "special": false,
+ "text": " "
+ },
+ {
+ "id": 18,
+ "logprob": -1.1943359,
+ "special": false,
+ "text": "-"
+ },
+ {
+ "id": 26937,
+ "logprob": -1.2119141,
+ "special": false,
+ "text": "196"
+ },
+ {
+ "id": 3,
+ "logprob": -1.2480469,
+ "special": false,
+ "text": " "
+ },
+ {
+ "id": 1956,
+ "logprob": -0.33203125,
+ "special": false,
+          "text": "°"
+ },
+ {
+ "id": 254,
+ "logprob": -0.19250488,
+ "special": false,
+ "text": "C"
+ },
+ {
+ "id": 1,
+ "logprob": -0.030166626,
+ "special": true,
+ "text": "</s>"
+ }
+ ]
+ },
+    "generated_text": "-196 °C"
+ },
+ {
+ "details": {
+ "best_of_sequences": null,
+ "finish_reason": "eos_token",
+ "generated_tokens": 7,
+ "prefill": [
+ {
+ "id": 0,
+ "logprob": null,
+ "text": "<pad>"
+ }
+ ],
+ "seed": null,
+ "tokens": [
+ {
+ "id": 3,
+ "logprob": -0.7001953,
+ "special": false,
+ "text": " "
+ },
+ {
+ "id": 18,
+ "logprob": -1.1943359,
+ "special": false,
+ "text": "-"
+ },
+ {
+ "id": 26937,
+ "logprob": -1.2099609,
+ "special": false,
+ "text": "196"
+ },
+ {
+ "id": 3,
+ "logprob": -1.2451172,
+ "special": false,
+ "text": " "
+ },
+ {
+ "id": 1956,
+ "logprob": -0.3322754,
+ "special": false,
+          "text": "°"
+ },
+ {
+ "id": 254,
+ "logprob": -0.19213867,
+ "special": false,
+ "text": "C"
+ },
+ {
+ "id": 1,
+ "logprob": -0.030151367,
+ "special": true,
+ "text": "</s>"
+ }
+ ]
+ },
+    "generated_text": "-196 °C"
+ }
+]
diff --git a/integration-tests/models/test_flash_neox.py b/integration-tests/models/test_flash_neox.py
--- a/integration-tests/models/test_flash_neox.py
+++ b/integration-tests/models/test_flash_neox.py
@@ -36,6 +36,8 @@ async def test_flash_neox_load(flash_neox, generate_load, response_snapshot):
generated_texts = [r.generated_text for r in responses]
assert len(generated_texts) == 4
- assert generated_texts, all([text == generated_texts[0] for text in generated_texts])
+ assert generated_texts, all(
+ [text == generated_texts[0] for text in generated_texts]
+ )
assert responses == response_snapshot
diff --git a/integration-tests/models/test_t5_sharded.py b/integration-tests/models/test_t5_sharded.py
new file mode 100644
--- /dev/null
+++ b/integration-tests/models/test_t5_sharded.py
@@ -0,0 +1,38 @@
+import pytest
+
+
[email protected](scope="module")
+def t5_sharded_handle(launcher):
+ with launcher("google/flan-t5-xxl", num_shard=2) as handle:
+ yield handle
+
+
[email protected](scope="module")
+async def t5_sharded(t5_sharded_handle):
+ await t5_sharded_handle.health(240)
+ return t5_sharded_handle.client
+
+
[email protected]
+async def test_t5_sharded(t5_sharded, response_snapshot):
+ response = await t5_sharded.generate(
+ "Please answer the following question. What is the boiling point of Nitrogen?",
+ max_new_tokens=10,
+ )
+
+ assert response == response_snapshot
+
+
[email protected]
+async def test_t5_sharded_load(t5_sharded, generate_load, response_snapshot):
+ responses = await generate_load(
+ t5_sharded,
+ "Please answer the following question. What is the boiling point of Nitrogen?",
+ max_new_tokens=10,
+ n=4,
+ )
+
+ assert len(responses) == 4
+ assert all([r.generated_text == responses[0].generated_text for r in responses])
+
+ assert responses == response_snapshot
| Question about sharding / TP
@OlivierDehaene @Narsil is it expected that the output should be the same (or very close) when using the TP implementation for a given model vs non-sharded/single GPU?
Am seeing quite different output, this is for example with flan-ul2 or flan-t5-xxl with 2 GPUs, using float16 for both single and double GPU cases.
This is using a different fork of the code - I'm still investigating and will also try with the latest from the main branch of this repo as-is, but would be very helpful to know generally what you observe / what's expected.
| > @OlivierDehaene @Narsil is it expected that the output should be the same (or very close) when using the TP implementation for a given model vs non-sharded/single GPU?
Yes everything should be very close.
Very close is hard to quantify though. The PyTorch version can screw things up, and dtype can mess up (some models are more susceptible to the `bfloat16` to `float16` change).
https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py#LL316C15-L316C71
https://github.com/huggingface/transformers/issues/20287
Those might be relevant to your issue too. (Maybe the sharding accentuates some precision issue)
Thanks @Narsil!
@OlivierDehaene thanks for https://github.com/huggingface/text-generation-inference/pull/356, I had also just discovered that bf16 works better for flan-t5-xxl sharded, per @Narsil's suggestion.
However it would be good if we could get it to work in fp16 so that V100's could be used, and since the results are more stable generally for variable length input.
It's interesting that:
- Non-sharded flan-t5-xxl works ok in fp16 (I know changes were made to the impl in the past to accommodate this)
- flan-ul2 actually appears to work ok when sharded in fp16. The output does begin to diverge slightly from a non-TP deployment but the differences are fairly minor
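A hedged sketch of the selective-precision idea the patch above applies for fp16: T5's feed-forward output projections (`wo`) stay in float32 while the rest of the weights use the requested dtype (illustrative helper, not the repository's exact code):

```python
import torch


def cast_weight(module_name: str, tensor: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
    # The "wo" projections overflow easily in float16, so keep them in float32.
    if module_name.endswith("wo"):
        return tensor.to(torch.float32)
    return tensor.to(dtype)
```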
💡 : Imagine a world where we could change a model's range of values arbitrarily without worrying about dtypes :)
Since the model contains all of the weights, it should be doable to find invariants to allow some form of rescaling.. | 2023-05-23T15:40:21 |
huggingface/text-generation-inference | 393 | huggingface__text-generation-inference-393 | [
"366"
] | db2ebe3947c4cb018e1504ae417f3efb9a517d89 | diff --git a/server/text_generation_server/models/flash_santacoder.py b/server/text_generation_server/models/flash_santacoder.py
--- a/server/text_generation_server/models/flash_santacoder.py
+++ b/server/text_generation_server/models/flash_santacoder.py
@@ -54,12 +54,7 @@ def __init__(
)
# We do not use from_pretrained as we modified the model internal module layout
- try:
- filenames = weight_files(model_id, revision, ".bin")
- # Local files not found
- except LocalEntryNotFoundError:
- hub_files = weight_hub_files(model_id, revision, ".bin")
- filenames = download_weights(hub_files, model_id, revision)
+ filenames = weight_files(model_id, revision, ".safetensors")
with init_empty_weights():
model = FlashSantacoderForCausalLM(config)
@@ -91,85 +86,100 @@ def load_weights(
transpose: bool,
):
for filename in filenames:
- state_dict = torch.load(filename, map_location="cpu")
- for key, value in state_dict.items():
- value = value.to(device if quantize is None else "cpu").to(dtype)
-
- layer_name = ".".join(key.split(".")[:4])
-
- # Fused qkv
- if "q_attn.weight" in key or "kv_attn.weight" in key:
- final_key = layer_name + ".c_attn.weight"
- elif "q_attn.bias" in key or "kv_attn.bias" in key:
- final_key = layer_name + ".c_attn.bias"
-
- else:
- final_key = key
-
- module_name, param_name = final_key.rsplit(".", 1)
- module = model.get_submodule(module_name)
-
- try:
- current_parameter_tensor = module._parameters[param_name]
- except KeyError:
- current_parameter_tensor = None
-
- if current_parameter_tensor is not None:
- if transpose and (
- "c_fc.weight" in key
- or "c_proj.weight" in key
- or "q_attn.weight" in key
- or "kv_attn.weight" in key
- or "c_attn.weight" in key
- ):
- # Tranpose as we use nn.Linear instead of Conv1D
- value = value.T
-
- if current_parameter_tensor.device == torch.device("meta"):
- # Init qkv
- if "c_attn.weight" in final_key:
- module._parameters[param_name] = value.new_empty(
- (
- model.transformer.head_size
- * (model.transformer.num_heads + 2),
- value.shape[1],
+ with safe_open(
+ filename, framework="pt", device=str(device) if quantize is None else "cpu"
+ ) as f:
+ for key in f.keys():
+ value = f.get_tensor(key)
+ value = value.to(device if quantize is None else "cpu").to(dtype)
+
+ layer_name = ".".join(key.split(".")[:4])
+
+ # Fused qkv
+ if "q_attn.weight" in key or "kv_attn.weight" in key:
+ final_key = layer_name + ".c_attn.weight"
+ elif "q_attn.bias" in key or "kv_attn.bias" in key:
+ final_key = layer_name + ".c_attn.bias"
+
+ else:
+ final_key = key
+
+ module_name, param_name = final_key.rsplit(".", 1)
+ module = model.get_submodule(module_name)
+
+ try:
+ current_parameter_tensor = module._parameters[param_name]
+ except KeyError:
+ current_parameter_tensor = None
+
+ if current_parameter_tensor is not None:
+ if transpose and (
+ "c_fc.weight" in key
+ or "c_proj.weight" in key
+ or "q_attn.weight" in key
+ or "kv_attn.weight" in key
+ or "c_attn.weight" in key
+ ):
+ # Tranpose as we use nn.Linear instead of Conv1D
+ value = value.T
+
+ if current_parameter_tensor.device == torch.device("meta"):
+ # Init qkv
+ if "c_attn.weight" in final_key:
+ module._parameters[param_name] = value.new_empty(
+ (
+ model.transformer.head_size
+ * (model.transformer.num_heads + 2),
+ value.shape[1],
+ )
)
- )
- elif "c_attn.bias" in final_key:
- module._parameters[param_name] = value.new_empty(
- (
- model.transformer.head_size
- * (model.transformer.num_heads + 2)
+ elif "c_attn.bias" in final_key:
+ module._parameters[param_name] = value.new_empty(
+ (
+ model.transformer.head_size
+ * (model.transformer.num_heads + 2)
+ )
)
- )
- # Copy to correct slice
- if "q_attn.weight" in key:
- module._parameters[param_name][: value.shape[0]] = value
- elif "q_attn.bias" in key:
- module._parameters[param_name][: value.shape[0]] = value
- elif "kv_attn.weight" in key:
- module._parameters[param_name][
- model.transformer.head_size * model.transformer.num_heads :
- ] = value
- elif "kv_attn.bias" in key:
- module._parameters[param_name][
- model.transformer.head_size * model.transformer.num_heads :
- ] = value
+ # Copy to correct slice
+ if "q_attn.weight" in key:
+ module._parameters[param_name][: value.shape[0]] = value
+ elif "q_attn.bias" in key:
+ module._parameters[param_name][: value.shape[0]] = value
+ elif "kv_attn.weight" in key:
+ module._parameters[param_name][
+ model.transformer.head_size * model.transformer.num_heads :
+ ] = value
+ elif "kv_attn.bias" in key:
+ module._parameters[param_name][
+ model.transformer.head_size * model.transformer.num_heads :
+ ] = value
+ else:
+ if current_parameter_tensor.shape != value.shape:
+ raise ValueError(
+ f"Name {final_key} -- Current {current_parameter_tensor.shape} and got {value.shape}"
+ )
+ module._parameters[param_name] = value
else:
- if current_parameter_tensor.shape != value.shape:
- raise ValueError(
- f"Name {final_key} -- Current {current_parameter_tensor.shape} and got {value.shape}"
- )
- module._parameters[param_name] = value
- else:
- module._buffers[param_name] = value
+ module._buffers[param_name] = value
- del value
+ del value
+
+ if model.lm_head.weight.device == torch.device("meta"):
+ model.lm_head.weight = torch.nn.Parameter(model.transformer.wte.weight)
torch.cuda.empty_cache()
model.post_load_weights(quantize)
+ uninitialized_parameters = []
+ for n, p in model.named_parameters():
+ if p.data.device == torch.device("meta"):
+ uninitialized_parameters.append(n)
+ if uninitialized_parameters:
+ raise RuntimeError(
+ f"found uninitialized parameters in model : {uninitialized_parameters}"
+ )
+
def decode(self, generated_ids: List[int]) -> str:
# Do not skip special tokens as they are used for custom parsing rules of the generated text
return self.tokenizer.decode(
@@ -389,6 +399,8 @@ def load_weights(
else:
module._buffers[param_name] = tensor
- model.lm_head.weight = torch.nn.Parameter(model.transformer.wte.weight)
+ if model.lm_head.weight.device == torch.device("meta"):
+ model.lm_head.weight = torch.nn.Parameter(model.transformer.wte.weight)
+
torch.cuda.empty_cache()
model.post_load_weights(quantize)
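For orientation, a tiny self-contained sketch of the fused-QKV layout the loader above assembles for multi-query attention (the sizes below are made up; the real ones come from `model.transformer.head_size` and `num_heads`): the query heads occupy the first `head_size * num_heads` rows of `c_attn`, and the single shared key/value pair fills the remaining `2 * head_size` rows, which is exactly what the two slice assignments in the patch do.
```
import torch

head_size, num_heads, hidden = 64, 8, 512                 # made-up sizes
q = torch.randn(head_size * num_heads, hidden)            # per-head query weights
kv = torch.randn(head_size * 2, hidden)                   # one shared K and one shared V head

c_attn = q.new_empty((head_size * (num_heads + 2), hidden))
c_attn[: q.shape[0]] = q                                  # query slice
c_attn[head_size * num_heads :] = kv                      # shared key/value slice
```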
| HuggingFaceH4/starchat-alpha fails with a weird 'No module named h4' error
### System Info
On prem setup with A100, running via docker container
```
2023-05-24T15:22:40.644196Z INFO text_generation_launcher: Runtime environment:
Target: x86_64-unknown-linux-gnu
Cargo version: 1.69.0
Commit sha: 91d9beec90fba479a6751a4c8efae25adc28b001
Docker label: sha-91d9bee
nvidia-smi:
Wed May 24 15:22:39 2023
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 525.116.04 Driver Version: 525.116.04 CUDA Version: 12.0 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 NVIDIA A100 80G... Off | 00000000:21:00.0 Off | 0 |
| N/A 37C P0 64W / 300W | 0MiB / 81920MiB | 0% Default |
| | | Disabled |
+-------------------------------+----------------------+----------------------+
```
### Information
- [X] Docker
- [X] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
Starting `HuggingFaceH4/starchat-alpha` model with:
```
docker run --device nvidia.com/gpu=all --shm-size 1g -p 8080:80 -v $PWD/data:/data -e -d ghcr.io/huggingface/text-generation-inference:latest --model-id HuggingFaceH4/starchat-alpha --max-total-tokens 8192 --num-shard 1
```
Fails with:
```
2023-05-24T15:10:07.936075Z INFO text_generation_launcher: Starting shard 0
2023-05-24T15:10:17.946411Z INFO text_generation_launcher: Waiting for shard 0 to be ready...
2023-05-24T15:10:27.761587Z ERROR shard-manager: text_generation_launcher: Error when initializing model
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 311, in __call__
return get_command(self)(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1130, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 778, in main
return _main(
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 216, in _main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1657, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 760, in invoke
return __callback(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 683, in wrapper return callback(**use_params) # type: ignore
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 66, in serve
server.serve(model_id, revision, sharded, quantize, uds_path)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 155, in serve
asyncio.run(serve_inner(model_id, revision, sharded, quantize))
File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 634, in run_until_complete
self.run_forever()
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 601, in run_forever
self._run_once()
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 1905, in _run_once
handle._run()
File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
> File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 124, in serve_inner
model = get_model(model_id, revision, sharded, quantize)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/__init__.py", line 125, in get_model
return santacoder_cls(model_id, revision, quantize=quantize)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_santacoder.py", line 62, in __init__
self.load_weights(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_santacoder.py", line 89, in load_weights
state_dict = torch.load(filename, map_location="cpu")
File "/opt/conda/lib/python3.9/site-packages/torch/serialization.py", line 809, in load
return _load(opened_zipfile, map_location, pickle_module, **pickle_load_args)
File "/opt/conda/lib/python3.9/site-packages/torch/serialization.py", line 1172, in _load
result = unpickler.load()
File "/opt/conda/lib/python3.9/site-packages/torch/serialization.py", line 1165, in find_class
return super().find_class(mod_name, name)
ModuleNotFoundError: No module named 'h4'
rank=0
```
What is interesting is that the parent model (`--model-id bigcode/starcoder`) works just fine on the same setup and with the same launch parameters.
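For reference, the traceback shows `torch.load` unpickling the checkpoint and trying to import whatever modules the pickled objects reference (here `h4`), which is exactly what the patch above sidesteps by switching to safetensors' `safe_open`, which reads raw tensors and never touches pickle. A minimal sketch of that loading pattern (the filename is a placeholder, not the real path):
```
from safetensors import safe_open

tensors = {}
with safe_open("model.safetensors", framework="pt", device="cpu") as f:
    for key in f.keys():
        tensors[key] = f.get_tensor(key)
```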
### Expected behavior
The model should load, eg for `bigcode/starcoder`:
```
docker run --device nvidia.com/gpu=0 --shm-size 1g -p 8080:80 -v $PWD/data:/data ghcr.io/huggingface/text-generation-inference:latest --model-id bigcode/starcoder --max-total-tokens 8192 --num-shard 1
2023-05-24T15:30:26.971236Z INFO text_generation_launcher: Waiting for shard 0 to be ready...
2023-05-24T15:30:28.709715Z INFO shard-manager: text_generation_launcher: Server started at unix:///tmp/text-generation-server-0
rank=0
2023-05-24T15:30:28.774118Z INFO text_generation_launcher: Shard 0 ready in 31.846887323s
2023-05-24T15:30:28.853441Z INFO text_generation_launcher: Starting Webserver
2023-05-24T15:30:29.837524Z INFO text_generation_router: router/src/main.rs:178: Connected
```
| I have the same problem, both in docker and host, can anyone help? | 2023-06-01T08:56:01 |
|
huggingface/text-generation-inference | 395 | huggingface__text-generation-inference-395 | [
"389"
] | db2ebe3947c4cb018e1504ae417f3efb9a517d89 | diff --git a/server/text_generation_server/models/causal_lm.py b/server/text_generation_server/models/causal_lm.py
--- a/server/text_generation_server/models/causal_lm.py
+++ b/server/text_generation_server/models/causal_lm.py
@@ -496,11 +496,6 @@ def __init__(
else:
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
- self.has_position_ids = (
- inspect.signature(model.forward).parameters.get("position_ids", None)
- is not None
- )
-
super(CausalLM, self).__init__(
model=model,
tokenizer=tokenizer,
diff --git a/server/text_generation_server/models/model.py b/server/text_generation_server/models/model.py
--- a/server/text_generation_server/models/model.py
+++ b/server/text_generation_server/models/model.py
@@ -1,3 +1,4 @@
+import inspect
import torch
from abc import ABC, abstractmethod
@@ -29,6 +30,12 @@ def __init__(
self.device = device
self.rank = rank
self.world_size = world_size
+
+ self.has_position_ids = (
+ inspect.signature(model.forward).parameters.get("position_ids", None)
+ is not None
+ )
+
self.check_initialized()
@property
| SantaCoder Object has no attribute has_position_ids
### System Info
I am using text-generation-inference for a locally deployed StarCoder model, and I am encountering an error when running inference. Specifically, I am receiving the error message 'SantaCoder Object has no attribute has_position_ids'. After inspecting the code, I have determined the cause of the error.
The StarCoder model has a model-type of 'gpt-bigcode', which maps to the SantaCoder model in text-generation-inference. The SantaCoder model is a child class of CausalLM, which is a child class of Model. In CausalLM's __init__ function, an attribute called 'has_position_ids' is defined and used in CausalLM's forward function. However, SantaCoder does not call CausalLM's __init__ function. Instead, it calls Model's __init__ function using the syntax `super(CausalLM, self).__init__()`. As a result, when SantaCoder's forward function is called (which actually calls CausalLM's forward function), the 'has_position_ids' attribute is not defined, leading to the error.
It is unclear whether this is a bug or if I am using it incorrectly.
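A minimal, self-contained sketch of the inheritance pattern described above (the class bodies are illustrative, not the real TGI code); running it raises the same kind of `AttributeError`:
```
class Model:
    def __init__(self):
        self.device = "cpu"

class CausalLM(Model):
    def __init__(self):
        super().__init__()
        self.has_position_ids = True   # only set here

    def forward(self):
        return self.has_position_ids

class SantaCoder(CausalLM):
    def __init__(self):
        # Skips CausalLM.__init__ and calls Model.__init__ directly,
        # so self.has_position_ids is never set on the instance.
        super(CausalLM, self).__init__()

SantaCoder().forward()   # AttributeError: 'SantaCoder' object has no attribute 'has_position_ids'
```
This is why the patch above moves the `inspect.signature(model.forward).parameters.get("position_ids", None)` check into `Model.__init__`, so every subclass gets the attribute regardless of which `__init__` it calls.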
### Information
- [ ] Docker
- [X] The CLI directly
### Tasks
- [ ] An officially supported command
- [ ] My own modifications
### Reproduction
1. download starcoder
2. text-generation-launcher --model-id /root/autodl-tmp/models/starcoder --sharded false --trust-remote-code `true` --port 8088
3. use a client to send request
from text_generation import Client

API_URL = 'http://localhost:8088/'
generate_kwargs = dict(
    temperature=0.2,
    max_new_tokens=512,
    top_p=0.95,
    repetition_penalty=1.1,
    do_sample=True,
    seed=42,
)

def print_output(stream):
    output = ''
    for response in stream:
        if response.token.text == "<|endoftext|>":
            break
        else:
            output += response.token.text
    return output

client = Client(API_URL)
prompt = 'def hello():'
stream = client.generate_stream(prompt, **generate_kwargs)
print_output(stream)
### Expected behavior
return the completion code.
| 2023-06-01T09:04:08 |
||
huggingface/text-generation-inference | 465 | huggingface__text-generation-inference-465 | [
"456"
] | f59fb8b630844c2ad2cd80e689202de89d45c37e | diff --git a/server/text_generation_server/models/__init__.py b/server/text_generation_server/models/__init__.py
--- a/server/text_generation_server/models/__init__.py
+++ b/server/text_generation_server/models/__init__.py
@@ -18,11 +18,43 @@
from text_generation_server.models.t5 import T5Sharded
from text_generation_server.models.gpt_neox import GPTNeoxSharded
+# The flag below controls whether to allow TF32 on matmul. This flag defaults to False
+# in PyTorch 1.12 and later.
+torch.backends.cuda.matmul.allow_tf32 = True
+
+# The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True.
+torch.backends.cudnn.allow_tf32 = True
+
+# Disable gradients
+torch.set_grad_enabled(False)
+
+__all__ = [
+ "Model",
+ "BLOOMSharded",
+ "CausalLM",
+ "FlashCausalLM",
+ "GalacticaSharded",
+ "Seq2SeqLM",
+ "SantaCoder",
+ "OPTSharded",
+ "T5Sharded",
+ "get_model",
+]
+
+FLASH_ATT_ERROR_MESSAGE = (
+ "{} requires CUDA and Flash Attention kernels to be installed.\n"
+ "Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) "
+ "or install flash attention with `cd server && make install install-flash-attention`"
+)
+
try:
- if (
- torch.cuda.is_available()
- and not os.getenv("USE_FLASH_ATTENTION", "").lower() == "false"
- ):
+ if not os.getenv("USE_FLASH_ATTENTION", "").lower() == "false":
+ if not torch.cuda.is_available():
+ FLASH_ATT_ERROR_MESSAGE = (
+ "{} requires CUDA. No compatible CUDA devices found."
+ )
+ raise ImportError("CUDA is not available")
+
major, minor = torch.cuda.get_device_capability()
is_sm75 = major == 7 and minor == 5
is_sm8x = major == 8 and minor >= 0
@@ -30,6 +62,10 @@
supported = is_sm75 or is_sm8x or is_sm90
if not supported:
+ FLASH_ATT_ERROR_MESSAGE = (
+ "{} requires a CUDA device with capability 7.5, > 8.0 or 9.0. "
+ "No compatible CUDA device found."
+ )
raise ImportError(
f"GPU with CUDA capability {major} {minor} is not supported"
)
@@ -52,41 +88,12 @@
)
FLASH_ATTENTION = False
-__all__ = [
- "Model",
- "BLOOMSharded",
- "CausalLM",
- "FlashCausalLM",
- "GalacticaSharded",
- "Seq2SeqLM",
- "SantaCoder",
- "OPTSharded",
- "T5Sharded",
- "get_model",
-]
-
if FLASH_ATTENTION:
__all__.append(FlashNeoXSharded)
__all__.append(FlashRWSharded)
__all__.append(FlashSantacoderSharded)
__all__.append(FlashLlama)
-FLASH_ATT_ERROR_MESSAGE = (
- "{} requires Flash Attention CUDA kernels to be installed.\n"
- "Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) "
- "or install flash attention with `cd server && make install install-flash-attention`"
-)
-
-# The flag below controls whether to allow TF32 on matmul. This flag defaults to False
-# in PyTorch 1.12 and later.
-torch.backends.cuda.matmul.allow_tf32 = True
-
-# The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True.
-torch.backends.cudnn.allow_tf32 = True
-
-# Disable gradients
-torch.set_grad_enabled(False)
-
def get_model(
model_id: str,
diff --git a/server/text_generation_server/utils/convert.py b/server/text_generation_server/utils/convert.py
--- a/server/text_generation_server/utils/convert.py
+++ b/server/text_generation_server/utils/convert.py
@@ -16,9 +16,9 @@ def check_file_size(source_file: Path, target_file: Path):
source_file_size = source_file.stat().st_size
target_file_size = target_file.stat().st_size
- if (source_file_size - target_file_size) / source_file_size > 0.01:
+ if (source_file_size - target_file_size) / source_file_size > 0.05:
raise RuntimeError(
- f"""The file size different is more than 1%:
+ f"""The file size different is more than 5%:
- {source_file}: {source_file_size}
- {target_file}: {target_file_size}
"""
diff --git a/server/text_generation_server/utils/hub.py b/server/text_generation_server/utils/hub.py
--- a/server/text_generation_server/utils/hub.py
+++ b/server/text_generation_server/utils/hub.py
@@ -26,7 +26,10 @@ def weight_hub_files(
filenames = [
s.rfilename
for s in info.siblings
- if s.rfilename.endswith(extension) and len(s.rfilename.split("/")) == 1
+ if s.rfilename.endswith(extension)
+ and len(s.rfilename.split("/")) == 1
+ and "arguments" not in s.rfilename
+ and "args" not in s.rfilename
]
if not filenames:
| Safe Tensor converting fails for LLaMa 13B and 30B
### System Info
```
2023-06-15T04:27:53.010592Z INFO text_generation_launcher: Runtime environment: [30/661]
Target: x86_64-unknown-linux-gnu
Cargo version: 1.69.0
Commit sha: 5ce89059f8149eaf313c63e9ded4199670cd74bb
Docker label: sha-5ce8905
nvidia-smi:
Thu Jun 15 04:27:51 2023
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 515.65.07 Driver Version: 515.65.07 CUDA Version: 11.8 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 NVIDIA A100-SXM... On | 00000000:10:00.0 Off | Off |
| N/A 34C P0 86W / 400W | 25302MiB / 81920MiB | 0% Default |
| | | Disabled |
+-------------------------------+----------------------+----------------------+
| 1 NVIDIA A100-SXM... On | 00000000:16:00.0 Off | Off |
| N/A 30C P0 64W / 400W | 0MiB / 81920MiB | 0% Default |
| | | Disabled |
+-------------------------------+----------------------+----------------------+
| 2 NVIDIA A100-SXM... On | 00000000:49:00.0 Off | Off |
| N/A 31C P0 73W / 400W | 0MiB / 81920MiB | 0% Default |
| | | Disabled |
+-------------------------------+----------------------+----------------------+
| 3 NVIDIA A100-SXM... On | 00000000:4D:00.0 Off | Off |
| N/A 31C P0 71W / 400W | 0MiB / 81920MiB | 0% Default |
| | | Disabled |
+-------------------------------+----------------------+----------------------+
| 4 NVIDIA A100-SXM... On | 00000000:C5:00.0 Off | Off |
| N/A 34C P0 91W / 400W | 32900MiB / 81920MiB | 0% Default |
| | | Disabled |
+-------------------------------+----------------------+----------------------+
| 5 NVIDIA A100-SXM... On | 00000000:CA:00.0 Off | Off |
| N/A 34C P0 92W / 400W | 33044MiB / 81920MiB | 0% Default |
| | | Disabled |
+-------------------------------+----------------------+----------------------+
| 6 NVIDIA A100-SXM... On | 00000000:E3:00.0 Off | Off |
| N/A 33C P0 96W / 400W | 33044MiB / 81920MiB | 0% Default |
| | | Disabled |
+-------------------------------+----------------------+----------------------+
| 7 NVIDIA A100-SXM... On | 00000000:E7:00.0 Off | Off |
| N/A 35C P0 89W / 400W | 32900MiB / 81920MiB | 0% Default |
| | | Disabled |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
```
### Information
- [X] Docker
- [ ] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
I used the official script to convert llama weights 7B~65B and get the following error when launching 13B and 30B only
I ran this command
`CUDA_VISIBLE_DEVICES=2,3 TRANSFORMERS_CACHE=~/ckpts/cache/ FLASH_ATTENTION=1 text-generation-launcher --model-id ~/ckpts/llama-hf/30b/ --num-shard 2 --port XXXXX`
```
2023-06-15T02:42:46.121030Z INFO text_generation_launcher: Args { model_id: "~/ckpts/llama-hf/30b/", revision: None, sharded: None, num_shard: Some(2), quantize: None, trust_remote_code: false, max_concurrent_requests: 128, max_best_of: 2, max_stop_sequences: 4, max_input_length: 1000, max_total_tokens: 1512, max_batch_size: None, waiting_served_ratio: 1.2, max_batch_total_tokens: 32000, max_waiting_tokens: 20, port: 10208, shard_uds_path: "/tmp/text-generation-server", master_addr: "localhost", master_port: 29500, huggingface_hub_cache: Some("/data"), weights_cache_override: None, disable_custom_kernels: false, json_output: false, otlp_endpoint: None, cors_allow_origin: [], watermark_gamma: None, watermark_delta: None, env: false }
2023-06-15T02:42:46.121084Z INFO text_generation_launcher: Sharding model on 2 processes
2023-06-15T02:42:46.121251Z INFO text_generation_launcher: Starting download process.
2023-06-15T02:42:49.059320Z WARN download: text_generation_launcher: No safetensors weights found for model ~/ckpts/llama-hf/30b/ at revision None. Converting PyTorch weights to safetensors.
2023-06-15T02:42:49.059564Z  INFO download: text_generation_launcher: Convert ~/ckpts/llama-hf/30b/pytorch_model-00001-of-00007.bin to ~/ckpts/llama-hf/30b/model-00001-of-00007.safetensors.
2023-06-15T02:46:40.369256Z INFO download: text_generation_launcher: Convert: [1/7] -- Took: 0:03:51.309267
2023-06-15T02:46:40.369344Z INFO download: text_generation_launcher: Convert ~/ckpts/llama-hf/30b/pytorch_model-00002-of-00007.bin to ~/ckpts/llama-hf/30b/model-00002-of-00007.safetensors.
2023-06-15T02:50:24.190971Z INFO download: text_generation_launcher: Convert: [2/7] -- Took: 0:03:43.820986
2023-06-15T02:50:24.191183Z INFO download: text_generation_launcher: Convert ~/ckpts/llama-hf/30b/pytorch_model-00003-of-00007.bin to ~/ckpts/llama-hf/30b/model-00003-of-00007.safetensors.
2023-06-15T02:54:06.621353Z INFO download: text_generation_launcher: Convert: [3/7] -- Took: 0:03:42.429557
2023-06-15T02:54:06.621511Z  INFO download: text_generation_launcher: Convert ~/ckpts/llama-hf/30b/pytorch_model-00004-of-00007.bin to ~/ckpts/llama-hf/30b/model-00004-of-00007.safetensors.
2023-06-15T02:57:45.265631Z INFO download: text_generation_launcher: Convert: [4/7] -- Took: 0:03:38.643727
2023-06-15T02:57:45.265740Z INFO download: text_generation_launcher: Convert ~/ckpts/llama-hf/30b/pytorch_model-00005-of-00007.bin to ~/ckpts/llama-hf/30b/model-00005-of-00007.safetensors.
2023-06-15T03:01:38.281861Z INFO download: text_generation_launcher: Convert: [5/7] -- Took: 0:03:53.015641
2023-06-15T03:01:38.281982Z INFO download: text_generation_launcher: Convert ~/ckpts/llama-hf/30b/pytorch_model-00006-of-00007.bin to ~/ckpts/llama-hf/30b/model-00006-of-00007.safetensors.
2023-06-15T03:03:43.722396Z INFO download: text_generation_launcher: Convert: [6/7] -- Took: 0:02:05.440018
2023-06-15T03:03:43.722865Z INFO download: text_generation_launcher: Convert ~/ckpts/llama-hf/30b/pytorch_model-00007-of-00007.bin to ~/ckpts/llama-hf/30b/model-00007-of-00007.safetensors.
2023-06-15T03:04:36.163221Z ERROR text_generation_launcher: Download encountered an error: Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 151, in download_weights
utils.convert_files(local_pt_files, local_st_files)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/convert.py", line 84, in convert_files
convert_file(pt_file, sf_file)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/convert.py", line 65, in convert_file
check_file_size(pt_file, sf_file)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/convert.py", line 20, in check_file_size
raise RuntimeError(
RuntimeError: The file size different is more than 1%:
- ~/ckpts/llama-hf/30b/pytorch_model-00007-of-00007.bin: 5900895281
- ~/ckpts/llama-hf/30b/model-00007-of-00007.safetensors: 5687891896
Error: DownloadError
```
### Expected behavior
The launch works well for LLaMA 7B and 65B, so it's confusing why it won't work for 13B and 30B.
| Facing the same issue as of yesterday.
Hmm the `check_file_size` is pretty rough sanitation, the file might actually be OK but it's hard to tell without looking at the file.
You can try deactivating the check ? Remove the line 20 ?
Can you point to the actual model on the hub you're using too ? Because we don't have any issue with "official" checkpoints
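For context, a rough, simplified sketch of the size check the traceback above trips over (the real version in `convert.py` takes `Path` objects; the sizes below are the ones from the log, where the converted shard is about 3.6% smaller than the original `.bin`, which also explains the 1% to 5% change in the patch above):
```
def check_file_size(source_size: int, target_size: int, tolerance: float):
    diff = (source_size - target_size) / source_size
    if diff > tolerance:
        raise RuntimeError(f"size difference {diff:.1%} exceeds {tolerance:.0%}")

src, dst = 5900895281, 5687891896
check_file_size(src, dst, tolerance=0.05)   # ~3.6%: passes the relaxed 5% threshold
check_file_size(src, dst, tolerance=0.01)   # ~3.6%: raises with the old 1% threshold
```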
In my case, we're using https://huggingface.co/ausboss/llama-30b-supercot
Initially we had the issue with https://huggingface.co/OpenAssistant/oasst-sft-6-llama-30b-xor however with a restart it was working miraculously..
Line 65 is the line that actually needs to be removed in order to skip the check for the safetensor file size. And here is a temporary patch for Docker for anyone that wants to disable it:
Dockerfile:
```Dockerfile
FROM ghcr.io/huggingface/text-generation-inference:0.8.2
RUN sed '65d' /opt/conda/lib/python3.9/site-packages/text_generation_server/utils/convert.py | tee /opt/conda/lib/python3.9/site-packages/text_generation_server/utils/convert.py
```
@Narsil
I'm using the official LLaMa checkpoints that I converted using the official script.
The docker image I'm using is `https://ghcr.io/huggingface/text-generation-inference:latest`
and here's the environment I used for converting the llama ckpts:
- python 3.10
- torch 2.1
- transformers 4.30.1
- accelerate 0.20.3
- sentencepiece 0.1.99
- protobuf 3.20.0
The funny thing is the code works well with 7B and 65B models but fails for 13B and 30B
> Hmm the `check_file_size` is pretty rough sanitation, the file might actually be OK but it's hard to tell without looking at the file.
>
> You can try deactivating the check ? Remove the line 20 ?
Throws an InvalidHeaderDeserialization error. Loading the same files using LlamaForCausalLM works fine in a notebook.
> The funny thing is the code works well with 7B and 65B models but fails for 13B and 30B
I converted 30B like 3 times today for quantization purposes without a hitch.. (I'm not on docker though)
> Throws a InvalidHeaderDeserialization. Loading the same files using LlamaForCausalLM works fine in a notebook.
You're trying to load `pickle` files (which would work for `LlamaForCausalLM`), it seems
To reproduce the safetensors error with supercot:
```shell
git lfs install
git clone https://huggingface.co/ausboss/llama-30b-supercot
docker run -v ./llama-30b-supercot:/usr/src/llama-30b-supercot --gpus all --rm ghcr.io/huggingface/text-generation-inference:sha-5ce8905 --model-id "/usr/src/llama-30b-supercot" --quantize bitsandbytes --trust-remote-code
```
Note that the same behavior happens with the current latest tag as well as 0.8.2. It always fails for us on the first tensor, which is the smallest and is a pickle. | 2023-06-16T14:57:37 |
|
huggingface/text-generation-inference | 472 | huggingface__text-generation-inference-472 | [
"471"
] | ece7ffa40a7e167400e57d89b8a73751d095184c | diff --git a/server/text_generation_server/models/__init__.py b/server/text_generation_server/models/__init__.py
--- a/server/text_generation_server/models/__init__.py
+++ b/server/text_generation_server/models/__init__.py
@@ -230,20 +230,12 @@ def get_model(
)
elif model_type == "t5":
- if sharded:
- return T5Sharded(
- model_id,
- revision,
- quantize=quantize,
- trust_remote_code=trust_remote_code,
- )
- else:
- return Seq2SeqLM(
- model_id,
- revision,
- quantize=quantize,
- trust_remote_code=trust_remote_code,
- )
+ return T5Sharded(
+ model_id,
+ revision,
+ quantize=quantize,
+ trust_remote_code=trust_remote_code,
+ )
if sharded:
raise ValueError("sharded is not supported for AutoModel")
diff --git a/server/text_generation_server/utils/logits_process.py b/server/text_generation_server/utils/logits_process.py
--- a/server/text_generation_server/utils/logits_process.py
+++ b/server/text_generation_server/utils/logits_process.py
@@ -42,25 +42,31 @@ def __init__(
self.static_next_logprob = None
def __call__(self, scores):
- if self.cuda_graph is None:
- self.static_scores = scores
- self.cuda_graph = torch.cuda.CUDAGraph()
-
- with torch.cuda.graph(self.cuda_graph, pool=mempool):
- local_scores = self.static_scores
- for warper in self.warpers:
- local_scores = warper(None, local_scores)
-
- self.static_warped_scores = local_scores
- # Compute logprobs
- self.static_next_logprob = torch.log_softmax(
- self.static_warped_scores, -1
- )
-
- self.static_scores.copy_(scores)
- self.cuda_graph.replay()
-
- return self.static_warped_scores, self.static_next_logprob
+ if torch.cuda.is_available():
+ if self.cuda_graph is None:
+ self.static_scores = scores
+ self.cuda_graph = torch.cuda.CUDAGraph()
+
+ with torch.cuda.graph(self.cuda_graph, pool=mempool):
+ local_scores = self.static_scores
+ for warper in self.warpers:
+ local_scores = warper(None, local_scores)
+
+ self.static_warped_scores = local_scores
+ # Compute logprobs
+ self.static_next_logprob = torch.log_softmax(
+ self.static_warped_scores, -1
+ )
+
+ self.static_scores.copy_(scores)
+ self.cuda_graph.replay()
+
+ return self.static_warped_scores, self.static_next_logprob
+
+ # CPU branch
+ for warper in self.warpers:
+ scores = warper(None, scores)
+ return scores, torch.log_softmax(scores, -1)
@lru_cache(10)
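For readers unfamiliar with the capture/replay pattern the patch above now guards behind `torch.cuda.is_available()`, here is a stripped-down sketch (the "warper" is just `log_softmax`, the shapes are made up, and this is not the actual TGI class):
```
import torch

scores = torch.randn(4, 32000)

if torch.cuda.is_available():
    static_scores = scores.cuda()
    torch.log_softmax(static_scores, -1)            # eager warm-up before capture
    graph = torch.cuda.CUDAGraph()
    with torch.cuda.graph(graph):
        static_out = torch.log_softmax(static_scores, -1)

    # Later calls copy fresh data into the captured input buffer and replay.
    static_scores.copy_(torch.randn(4, 32000, device="cuda"))
    graph.replay()
    out = static_out
else:
    # CPU fallback, mirroring the new branch in the patch: just run the ops eagerly.
    out = torch.log_softmax(scores, -1)
```
Since `torch.cuda.CUDAGraph` requires a CUDA device, the original CUDA-only path could not run on CPU at all, which is what the added branch addresses.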
| endless Waiting for shard 0 to be ready... when using only 1 GPU (google/flan-t5-xl)
### System Info
ubuntu 20.04, CUDA 11.7, pytorch 2.0.1
text-generation-launcher docker latest
### Information
- [X] Docker
- [ ] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
1. Download the huggingface google/flan-t5-xl model to your local drive (for example, _$PWD/models/flan-t5-xl_)
2. export CUDA_VISIBLE_DEVICES=0
3. docker run --gpus 0 -p 8080:80 -v $PWD/models/flan-t5-xl:/data ghcr.io/huggingface/text-generation-inference:latest --model-id google/flan-t5-xl --sharded false --weights-cache-override /data
I see "Waiting for shard 0 to be ready..." forever. Both GPUs are idle.
**However, if I change to use 2 GPUs, with --num-shard 2, the model is loaded and distributed to 2 GPUs.**
### Expected behavior
I suppose that the Docker image should also work with 1 GPU, without sharding.
| 2023-06-19T15:47:49 |
||
huggingface/text-generation-inference | 579 | huggingface__text-generation-inference-579 | [
"555"
] | b4024edd4549ab647b02b8619b4072e33e64f1f9 | diff --git a/server/text_generation_server/models/flash_rw.py b/server/text_generation_server/models/flash_rw.py
--- a/server/text_generation_server/models/flash_rw.py
+++ b/server/text_generation_server/models/flash_rw.py
@@ -49,7 +49,13 @@ def __init__(
torch.distributed.barrier(group=self.process_group)
filenames = weight_files(model_id, revision=revision, extension=".safetensors")
- weights = Weights(filenames, device, dtype, process_group=self.process_group)
+ weights = Weights(
+ filenames,
+ device,
+ dtype,
+ process_group=self.process_group,
+ aliases={"transformer.word_embeddings.weight": ["lm_head.weight"]},
+ )
config.quantize = quantize
| Tied weight optimization for checkpoints doesn't work with text-generation-inference.
### System Info
Ubuntu 20.04
4 NVIDIA A10 GPUs
I think checkpoints saved after this feature was merged don't work with text-generation-inference.
https://github.com/huggingface/transformers/issues/23868
With Falcon models I'm getting "`lm_head` not found".
I'll add more details once I find minimal steps to reproduce.
### Information
- [X] Docker
- [ ] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
Save tiiuae/falcon-40b checkpoint using transformers==4.30.2
launch text-generation-inference server
(using transformers==4.27.4 works without issue)
### Expected behavior
Expect the text-generation-inference weight loader to be able to find the `lm_head` weight in the checkpoint. Note this may be a safetensor issue.
| Could you share the name of the affected model ?
It's simply a matter of weight naming, the conversion method here is a bit crude (but very efficient memory wise), we just need some weights renaming.
Hi @Narsil,
Just wanted to add some more detail as I have been dealing with this issue as well. If I load and save one of the falcon models:
```
model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-7b-instruct", trust_remote_code=True)
model.save_pretrained("/path/to/model")
```
Then copy over the tokenizer and use that saved model to start the text-generation-inference server:
```
docker run --gpus '"device=2,3"' --shm-size 1g -p 8080:80 -v /data:/data ghcr.io/huggingface/text-generation-inference:0.9 --model-id /path/to/model --num-shard 1 --max-input-length 9000 --max-total-tokens 10000 --max-best-of 8 --trust-remote-code --max-batch-prefill-tokens 9000
```
When transformers version = `4.30.2` I get an error that looks something like this:
```
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 78, in serve
server.serve(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 166, in serve
asyncio.run(
File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 647, in run_until_complete
return future.result()
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 133, in serve_inner
model = get_model(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/__init__.py", line 253, in get_model
return FlashRWSharded(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_rw.py", line 56, in __init__
model = FlashRWForCausalLM(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_rw_modeling.py", line 628, in __init__
self.transformer = FlashRWModel(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_rw_modeling.py", line 553, in __init__
self.word_embeddings = TensorParallelEmbedding(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/layers.py", line 280, in __init__
weight = weights.get_sharded(f"{prefix}.weight", dim=0)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 73, in get_sharded
filename, tensor_name = self.get_filename(tensor_name)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 49, in get_filename
raise RuntimeError(f"weight {tensor_name} does not exist")
RuntimeError: weight transformer.word_embeddings.weight does not exist
```
However using transformers version = `4.27.4` to load and save the model allows the tgi server to start up as expected from the locally saved weights.
I have also tried using the PR you've linked above but that does not solve the issue which I think is related to how the model weights are saved rather than how they are converted to safetensors. Maybe this issue belongs in the transformers repo?
Can you try with
```
ghcr.io/huggingface/text-generation-inference:sha-b4024ed
```
This is what got modified 4 days ago (We changed a bit how we choose the actual tensors to copy).
Yeah just tried that now it runs into the same issue. Here is the full output when pointing to the model saved with `4.30.2`:
```
2023-07-10T18:06:11.828477Z INFO text_generation_launcher: Args { model_id: "/data/test-models/falcon-7b-instruct", revision: None, sharded: None, num_shard: Some(1), quantize: None, dtype: None, trust_remote_code: true, max_concurrent_requests: 128, max_best_of: 8, max_stop_sequences: 4, max_input_length: 9000, max_total_tokens: 10000, waiting_served_ratio: 1.2, max_batch_prefill_tokens: 9000, max_batch_total_tokens: 16000, max_waiting_tokens: 20, hostname: "dc570373c710", port: 80, shard_uds_path: "/tmp/text-generation-server", master_addr: "localhost", master_port: 29500, huggingface_hub_cache: Some("/data"), weights_cache_override: None, disable_custom_kernels: false, json_output: false, otlp_endpoint: None, cors_allow_origin: [], watermark_gamma: None, watermark_delta: None, ngrok: false, ngrok_authtoken: None, ngrok_domain: None, ngrok_username: None, ngrok_password: None, env: false }
2023-07-10T18:06:11.828614Z INFO text_generation_launcher: Starting download process.
2023-07-10T18:06:13.292872Z WARN download: text_generation_launcher: No safetensors weights found for model /data/test-models/falcon-7b-instruct at revision None. Converting PyTorch weights to safetensors.
2023-07-10T18:06:27.323366Z INFO download: text_generation_launcher: Convert: [1/3] -- Took: 0:00:13.984885
2023-07-10T18:06:38.581181Z INFO download: text_generation_launcher: Convert: [2/3] -- Took: 0:00:11.257167
2023-07-10T18:06:53.949943Z INFO download: text_generation_launcher: Convert: [3/3] -- Took: 0:00:15.368202
2023-07-10T18:06:54.369737Z INFO text_generation_launcher: Successfully downloaded weights.
2023-07-10T18:06:54.369814Z WARN text_generation_launcher: `trust_remote_code` is set. Trusting that model `/data/test-models/falcon-7b-instruct` do not contain malicious code.
2023-07-10T18:06:54.369821Z WARN text_generation_launcher: Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure no malicious code has been contributed in a newer revision.
2023-07-10T18:06:54.370572Z INFO text_generation_launcher: Starting shard 0
2023-07-10T18:06:56.617177Z WARN shard-manager: text_generation_launcher: We're not using custom kernels.
rank=0
2023-07-10T18:06:56.818961Z ERROR shard-manager: text_generation_launcher: Error when initializing model
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 311, in __call__
return get_command(self)(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1130, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 778, in main
return _main(
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 216, in _main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1657, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 760, in invoke
return __callback(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 683, in wrapper
return callback(**use_params) # type: ignore
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 78, in serve
server.serve(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 166, in serve
asyncio.run(
File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 634, in run_until_complete
self.run_forever()
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 601, in run_forever
self._run_once()
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 1905, in _run_once
handle._run()
File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
> File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 133, in serve_inner
model = get_model(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/__init__.py", line 253, in get_model
return FlashRWSharded(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_rw.py", line 56, in __init__
model = FlashRWForCausalLM(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_rw_modeling.py", line 634, in __init__
self.transformer = FlashRWModel(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_rw_modeling.py", line 559, in __init__
self.word_embeddings = TensorParallelEmbedding(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/layers.py", line 280, in __init__
weight = weights.get_sharded(f"{prefix}.weight", dim=0)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 73, in get_sharded
filename, tensor_name = self.get_filename(tensor_name)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 49, in get_filename
raise RuntimeError(f"weight {tensor_name} does not exist")
RuntimeError: weight transformer.word_embeddings.weight does not exist
```
compared to pointing at the model saved with `4.27.4`:
```
2023-07-10T18:09:53.995176Z INFO text_generation_launcher: Args { model_id: "/data/test-models/falcon-7b-instruct-oldhf", revision: None, sharded: None, num_shard: Some(1), quantize: None, dtype: None, trust_remote_code: true, max_concurrent_requests: 128, max_best_of: 8, max_stop_sequences: 4, max_input_length: 9000, max_total_tokens: 10000, waiting_served_ratio: 1.2, max_batch_prefill_tokens: 9000, max_batch_total_tokens: 16000, max_waiting_tokens: 20, hostname: "9ae087ae066c", port: 80, shard_uds_path: "/tmp/text-generation-server", master_addr: "localhost", master_port: 29500, huggingface_hub_cache: Some("/data"), weights_cache_override: None, disable_custom_kernels: false, json_output: false, otlp_endpoint: None, cors_allow_origin: [], watermark_gamma: None, watermark_delta: None, ngrok: false, ngrok_authtoken: None, ngrok_domain: None, ngrok_username: None, ngrok_password: None, env: false }
2023-07-10T18:09:53.995278Z INFO text_generation_launcher: Starting download process.
2023-07-10T18:09:55.481628Z WARN download: text_generation_launcher: No safetensors weights found for model /data/test-models/falcon-7b-instruct-oldhf at revision None. Converting PyTorch weights to safetensors.
2023-07-10T18:10:09.814556Z INFO download: text_generation_launcher: Convert: [1/3] -- Took: 0:00:14.287668
2023-07-10T18:10:22.541350Z INFO download: text_generation_launcher: Convert: [2/3] -- Took: 0:00:12.726082
2023-07-10T18:10:37.206531Z INFO download: text_generation_launcher: Convert: [3/3] -- Took: 0:00:14.664828
2023-07-10T18:10:37.940380Z INFO text_generation_launcher: Successfully downloaded weights.
2023-07-10T18:10:37.940456Z WARN text_generation_launcher: `trust_remote_code` is set. Trusting that model `/data/test-models/falcon-7b-instruct-oldhf` do not contain malicious code.
2023-07-10T18:10:37.940464Z WARN text_generation_launcher: Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure no malicious code has been contributed in a newer revision.
2023-07-10T18:10:37.940728Z INFO text_generation_launcher: Starting shard 0
2023-07-10T18:10:40.184657Z WARN shard-manager: text_generation_launcher: We're not using custom kernels.
rank=0
2023-07-10T18:10:47.951716Z INFO text_generation_launcher: Waiting for shard 0 to be ready...
2023-07-10T18:10:50.542364Z INFO shard-manager: text_generation_launcher: Server started at unix:///tmp/text-generation-server-0
rank=0
2023-07-10T18:10:50.554101Z INFO text_generation_launcher: Shard 0 ready in 12.611513023s
2023-07-10T18:10:50.650702Z INFO text_generation_launcher: Starting Webserver
2023-07-10T18:10:50.728175Z WARN text_generation_router: router/src/main.rs:186: no pipeline tag found for model /data/test-models/falcon-7b-instruct-oldhf
2023-07-10T18:10:50.733065Z INFO text_generation_router: router/src/main.rs:205: Warming up model
2023-07-10T18:10:53.408244Z INFO text_generation_router: router/src/main.rs:214: Connected
2023-07-10T18:10:53.408264Z WARN text_generation_router: router/src/main.rs:219: Invalid hostname, defaulting to 0.0.0.0
```
Another thing I have noticed (though not sure if it is at all related), the newer version of transformers does not save the configuration and modeling python files when `save_pretrained` is used which results in an error when trying to load the saved model with `from_pretrained`
Edit: I believe this issue is unrelated; I can download those files, place them in the saved model, and load it up as expected. I have loaded both models with from_pretrained and compared `model.transformer.word_embeddings.weight` and `model.lm_head.weight`, and they are identical for both, so it would seem that none of the weights are being skipped over when using `save_pretrained`
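Comparing the in-memory modules only shows that the tie exists; a more direct check is to list which tensor names actually ended up in the converted shards, since that is what the loader's lookup (and the `weight transformer.word_embeddings.weight does not exist` error) depends on. A quick hedged sketch (the path is a placeholder):
```
import glob
from safetensors import safe_open

for shard in sorted(glob.glob("/path/to/model/*.safetensors")):
    with safe_open(shard, framework="pt") as f:
        names = [k for k in f.keys() if "word_embeddings" in k or "lm_head" in k]
        print(shard, names)
```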
I just think the previous weights are already saved.
The new PR doesn't fix it because we're still pointing to a `trust_remote_code` model, which doesn't have the heuristics to choose the weights better.
PR incoming.
Yeah they are I had just wanted to confirm that by loading the models directly.
Great to hear let me know if there's anything I can do to help!
> the newer version of transformers does not save the configuration and modeling python files when save_pretrained is used which results in an error when trying to load the saved model with from_pretrained
This is not new, you used only `AutoModelFor...from_pretrained(..).save_pretrained(...)`.
Which never saved anything else than the model.
You need to do the same with `AutoConfig` and `AutoTokenizer` if you want those files in. Or do `pipe = pipeline(...); pipe.save_pretrained(...")` | 2023-07-10T18:40:43 |
|
huggingface/text-generation-inference | 580 | huggingface__text-generation-inference-580 | [
"500"
] | b4024edd4549ab647b02b8619b4072e33e64f1f9 | diff --git a/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py b/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py
--- a/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py
@@ -20,12 +20,12 @@
FastLayerNorm,
get_linear,
)
+from safetensors import SafetensorError
def load_multi_mqa(
config, prefix: str, weights, bias: bool, head_size, num_heads, hidden_size
):
-
if config.quantize == "gptq":
return _load_multi_mqa_gptq(
config, prefix, weights, bias, head_size, num_heads, hidden_size
@@ -74,8 +74,17 @@ def _load_multi_mqa_gptq(
qzeros = torch.cat([q_tensor, kv_tensor], dim=1)
g_idx = weights.get_tensor(f"{prefix}.c_attn.g_idx")
- bits = weights.get_tensor("gptq_bits").item()
- groupsize = weights.get_tensor("gptq_groupsize").item()
+ try:
+ bits = weights.get_tensor("gptq_bits").item()
+ groupsize = weights.get_tensor("gptq_groupsize").item()
+ except SafetensorError as e:
+ try:
+ import os
+
+ bits = int(os.getenv("GPTQ_BITS"))
+ groupsize = int(os.getenv("GPTQ_GROUPSIZE"))
+ except Exception:
+ raise e
weight = (qweight, qzeros, scales, g_idx, bits, groupsize)
@@ -99,7 +108,6 @@ def _load_multi_mqa_gptq(
def _load_multi_mqa(
config, prefix: str, weights, bias: bool, head_size, num_heads, hidden_size
):
-
if any("c_attn" in k for k in weights.routing.keys()):
slice_ = weights._get_slice(f"{prefix}.c_attn.weight")
shape = slice_.get_shape()
diff --git a/server/text_generation_server/utils/weights.py b/server/text_generation_server/utils/weights.py
--- a/server/text_generation_server/utils/weights.py
+++ b/server/text_generation_server/utils/weights.py
@@ -1,6 +1,6 @@
from pathlib import Path
from typing import List, Dict, Optional
-from safetensors import safe_open
+from safetensors import safe_open, SafetensorError
import torch
@@ -120,8 +120,17 @@ def get_multi_weights_col(self, prefixes: List[str], quantize: str, dim: int):
torch.testing.assert_close(w2, w[0])
g_idx = w[0]
- bits = self.get_tensor("gptq_bits").item()
- groupsize = self.get_tensor("gptq_groupsize").item()
+ try:
+ bits = self.get_tensor("gptq_bits").item()
+ groupsize = self.get_tensor("gptq_groupsize").item()
+ except SafetensorError as e:
+ try:
+ import os
+
+                bits = int(os.getenv("GPTQ_BITS"))
+                groupsize = int(os.getenv("GPTQ_GROUPSIZE"))
+ except Exception:
+ raise e
weight = (qweight, qzeros, scales, g_idx, bits, groupsize)
else:
w = [self.get_sharded(f"{p}.weight", dim=0) for p in prefixes]
| Unable to load GPTQ weights
### System Info
Hi, I'm using the latest version of text-generation-inference (image sha-ae466a8) on Runpod => docker. When I try to load a GPTQ file from local disk with QUANTIZE = gptq, I get the following trace:
`2023-06-28T07:58:54.412515423-04:00 {"timestamp":"2023-06-28T11:58:54.412338Z","level":"ERROR","fields":{"message":"Error when initializing model\nTraceback (most recent call last):\n File \"/opt/conda/bin/text-generation-server\", line 8, in <module>\n sys.exit(app())\n File \"/opt/conda/lib/python3.9/site-packages/typer/main.py\", line 311, in __call__\n return get_command(self)(*args, **kwargs)\n File \"/opt/conda/lib/python3.9/site-packages/click/core.py\", line 1130, in __call__\n return self.main(*args, **kwargs)\n File \"/opt/conda/lib/python3.9/site-packages/typer/core.py\", line 778, in main\n return _main(\n File \"/opt/conda/lib/python3.9/site-packages/typer/core.py\", line 216, in _main\n rv = self.invoke(ctx)\n File \"/opt/conda/lib/python3.9/site-packages/click/core.py\", line 1657, in invoke\n return _process_result(sub_ctx.command.invoke(sub_ctx))\n File \"/opt/conda/lib/python3.9/site-packages/click/core.py\", line 1404, in invoke\n return ctx.invoke(self.callback, **ctx.params)\n File \"/opt/conda/lib/python3.9/site-packages/click/core.py\", line 760, in invoke\n return __callback(*args, **kwargs)\n File \"/opt/conda/lib/python3.9/site-packages/typer/main.py\", line 683, in wrapper\n return callback(**use_params) # type: ignore\n File \"/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py\", line 67, in serve\n server.serve(model_id, revision, sharded, quantize, trust_remote_code, uds_path)\n File \"/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py\", line 155, in serve\n asyncio.run(serve_inner(model_id, revision, sharded, quantize, trust_remote_code))\n File \"/opt/conda/lib/python3.9/asyncio/runners.py\", line 44, in run\n return loop.run_until_complete(main)\n File \"/opt/conda/lib/python3.9/asyncio/base_events.py\", line 634, in run_until_complete\n self.run_forever()\n File \"/opt/conda/lib/python3.9/asyncio/base_events.py\", line 601, in run_forever\n self._run_once()\n File \"/opt/conda/lib/python3.9/asyncio/base_events.py\", line 1905, in _run_once\n handle._run()\n File \"/opt/conda/lib/python3.9/asyncio/events.py\", line 80, in _run\n self._context.run(self._callback, *self._args)\n> File \"/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py\", line 124, in serve_inner\n model = get_model(model_id, revision, sharded, quantize, trust_remote_code)\n File \"/opt/conda/lib/python3.9/site-packages/text_generation_server/models/__init__.py\", line 185, in get_model\n return FlashLlama(\n File \"/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_llama.py\", line 61, in __init__\n weights = Weights(filenames, device, dtype, process_group=self.process_group)\n File \"/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py\", line 11, in __init__\n with safe_open(filename, framework=\"pytorch\") as f:\nFileNotFoundError: No such file or directory: \"/workspace/models/TheBloke_airoboros-33B-gpt4-1-4-GPTQ/airoboros-33b-gpt4-1-4-GPTQ-4bit-1g-act-order.safetensors\"\n"},"target":"text_generation_launcher","span":{"rank":0,"name":"shard-manager"},"spans":[{"rank":0,"name":"shard-manager"}]}`
**No such file or directory: \"/workspace/models/TheBloke_airoboros-33B-gpt4-1-4-GPTQ/airoboros-33b-gpt4-1-4-GPTQ-4bit-1g-act-order.safetensors**
I can confirm that the file is in fact present at that location.
Loading non-gptq safetensor files (without QUANTIZE gptq) works fine, so it seems specific to the GPTQ implementation.
GPTQ files are from this repository: https://huggingface.co/TheBloke/airoboros-33B-gpt4-1.4-GPTQ . Note: I've tried a few other GPTQ files and they also don't load.
Note that the file name won't exactly match the one on HuggingFace because it originally had more full stops in the file name which I thought were causing the issue. However, as you can see I tried renaming the file to simplify it and I'm still getting the error.
I'm running this on an A100 80GB, attached to network storage (where the gptq files are located). I've tried it with multiple instance types, and I don't *think* it's a Runpod issue.
Here's a screenshot of the settings I'm running with:

### Information
- [X] Docker
- [ ] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
1) Create a new Runpod Pod using the latest text-generation-inference docker image: ghcr.io/huggingface/text-generation-inference:latest
2) Configure the pod to load a GPTQ repository
3) When it tries to load, you'll see that it fails and goes into an infinite retry loop, shutting down the shard and restarting repeatedly.
### Expected behavior
GPTQ file should load and server should start.
| First of all this file might fail to load regardless because this repo pushes `gptq_bits` and `gptq_groupsize` into the file itself to be able to know what kind of quantization took place. Not sure theBloke ones have that.
And I highly doubt the error "FileNotFound" to be wrong. It *must* be the correct error and the file is not found.
If the file is there, maybe the docker is not executing in the same environment ? Maybe there's a tiny typo.
To clarify, I'm just giving it the repo folder name, and it's determining the file name **airoboros-33b-gpt4-1-4-GPTQ-4bit-1g-act-order.safetensors** by itself (and correctly doing so), so it clearly has access to the disk and to the file. Can you suggest some alternative GPTQ files or repos that I could test this with that work for you? Seeing if these load for me might narrow down the problem.
This should work: https://huggingface.co/huggingface/falcon-40b-gptq
Oh dear, now I get the dreaded "You are using a model of type RefinedWeb to instantiate a model of type ." error, AND the No such file or directory error...
`2023-06-28T11:03:17.918954322-04:00 {"timestamp":"2023-06-28T15:03:17.918720Z","level":"ERROR","fields":{"message":"Shard 0 failed to start:\nYou are using a model of type RefinedWeb to instantiate a model of type . This is not supported for all configurations of models and can yield errors.\nTraceback (most recent call last):\n\n File \"/opt/conda/bin/text-generation-server\", line 8, in <module>\n sys.exit(app())\n\n File \"/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py\", line 67, in serve\n server.serve(model_id, revision, sharded, quantize, trust_remote_code, uds_path)\n\n File \"/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py\", line 155, in serve\n asyncio.run(serve_inner(model_id, revision, sharded, quantize, trust_remote_code))\n\n File \"/opt/conda/lib/python3.9/asyncio/runners.py\", line 44, in run\n return loop.run_until_complete(main)\n\n File \"/opt/conda/lib/python3.9/asyncio/base_events.py\", line 647, in run_until_complete\n return future.result()\n\n File \"/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py\", line 124, in serve_inner\n model = get_model(model_id, revision, sharded, quantize, trust_remote_code)\n\n File \"/opt/conda/lib/python3.9/site-packages/text_generation_server/models/__init__.py\", line 220, in get_model\n return FlashRWSharded(\n\n File \"/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_rw.py\", line 51, in __init__\n weights = Weights(filenames, device, dtype, process_group=self.process_group)\n\n File \"/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py\", line 11, in __init__\n with safe_open(filename, framework=\"pytorch\") as f:\n\nFileNotFoundError: No such file or directory: `\"/workspace/models/huggingface_falcon-40b-gptq/model-00001-of-00003.safetensors\"\n\n"},"target":"text_generation_launcher"}`
(Once, again loading from networked disk).
Try a non network disk ? `No such file or directory: ` means somehow your network disk said the file didn't exist...
A non-network disk worked, thanks. The whole thing is very strange... It definitely has access to the disk and to the model. Anyway, for the moment I can just use directly attached disks. Thanks for your help!
(My next issue now is that I can't get TGI to download from private HuggingFace repos to get the models onto those directly attached disks, but I'll open that as a separate issue.)
HUGGING_FACE_HUB_TOKEN needs to be used to use a proper token.
Thanks @Narsil . Your comment helped me to figure out what I was doing wrong: I was trying to use HUGGINGFACE_HUB_TOKEN rather than HUGGING_FACE_HUB_TOKEN for this. Changing to HUGGING_FACE_HUB_TOKEN works. I'll also close the second issue with this outcome.
I have tried loading multiple quantized models (generated using GPTQforLLama, AutoGPTQ) from different developers (including Bloke).
This is the error I received the most -
RuntimeError: weight gptq_bits does not exist
With some models, I received -
RuntimeError: weight model.layers.0.self_attn.q_proj.g_idx does not exist
@ssmi153 Have you managed to run GPTQ models?
@GemsFord, TGI doesn't support any of those quantized models because they've got a custom quantization script which injects additional metadata into the model files. I've got a lot of respect for their developers, but I'm not a fan of this design choice. To quantize your own files you theoretically just execute `text-generation-server quantize [source-model-id] [target-folder]` from the CLI. However, I've recently tried this and found lots of issues with it: https://github.com/huggingface/text-generation-inference/issues/576 . The devs are very responsive, so hopefully they'll work out a better plan here.
Out of interest, I think this is the line in the code which causes the model loading problems: https://github.com/huggingface/text-generation-inference/blob/b4024edd4549ab647b02b8619b4072e33e64f1f9/server/text_generation_server/utils/weights.py#L123
Thanks @ssmi153. Very much clear.
> which injects additional metadata into the model files.
Do you have a solution to detect number of bits and groupsize at inference which doesn't require users to know this information ahead of time ?
The idea was to NOT require users to guess things. (and having to pass in a bunch of flags everywhere).
Adding `gptq_bits` and `gptq_groupsize` to existing checkpoints should be rather easy.
We could add flags again to allow reusing those but I honestly don't like it long term. (Every user needs to remember to specify the flags, and go on the model weights name and hope the actual values are listed somewhere/discoverable)
@OlivierDehaene Wdyt ?
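A hedged sketch of what adding that metadata to an existing single-file GPTQ checkpoint could look like (the path and the values 4/128 are placeholders; TGI reads the entries back as scalar tensors via `weights.get_tensor("gptq_bits").item()`, as in the patch above):
```
import torch
from safetensors.torch import load_file, save_file

path = "model.safetensors"                      # placeholder path
tensors = load_file(path)
tensors["gptq_bits"] = torch.tensor(4)          # placeholder values
tensors["gptq_groupsize"] = torch.tensor(128)
save_file(tensors, path)
```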
Please also allow to pass info through flags. Users like me just want to use quantized models which are already available. We can check the actual values they provide in example code. | 2023-07-11T08:37:28 |
|
huggingface/text-generation-inference | 582 | huggingface__text-generation-inference-582 | [
"541"
] | b4024edd4549ab647b02b8619b4072e33e64f1f9 | diff --git a/server/text_generation_server/models/custom_modeling/t5_modeling.py b/server/text_generation_server/models/custom_modeling/t5_modeling.py
--- a/server/text_generation_server/models/custom_modeling/t5_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/t5_modeling.py
@@ -1006,12 +1006,7 @@ def __init__(self, config: T5Config, weights):
super().__init__(config)
self.model_dim = config.d_model
- try:
- self.shared = TensorParallelEmbedding(prefix="shared", weights=weights)
- except RuntimeError:
- self.shared = TensorParallelEmbedding(
- prefix="encoder.embed_tokens", weights=weights
- )
+ self.shared = TensorParallelEmbedding(prefix="shared", weights=weights)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
diff --git a/server/text_generation_server/models/t5.py b/server/text_generation_server/models/t5.py
--- a/server/text_generation_server/models/t5.py
+++ b/server/text_generation_server/models/t5.py
@@ -55,7 +55,16 @@ def __init__(
torch.distributed.barrier(group=self.process_group)
filenames = weight_files(model_id, revision=revision, extension=".safetensors")
weights = Weights(
- filenames, device=device, dtype=dtype, process_group=self.process_group
+ filenames,
+ device=device,
+ dtype=dtype,
+ process_group=self.process_group,
+ aliases={
+ "shared.weight": [
+ "encoder.embed_tokens.weight",
+ "decoder.embed_tokens.weight",
+ ]
+ },
)
model = T5ForConditionalGeneration(config, weights)
| Custom model: RuntimeError: weight shared.weight does not exist
### System Info
```
Tue Jul 4 16:51:59 2023
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 530.41.03 Driver Version: 530.41.03 CUDA Version: 12.1 |
|-----------------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+======================+======================|
| 0 NVIDIA GeForce RTX 3090 Off| 00000000:21:00.0 On | N/A |
| 0% 51C P8 51W / 390W| 1047MiB / 24576MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
+---------------------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=======================================================================================|
| 0 N/A N/A 1762 G /usr/lib/xorg/Xorg 24MiB |
| 0 N/A N/A 2178 G /usr/bin/gnome-shell 83MiB |
| 0 N/A N/A 3994 G /usr/lib/xorg/Xorg 451MiB |
| 0 N/A N/A 4140 G /usr/bin/gnome-shell 50MiB |
| 0 N/A N/A 4827 G ...,WinRetrieveSuggestionsOnlyOnDemand 65MiB |
| 0 N/A N/A 5061 G ...9470975,14709274054277858675,262144 96MiB |
| 0 N/A N/A 35735 G /snap/thunderbird/339/thunderbird-bin 87MiB |
| 0 N/A N/A 36507 G ...sion,SpareRendererForSitePerProcess 40MiB |
| 0 N/A N/A 42817 G ...ures=SpareRendererForSitePerProcess 36MiB |
| 0 N/A N/A 47573 G ...ures=SpareRendererForSitePerProcess 92MiB |
| 0 N/A N/A 67787 G /usr/lib/firefox/firefox 11MiB |
+---------------------------------------------------------------------------------------+
```
### Information
- [X] Docker
- [ ] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
When launching TGI on custom model derived from `lmsys/fastchat-t5-3b-v1.0` with the following command:
`docker run --rm --network none --gpus 0 -p 8080:80 -v $volume:/data --pull always ghcr.io/huggingface/text-generation-inference:latest --model-id /data/fastchat-t5-3b-v1.0`
I got the following error message:
```
latest: Pulling from huggingface/text-generation-inference
Digest: sha256:29019a087e64ce951a6c9ca3b17a6823dfd9d25eeb56ec06c08150516fd60f0b
Status: Image is up to date for ghcr.io/huggingface/text-generation-inference:latest
2023-07-04T14:50:00.870189Z INFO text_generation_launcher: Args { model_id: "/data/fastchat-t5-3b-v1.0", revision: None, sharded: None, num_shard: None, quantize: None, dtype: None, trust_remote_code: false, max_concurrent_requests: 128, max_best_of: 2, max_stop_sequences: 4, max_input_length: 1024, max_total_tokens: 2048, waiting_served_ratio: 1.2, max_batch_prefill_tokens: 4096, max_batch_total_tokens: 16000, max_waiting_tokens: 20, port: 80, shard_uds_path: "/tmp/text-generation-server", master_addr: "localhost", master_port: 29500, huggingface_hub_cache: Some("/data"), weights_cache_override: None, disable_custom_kernels: false, json_output: false, otlp_endpoint: None, cors_allow_origin: [], watermark_gamma: None, watermark_delta: None, ngrok: false, ngrok_authtoken: None, ngrok_domain: None, ngrok_username: None, ngrok_password: None, env: false }
2023-07-04T14:50:00.870282Z INFO text_generation_launcher: Starting download process.
2023-07-04T14:50:02.000718Z INFO download: text_generation_launcher: Files are already present on the host. Skipping download.
2023-07-04T14:50:02.371986Z INFO text_generation_launcher: Successfully downloaded weights.
2023-07-04T14:50:02.372146Z INFO text_generation_launcher: Starting shard 0
2023-07-04T14:50:04.072895Z WARN shard-manager: text_generation_launcher: We're not using custom kernels.
rank=0
2023-07-04T14:50:04.214047Z ERROR shard-manager: text_generation_launcher: Error when initializing model
Traceback (most recent call last):
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/t5_modeling.py", line 1005, in __init__
self.shared = TensorParallelEmbedding(prefix="shared", weights=weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/layers.py", line 280, in __init__
weight = weights.get_sharded(f"{prefix}.weight", dim=0)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 73, in get_sharded
filename, tensor_name = self.get_filename(tensor_name)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 49, in get_filename
raise RuntimeError(f"weight {tensor_name} does not exist")
RuntimeError: weight shared.weight does not exist
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 311, in __call__
return get_command(self)(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1130, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 778, in main
return _main(
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 216, in _main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1657, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 760, in invoke
return __callback(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 683, in wrapper
return callback(**use_params) # type: ignore
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 78, in serve
server.serve(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 166, in serve
asyncio.run(
File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 634, in run_until_complete
self.run_forever()
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 601, in run_forever
self._run_once()
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 1905, in _run_once
handle._run()
File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
> File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 133, in serve_inner
model = get_model(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/__init__.py", line 279, in get_model
return T5Sharded(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/t5.py", line 61, in __init__
model = T5ForConditionalGeneration(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/t5_modeling.py", line 1007, in __init__
self.shared = TensorParallelEmbedding(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/layers.py", line 280, in __init__
weight = weights.get_sharded(f"{prefix}.weight", dim=0)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 73, in get_sharded
filename, tensor_name = self.get_filename(tensor_name)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 49, in get_filename
raise RuntimeError(f"weight {tensor_name} does not exist")
RuntimeError: weight encoder.embed_tokens.weight does not exist
rank=0
2023-07-04T14:50:04.673754Z ERROR text_generation_launcher: Shard 0 failed to start
2023-07-04T14:50:04.673779Z ERROR text_generation_launcher: Traceback (most recent call last):
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/t5_modeling.py", line 1005, in __init__
self.shared = TensorParallelEmbedding(prefix="shared", weights=weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/layers.py", line 280, in __init__
weight = weights.get_sharded(f"{prefix}.weight", dim=0)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 73, in get_sharded
filename, tensor_name = self.get_filename(tensor_name)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 49, in get_filename
raise RuntimeError(f"weight {tensor_name} does not exist")
RuntimeError: weight shared.weight does not exist
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 78, in serve
server.serve(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 166, in serve
asyncio.run(
File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 647, in run_until_complete
return future.result()
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 133, in serve_inner
model = get_model(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/__init__.py", line 279, in get_model
return T5Sharded(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/t5.py", line 61, in __init__
model = T5ForConditionalGeneration(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/t5_modeling.py", line 1007, in __init__
self.shared = TensorParallelEmbedding(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/layers.py", line 280, in __init__
weight = weights.get_sharded(f"{prefix}.weight", dim=0)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 73, in get_sharded
filename, tensor_name = self.get_filename(tensor_name)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 49, in get_filename
raise RuntimeError(f"weight {tensor_name} does not exist")
RuntimeError: weight encoder.embed_tokens.weight does not exist
2023-07-04T14:50:04.673806Z INFO text_generation_launcher: Shutting down shards
Error: ShardCannotStart
```
### Expected behavior
I'd like to run TGI on my custom model on my RTX-3090 GPU.
| I have the same problem when I'm using BLOOM combined with a LoRA adapter; then I receive this error.

**RuntimeError: weight word_embeddings.weight does not exist**
I've tried with the original BLOOM, but it does not happen there.
I got a similar error when I load WizardCoder with the quantize flag; without quantize everything is fine.
**RuntimeError: weight transformer.h.0.attn.c_attn.qweight does not exist**
run:
`text-generation-launcher --model-id wizardcoder --sharded false --port 8080 --quantize gptq`
Same with a LoRA merged falcon.
Happened to me as well, "fixed it" by reverting to the 0.8 version of the Docker container, so it seems 0.9 version specific.
@ckanaar thanks for the advice. It works for me too. | 2023-07-11T12:06:20 |
|
huggingface/text-generation-inference | 609 | huggingface__text-generation-inference-609 | [
"589"
] | b7327205a6f2f2c6349e75b8ea484e1e2823075a | diff --git a/server/text_generation_server/utils/convert.py b/server/text_generation_server/utils/convert.py
--- a/server/text_generation_server/utils/convert.py
+++ b/server/text_generation_server/utils/convert.py
@@ -94,6 +94,14 @@ def convert_files(pt_files: List[Path], sf_files: List[Path], discard_names: Lis
# We do this instead of using tqdm because we want to parse the logs with the launcher
for i, (pt_file, sf_file) in enumerate(zip(pt_files, sf_files)):
+ # Skip blacklisted files
+ if (
+ "arguments" in pt_file.name
+ or "args" in pt_file.name
+ or "training" in pt_file.name
+ ):
+ continue
+
start = datetime.datetime.now()
convert_file(pt_file, sf_file, discard_names)
elapsed = datetime.datetime.now() - start
| Can't load local flan-small models due to weight conversion failure
### System Info
OS Version:
Distributor ID: Ubuntu
Description: Ubuntu 20.04.3 LTS
Release: 20.04
Codename: focal
8 A-100 GPUS
Using latest text-generation-inference docker version.
I've run fine-tuning on a [Flan-T5-Small](https://huggingface.co/google/flan-t5-small) model and saved the checkpoint in my local directory. I've stored this local model checkpoint in my data2 volume and run the command as follows:
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data2 ghcr.io/huggingface/text-generation-inference:0.9 --model-id /data2/checkpoint-20 --num-shard $num_shard
But I run into errors with the converting weights as mentioned below.
### Information
- [X] Docker
- [ ] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
Run docker command above.
I get this error now:
2023-07-12T05:45:31.707548Z INFO text_generation_launcher: Args { model_id: "/data2/checkpoint-20", revision: None, sharded: None, num_shard: Some(2), quantize: None, dtype: None, trust_remote_code: false, max_concurrent_requests: 128, max_best_of: 2, max_stop_sequences: 4, max_input_length: 1024, max_total_tokens: 2048, waiting_served_ratio: 1.2, max_batch_prefill_tokens: 4096, max_batch_total_tokens: 16000, max_waiting_tokens: 20, hostname: "0341f92fe465", port: 80, shard_uds_path: "/tmp/text-generation-server", master_addr: "localhost", master_port: 29500, huggingface_hub_cache: Some("/data"), weights_cache_override: None, disable_custom_kernels: false, json_output: false, otlp_endpoint: None, cors_allow_origin: [], watermark_gamma: None, watermark_delta: None, ngrok: false, ngrok_authtoken: None, ngrok_domain: None, ngrok_username: None, ngrok_password: None, env: false }
2023-07-12T05:45:31.707602Z INFO text_generation_launcher: Sharding model on 2 processes
2023-07-12T05:45:31.707781Z INFO text_generation_launcher: Starting download process.
2023-07-12T05:45:33.261253Z WARN download: text_generation_launcher: No safetensors weights found for model /data2/checkpoint-20 at revision None. Converting PyTorch weights to safetensors.
2023-07-12T05:45:33.711218Z ERROR text_generation_launcher: Download encountered an error: Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 164, in download_weights
utils.convert_files(local_pt_files, local_st_files)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/convert.py", line 53, in convert_files
convert_file(pt_file, sf_file)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/convert.py", line 21, in convert_file
if "state_dict" in loaded:
TypeError: argument of type 'Seq2SeqTrainingArguments' is not iterable
Error: DownloadError
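For context (this is the likely root cause rather than something stated in the log): a Trainer checkpoint directory contains `training_args.bin`, which holds a pickled `Seq2SeqTrainingArguments` object rather than a weight state dict, so the converter's `"state_dict" in loaded` membership check fails on it. A quick way to see what such a file actually contains (checkpoint path taken from the report above):

```python
import torch

# `training_args.bin` in a Trainer checkpoint is a pickled training-arguments
# object, not a dict of weights, so `"state_dict" in loaded` raises TypeError.
loaded = torch.load("checkpoint-20/training_args.bin", map_location="cpu")
print(type(loaded))  # e.g. Seq2SeqTrainingArguments, not a state dict
```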
### Expected behavior
I would expect the local model to load just as models from the Hugging Face Hub do. Appreciate any help!
| 2023-07-13T16:57:54 |
||
huggingface/text-generation-inference | 661 | huggingface__text-generation-inference-661 | [
"612"
] | 5a1512c0253e759fb07142029127292d639ab117 | diff --git a/server/text_generation_server/utils/gptq/quantize.py b/server/text_generation_server/utils/gptq/quantize.py
--- a/server/text_generation_server/utils/gptq/quantize.py
+++ b/server/text_generation_server/utils/gptq/quantize.py
@@ -812,10 +812,13 @@ def inner(module, args):
tensor = weights.get_tensor(tensor_name)
setdeepattr(module, local_param, nn.Parameter(tensor))
else:
+ tensor = current_tensor.to(device=torch.device("cuda:0"))
+ if current_tensor.requires_grad:
+ tensor = nn.Parameter(tensor)
setdeepattr(
module,
local_param,
- nn.Parameter(current_tensor.to(device=torch.device("cuda:0"))),
+ tensor
)
return inner
| GPTQ Continuous Batching not working
### System Info
I pulled the latest commits a few hours back and built the Docker image locally. I tried to use GPTQ models such as TheBloke's 33B with the new changes to TGI regarding GPTQ. I am able to run inference with the model, but it seems to only serve 1 request at a time. I tried to issue 3 requests from 3 different devices, and it waits until one is finished before continuing to the next one.
When GPTQ was initially released in TGI, this was working (sort of, but still slow due to how things are for GPTQ). I tested it with a few models on the 0.8 version, but now it either seems to be a bug, or I would like to know if it's intended.
docker run --gpus all --shm-size 40g -p 1000:80 -v $volume:/data -e GPTQ_BITS=4 -e GPTQ_GROUPSIZE=128 hug:gptq --max-input-length 2048 --max-total-tokens 6000 --model-id TheBloke/WizardLM-33B-V1.0-Uncensored-GPTQ --revision gptq-4bit-128g-actorder_True --num-shard 4 --trust-remote-code --quantize gptq --max-batch-total-tokens 10000
Thank You!
### Information
- [ ] Docker
- [ ] The CLI directly
### Tasks
- [ ] An officially supported command
- [ ] My own modifications
### Reproduction
Running a GPTQ model and running inference against it from multiple devices at the same time
### Expected behavior
Perform continuous batching with gptq models.
| nvm it seems to work after i increased the --max-batch-total-tokens. I will do some more testing and close this asap if everything is good.
It seems like if there's a new request, it pauses the other request until it's ready to stream the answer, and then all streams can happen at the same time.
> It seems like if there's a new request, it pauses the other request until it's ready to stream
There are various parameters discoverable in `text-generation-launcher --help` that can manage how we sequence/stack/unstack operations.
What you said is correct, but there's a little more to it.
I tried it, but I couldn't get it to answer multiple queries at the same time :( Any guidance on this? What parameters should I use other than what I have? I have about 90 GB of VRAM to work with and would like to use a 33B model. Hopefully once that exllama PR gets merged it will boost the speeds, and quantization will be more usable for serving multi-user scenarios :)
It's all your settings.
We are using GPTQ with *many* concurrent users in prod.
But it all boils down to your hardware + settings
@Narsil I've got 4 A10Gs (96 GB VRAM total). Could you maybe give me the docker command for something you would be using to serve concurrent requests? I think I should be able to use pretty much the same numbers as you, and from there I can change them, but it would be nice to have a starting point since you are using this in prod.
This is what I was doing. Am I missing something?
docker run --gpus all --shm-size 40g -p 1000:80 -v $volume:/data -e GPTQ_BITS=4 -e GPTQ_GROUPSIZE=128 hug:gptq --max-input-length 2048 --max-total-tokens 6000 --model-id TheBloke/WizardLM-33B-V1.0-Uncensored-GPTQ --revision gptq-4bit-128g-actorder_True --num-shard 4 --trust-remote-code --quantize gptq --max-batch-total-tokens 17000
Also, is WizardCoder not supported for GPTQ yet? I get a missing-tensor error when using TheBloke's new quantized models that are supposed to be working with TGI. The Llama ones work, so I'm thinking WizardCoder is not yet implemented.
Thanks!
Nvm, I figured it out and yes it indeed works. The quality seems to be dropped significantly though but gptq works :)
Still would like to know about WizardCoder support.
Thanks!
> be dropped significantly though but gptq
Yes, it's a model-per-model thing, possibly linked to seeding and examples too (but it really seems to vary on a model-per-model basis from my very surface-level investigation).
WizardCoder is just GPTBigCode, no? (So it's supported, the same as the santacoder models are.)
@Narsil Hi! After your quantizing script upgrade, it is not possible to use it anymore. I just get the same error for every model I try to quantize:
weight lm_head.weight does not exist
I also saw that it says the model is loaded, but nothing actually gets loaded; then I realized it's loading empty weights. But yeah, I'm no longer able to quantize. I tried messing with the code and I also see that it's loading the whole thing on 1 GPU, so is there a way to use all the GPUs instead?
The error I'm seeing is with GPTBigCodeForCausalLM models. I can neither load the GPTQ versions nor quantize them using the script myself. I'm guessing they are not supported for GPTQ yet, as I am able to load them in float16 just fine.
Also, the config is not loading trust_remote_code here:
config = AutoConfig.from_pretrained(
    model_id,
    trust_remote_code=True,
    device_map="auto",
)
with init_empty_weights():
    model = AutoModelForCausalLM.from_config(config, trust_remote_code=True, torch_dtype=torch.float16)
model = model.eval()
I had to manually add it for it to work or else it throws the error.
Overall, if you want to test this, try to use a GPTQ version of StarChat or WizardCoder. Neither can be loaded, unfortunately. | 2023-07-20T11:16:55
|
huggingface/text-generation-inference | 664 | huggingface__text-generation-inference-664 | [
"636"
] | 362883f259f9e07e22ef7b9cc568e2a16cbacc9a | diff --git a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
--- a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
@@ -154,7 +154,7 @@ def _load_gqa(config, prefix: str, weights):
weight = weights.get_multi_weights_col(
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
quantize=config.quantize,
- dim=0
+ dim=0,
)
if config.quantize != "gptq":
@@ -168,7 +168,9 @@ def _load_gqa(config, prefix: str, weights):
config.hidden_size,
], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}"
- return TensorParallelColumnLinear(get_linear(weight, bias=None, quantize=config.quantize))
+ return TensorParallelColumnLinear(
+ get_linear(weight, bias=None, quantize=config.quantize)
+ )
class FlashLlamaAttention(torch.nn.Module):
diff --git a/server/text_generation_server/models/flash_causal_lm.py b/server/text_generation_server/models/flash_causal_lm.py
--- a/server/text_generation_server/models/flash_causal_lm.py
+++ b/server/text_generation_server/models/flash_causal_lm.py
@@ -39,6 +39,7 @@ def __init__(
device: torch.device,
):
self.block_size = BLOCK_SIZE
+ self.num_blocks = num_blocks
element_size = torch.tensor([], dtype=dtype).element_size()
x = self.block_size // element_size
@@ -714,7 +715,6 @@ def warmup(self, batch: FlashCausalLMBatch):
global CACHE_MANAGER
torch.cuda.empty_cache()
- torch.cuda.reset_peak_memory_stats(self.device)
try:
CACHE_MANAGER = CacheManager(
batch.blocks,
@@ -731,23 +731,20 @@ def warmup(self, batch: FlashCausalLMBatch):
f"You need to decrease `--max-batch-prefill-tokens`"
) from e
- # Inspired by the original implementation in [vllm](https://github.com/vllm-project/vllm)
- # Calculate the number of blocks that can be allocated with the
- # profiled peak memory.
torch.cuda.synchronize(self.device)
- peak_memory = torch.cuda.max_memory_reserved(self.device)
+ # Inspired by the original implementation in [vllm](https://github.com/vllm-project/vllm)
+ # Calculate the number of blocks that can be allocated with the free memory
dtype_size = torch.tensor([], dtype=self.dtype).element_size()
cache_block_size = BLOCK_SIZE * self.num_kv_heads * self.head_size
total_cache_size = self.num_layers * cache_block_size * 2 * dtype_size
- total_gpu_memory = torch.cuda.get_device_properties(self.device).total_memory
+ free_memory, _ = torch.cuda.mem_get_info(self.device)
- # 0.98 to add some wiggle room
num_blocks = (
- int((total_gpu_memory * 0.98 - peak_memory) // total_cache_size)
+ int(free_memory // total_cache_size)
# Add batch.blocks as we allocated it above, so it is included in the peak memory.
- + batch.blocks
+ + CACHE_MANAGER.num_blocks
)
del CACHE_MANAGER
diff --git a/server/text_generation_server/utils/gptq/quantize.py b/server/text_generation_server/utils/gptq/quantize.py
--- a/server/text_generation_server/utils/gptq/quantize.py
+++ b/server/text_generation_server/utils/gptq/quantize.py
@@ -864,8 +864,9 @@ def quantize(
)
with init_empty_weights():
- model = AutoModelForCausalLM.from_config(config, torch_dtype=torch.float16,
- trust_remote_code=trust_remote_code)
+ model = AutoModelForCausalLM.from_config(
+ config, torch_dtype=torch.float16, trust_remote_code=trust_remote_code
+ )
model = model.eval()
print("LOADED model")
| How to config vllm gpu_memory_utilization?
Hi team, I am trying to use the CodeGen2.5 7B model on TGI with an A100 40GB, and it gives me an out-of-memory error because of vLLM. I wonder if there is any way I can configure gpu_memory_utilization in the code so that vLLM does not reserve too much memory beforehand.
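For reference, the warmup change in the patch above sizes the KV cache from the GPU's free memory rather than a fixed utilization fraction. A standalone sketch of that arithmetic (the model dimensions, block size and fallback memory figure below are made up for illustration):

```python
import torch

# Illustrative model dimensions; substitute the real ones for your model.
num_layers, num_kv_heads, head_size = 32, 32, 128
BLOCK_SIZE = 16  # tokens per KV-cache block (assumed, for illustration)
dtype_size = torch.tensor([], dtype=torch.float16).element_size()  # 2 bytes

# One cache block across all layers stores both keys and values (factor 2).
cache_block_size = BLOCK_SIZE * num_kv_heads * head_size
total_cache_size = num_layers * cache_block_size * 2 * dtype_size

if torch.cuda.is_available():
    free_memory, _ = torch.cuda.mem_get_info(0)
else:
    free_memory = 20 * 2**30  # pretend 20 GiB are free when no GPU is present

num_blocks = int(free_memory // total_cache_size)
print(f"{num_blocks} blocks of {BLOCK_SIZE} tokens fit in free memory")
```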
| We're working on some quality-of-life improvements to help with that: https://github.com/huggingface/text-generation-inference/pull/630
otherwise, try to look at the error message, it should give you the name of the parameters you can tweak to fix the RAM issue
```
--max-total-tokens
--max-batch-prefill-tokens
--max-input-length
# and maybe a couple others I usually only tweak those
```
I'm writing from memory, so just check the error message for the correct names or `text-generation-launcher --help`
Hi @Narsil, thanks for the help. I tried to tweak the numbers; even when I set --max-batch-prefill-tokens=1 and --max-batch-total-tokens=2 it is still out of memory. What else can I do? For context, when I don't use flash attention on the Llama model, it works. When I use flash attention but not vLLM, it also works. So there must be something going on on the vLLM side.
Can you share a reproducible example ? And the full stacktrace ?
I have the same issue, the KV cache warmup causes an OOM | 2023-07-20T14:00:47
|
huggingface/text-generation-inference | 689 | huggingface__text-generation-inference-689 | [
"683"
] | 73a4d65d26801c550e0f1205800c002b147de84e | diff --git a/server/text_generation_server/server.py b/server/text_generation_server/server.py
--- a/server/text_generation_server/server.py
+++ b/server/text_generation_server/server.py
@@ -105,21 +105,21 @@ async def Decode(self, request, context):
def serve(
- model_id: str,
- revision: Optional[str],
- sharded: bool,
- quantize: Optional[str],
- dtype: Optional[str],
- trust_remote_code: bool,
- uds_path: Path,
-):
- async def serve_inner(
model_id: str,
revision: Optional[str],
- sharded: bool = False,
- quantize: Optional[str] = None,
- dtype: Optional[str] = None,
- trust_remote_code: bool = False,
+ sharded: bool,
+ quantize: Optional[str],
+ dtype: Optional[str],
+ trust_remote_code: bool,
+ uds_path: Path,
+):
+ async def serve_inner(
+ model_id: str,
+ revision: Optional[str],
+ sharded: bool = False,
+ quantize: Optional[str] = None,
+ dtype: Optional[str] = None,
+ trust_remote_code: bool = False,
):
unix_socket_template = "unix://{}-{}"
if sharded:
@@ -147,8 +147,10 @@ async def serve_inner(
# This will allocate those buffers.
from text_generation_server.utils.gptq.exllama import (
create_exllama_buffers,
+ set_device,
)
+ set_device(model.device)
create_exllama_buffers()
except ImportError:
pass
diff --git a/server/text_generation_server/utils/gptq/exllama.py b/server/text_generation_server/utils/gptq/exllama.py
--- a/server/text_generation_server/utils/gptq/exllama.py
+++ b/server/text_generation_server/utils/gptq/exllama.py
@@ -32,9 +32,16 @@ def ext_q4_matmul(x, q4, q4_width):
TEMP_DQ = None
+def set_device(device):
+ global DEVICE
+ DEVICE = device
+
+
def create_exllama_buffers():
global MAX_DQ, MAX_INNER, ACT_ORDER, DEVICE, TEMP_STATE, TEMP_DQ
+ assert DEVICE is not None, "call set_device first"
+
if ACT_ORDER:
# TODO: this should be set to rust side `max_total_tokens`, but TGI
# does not offer an API to expose this variable to python, as this variable
| exllama fails with sharded model
### System Info
image: sha-1da642b (released 2 days ago latest branch)
setup: a server with 2 RTX A6000 GPUs on it
256GB RAM
32 CPU cores
### Information
- [X] Docker
- [ ] The CLI directly
### Tasks
- [x] An officially supported command
- [ ] My own modifications
### Reproduction
```bash
#!/bin/bash
export GPTQ_BITS=4
export GPTQ_GROUPSIZE=128
export CUDA_VISIBLE_DEVICES=0,1
text-generation-launcher --model-id TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ \
--revision gptq-4bit-128g-actorder_True \
--quantize gptq \
--port 8080 \
--max-input-length 512 \
--max-best-of 4 \
--max-total-tokens 600 \
--max-batch-prefill-tokens 2048
```
OUTPUT:
2023-07-23T16:48:27.611908Z INFO text_generation_launcher: Sharding model on 2 processes
2023-07-23T16:48:27.611982Z INFO download: text_generation_launcher: Starting download process.
2023-07-23T16:48:28.856188Z INFO text_generation_launcher: Files are already present on the host. Skipping download.
2023-07-23T16:48:29.113858Z INFO download: text_generation_launcher: Successfully downloaded weights.
2023-07-23T16:48:29.114100Z INFO shard-manager: text_generation_launcher: Starting shard rank=0
2023-07-23T16:48:29.114562Z INFO shard-manager: text_generation_launcher: Starting shard rank=1
2023-07-23T16:48:39.123016Z INFO shard-manager: text_generation_launcher: Waiting for shard to be ready... rank=1
2023-07-23T16:48:39.123228Z INFO shard-manager: text_generation_launcher: Waiting for shard to be ready... rank=0
2023-07-23T16:48:47.629123Z ERROR shard-manager: text_generation_launcher: Shard complete standard error output:
You are using a model of type llama to instantiate a model of type . This is not supported for all configurations of models and can yield errors.
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 78, in serve
server.serve(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 180, in serve
asyncio.run(
File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 647, in run_until_complete
return future.result()
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 150, in serve_inner
create_exllama_buffers()
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/gptq/exllama.py", line 52, in create_exllama_buffers
prepare_buffers(DEVICE, temp_state, temp_dq)
TypeError: prepare_buffers(): incompatible function arguments. The following argument types are supported:
1. (arg0: torch.device, arg1: torch.Tensor, arg2: torch.Tensor) -> None
Invoked with: None, tensor([[0.]], dtype=torch.float16), tensor([[0.]], dtype=torch.float16)
rank=1
2023-07-23T16:48:47.727702Z ERROR text_generation_launcher: Shard 1 failed to start
2023-07-23T16:48:47.727724Z INFO text_generation_launcher: Shutting down shards
2023-07-23T16:48:47.730006Z ERROR shard-manager: text_generation_launcher: Shard complete standard error output:
You are using a model of type llama to instantiate a model of type . This is not supported for all configurations of models and can yield errors.
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 78, in serve
server.serve(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 180, in serve
asyncio.run(
File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 647, in run_until_complete
return future.result()
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 150, in serve_inner
create_exllama_buffers()
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/gptq/exllama.py", line 52, in create_exllama_buffers
prepare_buffers(DEVICE, temp_state, temp_dq)
TypeError: prepare_buffers(): incompatible function arguments. The following argument types are supported:
1. (arg0: torch.device, arg1: torch.Tensor, arg2: torch.Tensor) -> None
Invoked with: None, tensor([[0.]], dtype=torch.float16), tensor([[0.]], dtype=torch.float16)
rank=0
Error: ShardCannotStart
### Expected behavior
It works fine when I remove the CUDA_VISIBLE_DEVICES=0,1 and when it then runs on a single process.
| I can confirm this is happening with other models as well. I tried various models, including FreeWilly2 GPTQ by TheBloke, and it seems like only the gptq-4bit-128g-actorder_False branch is able to load; the rest all give the error above. | 2023-07-24T08:41:42
|
huggingface/text-generation-inference | 750 | huggingface__text-generation-inference-750 | [
"749"
] | 7766fee9b15b6f600ee73db7f8bd72f7c62f335e | diff --git a/server/text_generation_server/models/custom_modeling/opt_modeling.py b/server/text_generation_server/models/custom_modeling/opt_modeling.py
--- a/server/text_generation_server/models/custom_modeling/opt_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/opt_modeling.py
@@ -28,6 +28,7 @@
from transformers.modeling_utils import PreTrainedModel
from transformers import OPTConfig
from text_generation_server.utils.layers import (
+ FastLinear,
TensorParallelColumnLinear,
TensorParallelEmbedding,
TensorParallelRowLinear,
| NameError: name 'FastLinear' is not defined
### System Info
When running TGI on a custom OPT-based model TGI crashes with:
> NameError: name 'FastLinear' is not defined
I'm not exactly a Python expert, but it appears the `FastLinear` class is simply not imported in https://github.com/huggingface/text-generation-inference/blob/7766fee9b15b6f600ee73db7f8bd72f7c62f335e/server/text_generation_server/models/custom_modeling/opt_modeling.py#L30-L35
```
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 78, in serve
server.serve(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 184, in serve
asyncio.run(
File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 647, in run_until_complete
return future.result()
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 136, in serve_inner
model = get_model(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/__init__.py", line 235, in get_model
return OPTSharded(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/opt.py", line 60, in __init__
model = OPTForCausalLM(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/opt_modeling.py", line 748, in __init__
self.model = OPTModel(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/opt_modeling.py", line 690, in __init__
self.decoder = OPTDecoder(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/opt_modeling.py", line 445, in __init__
self.project_out = FastLinear.load(
NameError: name 'FastLinear' is not defined
```
### Information
- [X] Docker
- [X] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
```
$ docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.0.0 --model-id $model
```
### Expected behavior
The application should not crash.
| 2023-08-01T06:00:46 |
||
huggingface/text-generation-inference | 768 | huggingface__text-generation-inference-768 | [
"732"
] | 8b0d608f1f0e5495d7ca53528f602892d81758af | diff --git a/server/text_generation_server/utils/gptq/quantize.py b/server/text_generation_server/utils/gptq/quantize.py
--- a/server/text_generation_server/utils/gptq/quantize.py
+++ b/server/text_generation_server/utils/gptq/quantize.py
@@ -360,15 +360,21 @@ def free(self):
torch.cuda.empty_cache()
-def get_wikitext2(nsamples, seed, seqlen, model_id):
+def get_wikitext2(nsamples, seed, seqlen, model_id, trust_remote_code):
from datasets import load_dataset
traindata = load_dataset("wikitext", "wikitext-2-raw-v1", split="train")
testdata = load_dataset("wikitext", "wikitext-2-raw-v1", split="test")
- from transformers import AutoTokenizer
+ try:
+ tokenizer = AutoTokenizer.from_pretrained(
+ model_id, use_fast=False, trust_remote_code=trust_remote_code
+ )
+ except:
+ tokenizer = AutoTokenizer.from_pretrained(
+ model_id, use_fast=True, trust_remote_code=trust_remote_code
+ )
- tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
trainenc = tokenizer("\n\n".join(traindata["text"]), return_tensors="pt")
testenc = tokenizer("\n\n".join(testdata["text"]), return_tensors="pt")
@@ -386,18 +392,21 @@ def get_wikitext2(nsamples, seed, seqlen, model_id):
return trainloader, testenc
-def get_ptb(nsamples, seed, seqlen, model_id):
+def get_ptb(nsamples, seed, seqlen, model_id, trust_remote_code):
from datasets import load_dataset
traindata = load_dataset("ptb_text_only", "penn_treebank", split="train")
valdata = load_dataset("ptb_text_only", "penn_treebank", split="validation")
- from transformers import AutoTokenizer
-
try:
- tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
+ tokenizer = AutoTokenizer.from_pretrained(
+ model_id, use_fast=False, trust_remote_code=trust_remote_code
+ )
except:
- tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
+ tokenizer = AutoTokenizer.from_pretrained(
+ model_id, use_fast=True, trust_remote_code=trust_remote_code
+ )
+
trainenc = tokenizer("\n\n".join(traindata["sentence"]), return_tensors="pt")
testenc = tokenizer("\n\n".join(valdata["sentence"]), return_tensors="pt")
@@ -415,7 +424,7 @@ def get_ptb(nsamples, seed, seqlen, model_id):
return trainloader, testenc
-def get_c4(nsamples, seed, seqlen, model_id):
+def get_c4(nsamples, seed, seqlen, model_id, trust_remote_code):
from datasets import load_dataset
traindata = load_dataset(
@@ -433,12 +442,14 @@ def get_c4(nsamples, seed, seqlen, model_id):
use_auth_token=False,
)
- from transformers import AutoTokenizer
-
try:
- tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
+ tokenizer = AutoTokenizer.from_pretrained(
+ model_id, use_fast=False, trust_remote_code=trust_remote_code
+ )
except:
- tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
+ tokenizer = AutoTokenizer.from_pretrained(
+ model_id, use_fast=True, trust_remote_code=trust_remote_code
+ )
import random
@@ -481,18 +492,21 @@ def __init__(self, input_ids):
return trainloader, valenc
-def get_ptb_new(nsamples, seed, seqlen, model_id):
+def get_ptb_new(nsamples, seed, seqlen, model_id, trust_remote_code):
from datasets import load_dataset
traindata = load_dataset("ptb_text_only", "penn_treebank", split="train")
testdata = load_dataset("ptb_text_only", "penn_treebank", split="test")
- from transformers import AutoTokenizer
-
try:
- tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
+ tokenizer = AutoTokenizer.from_pretrained(
+ model_id, use_fast=False, trust_remote_code=trust_remote_code
+ )
except:
- tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
+ tokenizer = AutoTokenizer.from_pretrained(
+ model_id, use_fast=True, trust_remote_code=trust_remote_code
+ )
+
trainenc = tokenizer(" ".join(traindata["sentence"]), return_tensors="pt")
testenc = tokenizer(" ".join(testdata["sentence"]), return_tensors="pt")
@@ -510,7 +524,7 @@ def get_ptb_new(nsamples, seed, seqlen, model_id):
return trainloader, testenc
-def get_c4_new(nsamples, seed, seqlen, model_id):
+def get_c4_new(nsamples, seed, seqlen, model_id, trust_remote_code):
from datasets import load_dataset
traindata = load_dataset(
@@ -526,12 +540,14 @@ def get_c4_new(nsamples, seed, seqlen, model_id):
split="validation",
)
- from transformers import AutoTokenizer
-
try:
- tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
+ tokenizer = AutoTokenizer.from_pretrained(
+ model_id, use_fast=False, trust_remote_code=trust_remote_code
+ )
except:
- tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
+ tokenizer = AutoTokenizer.from_pretrained(
+ model_id, use_fast=True, trust_remote_code=trust_remote_code
+ )
import random
@@ -562,17 +578,17 @@ def __init__(self, input_ids):
return trainloader, valenc
-def get_loaders(name, nsamples=128, seed=0, seqlen=2048, model_id=""):
+def get_loaders(name, nsamples=128, seed=0, seqlen=2048, model_id="", trust_remote_code=False):
if "wikitext2" in name:
- return get_wikitext2(nsamples, seed, seqlen, model_id)
+ return get_wikitext2(nsamples, seed, seqlen, model_id, trust_remote_code)
if "ptb" in name:
if "new" in name:
- return get_ptb_new(nsamples, seed, seqlen, model_id)
- return get_ptb(nsamples, seed, seqlen, model_id)
+ return get_ptb_new(nsamples, seed, seqlen, model_id, trust_remote_code)
+ return get_ptb(nsamples, seed, seqlen, model_id, trust_remote_code)
if "c4" in name:
if "new" in name:
- return get_c4_new(nsamples, seed, seqlen, model_id)
- return get_c4(nsamples, seed, seqlen, model_id)
+ return get_c4_new(nsamples, seed, seqlen, model_id, trust_remote_code)
+ return get_c4(nsamples, seed, seqlen, model_id, trust_remote_code)
def find_layers(module, layers=(nn.Conv2d, nn.Linear), name=""):
@@ -906,7 +922,12 @@ def _unload():
seed = None
dataloader, testloader = get_loaders(
- dataset, nsamples=nsamples, seed=seed, model_id=model_id, seqlen=model.seqlen
+ dataset,
+ nsamples=nsamples,
+ seed=seed,
+ model_id=model_id,
+ seqlen=model.seqlen,
+ trust_remote_code=trust_remote_code
)
tick = time.time()
| No trust_remote_code passed to AutoTokenizer
### System Info
There is no trust_remote_code option passed in to the `AutoTokenizer` of the `get_wikitext2` function (https://github.com/huggingface/text-generation-inference/blob/main/server/text_generation_server/utils/gptq/quantize.py#L363). This causes a `trust_remote_code=False` issue when I try to load a base model and quantize it with GPTQ with the command below.
```
docker run \
-it \
--gpus all \
--shm-size 1g \
--pull always \
-v $volume:/data \
--entrypoint text-generation-server \
ghcr.io/huggingface/text-generation-inference:latest \
quantize \
--trust-remote-code \
/data/$model \
/data/$model-quantized
```
The current form of `get_wikitext2` looks like below:
```python
def get_wikitext2(nsamples, seed, seqlen, model_id):
    from datasets import load_dataset

    traindata = load_dataset("wikitext", "wikitext-2-raw-v1", split="train")
    testdata = load_dataset("wikitext", "wikitext-2-raw-v1", split="test")

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
    trainenc = tokenizer("\n\n".join(traindata["text"]), return_tensors="pt")
    testenc = tokenizer("\n\n".join(testdata["text"]), return_tensors="pt")

    import random

    random.seed(seed)
    trainloader = []
    for _ in range(nsamples):
        i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
        j = i + seqlen
        inp = trainenc.input_ids[:, i:j]
        tar = inp.clone()
        tar[:, :-1] = -100
        trainloader.append((inp, tar))
    return trainloader, testenc
```
### Information
- [ ] Docker
- [X] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
1. docker run \
-it \
--gpus all \
--shm-size 1g \
--pull always \
-v $volume:/data \
--entrypoint text-generation-server \
ghcr.io/huggingface/text-generation-inference:latest \
quantize \
--trust-remote-code \
/data/$model \
/data/$model-quantized
2. See error describe above related to `trust_remote_code=False`
### Expected behavior
I would expect the specified `trust_remote_code` option is passed into this function.
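For completeness, the call that needed the flag, mirroring what the patch above does inside the calibration-data loaders (the model id below is a placeholder):

```python
from transformers import AutoTokenizer

# Propagate the user-supplied trust_remote_code flag to the tokenizer so
# models with custom tokenizer code can be quantized too.
tokenizer = AutoTokenizer.from_pretrained(
    "my-org/my-custom-model",  # hypothetical model id
    use_fast=False,
    trust_remote_code=True,
)
```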
| 2023-08-03T15:11:43 |
||
huggingface/text-generation-inference | 785 | huggingface__text-generation-inference-785 | [
"784"
] | 16fadcec5711ff232977b38c74a1c8829af6a63b | diff --git a/server/text_generation_server/utils/gptq/quant_linear.py b/server/text_generation_server/utils/gptq/quant_linear.py
--- a/server/text_generation_server/utils/gptq/quant_linear.py
+++ b/server/text_generation_server/utils/gptq/quant_linear.py
@@ -263,7 +263,7 @@ def __init__(self, qweight, qzeros, scales, g_idx, bias, bits, groupsize):
self.groupsize = groupsize
self.outfeatures = qweight.shape[1]
- self.infeatures = qweight.shape[0] * 32 // 4
+ self.infeatures = qweight.shape[0] * 32 // bits
@classmethod
def new(cls, bits, groupsize, infeatures, outfeatures, bias):
| GPTQ used 8bits may have a bug
### System Info
docker image: ghcr.io/huggingface/text-generation-inference:0.9.4
### Information
- [X] Docker
- [ ] The CLI directly
### Tasks
- [X] An officially supported command
- [X] My own modifications
### Reproduction
When I modify the default bits (bits=4 -> bits=8) passed to the function quantize,
https://github.com/huggingface/text-generation-inference/blob/16fadcec5711ff232977b38c74a1c8829af6a63b/server/text_generation_server/cli.py#L219
then I get the error message shown below.
<img width="1093" alt="Snipaste_2023-08-07_12-10-11" src="https://github.com/huggingface/text-generation-inference/assets/32231230/466654af-a942-40d8-a68c-82d1dc392012">
### Expected behavior
I found a line of code in quant_linear.py that might be wrong.
https://github.com/huggingface/text-generation-inference/blob/16fadcec5711ff232977b38c74a1c8829af6a63b/server/text_generation_server/utils/gptq/quant_linear.py#L266
the "4" in "self.infeatures = qweight.shape[0] * 32 // 4" mean 4bits. When I use 8bits gptq, the code may modify as "self.infeatures = qweight.shape[0] * 32 // 8"
Finally, I changed this line of code to "self.infeatures = qweight.shape[0] * 32 // self.bits" and obtain gptq used 8bits model successfully.
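A standalone sanity check of that packing arithmetic (not TGI code; the shapes below are made up):

```python
# Each int32 row of `qweight` packs 32 // bits quantized values along the
# input-feature dimension, so recovering infeatures must use the real bit width.
def infeatures_from_qweight_rows(packed_rows: int, bits: int) -> int:
    return packed_rows * 32 // bits

# 4096 input features quantized to 8 bits occupy 1024 packed int32 rows...
assert infeatures_from_qweight_rows(1024, 8) == 4096
# ...while the hard-coded `// 4` would wrongly report twice as many features.
assert 1024 * 32 // 4 == 8192
```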
| 2023-08-07T10:19:18 |
||
huggingface/text-generation-inference | 794 | huggingface__text-generation-inference-794 | [
"787"
] | 1fdc88ee908beb8ae0afe17810a17b9b4d8848e2 | diff --git a/server/text_generation_server/models/__init__.py b/server/text_generation_server/models/__init__.py
--- a/server/text_generation_server/models/__init__.py
+++ b/server/text_generation_server/models/__init__.py
@@ -89,7 +89,7 @@ def get_model(
revision,
quantize=quantize,
dtype=dtype,
- dtypetrust_remote_code=trust_remote_code,
+ trust_remote_code=trust_remote_code,
)
if model_id.startswith("bigcode/"):
| small typo in galactica model loading
https://github.com/huggingface/text-generation-inference/blob/1fdc88ee908beb8ae0afe17810a17b9b4d8848e2/server/text_generation_server/models/__init__.py#L92
should be trust_remote_code
| 2023-08-08T10:09:22 |
||
huggingface/text-generation-inference | 795 | huggingface__text-generation-inference-795 | [
"531"
] | 0e8b47811e711b46fe80e6b4f5304186f83744d6 | diff --git a/server/text_generation_server/cli.py b/server/text_generation_server/cli.py
--- a/server/text_generation_server/cli.py
+++ b/server/text_generation_server/cli.py
@@ -171,14 +171,14 @@ def download_weights(
for p in local_pt_files
]
try:
- from transformers import AutoConfig
import transformers
+ import json
- config = AutoConfig.from_pretrained(
- model_id,
- revision=revision,
- )
- architecture = config.architectures[0]
+
+ config_filename = hf_hub_download(model_id, revision=revision, filename="config.json")
+ with open(config_filename, "r") as f:
+ config = json.load(f)
+ architecture = config["architectures"][0]
class_ = getattr(transformers, architecture)
| ImportError: CUDA is not available
### System Info
```
Mon Jul 3 13:44:40 2023
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 510.39.01 Driver Version: 510.39.01 CUDA Version: 11.6 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 NVIDIA GeForce ... On | 00000000:21:00.0 On | N/A |
| 0% 43C P8 48W / 390W | 728MiB / 24576MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
| 0 N/A N/A 2005 G /usr/lib/xorg/Xorg 24MiB |
| 0 N/A N/A 2248 G /usr/bin/gnome-shell 85MiB |
| 0 N/A N/A 6729 G /usr/lib/xorg/Xorg 291MiB |
| 0 N/A N/A 6876 G /usr/bin/gnome-shell 72MiB |
| 0 N/A N/A 7521 G ...veSuggestionsOnlyOnDemand 25MiB |
| 0 N/A N/A 7798 G ...600725580435595488,262144 98MiB |
| 0 N/A N/A 116591 G ...rbird/339/thunderbird-bin 87MiB |
| 0 N/A N/A 117215 G ...RendererForSitePerProcess 37MiB |
+-----------------------------------------------------------------------------+
```
### Information
- [X] Docker
- [ ] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
```
model=lmsys/fastchat-t5-3b-v1.0
num_shard=1
volume=/home/matthieu/Deployment/HF/TGI/data # share a volume with the Docker container to avoid downloading weights every run
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id $model --num-shard $num_shard
```
While launching the script above, I got the following error message:
```
2023-07-03T11:44:07.302804Z INFO text_generation_launcher: Args { model_id: "lmsys/fastchat-t5-3b-v1.0", revision: None, sharded: None, num_shard: Some(1), quantize: None, dtype: None, trust_remote_code: false, max_concurrent_requests: 128, max_best_of: 2, max_stop_sequences: 4, max_input_length: 1024, max_total_tokens: 2048, waiting_served_ratio: 1.2, max_batch_prefill_tokens: 4096, max_batch_total_tokens: 16000, max_waiting_tokens: 20, port: 80, shard_uds_path: "/tmp/text-generation-server", master_addr: "localhost", master_port: 29500, huggingface_hub_cache: Some("/data"), weights_cache_override: None, disable_custom_kernels: false, json_output: false, otlp_endpoint: None, cors_allow_origin: [], watermark_gamma: None, watermark_delta: None, ngrok: false, ngrok_authtoken: None, ngrok_domain: None, ngrok_username: None, ngrok_password: None, env: false }
2023-07-03T11:44:07.302933Z INFO text_generation_launcher: Starting download process.
2023-07-03T11:44:08.830897Z INFO download: text_generation_launcher: Files are already present on the host. Skipping download.
2023-07-03T11:44:09.105770Z INFO text_generation_launcher: Successfully downloaded weights.
2023-07-03T11:44:09.105971Z INFO text_generation_launcher: Starting shard 0
2023-07-03T11:44:10.796229Z WARN shard-manager: text_generation_launcher: We're not using custom kernels.
rank=0
2023-07-03T11:44:10.799492Z WARN shard-manager: text_generation_launcher: Could not import Flash Attention enabled models
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 311, in __call__
return get_command(self)(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1130, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 778, in main
return _main(
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 216, in _main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1657, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 760, in invoke
return __callback(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 683, in wrapper
return callback(**use_params) # type: ignore
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 64, in serve
from text_generation_server import server
File "<frozen importlib._bootstrap>", line 1058, in _handle_fromlist
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 12, in <module>
from text_generation_server.cache import Cache
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cache.py", line 3, in <module>
from text_generation_server.models.types import Batch
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 972, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
> File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/__init__.py", line 56, in <module>
raise ImportError("CUDA is not available")
ImportError: CUDA is not available
rank=0
2023-07-03T11:44:11.266975Z ERROR shard-manager: text_generation_launcher: Error when initializing model
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 311, in __call__
return get_command(self)(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1130, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 778, in main
return _main(
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 216, in _main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1657, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 760, in invoke
return __callback(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 683, in wrapper
return callback(**use_params) # type: ignore
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 78, in serve
server.serve(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 166, in serve
asyncio.run(
File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 634, in run_until_complete
self.run_forever()
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 601, in run_forever
self._run_once()
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 1905, in _run_once
handle._run()
File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
> File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 133, in serve_inner
model = get_model(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/__init__.py", line 274, in get_model
return T5Sharded(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/t5.py", line 46, in __init__
tokenizer = AutoTokenizer.from_pretrained(
File "/opt/conda/lib/python3.9/site-packages/transformers/models/auto/tokenization_auto.py", line 691, in from_pretrained
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/transformers/tokenization_utils_base.py", line 1825, in from_pretrained
return cls._from_pretrained(
File "/opt/conda/lib/python3.9/site-packages/transformers/tokenization_utils_base.py", line 1988, in _from_pretrained
tokenizer = cls(*init_inputs, **init_kwargs)
File "/opt/conda/lib/python3.9/site-packages/transformers/models/t5/tokenization_t5_fast.py", line 133, in __init__
super().__init__(
File "/opt/conda/lib/python3.9/site-packages/transformers/tokenization_utils_fast.py", line 114, in __init__
fast_tokenizer = convert_slow_tokenizer(slow_tokenizer)
File "/opt/conda/lib/python3.9/site-packages/transformers/convert_slow_tokenizer.py", line 1307, in convert_slow_tokenizer
return converter_class(transformer_tokenizer).converted()
File "/opt/conda/lib/python3.9/site-packages/transformers/convert_slow_tokenizer.py", line 445, in __init__
from .utils import sentencepiece_model_pb2 as model_pb2
File "/opt/conda/lib/python3.9/site-packages/transformers/utils/sentencepiece_model_pb2.py", line 91, in <module>
_descriptor.EnumValueDescriptor(
File "/opt/conda/lib/python3.9/site-packages/google/protobuf/descriptor.py", line 796, in __new__
_message.Message._CheckCalledFromGeneratedFile()
TypeError: Descriptors cannot not be created directly.
If this call came from a _pb2.py file, your generated code is out of date and must be regenerated with protoc >= 3.19.0.
If you cannot immediately regenerate your protos, some other possible workarounds are:
1. Downgrade the protobuf package to 3.20.x or lower.
2. Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python (but this will use pure-Python parsing and will be much slower).
More information: https://developers.google.com/protocol-buffers/docs/news/2022-05-06#python-updates
rank=0
2023-07-03T11:44:11.708776Z ERROR text_generation_launcher: Shard 0 failed to start
2023-07-03T11:44:11.708802Z ERROR text_generation_launcher: /opt/conda/lib/python3.9/site-packages/torch/cuda/__init__.py:107: UserWarning: CUDA initialization: Unexpected error from cudaGetDeviceCount(). Did you run some cuda functions before calling NumCudaDevices() that might have already set an error? Error 803: system has unsupported display driver / cuda driver combination (Triggered internally at /opt/conda/conda-bld/pytorch_1678402412426/work/c10/cuda/CUDAFunctions.cpp:109.)
return torch._C._cuda_getDeviceCount() > 0
/opt/conda/lib/python3.9/site-packages/bitsandbytes/cextension.py:33: UserWarning: The installed version of bitsandbytes was compiled without GPU support. 8-bit optimizers, 8-bit multiplication, and GPU quantization are unavailable.
warn("The installed version of bitsandbytes was compiled without GPU support. "
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 78, in serve
server.serve(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 166, in serve
asyncio.run(
File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 647, in run_until_complete
return future.result()
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 133, in serve_inner
model = get_model(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/__init__.py", line 274, in get_model
return T5Sharded(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/t5.py", line 46, in __init__
tokenizer = AutoTokenizer.from_pretrained(
File "/opt/conda/lib/python3.9/site-packages/transformers/models/auto/tokenization_auto.py", line 691, in from_pretrained
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/transformers/tokenization_utils_base.py", line 1825, in from_pretrained
return cls._from_pretrained(
File "/opt/conda/lib/python3.9/site-packages/transformers/tokenization_utils_base.py", line 1988, in _from_pretrained
tokenizer = cls(*init_inputs, **init_kwargs)
File "/opt/conda/lib/python3.9/site-packages/transformers/models/t5/tokenization_t5_fast.py", line 133, in __init__
super().__init__(
File "/opt/conda/lib/python3.9/site-packages/transformers/tokenization_utils_fast.py", line 114, in __init__
fast_tokenizer = convert_slow_tokenizer(slow_tokenizer)
File "/opt/conda/lib/python3.9/site-packages/transformers/convert_slow_tokenizer.py", line 1307, in convert_slow_tokenizer
return converter_class(transformer_tokenizer).converted()
File "/opt/conda/lib/python3.9/site-packages/transformers/convert_slow_tokenizer.py", line 445, in __init__
from .utils import sentencepiece_model_pb2 as model_pb2
File "/opt/conda/lib/python3.9/site-packages/transformers/utils/sentencepiece_model_pb2.py", line 91, in <module>
_descriptor.EnumValueDescriptor(
File "/opt/conda/lib/python3.9/site-packages/google/protobuf/descriptor.py", line 796, in __new__
_message.Message._CheckCalledFromGeneratedFile()
TypeError: Descriptors cannot not be created directly.
If this call came from a _pb2.py file, your generated code is out of date and must be regenerated with protoc >= 3.19.0.
If you cannot immediately regenerate your protos, some other possible workarounds are:
1. Downgrade the protobuf package to 3.20.x or lower.
2. Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python (but this will use pure-Python parsing and will be much slower).
More information: https://developers.google.com/protocol-buffers/docs/news/2022-05-06#python-updates
2023-07-03T11:44:11.708829Z INFO text_generation_launcher: Shutting down shards
Error: ShardCannotStart
```
### Expected behavior
Would this have failed because I am using CUDA 11.6 and not CUDA 11.8 or above as recommended?
| I have the same issue. @Matthieu-Tinycoaching do you have any progress on this? Thanks.
Can you add `--pull always` to your command (or use specific tags) ?
Hi @Narsil Thank you for your reply. I've tried a few different tags (0.9.2, 0.9.4, 1.0.0), and got similar errors complaining about the `protobuf` package version.
Do we support `fastchat-t5`?
Okay it seems it's been fixed with transformers. I'll update the version.
This particular tokenizer lacks a fast version, so we try to convert it, and the converter only supported protobuf==3.20 for a long time (because sentencepiece never upgraded).
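For anyone hitting this before an updated image lands, the protobuf error above lists two workarounds; a minimal sketch of the second one in Python (with the Docker image, the same variable would instead be passed to the container environment, e.g. via `-e`):
```python
import os

# Workaround 2 from the error message above: force the pure-Python protobuf parser so
# the generated-code version check is skipped (slower, but it unblocks the slow->fast
# tokenizer conversion). It has to be set before the generated *_pb2 module is imported.
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
```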
We now have multi version support in latest transformers. Fix incoming. | 2023-08-08T10:56:06 |
|
huggingface/text-generation-inference | 822 | huggingface__text-generation-inference-822 | [
"816"
] | 5df4c7c0d792738e0ed0fd3426770dea6d7233df | diff --git a/server/text_generation_server/utils/layers.py b/server/text_generation_server/utils/layers.py
--- a/server/text_generation_server/utils/layers.py
+++ b/server/text_generation_server/utils/layers.py
@@ -502,8 +502,6 @@ def _update_cos_sin_cache(self, dtype, device, seqlen):
self.inv_freq = _create_inv_freq(self.dim, newbase, self.inv_freq.device)
self._seq_len_cached = seqlen
t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype)
- if self.scaling_factor is not None:
- t /= self.scaling_factor
# Don't do einsum, it converts fp32 to fp16
# freqs = torch.einsum("i,j->ij", t, self.inv_freq)
| Unexpected results occur when using --rope-factor 2 --rope-scaling dynamic.
When I use --rope-factor 2 --rope-scaling dynamic to get LLaMA to reach a 4k context length, the results are far worse on both long and short inputs than when I do not use these parameters (with Chinese text, some garbled output even appears). When I carefully read the code, I found the logic here a bit strange; it does not seem like it should perform this operation. I tried removing these two lines of code, and the model seems to behave normally.
https://github.com/huggingface/text-generation-inference/blob/5df4c7c0d792738e0ed0fd3426770dea6d7233df/server/text_generation_server/utils/layers.py#L550
I'm not sure if my modification is correct, so I didn't submit a PR.
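For context, here is a minimal sketch (not TGI's code; the names and the dynamic-base formula follow the common NTK-aware implementation) of the two scaling schemes: linear scaling divides the positions by the factor, while dynamic scaling grows the rotary base with the sequence length. Dividing `t` by the factor in the dynamic path as well, as the removed lines did, stacks both adjustments on top of each other.
```python
import torch

def inv_freq(dim: int, base: float) -> torch.Tensor:
    # Standard RoPE inverse frequencies.
    return 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

def angles_linear(seqlen: int, dim: int, base: float, factor: float) -> torch.Tensor:
    # Linear / position-interpolation scaling: keep the base, squeeze the positions.
    t = torch.arange(seqlen, dtype=torch.float32) / factor
    return torch.outer(t, inv_freq(dim, base))

def angles_dynamic(seqlen: int, dim: int, base: float, factor: float, max_pos: int) -> torch.Tensor:
    # Dynamic NTK scaling: keep the positions, grow the base once seqlen exceeds max_pos.
    if seqlen > max_pos:
        base = base * ((factor * seqlen / max_pos) - (factor - 1)) ** (dim / (dim - 2))
    t = torch.arange(seqlen, dtype=torch.float32)  # note: not divided by factor as well
    return torch.outer(t, inv_freq(dim, base))

print(angles_linear(4096, 128, 10000.0, 2.0).shape)        # torch.Size([4096, 64])
print(angles_dynamic(4096, 128, 10000.0, 2.0, 2048).shape) # torch.Size([4096, 64])
```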
| 2023-08-11T13:31:55 |
||
huggingface/text-generation-inference | 851 | huggingface__text-generation-inference-851 | [
"843"
] | 737d5781e45fc2c297451773dea5ca1355b9a71d | diff --git a/server/text_generation_server/utils/watermark.py b/server/text_generation_server/utils/watermark.py
--- a/server/text_generation_server/utils/watermark.py
+++ b/server/text_generation_server/utils/watermark.py
@@ -19,8 +19,8 @@
from transformers import LogitsProcessor
from typing import List, Union
-GAMMA = os.getenv("WATERMARK_GAMMA", 0.5)
-DELTA = os.getenv("WATERMARK_DELTA", 2.0)
+GAMMA = float(os.getenv("WATERMARK_GAMMA", 0.5))
+DELTA = float(os.getenv("WATERMARK_DELTA", 2.0))
class WatermarkLogitsProcessor(LogitsProcessor):
| Watermarking bug
### System Info
Using Singularity with the container `text-generation-inference:1.0.0`.
I get this error:
```
Traceback (most recent call last):
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_causal_lm.py", line 727, in warmup
_, batch = self.generate_token(batch)
File "/opt/conda/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_causal_lm.py", line 834, in generate_token
next_input_ids, next_token_logprobs = batch.next_token_chooser(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/tokens.py", line 224, in __call__
scores = self.watermark_processor(input_ids, scores)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/logits_process.py", line 398, in __call__
scores[i : i + 1] = processor(input_ids[i : i + 1], scores[i : i + 1])
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/watermark.py", line 88, in __call__
greenlist_ids = self._get_greenlist_ids(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/watermark.py", line 64, in _get_greenlist_ids
greenlist_size = int(max_value * self.gamma)
ValueError: invalid literal for int() with base 10: '0.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50.50
```
I think `greenlist_size = int(max_value * self.gamma)` should be `greenlist_size = max_value * int(self.gamma)`.
I also tried setting only the env variables (`export WATERMARK_GAMMA=0.5 && export WATERMARK_DELTA=2.0`) and hit the exact same issue.
### Information
- [X] Docker
- [ ] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
```bash
export USE_FLASH_ATTENTION=True && text-generation-launcher \
--model-id /scratch//models/huggyllama-llama-7b \
--port 61697 \
--max-best-of 5 \
--max-total-tokens 2048 \
--max-input-length 1024 \
--max-batch-prefill-tokens 1024 \
--max-concurrent-requests 20 \
--sharded false \
--num-shard 1 \
--dtype float16 \
--disable-custom-kernels \
--watermark-gamma 0.5 \
--watermark-delta 2
```
### Expected behavior
It should multiply by the number `0.5`, not repeat the string `0.5`.
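A standalone reproduction of the underlying type error (not TGI code, just the `os.getenv` behaviour) makes the fix in the patch above clearer:
```python
import os

# os.getenv returns a str when the variable is set; without an explicit cast, gamma stays
# a string, and multiplying a str by an int repeats it instead of scaling it. That is
# where the long "0.50.50.5..." literal in the traceback comes from.
os.environ["WATERMARK_GAMMA"] = "0.5"

gamma = os.getenv("WATERMARK_GAMMA", 0.5)          # -> "0.5" (a string)
print(3 * gamma)                                   # -> "0.50.50.5"
# int(3 * gamma) then raises ValueError, exactly as in the traceback above.

gamma = float(os.getenv("WATERMARK_GAMMA", 0.5))   # the merged fix: cast to float
print(int(3 * gamma))                              # -> 1
```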
| 2023-08-15T19:25:59 |
||
huggingface/text-generation-inference | 860 | huggingface__text-generation-inference-860 | [
"826"
] | d9bceb8e6b2090e5ca7668de2c9beac5eb94ded6 | diff --git a/server/text_generation_server/models/rw.py b/server/text_generation_server/models/rw.py
--- a/server/text_generation_server/models/rw.py
+++ b/server/text_generation_server/models/rw.py
@@ -67,18 +67,6 @@ def forward(
self, input_ids, attention_mask, position_ids, past_key_values: Optional = None
) -> Tuple[torch.Tensor, List[Tuple[torch.Tensor, torch.Tensor]]]:
# Model Forward
- if past_key_values is not None:
- reshaped_past_key_values = []
- for layer in past_key_values:
- past_keys, past_values = layer
- reshaped_past_key_values.append(
- (
- past_keys.view(-1, *past_keys.shape[-2:]),
- past_values.view(-1, *past_values.shape[-2:]),
- )
- )
- past_key_values = reshaped_past_key_values
-
outputs = self.model.forward(
input_ids=input_ids,
attention_mask=attention_mask,
| falcon-rw-1b does not work
### System Info
An NVIDIA Tesla T4 GPU in Kubernetes running text-generation-inference version 1.0.0 (official image)
### Information
- [ ] Docker
- [X] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
Run
```
text-generation-launcher --model-id tiiuae/falcon-rw-1b --trust-remote-code --revision e4b9872bb803165eb22f0a867d4e6a64d34fce19
```
Once the model is loaded, calling the server gives error:
```
$ curl 127.0.0.1:80/generate_stream -X POST -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' -H 'Content-Type: application/json'
data:{"token":{"id":43398,"text":" chapel","logprob":-4.859375,"special":false},"generated_text":null,"details":null}
data:{"error":"Request failed during generation: Server error: not enough values to unpack (expected 4, got 3)","error_type":"generation"}
```
In the server, there is this error:
```
2023-08-11T21:57:42.634325Z ERROR text_generation_launcher: Method Decode encountered an error.
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 311, in __call__
return get_command(self)(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1130, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 778, in main
return _main(
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 216, in _main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1657, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 760, in invoke
return __callback(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 683, in wrapper
return callback(**use_params) # type: ignore
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 78, in serve
server.serve(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 184, in serve
asyncio.run(
File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 634, in run_until_complete
self.run_forever()
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 601, in run_forever
self._run_once()
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 1905, in _run_once
handle._run()
File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/opt/conda/lib/python3.9/site-packages/grpc_interceptor/server.py", line 159, in invoke_intercept_method
return await self.intercept(
> File "/opt/conda/lib/python3.9/site-packages/text_generation_server/interceptor.py", line 21, in intercept
return await response
File "/opt/conda/lib/python3.9/site-packages/opentelemetry/instrumentation/grpc/_aio_server.py", line 82, in _unary_interceptor
raise error
File "/opt/conda/lib/python3.9/site-packages/opentelemetry/instrumentation/grpc/_aio_server.py", line 73, in _unary_interceptor
return await behavior(request_or_iterator, context)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 98, in Decode
generations, next_batch = self.model.generate_token(batch)
File "/opt/conda/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/causal_lm.py", line 541, in generate_token
logits, past = self.forward(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/rw.py", line 82, in forward
outputs = self.model.forward(
File "/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-rw-1b/e4b9872bb803165eb22f0a867d4e6a64d34fce19/modeling_falcon.py", line 900, in forward
transformer_outputs = self.transformer(
File "/opt/conda/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-rw-1b/e4b9872bb803165eb22f0a867d4e6a64d34fce19/modeling_falcon.py", line 734, in forward
past_key_values = self._convert_to_rw_cache(past_key_values)
File "/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-rw-1b/e4b9872bb803165eb22f0a867d4e6a64d34fce19/modeling_falcon.py", line 622, in _convert_to_rw_cache
batch_size, num_heads, kv_length, head_dim = past_key_value[0][0].shape
ValueError: not enough values to unpack (expected 4, got 3)
```
### Expected behavior
No error
| Try running on main ?
It seems everything is working fine there. (also you don't seem to have flash attention enabled, that should help too `make install-flash-attn-v2`)
@Narsil I'm able to reproduce the issue on main (using the image for [sha-05dd14f](https://github.com/huggingface/text-generation-inference/pkgs/container/text-generation-inference/118561995?tag=sha-05dd14f)).
Note that the falcon models with "rw" in the name are configured with alibi set to true. Flash attention is [not used](https://github.com/huggingface/text-generation-inference/blob/05dd14fdb93f83ad5fde6d5b9cb6c21edef71aa1/server/text_generation_server/models/__init__.py#L217C44-L217C44) when alibi is enabled. The falcon models without alibi enabled (those without "rw" in the name) are running fine in TGI.
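A quick way to see which path a given Falcon checkpoint will take is to inspect its config; this sketch assumes the checkpoint exposes the `alibi` field that the dispatch code linked above checks:
```python
from transformers import AutoConfig

# The "rw" Falcon checkpoints ship alibi=true in their config, and the flash-attention
# path is only used when alibi is disabled.
config = AutoConfig.from_pretrained("tiiuae/falcon-rw-1b", trust_remote_code=True)
print(getattr(config, "alibi", False))  # expected: True for the "rw" checkpoints
```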
@Narsil Can you reopen the issue or should I create a new issue? | 2023-08-16T19:59:57 |
|
huggingface/text-generation-inference | 1,022 | huggingface__text-generation-inference-1022 | [
"1004"
] | c8a01d759173483efc2135c4e7506b23e14e7fc4 | diff --git a/server/text_generation_server/models/galactica.py b/server/text_generation_server/models/galactica.py
--- a/server/text_generation_server/models/galactica.py
+++ b/server/text_generation_server/models/galactica.py
@@ -80,6 +80,7 @@ def from_pb(
next_token_choosers = []
stopping_criterias = []
prefix_offsets = []
+ top_n_tokens = []
read_offsets = []
requests_idx_mapping = {}
@@ -96,6 +97,7 @@ def from_pb(
r.stopping_parameters, tokenizer
)
stopping_criterias.append(stopping_criteria)
+ top_n_tokens.append(r.top_n_tokens)
max_truncation = max(max_truncation, r.truncate)
max_decode_tokens += stopping_criteria.max_new_tokens
padding_right_offset = max(
@@ -129,6 +131,9 @@ def from_pb(
position_ids = tokenized_inputs["attention_mask"].long().cumsum(-1) - 1
position_ids.masked_fill_(tokenized_inputs["attention_mask"] == 0, 1)
all_input_ids = tokenized_inputs["input_ids"].T.split(1, dim=1)
+ top_n_tokens_tensor = torch.tensor(
+ top_n_tokens, device=device, dtype=torch.int64
+ )
max_tokens = len(inputs) * max_input_length + max_decode_tokens
@@ -146,6 +151,8 @@ def from_pb(
read_offsets=read_offsets,
next_token_choosers=next_token_choosers,
stopping_criterias=stopping_criterias,
+ top_n_tokens=top_n_tokens,
+ top_n_tokens_tensor=top_n_tokens_tensor,
max_input_length=max_input_length.item(),
padding_right_offset=padding_right_offset,
max_tokens=max_tokens,
| missing 2 required positional arguments: 'top_n_tokens' and 'top_n_tokens_tensor' for the galactica model
### System Info
v1.0.3
### Information
- [ ] Docker
- [ ] The CLI directly
### Tasks
- [ ] An officially supported command
- [ ] My own modifications
### Reproduction
A local Galactica model in a folder called `facebook-galactica-30b-gptq` won't be detected, since it fails this check: https://github.com/huggingface/text-generation-inference/blob/main/server/text_generation_server/models/__init__.py#L88
I suggest making it check `if "galactica" in model_id` instead, as sketched below.
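A small illustration of why the local path slips past the current check (the exact condition at the linked line is assumed to be a prefix match on the Hub id `facebook/galactica`):
```python
# Sketch only: a path-based model_id never matches a Hub-id prefix check.
local_id = "/models/facebook-galactica-30b-gptq"

print(local_id.startswith("facebook/galactica"))  # False -> local folder not detected
print("galactica" in local_id.lower())            # True  -> the substring check suggested above
```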
### Expected behavior
Expected TGI to detect the Galactica model.
| I changed the folder structure to `/models/facebook/galactica-30b-gptq/` only to discover another error.
```bash
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 63, in Warmup
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/galactica.py", line 135, in from_pb
return cls(
TypeError: __init__() missing 2 required positional arguments: 'top_n_tokens' and 'top_n_tokens_tensor'
``` | 2023-09-14T08:44:04 |
|
huggingface/text-generation-inference | 1,042 | huggingface__text-generation-inference-1042 | [
"1038"
] | 123749a3c999e32db798667041a4a9589d217c8e | diff --git a/server/text_generation_server/models/custom_modeling/t5_modeling.py b/server/text_generation_server/models/custom_modeling/t5_modeling.py
--- a/server/text_generation_server/models/custom_modeling/t5_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/t5_modeling.py
@@ -1032,9 +1032,17 @@ def __init__(self, config: T5Config, weights):
embed_tokens=self.shared,
)
- self.lm_head = TensorParallelHead.load(
- config, prefix="lm_head", weights=weights
- )
+ try:
+ self.lm_head = TensorParallelHead.load(
+ config, prefix="lm_head", weights=weights
+ )
+ except RuntimeError:
+ # Some models like t5-small were saved with shared weights unlike flan
+ # Since they are declared as the same arch we have no choice but hope
+ # that this is OK instead of using a proper flag.
+ self.lm_head = TensorParallelHead.load(
+ config, prefix="shared", weights=weights
+ )
def forward(
self,
| RuntimeError: weight lm_head.weight does not exist when loading T5 model into TGI
### System Info
Hello Team,
I am following the https://huggingface.co/docs/transformers/tasks/summarization tutorial for summarization. We already run a TGI server and wanted to check whether we can use it to serve this model for summarization. When we try to load `stevhliu/my_awesome_billsum_model` we get:
```
tgi-text_generation_inference-1 | 2023-09-19T23:06:42.808534Z INFO text_generation_launcher: Args { model_id: "stevhliu/my_awesome_billsum_model", revision: None, validation_workers: 2, sharded: None, num_shard: Some(1), quantize: None, dtype: None, trust_remote_code: true, max_concurrent_requests: 1, max_best_of: 2, max_stop_sequences: 20, max_input_length: 128, max_total_tokens: 512, waiting_served_ratio: 1.2, max_batch_prefill_tokens: 2048, max_batch_total_tokens: Some(2048), max_waiting_tokens: 20, hostname: "7499e2c9d3b5", port: 80, shard_uds_path: "/tmp/text-generation-server", master_addr: "localhost", master_port: 29500, huggingface_hub_cache: Some("/data"), weights_cache_override: None, disable_custom_kernels: false, cuda_memory_fraction: 1.0, json_output: false, otlp_endpoint: None, cors_allow_origin: [], watermark_gamma: None, watermark_delta: None, ngrok: false, ngrok_authtoken: None, ngrok_edge: None, env: false }
tgi-text_generation_inference-1 | 2023-09-19T23:06:42.808570Z WARN text_generation_launcher: `trust_remote_code` is set. Trusting that model `stevhliu/my_awesome_billsum_model` do not contain malicious code.
tgi-text_generation_inference-1 | 2023-09-19T23:06:42.808647Z INFO download: text_generation_launcher: Starting download process.
tgi-text_generation_inference-1 | 2023-09-19T23:06:44.587394Z INFO text_generation_launcher: Files are already present on the host. Skipping download.
tgi-text_generation_inference-1 |
tgi-text_generation_inference-1 | 2023-09-19T23:06:44.910979Z INFO download: text_generation_launcher: Successfully downloaded weights.
tgi-text_generation_inference-1 | 2023-09-19T23:06:44.911196Z INFO shard-manager: text_generation_launcher: Starting shard rank=0
tgi-inference_api-1 | INFO:botocore.credentials:Found credentials from IAM Role: DevEC2Role
tgi-inference_api-1 | I0919 23:06:46.473618889 1 ev_epoll1_linux.cc:121] grpc epoll fd: 20
tgi-inference_api-1 | I0919 23:06:46.475148764 1 socket_utils_common_posix.cc:407] Disabling AF_INET6 sockets because ::1 is not available.
tgi-inference_api-1 | I0919 23:06:46.475208015 1 socket_utils_common_posix.cc:336] TCP_USER_TIMEOUT is available. TCP_USER_TIMEOUT will be used thereafter
tgi-inference_api-1 | I0919 23:06:46.475286496 1 tcp_server_posix.cc:339] Failed to add :: listener, the environment may not support IPv6: UNKNOWN:Address family not supported by protocol {created_time:"2023-09-19T23:06:46.475177234+00:00", errno:97, os_error:"Address family not supported by protocol", syscall:"socket", target_address:"[::]:50051"}
tgi-text_generation_inference-1 | 2023-09-19T23:06:49.596335Z ERROR text_generation_launcher: Error when initializing model
tgi-text_generation_inference-1 | Traceback (most recent call last):
tgi-text_generation_inference-1 | File "/opt/conda/bin/text-generation-server", line 8, in <module>
tgi-text_generation_inference-1 | sys.exit(app())
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 311, in __call__
tgi-text_generation_inference-1 | return get_command(self)(*args, **kwargs)
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1130, in __call__
tgi-text_generation_inference-1 | return self.main(*args, **kwargs)
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 778, in main
tgi-text_generation_inference-1 | return _main(
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 216, in _main
tgi-text_generation_inference-1 | rv = self.invoke(ctx)
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1657, in invoke
tgi-text_generation_inference-1 | return _process_result(sub_ctx.command.invoke(sub_ctx))
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1404, in invoke
tgi-text_generation_inference-1 | return ctx.invoke(self.callback, **ctx.params)
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 760, in invoke
tgi-text_generation_inference-1 | return __callback(*args, **kwargs)
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 683, in wrapper
tgi-text_generation_inference-1 | return callback(**use_params) # type: ignore
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 78, in serve
tgi-text_generation_inference-1 | server.serve(
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 184, in serve
tgi-text_generation_inference-1 | asyncio.run(
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run
tgi-text_generation_inference-1 | return loop.run_until_complete(main)
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 634, in run_until_complete
tgi-text_generation_inference-1 | self.run_forever()
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 601, in run_forever
tgi-text_generation_inference-1 | self._run_once()
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 1905, in _run_once
tgi-text_generation_inference-1 | handle._run()
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
tgi-text_generation_inference-1 | self._context.run(self._callback, *self._args)
tgi-text_generation_inference-1 | > File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 136, in serve_inner
tgi-text_generation_inference-1 | model = get_model(
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/__init__.py", line 244, in get_model
tgi-text_generation_inference-1 | return T5Sharded(
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/t5.py", line 70, in __init__
tgi-text_generation_inference-1 | model = T5ForConditionalGeneration(config, weights)
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/t5_modeling.py", line 1035, in __init__
tgi-text_generation_inference-1 | self.lm_head = TensorParallelHead.load(
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/layers.py", line 207, in load
tgi-text_generation_inference-1 | weight = weights.get_tensor(f"{prefix}.weight")
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 65, in get_tensor
tgi-text_generation_inference-1 | filename, tensor_name = self.get_filename(tensor_name)
tgi-text_generation_inference-1 | File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 52, in get_filename
tgi-text_generation_inference-1 | raise RuntimeError(f"weight {tensor_name} does not exist")
tgi-text_generation_inference-1 | RuntimeError: weight lm_head.weight does not exist
tgi-text_generation_inference-1 |
tgi-text_generation_inference-1 | 2023-09-19T23:06:50.216724Z ERROR shard-manager: text_generation_launcher: Shard complete standard error output:
```
Also, we are using the following `docker-compose` file to bring up TGI; could you please comment on that as well?
```
version: '3.5'
services:
text_generation_inference:
image: ghcr.io/huggingface/text-generation-inference:0.9.4
command: >
--model-id stevhliu/my_awesome_billsum_model
--num-shard 1
--max-input-length 128
--max-total-tokens 512
--max-batch-prefill-tokens 2048
--max-batch-total-tokens 2048
--max-concurrent-requests 1
--max-stop-sequences 20
--trust-remote-code
shm_size: 1g
env_file:
- .env
ports:
- "8080:80"
volumes:
- ${VOLUME}:/data
- ${CERTIFICATE_VOLUME_DIRECTORY}:/cert
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities: [gpu]
```
### Information
- [X] Docker
- [ ] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
Following the official documentation for TGI (https://github.com/huggingface/text-generation-inference#docker) causes the same error:
```
model=t5-small
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.0.3 --model-id $model
```
```
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 53, in get_filename
raise RuntimeError(f"weight {tensor_name} does not exist")
RuntimeError: weight lm_head.weight does not exist
```
### Expected behavior
What is the best way to serve a T5 model on TGI?
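For what it's worth, the root cause is visible in the checkpoint itself: t5-small-style checkpoints tie the LM head to the shared embedding, so no separate `lm_head.weight` tensor is stored, which is why the patch above falls back to the `shared` prefix. A sketch of the check (the single-shard file name is an assumption):
```python
from safetensors import safe_open

# Inspect which tensors the converted checkpoint actually contains.
with safe_open("model.safetensors", framework="pt") as f:
    names = set(f.keys())

print("lm_head.weight" in names)  # False for t5-small-style checkpoints with tied weights
print("shared.weight" in names)   # True -> fall back to the shared embedding, as the patch above does
```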
| Try to use a more recent version of TGI; it contains many bug fixes along the lines of the one you describe.
Hi @Narsil, could you please reopen the issue, as I do not have permission to reopen it? The issue is still present on the latest version `text-generation-inference:1.0.3`, as mentioned in the `Reproduction` section. Could you please try that?
@Narsil @anindya-saha I got the same issue when loading a Bloomz model. I used the latest version of TGI as well.
|
huggingface/text-generation-inference | 1,061 | huggingface__text-generation-inference-1061 | [
"1056"
] | c5de7cd88679bc0331185c9cee75e4f68412243d | diff --git a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
--- a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
@@ -179,7 +179,7 @@ def _load_gqa(config, prefix: str, weights):
dim=0,
)
- if config.quantize != "gptq":
+ if config.quantize not in ["gptq", "awq"]:
weight = weight.to(dtype=weights.dtype).to(device=weights.device)
head_size = config.hidden_size // config.num_attention_heads
| AWQ models not loading
### System Info
When launching an AWQ model with the `latest` tag, TGI crashes.
**System ENV output:**
OS version info:
`Linux version 6.1.55-1-lts (linux-lts@archlinux) (gcc (GCC) 13.2.1 20230801, GNU ld (GNU Binutils) 2.41.0) #1 SMP PREEMPT_DYNAMIC Sat, 23 Sep 2023 16:57:15 +0000`
```
2023-09-25T20:40:10.483833Z INFO text_generation_launcher: Runtime environment:
Target: x86_64-unknown-linux-gnu
Cargo version: 1.70.0
Commit sha: c5de7cd88679bc0331185c9cee75e4f68412243d
Docker label: sha-c5de7cd
nvidia-smi:
Mon Sep 25 20:40:10 2023
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 535.113.01 Driver Version: 535.113.01 CUDA Version: 12.2 |
|-----------------------------------------+----------------------+----------------------+
| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+======================+======================|
| 0 NVIDIA GeForce RTX 3090 Off | 00000000:27:00.0 Off | N/A |
| 0% 23C P8 7W / 420W | 5MiB / 24576MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
| 1 NVIDIA GeForce RTX 3090 Off | 00000000:28:00.0 Off | N/A |
| 0% 26C P8 34W / 370W | 2MiB / 24576MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
+---------------------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=======================================================================================|
| No running processes found |
+---------------------------------------------------------------------------------------+
```
**Launch parameters:**
`docker run --gpus all --shm-size 1g -p 8085:80 -e HF_HUB_ENABLE_HF_TRANSFER=1 -e HUGGING_FACE_HUB_TOKEN= -v ssdStorage/docker/STATIC/tgi-data:/data ghcr.io/huggingface/text-generation-inference:latest --model-id TheBloke/Llama-2-70B-AWQ --quantize awq --num-shard 2 --max-input-length 3584 --max-total-tokens 4096`
**Error after launching:**
```
2023-09-25T20:34:02.974103Z INFO text_generation_launcher: Args { model_id: "TheBloke/Llama-2-70B-AWQ", revision: None, validation_workers: 2, sharded: None, num_shard: Some(2), quantize: Some(Awq), dtype: None, trust_remote_code: false, max_concurrent_requests: 128, max_best_of: 2, max_stop_sequences: 4, max_top_n_tokens: 5, max_input_length: 3584, max_total_tokens: 4096, waiting_served_ratio: 1.2, max_batch_prefill_tokens: 4096, max_batch_total_tokens: None, max_waiting_tokens: 20, hostname: "d71d6ffe8981", port: 80, shard_uds_path: "/tmp/text-generation-server", master_addr: "localhost", master_port: 29500, huggingface_hub_cache: Some("/data"), weights_cache_override: None, disable_custom_kernels: false, cuda_memory_fraction: 1.0, rope_scaling: None, rope_factor: None, json_output: false, otlp_endpoint: None, cors_allow_origin: [], watermark_gamma: None, watermark_delta: None, ngrok: false, ngrok_authtoken: None, ngrok_edge: None, env: false }
2023-09-25T20:34:02.974145Z INFO text_generation_launcher: Sharding model on 2 processes
2023-09-25T20:34:02.974320Z INFO download: text_generation_launcher: Starting download process.
2023-09-25T20:34:05.099936Z INFO text_generation_launcher: Files are already present on the host. Skipping download.
2023-09-25T20:34:05.377123Z INFO download: text_generation_launcher: Successfully downloaded weights.
2023-09-25T20:34:05.377401Z INFO shard-manager: text_generation_launcher: Starting shard rank=1
2023-09-25T20:34:05.377403Z INFO shard-manager: text_generation_launcher: Starting shard rank=0
2023-09-25T20:34:08.171845Z ERROR text_generation_launcher: Error when initializing model
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 311, in __call__
return get_command(self)(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1157, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 778, in main
return _main(
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 216, in _main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1688, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1434, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 783, in invoke
return __callback(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 683, in wrapper
return callback(**use_params) # type: ignore
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 82, in serve
server.serve(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 195, in serve
asyncio.run(
File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 634, in run_until_complete
self.run_forever()
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 601, in run_forever
self._run_once()
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 1905, in _run_once
handle._run()
File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
> File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 147, in serve_inner
model = get_model(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/__init__.py", line 187, in get_model
return FlashLlama(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_llama.py", line 68, in __init__
model = FlashLlamaForCausalLM(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 474, in __init__
self.model = FlashLlamaModel(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 412, in __init__
[
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 413, in <listcomp>
FlashLlamaLayer(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 349, in __init__
self.self_attn = FlashLlamaAttention(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 229, in __init__
self.query_key_value = load_attention(config, prefix, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 154, in load_attention
return _load_gqa(config, prefix, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 183, in _load_gqa
weight = weight.to(dtype=weights.dtype).to(device=weights.device)
AttributeError: 'tuple' object has no attribute 'to'
2023-09-25T20:34:08.315602Z ERROR text_generation_launcher: Error when initializing model
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 311, in __call__
return get_command(self)(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1157, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 778, in main
return _main(
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 216, in _main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1688, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1434, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 783, in invoke
return __callback(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 683, in wrapper
return callback(**use_params) # type: ignore
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 82, in serve
server.serve(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 195, in serve
asyncio.run(
File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 634, in run_until_complete
self.run_forever()
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 601, in run_forever
self._run_once()
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 1905, in _run_once
handle._run()
File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
> File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 147, in serve_inner
model = get_model(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/__init__.py", line 187, in get_model
return FlashLlama(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_llama.py", line 68, in __init__
model = FlashLlamaForCausalLM(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 474, in __init__
self.model = FlashLlamaModel(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 412, in __init__
[
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 413, in <listcomp>
FlashLlamaLayer(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 349, in __init__
self.self_attn = FlashLlamaAttention(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 229, in __init__
self.query_key_value = load_attention(config, prefix, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 154, in load_attention
return _load_gqa(config, prefix, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 183, in _load_gqa
weight = weight.to(dtype=weights.dtype).to(device=weights.device)
AttributeError: 'tuple' object has no attribute 'to'
2023-09-25T20:34:08.581202Z ERROR shard-manager: text_generation_launcher: Shard complete standard error output:
[W socket.cpp:426] [c10d] The server socket cannot be initialized on [::]:29500 (errno: 97 - Address family not supported by protocol).
[W socket.cpp:601] [c10d] The client socket cannot be initialized to connect to [localhost]:29500 (errno: 97 - Address family not supported by protocol).
[W socket.cpp:601] [c10d] The client socket cannot be initialized to connect to [localhost.localdomain]:29500 (errno: 97 - Address family not supported by protocol).
[W socket.cpp:601] [c10d] The client socket cannot be initialized to connect to [localhost.localdomain]:29500 (errno: 97 - Address family not supported by protocol).
[W socket.cpp:601] [c10d] The client socket cannot be initialized to connect to [localhost]:29500 (errno: 97 - Address family not supported by protocol).
[W socket.cpp:601] [c10d] The client socket cannot be initialized to connect to [localhost.localdomain]:29500 (errno: 97 - Address family not supported by protocol).
[W socket.cpp:601] [c10d] The client socket cannot be initialized to connect to [localhost.localdomain]:29500 (errno: 97 - Address family not supported by protocol).
You are using a model of type llama to instantiate a model of type . This is not supported for all configurations of models and can yield errors.
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 82, in serve
server.serve(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 195, in serve
asyncio.run(
File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 647, in run_until_complete
return future.result()
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 147, in serve_inner
model = get_model(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/__init__.py", line 187, in get_model
return FlashLlama(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_llama.py", line 68, in __init__
model = FlashLlamaForCausalLM(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 474, in __init__
self.model = FlashLlamaModel(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 412, in __init__
[
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 413, in <listcomp>
FlashLlamaLayer(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 349, in __init__
self.self_attn = FlashLlamaAttention(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 229, in __init__
self.query_key_value = load_attention(config, prefix, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 154, in load_attention
return _load_gqa(config, prefix, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 183, in _load_gqa
weight = weight.to(dtype=weights.dtype).to(device=weights.device)
AttributeError: 'tuple' object has no attribute 'to'
rank=0
2023-09-25T20:34:08.680899Z ERROR text_generation_launcher: Shard 0 failed to start
2023-09-25T20:34:08.680914Z INFO text_generation_launcher: Shutting down shards
2023-09-25T20:34:08.681213Z ERROR shard-manager: text_generation_launcher: Shard complete standard error output:
[W socket.cpp:601] [c10d] The client socket cannot be initialized to connect to [localhost]:29500 (errno: 97 - Address family not supported by protocol).
[W socket.cpp:601] [c10d] The client socket cannot be initialized to connect to [localhost.localdomain]:29500 (errno: 97 - Address family not supported by protocol).
[W socket.cpp:601] [c10d] The client socket cannot be initialized to connect to [localhost.localdomain]:29500 (errno: 97 - Address family not supported by protocol).
[W socket.cpp:601] [c10d] The client socket cannot be initialized to connect to [localhost]:29500 (errno: 97 - Address family not supported by protocol).
[W socket.cpp:601] [c10d] The client socket cannot be initialized to connect to [localhost.localdomain]:29500 (errno: 97 - Address family not supported by protocol).
[W socket.cpp:601] [c10d] The client socket cannot be initialized to connect to [localhost.localdomain]:29500 (errno: 97 - Address family not supported by protocol).
You are using a model of type llama to instantiate a model of type . This is not supported for all configurations of models and can yield errors.
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 82, in serve
server.serve(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 195, in serve
asyncio.run(
File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 647, in run_until_complete
return future.result()
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 147, in serve_inner
model = get_model(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/__init__.py", line 187, in get_model
return FlashLlama(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_llama.py", line 68, in __init__
model = FlashLlamaForCausalLM(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 474, in __init__
self.model = FlashLlamaModel(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 412, in __init__
[
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 413, in <listcomp>
FlashLlamaLayer(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 349, in __init__
self.self_attn = FlashLlamaAttention(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 229, in __init__
self.query_key_value = load_attention(config, prefix, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 154, in load_attention
return _load_gqa(config, prefix, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 183, in _load_gqa
weight = weight.to(dtype=weights.dtype).to(device=weights.device)
AttributeError: 'tuple' object has no attribute 'to'
rank=1
Error: ShardCannotStart
```
### Information
- [X] Docker
- [ ] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
Steps to reproduce:
1. Using the `latest` tag Docker container (as of 25/09/2023 at 20:45 UTC), launch with any AWQ model.
### Expected behavior
Model launches and is ready for batch inferencing.
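The traceback ends in `AttributeError: 'tuple' object has no attribute 'to'` because, for quantized checkpoints, the column loader returns a tuple of quantization tensors rather than a single weight tensor, and `_load_gqa` only skipped the dtype/device cast for `gptq`. The patch at the top of this entry extends the skip to `awq`; a minimal sketch of the guard (the tuple contents are illustrative):
```python
import torch

def cast_weight(weight, quantize, dtype, device):
    # Unquantized loads give a plain tensor; gptq/awq loads give a tuple of quantization
    # tensors (e.g. qweight, qzeros, scales) that must not be cast here.
    if quantize not in ["gptq", "awq"]:
        weight = weight.to(dtype=dtype).to(device=device)
    return weight

# Illustration only: a fake "awq" weight tuple passes through untouched.
fake_awq = (torch.zeros(2, 2, dtype=torch.int32), torch.zeros(2), torch.ones(2))
print(type(cast_weight(fake_awq, "awq", torch.float16, "cpu")))          # <class 'tuple'>
print(cast_weight(torch.ones(2, 2), None, torch.float16, "cpu").dtype)   # torch.float16
```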
| Thanks @HarmonyTechLabs for reporting this. Another user @RonanKMcGovern also [reported](https://github.com/huggingface/text-generation-inference/issues/781#issuecomment-1733961039) issues with Llama 2 70B. Please note I was able to load Llama 2 7B successfully.
Llama 2 models from 34B onwards implement GQA (grouped-query attention), so this could be related to that.
Can you try loading 34B and report here?
Thanks.
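A quick way to confirm whether a checkpoint takes the grouped-query-attention path mentioned above is to compare the head counts in its config (field names as used by recent Llama configs; the 70B values shown are what I would expect):
```python
from transformers import AutoConfig

# GQA models declare fewer key/value heads than attention heads; MHA models declare equal counts.
cfg = AutoConfig.from_pretrained("TheBloke/Llama-2-70B-AWQ")
n_heads = cfg.num_attention_heads
n_kv = getattr(cfg, "num_key_value_heads", n_heads)
print(n_heads, n_kv, "GQA" if n_kv < n_heads else "MHA")  # expected: 64 8 GQA
```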
Sure thing! I had a thought: could this be an issue with sharding 70B across two cards? I'll go test 34B right now.
@abhinavkulkarni, no luck with 34b code llama. Here is the output. Please let me know if you'd like me to test anything further. More than happy to lend my hardware for testing!
```
2023-09-26T05:42:22.357111Z INFO text_generation_launcher: Args { model_id: "TheBloke/CodeLlama-34B-Instruct-AWQ", revision: None, validation_workers: 2, sharded: None, num_shard: None, quantize: Some(BitsandbytesNF4), dtype: None, trust_remote_code: false, max_concurrent_requests: 128, max_best_of: 2, max_stop_sequences: 4, max_top_n_tokens: 5, max_input_length: 3584, max_total_tokens: 4096, waiting_served_ratio: 1.2, max_batch_prefill_tokens: 4096, max_batch_total_tokens: None, max_waiting_tokens: 20, hostname: "6014ac3a1c4b", port: 80, shard_uds_path: "/tmp/text-generation-server", master_addr: "localhost", master_port: 29500, huggingface_hub_cache: Some("/data"), weights_cache_override: None, disable_custom_kernels: false, cuda_memory_fraction: 1.0, rope_scaling: None, rope_factor: None, json_output: false, otlp_endpoint: None, cors_allow_origin: [], watermark_gamma: None, watermark_delta: None, ngrok: false, ngrok_authtoken: None, ngrok_edge: None, env: false }
2023-09-26T05:42:22.357317Z INFO download: text_generation_launcher: Starting download process.
2023-09-26T05:42:24.749317Z INFO text_generation_launcher: Download file: model-00001-of-00002.safetensors
2023-09-26T05:43:50.058299Z INFO text_generation_launcher: Downloaded /data/models--TheBloke--CodeLlama-34B-Instruct-AWQ/snapshots/9f2e460ded6babaee180853dea1d04cff2882128/model-00001-of-00002.safetensors in 0:01:25.
2023-09-26T05:43:50.058408Z INFO text_generation_launcher: Download: [1/2] -- ETA: 0:01:25
2023-09-26T05:43:50.058809Z INFO text_generation_launcher: Download file: model-00002-of-00002.safetensors
2023-09-26T05:45:01.701470Z ERROR text_generation_launcher: An error occurred while downloading using `hf_transfer`. Consider disabling HF_HUB_ENABLE_HF_TRANSFER for better error handling.
2023-09-26T05:45:01.701499Z INFO text_generation_launcher: Retrying in 5 seconds
2023-09-26T05:45:06.705947Z INFO text_generation_launcher: Retry 1/4
2023-09-26T05:45:06.706000Z INFO text_generation_launcher: Download file: model-00002-of-00002.safetensors
2023-09-26T05:46:18.480839Z INFO text_generation_launcher: Downloaded /data/models--TheBloke--CodeLlama-34B-Instruct-AWQ/snapshots/9f2e460ded6babaee180853dea1d04cff2882128/model-00002-of-00002.safetensors in 0:01:11.
2023-09-26T05:46:18.482879Z INFO text_generation_launcher: Download: [2/2] -- ETA: 0
2023-09-26T05:46:18.899143Z INFO download: text_generation_launcher: Successfully downloaded weights.
2023-09-26T05:46:18.899692Z INFO shard-manager: text_generation_launcher: Starting shard rank=0
2023-09-26T05:46:22.283146Z ERROR text_generation_launcher: Error when initializing model
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 311, in __call__
return get_command(self)(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1157, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 778, in main
return _main(
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 216, in _main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1688, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1434, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 783, in invoke
return __callback(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 683, in wrapper
return callback(**use_params) # type: ignore
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 82, in serve
server.serve(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 195, in serve
asyncio.run(
File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 634, in run_until_complete
self.run_forever()
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 601, in run_forever
self._run_once()
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 1905, in _run_once
handle._run()
File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
> File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 147, in serve_inner
model = get_model(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/__init__.py", line 187, in get_model
return FlashLlama(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_llama.py", line 68, in __init__
model = FlashLlamaForCausalLM(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 474, in __init__
self.model = FlashLlamaModel(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 412, in __init__
[
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 413, in <listcomp>
FlashLlamaLayer(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 349, in __init__
self.self_attn = FlashLlamaAttention(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 229, in __init__
self.query_key_value = load_attention(config, prefix, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 154, in load_attention
return _load_gqa(config, prefix, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 176, in _load_gqa
weight = weights.get_multi_weights_col(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 205, in get_multi_weights_col
w = [self.get_sharded(f"{p}.weight", dim=0) for p in prefixes]
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 205, in <listcomp>
w = [self.get_sharded(f"{p}.weight", dim=0) for p in prefixes]
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 103, in get_sharded
filename, tensor_name = self.get_filename(tensor_name)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 53, in get_filename
raise RuntimeError(f"weight {tensor_name} does not exist")
RuntimeError: weight model.layers.0.self_attn.q_proj.weight does not exist
2023-09-26T05:46:22.602795Z ERROR shard-manager: text_generation_launcher: Shard complete standard error output:
The tokenizer class you load from this checkpoint is not the same type as the class this function is called from. It may result in unexpected tokenization.
The tokenizer class you load from this checkpoint is 'CodeLlamaTokenizer'.
The class this function is called from is 'LlamaTokenizer'.
You are using the default legacy behaviour of the <class 'transformers.models.llama.tokenization_llama.LlamaTokenizer'>. If you see this, DO NOT PANIC! This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=True`. This should only be set if you understand what it means, and thouroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565
You are using a model of type llama to instantiate a model of type . This is not supported for all configurations of models and can yield errors.
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 82, in serve
server.serve(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 195, in serve
asyncio.run(
File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 647, in run_until_complete
return future.result()
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 147, in serve_inner
model = get_model(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/__init__.py", line 187, in get_model
return FlashLlama(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_llama.py", line 68, in __init__
model = FlashLlamaForCausalLM(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 474, in __init__
self.model = FlashLlamaModel(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 412, in __init__
[
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 413, in <listcomp>
FlashLlamaLayer(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 349, in __init__
self.self_attn = FlashLlamaAttention(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 229, in __init__
self.query_key_value = load_attention(config, prefix, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 154, in load_attention
return _load_gqa(config, prefix, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 176, in _load_gqa
weight = weights.get_multi_weights_col(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 205, in get_multi_weights_col
w = [self.get_sharded(f"{p}.weight", dim=0) for p in prefixes]
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 205, in <listcomp>
w = [self.get_sharded(f"{p}.weight", dim=0) for p in prefixes]
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 103, in get_sharded
filename, tensor_name = self.get_filename(tensor_name)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/weights.py", line 53, in get_filename
raise RuntimeError(f"weight {tensor_name} does not exist")
RuntimeError: weight model.layers.0.self_attn.q_proj.weight does not exist
rank=0
Error: ShardCannotStart
2023-09-26T05:46:22.702485Z ERROR text_generation_launcher: Shard 0 failed to start
2023-09-26T05:46:22.702503Z INFO text_generation_launcher: Shutting down shards
``` | 2023-09-26T06:27:27 |
|
huggingface/text-generation-inference | 1,081 | huggingface__text-generation-inference-1081 | [
"1079"
] | 724199aaf172590c3658018c0e6bc6152cda4c2f | diff --git a/server/text_generation_server/models/__init__.py b/server/text_generation_server/models/__init__.py
--- a/server/text_generation_server/models/__init__.py
+++ b/server/text_generation_server/models/__init__.py
@@ -297,6 +297,8 @@ def get_model(
raise ValueError("awq quantization is not supported for AutoModel")
elif (quantize == "bitsandbytes-fp4") or (quantize == "bitsandbytes-nf4"):
raise ValueError("4bit quantization is not supported for AutoModel")
+ elif (quantize == "eetq"):
+ raise ValueError("Eetq quantization is not supported for AutoModel")
if model_type in modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES:
return CausalLM(
model_id,
| EETQ dependency not compiled for GQA/70b
### System Info
TGI 1.1.0, 70b model sharded across 2 GPUs; some system dependency appears not to have been added properly.
### Information
- [X] Docker
- [ ] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
Trying out EETQ, but I get this on start:
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 311, in __call__
return get_command(self)(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1157, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 778, in main
return _main(
File "/opt/conda/lib/python3.9/site-packages/typer/core.py", line 216, in _main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1688, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 1434, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.9/site-packages/click/core.py", line 783, in invoke
return __callback(*args, **kwargs)
File "/opt/conda/lib/python3.9/site-packages/typer/main.py", line 683, in wrapper
return callback(**use_params) # type: ignore
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/cli.py", line 83, in serve
server.serve(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 207, in serve
asyncio.run(
File "/opt/conda/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 634, in run_until_complete
self.run_forever()
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 601, in run_forever
self._run_once()
File "/opt/conda/lib/python3.9/asyncio/base_events.py", line 1905, in _run_once
handle._run()
File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/server.py", line 159, in serve_inner
model = get_model(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/__init__.py", line 201, in get_model
return FlashLlama(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/flash_llama.py", line 68, in __init__
model = FlashLlamaForCausalLM(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 478, in __init__
self.model = FlashLlamaModel(config, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 416, in __init__
[
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 417, in <listcomp>
FlashLlamaLayer(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 353, in __init__
self.self_attn = FlashLlamaAttention(
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 233, in __init__
self.query_key_value = load_attention(config, prefix, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 154, in load_attention
return _load_gqa(config, prefix, weights)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/models/custom_modeling/flash_llama_modeling.py", line 195, in _load_gqa
get_linear(weight, bias=None, quantize=config.quantize)
File "/opt/conda/lib/python3.9/site-packages/text_generation_server/utils/layers.py", line 280, in get_linear
raise ImportError(
ImportError: Please install EETQ from https://github.com/NetEase-FuXi/EETQ
### Expected behavior
Run normally
| Tagging @SidaZh for visibility | 2023-09-29T06:47:55 |
|
huggingface/text-generation-inference | 1,089 | huggingface__text-generation-inference-1089 | [
"1084"
] | 5ba53d44a18983a4de32d122f4cb46f4a17d9ef6 | diff --git a/server/text_generation_server/utils/flash_attn.py b/server/text_generation_server/utils/flash_attn.py
--- a/server/text_generation_server/utils/flash_attn.py
+++ b/server/text_generation_server/utils/flash_attn.py
@@ -80,7 +80,7 @@ def attention(
)
if HAS_FLASH_ATTN:
- if window_size_left != 0:
+ if window_size_left != -1:
raise NotImplementedError(
"window_size_left is only available with flash attn v2"
)
| Flash attention (v1) is broken
Defaulting `window_size_left` to `-1` breaks flash attention v1: the v1 code path raises unless `window_size_left` is exactly 0 (line 83), so with the new default it always errors.
https://github.com/huggingface/text-generation-inference/blob/5ba53d44a18983a4de32d122f4cb46f4a17d9ef6/server/text_generation_server/utils/flash_attn.py#L60C7-L60C7
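A minimal sketch of the intended guard, mirroring the patch above (the function and argument names here are illustrative, not the real `flash_attn.py` signature): `-1` is the "no sliding window" sentinel, so it is the only value the v1 path should accept.

```python
# Sketch only: flash attention v1 has no sliding-window support, and the
# server encodes "no sliding window" as window_size_left == -1.
def check_window_size(window_size_left: int, has_flash_attn_v2: bool) -> None:
    if not has_flash_attn_v2 and window_size_left != -1:
        raise NotImplementedError(
            "window_size_left is only available with flash attn v2"
        )
```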
| 2023-10-02T15:50:48 |
||
huggingface/text-generation-inference | 1,096 | huggingface__text-generation-inference-1096 | [
"1095"
] | 8ec1b87f16f85f110bb3e7e6d6871525b571dbf9 | diff --git a/server/text_generation_server/models/custom_modeling/idefics_image_processing.py b/server/text_generation_server/models/custom_modeling/idefics_image_processing.py
--- a/server/text_generation_server/models/custom_modeling/idefics_image_processing.py
+++ b/server/text_generation_server/models/custom_modeling/idefics_image_processing.py
@@ -35,6 +35,7 @@
valid_images,
)
from io import BytesIO
+import base64
import requests
from transformers import TensorType, is_torch_available
@@ -194,10 +195,17 @@ def fetch_images(self, image_url_or_urls: Union[str, List[str]]):
if isinstance(image_url_or_urls, list):
return [self.fetch_images(x) for x in image_url_or_urls]
elif isinstance(image_url_or_urls, str):
- response = requests.get(image_url_or_urls, stream=True, headers=headers, timeout=(1, 5))
- response.raise_for_status()
+ image = image_url_or_urls
+
+ if image.startswith("http://") or image.startswith("https://"):
+ response = requests.get(image_url_or_urls, stream=True, headers=headers, timeout=(1, 5))
+ response.raise_for_status()
+ content = response.content
+ else:
+ content = base64.b64decode(image)
+
try:
- image = Image.open(BytesIO(response.content))
+ image = Image.open(BytesIO(content))
# image.verify()
except Exception:
raise ValueError(f"Could not load image from url {image_url_or_urls}")
| Support other means of passing image to IdeficsImageProcessor
### Feature request
IdeficsImageProcessor expects the image to be passed only in the form of a URL from which it can [fetch the image](https://github.com/huggingface/text-generation-inference/blob/7a6fad6aac67d9bf21fe75c034a6bcab5dbd88d2/server/text_generation_server/models/custom_modeling/idefics_image_processing.py#L182).
It would be beneficial to allow accepting images as base64 encoded strings.
### Motivation
We want to use IDEFICS on data that is not directly reachable at an HTTP endpoint. Today we would either need to set up another server that unpacks an image from our DVC store and exposes it under some URL, or deploy a custom Docker container with patched image-download logic.
### Your contribution
We can submit a PR to accept images as base64 encoded strings, roughly like this:
```python
import base64
from io import BytesIO

import requests
from PIL import Image

# `headers` comes from the surrounding fetch_images() context.
if isinstance(image_url_or_urls, str):
    # First, try to interpret the string as base64-encoded image bytes.
    decoded_bytes = None
    try:
        decoded_bytes = BytesIO(base64.b64decode(image_url_or_urls))
    except Exception:
        print("Not a base64 encoded image")
    if decoded_bytes is not None:
        return Image.open(decoded_bytes)
    else:
        # Fall back to the existing behaviour: fetch the image over HTTP.
        response = requests.get(image_url_or_urls, stream=True, headers=headers)
        response.raise_for_status()
        return Image.open(BytesIO(response.content))
```
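On the client side, producing the base64 string is plain standard-library work; a hedged sketch (the helper name and file path are hypothetical):

```python
import base64


def image_to_base64(path: str) -> str:
    """Read a local image file and return the base64 string that the patched
    fetch_images() accepts in place of an http(s) URL."""
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")


# Example: pass this string wherever an image URL would otherwise go.
encoded = image_to_base64("cat.png")  # hypothetical local file
```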
| 2023-10-04T13:41:00 |
||
huggingface/text-generation-inference | 1,099 | huggingface__text-generation-inference-1099 | [
"1017"
] | 8ec1b87f16f85f110bb3e7e6d6871525b571dbf9 | diff --git a/server/text_generation_server/utils/layers.py b/server/text_generation_server/utils/layers.py
--- a/server/text_generation_server/utils/layers.py
+++ b/server/text_generation_server/utils/layers.py
@@ -601,6 +601,19 @@ def static(cls, config, dim, base, device):
device=inv_freq.device,
scaling_factor=scaling_factor,
)
+ elif rope_scaling["type"] == "yarn":
+ return YarnPositionRotaryEmbedding(
+ dim=2 * inv_freq.shape[0],
+ max_position_embeddings=rope_scaling["original_max_position_embeddings"],
+ base=10000.0,
+ device=inv_freq.device,
+ scaling_factor=scaling_factor,
+ extrapolation_factor=1,
+ attn_factor=1,
+ beta_fast=32,
+ beta_slow=1
+
+ )
else:
raise NotImplementedError(
f"rope scaling type {rope_scaling['type']} is not implemented or invalid"
@@ -629,6 +642,19 @@ def load(cls, config, prefix, weights):
device=inv_freq.device,
scaling_factor=scaling_factor,
)
+ elif rope_scaling["type"] == "yarn":
+ return YarnPositionRotaryEmbedding(
+ dim=2 * inv_freq.shape[0],
+ max_position_embeddings=rope_scaling["original_max_position_embeddings"],
+ base=10000.0,
+ device=inv_freq.device,
+ scaling_factor=scaling_factor,
+ extrapolation_factor=1,
+ attn_factor=1,
+ beta_fast=32,
+ beta_slow=1
+
+ )
else:
raise NotImplementedError(
f"rope scaling type {rope_scaling['type']} is not implemented or invalid"
@@ -708,5 +734,76 @@ def _update_cos_sin_cache(self, dtype, device, seqlen):
self._cos_cached = torch.cos(freqs).to(dtype)
self._sin_cached = torch.sin(freqs).to(dtype)
+
+ # Inverse dim formula to find dim based on number of rotations
+ import math
+ def find_correction_dim(num_rotations, dim, base=10000, max_position_embeddings=2048):
+ return (dim * math.log(max_position_embeddings/(num_rotations * 2 * math.pi)))/(2 * math.log(base))
+
+ # Find dim range bounds based on rotations
+ def find_correction_range(low_rot, high_rot, dim, base=10000, max_position_embeddings=2048):
+ low = math.floor(find_correction_dim(
+ low_rot, dim, base, max_position_embeddings))
+ high = math.ceil(find_correction_dim(
+ high_rot, dim, base, max_position_embeddings))
+ return max(low, 0), min(high, dim-1) # Clamp values just in case
+
+ def linear_ramp_mask(min, max, dim):
+ if min == max:
+ max += 0.001 # Prevent singularity
+
+ linear_func = (torch.arange(dim, dtype=torch.float32) - min) / (max - min)
+ ramp_func = torch.clamp(linear_func, 0, 1)
+ return ramp_func
+
+ def get_mscale(scale=1):
+ if scale <= 1:
+ return 1.0
+ return 0.1 * math.log(scale) + 1.0
+
+ class YarnPositionRotaryEmbedding(PositionRotaryEmbedding):
+ def __init__(self, dim, max_position_embeddings, base, device, scaling_factor,*, extrapolation_factor, attn_factor, beta_fast, beta_slow):
+ inv_freq = _create_inv_freq(dim, base, device)
+ super().__init__(inv_freq, scaling_factor)
+ self.dim = dim
+ self.max_position_embeddings = max_position_embeddings
+ self.base = base
+ self.extrapolation_factor = extrapolation_factor
+ self.attn_factor = attn_factor
+ self.beta_fast = beta_fast
+ self.beta_slow = beta_slow
+ self.mscale = float(get_mscale(self.scaling_factor) * self.attn_factor) # Get n-d magnitude scaling corrected for interpolation
+
+ def _update_cos_sin_cache(self, dtype, device, seqlen):
+ # Reset the tables if the sequence length has changed,
+ # or if we're on a new device (possibly due to tracing for instance)
+ if (
+ seqlen > self._seq_len_cached
+ or self._cos_cached.device != device
+ or self._cos_cached.dtype != dtype
+ ):
+ if seqlen > self.max_position_embeddings:
+ inv_freq_extrapolation = _create_inv_freq(
+ self.dim, self.base, self.inv_freq.device
+ )
+ freqs = 1.0 / inv_freq_extrapolation
+ inv_freq_interpolation = 1.0 / (self.scaling_factor * freqs)
+ low, high = find_correction_range(self.beta_fast, self.beta_slow, self.dim, self.base, self.max_position_embeddings)
+ inv_freq_mask = (1 - linear_ramp_mask(low, high, self.dim // 2).float().to(device)) * self.extrapolation_factor # Get n-d rotational scaling corrected for extrapolation
+ inv_freq = inv_freq_interpolation * (1 - inv_freq_mask) + inv_freq_extrapolation * inv_freq_mask
+
+ self.inv_freq = inv_freq
+ self.mscale = float(get_mscale(self.scaling_factor) * self.attn_factor) # Get n-d magnitude scaling corrected for interpolation
+
+
+ self._seq_len_cached = seqlen
+ t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype)
+ # Don't do einsum, it converts fp32 to fp16
+ # freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+
+ freqs = torch.outer(t, self.inv_freq.to(device=t.device))
+ self._cos_cached = (torch.cos(freqs) * self.mscale).to(dtype)
+ self._sin_cached = (torch.sin(freqs) * self.mscale).to(dtype)
+
except ImportError:
pass
| Support YaRN models (RoFormer implementation in rotary_embedding kernel)
### Feature request
@OlivierDehaene
Nous Research and EleutherAI have recently released the **YaRN** models, which come in two versions with context sizes of 64k and 128k. These models use RoFormer-style rotary embeddings, which distinguishes them from GPT-NeoX and GPT-J. They are built on the LLaMa 2 model, so they are already largely compatible; only minor adjustments are needed for full support.
### Motivation
The **YaRN** model's longer context length (up to 128k) is highly valuable for tasks involving extensive context, compared to the limited 4096 context length of the llama2 base model.
### Your contribution
YaRN paper: [YaRN: Efficient Context Window Extension of Large Language Models](https://arxiv.org/pdf/2309.00071.pdf)
YaRN Code: [YaRN Github](https://github.com/jquesnelle/yarn)
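For reference, the interpolation/extrapolation split in the implementation above hinges on `find_correction_dim`, which inverts how many rotations a given rotary dimension completes over the original context window. A sketch of that relation (symbols are named here only for exposition and match the function's arguments):

$$
d(r) = \frac{D \, \ln\!\bigl(L_{\text{orig}} / (2\pi r)\bigr)}{2 \ln b}
$$

where $r$ is the target number of rotations, $D$ the rotary dimension (`dim`), $b$ the RoPE base (10000 here), and $L_{\text{orig}}$ the original `max_position_embeddings`. `find_correction_range` evaluates this at $\beta_{\text{fast}}$ and $\beta_{\text{slow}}$, clamps the results to $[0, D-1]$, and the linear ramp between those bounds blends the interpolated and extrapolated inverse frequencies.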
| +1 that would be awesome
there are models on **TheBloke**'s hub:
For **7b Llama 2**: https://huggingface.co/TheBloke/Yarn-Llama-2-7B-128K-GPTQ
For **13b Llama 2**: https://huggingface.co/TheBloke/Yarn-Llama-2-13B-128K-GPTQ
Originating from:
https://huggingface.co/NousResearch/Yarn-Llama-2-7b-128k
https://huggingface.co/NousResearch/Yarn-Llama-2-13b-128k
+1
+1
+1
+1 | 2023-10-04T15:19:50 |
|
huggingface/text-generation-inference | 1,101 | huggingface__text-generation-inference-1101 | [
"1098"
] | 6df43da0a4f2721c12f0a5636526bb6829455565 | diff --git a/server/text_generation_server/utils/weights.py b/server/text_generation_server/utils/weights.py
--- a/server/text_generation_server/utils/weights.py
+++ b/server/text_generation_server/utils/weights.py
@@ -212,7 +212,9 @@ def get_multi_weights_col(self, prefixes: List[str], quantize: str, dim: int):
g_idx = None
bits, groupsize = self._get_gptq_params()
- weight = (qweight, qzeros, scales, g_idx, bits, groupsize, False)
+ from text_generation_server.utils.layers import HAS_EXLLAMA
+ use_exllama = bits==4 and HAS_EXLLAMA and quantize == "gptq"
+ weight = (qweight, qzeros, scales, g_idx, bits, groupsize, use_exllama)
else:
w = [self.get_sharded(f"{p}.weight", dim=0) for p in prefixes]
weight = torch.cat(w, dim=dim)
| GPTQ doesn't use exllama for all layers
I can no longer contribute here and am not looking at any of the additions since the license change, to avoid contaminating myself because we have our own fork.
But I thought you guys would like to know that there's what I think is an oversight in the GPTQ implementation. In `get_multi_weights_col` in `weights.py`, the last element of the `weights` tuple is hardcoded as `False`. I'm pretty sure it should instead be something like `bits == 4 and HAS_EXLLAMA`.
Without this, half of the layers still use the slow triton kernel.
Fixing this gives a huge speedup, for example about 2.5x for llama-2-70b-GPTQ.
| 2023-10-04T15:51:13 |
||
huggingface/text-generation-inference | 1,182 | huggingface__text-generation-inference-1182 | [
"1142"
] | 3dbc649b11d14955a3d3448ed6db373a0563dfe9 | diff --git a/integration-tests/conftest.py b/integration-tests/conftest.py
--- a/integration-tests/conftest.py
+++ b/integration-tests/conftest.py
@@ -318,6 +318,7 @@ def docker_launcher(
],
volumes=volumes,
ports={"80/tcp": port},
+ shm_size="1G"
)
yield ContainerLauncherHandle(client, container.name, port)
| Update Docker to torch 2.1?
### Feature request
H100s have trouble with GPTQ quants because the image does not ship the latest PyTorch. Can the next TGI Docker image update torch to 2.1, or can a special image be provided for use on H100s?
### Motivation
Can't get TGI + GPTQ quant to work on H100s.
### Your contribution
Sorry, I don't have a contribution to offer ^_^
| 2023-10-20T07:46:17 |
||
huggingface/text-generation-inference | 1,260 | huggingface__text-generation-inference-1260 | [
"1259"
] | a5def7c222174e03d815f890093584f3e815c5ce | diff --git a/server/text_generation_server/cli.py b/server/text_generation_server/cli.py
--- a/server/text_generation_server/cli.py
+++ b/server/text_generation_server/cli.py
@@ -150,6 +150,17 @@ def download_weights(
if not extension == ".safetensors" or not auto_convert:
raise e
+ else:
+ # Try to load as a local PEFT model
+ try:
+ utils.download_and_unload_peft(
+ model_id, revision, trust_remote_code=trust_remote_code
+ )
+ utils.weight_files(model_id, revision, extension)
+ return
+ except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError):
+ pass
+
# Try to see if there are local pytorch weights
try:
# Get weights for a local model, a hub cached model and inside the WEIGHTS_CACHE_OVERRIDE
| PEFT support does not work with local directories
### System Info
text-generation-inference version: main branch
The rest of the system info is probably not relevant.
### Information
- [X] Docker
- [X] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
Steps to reproduce:
1. Create a directory that contains PEFT weights, for example: `mypeft` which contains two files: `adapter_config.json` and `adapter_model.bin`
2. Try to download the base model using the following command: `text-generation-server download-weights mypeft`
### Expected behavior
It should be able to download the base model weights and merge the PEFT weights into the base model.
However, as mentioned in [this comment](https://github.com/huggingface/text-generation-inference/pull/762#issuecomment-1728042436):
> it seems the checks for PEFT model is only in the block checking if the model is not a local model
There is a workaround as documented in [this comment](https://github.com/huggingface/text-generation-inference/pull/762#issuecomment-1729689087)
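For context, a local directory like `mypeft` in step 1 is typically produced with the standard `peft` API; a hedged sketch (the base model id and LoRA hyper-parameters are placeholders, and newer peft versions write `adapter_model.safetensors` instead of `.bin`):

```python
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")  # placeholder base model
lora = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"])
peft_model = get_peft_model(base, lora)
# ... fine-tune peft_model ...
peft_model.save_pretrained("mypeft")  # writes adapter_config.json + the adapter weights
```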
| 2023-11-14T14:41:28 |
||
huggingface/text-generation-inference | 1,351 | huggingface__text-generation-inference-1351 | [
"1279"
] | f3aea78fb642967838e7b5b1940a25fe67f4f7a9 | diff --git a/server/text_generation_server/models/__init__.py b/server/text_generation_server/models/__init__.py
--- a/server/text_generation_server/models/__init__.py
+++ b/server/text_generation_server/models/__init__.py
@@ -55,10 +55,14 @@
FlashSantacoderSharded,
)
from text_generation_server.models.idefics import IDEFICSSharded
+ from text_generation_server.models.flash_mistral import FlashMistral
+ from text_generation_server.models.flash_mixtral import FlashMixtral
+ from text_generation_server.utils.flash_attn import HAS_FLASH_ATTN_V2_CUDA
except ImportError as e:
logger.warning(f"Could not import Flash Attention enabled models: {e}")
FLASH_ATTENTION = False
+ HAS_FLASH_ATTN_V2_CUDA = False
if FLASH_ATTENTION:
__all__.append(FlashNeoXSharded)
@@ -66,25 +70,7 @@
__all__.append(FlashSantacoderSharded)
__all__.append(FlashLlama)
__all__.append(IDEFICSSharded)
-
-MISTRAL = True
-try:
- from text_generation_server.models.flash_mistral import FlashMistral
-except ImportError as e:
- logger.warning(f"Could not import Mistral model: {e}")
- MISTRAL = False
-
-if MISTRAL:
__all__.append(FlashMistral)
-
-MIXTRAL = True
-try:
- from text_generation_server.models.flash_mixtral import FlashMixtral
-except ImportError as e:
- logger.warning(f"Could not import Mixtral model: {e}")
- MIXTRAL = False
-
-if MIXTRAL:
__all__.append(FlashMixtral)
@@ -295,7 +281,9 @@ def get_model(
)
if model_type == "mistral":
- if MISTRAL:
+ if (config_dict["sliding_window"] is None and FLASH_ATTENTION) or (
+ config_dict["sliding_window"] > 0 and HAS_FLASH_ATTN_V2_CUDA
+ ):
return FlashMistral(
model_id,
revision,
@@ -303,10 +291,11 @@ def get_model(
dtype=dtype,
trust_remote_code=trust_remote_code,
)
- raise NotImplementedError("Mistral models requires flash attention v2")
if model_type == "mixtral":
- if MIXTRAL:
+ if (config_dict["sliding_window"] is None and FLASH_ATTENTION) or (
+ config_dict["sliding_window"] > 0 and HAS_FLASH_ATTN_V2_CUDA
+ ):
return FlashMixtral(
model_id,
revision,
@@ -314,9 +303,6 @@ def get_model(
dtype=dtype,
trust_remote_code=trust_remote_code,
)
- raise NotImplementedError(
- "Mixtral models requires flash attention v2, stk and megablocks"
- )
if model_type == "opt":
return OPTSharded(
@@ -348,17 +334,17 @@ def get_model(
raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Idefics"))
if sharded:
- raise ValueError("sharded is not supported for AutoModel")
+ raise NotImplementedError("sharded is not supported for AutoModel")
if quantize == "gptq":
- raise ValueError(
+ raise NotImplementedError(
"gptq quantization is not supported for AutoModel, you can try to quantize it with `text-generation-server quantize ORIGINAL_MODEL_ID NEW_MODEL_ID`"
)
if quantize == "awq":
- raise ValueError("awq quantization is not supported for AutoModel")
+ raise NotImplementedError("awq quantization is not supported for AutoModel")
elif (quantize == "bitsandbytes-fp4") or (quantize == "bitsandbytes-nf4"):
- raise ValueError("4bit quantization is not supported for AutoModel")
+ raise NotImplementedError("4bit quantization is not supported for AutoModel")
elif quantize == "eetq":
- raise ValueError("Eetq quantization is not supported for AutoModel")
+ raise NotImplementedError("Eetq quantization is not supported for AutoModel")
if model_type in modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES:
return CausalLM(
model_id,
diff --git a/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py b/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py
--- a/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py
@@ -27,11 +27,6 @@
from typing import Optional, List, Tuple
from text_generation_server.utils import paged_attention, flash_attn
-from text_generation_server.utils.flash_attn import (
- attention,
- HAS_FLASH_ATTN_V2_ROCM,
- HAS_FLASH_ATTN_V2_CUDA,
-)
from text_generation_server.utils.layers import (
TensorParallelRowLinear,
TensorParallelColumnLinear,
@@ -43,10 +38,6 @@
)
-if not HAS_FLASH_ATTN_V2_CUDA and not HAS_FLASH_ATTN_V2_ROCM:
- raise ImportError("Mistral model requires flash attn v2")
-
-
class MistralConfig(PretrainedConfig):
model_type = "mistral"
diff --git a/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py b/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py
--- a/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py
@@ -27,12 +27,9 @@
from transformers.activations import ACT2FN
from transformers.configuration_utils import PretrainedConfig
from typing import Optional, List, Tuple
+from loguru import logger
from text_generation_server.utils import paged_attention, flash_attn
-from text_generation_server.utils.flash_attn import (
- HAS_FLASH_ATTN_V2_ROCM,
- HAS_FLASH_ATTN_V2_CUDA,
-)
from text_generation_server.utils.layers import (
FastLinear,
FastRMSNorm,
@@ -44,18 +41,13 @@
get_linear,
)
-if not HAS_FLASH_ATTN_V2_CUDA and not HAS_FLASH_ATTN_V2_ROCM:
- raise ImportError("Mixtral model requires flash attn v2")
-
-try:
- import megablocks.ops as ops
-except ImportError:
- raise ImportError("Mixtral model requires megablocks to be installed")
-
+HAS_MEGABLOCKS = True
try:
import stk
+ import megablocks.ops as ops
except ImportError:
- raise ImportError("Mixtral model requires stk to be installed")
+ logger.warning("Mixtral: megablocks is not installed")
+ HAS_MEGABLOCKS = False
class MixtralConfig(PretrainedConfig):
@@ -590,7 +582,7 @@ def dense_forward(self, x: torch.Tensor) -> torch.Tensor:
return out
def forward(self, x: torch.Tensor) -> torch.Tensor:
- if len(x) > 256:
+ if len(x) > 256 and HAS_MEGABLOCKS:
return self.sparse_forward(x)
# This is faster when there is not a lot of tokens
return self.dense_forward(x)
| fix: Add V100 (older) GPU Support for Mistral 7b Models
# What does this PR do?
## Introduction
This PR introduces changes that significantly improve support for running Mistral 7b models on V100 GPU architectures. By update the latest `transformers` package, we ensure both compatibility and performance for users with V100 GPUs.
## Changes
- Altered the model instantiation logic in the scenario where the model type is 'mistral'.
- In environments where `FLASH_ATTENTION` is not available, the system defaults to using `CausalLM`. This ensures that Mistral models remain operational even without Flash Attention support.
## Impact
These updates are particularly beneficial for users with V100 GPU architectures, which previously faced compatibility issues due to `flash-attention` package limitations. With these changes, we can expand our hardware support, reduce barriers to entry, and streamline the user experience for a significant segment of our user base.
## Additional Notes
The `transformers` package has been updated to version 4.35.2, which now includes support for Mistral models. This update is essential since the `flash-attention` package does not support V100 GPUs, as discussed in [Dao-AILab/flash-attention#148](https://github.com/Dao-AILab/flash-attention/issues/148).
## Testing
- Comprehensive tests have been conducted to ensure that Mistral models perform as expected on V100 GPUs.
- Additional benchmarks have been added to compare the performance on V100 GPUs with previous versions.
I invite reviewers to pull this branch, test rigorously with V100 GPUs, and provide feedback on any aspects of this enhancement.
## Before submitting
- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [x] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests),
Pull Request section?
- [x] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link
to it if that's the case.
#1253
#1208
#319
- [x] Did you make sure to update the documentation with your changes? Here are the
[documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and
[here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation).
- [ ] Did you write any new necessary tests?
## Who can review?
@abhijithnair1
| 2023-12-15T10:16:09 |
||
huggingface/text-generation-inference | 1,414 | huggingface__text-generation-inference-1414 | [
"1397"
] | 630800eed37b15c4b0c9eb8e6ab47212026720f7 | diff --git a/server/text_generation_server/utils/flash_attn.py b/server/text_generation_server/utils/flash_attn.py
--- a/server/text_generation_server/utils/flash_attn.py
+++ b/server/text_generation_server/utils/flash_attn.py
@@ -23,10 +23,15 @@
try:
import flash_attn_2_cuda
except ImportError:
+ architecture_suffix = ""
+ if IS_CUDA_SYSTEM:
+ architecture_suffix = "-cuda"
+ elif IS_ROCM_SYSTEM:
+ architecture_suffix = "-rocm"
raise ImportError(
"Flash Attention V2 is not installed.\n"
"Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) "
- "or install flash attention v2 with `cd server && make install install-flash-attention-v2`"
+ f"or install flash attention v2 with `cd server && make install install-flash-attention-v2{architecture_suffix}`"
)
if not (is_sm8x or is_sm90):
raise ImportError(
| Local Install: No rule to make target 'install-flash-attention-v2'
### System Info
**Release:** v1.3.4, main branch
**Target**: x86_64-unknown-linux-gnu
**Cargo version**: 1.70.0
**Commit sha**: 630800eed37b15c4b0c9eb8e6ab47212026720f7
**Docker label**: N/A
**nvidia-smi**: Wed Jan 3 18:13:50 2024
```
Mon Jan 8 15:13:03 2024
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 535.54.03 Driver Version: 535.54.03 CUDA Version: 12.2 |
|-----------------------------------------+----------------------+----------------------+
| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+======================+======================|
| 0 NVIDIA GeForce RTX 4090 On | 00000000:01:00.0 Off | Off |
| 0% 40C P8 26W / 450W | 3MiB / 24564MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
| 1 NVIDIA GeForce RTX 4090 On | 00000000:02:00.0 Off | Off |
| 0% 46C P8 26W / 450W | 3MiB / 24564MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
+---------------------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=======================================================================================|
| No running processes found |
+---------------------------------------------------------------------------------------+
```
### Information
- [ ] Docker
- [X] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
Followed steps found within the local install instructions (https://github.com/huggingface/text-generation-inference#local-install), in addition to a couple undocumented make steps
1. Fork & pull from main branch, v1.3.4
2. Install and test Rust: 1.70.0 (90c541806 2023-05-31)
3. Install & test Conda: 23.11.0
4. Install and test libprotoc: 3.21.12
5. Execute `export BUILD_EXTENSIONS=True`
6. Execute `make install`
7. Execute `cd server && make install-vllm-cuda && cd ..`
8. Update source code for paged attention [as described here](https://github.com/huggingface/text-generation-inference/pull/1386/commits/ad7f8396732c12368c7961e15fe2287f1aa42db3)
9. Execute `cd server && make install-flash-attention && cd ..`
10. Execute `make run-falcon-7b-instruct`, which yields the following message:
_Flash Attention V2 is not installed.
Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) or install flash attention v2 with `cd server && make install install-flash-attention-v2`_
11. Execute `cd server && make install install-flash-attention-v2`, which yields the following error message:
_make: *** No rule to make target 'install-flash-attention-v2'. Stop._
### Expected behavior
Make target should be found and executed. Solution below.
**SOLUTION**:
The file that issues the warning, `server/text_generation_server/utils/flash_attn.py`, is missing the proper make target suffix. It should be:
`make install install-flash-attention-v2-cuda` or `install-flash-attention-v2-rocm`
| 2024-01-08T21:37:28 |
||
huggingface/text-generation-inference | 1,420 | huggingface__text-generation-inference-1420 | [
"1415"
] | 91d72675342e34c314a0d7cc9bb9ca9d8f5aa295 | diff --git a/server/text_generation_server/cli.py b/server/text_generation_server/cli.py
--- a/server/text_generation_server/cli.py
+++ b/server/text_generation_server/cli.py
@@ -198,6 +198,35 @@ def download_weights(
if not extension == ".safetensors" or not auto_convert:
raise e
+ elif (Path(model_id) / "medusa_lm_head.pt").exists():
+ # Try to load as a local Medusa model
+ try:
+ import json
+
+ medusa_head = Path(model_id) / "medusa_lm_head.pt"
+ if auto_convert:
+ medusa_sf = Path(model_id) / "medusa_lm_head.safetensors"
+ if not medusa_sf.exists():
+ utils.convert_files([Path(medusa_head)], [medusa_sf], [])
+ medusa_config = Path(model_id) / "config.json"
+ with open(medusa_config, "r") as f:
+ config = json.load(f)
+
+ model_id = config["base_model_name_or_path"]
+ revision = "main"
+ try:
+ utils.weight_files(model_id, revision, extension)
+ logger.info(
+ f"Files for parent {model_id} are already present on the host. "
+ "Skipping download."
+ )
+ return
+ # Local files not found
+ except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError):
+ pass
+ except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError):
+ pass
+
elif (Path(model_id) / "adapter_config.json").exists():
# Try to load as a local PEFT model
try:
diff --git a/server/text_generation_server/models/flash_llama.py b/server/text_generation_server/models/flash_llama.py
--- a/server/text_generation_server/models/flash_llama.py
+++ b/server/text_generation_server/models/flash_llama.py
@@ -71,15 +71,26 @@ def __init__(
from text_generation_server.utils.medusa import MedusaModel
from huggingface_hub import hf_hub_download
import json
-
- medusa_config = hf_hub_download(
- use_medusa, revision=revision, filename="config.json"
- )
+ import os
+ from pathlib import Path
+
+ is_local_model = (Path(use_medusa).exists() and Path(use_medusa).is_dir()) or os.getenv(
+ "WEIGHTS_CACHE_OVERRIDE", None
+ ) is not None
+
+ if not is_local_model:
+ medusa_config = hf_hub_download(
+ use_medusa, revision=revision, filename="config.json"
+ )
+ medusa_head = hf_hub_download(
+ use_medusa, revision=revision, filename="medusa_lm_head.pt"
+ )
+ else:
+ medusa_config = str(Path(use_medusa) / "config.json")
+ medusa_head = str(Path(use_medusa) / "medusa_lm_head.pt")
+
with open(medusa_config, "r") as f:
config = json.load(f)
- medusa_head = hf_hub_download(
- use_medusa, revision=revision, filename="medusa_lm_head.pt"
- )
medusa_sf = medusa_head[: -len(".pt")] + ".safetensors"
weights = Weights(
[medusa_sf], device, dtype, process_group=self.process_group
| How to use local Medusa head?
Medusa is said to significantly accelerate inference. While trying to use it, I found that TGI does not support loading a local Medusa config and head. The relevant code fragment is below (I have modified it). However, I do not understand what 'medusa_sf' is: the Medusa training process does not generate any new safetensors, so what is this file?
```python
medusa_config = f"{model_id}/config_medusa.json"
# medusa_config = hf_hub_download(
# use_medusa, revision=revision, filename="config.json"
# )
with open(medusa_config, "r") as f:
config = json.load(f)
medusa_head = f"{model_id}/medusa_lm_head.pt"
# medusa_head = hf_hub_download(
# use_medusa, revision=revision, filename="medusa_lm_head.pt"
# )
medusa_sf = medusa_head[: -len(".pt")] + ".safetensors"
weights = Weights(
[medusa_sf], device, dtype, process_group=self.process_group
)
lm_head = model.lm_head
model.lm_head = MedusaModel(config, weights, lm_head)
```
How should I use TGI with a local Medusa head? A huge thanks for your work!
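To answer the `medusa_sf` question from the patch's perspective: it is simply the safetensors copy of `medusa_lm_head.pt`, created at download time (the `cli.py` hunk above converts it with `utils.convert_files` when `auto_convert` is set). A minimal sketch of the expected local layout and of that conversion (the directory path is a placeholder):

```python
from pathlib import Path

from text_generation_server import utils

# Expected contents of a local Medusa directory:
#   my-medusa/config.json        -> contains "base_model_name_or_path"
#   my-medusa/medusa_lm_head.pt  -> written by Medusa training
medusa_dir = Path("my-medusa")  # placeholder path
medusa_head = medusa_dir / "medusa_lm_head.pt"

# "medusa_sf" is just the safetensors copy of the .pt head; download-weights
# creates it with the same conversion helper used in the cli.py hunk above.
medusa_sf = medusa_dir / "medusa_lm_head.safetensors"
if not medusa_sf.exists():
    utils.convert_files([medusa_head], [medusa_sf], [])
```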
| 2024-01-09T11:42:35 |
||
huggingface/text-generation-inference | 1,584 | huggingface__text-generation-inference-1584 | [
"1575"
] | b40e833493808ed80b0bd6d8a68252fff01d307a | diff --git a/server/text_generation_server/models/__init__.py b/server/text_generation_server/models/__init__.py
--- a/server/text_generation_server/models/__init__.py
+++ b/server/text_generation_server/models/__init__.py
@@ -54,6 +54,9 @@
from text_generation_server.models.flash_llama import (
FlashLlama,
)
+ from text_generation_server.models.flash_qwen2 import (
+ FlashQwen2,
+ )
from text_generation_server.models.flash_gemma import (
FlashGemma,
)
@@ -81,6 +84,7 @@
__all__.append(FlashMistral)
__all__.append(FlashMixtral)
__all__.append(FlashPhi)
+ __all__.append(FlashQwen2)
__all__.append(FlashStarcoder2)
MAMBA_AVAILABLE = True
@@ -328,6 +332,27 @@ def get_model(
dtype=dtype,
trust_remote_code=trust_remote_code,
)
+ elif model_type == "qwen2":
+ if FLASH_ATTENTION:
+ return FlashQwen2(
+ model_id,
+ revision,
+ quantize=quantize,
+ dtype=dtype,
+ trust_remote_code=trust_remote_code,
+ )
+ elif sharded:
+ raise NotImplementedError(
+ FLASH_ATT_ERROR_MESSAGE.format("Sharded Qwen2")
+ )
+ else:
+ return CausalLM(
+ model_id,
+ revision,
+ quantize=quantize,
+ dtype=dtype,
+ trust_remote_code=trust_remote_code,
+ )
if model_type == "gemma":
if FLASH_ATTENTION:
return FlashGemma(
diff --git a/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py b/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py
new file mode 100644
--- /dev/null
+++ b/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py
@@ -0,0 +1,390 @@
+import torch
+import torch.distributed
+
+from torch import nn
+from transformers.activations import ACT2FN
+from typing import Optional, List, Tuple
+
+from text_generation_server.utils import paged_attention, flash_attn
+from text_generation_server.utils.layers import (
+ TensorParallelRowLinear,
+ TensorParallelColumnLinear,
+ TensorParallelEmbedding,
+ PositionRotaryEmbedding,
+ TensorParallelHead,
+ get_linear,
+ FastRMSNorm,
+)
+
+
+def load_attention(config, prefix, weights):
+ if config.num_attention_heads != config.num_key_value_heads:
+ return _load_gqa(config, prefix, weights)
+ else:
+ return TensorParallelColumnLinear.load_multi(
+ config,
+ prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
+ dim=0,
+ weights=weights,
+ bias=True,
+ )
+
+
+def _load_gqa(config, prefix: str, weights):
+ assert config.hidden_size % config.num_attention_heads == 0
+ assert config.num_attention_heads % weights.process_group.size() == 0
+
+ weight = weights.get_multi_weights_col(
+ prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
+ quantize=config.quantize,
+ dim=0,
+ )
+
+ if config.quantize not in ["gptq", "awq"]:
+ weight = weight.to(dtype=weights.dtype).to(device=weights.device)
+
+ head_size = config.hidden_size // config.num_attention_heads
+ num_heads = config.num_attention_heads // weights.process_group.size()
+ num_key_value_heads = config.num_key_value_heads // weights.process_group.size()
+ assert list(weight.shape) == [
+ (num_heads + 2 * num_key_value_heads) * head_size,
+ config.hidden_size,
+ ], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}"
+
+ return TensorParallelColumnLinear(
+ get_linear(weight, bias=None, quantize=config.quantize)
+ )
+
+
+class Qwen2Attention(torch.nn.Module):
+ def __init__(
+ self,
+ prefix: str,
+ config,
+ weights,
+ ):
+ super().__init__()
+ self.max_past = (
+ config.sliding_window if config.sliding_window is not None else -1
+ )
+ self.num_heads = config.num_attention_heads
+ self.hidden_size = config.hidden_size
+ self.head_size = self.hidden_size // self.num_heads
+
+ self.rotary_emb = PositionRotaryEmbedding.static(
+ config=config,
+ dim=self.head_size,
+ base=config.rope_theta,
+ device=weights.device,
+ )
+
+ self.softmax_scale = self.head_size**-0.5
+
+ if self.num_heads % weights.process_group.size() != 0:
+ raise ValueError(
+ f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
+ f"and `num_shards`: {weights.process_group.size()}"
+ )
+ self.num_heads = self.num_heads // weights.process_group.size()
+ self.num_key_value_heads = (
+ config.num_key_value_heads // weights.process_group.size()
+ )
+
+ self.query_key_value = load_attention(config, prefix, weights)
+
+ self.o_proj = TensorParallelRowLinear.load(
+ config,
+ prefix=f"{prefix}.o_proj",
+ weights=weights,
+ bias=False,
+ )
+ self.num_groups = self.num_heads // self.num_key_value_heads
+ self.kv_head_mapping = torch.arange(
+ 0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
+ ).repeat_interleave(self.num_groups)
+
+ def forward(
+ self,
+ hidden_states,
+ cos,
+ sin,
+ cu_seqlen_prefill,
+ kv_cache,
+ block_tables,
+ slots,
+ input_lengths,
+ max_s,
+ prefill_cache_indices,
+ ):
+ qkv = self.query_key_value(hidden_states)
+ query, kv = qkv.split(
+ [
+ self.head_size * self.num_heads,
+ 2 * self.head_size * self.num_key_value_heads,
+ ],
+ dim=1,
+ )
+ query = query.view(-1, self.num_heads, self.head_size)
+ kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size)
+
+ self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin)
+
+ if prefill_cache_indices is not None:
+ kv_to_cache = kv[prefill_cache_indices]
+ else:
+ kv_to_cache = kv
+
+ paged_attention.reshape_and_cache(
+ kv_to_cache[:, 0], kv_to_cache[:, 1], kv_cache[0], kv_cache[1], slots
+ )
+
+ # output tensor
+ attn_output = torch.empty_like(query)
+
+ # Prefill
+ if cu_seqlen_prefill is not None:
+ # flash attention
+ flash_attn.attention(
+ query,
+ torch.select(kv, dim=1, index=0),
+ torch.select(kv, dim=1, index=1),
+ attn_output,
+ cu_seqlen_prefill,
+ max_s,
+ self.softmax_scale,
+ window_size_left=self.max_past,
+ )
+ # Decode
+ else:
+ paged_attention.attention(
+ attn_output,
+ query,
+ kv_cache[0],
+ kv_cache[1],
+ self.kv_head_mapping,
+ self.softmax_scale,
+ block_tables,
+ input_lengths,
+ max_s,
+ )
+
+ return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size))
+
+class Qwen2MLP(nn.Module):
+ def __init__(self, prefix, config, weights):
+ super().__init__()
+ act = config.hidden_act
+ self.act = (
+ ACT2FN[act]
+ if "gelu" not in act
+ else lambda x: torch.nn.functional.gelu(
+ x,
+ approximate=(
+ "tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
+ ),
+ )
+ )
+ # Fuse gate and up proj
+ self.gate_up_proj = TensorParallelColumnLinear.load_multi(
+ config,
+ prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"],
+ weights=weights,
+ dim=0,
+ bias=False,
+ )
+ self.down_proj = TensorParallelRowLinear.load(
+ config,
+ prefix=f"{prefix}.down_proj",
+ weights=weights,
+ bias=False,
+ )
+ self.intermediate_size = (
+ config.intermediate_size // weights.process_group.size()
+ )
+
+ def forward(self, hidden_states):
+ gate_up_states = self.gate_up_proj(hidden_states)
+ gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size)
+ return self.down_proj(self.act(gate_up_states[:, 0]) * gate_up_states[:, 1])
+
+
+class Qwen2Layer(nn.Module):
+ def __init__(self, layer_id, config, weights):
+ super().__init__()
+ prefix = f"model.layers.{layer_id}"
+ self.self_attn = Qwen2Attention(prefix=f"{prefix}.self_attn", config=config, weights=weights)
+ self.mlp = Qwen2MLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
+ self.input_layernorm = FastRMSNorm.load(
+ prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps
+ )
+ self.post_attention_layernorm = FastRMSNorm.load(
+ prefix=f"{prefix}.post_attention_layernorm",
+ weights=weights,
+ eps=config.rms_norm_eps,
+ )
+
+ def forward(
+ self,
+ hidden_states,
+ residual,
+ cos,
+ sin,
+ cu_seqlen_prefill,
+ kv_cache,
+ block_tables,
+ slots,
+ input_lengths,
+ max_s,
+ prefill_cache_indices,
+ ):
+ normed_hidden_states, res = self.input_layernorm(hidden_states, residual)
+
+ # Self Attention
+ attn_output = self.self_attn(
+ normed_hidden_states,
+ cos,
+ sin,
+ cu_seqlen_prefill,
+ kv_cache,
+ block_tables,
+ slots,
+ input_lengths,
+ max_s,
+ prefill_cache_indices,
+ )
+
+ # faster post attention rms norm
+ normed_attn_res_output, attn_res = self.post_attention_layernorm(
+ attn_output, res
+ )
+
+ mlp_output = self.mlp(normed_attn_res_output)
+
+ return mlp_output, attn_res
+
+class Qwen2Model(torch.nn.Module):
+ def __init__(self, config, weights):
+ super().__init__()
+ process_group = weights.process_group
+ self.tp_rank = process_group.rank()
+ self.tp_world_size = process_group.size()
+ self.embed_tokens = TensorParallelEmbedding(
+ prefix="model.embed_tokens", weights=weights
+ )
+ self.layers = nn.ModuleList(
+ [
+ Qwen2Layer(
+ layer_id,
+ config,
+ weights,
+ )
+ for layer_id in range(config.num_hidden_layers)
+ ]
+ )
+ self.norm = FastRMSNorm.load(
+ prefix="model.norm", weights=weights, eps=config.rms_norm_eps
+ )
+
+ self.gradient_checkpointing = False
+
+ self.head_size = self.layers[0].self_attn.head_size
+ self.num_heads = self.layers[0].self_attn.num_heads
+ self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads
+
+ def forward(
+ self,
+ input_ids: torch.Tensor,
+ position_ids: torch.Tensor,
+ cu_seqlen_prefill: Optional[torch.Tensor],
+ kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
+ block_tables: torch.Tensor,
+ slots: torch.Tensor,
+ input_lengths: torch.Tensor,
+ max_s: int,
+ true_max_s: int,
+ prefill_cache_indices: Optional[torch.Tensor],
+ ) -> torch.Tensor:
+ hidden_states = self.embed_tokens(input_ids)
+
+ # Get rotary cos and sin for this forward
+ # Avoid to index in each layer
+ cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(
+ position_ids, true_max_s, hidden_states.dtype
+ )
+
+ residual = None
+ for i, layer in enumerate(self.layers):
+ hidden_states, residual = layer(
+ hidden_states,
+ residual,
+ cos,
+ sin,
+ cu_seqlen_prefill,
+ kv_cache[i],
+ block_tables,
+ slots,
+ input_lengths,
+ max_s,
+ prefill_cache_indices,
+ )
+
+ hidden_states, _ = self.norm(hidden_states, residual)
+
+ return hidden_states
+
+
+class Qwen2ForCausalLM(torch.nn.Module):
+ def __init__(self, config, weights):
+ super().__init__()
+
+ self.model = Qwen2Model(config, weights)
+ self.lm_head = TensorParallelHead.load(
+ config,
+ prefix="lm_head",
+ weights=weights,
+ )
+ self.max_past = config.sliding_window
+ self.max_past_tensor = (
+ torch.tensor(config.sliding_window, device=weights.device)
+ if self.max_past is not None
+ else None
+ )
+
+ def forward(
+ self,
+ input_ids: torch.Tensor,
+ position_ids: torch.Tensor,
+ cu_seqlen_prefill: Optional[torch.Tensor],
+ kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
+ block_tables: torch.Tensor,
+ slots: torch.Tensor,
+ input_lengths: torch.Tensor,
+ max_s: int,
+ prefill_cache_indices: Optional[torch.Tensor] = None,
+ lm_head_indices: Optional[torch.Tensor] = None,
+ ) -> torch.Tensor:
+ true_max_s = max_s
+ if prefill_cache_indices is not None:
+ # Slots also need to be sliced as it has the same size as the whole kv tensor
+ slots = slots[prefill_cache_indices]
+ elif self.max_past is not None:
+ # Clamp in decode mode as paged attention requires clamped values whereas the flash attention
+ # kernel requires the true values
+ input_lengths = torch.clamp(input_lengths, max=self.max_past_tensor)
+
+ hidden_states = self.model(
+ input_ids,
+ position_ids,
+ cu_seqlen_prefill,
+ kv_cache,
+ block_tables,
+ slots,
+ input_lengths,
+ max_s,
+ true_max_s,
+ prefill_cache_indices,
+ )
+ if lm_head_indices is not None:
+ hidden_states = hidden_states[lm_head_indices]
+ logits = self.lm_head(hidden_states)
+ return logits
diff --git a/server/text_generation_server/models/flash_qwen2.py b/server/text_generation_server/models/flash_qwen2.py
new file mode 100644
--- /dev/null
+++ b/server/text_generation_server/models/flash_qwen2.py
@@ -0,0 +1,77 @@
+import torch
+import torch.distributed
+
+from opentelemetry import trace
+from transformers import AutoTokenizer
+from transformers.models.qwen2 import Qwen2Tokenizer
+from typing import Optional
+
+from text_generation_server.models import FlashCausalLM
+from text_generation_server.models.custom_modeling.flash_qwen2_modeling import (
+ Qwen2ForCausalLM,
+)
+from transformers.models.qwen2 import Qwen2Config
+from text_generation_server.utils import (
+ initialize_torch_distributed,
+ weight_files,
+ Weights,
+)
+
+tracer = trace.get_tracer(__name__)
+
+
+class FlashQwen2(FlashCausalLM):
+ def __init__(
+ self,
+ model_id: str,
+ revision: Optional[str] = None,
+ quantize: Optional[str] = None,
+ dtype: Optional[torch.dtype] = None,
+ trust_remote_code: bool = False,
+ ):
+ self.process_group, rank, world_size = initialize_torch_distributed()
+ if torch.cuda.is_available():
+ device = torch.device(f"cuda:{rank}")
+ dtype = torch.float16 if dtype is None else dtype
+ else:
+ raise NotImplementedError("FlashQwen2 is only available on GPU")
+
+ try:
+ tokenizer = Qwen2Tokenizer.from_pretrained(
+ model_id,
+ revision=revision,
+ trust_remote_code=trust_remote_code,
+ )
+ except Exception:
+ tokenizer = AutoTokenizer.from_pretrained(
+ model_id,
+ revision=revision,
+ trust_remote_code=trust_remote_code,
+ )
+
+ config = Qwen2Config.from_pretrained(
+ model_id, revision=revision, trust_remote_code=trust_remote_code
+ )
+ config.quantize = quantize
+
+ torch.distributed.barrier(group=self.process_group)
+
+ filenames = weight_files(model_id, revision=revision, extension=".safetensors")
+ weights = Weights(filenames, device, dtype, process_group=self.process_group)
+ if config.quantize in ["gptq", "awq"]:
+ weights._set_gptq_params(model_id, revision)
+
+ model = Qwen2ForCausalLM(config, weights)
+
+ torch.distributed.barrier(group=self.process_group)
+ super(FlashQwen2, self).__init__(
+ model=model,
+ tokenizer=tokenizer,
+ num_layers=len(model.model.layers),
+ num_kv_heads=model.model.num_key_value_heads,
+ head_size=model.model.head_size,
+ dtype=dtype,
+ device=device,
+ rank=rank,
+ world_size=world_size,
+ )
| diff --git a/integration-tests/models/__snapshots__/test_flash_qwen2/test_flash_qwen2.json b/integration-tests/models/__snapshots__/test_flash_qwen2/test_flash_qwen2.json
new file mode 100644
--- /dev/null
+++ b/integration-tests/models/__snapshots__/test_flash_qwen2/test_flash_qwen2.json
@@ -0,0 +1,84 @@
+{
+ "details": {
+ "best_of_sequences": null,
+ "finish_reason": "length",
+ "generated_tokens": 10,
+ "prefill": [
+ {
+ "id": 2271,
+ "text": "Test",
+ "logprob": null
+ },
+ {
+ "id": 1681,
+ "text": " request",
+ "logprob": -7.0351562
+ }
+ ],
+ "seed": null,
+ "tokens": [
+ {
+ "id": 369,
+ "text": " for",
+ "logprob": -2.1914062,
+ "special": false
+ },
+ {
+ "id": 279,
+ "text": " the",
+ "logprob": -2.6210938,
+ "special": false
+ },
+ {
+ "id": 2701,
+ "text": " following",
+ "logprob": -3.6445312,
+ "special": false
+ },
+ {
+ "id": 729,
+ "text": " function",
+ "logprob": -2.9648438,
+ "special": false
+ },
+ {
+ "id": 271,
+ "text": "\n\n",
+ "logprob": -1.9111328,
+ "special": false
+ },
+ {
+ "id": 31946,
+ "text": "Inputs",
+ "logprob": -1.6855469,
+ "special": false
+ },
+ {
+ "id": 25,
+ "text": ":",
+ "logprob": -1.6093254e-05,
+ "special": false
+ },
+ {
+ "id": 707,
+ "text": " def",
+ "logprob": -0.5678711,
+ "special": false
+ },
+ {
+ "id": 1477,
+ "text": " find",
+ "logprob": -2.5917969,
+ "special": false
+ },
+ {
+ "id": 6345,
+ "text": "_max",
+ "logprob": -1.8349609,
+ "special": false
+ }
+ ],
+ "top_tokens": null
+ },
+ "generated_text": " for the following function\n\nInputs: def find_max"
+}
diff --git a/integration-tests/models/__snapshots__/test_flash_qwen2/test_flash_qwen2_all_params.json b/integration-tests/models/__snapshots__/test_flash_qwen2/test_flash_qwen2_all_params.json
new file mode 100644
--- /dev/null
+++ b/integration-tests/models/__snapshots__/test_flash_qwen2/test_flash_qwen2_all_params.json
@@ -0,0 +1,84 @@
+{
+ "details": {
+ "best_of_sequences": null,
+ "finish_reason": "length",
+ "generated_tokens": 10,
+ "prefill": [
+ {
+ "id": 2271,
+ "text": "Test",
+ "logprob": null
+ },
+ {
+ "id": 1681,
+ "text": " request",
+ "logprob": -7.0351562
+ }
+ ],
+ "seed": 0,
+ "tokens": [
+ {
+ "id": 311,
+ "text": " to",
+ "logprob": -1.4472656,
+ "special": false
+ },
+ {
+ "id": 633,
+ "text": " get",
+ "logprob": -0.4741211,
+ "special": false
+ },
+ {
+ "id": 264,
+ "text": " a",
+ "logprob": 0.0,
+ "special": false
+ },
+ {
+ "id": 1140,
+ "text": " list",
+ "logprob": 0.0,
+ "special": false
+ },
+ {
+ "id": 315,
+ "text": " of",
+ "logprob": 0.0,
+ "special": false
+ },
+ {
+ "id": 678,
+ "text": " all",
+ "logprob": 0.0,
+ "special": false
+ },
+ {
+ "id": 279,
+ "text": " the",
+ "logprob": -0.2590332,
+ "special": false
+ },
+ {
+ "id": 3847,
+ "text": " users",
+ "logprob": -0.45239258,
+ "special": false
+ },
+ {
+ "id": 304,
+ "text": " in",
+ "logprob": -0.12322998,
+ "special": false
+ },
+ {
+ "id": 419,
+ "text": " this",
+ "logprob": -1.7275391,
+ "special": false
+ }
+ ],
+ "top_tokens": null
+ },
+ "generated_text": "Test request to get a list of all the users in this"
+}
diff --git a/integration-tests/models/__snapshots__/test_flash_qwen2/test_flash_qwen2_load.json b/integration-tests/models/__snapshots__/test_flash_qwen2/test_flash_qwen2_load.json
new file mode 100644
--- /dev/null
+++ b/integration-tests/models/__snapshots__/test_flash_qwen2/test_flash_qwen2_load.json
@@ -0,0 +1,338 @@
+[
+ {
+ "details": {
+ "best_of_sequences": null,
+ "finish_reason": "length",
+ "generated_tokens": 10,
+ "prefill": [
+ {
+ "id": 2271,
+ "text": "Test",
+ "logprob": null
+ },
+ {
+ "id": 1681,
+ "text": " request",
+ "logprob": -7.0351562
+ }
+ ],
+ "seed": null,
+ "tokens": [
+ {
+ "id": 369,
+ "text": " for",
+ "logprob": -2.1914062,
+ "special": false
+ },
+ {
+ "id": 279,
+ "text": " the",
+ "logprob": -2.6210938,
+ "special": false
+ },
+ {
+ "id": 2701,
+ "text": " following",
+ "logprob": -3.6445312,
+ "special": false
+ },
+ {
+ "id": 729,
+ "text": " function",
+ "logprob": -2.9648438,
+ "special": false
+ },
+ {
+ "id": 271,
+ "text": "\n\n",
+ "logprob": -1.9111328,
+ "special": false
+ },
+ {
+ "id": 31946,
+ "text": "Inputs",
+ "logprob": -1.6855469,
+ "special": false
+ },
+ {
+ "id": 25,
+ "text": ":",
+ "logprob": -1.6093254e-05,
+ "special": false
+ },
+ {
+ "id": 707,
+ "text": " def",
+ "logprob": -0.5678711,
+ "special": false
+ },
+ {
+ "id": 1477,
+ "text": " find",
+ "logprob": -2.5917969,
+ "special": false
+ },
+ {
+ "id": 6345,
+ "text": "_max",
+ "logprob": -1.8349609,
+ "special": false
+ }
+ ],
+ "top_tokens": null
+ },
+ "generated_text": " for the following function\n\nInputs: def find_max"
+ },
+ {
+ "details": {
+ "best_of_sequences": null,
+ "finish_reason": "length",
+ "generated_tokens": 10,
+ "prefill": [
+ {
+ "id": 2271,
+ "text": "Test",
+ "logprob": null
+ },
+ {
+ "id": 1681,
+ "text": " request",
+ "logprob": -7.0351562
+ }
+ ],
+ "seed": null,
+ "tokens": [
+ {
+ "id": 369,
+ "text": " for",
+ "logprob": -2.1914062,
+ "special": false
+ },
+ {
+ "id": 279,
+ "text": " the",
+ "logprob": -2.6210938,
+ "special": false
+ },
+ {
+ "id": 2701,
+ "text": " following",
+ "logprob": -3.6445312,
+ "special": false
+ },
+ {
+ "id": 729,
+ "text": " function",
+ "logprob": -2.9648438,
+ "special": false
+ },
+ {
+ "id": 271,
+ "text": "\n\n",
+ "logprob": -1.9111328,
+ "special": false
+ },
+ {
+ "id": 31946,
+ "text": "Inputs",
+ "logprob": -1.6855469,
+ "special": false
+ },
+ {
+ "id": 25,
+ "text": ":",
+ "logprob": -1.6093254e-05,
+ "special": false
+ },
+ {
+ "id": 707,
+ "text": " def",
+ "logprob": -0.5678711,
+ "special": false
+ },
+ {
+ "id": 1477,
+ "text": " find",
+ "logprob": -2.5917969,
+ "special": false
+ },
+ {
+ "id": 6345,
+ "text": "_max",
+ "logprob": -1.8349609,
+ "special": false
+ }
+ ],
+ "top_tokens": null
+ },
+ "generated_text": " for the following function\n\nInputs: def find_max"
+ },
+ {
+ "details": {
+ "best_of_sequences": null,
+ "finish_reason": "length",
+ "generated_tokens": 10,
+ "prefill": [
+ {
+ "id": 2271,
+ "text": "Test",
+ "logprob": null
+ },
+ {
+ "id": 1681,
+ "text": " request",
+ "logprob": -7.0351562
+ }
+ ],
+ "seed": null,
+ "tokens": [
+ {
+ "id": 369,
+ "text": " for",
+ "logprob": -2.1914062,
+ "special": false
+ },
+ {
+ "id": 279,
+ "text": " the",
+ "logprob": -2.6210938,
+ "special": false
+ },
+ {
+ "id": 2701,
+ "text": " following",
+ "logprob": -3.6445312,
+ "special": false
+ },
+ {
+ "id": 729,
+ "text": " function",
+ "logprob": -2.9648438,
+ "special": false
+ },
+ {
+ "id": 271,
+ "text": "\n\n",
+ "logprob": -1.9111328,
+ "special": false
+ },
+ {
+ "id": 31946,
+ "text": "Inputs",
+ "logprob": -1.6855469,
+ "special": false
+ },
+ {
+ "id": 25,
+ "text": ":",
+ "logprob": -1.6093254e-05,
+ "special": false
+ },
+ {
+ "id": 707,
+ "text": " def",
+ "logprob": -0.5678711,
+ "special": false
+ },
+ {
+ "id": 1477,
+ "text": " find",
+ "logprob": -2.5917969,
+ "special": false
+ },
+ {
+ "id": 6345,
+ "text": "_max",
+ "logprob": -1.8349609,
+ "special": false
+ }
+ ],
+ "top_tokens": null
+ },
+ "generated_text": " for the following function\n\nInputs: def find_max"
+ },
+ {
+ "details": {
+ "best_of_sequences": null,
+ "finish_reason": "length",
+ "generated_tokens": 10,
+ "prefill": [
+ {
+ "id": 2271,
+ "text": "Test",
+ "logprob": null
+ },
+ {
+ "id": 1681,
+ "text": " request",
+ "logprob": -7.0351562
+ }
+ ],
+ "seed": null,
+ "tokens": [
+ {
+ "id": 369,
+ "text": " for",
+ "logprob": -2.1914062,
+ "special": false
+ },
+ {
+ "id": 279,
+ "text": " the",
+ "logprob": -2.6210938,
+ "special": false
+ },
+ {
+ "id": 2701,
+ "text": " following",
+ "logprob": -3.6445312,
+ "special": false
+ },
+ {
+ "id": 729,
+ "text": " function",
+ "logprob": -2.9648438,
+ "special": false
+ },
+ {
+ "id": 271,
+ "text": "\n\n",
+ "logprob": -1.9111328,
+ "special": false
+ },
+ {
+ "id": 31946,
+ "text": "Inputs",
+ "logprob": -1.6855469,
+ "special": false
+ },
+ {
+ "id": 25,
+ "text": ":",
+ "logprob": -1.6093254e-05,
+ "special": false
+ },
+ {
+ "id": 707,
+ "text": " def",
+ "logprob": -0.5678711,
+ "special": false
+ },
+ {
+ "id": 1477,
+ "text": " find",
+ "logprob": -2.5917969,
+ "special": false
+ },
+ {
+ "id": 6345,
+ "text": "_max",
+ "logprob": -1.8349609,
+ "special": false
+ }
+ ],
+ "top_tokens": null
+ },
+ "generated_text": " for the following function\n\nInputs: def find_max"
+ }
+]
diff --git a/integration-tests/models/test_flash_qwen2.py b/integration-tests/models/test_flash_qwen2.py
new file mode 100644
--- /dev/null
+++ b/integration-tests/models/test_flash_qwen2.py
@@ -0,0 +1,61 @@
+import pytest
+
+
[email protected](scope="module")
+def flash_qwen2_handle(launcher):
+ with launcher("Qwen/Qwen1.5-7B") as handle:
+ yield handle
+
+
[email protected](scope="module")
+async def flash_qwen2(flash_qwen2_handle):
+ await flash_qwen2_handle.health(300)
+ return flash_qwen2_handle.client
+
+
[email protected]
+async def test_flash_qwen2(flash_qwen2, response_snapshot):
+ response = await flash_qwen2.generate(
+ "Test request", max_new_tokens=10, decoder_input_details=True
+ )
+
+ assert response.details.generated_tokens == 10
+ assert response.generated_text == " for the following function\n\nInputs: def find_max"
+ assert response == response_snapshot
+
+
[email protected]
+async def test_flash_qwen2_all_params(flash_qwen2, response_snapshot):
+ response = await flash_qwen2.generate(
+ "Test request",
+ max_new_tokens=10,
+ repetition_penalty=1.2,
+ return_full_text=True,
+ stop_sequences=["test"],
+ temperature=0.5,
+ top_p=0.9,
+ top_k=10,
+ truncate=5,
+ typical_p=0.9,
+ watermark=True,
+ decoder_input_details=True,
+ seed=0,
+ )
+
+ assert response.details.generated_tokens == 10
+ assert response == response_snapshot
+
+
[email protected]
+async def test_flash_qwen2_load(flash_qwen2, generate_load, response_snapshot):
+ responses = await generate_load(
+ flash_qwen2, "Test request", max_new_tokens=10, n=4
+ )
+
+ assert len(responses) == 4
+ assert all(
+ [r.generated_text == responses[0].generated_text for r in responses]
+ ), f"{[r.generated_text for r in responses]}"
+ assert responses[0].generated_text == ": Let n = 10 - 1"
+
+ assert responses == response_snapshot
| Qwen1.5/Qwen2 model additions
### Model description
The [Qwen2](https://huggingface.co/Qwen/Qwen1.5-72B-Chat) model architecture has been added to `transformers` [version 4.37.0](https://github.com/huggingface/transformers/releases/tag/v4.37.0). It would be useful to be able to serve those models in TGI.
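For reference, with `transformers` >= 4.37.0 the checkpoints already load through the standard auto classes. A minimal sketch (the checkpoint matches the integration test above; `device_map="auto"` assumes `accelerate` is installed, and the prompt is illustrative):
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Qwen/Qwen1.5-7B"  # same checkpoint as the integration test above
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype="auto", device_map="auto"
)

# Tokenize a short prompt and generate a few tokens to confirm the weights load.
inputs = tokenizer("Test request", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```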
### Open source status
- [X] The model implementation is available
- [X] The model weights are available
### Provide useful links for the implementation
* [transformers pull request](https://github.com/huggingface/transformers/pull/28436)
* [vllm implementation of the qwen2 architecture](https://github.com/vllm-project/vllm/pull/2495)
| 2024-02-21T13:10:48 |
|
huggingface/text-generation-inference | 1,617 | huggingface__text-generation-inference-1617 | [
"1616"
] | 5a3903ba992b635e80a5df3977099b73cecfc92e | diff --git a/clients/python/text_generation/client.py b/clients/python/text_generation/client.py
--- a/clients/python/text_generation/client.py
+++ b/clients/python/text_generation/client.py
@@ -424,7 +424,7 @@ def __init__(
self.base_url = base_url
self.headers = headers
self.cookies = cookies
- self.timeout = ClientTimeout(timeout * 60)
+ self.timeout = ClientTimeout(timeout)
async def chat(
self,
| Incorrectly multiplied timeout by 60 in the asynchronous client
### System Info
I'm testing TGI using Docker. Below is the exact command I'm utilizing:
```console
docker run --gpus '"device=1,2"' --shm-size 1g -p 8000:80 -v ~/tgi-test:/data ghcr.io/huggingface/text-generation-inference:1.4 --model-id mistralai/Mistral-7B-v0.1 --max-input-length 8000 --max-total-tokens 8001
```
### Information
- [ ] Docker
- [ ] The CLI directly
### Tasks
- [ ] An officially supported command
- [ ] My own modifications
### Reproduction
Given the generation request:
```python
import time

async def test():
    start = time.time()
    try:
        response = await client.generate('1' * 6_000, max_new_tokens=1_800)
    except Exception:
        pass
    print(time.time() - start)
```
And this async client definition:
```python
client = AsyncClient('http://localhost:8000', timeout=1)
```
It doesn't timeout after 1 second:
```python
>>> await test()
60.88534379005432
```
But if we create a client with a timeout of 2/60:
```python
client = AsyncClient('http://localhost:8000', timeout=(2/60))
```
It does timeout after 2 seconds:
```python
>>> await test()
2.0035104751586914
```
### Expected behavior
The function should have timed out after 1 second with this client definition:
```python
client = AsyncClient('http://localhost:8000', timeout=1)
```
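As an illustration of the intended behaviour after the one-line fix above (a sketch; it assumes a TGI instance at `http://localhost:8000`, and the prompt length is only meant to force a slow generation): aiohttp's `ClientTimeout` is expressed in seconds, so the value passed to `AsyncClient` should be forwarded unchanged.
```python
import asyncio
import time

from text_generation import AsyncClient


async def check_timeout() -> None:
    # With the fix, timeout=1 maps to ClientTimeout(1), i.e. one second.
    client = AsyncClient("http://localhost:8000", timeout=1)
    start = time.time()
    try:
        await client.generate("1" * 6_000, max_new_tokens=1_800)
    except Exception:
        # The slow request should now abort after roughly one second.
        pass
    print(f"elapsed: {time.time() - start:.2f}s")


asyncio.run(check_timeout())
```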
| 2024-02-29T14:38:41 |
||
huggingface/text-generation-inference | 1,667 | huggingface__text-generation-inference-1667 | [
"1652"
] | deb440b3a2179b1eccce9cf5dc1d4ff0e8a03135 | diff --git a/clients/python/text_generation/client.py b/clients/python/text_generation/client.py
--- a/clients/python/text_generation/client.py
+++ b/clients/python/text_generation/client.py
@@ -67,6 +67,7 @@ def __init__(
def chat(
self,
messages: List[Message],
+ repetition_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
logit_bias: Optional[List[float]] = None,
logprobs: Optional[bool] = None,
@@ -87,9 +88,13 @@ def chat(
Args:
messages (`List[Message]`):
List of messages
- frequency_penalty (`float`):
- The parameter for frequency penalty. 0.0 means no penalty. See [this
+ repetition_penalty (`float`):
+ The parameter for repetition penalty. 0.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
+ frequency_penalty (`float`):
+ The parameter for frequency penalty. 0.0 means no penalty
+ Penalize new tokens based on their existing frequency in the text so far,
+ decreasing the model's likelihood to repeat the same line verbatim.
logit_bias (`List[float]`):
Adjust the likelihood of specified tokens
logprobs (`bool`):
@@ -121,6 +126,7 @@ def chat(
request = ChatRequest(
model="tgi",
messages=messages,
+ repetition_penalty=repetition_penalty,
frequency_penalty=frequency_penalty,
logit_bias=logit_bias,
logprobs=logprobs,
@@ -179,6 +185,7 @@ def generate(
max_new_tokens: int = 20,
best_of: Optional[int] = None,
repetition_penalty: Optional[float] = None,
+ frequency_penalty: Optional[float] = None,
return_full_text: bool = False,
seed: Optional[int] = None,
stop_sequences: Optional[List[str]] = None,
@@ -207,6 +214,10 @@ def generate(
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
+ frequency_penalty (`float`):
+ The parameter for frequency penalty. 1.0 means no penalty
+ Penalize new tokens based on their existing frequency in the text so far,
+ decreasing the model's likelihood to repeat the same line verbatim.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
@@ -245,6 +256,7 @@ def generate(
do_sample=do_sample,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
+ frequency_penalty=frequency_penalty,
return_full_text=return_full_text,
seed=seed,
stop=stop_sequences if stop_sequences is not None else [],
@@ -278,6 +290,7 @@ def generate_stream(
do_sample: bool = False,
max_new_tokens: int = 20,
repetition_penalty: Optional[float] = None,
+ frequency_penalty: Optional[float] = None,
return_full_text: bool = False,
seed: Optional[int] = None,
stop_sequences: Optional[List[str]] = None,
@@ -303,6 +316,10 @@ def generate_stream(
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
+ frequency_penalty (`float`):
+ The parameter for frequency penalty. 1.0 means no penalty
+ Penalize new tokens based on their existing frequency in the text so far,
+ decreasing the model's likelihood to repeat the same line verbatim.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
@@ -340,6 +357,7 @@ def generate_stream(
do_sample=do_sample,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
+ frequency_penalty=frequency_penalty,
return_full_text=return_full_text,
seed=seed,
stop=stop_sequences if stop_sequences is not None else [],
@@ -435,6 +453,7 @@ def __init__(
async def chat(
self,
messages: List[Message],
+ repetition_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
logit_bias: Optional[List[float]] = None,
logprobs: Optional[bool] = None,
@@ -455,9 +474,13 @@ async def chat(
Args:
messages (`List[Message]`):
List of messages
- frequency_penalty (`float`):
+ repetition_penalty (`float`):
The parameter for frequency penalty. 0.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
+ frequency_penalty (`float`):
+ The parameter for frequency penalty. 0.0 means no penalty
+ Penalize new tokens based on their existing frequency in the text so far,
+ decreasing the model's likelihood to repeat the same line verbatim.
logit_bias (`List[float]`):
Adjust the likelihood of specified tokens
logprobs (`bool`):
@@ -489,6 +512,7 @@ async def chat(
request = ChatRequest(
model="tgi",
messages=messages,
+ repetition_penalty=repetition_penalty,
frequency_penalty=frequency_penalty,
logit_bias=logit_bias,
logprobs=logprobs,
@@ -546,6 +570,7 @@ async def generate(
max_new_tokens: int = 20,
best_of: Optional[int] = None,
repetition_penalty: Optional[float] = None,
+ frequency_penalty: Optional[float] = None,
return_full_text: bool = False,
seed: Optional[int] = None,
stop_sequences: Optional[List[str]] = None,
@@ -574,6 +599,10 @@ async def generate(
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
+ frequency_penalty (`float`):
+ The parameter for frequency penalty. 1.0 means no penalty
+ Penalize new tokens based on their existing frequency in the text so far,
+ decreasing the model's likelihood to repeat the same line verbatim.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
@@ -614,6 +643,7 @@ async def generate(
do_sample=do_sample,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
+ frequency_penalty=frequency_penalty,
return_full_text=return_full_text,
seed=seed,
stop=stop_sequences if stop_sequences is not None else [],
@@ -644,6 +674,7 @@ async def generate_stream(
do_sample: bool = False,
max_new_tokens: int = 20,
repetition_penalty: Optional[float] = None,
+ frequency_penalty: Optional[float] = None,
return_full_text: bool = False,
seed: Optional[int] = None,
stop_sequences: Optional[List[str]] = None,
@@ -669,6 +700,10 @@ async def generate_stream(
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
+ frequency_penalty (`float`):
+ The parameter for frequency penalty. 1.0 means no penalty
+ Penalize new tokens based on their existing frequency in the text so far,
+ decreasing the model's likelihood to repeat the same line verbatim.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
@@ -706,6 +741,7 @@ async def generate_stream(
do_sample=do_sample,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
+ frequency_penalty=frequency_penalty,
return_full_text=return_full_text,
seed=seed,
stop=stop_sequences if stop_sequences is not None else [],
diff --git a/clients/python/text_generation/types.py b/clients/python/text_generation/types.py
--- a/clients/python/text_generation/types.py
+++ b/clients/python/text_generation/types.py
@@ -109,7 +109,12 @@ class ChatRequest(BaseModel):
model: str
# List of messages in the conversation
messages: List[Message]
- # Penalty for frequency of new tokens
+ # The parameter for repetition penalty. 1.0 means no penalty.
+ # See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
+ repetition_penalty: Optional[float] = None
+ # The parameter for frequency penalty. 1.0 means no penalty
+ # Penalize new tokens based on their existing frequency in the text so far,
+ # decreasing the model's likelihood to repeat the same line verbatim.
frequency_penalty: Optional[float] = None
# Bias values for token selection
logit_bias: Optional[List[float]] = None
@@ -145,6 +150,10 @@ class Parameters(BaseModel):
# The parameter for repetition penalty. 1.0 means no penalty.
# See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
repetition_penalty: Optional[float] = None
+ # The parameter for frequency penalty. 1.0 means no penalty
+ # Penalize new tokens based on their existing frequency in the text so far,
+ # decreasing the model's likelihood to repeat the same line verbatim.
+ frequency_penalty: Optional[float] = None
# Whether to prepend the prompt to the generated text
return_full_text: bool = False
# Stop generating tokens if a member of `stop_sequences` is generated
@@ -201,6 +210,12 @@ def valid_repetition_penalty(cls, v):
raise ValidationError("`repetition_penalty` must be strictly positive")
return v
+ @field_validator("frequency_penalty")
+ def valid_frequency_penalty(cls, v):
+ if v is not None and v <= 0:
+ raise ValidationError("`frequency_penalty` must be strictly positive")
+ return v
+
@field_validator("seed")
def valid_seed(cls, v):
if v is not None and v < 0:
| TGI server and python client's `generate`/`generate_stream` endpoints/methods should support `presence_penalty`/`frequency_penalty`
### System Info
2024-03-18T19:02:01.628354Z INFO text_generation_launcher: Runtime environment:
Target: x86_64-unknown-linux-gnu
Cargo version: 1.75.0
Commit sha: 9c1cb81cd8fc01c8f736c554f316ba42b0695717
Docker label: sha-9c1cb81
nvidia-smi:
Mon Mar 18 19:02:01 2024
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 525.85.12 Driver Version: 525.85.12 CUDA Version: 12.1 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 NVIDIA A100-SXM... On | 00000000:8A:00.0 Off | 0 |
| N/A 29C P0 74W / 400W | 76893MiB / 81920MiB | 0% Default |
| | | Disabled |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
TGI version: 1.4.2
### Information
- [ ] Docker
- [ ] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
The TGI python client's `generate` and `generate_stream` methods don't have support for `presence_penalty` or `frequency_penalty`. The TGI server /generate and /generate_stream endpoints support `frequency_penalty`, but not `presence_penalty`.
```
from text_generation import Client as TGIClient
client = TGIClient(service_url)
client.generate(...)
client.generate_stream(...)
```
The IDE shows no support for `presence_penalty` or `frequency_penalty` on these methods.
Running the TGI server and accessing the docs shows the API endpoint only supports `frequency_penalty`, but not `presence_penalty`:
<img width="424" alt="image" src="https://github.com/huggingface/text-generation-inference/assets/119249469/369dd0c8-83a1-40dd-a928-73c66e6ec178">
### Expected behavior
I would expect:
- The python client's `generate` and `generate_stream` methods to support passing in `frequency_penalty` and `presence_penalty` (see the usage sketch after this list)
- The TGI server's `/generate` and `/generate_stream` endpoints to support `presence_penalty`
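A rough usage sketch once the parameter is exposed (endpoint URL and penalty values are illustrative; only `frequency_penalty` is covered by the patch above, `presence_penalty` remains a request):
```python
from text_generation import Client

client = Client("http://localhost:8080")  # hypothetical local TGI endpoint

response = client.generate(
    "Write a short poem about the sea.",
    max_new_tokens=64,
    repetition_penalty=1.1,  # multiplicative penalty; 1.0 disables it
    frequency_penalty=0.5,   # client parameter added by the patch above
)
print(response.generated_text)
```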
| 2024-03-22T16:10:49 |
||
huggingface/text-generation-inference | 1,886 | huggingface__text-generation-inference-1886 | [
"1679"
] | d348d2b28feeaab7a8f6bd44cc8924b6b4ae7868 | diff --git a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
--- a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
@@ -18,9 +18,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import List, Optional, Tuple
+
import torch
import torch.distributed
-
from torch import nn
from transformers.activations import ACT2FN
from typing import Optional, List, Tuple
diff --git a/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py b/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py
--- a/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py
@@ -1,26 +1,21 @@
+from typing import List, Optional, Tuple
+
import torch
import torch.distributed
-
from torch import nn
-from transformers.modeling_utils import PreTrainedModel
from transformers.configuration_utils import PretrainedConfig
-from typing import Optional, List, Tuple
+from transformers.modeling_utils import PreTrainedModel
-from text_generation_server.utils import paged_attention, flash_attn
-from text_generation_server.utils.flash_attn import attention
from text_generation_server.layers import (
- TensorParallelRowLinear,
+ SpeculativeHead,
TensorParallelColumnLinear,
TensorParallelEmbedding,
- SpeculativeHead,
+ TensorParallelRowLinear,
get_linear,
)
-from text_generation_server.layers.layernorm import (
- FastLayerNorm,
-)
-from text_generation_server.layers.rotary import (
- PositionRotaryEmbedding,
-)
+from text_generation_server.layers.layernorm import FastLayerNorm
+from text_generation_server.layers.rotary import PositionRotaryEmbedding
+from text_generation_server.utils import flash_attn, paged_attention
def load_row(config, prefix: str, weights, bias: bool):
@@ -52,6 +47,7 @@ def __init__(
hidden_size=64,
num_hidden_layers=None,
num_attention_heads=None,
+ num_ln_in_prallel_attention=None,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
use_cache=True,
@@ -65,6 +61,7 @@ def __init__(
new_decoder_architecture=None,
bias=False,
parallel_attn=False,
+ rope_theta=10_000.0,
**kwargs,
):
if alibi:
@@ -75,6 +72,7 @@ def __init__(
self.model_type = model_type
self.alibi = False
self.rotary = True
+ self.rope_theta = rope_theta
self.vocab_size = vocab_size
# Backward compatibility with n_embed kwarg
@@ -91,6 +89,7 @@ def __init__(
else kwargs.pop("n_head", 8)
)
self.layer_norm_epsilon = layer_norm_epsilon
+ self.num_ln_in_parallel_attention = num_ln_in_prallel_attention
self.initializer_range = initializer_range
self.use_cache = use_cache
self.hidden_dropout = hidden_dropout
@@ -132,9 +131,13 @@ def __init__(
self.num_heads_kv = config.n_head_kv
self.hidden_size = config.hidden_size
self.head_size = self.hidden_size // self.num_heads
+ self.rope_theta = config.rope_theta
self.rotary_emb = PositionRotaryEmbedding.static(
- config=config, dim=self.head_size, base=10000.0, device=weights.device
+ config=config,
+ dim=self.head_size,
+ base=self.rope_theta,
+ device=weights.device,
)
self.softmax_scale = self.head_size ** (-0.5)
@@ -244,9 +247,13 @@ def __init__(
self.hidden_size = hidden_size
self.head_size = hidden_size // num_heads
self.num_groups = num_groups
+ self.rope_theta = config.rope_theta
self.rotary_emb = PositionRotaryEmbedding.static(
- config=config, dim=self.head_size, base=10000.0, device=weights.device
+ config=config,
+ dim=self.head_size,
+ base=self.rope_theta,
+ device=weights.device,
)
self.softmax_scale = self.head_size ** (-0.5)
@@ -257,7 +264,7 @@ def __init__(
if process_group.size() > self.num_groups:
raise NotImplementedError(
- f"Tensor Parallelism is not implemented for world_size > n groups"
+ "Tensor Parallelism is not implemented for world_size > n groups"
)
if self.num_groups % process_group.size() != 0:
raise NotImplementedError(
@@ -459,29 +466,61 @@ def forward(
max_s,
)
- hidden_states, residual = self.post_attention_layernorm(
- hidden_states, residual
- )
+ if self.post_attention_layernorm is not None:
+ hidden_states, residual = self.post_attention_layernorm(
+ hidden_states, residual
+ )
mlp_output = self.mlp(hidden_states)
return mlp_output, residual
+class FlashRWLayerNorm(nn.Module):
+ def __init__(self, config, prefix, weights):
+ super().__init__()
+ self.num_ln = config.num_ln_in_parallel_attn
+
+ if self.num_ln == 1:
+ self.input_ln = FastLayerNorm.load(
+ prefix=f"{prefix}.input_layernorm",
+ weights=weights,
+ eps=config.layer_norm_epsilon,
+ )
+ elif self.num_ln == 2:
+ self.ln_attn = FastLayerNorm.load(
+ prefix=f"{prefix}.ln_attn",
+ weights=weights,
+ eps=config.layer_norm_epsilon,
+ )
+ self.ln_mlp = FastLayerNorm.load(
+ prefix=f"{prefix}.ln_mlp",
+ weights=weights,
+ eps=config.layer_norm_epsilon,
+ )
+ else:
+ raise ValueError("Number of layer norms can either be 1 or 2.")
+
+ def forward(
+ self,
+ hidden_states,
+ residual,
+ ):
+ if self.num_ln == 1:
+ ln_hidden_states, residual = self.input_ln(hidden_states, residual)
+ return ln_hidden_states, ln_hidden_states, residual
+ elif self.num_ln == 2:
+ ln_attn, residual = self.ln_attn(hidden_states, residual)
+ ln_mlp, _ = self.ln_mlp(residual)
+ return ln_attn, ln_mlp, residual
+
+
class FlashRWLargeLayer(nn.Module):
def __init__(self, layer_id, config, weights):
super().__init__()
prefix = f"transformer.h.{layer_id}"
- self.ln_attn = FastLayerNorm.load(
- prefix=f"{prefix}.ln_attn",
- weights=weights,
- eps=config.layer_norm_epsilon,
- )
- self.ln_mlp = FastLayerNorm.load(
- prefix=f"{prefix}.ln_mlp",
- weights=weights,
- eps=config.layer_norm_epsilon,
- )
+
+ self.ln_layer = FlashRWLayerNorm(config, prefix, weights)
self.self_attention = FlashRWLargeAttention(
config,
@@ -507,8 +546,8 @@ def forward(
input_lengths,
max_s,
):
- ln_attn, residual = self.ln_attn(hidden_states, residual)
- ln_mlp, _ = self.ln_mlp(residual)
+ # Layer norm.
+ ln_attn, ln_mlp, residual = self.ln_layer(hidden_states, residual)
# Self attention.
attn_output = self.self_attention(
| Support for DBRX models
### Feature request
support new DBRX model(s): https://huggingface.co/databricks/dbrx-instruct
### Motivation
It is the new state-of-the-art Open LLM
### Your contribution
I have the resources to test any PR with the 132B model to confirm
| On it. We will have a PR ready tomorrow.
Let us know if we can help!
#1685 adds dbrx support.
Usage:
```shell
curl 127.0.0.1:3000/generate_stream \
-X POST \
-d '{"inputs":"<|im_start|> user\n Who is the President of the USA? <|im_end|> \n <|im_start|> assistant\n","parameters":{"max_new_tokens":50}}' \
-H 'Content-Type: application/json'
```
For now, it only supports eetq, bitsandbytes, bitsandbytes-nf4 and bitsandbytes-fp4 quantization methods.
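For anyone scripting against such a deployment, the curl call above translates roughly to the Python client as follows (a sketch; the address, prompt formatting and token count mirror the curl example, and the endpoint must already be running):
```python
from text_generation import Client

client = Client("http://127.0.0.1:3000")

prompt = (
    "<|im_start|> user\n Who is the President of the USA? <|im_end|> \n "
    "<|im_start|> assistant\n"
)
# Stream tokens and print the non-special ones as they arrive.
for response in client.generate_stream(prompt, max_new_tokens=50):
    if not response.token.special:
        print(response.token.text, end="", flush=True)
print()
```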
Fails on boot with the 1.4.5 docker image and the following arguments:
```
--model-id=databricks/dbrx-instruct
--revision=3b5d968eab47b0cb5b075fd984612b63f92841c2
--trust-remote-code
```
```
2024-03-29T23:39:50.846327Z ERROR text_generation_launcher: Error when initializing model
Traceback (most recent call last):
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/models/flash_dbrx.py", line 41, in __init__
tokenizer = GPT2TokenizerFast.from_pretrained(
File "/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 2086, in from_pretrained
return cls._from_pretrained(
File "/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 2120, in _from_pretrained
slow_tokenizer = (cls.slow_tokenizer_class)._from_pretrained(
File "/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 2325, in _from_pretrained
tokenizer = cls(*init_inputs, **init_kwargs)
File "/opt/conda/lib/python3.10/site-packages/transformers/models/gpt2/tokenization_gpt2.py", line 181, in __init__
with open(vocab_file, encoding="utf-8") as vocab_handle:
TypeError: expected str, bytes or os.PathLike object, not NoneType
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/models/flash_dbrx.py", line 52, in __init__
tokenizer = AutoTokenizer.from_pretrained(
File "/opt/conda/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py", line 818, in from_pretrained
tokenizer_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, **kwargs)
File "/opt/conda/lib/python3.10/site-packages/transformers/dynamic_module_utils.py", line 489, in get_class_from_dynamic_module
final_module = get_cached_module_file(
File "/opt/conda/lib/python3.10/site-packages/transformers/dynamic_module_utils.py", line 315, in get_cached_module_file
modules_needed = check_imports(resolved_module_file)
File "/opt/conda/lib/python3.10/site-packages/transformers/dynamic_module_utils.py", line 180, in check_imports
raise ImportError(
ImportError: This modeling file requires the following packages that were not found in your environment: tiktoken. Run `pip install tiktoken`
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.10/site-packages/typer/main.py", line 311, in __call__
return get_command(self)(*args, **kwargs)
File "/opt/conda/lib/python3.10/site-packages/click/core.py", line 1157, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.10/site-packages/typer/core.py", line 778, in main
return _main(
File "/opt/conda/lib/python3.10/site-packages/typer/core.py", line 216, in _main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.10/site-packages/click/core.py", line 1688, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.10/site-packages/click/core.py", line 1434, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.10/site-packages/click/core.py", line 783, in invoke
return __callback(*args, **kwargs)
File "/opt/conda/lib/python3.10/site-packages/typer/main.py", line 683, in wrapper
return callback(**use_params) # type: ignore
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/cli.py", line 89, in serve
server.serve(
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/server.py", line 235, in serve
asyncio.run(
File "/opt/conda/lib/python3.10/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.10/asyncio/base_events.py", line 636, in run_until_complete
self.run_forever()
File "/opt/conda/lib/python3.10/asyncio/base_events.py", line 603, in run_forever
self._run_once()
File "/opt/conda/lib/python3.10/asyncio/base_events.py", line 1909, in _run_once
handle._run()
File "/opt/conda/lib/python3.10/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
> File "/opt/conda/lib/python3.10/site-packages/text_generation_server/server.py", line 196, in serve_inner
model = get_model(
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/models/__init__.py", line 388, in get_model
return FlashDbrx(
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/models/flash_dbrx.py", line 63, in __init__
tokenizer = GPT2TokenizerFast.from_pretrained(
File "/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 2070, in from_pretrained
raise EnvironmentError(
OSError: Can't load tokenizer for 'Xenova/dbrx-instruct-tokenizer'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'Xenova/dbrx-instruct-tokenizer' is the correct path to a directory containing all relevant files for a GPT2TokenizerFast tokenizer.
2024-03-29T23:39:51.479717Z ERROR shard-manager: text_generation_launcher: Shard complete standard error output:
The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is ignored.
The tokenizer class you load from this checkpoint is not the same type as the class this function is called from. It may result in unexpected tokenization.
The tokenizer class you load from this checkpoint is 'TiktokenTokenizerWrapper'.
The class this function is called from is 'GPT2Tokenizer'.
Traceback (most recent call last):
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/models/flash_dbrx.py", line 41, in __init__
tokenizer = GPT2TokenizerFast.from_pretrained(
File "/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 2086, in from_pretrained
return cls._from_pretrained(
File "/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 2120, in _from_pretrained
slow_tokenizer = (cls.slow_tokenizer_class)._from_pretrained(
File "/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 2325, in _from_pretrained
tokenizer = cls(*init_inputs, **init_kwargs)
File "/opt/conda/lib/python3.10/site-packages/transformers/models/gpt2/tokenization_gpt2.py", line 181, in __init__
with open(vocab_file, encoding="utf-8") as vocab_handle:
TypeError: expected str, bytes or os.PathLike object, not NoneType
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/models/flash_dbrx.py", line 52, in __init__
tokenizer = AutoTokenizer.from_pretrained(
File "/opt/conda/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py", line 818, in from_pretrained
tokenizer_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, **kwargs)
File "/opt/conda/lib/python3.10/site-packages/transformers/dynamic_module_utils.py", line 489, in get_class_from_dynamic_module
final_module = get_cached_module_file(
File "/opt/conda/lib/python3.10/site-packages/transformers/dynamic_module_utils.py", line 315, in get_cached_module_file
modules_needed = check_imports(resolved_module_file)
File "/opt/conda/lib/python3.10/site-packages/transformers/dynamic_module_utils.py", line 180, in check_imports
raise ImportError(
ImportError: This modeling file requires the following packages that were not found in your environment: tiktoken. Run `pip install tiktoken`
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/cli.py", line 89, in serve
server.serve(
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/server.py", line 235, in serve
asyncio.run(
File "/opt/conda/lib/python3.10/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.10/asyncio/base_events.py", line 649, in run_until_complete
return future.result()
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/server.py", line 196, in serve_inner
model = get_model(
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/models/__init__.py", line 388, in get_model
return FlashDbrx(
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/models/flash_dbrx.py", line 63, in __init__
tokenizer = GPT2TokenizerFast.from_pretrained(
File "/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 2070, in from_pretrained
raise EnvironmentError(
OSError: Can't load tokenizer for 'Xenova/dbrx-instruct-tokenizer'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'Xenova/dbrx-instruct-tokenizer' is the correct path to a directory containing all relevant files for a GPT2TokenizerFast tokenizer.
```
Using eetq gives an error that the datatype must be float16 or float32.
I can confirm the 16bit works very well! Thanks.
Yes, can confirm 16 bit runs fine on 4x A100 80 GB SXM.
- eetq gives an error that the datatype must be float16 or float32.
- 16 bit on 8x A6000 gives "text_generation_launcher: Method Warmup encountered an error." at the final warmup stage. See [this issue](https://github.com/TrelisResearch/one-click-llms/issues/5)
Are you running on Docker?
Getting:
`ValueError("sharded is not supported for AutoModel")`
When deploying via SageMaker:
```
hub = {
'HF_MODEL_ID': "databricks/dbrx-instruct", # model_id from hf.co/models
'SM_NUM_GPUS': json.dumps(8), # Number of GPU used per replica
'TOKEN': 'true',
'TRUST_REMOTE_CODE': 'true',
# 'REVISION': "e0bbb53cee412aba95f3b3fa4fc0265b1a0788b2",
'HUGGING_FACE_HUB_TOKEN': "<token>",
'MAX_INPUT_LENGTH': json.dumps(24000), # Max length of input text
'MAX_BATCH_PREFILL_TOKENS': json.dumps(32000), # Number of tokens for the prefill operation.
'MAX_TOTAL_TOKENS': json.dumps(32000), # Max length of the generation (including input text)
'MAX_BATCH_TOTAL_TOKENS': json.dumps(512000), # Limits the number of tokens that can be processed in parallel during the generation
# ,'HF_MODEL_QUANTIZE': "awq", # comment in to quantize not supported yet
}
role="arn:aws:iam::153172432509:role/Llama2_13B_Role" # generic name but this is correct role
llm_image ="763104351884.dkr.ecr.us-east-2.amazonaws.com/huggingface-pytorch-tgi-inference:2.1.1-tgi1.3.1-gpu-py310-cu121-ubuntu20.04-v1.0"
huggingface_model = HuggingFaceModel(
image_uri =llm_image,
env=hub,
role=role,
)
predictor = huggingface_model.deploy(
initial_instance_count=1,
instance_type="ml.g5.48xlarge",
endpoint_name=endpoint_name_var,
container_startup_health_check_timeout=300,
)
```
> Are you running on Docker?
Yes, this is using the latest TGI docker image.
@RonanKMcGovern can you share command args you are using?
Sure
> @RonanKMcGovern can you share command args you are using?
Sure, it's this runpod template here: https://runpod.io/console/gpu-cloud?template=tlt1i1welu&ref=jmfkcdio
And the arguments for eetq would be:
```
--model-id databricks/dbrx-instruct --trust-remote-code --port 8080 --max-input-length 3000 --max-total-tokens 4096 --max-batch-prefill-tokens 4096 --quantize eetq
```
@RonanKMcGovern with same args I have an error message: `RuntimeError: Invalid datatype. Weight must be FP16 or FP32`
> @RonanKMcGovern with same args I have an error message: `RuntimeError: Invalid datatype. Weight must be FP16 or FP32`
Exactly, same as my note above on eetq
@RonanKMcGovern can you try:
```shell
--model-id databricks/dbrx-instruct --trust-remote-code --port 8080 --max-input-length 3000 --max-total-tokens 4096 --max-batch-prefill-tokens 4096 --quantize eetq --dtype float16
```
in the meantime?
Note the added `--dtype float16` at the end.
I will patch the issue later today.
@OlivierDehaene `RuntimeError: Only 1 can be set between `dtype` and `quantize`, as they both decide how goes the final model.` | 2024-05-14T06:31:19 |
|
huggingface/text-generation-inference | 1,910 | huggingface__text-generation-inference-1910 | [
"1842"
] | b3dd3902e76df777d28ee76993800f4baf73c40c | diff --git a/server/text_generation_server/server.py b/server/text_generation_server/server.py
--- a/server/text_generation_server/server.py
+++ b/server/text_generation_server/server.py
@@ -35,9 +35,6 @@ def exit_gracefully(self, signum, frame):
self.KEEP_PROCESSING = False
-signal_handler = SignalHandler()
-
-
class TextGenerationService(generate_pb2_grpc.TextGenerationServiceServicer):
def __init__(
self,
@@ -251,7 +248,7 @@ async def serve_inner(
await server.start()
logger.info("Server started at {}".format(local_url))
-
+ signal_handler = SignalHandler()
while signal_handler.KEEP_PROCESSING:
await asyncio.sleep(0.5)
| Unable to stop TGI after serving models
### System Info
I use the official docker image: ghcr.io/huggingface/text-generation-inference:2.0.1
### Information
- [X] Docker
- [ ] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
I used the following command to serve the model. After TGI finishes model sharding/loading and starts serving, I cannot use `Ctrl+C` to terminate the server.
```bash
model=mistralai/Mixtral-8x7B-Instruct-v0.1
volume=/my_path_for_hf_cache
token="myhftokens"
docker run --gpus '"device=4,5"' \
--shm-size 20g \
-e HUGGING_FACE_HUB_TOKEN=$token \
-p 8080:80 \
-v $volume:/data ghcr.io/huggingface/text-generation-inference:2.0.1 \
--model-id $model \
--sharded true \
--quantize eetq \
--max-input-length 10240 \
--max-batch-prefill-tokens 10240 \
--max-total-tokens 32768 \
--port 80
```
### Expected behavior
In previous versions 1.3.0 and 1.4.0, I could use `Ctrl+C` to terminate the server, but this is not the case for 2.0.1. My current workaround is to use a docker command to kill the container. Not sure if this is a good way?
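For context, the serving loop touched by the patch above follows this general shape (a simplified sketch, not the full TGI server code; the essence of the fix is that the handler is created inside the serving coroutine, in the right process, rather than at module import time):
```python
import asyncio
import signal


class SignalHandler:
    KEEP_PROCESSING = True

    def __init__(self):
        # Register handlers when the instance is created.
        signal.signal(signal.SIGINT, self.exit_gracefully)
        signal.signal(signal.SIGTERM, self.exit_gracefully)

    def exit_gracefully(self, signum, frame):
        print(f"Exiting gracefully: Signal {signum}")
        self.KEEP_PROCESSING = False


async def serve_inner():
    # ... start the gRPC server here ...
    signal_handler = SignalHandler()
    while signal_handler.KEEP_PROCESSING:
        await asyncio.sleep(0.5)


asyncio.run(serve_inner())
```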
| Same here, it seems to come from #1716: https://github.com/huggingface/tgi-gaudi/pull/134#issuecomment-2095365083 | 2024-05-16T15:59:45 |
|
huggingface/text-generation-inference | 1,947 | huggingface__text-generation-inference-1947 | [
"1891"
] | 954653466d24a9b3435988136983398bdf788a2f | diff --git a/server/text_generation_server/models/flash_llama.py b/server/text_generation_server/models/flash_llama.py
--- a/server/text_generation_server/models/flash_llama.py
+++ b/server/text_generation_server/models/flash_llama.py
@@ -3,7 +3,6 @@
from opentelemetry import trace
from transformers import AutoConfig, AutoTokenizer, GenerationConfig
-from transformers.models.llama import LlamaTokenizer
from typing import Optional
from text_generation_server.models import FlashCausalLM
@@ -41,22 +40,13 @@ def __init__(
else:
raise NotImplementedError("FlashLlama is only available on GPU")
- try:
- tokenizer = LlamaTokenizer.from_pretrained(
- model_id,
- revision=revision,
- padding_side="left",
- truncation_side="left",
- trust_remote_code=trust_remote_code,
- )
- except Exception:
- tokenizer = AutoTokenizer.from_pretrained(
- model_id,
- revision=revision,
- padding_side="left",
- truncation_side="left",
- trust_remote_code=trust_remote_code,
- )
+ tokenizer = AutoTokenizer.from_pretrained(
+ model_id,
+ revision=revision,
+ padding_side="left",
+ truncation_side="left",
+ trust_remote_code=trust_remote_code,
+ )
try:
generation_config = GenerationConfig.from_pretrained(
model_id, revision=revision, trust_remote_code=trust_remote_code
| TGI 2.0.2 CodeLlama error `piece id is out of range.`
### System Info
ghcr.io/huggingface/text-generation-inference:2.0.2
### Information
- [X] Docker
- [ ] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
```bash
model=philschmid/code-llama-7b-text-to-sql
num_shard=1
max_input_length=2048
max_total_tokens=4096
max_prefill_token=4096 # 4096
docker run --gpus all -ti -p 8080:80 \
-e MODEL_ID=$model \
-e NUM_SHARD=$num_shard \
-e MAX_INPUT_LENGTH=$max_input_length \
-e MAX_TOTAL_TOKENS=$max_total_tokens \
-e MAX_BATCH_PREFILL_TOKENS=$max_prefill_token \
-e HF_TOKEN=$(cat ~/.cache/huggingface/token) \
ghcr.io/huggingface/text-generation-inference:2.0.2
```
### Expected behavior
Running Endpoints as with version 2.0.0
### Error
```bash
2024-05-14T12:30:52.830987Z ERROR text_generation_launcher: Method Warmup encountered an error.
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.10/site-packages/typer/main.py", line 311, in __call__
return get_command(self)(*args, **kwargs)
File "/opt/conda/lib/python3.10/site-packages/click/core.py", line 1157, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.10/site-packages/typer/core.py", line 778, in main
return _main(
File "/opt/conda/lib/python3.10/site-packages/typer/core.py", line 216, in _main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.10/site-packages/click/core.py", line 1688, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.10/site-packages/click/core.py", line 1434, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.10/site-packages/click/core.py", line 783, in invoke
return __callback(*args, **kwargs)
File "/opt/conda/lib/python3.10/site-packages/typer/main.py", line 683, in wrapper
return callback(**use_params) # type: ignore
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/cli.py", line 90, in serve
server.serve(
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/server.py", line 253, in serve
asyncio.run(
File "/opt/conda/lib/python3.10/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.10/asyncio/base_events.py", line 636, in run_until_complete
self.run_forever()
File "/opt/conda/lib/python3.10/asyncio/base_events.py", line 603, in run_forever
self._run_once()
File "/opt/conda/lib/python3.10/asyncio/base_events.py", line 1909, in _run_once
handle._run()
File "/opt/conda/lib/python3.10/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/opt/conda/lib/python3.10/site-packages/grpc_interceptor/server.py", line 165, in invoke_intercept_method
return await self.intercept(
> File "/opt/conda/lib/python3.10/site-packages/text_generation_server/interceptor.py", line 21, in intercept
return await response
File "/opt/conda/lib/python3.10/site-packages/opentelemetry/instrumentation/grpc/_aio_server.py", line 82, in _unary_interceptor
raise error
File "/opt/conda/lib/python3.10/site-packages/opentelemetry/instrumentation/grpc/_aio_server.py", line 73, in _unary_interceptor
return await behavior(request_or_iterator, context)
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/server.py", line 114, in Warmup
max_supported_total_tokens = self.model.warmup(batch)
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/models/flash_causal_lm.py", line 776, in warmup
_, batch, _ = self.generate_token(batch)
File "/opt/conda/lib/python3.10/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/models/flash_causal_lm.py", line 1206, in generate_token
toptoken_texts = self.tokenizer.batch_decode(
File "/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 3771, in batch_decode
return [
File "/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 3772, in <listcomp>
self.decode(
File "/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 3811, in decode
return self._decode(
File "/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils.py", line 1001, in _decode
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
File "/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils.py", line 973, in convert_ids_to_tokens
return self._convert_id_to_token(ids)
File "/opt/conda/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama.py", line 277, in _convert_id_to_token
token = self.sp_model.IdToPiece(index)
File "/opt/conda/lib/python3.10/site-packages/sentencepiece/__init__.py", line 1045, in _batched_func
return _func(self, arg)
File "/opt/conda/lib/python3.10/site-packages/sentencepiece/__init__.py", line 1038, in _func
raise IndexError('piece id is out of range.')
IndexError: piece id is out of range.
2024-05-14T12:30:52.841680Z ERROR warmup{max_input_length=2048 max_prefill_tokens=4096 max_total_tokens=4096 max_batch_size=None}:warmup: text_generation_client: router/client/src/lib.rs:33: Server error: piece id is out of range.
Error: Warmup(Generation("piece id is out of range."))
2024-05-14T12:30:52.870991Z ERROR text_generation_launcher: Webserver Crashed
```
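The patch above drops the `LlamaTokenizer` fallback in favour of `AutoTokenizer`. A quick way to sanity-check the tokenizer outside TGI (a sketch; the model id comes from the reproduction above and may require an authenticated Hugging Face login):
```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "philschmid/code-llama-7b-text-to-sql",
    padding_side="left",
    truncation_side="left",
)

# The slow sentencepiece-backed LlamaTokenizer raised "piece id is out of range."
# while decoding during warmup; the tokenizer resolved by AutoTokenizer should
# round-trip a prompt without hitting that path.
ids = tokenizer("SELECT * FROM users;")["input_ids"]
print(tokenizer.decode(ids))
```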
| 2024-05-24T10:03:35 |
||
huggingface/text-generation-inference | 2,060 | huggingface__text-generation-inference-2060 | [
"2055"
] | 376a0b7ada91548a68798383cb008ea01c728b30 | diff --git a/server/text_generation_server/layers/rotary.py b/server/text_generation_server/layers/rotary.py
--- a/server/text_generation_server/layers/rotary.py
+++ b/server/text_generation_server/layers/rotary.py
@@ -267,19 +267,21 @@ def _update_cos_sin_cache(self, dtype, device, seqlen):
or self._cos_cached.dtype != dtype
):
self._seq_len_cached = seqlen
- if seqlen > self.original_max_position_embeddings:
- inv_freq = self.long_inv_freq
- else:
- inv_freq = self.short_inv_freq
- t = torch.arange(seqlen, device=device, dtype=inv_freq.dtype)
- if self.scaling_factor is not None:
- t /= self.scaling_factor
- # Don't do einsum, it converts fp32 to fp16
- # freqs = torch.einsum("i,j->ij", t, self.inv_freq)
- freqs = torch.outer(t, inv_freq.to(device=t.device))
- self._cos_cached = torch.cos(freqs).to(dtype)
- self._sin_cached = torch.sin(freqs).to(dtype)
+ t = torch.arange(seqlen, device=device, dtype=self.short_inv_freq.dtype)
+ short_freqs = torch.outer(
+ t[: self.original_max_position_embeddings],
+ self.short_inv_freq.to(device=t.device),
+ )
+ long_freqs = torch.outer(
+ t[self.original_max_position_embeddings :],
+ self.long_inv_freq.to(device=t.device),
+ )
+
+ freqs = torch.cat([short_freqs, long_freqs])
+
+ self._cos_cached = (torch.cos(freqs) * self.scaling_factor).to(dtype)
+ self._sin_cached = (torch.sin(freqs) * self.scaling_factor).to(dtype)
class DynamicPositionRotaryEmbedding(PositionRotaryEmbedding):
diff --git a/server/text_generation_server/models/flash_phi.py b/server/text_generation_server/models/flash_phi.py
--- a/server/text_generation_server/models/flash_phi.py
+++ b/server/text_generation_server/models/flash_phi.py
@@ -8,7 +8,6 @@
from text_generation_server.models import FlashCausalLM
from text_generation_server.models.custom_modeling.flash_phi_modeling import (
FlashPhiForCausalLM,
- PhiConfig,
)
from text_generation_server.utils import (
initialize_torch_distributed,
@@ -44,7 +43,7 @@ def __init__(
trust_remote_code=trust_remote_code,
)
- config = PhiConfig.from_pretrained(
+ config = AutoConfig.from_pretrained(
model_id, revision=revision, trust_remote_code=trust_remote_code
)
config.quantize = quantize
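A compact way to read the rotary change above (a sketch of the intent, using the names from the patch): positions below `original_max_position_embeddings` keep the short inverse frequencies, later positions use the long ones, and both halves of the cos/sin cache are rescaled by `scaling_factor` instead of dividing the positions by it.
```latex
\cos_p = s \cdot \cos(p \, f_p), \qquad
\sin_p = s \cdot \sin(p \, f_p), \qquad
f_p =
\begin{cases}
f_{\text{short}} & p < L_{\text{orig}} \\
f_{\text{long}}  & p \ge L_{\text{orig}}
\end{cases}
```
with $s$ = `scaling_factor` and $L_{\text{orig}}$ = `original_max_position_embeddings`.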
| Poor/inconsistent results from Phi-3-mini-128k
Phi-3-mini-128k runs, but the results I'm seeing from TGI are not very good and are inconsistent with other inference methods.
For each result below, I supply the same pairwise comparison prompt:
```
<|user|>
Consider this information about Johnny:
Johnny likes apples. Johnny hates peanuts.
Now consider these options:
Option A: Pecan Pie
Option B: Peach Pie
Consider the information and brainstorm on items Johnny might like. Be thorough and consider all possibilities.
Next, use the brainstorm to pick which option Johnny would prefer.
End your response with the option you choose (Either **Option A** or **Option B**).<|end|>
<|assistant|>
```
## Running on "Other methods"
I get great results from NIM https://build.nvidia.com/microsoft/phi-3-mini:
```
Given the information, we know Johnny likes apples and hates peanuts. Let's brainstorm on items Johnny might like based on this information:
Apple-based desserts: Since Johnny likes apples, he might enjoy desserts that contain apples. This could include apple pies, apple crumble, apple tarts, apple crisp, apple cake, etc.
Non-peanut based desserts: Since Johnny hates peanuts, he would likely not enjoy any desserts that contain peanuts. This eliminates any desserts that have peanuts as an ingredient, such as peanut butter pie, peanut brittle, peanut cookies, etc.
Now, let's consider the two options:
Option A: Pecan Pie - Pecan pie is a dessert that contains pecans, which are a type of nut. Although Johnny doesn't have a specific dislike for all nuts, he does hate peanuts. It's possible that he might not like pecans as well, but we don't have enough information to be certain.
Option B: Peach Pie - Peach pie is a dessert that contains peaches, which are a type of fruit. Since Johnny likes apples, it's reasonable to assume that he might also enjoy other fruits, such as peaches.
Based on the information provided and the brainstorming, Option B: Peach Pie seems to be the more likely choice for Johnny, as it aligns more closely with his preference for fruit (apples) and doesn't contain any peanuts.
```
I get great results from `Transformers` (running `python` on the TGI:2.0.4 container):
```
Given Johnny's preferences, we know he likes apples and hates peanuts. Let's brainstorm on items Johnny might like based on this information:
1. Apple-based desserts or dishes: Since Johnny likes apples, he might enjoy any dish or dessert that features apples as a primary ingredient. This could include apple pies, apple crisps, apple tarts, apple cakes, apple sauce, and more.
2. Non-peanut-based desserts or dishes: Johnny hates peanuts, so he would likely not enjoy any dish or dessert that contains peanuts or peanut-based ingredients. This eliminates any desserts or dishes that include peanuts, such as peanut butter cookies, peanut butter pies, or peanut brittle.
Now, let's consider the two options provided:
Option A: Pecan Pie
Pecan pie is a dessert that contains pecans, which are a type of nut. Although Johnny doesn't have a stated preference for or against nuts in general, we know he hates peanuts. Since pecans are different from peanuts, it's possible that Johnny might not have an issue with pecan pie. However, without more information about Johnny's feelings towards other nuts, we can't definitively say he would prefer this option.
Option B: Peach Pie
Peach pie is a dessert that features peaches as the primary fruit ingredient. Given that Johnny likes apples, it's reasonable to assume that he might also enjoy other fruits, such as peaches. Since there are no peanuts or peanut-based ingredients in peach pie, this option aligns well with Johnny's preferences.
Based on the information provided and the brainstorming process, **Option B: Peach Pie** seems to be the more likely choice for Johnny, as it aligns with his preference for fruits (apples and potentially peaches) and his dislike for peanuts.
```
## Running on TGI 2.0.4
I get okay-ish results when I run TGI:2.0.4 with the default parameters:
```
Given Johnny's preferences, we know he likes apples and dislikes peanuts. Neither pecan nor peach pies directly relate to apples, but we can infer preferences based on the given information.
- **Option A: Pecan Pie** - Pecans are not directly related to apples, but pecans are a type of nut, and Johnny hates peanuts, which are also nuts. This might make him less likely to prefer a pecan pie due to his aversion to nuts.
- **Option B: Peach Pie** - Peaches are fruits, and there's no direct relation to apples in the information provided. However, since there's no negative association mentioned, it's a safer guess that Johnny might not have an issue with peaches.
Given the information, **Option B: Peach Pie** seems to be the safer choice, as it doesn't directly conflict with any of Johnny's stated preferences.
```
I get poor (but fluent) results when I increase the token sizes to 5k (input) / 7k (total), still using the same prompt:
```
Given the information provided, Johnny likes apples and dislikes peanuts. Neither pecan nor peach is directly related to apples or peanuts. However, if we consider the options as food items, a peach pie could be more closely related to apples as both are fruits. Therefore, Johnny might have a slight preference for the peach pie. But this is a very weak preference and not a strong preference. The information given does not strongly indicate a clear preference for either option. However, if we must choose one, Option B: Peach Pie could be a slightly more likely choice.
```
### Information
- [X] Docker
- [ ] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
Note: I'm running these commands on an AWS g6.2xlarge instance
### Reproducing the "Transformers" results
1. Run python on a TGI container
```
docker run -it --rm --name tgi --gpus all --shm-size 2g \
--entrypoint python \
ghcr.io/huggingface/text-generation-inference:2.0.4
```
2. Run this python code:
```
prompt = """<|user|>
Consider this information about Johnny:
Johnny likes apples. Johnny hates peanuts.
Now consider these options:
Option A: Pecan Pie
Option B: Peach Pie
Consider the information and brainstorm on items Johnny might like. Be thorough and consider all possibilities.
Next, use the brainstorm to pick which option Johnny would prefer.
End your response with the option you choose (Either **Option A** or **Option B**).<|end|>
<|assistant|>"""
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
model_id = "microsoft/Phi-3-mini-128k-instruct"
model = AutoModelForCausalLM.from_pretrained(
model_id,
device_map="cuda",
torch_dtype="auto",
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
pipe = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
)
generation_args = {
"max_new_tokens": 500,
"return_full_text": False,
"do_sample": False,
}
output = pipe(prompt, **generation_args)
print(output[0]['generated_text'])
```
### Reproducing the "TGI" results
1. Run TGI
Either:
```
docker run -d --rm --name tgi -p 8080:80 --gpus all --shm-size 2g \
ghcr.io/huggingface/text-generation-inference:2.0.4 \
--model-id microsoft/Phi-3-mini-128k-instruct
```
or
```
docker run -d --rm --name tgi -p 8080:80 --gpus all --shm-size 2g \
ghcr.io/huggingface/text-generation-inference:2.0.4 \
--model-id microsoft/Phi-3-mini-128k-instruct \
--max-batch-prefill-tokens=5000 --max-total-tokens=7000 --max-input-tokens=5000
```
2. Query the endpoint and print the results
```
curl 127.0.0.1:8080/generate \
-X POST \
-d '{
"inputs": "Consider this information about Johnny:\n\nJohnny likes apples. Johnny hates peanuts.\n\nNow consider these options:\n\nOption A: Pecan Pie\nOption B: Peach Pie\n\nConsider the information and brainstorm on items Johnny might like. Be thorough and consider all possibilities.\n\nNext, use the brainstorm to pick which option Johnny would prefer.\n\nEnd your response with the option you choose (Either **Option A** or **Option B**).",
"parameters": {
"max_new_tokens": 1000,
"do_sample": false
}
}' \
-H 'Content-Type: application/json' | jq -r '.generated_text'
```
### Expected behavior
I expect TGI to produce results consistent with other inference servers and Transformers
| Note: The "Johnny" pairwise prompt is a stand-in for the actual prompts I'm using. The results for my actual prompts are even more disappointing. I simply get "Option A" or "Option B" from TGI. | 2024-06-12T15:09:41 |
|
keras-team/keras | 63 | keras-team__keras-63 | [
"59"
] | df4d37054e000cb247f54c852993e6a4757745c1 | diff --git a/keras/models.py b/keras/models.py
--- a/keras/models.py
+++ b/keras/models.py
@@ -211,7 +211,31 @@ def evaluate(self, X, y, batch_size=128, show_accuracy=False, verbose=1):
return tot_score/len(batches)
-
-
+ def save_weights(self, filepath):
+ # Save weights from all layers to HDF5
+ import h5py
+ # FIXME: fail if file exists, or add option to overwrite!
+ f = h5py.File(filepath, 'w')
+ f.attrs['nb_layers'] = len(self.layers)
+ for k, l in enumerate(self.layers):
+ g = f.create_group('layer_{}'.format(k))
+ weights = l.get_weights()
+ g.attrs['nb_params'] = len(weights)
+ for n, param in enumerate(weights):
+ param_name = 'param_{}'.format(n)
+ param_dset = g.create_dataset(param_name, param.shape, dtype='float64')
+ param_dset[:] = param
+ f.flush()
+ f.close()
+
+ def load_weights(self, filepath):
+ # Loads weights from HDF5 file
+ import h5py
+ f = h5py.File(filepath)
+ for k in range(f.attrs['nb_layers']):
+ g = f['layer_{}'.format(k)]
+ weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
+ self.layers[k].set_weights(weights)
+ f.close()
| diff --git a/test/test_save_weights.py b/test/test_save_weights.py
new file mode 100644
--- /dev/null
+++ b/test/test_save_weights.py
@@ -0,0 +1,40 @@
+from keras.models import Sequential
+from keras.layers.core import Dense, Dropout, Activation
+from keras.optimizers import SGD
+
+import sys
+sys.setrecursionlimit(10000) # to be able to pickle Theano compiled functions
+
+import pickle, numpy
+
+def create_model():
+ model = Sequential()
+ model.add(Dense(256, 2048, init='uniform', activation='relu'))
+ model.add(Dropout(0.5))
+ model.add(Dense(2048, 2048, init='uniform', activation='relu'))
+ model.add(Dropout(0.5))
+ model.add(Dense(2048, 2048, init='uniform', activation='relu'))
+ model.add(Dropout(0.5))
+ model.add(Dense(2048, 2048, init='uniform', activation='relu'))
+ model.add(Dropout(0.5))
+ model.add(Dense(2048, 256, init='uniform', activation='linear'))
+ return model
+
+model = create_model()
+sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
+model.compile(loss='mse', optimizer=sgd)
+
+pickle.dump(model, open('/tmp/model.pkl', 'wb'))
+model.save_weights('/tmp/model_weights.hdf5')
+
+model_loaded = create_model()
+model_loaded.load_weights('/tmp/model_weights.hdf5')
+
+for k in range(len(model.layers)):
+ weights_orig = model.layers[k].get_weights()
+ weights_loaded = model_loaded.layers[k].get_weights()
+ for x, y in zip(weights_orig, weights_loaded):
+ if numpy.any(x != y):
+ raise ValueError('Loaded weights are different from pickled weights!')
+
+
| Model serialization
This discussion started in #51, but as things can get complicated I decided to start another issue.
It seems to be a good idea to store weights for a model separately in an HDF5 file (or even a Numpy npy file, but HDF5 would be more portable). I wanted to compare how large a serialized model is with and without the weights, so I did the following test:
```
model = Sequential()
model.add(Dense(n_input, 2048, init='uniform', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2048, 2048, init='uniform', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2048, 2048, init='uniform', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2048, 2048, init='uniform', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2048, n_output, init='uniform', activation='linear'))
```
(the model is intentionally large!)
I then compiled the model, serialized it after compilation, and removed the weights and post-compilation Theano objects/functions as follows:
```
for l in model.layers:
weights.append(l.get_weights())
l.params = []
try:
l.W = None
l.b = None
except ValueError:
pass
model.X = None
model.y = None
model.y_test = None
model.y_train = None
model._train = None
model._train_with_acc = None
model._test = None
model._test_with_acc = None
model._predict = None
```
The full compiled model ends up at 243 MB, and the cleaned-up model at 120 MB (which is exactly what we would get from pickling the non-compiled model with the weight matrices deleted). Is there anything else we could remove to make the serialized model smaller?
| In theory, besides the weights, we only need the parameters to the .compile() method, the names of the layer classes (or rather their import path) and the parameters that were passed to each layer. It should take a few kilobytes in total.
There are hack-ish ways to recover attributes from an arbitrary layer, but that's kind of ugly. Here's an example:
``` python
for l in layers:
attributes = []
for a in dir(l):
if a[:2] != '__' and a not in ['params', 'previous_layer', 'input']:
if type(getattr(l, a)) != types.MethodType:
attributes.append((a, getattr(l, a)))
```
Besides being ugly, it might be somewhat brittle.
Here's a better alternative: have all layers expose a .config or .get_config attribute or method that returns the constructor arguments to the layer, as a dict. Then it's easy to reconstruct the layer:
```
config = layer.get_config()
weights = layer.get_weights()
new_layer = Layer(**config)
new_layer.set_weights(weights)
```
That's a good approach, but we'll also need the correct layer type and a way to call the right constructor, as we cannot call the base class constructor with arbitrary params. Some alternatives could be:
1. Long function matching each layer class (which would be a pain to maintain).
2. Keeping all the layer subclasses in a list and matching over that list (with `__name__`). Also not great for maintaining, but better than 1.
3. Dynamically calling class names from strings, which is possible but hacky and unsafe.
I think the simplest thing to do would be to dynamically instantiate the layers from their import path, which we would have saved as part of the serialization. This gives users the freedom to use the .save() function with custom layers that are not part of the Keras source.
Of course, it's sort of hacky. But at this point all our options are looking somewhat hacky.
Alternatively, to do something cleaner we would have to restrict ourselves to known layers from the source.
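For illustration, a rough sketch of the import-path idea. The `layer_from_config` helper is hypothetical, and it assumes `get_config()` returns the constructor kwargs as proposed above:

``` python
import importlib

def layer_from_config(import_path, config, weights=None):
    # e.g. import_path = "keras.layers.core.Dense", config = layer.get_config()
    module_name, class_name = import_path.rsplit('.', 1)
    cls = getattr(importlib.import_module(module_name), class_name)
    layer = cls(**config)
    if weights is not None:
        layer.set_weights(weights)
    return layer
```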
Maybe we should search for inspiration somewhere else. Do you know how Torch7 implements their saving function?
I've been mostly working with Theano-based code and Caffe (which does not have this problem, as everything is in the config file). Reading the Torch7 [code](https://github.com/torch/torch7/blob/a99177e7be941e50792e4cc15d247462399cdcce/File.lua) used for serialization, it seems they serialize whole objects without any "smart" parameter saving, beyond avoiding storing the same object twice on disk.
[Mocha.jl](https://github.com/pluskid/Mocha.jl) (a Julia-based library inspired by Caffe) only stores parameters and requires the user to have code to rebuild the model. They only have a simple function to load the stored parameters into a network. Maybe having something like this would be enough, given the minimalist approach in keras?
I don't think we should be restricted to saving layers from the source, as adding new kinds of layers is something I suspect people would want to do often. I'll work tomorrow on an approach based on either just loading params from HDF5 files or with dynamic instantiation (as it seems the most flexible way to do it).
Ok, great! Looking forward to seeing what you come up with.
| 2015-04-19T19:12:34 |
keras-team/keras | 437 | keras-team__keras-437 | [
"428"
] | d2defcae1896ee646f1a2afe0e4fddfae4f87b71 | diff --git a/keras/optimizers.py b/keras/optimizers.py
--- a/keras/optimizers.py
+++ b/keras/optimizers.py
@@ -3,7 +3,7 @@
import theano.tensor as T
import numpy as np
-from .utils.theano_utils import shared_zeros, shared_scalar
+from .utils.theano_utils import shared_zeros, shared_scalar, floatX
from six.moves import zip
@@ -18,6 +18,18 @@ def kl_divergence(p, p_hat):
class Optimizer(object):
+ def __init__(self, **kwargs):
+ self.__dict__.update(kwargs)
+ self.update = []
+
+ def get_state(self):
+ return [u[0].get_value() for u in self.update]
+
+ def set_state(self, value_list):
+ assert len(self.update) == len(value_list)
+ for u, v in zip(self.update, value_list):
+ u[0].set_value(floatX(v))
+
def get_updates(self, params, constraints, loss):
raise NotImplementedError
@@ -38,27 +50,27 @@ def get_config(self):
class SGD(Optimizer):
def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, *args, **kwargs):
- self.__dict__.update(kwargs)
+ super(SGD, self).__init__(**kwargs)
self.__dict__.update(locals())
self.iterations = shared_scalar(0)
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
lr = self.lr * (1.0 / (1.0 + self.decay * self.iterations))
- updates = [(self.iterations, self.iterations + 1.)]
+ self.updates = [(self.iterations, self.iterations + 1.)]
for p, g, c in zip(params, grads, constraints):
m = shared_zeros(p.get_value().shape) # momentum
v = self.momentum * m - lr * g # velocity
- updates.append((m, v))
+ self.updates.append((m, v))
if self.nesterov:
new_p = p + self.momentum * v - lr * g
else:
new_p = p + v
- updates.append((p, c(new_p))) # apply constraints
- return updates
+ self.updates.append((p, c(new_p))) # apply constraints
+ return self.updates
def get_config(self):
return {"name": self.__class__.__name__,
@@ -70,21 +82,21 @@ def get_config(self):
class RMSprop(Optimizer):
def __init__(self, lr=0.001, rho=0.9, epsilon=1e-6, *args, **kwargs):
- self.__dict__.update(kwargs)
+ super(RMSprop, self).__init__(**kwargs)
self.__dict__.update(locals())
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
accumulators = [shared_zeros(p.get_value().shape) for p in params]
- updates = []
+ self.updates = []
for p, g, a, c in zip(params, grads, accumulators, constraints):
new_a = self.rho * a + (1 - self.rho) * g ** 2 # update accumulator
- updates.append((a, new_a))
+ self.updates.append((a, new_a))
new_p = p - self.lr * g / T.sqrt(new_a + self.epsilon)
- updates.append((p, c(new_p))) # apply constraints
- return updates
+ self.updates.append((p, c(new_p))) # apply constraints
+ return self.updates
def get_config(self):
return {"name": self.__class__.__name__,
@@ -95,20 +107,20 @@ def get_config(self):
class Adagrad(Optimizer):
def __init__(self, lr=0.01, epsilon=1e-6, *args, **kwargs):
- self.__dict__.update(kwargs)
+ super(Adagrad, self).__init__(**kwargs)
self.__dict__.update(locals())
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
accumulators = [shared_zeros(p.get_value().shape) for p in params]
- updates = []
+ self.updates = []
for p, g, a, c in zip(params, grads, accumulators, constraints):
new_a = a + g ** 2 # update accumulator
- updates.append((a, new_a))
+ self.updates.append((a, new_a))
new_p = p - self.lr * g / T.sqrt(new_a + self.epsilon)
- updates.append((p, c(new_p))) # apply constraints
- return updates
+ self.updates.append((p, c(new_p))) # apply constraints
+ return self.updates
def get_config(self):
return {"name": self.__class__.__name__,
@@ -121,29 +133,29 @@ class Adadelta(Optimizer):
Reference: http://arxiv.org/abs/1212.5701
'''
def __init__(self, lr=1.0, rho=0.95, epsilon=1e-6, *args, **kwargs):
- self.__dict__.update(kwargs)
+ super(Adadelta, self).__init__(**kwargs)
self.__dict__.update(locals())
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
accumulators = [shared_zeros(p.get_value().shape) for p in params]
delta_accumulators = [shared_zeros(p.get_value().shape) for p in params]
- updates = []
+ self.updates = []
for p, g, a, d_a, c in zip(params, grads, accumulators, delta_accumulators, constraints):
new_a = self.rho * a + (1 - self.rho) * g ** 2 # update accumulator
- updates.append((a, new_a))
+ self.updates.append((a, new_a))
# use the new accumulator and the *old* delta_accumulator
update = g * T.sqrt(d_a + self.epsilon) / T.sqrt(new_a + self.epsilon)
new_p = p - self.lr * update
- updates.append((p, c(new_p))) # apply constraints
+ self.updates.append((p, c(new_p))) # apply constraints
# update delta_accumulator
new_d_a = self.rho * d_a + (1 - self.rho) * update ** 2
- updates.append((d_a, new_d_a))
- return updates
+ self.updates.append((d_a, new_d_a))
+ return self.updates
def get_config(self):
return {"name": self.__class__.__name__,
@@ -161,13 +173,13 @@ class Adam(Optimizer):
lambda is renamed kappa.
'''
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, kappa=1-1e-8, *args, **kwargs):
- self.__dict__.update(kwargs)
+ super(Adam, self).__init__(**kwargs)
self.__dict__.update(locals())
self.iterations = shared_scalar(0)
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
- updates = [(self.iterations, self.iterations+1.)]
+ self.updates = [(self.iterations, self.iterations+1.)]
i = self.iterations
beta_1_t = self.beta_1 * (self.kappa**i)
@@ -187,10 +199,10 @@ def get_updates(self, params, constraints, loss):
p_t = p - self.lr * m_b_t / (T.sqrt(v_b_t) + self.epsilon)
- updates.append((m, m_t))
- updates.append((v, v_t))
- updates.append((p, c(p_t))) # apply constraints
- return updates
+ self.updates.append((m, m_t))
+ self.updates.append((v, v_t))
+ self.updates.append((p, c(p_t))) # apply constraints
+ return self.updates
def get_config(self):
return {"name": self.__class__.__name__,
| Saving internal states of optimizers for successive calls to model.fit
I might be wrong, but I think that every call to `model.fit` resets the accumulated states (such as momentum, etc.) in the optimizers. I think it would be great if the optimizers could essentially pick up where they left off training, or at least have the option of doing so. I don't think this would be difficult to do, but I'd like to have some feedback before trying to make this work.
| I have an implementation with `get_state` and `set_state` methods on the optimizers. It's good for continuing the fitting process, but it's also needed for parallelized fitting. When running fitting processes on multiple nodes, both the weights and the optimizer states need to be gathered and broadcast.
That sounds great, can you make a PR with that feature?
Sure I can make a PR. I use `OrderedDict` instead of a list of tuples. I don't know if anyone has a strong opinion. I've read that Theano actually expects OrderedDict. https://github.com/Theano/Theano/issues/1306
``` python
class Optimizer(object):
def __init__(self, **kwargs):
self.updates = OrderedDict({})
self.__dict__.update(**kwargs)
def get_states(self):
return [u.get_value() for u in self.updates]
def set_states(self, value_list):
assert len(self.updates) == len(value_list)
for u, v in zip(self.updates, value_list):
u.set_value(floatX(v))
def get_updates(self, params, constraints, loss):
raise NotImplementedError
def get_gradients(self, loss, params):
grads = T.grad(loss, params)
if hasattr(self, 'clipnorm') and self.clipnorm > 0:
norm = T.sqrt(sum([T.sum(g**2) for g in grads]))
grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
return grads
def get_config(self):
return {"name":self.__class__.__name__}
```
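For illustration, a sketch of how such accessors could be used to pause and resume training (assuming a compiled `model`; the merged patch names them `get_state`/`set_state`, and the file names here are arbitrary):

``` python
import pickle

# pause: persist both the weights and the optimizer's accumulated state
model.save_weights('model_weights.hdf5')
with open('optimizer_state.pkl', 'wb') as f:
    pickle.dump(model.optimizer.get_state(), f)

# resume: rebuild and compile the same model, then restore both
model.load_weights('model_weights.hdf5')
with open('optimizer_state.pkl', 'rb') as f:
    model.optimizer.set_state(pickle.load(f))
```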
| 2015-07-24T00:19:01 |
|
keras-team/keras | 451 | keras-team__keras-451 | [
"395"
] | 48381f8af59d568ddd1ccaf69439329dcf4bad19 | diff --git a/keras/models.py b/keras/models.py
--- a/keras/models.py
+++ b/keras/models.py
@@ -63,13 +63,20 @@ def slice_X(X, start=None, stop=None):
def weighted_objective(fn):
- def weighted(y_true, y_pred, weights):
+ def weighted(y_true, y_pred, weights, mask=None):
# it's important that 0 * Inf == 0, not NaN, so I need to mask first
masked_y_true = y_true[weights.nonzero()[:-1]]
masked_y_pred = y_pred[weights.nonzero()[:-1]]
masked_weights = weights[weights.nonzero()]
obj_output = fn(masked_y_true, masked_y_pred)
- return (masked_weights.flatten() * obj_output.flatten()).mean()
+ if mask is None:
+ return (masked_weights.flatten() * obj_output.flatten()).mean()
+ else:
+ # We assume the time index to be masked is axis=1
+ wc = masked_weights * obj_output
+ wc = wc.reshape(mask.shape)
+ wc = wc.sum(axis=1) / mask.sum(axis=1)
+ return wc.mean()
return weighted
@@ -338,7 +345,8 @@ class Sequential(Model, containers.Sequential):
- set_weights
'''
- def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None):
+ def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None,
+ mask_cost=False):
self.optimizer = optimizers.get(optimizer)
self.loss = objectives.get(loss)
@@ -356,8 +364,12 @@ def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None):
self.weights = T.ones_like(self.y_train)
- train_loss = weighted_loss(self.y, self.y_train, self.weights)
- test_loss = weighted_loss(self.y, self.y_test, self.weights)
+ if mask_cost:
+ mask = self.layers[-1].get_output_mask()
+ else:
+ mask = None
+ train_loss = weighted_loss(self.y, self.y_train, self.weights, mask)
+ test_loss = weighted_loss(self.y, self.y_test, self.weights, mask)
train_loss.name = 'train_loss'
test_loss.name = 'test_loss'
| diff --git a/tests/auto/test_loss_masking.py b/tests/auto/test_loss_masking.py
new file mode 100644
--- /dev/null
+++ b/tests/auto/test_loss_masking.py
@@ -0,0 +1,25 @@
+import numpy as np
+from keras.models import Sequential
+from keras.layers.core import TimeDistributedDense, Masking
+
+
+def test_cost_masking():
+ X = np.array(
+ [[[1, 1], [2, 1], [3, 1], [5, 5]],
+ [[1, 5], [5, 0], [0, 0], [0, 0]]], dtype=np.int32)
+
+ model = Sequential()
+ model.add(Masking(mask_value=0))
+ model.add(TimeDistributedDense(2, 1, init='one'))
+ model.compile(loss='mse', optimizer='sgd')
+ y = model.predict(X)
+
+ loss = model.fit(X, 4*y, nb_epoch=1, batch_size=2, verbose=1).history['loss'][0]
+ assert loss == 213.75
+
+ model = Sequential()
+ model.add(Masking(mask_value=0))
+ model.add(TimeDistributedDense(2, 1, init='one'))
+ model.compile(loss='mse', optimizer='sgd', mask_cost=True)
+ loss = model.fit(X, 4*y, nb_epoch=1, batch_size=2, verbose=1).history['loss'][0]
+ assert loss == 282.375
| is the Sequence to Sequence learning right?
Assume we are trying to learn a sequence-to-sequence map. For this we can use Recurrent and TimeDistributedDense layers. Now assume that the sequences have different lengths. We should pad both input and desired sequences with zeros, right? But how will the objective function handle the padded values? There is no option to pass a mask to the objective function. Won't this bias the cost function?
| > We should pad both input and desired sequences with zeros, right? But how will the objective function handle the padded values?
You could use a mask the hide your padded values from the network. Then you can discard the masked values in your sequence output. Currently masking is only supported via an initial Embedding layer, though. See: http://keras.io/layers/recurrent/
I'm a little new to recurrent networks. When Eder talked about the sequence-to-sequence map, it reminded me of the char-level LSTM (http://karpathy.github.io/2015/05/21/rnn-effectiveness/). In this case, even if we can discard the masked values in the sequence output, the padding values still have an effect on the parameters of the model itself. So is it enough to just discard the masked values? Again, as Eder has asked, won't this bias the cost function?
Maybe this issue is of your interest #382
This worked for me. Padding the inputs and then the outputs, and adding special sequence start and sequence stop symbols to book-end each sequence, then the following model structure:
``` python
embedding_size = 64
hidden_size = 512
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, embedding_size))
model.add(JZS1(embedding_size, hidden_size)) # try using a GRU instead, for fun
model.add(Dense(hidden_size, hidden_size))
model.add(Activation('relu'))
model.add(RepeatVector(MAX_LEN))
model.add(JZS1(hidden_size, hidden_size, return_sequences=True))
model.add(TimeDistributedDense(hidden_size, max_features, activation="softmax"))
model.compile(loss='mse', optimizer='adam')
```
If you have a sequence stop symbol, it should learn when to stop outputting non-zero values, and will output zeros thereafter. May not be ideal, but it works within the current framework. I also tried replicating the output to the maxlen width (including the stop symbol) during training, and then just took the first valid sequence at test time.
btw, JZS1 is an RNN like a GRU or an LSTM, each of which could be used here instead in both the encoder and decoder.
Sounds like a good idea, but note that you are forcing your model to learn something else instead of your original problem.
I think I know how to solve this problem with a masking layer after each regular layer, and if we allow loss to be a custom function. This way, instead of averaging the cost with .mean(), we divide it by the number of non-zero elements in each time series. This indiscriminate averaging is where a lot of the bias comes from as well.
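For illustration, a rough numpy sketch of that per-sequence averaging idea (shapes and names are assumptions, not the Keras API):

``` python
import numpy as np

def masked_mse(y_true, y_pred, mask):
    # mask: (batch, timesteps), 1 for real steps, 0 for padding
    err = ((y_true - y_pred) ** 2).mean(axis=-1) * mask   # zero out padded steps
    # average over the real steps of each sequence, then over the batch
    per_seq = err.sum(axis=1) / np.maximum(mask.sum(axis=1), 1)
    return per_seq.mean()
```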
@fchollet is there any chance that we can get "loss" to check if its input is callable? I really didn't want to have to add new stuff to my code repo every time I needed something custom made. I can write a PR for that if there is a general interest. Let me know.
PR #446 is relevant here. Now all that we need is that the cost functions ask for that mask in their calculations.
| 2015-07-27T16:46:17 |
keras-team/keras | 567 | keras-team__keras-567 | [
"536"
] | effe128bde4ee79aed2bccd0c9f6c0916a3ba772 | diff --git a/keras/callbacks.py b/keras/callbacks.py
--- a/keras/callbacks.py
+++ b/keras/callbacks.py
@@ -1,13 +1,10 @@
from __future__ import absolute_import
from __future__ import print_function
-import theano
-import theano.tensor as T
import numpy as np
-
import time, json, warnings
-from collections import deque
+from collections import deque
from .utils.generic_utils import Progbar
@@ -262,3 +259,16 @@ def on_epoch_end(self, epoch, logs={}):
r = requests.post(self.root + '/publish/epoch/end/', {'data': json.dumps(send)})
except:
print('Warning: could not reach RemoteMonitor root server at ' + str(self.root))
+
+
+class LearningRateScheduler(Callback):
+ '''LearningRateScheduler
+ func is a function that gets an epoch number as input and returns a new
+ learning rate as output.
+ '''
+ def __init__(self, func):
+ super(LearningRateScheduler, self).__init__()
+ self.func = func
+
+ def on_epoch_begin(self, epoch, logs={}):
+ model.lr.set_value(self.func(epoch))
diff --git a/keras/optimizers.py b/keras/optimizers.py
--- a/keras/optimizers.py
+++ b/keras/optimizers.py
@@ -1,7 +1,6 @@
from __future__ import absolute_import
import theano
import theano.tensor as T
-import numpy as np
from .utils.theano_utils import shared_zeros, shared_scalar, floatX
from six.moves import zip
@@ -49,10 +48,13 @@ def get_config(self):
class SGD(Optimizer):
- def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, *args, **kwargs):
+ def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, *args,
+ **kwargs):
super(SGD, self).__init__(**kwargs)
self.__dict__.update(locals())
self.iterations = shared_scalar(0)
+ self.lr = shared_scalar(lr)
+ self.momentum = shared_scalar(momentum)
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
@@ -84,6 +86,8 @@ class RMSprop(Optimizer):
def __init__(self, lr=0.001, rho=0.9, epsilon=1e-6, *args, **kwargs):
super(RMSprop, self).__init__(**kwargs)
self.__dict__.update(locals())
+ self.lr = shared_scalar(lr)
+ self.rho = shared_scalar(rho)
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
@@ -109,6 +113,7 @@ class Adagrad(Optimizer):
def __init__(self, lr=0.01, epsilon=1e-6, *args, **kwargs):
super(Adagrad, self).__init__(**kwargs)
self.__dict__.update(locals())
+ self.lr = shared_scalar(lr)
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
@@ -135,6 +140,7 @@ class Adadelta(Optimizer):
def __init__(self, lr=1.0, rho=0.95, epsilon=1e-6, *args, **kwargs):
super(Adadelta, self).__init__(**kwargs)
self.__dict__.update(locals())
+ self.lr = shared_scalar(lr)
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
@@ -142,12 +148,14 @@ def get_updates(self, params, constraints, loss):
delta_accumulators = [shared_zeros(p.get_value().shape) for p in params]
self.updates = []
- for p, g, a, d_a, c in zip(params, grads, accumulators, delta_accumulators, constraints):
+ for p, g, a, d_a, c in zip(params, grads, accumulators,
+ delta_accumulators, constraints):
new_a = self.rho * a + (1 - self.rho) * g ** 2 # update accumulator
self.updates.append((a, new_a))
# use the new accumulator and the *old* delta_accumulator
- update = g * T.sqrt(d_a + self.epsilon) / T.sqrt(new_a + self.epsilon)
+ update = g * T.sqrt(d_a + self.epsilon) / T.sqrt(new_a +
+ self.epsilon)
new_p = p - self.lr * update
self.updates.append((p, c(new_p))) # apply constraints
@@ -170,10 +178,12 @@ class Adam(Optimizer):
Default parameters follow those provided in the original paper.
'''
- def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, *args, **kwargs):
+ def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, *args,
+ **kwargs):
super(Adam, self).__init__(**kwargs)
self.__dict__.update(locals())
self.iterations = shared_scalar(0)
+ self.lr = shared_scalar(lr)
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
@@ -210,5 +220,8 @@ def get_config(self):
adam = Adam
from .utils.generic_utils import get_from_module
+
+
def get(identifier, kwargs=None):
- return get_from_module(identifier, globals(), 'optimizer', instantiate=True, kwargs=kwargs)
+ return get_from_module(identifier, globals(), 'optimizer', instantiate=True,
+ kwargs=kwargs)
| Learning rate and momentum as shared scalars
I'd like to ask if we could make the optimizer's learning rate (`lr`) and momentum (`momentum`) into shared scalars. This way we could change their values during training with `.set_value` using custom rules.
I could work on the PR as long as you guys don't have a reason not to.
| I would have issues with changing the definition of the learning rate. But if the PR is just about switching `lr` and `momentum` to shared scalars (hence modifiable after compilation) instead of scalars, without touching the logic, that's fine by me.
This could be a very useful feature. Actually, I can't wait to have it :)
So right now, the only way to change the learning rate during training is to 1) save the weights of the model, 2) compile a new model with the same architecture and a different learning rate, and 3) load the weights and continue training??
I think you can also create a shared scalar and pass it to the optimizer's initializer.
I tried something like this for changing momentum from a callback.
https://github.com/wuaalb/keras_extensions/blob/master/keras_extensions/callbacks.py
The problem is, since you don't recompile the graph it won't use your new learning rate or momentum, even if you change it with a callback.
Something like this doesn't work, or isn't what you're looking for?
``` python
model = ...
optimizer = SGD(lr=shared_scalar(0.01))
model.compile(optimizer, loss)
...
optimizer.lr.set_value(0.02) # e.g. from a callback during fit()
```
Anyways, making them shared scalars by default sounds good..
That is exactly what I'm doing! What we were discussing was: should we make lr a shared_scalar by default or not? It seems that we don't have anything to lose with that. I'm also writing a callback that gets a dict of epoch x lr pairs and switches values on the go.
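A rough sketch of what such a dict-driven callback could look like on top of the shared-scalar lr (the class name is made up, and it assumes `Callback` exposes `self.model` after compilation):

``` python
from keras.callbacks import Callback

class LearningRateSwitcher(Callback):
    '''Set a new learning rate at the epochs listed in `schedule`,
    a dict mapping epoch index -> learning rate.'''
    def __init__(self, schedule):
        super(LearningRateSwitcher, self).__init__()
        self.schedule = schedule

    def on_epoch_begin(self, epoch, logs={}):
        if epoch in self.schedule:
            self.model.optimizer.lr.set_value(self.schedule[epoch])

# e.g. model.fit(X, y, callbacks=[LearningRateSwitcher({0: 0.01, 10: 0.005, 20: 0.0025})])
```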
> What we were discussing was, should we make lr a shared_scalar by default or not? It seems that we don't have anything to lose with that.
Indeed. You would pass a float to the constructor and internally this float would be used as the initial value of a shared scalar. The logic of the optimizer itself would be unchanged.
| 2015-08-20T21:48:03 |
|
keras-team/keras | 637 | keras-team__keras-637 | [
"636"
] | 332d43e023073561fec53828ee21e206ac1b34b1 | diff --git a/keras/preprocessing/image.py b/keras/preprocessing/image.py
--- a/keras/preprocessing/image.py
+++ b/keras/preprocessing/image.py
@@ -1,6 +1,7 @@
from __future__ import absolute_import
import numpy as np
+import re
from scipy import ndimage
from scipy import linalg
| Missing import in list_pictures
`list_pictures` aborts with the error `NameError: global name 're' is not defined`
| 2015-09-04T11:51:23 |
||
keras-team/keras | 640 | keras-team__keras-640 | [
"469"
] | ddf908359c89491b61198a50faeaa9170f9b79e6 | diff --git a/examples/cifar10_cnn.py b/examples/cifar10_cnn.py
--- a/examples/cifar10_cnn.py
+++ b/examples/cifar10_cnn.py
@@ -28,6 +28,17 @@
nb_epoch = 200
data_augmentation = True
+# shape of the image (SHAPE x SHAPE)
+shapex, shapey = 32, 32
+# number of convolutional filters to use at each layer
+nb_filters = [32, 64]
+# level of pooling to perform at each layer (POOL x POOL)
+nb_pool = [2, 2]
+# level of convolution to perform at each layer (CONV x CONV)
+nb_conv = [3, 3]
+# the CIFAR10 images are RGB
+image_dimensions = 3
+
# the data, shuffled and split between tran and test sets
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print('X_train shape:', X_train.shape)
@@ -40,22 +51,24 @@
model = Sequential()
-model.add(Convolution2D(32, 3, 3, 3, border_mode='full'))
+model.add(Convolution2D(nb_filters[0], image_dimensions, nb_conv[0], nb_conv[0], border_mode='full'))
model.add(Activation('relu'))
-model.add(Convolution2D(32, 32, 3, 3))
+model.add(Convolution2D(nb_filters[0], nb_filters[0], nb_conv[0], nb_conv[0]))
model.add(Activation('relu'))
-model.add(MaxPooling2D(poolsize=(2, 2)))
+model.add(MaxPooling2D(poolsize=(nb_pool[0], nb_pool[0])))
model.add(Dropout(0.25))
-model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
+model.add(Convolution2D(nb_filters[1], nb_filters[0], nb_conv[0], nb_conv[0], border_mode='full'))
model.add(Activation('relu'))
-model.add(Convolution2D(64, 64, 3, 3))
+model.add(Convolution2D(nb_filters[1], nb_filters[1], nb_conv[1], nb_conv[1]))
model.add(Activation('relu'))
-model.add(MaxPooling2D(poolsize=(2, 2)))
+model.add(MaxPooling2D(poolsize=(nb_pool[1], nb_pool[1])))
model.add(Dropout(0.25))
model.add(Flatten())
-model.add(Dense(64*8*8, 512))
+# the image dimensions are the original dimensions divided by any pooling
+# each pixel has a number of filters, determined by the last Convolution2D layer
+model.add(Dense(nb_filters[-1] * (shapex / nb_pool[0] / nb_pool[1]) * (shapey / nb_pool[0] / nb_pool[1]), 512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
diff --git a/examples/mnist_cnn.py b/examples/mnist_cnn.py
--- a/examples/mnist_cnn.py
+++ b/examples/mnist_cnn.py
@@ -22,11 +22,20 @@
nb_classes = 10
nb_epoch = 12
+# shape of the image (SHAPE x SHAPE)
+shapex, shapey = 28, 28
+# number of convolutional filters to use
+nb_filters = 32
+# level of pooling to perform (POOL x POOL)
+nb_pool = 2
+# level of convolution to perform (CONV x CONV)
+nb_conv = 3
+
# the data, shuffled and split between tran and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
-X_train = X_train.reshape(X_train.shape[0], 1, 28, 28)
-X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)
+X_train = X_train.reshape(X_train.shape[0], 1, shapex, shapey)
+X_test = X_test.reshape(X_test.shape[0], 1, shapex, shapey)
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
@@ -41,15 +50,18 @@
model = Sequential()
-model.add(Convolution2D(32, 1, 3, 3, border_mode='full'))
+model.add(Convolution2D(nb_filters, 1, nb_conv, nb_conv, border_mode='full'))
model.add(Activation('relu'))
-model.add(Convolution2D(32, 32, 3, 3))
+model.add(Convolution2D(nb_filters, nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
-model.add(MaxPooling2D(poolsize=(2, 2)))
+model.add(MaxPooling2D(poolsize=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Flatten())
-model.add(Dense(32*196, 128))
+# the resulting image after conv and pooling is the original shape
+# divided by the pooling with a number of filters for each "pixel"
+# (the number of filters is determined by the last Conv2D)
+model.add(Dense(nb_filters * (shapex / nb_pool) * (shapey / nb_pool), 128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
| Keras Examples without Magic Numbers
Hello,
Looking at the CIFAR10 example:
https://github.com/fchollet/keras/blob/master/examples/cifar10_cnn.py
(3-channel 32x32 images)
I tried to run the model on the LFW (Faces in the Wild) dataset
http://vis-www.cs.umass.edu/lfw/
(3-channel 250x250 images)
I have reshaped the data to the same shape as the CIFAR10 data, but I get other errors:
``` python
ValueError: matrices are not aligned
Apply node that caused the error: dot(Reshape{2}.0, HostFromGpu.0)
Inputs types: [TensorType(float32, matrix), TensorType(float32, matrix)]
Inputs shapes: [(3, 246016), (4096, 512)]
Inputs strides: [(984064, 4), (2048, 4)]
Inputs values: ['not shown', 'not shown']
```
If I understand correctly, this happens because I use the CIFAR10 model as-is, but its parameters - magic numbers such as 32 (which is also the image dimension of the CIFAR10 dataset) - are scattered all around the model definition:
``` python
model = Sequential()
model.add(Convolution2D(32, 3, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(Convolution2D(32, 32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64*8*8, 512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(512, nb_classes))
model.add(Activation('softmax'))
```
It would be great for newbies such as myself if, instead of magic numbers, the way to calculate them were stored in variables and those variables were then used in building the model. Even the variables which are chosen arbitrarily, and on which no other variables depend, would be better off clearly marked as such. Take for instance batch sizes - of course they affect the training, but on the other hand we can toy around with them and the code would still run (which is my main goal right now).
I know that this is partially because I'm not well-versed in ANNs and in ConvNets, but believe me that understanding things first via code, then by reading is the best method for me. Let's be Pythonic about it and make keras friendly to newbies.
Thanks!
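For reference, a hedged sketch of where the two mismatched sizes in the error above come from, and of the kind of derivation being asked for (the helper names are made up; it assumes the CIFAR10 architecture applied unchanged to 250x250 inputs, with 'full' convolutions growing each dimension by kernel_size - 1, 'valid' ones shrinking it back, and 2x2 pooling flooring a division by 2):

``` python
def conv_stack_output_dim(size, nb_conv=3, nb_pool=2):
    # conv (border_mode='full') -> conv (valid) -> 2x2 max pooling
    size = size + (nb_conv - 1)   # 'full' convolution grows the feature map
    size = size - (nb_conv - 1)   # 'valid' convolution shrinks it back
    return size // nb_pool        # pooling floors a halving

def flattened_size(size, nb_filters_last=64):
    size = conv_stack_output_dim(conv_stack_output_dim(size))
    return nb_filters_last * size * size

print(flattened_size(32))    # 4096   = 64 * 8 * 8, the hard-coded Dense input
print(flattened_size(250))   # 246016 = 64 * 62 * 62, what raw LFW images produce
```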
| Here is what I meant: I finally broke down the CIFAR10 demo and replaced the magic numbers with variables. Now I can distinguish between the variables that are my choice (kernel size, number of kernels) and those that aren't (things that derive from each other).
Of course my degrees of freedom are not unlimited - I can't choose too big (or too small) a kernel size (etc.) - but code like this gave me a much better understanding of how convolution layers work.
``` python
# Initial arguments
nr_kernels1 = 32 # arbitrary choice
kernel_size = 3 # arbitrary choice
nr_channels = X_train.shape[1] # RGB inputs => 3 channels
#1
# model.add(Convolution2D(32, 3, 3, 3, border_mode='full'))
model.add(Convolution2D(nr_kernels1,
nr_channels,
kernel_size,
kernel_size,
border_mode='full'))
model.add(Activation('relu'))
# (b x 3 x 32 x 32) -> (b x 32 x 32 x 32)
nr_kernels2 = 32 #arbitrary choice
model.add(Convolution2D(
nr_kernels2, # I chose it
nr_kernels1, # derived from output of previous layer
kernel_size, # I chose it
kernel_size)) # I chose it
model.add(Activation('relu'))
#3
model.add(MaxPooling2D(poolsize=(2, 2))) # arbitrary choice
model.add(Dropout(0.25))
# (b x 32 x 32 x 32) -> (b x 32 x 16 x 16)
#4
nr_kernels3=64
model.add(Convolution2D(nr_kernels3, # I chose it
nr_kernels2,
kernel_size,
kernel_size,
border_mode='full'))
model.add(Activation('relu'))
# (b x 32 x 16 x 16) -> (b x 64 x 16 x 16)
#5
nr_kernels4=64
model.add(Convolution2D(nr_kernels4,
nr_kernels3,
kernel_size,
kernel_size))
model.add(Activation('relu'))
# (b x 64 x 16 x 16) -> (b x 64 x 16 x 16)
#6
model.add(MaxPooling2D(poolsize=(2, 2))) # arbitrary
model.add(Dropout(0.25))
# (b x 64 x 16 x 16) -> (b x 64 x 8 x 8)
#7
model.add(Flatten())
#8
# The dimensions of each image is (N = (X_train.shape[2]) ** 2),
# We had two downsampling layers of 2x2 maxpooling, so we divide each dimension twice by 2 (/2 /2).
# The input to this layer is the 64 "channels" that the previous layer outputs. Thus we have a layer of
# nr_kernels * (N / 2 / 2) * (N / 2 / 2)
flat_layer_size = nr_kernels4 * (X_train.shape[2] / 2 / 2) ** 2
final_layer_size=512 # I chose it
model.add(Dense(flat_layer_size, final_layer_size))
model.add(Activation('relu'))
model.add(Dropout(0.5))
#9
model.add(Dense(final_layer_size, nb_classes))
model.add(Activation('softmax'))
```
If you're willing to give it more formalism (comments and variable names), you could submit a pull request with this.
Sure thing. Thanks for keras!
@guy4261 I agree with you. Such comments and style make things more readable for newbies. I would like to see you add comments to the other examples in keras (./keras/examples.py), especially the RNN example code.
It's a little confusing but if you read some of the other issue posts and read up on the network type, you should be able to fill in the holes. But if you can't just holla ;)
@guy4261 - a little late to the party, but I have [an example](https://github.com/dribnet/lfw_fuel/blob/857dfb43bbcdeb5d820c95b541569af181617409/example/run-lfw.py) of using LFW with Keras that has fewer magic numbers. Since 250x250x3x2 is huge, I [crop and scale the image pairs](https://github.com/dribnet/lfw_fuel/blob/857dfb43bbcdeb5d820c95b541569af181617409/example/run-lfw.py#L29-L35) in a parameterized way, and [adjust the network](https://github.com/dribnet/lfw_fuel/blob/857dfb43bbcdeb5d820c95b541569af181617409/example/run-lfw.py#L61) accordingly.
Also - if you use that [lfw_fuel repo](https://github.com/dribnet/lfw_fuel/) itself as a dependency, you'll have LFW data in any of three formats (regular, funneled, deepfunneled) with the official test/train splits ready to use with Keras. My example runs but doesn't perform well, so suggestions for getting the accuracy up (and the magic numbers down) are also appreciated!
| 2015-09-04T23:41:26 |
|
keras-team/keras | 656 | keras-team__keras-656 | [
"633"
] | 2224c4cc1ea8c28f72cc68b5ce6ca48497480129 | diff --git a/keras/layers/containers.py b/keras/layers/containers.py
--- a/keras/layers/containers.py
+++ b/keras/layers/containers.py
@@ -156,7 +156,7 @@ def add_input(self, name, ndim=2, dtype='float'):
raise Exception('Duplicate node identifier: ' + name)
self.namespace.add(name)
self.input_order.append(name)
- layer = Layer() # empty layer
+ layer = Layer() # empty layer
if dtype == 'float':
layer.input = ndim_tensor(ndim)
else:
@@ -168,7 +168,7 @@ def add_input(self, name, ndim=2, dtype='float'):
self.inputs[name] = layer
self.input_config.append({'name': name, 'ndim': ndim, 'dtype': dtype})
- def add_node(self, layer, name, input=None, inputs=[], merge_mode='concat', create_output=False):
+ def add_node(self, layer, name, input=None, inputs=[], merge_mode='concat', concat_axis=-1, create_output=False):
if hasattr(layer, 'set_name'):
layer.set_name(name)
if name in self.namespace:
@@ -189,7 +189,7 @@ def add_node(self, layer, name, input=None, inputs=[], merge_mode='concat', crea
to_merge.append(self.inputs[n])
else:
raise Exception('Unknown identifier: ' + n)
- merge = Merge(to_merge, mode=merge_mode)
+ merge = Merge(to_merge, mode=merge_mode, concat_axis=concat_axis)
layer.set_previous(merge)
self.namespace.add(name)
@@ -208,7 +208,7 @@ def add_node(self, layer, name, input=None, inputs=[], merge_mode='concat', crea
if create_output:
self.add_output(name, input=name)
- def add_output(self, name, input=None, inputs=[], merge_mode='concat'):
+ def add_output(self, name, input=None, inputs=[], merge_mode='concat', concat_axis=-1):
if name in self.output_order:
raise Exception('Duplicate output identifier: ' + name)
if input:
@@ -224,14 +224,15 @@ def add_output(self, name, input=None, inputs=[], merge_mode='concat'):
if n not in self.nodes:
raise Exception('Unknown identifier: ' + n)
to_merge.append(self.nodes[n])
- merge = Merge(to_merge, mode=merge_mode)
+ merge = Merge(to_merge, mode=merge_mode, concat_axis=concat_axis)
self.outputs[name] = merge
self.output_order.append(name)
self.output_config.append({'name': name,
'input': input,
'inputs': inputs,
- 'merge_mode': merge_mode})
+ 'merge_mode': merge_mode,
+ 'concat_axis': concat_axis})
def get_config(self):
return {"name": self.__class__.__name__,
diff --git a/keras/layers/core.py b/keras/layers/core.py
--- a/keras/layers/core.py
+++ b/keras/layers/core.py
@@ -161,13 +161,14 @@ def get_config(self):
class Merge(Layer):
- def __init__(self, layers, mode='sum'):
+ def __init__(self, layers, mode='sum', concat_axis=-1):
''' Merge the output of a list of layers or containers into a single tensor.
mode: {'sum', 'mul', 'concat'}
'''
if len(layers) < 2:
raise Exception("Please specify two or more input layers (or containers) to merge")
self.mode = mode
+ self.concat_axis = concat_axis
self.layers = layers
self.params = []
self.regularizers = []
@@ -194,7 +195,7 @@ def get_output(self, train=False):
return s
elif self.mode == 'concat':
inputs = [self.layers[i].get_output(train) for i in range(len(self.layers))]
- return T.concatenate(inputs, axis=-1)
+ return T.concatenate(inputs, axis=self.concat_axis)
elif self.mode == 'mul':
s = self.layers[0].get_output(train)
for i in range(1, len(self.layers)):
@@ -239,7 +240,8 @@ def set_weights(self, weights):
def get_config(self):
return {"name": self.__class__.__name__,
"layers": [l.get_config() for l in self.layers],
- "mode": self.mode}
+ "mode": self.mode,
+ "concat_axis": self.concat_axis}
class Dropout(MaskedLayer):
| Merging Convolution2D layers: Wrong Dimension?
I'm currently trying to implement the GoogLeNet using a `Graph` model and `Convolution2D`.
`model.fit(...)` results in the following error:
```
ValueError: GpuJoin: Wrong inputs for input 1 related to inputs 0.!
Apply node that caused the error: GpuJoin(...
...
Inputs shapes: [(), (500, 16, 24, 24), (500, 32, 24, 24), (500, 64, 24, 24), (500, 16, 24, 24)]
```
that suggests that the merging of the 4 convolution outputs is not done according to the 2nd dimension.
According to
https://github.com/fchollet/keras/blob/master/keras/layers/containers.py#L227
a `Merge` Layer is used for `add_output` and this one concatenates according to axis -1
https://github.com/fchollet/keras/blob/master/keras/layers/core.py#L197
would it be ok to change that to axis=1? or would that cause other problems? Then maybe the axis should be a parameter?
| Only now I notice the `()` empty tuple in the list of input shapes... I don't understand where it comes from... Do I use something wrong?
Toy example for mnist data:
``` python
from keras.models import Graph
class Convoogle(Graph):
def __init__(self, c11, r33, c33, r55, c55, pool_features, stack_size, **kwargs):
super(Convoogle, self).__init__()
if 'activation' not in kwargs:
kwargs['activation']='relu'
if 'init' not in kwargs:
kwargs['init']='he_normal'
if 'border_mode' in kwargs:
assert kwargs['border_mode'] == 'same'
else:
kwargs['border_mode'] == 'same'
self.add_input(name='input', ndim=4)
self.add_node(Convolution2D(c11, stack_size, 1, 1, **kwargs), name='c11', input='input')
self.add_node(Convolution2D(r33, stack_size, 1, 1, **kwargs), name='r33', input='input')
self.add_node(Convolution2D(c33, r33, 3, 3, **kwargs), name='c33', input='r33')
self.add_node(Convolution2D(r55, stack_size, 1, 1, **kwargs), name='r55', input='input')
self.add_node(Convolution2D(c55, r55, 5, 5, **kwargs), name='c55', input='r55')
self.add_node(ZeroPadding2D(1), name='pad', input='input',)
self.add_node(MaxPooling2D((3,3),(1,1), ignore_border=False), name='pool', input='pad',)
self.add_node(Convolution2D(pool_features, stack_size, 1, 1, **kwargs), name='pooled', input='pool')
self.add_output(name='output1', inputs=['c11', 'c33', 'c55', 'pooled'])
model=Sequential()
model.add(Convolution2D(64,1,5,5))
model.add(Activation('relu'))
model.add(Convoogle(16,8,32,8,64,16, stack_size=64, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D((3,3), (2,2), ignore_border=False))
model.add(Convoogle(16,16,32,16,64,16, stack_size=128, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D((3,3), (2,2), ignore_border=False))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128*7*7,10))
model.add(Activation('softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
```
> would it be ok to change that to axis=1? or would that cause other problems? Then maybe the axis should be a parameter?
That would cause other problems. But you're right, the concatenation axis should definitely be configurable. This is already the case in the Caffe branch (precisely to support convolution output concatenation). Since we are quite late in merging the Caffe branch, we can incorporate this specific feature into master on its own.
Sorry, I don't understand "we are quite late in merging the Caffe branch" you/we should have done it already? Or you can not do it at the moment?
Anyway I tried to search for the commit that brought this change in the Caffe branch (btw thanks for pointing this out), hoping that I could just cherry-pick it into master... It turns out that it's a rather huge commit https://github.com/fchollet/keras/commit/bf997c19431ab12a10e60205c9e2b3c4c591e014#diff-05ec8fc63f0e9bb35f99c0ca76a66d0e so i guess it's easier to copy the changes by hand...? (maybe my git-foo is just not good enough?)
If you approve I'd do this and file a pull request.
| 2015-09-07T18:09:54 |
|
keras-team/keras | 677 | keras-team__keras-677 | [
"676"
] | 1724fe58821cc07763065e950fc9bbd348cd885d | diff --git a/keras/preprocessing/image.py b/keras/preprocessing/image.py
--- a/keras/preprocessing/image.py
+++ b/keras/preprocessing/image.py
@@ -104,7 +104,7 @@ def img_to_array(img):
def load_img(path, grayscale=False):
from PIL import Image
- img = Image.open(open(path))
+ img = Image.open(path)
if grayscale:
img = img.convert('L')
else: # Assure 3 channel even when loaded image is grayscale
| Python 3 compatibility problem with Image loading
Loading an image using `load_img` results in an error.
```
Traceback (most recent call last):
File "keras/autoencoder.py", line 45, in <module>
X_train, Y_train, X_test, Y_test, nb_classes = io.load_images(join(DATA_DIR, 'dataset0'))
File "/home/jnphilipp/Documents/cnn/hieroglyphs/keras/utils/io.py", line 27, in load_images
X_train.append(img_to_array(load_img(picture, True)))
File "/home/jnphilipp/.local/lib/python3.4/site-packages/Keras-0.1.2-py3.4.egg/keras/preprocessing/image.py", line 107, in load_img
File "/home/jnphilipp/.local/lib/python3.4/site-packages/PIL/Image.py", line 2330, in open
% (filename if filename else fp))
OSError: cannot identify image file <_io.TextIOWrapper name='/home/jnphilipp/Documents/cnn/hieroglyphs/data/dataset0/train/P1_train0.png' mode='r' encoding='ISO-8859-1'>
```
| 2015-09-11T20:40:05 |
||
keras-team/keras | 826 | keras-team__keras-826 | [
"821"
] | 40a9bd7c2f487cfb19d40157bcb114b34361ff98 | diff --git a/keras/optimizers.py b/keras/optimizers.py
--- a/keras/optimizers.py
+++ b/keras/optimizers.py
@@ -82,7 +82,7 @@ def get_config(self):
return {"name": self.__class__.__name__,
"lr": float(self.lr.get_value()),
"momentum": float(self.momentum.get_value()),
- "decay": float(self.decay.get_value()),
+ "decay": float(self.decay),
"nesterov": self.nesterov}
| 'float' object has no attribute 'get_value' problem.
When I was trying to print the configuration of a model by "model.get_config()" or trying to save my model as a 'json' file:
json_str = autoEncoder.to_json()
open('temp_model.json','w').write(json_str)
autoEncoder.save_weights('temp_model_weights.h5')
It raises the exception "'float' object has no attribute 'get_value'" in file 'optimizers.py', in class SGD (because I was using SGD as the optimizer). The definition of get_config() is:

```
def get_config(self):
    return {"name": self.__class__.__name__,
            "lr": float(self.lr.get_value()),
            "momentum": float(self.momentum.get_value()),
            "decay": float(self.decay.get_value()),
            "nesterov": self.nesterov}
```

while the `__init__` of class SGD only wraps lr and momentum (not decay) as shared scalars:
```
def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, *args, **kwargs):
super(SGD, self).__init__(**kwargs)
self.__dict__.update(locals())
self.iterations = shared_scalar(0)
self.lr = shared_scalar(lr)
self.momentum = shared_scalar(momentum)
```
Is it a bug? Can I fix the problem by adding 'self.decay = shared_scalar(decay)' or something like this?
Thank you very much!
| well, it seems I can't fix it by myself... sad ￣﹏￣
`self.__dict__.update(locals())` sets up all the named parameters from `__init__` to the created object, so a correctly initialized object of class `SGD` should have both `decay` and `nesterov`. The problem here is that `decay` is a float and not a shared Theano variable. I'll fix this in a bit.
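For what it's worth, the workaround suggested above does work against the pre-fix code; a hedged sketch, monkey-patching the attribute from outside the class:
```
from keras.optimizers import SGD
from keras.utils.theano_utils import shared_scalar

sgd = SGD(lr=0.01, momentum=0.9, decay=1e-6, nesterov=True)
sgd.decay = shared_scalar(sgd.decay)   # wrap decay like lr and momentum
print(sgd.get_config())                # .get_value() now succeeds on decay
```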
| 2015-10-13T18:24:11 |
|
keras-team/keras | 903 | keras-team__keras-903 | [
"902"
] | 26b57a6cddb2f629c7372b0e844f133694a5d7bd | diff --git a/keras/layers/containers.py b/keras/layers/containers.py
--- a/keras/layers/containers.py
+++ b/keras/layers/containers.py
@@ -84,7 +84,11 @@ def get_input(self, train=False):
if not hasattr(self.layers[0], 'input'):
self.set_input()
return self.layers[0].get_input(train)
-
+
+ @property
+ def input_shape(self):
+ return self.layers[0].input_shape
+
@property
def input(self):
return self.get_input()
| A model doesn't know its input_shape after build
Reproduce the error as
``` python
model = Sequential()
model.add(Dense(1), input_shape=(784,))
model.build()
model.input_shape
```
Shouldn't the model know its `input_shape` after `build`? It knows `output_shape` for instance. Am I missing anything @matsuyamax ?
| 2015-10-27T20:24:19 |
||
keras-team/keras | 1,039 | keras-team__keras-1039 | [
"1036"
] | e94f29cac4151b1b52f3ee419bf7191018974308 | diff --git a/keras/utils/visualize_util.py b/keras/utils/visualize_util.py
--- a/keras/utils/visualize_util.py
+++ b/keras/utils/visualize_util.py
@@ -1,7 +1,7 @@
import pydot
# old pydot will not work with python3, must use one
# that works with python3 such as pydot2 or pydot
-
+from keras.models import Sequential, Graph
def plot(model, to_file='model.png'):
| keras.utils.visualize_util
line 9: `if type(model) == Sequential` fails with "Global name Sequential is not defined"
line 25: `elif type(model) == Graph:` fails with "Global name Graph is not defined"
| Looks like adding `from keras.models import Sequential, Graph` would be fine.
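For reference, a minimal sketch of a call that exercises the patched module (assumes pydot/graphviz are installed):
``` python
from keras.models import Sequential
from keras.layers.core import Dense
from keras.utils.visualize_util import plot

model = Sequential()
model.add(Dense(1, input_shape=(784,)))
plot(model, to_file='model.png')
```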
| 2015-11-19T13:54:14 |
|
keras-team/keras | 1,158 | keras-team__keras-1158 | [
"1157"
] | 7bb897dff13db7bb23d07370cf2e6cba38949025 | diff --git a/keras/backend/common.py b/keras/backend/common.py
--- a/keras/backend/common.py
+++ b/keras/backend/common.py
@@ -22,6 +22,8 @@ def set_floatx(floatx):
global _FLOATX
if floatx not in {'float32', 'float64'}:
raise Exception('Unknown floatx type: ' + str(floatx))
+ if isinstance(floatx, unicode):
+ floatx = floatx.encode('ascii')
_FLOATX = floatx
| problem with K.common._FLOATX
I tried to run:
```
a = K.random_normal((100, 200))
```
and I got the error:
```
/home/eders/python/Theano/theano/sandbox/rng_mrg.pyc in get_substream_rstates(self, n_streams, dtype, inc_rstate)
1167
1168 """
-> 1169 assert isinstance(dtype, str)
1170 assert n_streams < 2**72
1171 assert n_streams > 0
AssertionError:
```
I tried to print K.common._FLOATX to see what was going on and it is `u'float32'`. That little `u` upfront is making theano crash. I believe that when reading the type from json it was not converted to the right type of string. Anybody else had that problem? I'll check the code to see if I can fix it.
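A minimal Python 2 sketch of what is described above (assuming the value comes from `json.load`, which returns `unicode` strings):
``` python
import json

floatx = json.loads('{"floatx": "float32"}')['floatx']
print(type(floatx))                  # <type 'unicode'> -> trips Theano's isinstance(dtype, str) assert
print(type(floatx.encode('ascii')))  # <type 'str'>     -> the conversion the diff above applies
```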
| 2015-12-03T15:29:40 |
||
keras-team/keras | 1,182 | keras-team__keras-1182 | [
"1181"
] | ada2f2fa0dd9dca2972e374ff937b59461cfacb5 | diff --git a/keras/layers/recurrent.py b/keras/layers/recurrent.py
--- a/keras/layers/recurrent.py
+++ b/keras/layers/recurrent.py
@@ -41,6 +41,15 @@ def output_shape(self):
def step(self, x, states):
raise NotImplementedError
+ def get_initial_states(self, X):
+ # build an all-zero tensor of shape (samples, output_dim)
+ initial_state = K.zeros_like(X) # (samples, timesteps, input_dim)
+ initial_state = K.sum(initial_state, axis=1) # (samples, input_dim)
+ reducer = K.zeros((self.input_dim, self.output_dim))
+ initial_state = K.dot(initial_state, reducer) # (samples, output_dim)
+ initial_states = [initial_state for _ in range(len(self.states))]
+ return initial_states
+
def get_output(self, train=False):
# input shape: (nb_samples, time (padded with zeros), input_dim)
X = self.get_input(train)
@@ -64,12 +73,7 @@ def get_output(self, train=False):
if self.stateful:
initial_states = self.states
else:
- # build an all-zero tensor of shape (samples, output_dim)
- initial_state = K.zeros_like(X) # (samples, timesteps, input_dim)
- initial_state = K.sum(initial_state, axis=1) # (samples, input_dim)
- reducer = K.zeros((self.input_dim, self.output_dim))
- initial_state = K.dot(initial_state, reducer) # (samples, output_dim)
- initial_states = [initial_state for _ in range(len(self.states))]
+ initial_states = self.get_initial_states(X)
last_output, outputs, states = K.rnn(self.step, X, initial_states,
go_backwards=self.go_backwards,
| add get_initial_states to Recurrent
GRU, LSTM, and other conventional RNNs have well-defined initial state sizes that can be calculated from `output_dim`, but NTMs and convolutional RNNs (which I'm working on) don't. Instead of hard-coding the initial states inside `get_output`, I propose an extra method called `get_initial_states`.
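A hypothetical sketch of the kind of override this enables (class name and shapes are illustrative only, not part of this PR):
``` python
from keras import backend as K
from keras.layers.recurrent import Recurrent

class ConvRecurrentSketch(Recurrent):
    '''Illustration: a recurrent layer whose per-step state is not (samples, output_dim).'''
    def get_initial_states(self, X):
        # X: (samples, timesteps, rows, cols, channels); keep the spatial dimensions in the state
        initial_state = K.sum(K.zeros_like(X), axis=1)  # all-zero tensor of shape (samples, rows, cols, channels)
        return [initial_state for _ in range(len(self.states))]
```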
| 2015-12-05T19:42:23 |
||
keras-team/keras | 1,383 | keras-team__keras-1383 | [
"1276"
] | be9f7bc62fcada746e446115e6b7c40608f33ebf | diff --git a/keras/backend/theano_backend.py b/keras/backend/theano_backend.py
--- a/keras/backend/theano_backend.py
+++ b/keras/backend/theano_backend.py
@@ -548,12 +548,14 @@ def conv2d(x, kernel, strides=(1, 1), border_mode='valid', dim_ordering='th',
if _on_gpu() and dnn.dnn_available():
if border_mode == 'same':
assert(strides == (1, 1))
- np_kernel = kernel.eval()
- pad_x = (np_kernel.shape[2] - strides[0]) // 2
- pad_y = (np_kernel.shape[3] - strides[1]) // 2
conv_out = dnn.dnn_conv(img=x,
kerns=kernel,
- border_mode=(pad_x, pad_y))
+ border_mode='full')
+ shift_x = (kernel.shape[2] - 1) // 2
+ shift_y = (kernel.shape[3] - 1) // 2
+ conv_out = conv_out[:, :,
+ shift_x:x.shape[2] + shift_x,
+ shift_y:x.shape[3] + shift_y]
else:
conv_out = dnn.dnn_conv(img=x,
kerns=kernel,
| Tests crash for the shape inference
Hello all,
When I tried to run some scripts after updating to the latest versions of Theano and Keras, I got some errors.
I may have missed a change on my side, but I ran the tests and it seems the shape inference is not behaving nicely:
``` python
Using gpu device 0: GeForce GTX 980 (CNMeM is disabled)
......FF.........
======================================================================
FAIL: test_shape_inference.test_Convolution1D
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/thomas/anaconda3/envs/anapy27/lib/python2.7/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/home/thomas/libraries/forks/keras/tests/test_shape_inference.py", line 75, in test_Convolution1D
check_layer_output_shape(layer, input_data)
File "/home/thomas/libraries/forks/keras/tests/test_shape_inference.py", line 18, in check_layer_output_shape
assert output.shape[1:] == expected_output_shape, "output shape: {} expected output shape: {}".format(output.shape[1:], expected_output_shape)
AssertionError: output shape: (2, 1) expected output shape: (3, 1)
======================================================================
FAIL: test_shape_inference.test_Convolution2D
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/thomas/anaconda3/envs/anapy27/lib/python2.7/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/home/thomas/libraries/forks/keras/tests/test_shape_inference.py", line 91, in test_Convolution2D
check_layer_output_shape(layer, input_data)
File "/home/thomas/libraries/forks/keras/tests/test_shape_inference.py", line 18, in check_layer_output_shape
assert output.shape[1:] == expected_output_shape, "output shape: {} expected output shape: {}".format(output.shape[1:], expected_output_shape)
AssertionError: output shape: (1, 2, 2) expected output shape: (1, 3, 3)
----------------------------------------------------------------------
Ran 17 tests in 7.296s
FAILED (failures=2)
```
I tried to dig in and I made my example work, but the tests are still failing.
This is my reproducible example:
``` python
from sklearn.feature_extraction.image import extract_patches_2d
import theano
import keras
import numpy as np
np.random.seed(1337)
from keras.models import Graph
from keras.layers.core import Dense, Flatten
from keras.layers.convolutional import Convolution1D, MaxPooling1D
len_ts_y = 60
sample = 1000
Fs = 8000
f = 5
x_bug = np.arange(sample)
y_test = np.sin(2 * np.pi * f * x_bug / Fs)
y_test = y_test+np.random.normal(0, 0.2, sample)
data_patched = extract_patches_2d(y_test[:,None], (len_ts_y+1,1))
y_train = data_patched[:,:,-1]
endog_train = data_patched[:,-len_ts_y-1:-1,-1]
model = Graph()
model.add_input(name='endog', input_shape=(len_ts_y,1))
model.add_node(Convolution1D(nb_filter=4,
filter_length=2,
border_mode="same",
activation="relu",
subsample_length=1,
),
name='conv_1', input='endog')
model.add_node(Flatten(), name="flat", input='conv_1')
model.add_node(Dense(32, activation="sigmoid"), name="Dense", input='flat')
model.add_node(Dense(1, activation="softplus"), name='last_dense', input='Dense')
model.add_output(name='output', input='last_dense')
model.compile(optimizer='sgd', loss={'output':'mse'})
model.fit({'endog': endog_train[:-100,:, None], 'output': y_train[:-100,-1]},
validation_data={'endog':endog_train[-100:,:, None], 'output': y_train[-100:,-1]},
batch_size=128,
nb_epoch=4)
```
I changed the size of the `output_shape` in the `conv_output_length` function of `convolutional.py` to make the above example work:
``` python
def conv_output_length(input_length, filter_size, border_mode, stride):
if input_length is None:
return None
assert border_mode in {'same', 'valid'}
if border_mode == 'same':
output_length = input_length
elif border_mode == 'valid':
output_length = input_length - filter_size + 1
return ((output_length + stride - 1) // stride) - 1
```
Is anyone else having the same errors?
Thank you!
| I can't reproduce, tests pass here and on Travis. And your script runs perfectly normally on my laptop.
> AssertionError: output shape: (1, 2, 2) expected output shape: (1, 3, 3)
My best guess is that your version of Theano has a conv2d op that behaves abnormally. Just upgrade Theano to latest master branch and you should be fine.
I checked the Theano version inside the test and it outputs the right one (0.7.0.dev-24ace59).
I just cleared the compile directory in my .theano folder to make sure I have a clean version, but the two tests still fail.
I'm using CUDA 7.0 and cuDNN v2, could it be related?
Do you have the same issue on CPU?
It might be a bug in Theano's conv2d on GPU with a specific version of cuDNN. Who knows...
@fchollet Can you look into #1274
It's working on CPU. Because I have CUDA 6.5 and CUDA 7.0 installed to test tensorflow, I'll try to reinstall cuDNN v3 and rerun the tests.
Right after the release of Keras supporting TensorFlow and Theano, I was able to run both backends.
Thanks for the tips, I'll keep you posted!
No luck, I tried the four combinations of CUDA/cuDNN and it still fails. I also tried to check out older versions of the two repos (Theano + Keras) from up to two weeks ago, without success. If someone has a working config with both Theano and TensorFlow working on GPU on Ubuntu 14.04, I would be interested in some advice :smile: (versions and environment vars). I will try to build a new config from scratch in Docker tomorrow.
I'm still able to run my scripts and the tests using `border_mode='valid'` though...
@farizrahman4u sorry to open another issue, I'm not sure if these 2 are related and I didn't want to mix Windows and Ubuntu. If it turns out it's the same problem don't hesitate to reference this one.
I just finished testing the same scripts with both Theano (CUDA 7.0 + cuDNN v3) and TensorFlow (CUDA 7.0 + cuDNN v2) in Docker containers and everything works as expected. I don't know what happened, but it isn't a Keras-related issue.
Really sorry to reopen the issue. I thought it was a config bug, but I had changed the size of the filters with the new config the last time I used the scripts.
To have a clean environment, I used [this container](https://hub.docker.com/r/tboquet/scikenacuthe7hc3/) built with CUDA 7.5 and cuDNN v3, and the error arises only when the filters have an even size, when using 1D convolutions with Graph models (including the example above). The imdb_cnn_lstm example runs fine, so I will rewrite it using Graph. The tests also fail and I will dig into the code tomorrow.
I'm not sure where to look to pin down the bug, so if you have any suggestions I will be happy to help.
Thank you!
**Update**: the bug happens only when using cuDNN.
When bypassing it in the `theano_backend`, everything works as expected:
``` python
if _on_gpu() and dnn.dnn_available() and False:
if border_mode == 'same':
assert(strides == (1, 1))
np_kernel = kernel.eval()
pad_x = (np_kernel.shape[2] - strides[0]) // 2
pad_y = (np_kernel.shape[3] - strides[1]) // 2
conv_out = dnn.dnn_conv(img=x,
kerns=kernel,
border_mode='full')
else:
conv_out = dnn.dnn_conv(img=x,
kerns=kernel,
border_mode=border_mode,
subsample=strides)
```
With a simple script I tried to highlight the bug:
``` python
import numpy as np
import theano
import itertools
from keras.layers.convolutional import Convolution1D, Convolution2D
# Convolution 1D
l_in = (100, 16)
x = np.random.randn(20, 100, 16).astype(theano.config.floatX)
for layer, mode, f_size in itertools.product([Convolution1D],
['valid', 'same'],
[3, 4, 7, 8]):
try:
l_1 = layer(input_shape=l_in, nb_filter=10, filter_length=f_size, border_mode=mode)
out = theano.function([l_1.get_input()], l_1.get_output())
print("conv1D, {}, {}, {}".format(mode, f_size, out(x).shape))
except Exception as e:
print(e)
# Convolution 2d
l_in = (3, 100, 100)
x = np.random.randn(20, 3, 100, 100).astype(theano.config.floatX)
for layer, mode, f_size in itertools.product([Convolution2D],
['valid', 'same'],
[3, 4, 7, 8]):
try:
l_1 = layer(10, f_size, f_size, border_mode=mode, input_shape=l_in)
out = theano.function([l_1.get_input()], l_1.get_output())
print("conv2D, {}, {}, {}".format(mode, f_size, out(x).shape))
except Exception as e:
print(e)
```
and I have this output when using cuDNN:
``` python
Using Theano backend.
conv1D, valid, 3, (20, 98, 10)
conv1D, valid, 4, (20, 97, 10)
conv1D, valid, 7, (20, 94, 10)
conv1D, valid, 8, (20, 93, 10)
conv1D, same, 3, (20, 100, 10)
conv1D, same, 4, (20, 99, 10)
conv1D, same, 7, (20, 100, 10)
conv1D, same, 8, (20, 99, 10)
conv2D, valid, 3, (20, 10, 98, 98)
conv2D, valid, 4, (20, 10, 97, 97)
conv2D, valid, 7, (20, 10, 94, 94)
conv2D, valid, 8, (20, 10, 93, 93)
conv2D, same, 3, (20, 10, 100, 100)
conv2D, same, 4, (20, 10, 99, 99)
conv2D, same, 7, (20, 10, 100, 100)
conv2D, same, 8, (20, 10, 99, 99)
```
Results without cuDNN:
``` python
conv1D, valid, 3, (20, 98, 10)
conv1D, valid, 4, (20, 97, 10)
conv1D, valid, 7, (20, 94, 10)
conv1D, valid, 8, (20, 93, 10)
conv1D, same, 3, (20, 100, 10)
conv1D, same, 4, (20, 100, 10)
conv1D, same, 7, (20, 100, 10)
conv1D, same, 8, (20, 100, 10)
conv2D, valid, 3, (20, 10, 98, 98)
conv2D, valid, 4, (20, 10, 97, 97)
conv2D, valid, 7, (20, 10, 94, 94)
conv2D, valid, 8, (20, 10, 93, 93)
conv2D, same, 3, (20, 10, 100, 100)
conv2D, same, 4, (20, 10, 100, 100)
conv2D, same, 7, (20, 10, 100, 100)
conv2D, same, 8, (20, 10, 100, 100)
```
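For what it's worth, the even-size 'same' results in the cuDNN run above are consistent with the symmetric padding `(filter_size - strides) // 2` computed by the lines removed in the diff at the top of this record (my reading, not verified against cuDNN itself):
``` python
n = 100  # input length, stride 1
for k in [3, 4, 7, 8]:
    pad = (k - 1) // 2             # what pad_x / pad_y evaluate to for stride 1
    print(k, n + 2 * pad - k + 1)  # 3 -> 100, 4 -> 99, 7 -> 100, 8 -> 99, matching the cuDNN column
```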
@fchollet not sure if you could reproduce the bug with cuDNN?
I don't readily have access to a GPU, so I won't be able to repro.
Does this happen with every cuDNN version? Specifically, does this happen with R4?
| 2015-12-31T03:35:36 |
|
keras-team/keras | 1,651 | keras-team__keras-1651 | [
"1631"
] | cae797b8039d632944feff73ca0f2dda8f2cde65 | diff --git a/keras/callbacks.py b/keras/callbacks.py
--- a/keras/callbacks.py
+++ b/keras/callbacks.py
@@ -456,6 +456,7 @@ def __init__(self, log_dir='./logs', histogram_freq=0):
'with the TensorFlow backend.')
self.log_dir = log_dir
self.histogram_freq = histogram_freq
+ self.merged = None
def _set_model(self, model):
import tensorflow as tf
@@ -463,7 +464,7 @@ def _set_model(self, model):
self.model = model
self.sess = KTF._get_session()
- if self.histogram_freq:
+ if self.histogram_freq and not self.merged:
mod_type = self.model.get_config()['name']
if mod_type == 'Sequential':
layers = {l.get_config()['name']: l for l in self.model.layers}
@@ -515,7 +516,7 @@ def on_epoch_end(self, epoch, logs={}):
all_values = self.totals.copy()
all_values.update(logs)
-
+
for name, value in all_values.items():
if name in ['batch', 'size']:
continue
diff --git a/keras/models.py b/keras/models.py
--- a/keras/models.py
+++ b/keras/models.py
@@ -970,8 +970,17 @@ def input_validation(generator_output):
_stop.set()
raise Exception('The generator output tuple must have '
'2 or 3 elements.')
+
+ sample_weight = standardize_weights(y, sample_weight=sample_weight,
+ sample_weight_mode=self.sample_weight_mode)
return X, y, sample_weight
+ if do_validation:
+ X_val, y_val, sample_weight_val = input_validation(validation_data)
+ self.validation_data = X_val + [y_val, sample_weight_val]
+ else:
+ self.validation_data = None
+
# start generator thread storing batches into a queue
generator_queue = queue.Queue()
_stop = threading.Event()
@@ -1043,10 +1052,9 @@ def generator_task():
raise NotImplementedError()
else:
# input validation
- X, y, sample_weight = input_validation(validation_data)
- val_outs = self.evaluate(X, y,
+ val_outs = self.evaluate(X_val, y_val,
show_accuracy=show_accuracy,
- sample_weight=sample_weight,
+ sample_weight=sample_weight_val,
verbose=0)
if type(val_outs) != list:
val_outs = [val_outs]
@@ -1431,8 +1439,19 @@ def input_validation(generator_output):
[len(sample_weight[name]) for name in sample_weight.keys()])) != 1:
raise Exception('All input arrays and target arrays must have '
'the same number of samples.')
+ sample_weight = {name: standardize_weights(data[name],
+ sample_weight=sample_weight.get(name),
+ sample_weight_mode=self.sample_weight_modes.get(name)) for name in self.output_order}
return data, sample_weight
+ if do_validation:
+ data_val, sample_weight_val = input_validation(validation_data)
+ sample_weight_val_l = [sample_weight_val[name] for name in self.output_order]
+ y_val = [standardize_y(data_val[name]) for name in self.output_order]
+ self.validation_data = [data_val[name] for name in self.input_order] + y_val + sample_weight_val_l
+ else:
+ self.validation_data = None
+
# start generator thread storing batches into a queue
generator_queue = queue.Queue()
_stop = threading.Event()
@@ -1498,10 +1517,8 @@ def generator_task():
_stop.set()
raise NotImplementedError()
else:
- # input validation
- data, sample_weight = input_validation(validation_data)
- val_outs = self.evaluate(data,
- sample_weight=sample_weight,
+ val_outs = self.evaluate(data_val,
+ sample_weight=sample_weight_val,
verbose=0)
if type(val_outs) != list:
val_outs = [val_outs]
| diff --git a/tests/keras/test_callbacks.py b/tests/keras/test_callbacks.py
--- a/tests/keras/test_callbacks.py
+++ b/tests/keras/test_callbacks.py
@@ -136,23 +136,30 @@ def test_TensorBoard():
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
- # case 1 Sequential wo accuracy
- with tf.Graph().as_default():
- session = tf.Session('')
- KTF._set_session(session)
- model = Sequential()
- model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
- model.add(Dense(nb_class, activation='softmax'))
- model.compile(loss='categorical_crossentropy', optimizer='sgd')
- tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1)
- cbks = [tsb]
- model.fit(X_train, y_train, batch_size=batch_size, show_accuracy=True,
- validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=2)
- assert os.path.exists(filepath)
- shutil.rmtree(filepath)
+ def data_generator(train):
+ if train:
+ max_batch_index = len(X_train) // batch_size
+ else:
+ max_batch_index = len(X_test) // batch_size
+ i = 0
+ while 1:
+ if train:
+ yield (X_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size])
+ else:
+ yield (X_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size])
+ i += 1
+ i = i % max_batch_index
+
+ def data_generator_graph(train):
+ while 1:
+ if train:
+ yield {'X_vars': X_train, 'output': y_train}
+ else:
+ yield {'X_vars': X_test, 'output': y_test}
+
+ # case 1 Sequential
- # case 2 Sequential w accuracy
with tf.Graph().as_default():
session = tf.Session('')
KTF._set_session(session)
@@ -163,12 +170,42 @@ def test_TensorBoard():
tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1)
cbks = [tsb]
+
+ # fit with validation data
+ model.fit(X_train, y_train, batch_size=batch_size, show_accuracy=False,
+ validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=2)
+
+ # fit with validation data and accuracy
model.fit(X_train, y_train, batch_size=batch_size, show_accuracy=True,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=2)
+
+ # fit generator with validation data
+ model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
+ show_accuracy=False,
+ validation_data=(X_test, y_test),
+ callbacks=cbks)
+
+ # fit generator without validation data
+ model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
+ show_accuracy=False,
+ callbacks=cbks)
+
+ # fit generator with validation data and accuracy
+ model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
+ show_accuracy=True,
+ validation_data=(X_test, y_test),
+ callbacks=cbks)
+
+ # fit generator without validation data and accuracy
+ model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
+ show_accuracy=True,
+ callbacks=cbks)
+
assert os.path.exists(filepath)
shutil.rmtree(filepath)
- # case 3 Graph
+ # case 2 Graph
+
with tf.Graph().as_default():
session = tf.Session('')
KTF._set_session(session)
@@ -185,10 +222,27 @@ def test_TensorBoard():
tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1)
cbks = [tsb]
+
+ # fit with validation
model.fit({'X_vars': X_train, 'output': y_train},
batch_size=batch_size,
validation_data={'X_vars': X_test, 'output': y_test},
callbacks=cbks, nb_epoch=2)
+
+ # fit wo validation
+ model.fit({'X_vars': X_train, 'output': y_train},
+ batch_size=batch_size,
+ callbacks=cbks, nb_epoch=2)
+
+ # fit generator with validation
+ model.fit_generator(data_generator_graph(True), 1000, nb_epoch=2,
+ validation_data={'X_vars': X_test, 'output': y_test},
+ callbacks=cbks)
+
+ # fit generator wo validation
+ model.fit_generator(data_generator_graph(True), 1000, nb_epoch=2,
+ callbacks=cbks)
+
assert os.path.exists(filepath)
shutil.rmtree(filepath)
| Tensorboard callback doesn't work on fit_generator
We get this error on a Sequential model with multiple inputs:
`keras Sequential object has no attribute validation_data`
We get the same error for the Graph model:
`keras Graph object has no attribute validation_data`
It does work with `fit`!
Is this normal?
``` python
mix_model.fit_generator(trainBatchBuilder.seq_batch_generator(), 100, 5,
                        validation_data=(X_test, Y_test),
                        callbacks=[TensorBoard("path/tensorboard_logs/", 1)])
```
Am I missing something?
| Same here with a Graph model; looking at lines _1399-1428_ in **models.py** and _501-524_ in **callbacks.py** shows that it is not implemented. Though, it may not be too difficult to implement.
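Once the patch above stores `validation_data` inside `fit_generator`, a usage sketch like the following (mirroring the added tests; toy data, TensorFlow backend assumed since `TensorBoard` requires it) should work:
``` python
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense
from keras.callbacks import TensorBoard

X_train = np.random.random((100, 2))
y_train = np.random.randint(0, 2, (100, 1))
X_test = np.random.random((20, 2))
y_test = np.random.randint(0, 2, (20, 1))

def gen(batch_size=10):
    while True:
        idx = np.random.randint(0, len(X_train), batch_size)
        yield X_train[idx], y_train[idx]

model = Sequential()
model.add(Dense(4, input_dim=2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='sgd')

tsb = TensorBoard(log_dir='./logs', histogram_freq=1)
model.fit_generator(gen(), samples_per_epoch=100, nb_epoch=2,
                    validation_data=(X_test, y_test), callbacks=[tsb])
```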
| 2016-02-05T23:49:59 |
keras-team/keras | 1,767 | keras-team__keras-1767 | [
"1730"
] | b8a9f84fad1be2f27365a25b4e7f188d382d70d0 | diff --git a/keras/layers/containers.py b/keras/layers/containers.py
--- a/keras/layers/containers.py
+++ b/keras/layers/containers.py
@@ -156,9 +156,9 @@ def get_weights(self):
return weights
def set_weights(self, weights):
- for i in range(len(self.layers)):
- nb_param = len(self.layers[i].trainable_weights) + len(self.layers[i].non_trainable_weights)
- self.layers[i].set_weights(weights[:nb_param])
+ for layer in self.layers:
+ nb_param = len(layer.get_weights())
+ layer.set_weights(weights[:nb_param])
weights = weights[nb_param:]
def get_config(self):
| diff --git a/tests/keras/test_models.py b/tests/keras/test_models.py
--- a/tests/keras/test_models.py
+++ b/tests/keras/test_models.py
@@ -125,6 +125,70 @@ def test_sequential():
model = model_from_yaml(yaml_data)
+def test_nested_sequential():
+ (X_train, y_train), (X_test, y_test) = _get_test_data()
+
+ inner = Sequential()
+ inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
+ inner.add(Activation('relu'))
+ inner.add(Dense(nb_class))
+
+ middle = Sequential()
+ middle.add(inner)
+
+ model = Sequential()
+ model.add(middle)
+ model.add(Activation('softmax'))
+ model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
+ model.summary()
+
+ model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
+ model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
+ model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
+ model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
+ model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
+ model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)
+
+ model.train_on_batch(X_train[:32], y_train[:32])
+
+ loss = model.evaluate(X_test, y_test, verbose=0)
+ assert(loss < 0.8)
+
+ model.predict(X_test, verbose=0)
+ model.predict_classes(X_test, verbose=0)
+ model.predict_proba(X_test, verbose=0)
+ model.get_config(verbose=0)
+
+ fname = 'test_nested_sequential_temp.h5'
+ model.save_weights(fname, overwrite=True)
+
+ inner = Sequential()
+ inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
+ inner.add(Activation('relu'))
+ inner.add(Dense(nb_class))
+
+ middle = Sequential()
+ middle.add(inner)
+
+ model = Sequential()
+ model.add(middle)
+ model.add(Activation('softmax'))
+ model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
+ model.load_weights(fname)
+ os.remove(fname)
+
+ nloss = model.evaluate(X_test, y_test, verbose=0)
+ assert(loss == nloss)
+
+ # test json serialization
+ json_data = model.to_json()
+ model = model_from_json(json_data)
+
+ # test yaml serialization
+ yaml_data = model.to_yaml()
+ model = model_from_yaml(yaml_data)
+
+
def test_merge_sum():
(X_train, y_train), (X_test, y_test) = _get_test_data()
left = Sequential()
| unable to load weights in models with siamese branches
The problem is that the `set_weights()` function in `Sequential` tries to concatenate `trainable_weights` and `non_trainable_weights` together.
However, if one of your layers is another `Sequential` container, it does not have a `non_trainable_weights` attribute.
This needs to be implemented.
I hacked it in like this:
``` Python
@property
def non_trainable_weights(self):
weights = []
for l in self.layers:
if not l.trainable:
weights += l.get_params()[0]
return weights
```
But it's probably not the way to do it
| +1
I think the actual fix is to change `Sequential.set_weights` to something very similar to `Graph.set_weights`. I'll submit a PR when I get time.
It turns out that this has nothing to do with Siamese models. It happens when you have triple-nested Sequential layers.
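A sketch of the nested-`Sequential` round trip that fails before this change (it mirrors the test added in this PR; the layer sizes are arbitrary):
``` python
from keras.models import Sequential
from keras.layers.core import Dense, Activation

inner = Sequential()
inner.add(Dense(8, input_shape=(4,)))

middle = Sequential()
middle.add(inner)

model = Sequential()
model.add(middle)
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

model.save_weights('nested_tmp.h5', overwrite=True)
model.load_weights('nested_tmp.h5')  # previously tripped over the nested container's weights
```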
| 2016-02-19T20:27:35 |
keras-team/keras | 1,769 | keras-team__keras-1769 | [
"1768"
] | b8a9f84fad1be2f27365a25b4e7f188d382d70d0 | diff --git a/keras/callbacks.py b/keras/callbacks.py
--- a/keras/callbacks.py
+++ b/keras/callbacks.py
@@ -255,7 +255,7 @@ def __init__(self, filepath, monitor='val_loss', verbose=0,
if mode not in ['auto', 'min', 'max']:
warnings.warn('ModelCheckpoint mode %s is unknown, '
- 'fallback to auto mode.' % (self.mode),
+ 'fallback to auto mode.' % (mode),
RuntimeWarning)
mode = 'auto'
| Bug in callbacks.py ModelCheckpoint class
There is a bug in the constructor of the ModelCheckpoint class (lines 247-259)
See code below (from master branch). If the mode parameter is not recognized, the warning message refers to a self.mode variable that is never initialized, and an exception is thrown:
`AttributeError: 'ModelCheckpoint' object has no attribute 'mode'`
--- Code from callbacks.py ---
```
def __init__(self, filepath, monitor='val_loss', verbose=0,
save_best_only=False, mode='auto'):
super(Callback, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
if mode not in ['auto', 'min', 'max']:
warnings.warn('ModelCheckpoint mode %s is unknown, '
'fallback to auto mode.' % (self.mode),
RuntimeWarning)
```
| 2016-02-20T01:19:29 |
||
keras-team/keras | 1,792 | keras-team__keras-1792 | [
"1763"
] | 784d81d2c88630b8cba8236f3354afa3326cd3c1 | diff --git a/keras/backend/theano_backend.py b/keras/backend/theano_backend.py
--- a/keras/backend/theano_backend.py
+++ b/keras/backend/theano_backend.py
@@ -650,10 +650,10 @@ def conv2d(x, kernel, strides=(1, 1), border_mode='valid', dim_ordering='th',
def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
dim_ordering='th', pool_mode='max'):
if border_mode == 'same':
- # TODO: add implementation for border_mode="same"
- raise Exception('border_mode="same" not supported with Theano.')
+ w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
+ h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1
+ padding = (w_pad, h_pad)
elif border_mode == 'valid':
- ignore_border = True
padding = (0, 0)
else:
raise Exception('Invalid border mode: ' + str(border_mode))
@@ -666,17 +666,25 @@ def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
if pool_mode == 'max':
pool_out = downsample.max_pool_2d(x, ds=pool_size, st=strides,
- ignore_border=ignore_border,
+ ignore_border=True,
padding=padding,
mode='max')
elif pool_mode == 'avg':
pool_out = downsample.max_pool_2d(x, ds=pool_size, st=strides,
- ignore_border=ignore_border,
+ ignore_border=True,
padding=padding,
mode='average_exc_pad')
else:
raise Exception('Invalid pooling mode: ' + str(pool_mode))
+ if border_mode == 'same':
+ expected_width = (x.shape[2] + strides[0] - 1) // strides[0]
+ expected_height = (x.shape[3] + strides[1] - 1) // strides[1]
+
+ pool_out = pool_out[:, :,
+ : expected_width,
+ : expected_height]
+
if dim_ordering == 'tf':
pool_out = pool_out.dimshuffle((0, 2, 3, 1))
return pool_out
| diff --git a/tests/keras/layers/test_convolutional.py b/tests/keras/layers/test_convolutional.py
--- a/tests/keras/layers/test_convolutional.py
+++ b/tests/keras/layers/test_convolutional.py
@@ -191,17 +191,20 @@ def test_averagepooling_2d():
stack_size = 7
input_nb_row = 11
input_nb_col = 12
- pool_size = (3, 3)
input = np.ones((nb_samples, stack_size, input_nb_row, input_nb_col))
- for strides in [(1, 1), (2, 2)]:
- layer = convolutional.AveragePooling2D(strides=strides,
- border_mode='valid',
- pool_size=pool_size)
- layer.input = K.variable(input)
- for train in [True, False]:
- K.eval(layer.get_output(train))
- layer.get_config()
+ for border_mode in ['valid', 'same']:
+ for pool_size in [(2, 2), (3, 3), (4, 4), (5, 5)]:
+ for strides in [(1, 1), (2, 2)]:
+ layer = convolutional.AveragePooling2D(strides=strides,
+ border_mode=border_mode,
+ pool_size=pool_size)
+ layer.input = K.variable(input)
+ for train in [True, False]:
+ out = K.eval(layer.get_output(train))
+ if border_mode == 'same' and strides == (1, 1):
+ assert input.shape == out.shape
+ layer.get_config()
def test_zero_padding_2d():
| border_mode="same" in AveragePooling2D with Theano
This currently raises a `NotImplementedError`. It looks to me that it would be fairly straightforward to implement. Are there any more complex issues that aren't obvious?
It looks like I can just make the following change to `pool2d` in `theano_backend.py`:
``` python
if border_mode == 'same':
ignore_border = True
padding = (pool_size[0] - 2, pool_size[1] - 2)
elif border_mode == 'valid':
ignore_border = True
padding = (0, 0)
else:
raise Exception('Invalid border mode: ' + str(border_mode))
...
```
I assume that if it was this easy, someone would have done it already, though.
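For reference, my reading of the arithmetic behind the merged fix above (Theano pads both sides by `padding`, uses `ignore_border=True`, and the result is then cropped back to the expected 'same' length):
``` python
def same_pool_length(n, pool, stride=1):
    pad = pool - 2 if pool % 2 == 1 else pool - 1    # per-side padding chosen in the diff
    padded_out = (n + 2 * pad - pool) // stride + 1  # ignore_border=True output length
    expected = (n + stride - 1) // stride            # what 'same' should give
    return min(padded_out, expected)                 # the crop keeps only the first `expected` entries

for pool in (2, 3, 4, 5):
    print(pool, same_pool_length(100, pool))         # 100 for every pool size
```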
| 2016-02-22T22:30:26 |
|
keras-team/keras | 2,268 | keras-team__keras-2268 | [
"1461"
] | 0772210dea435e0c8d5cff9ce212b835b40c7a40 | diff --git a/keras/utils/np_utils.py b/keras/utils/np_utils.py
--- a/keras/utils/np_utils.py
+++ b/keras/utils/np_utils.py
@@ -9,7 +9,6 @@ def to_categorical(y, nb_classes=None):
'''Convert class vector (integers from 0 to nb_classes)
to binary class matrix, for use with categorical_crossentropy.
'''
- y = np.asarray(y, dtype='int32')
if not nb_classes:
nb_classes = np.max(y)+1
Y = np.zeros((len(y), nb_classes))
| Textual information for labels?
I seem unable to use text for labels while using `to_categorical`.
```
Using Theano backend.
Traceback (most recent call last):
File "playground.py", line 88, in <module>
train_model_and_test(number_of_epochs, number_of_classes, train_data, train_label, augmented_data_generator)
File "playground.py", line 62, in train_model_and_test
train_label = np_utils.to_categorical(train_label, number_of_classes)
File "/usr/local/lib/python2.7/dist-packages/keras/utils/np_utils.py", line 12, in to_categorical
y = np.asarray(y, dtype='int32')
File "/usr/lib/python2.7/dist-packages/numpy/core/numeric.py", line 460, in asarray
return array(a, dtype, copy=False, order=order)
ValueError: invalid literal for int() with base 10: 'yellow'
```
| The problem has been fixed, but I might as well ask - why on earth are you only allowing for int32 labels?
```
def to_categorical(y, nb_classes=None):
'''Convert class vector (integers from 0 to nb_classes)
to binary class matrix, for use with categorical_crossentropy.
'''
y = np.asarray(y, dtype='int32')
if not nb_classes:
nb_classes = np.max(y)+1
Y = np.zeros((len(y), nb_classes))
for i in range(len(y)):
Y[i, y[i]] = 1.
return Y
```
How to change (for reference)
```
y = np.asarray(y, dtype='int32')
```
to
```
y = np.asarray(y, dtype='a16')
```
https://github.com/fchollet/keras/blob/master/keras/utils/np_utils.py#L8-L18
Seems a bit daft!
For my application, I have a varying number of classes for each sample. `y = np.asarray(y, dtype='int32')` causes `to_categorical` to fail with `ValueError: setting an array element with a sequence.`, but if I remove the line completely, `to_categorical` behaves correctly.
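A minimal sketch of that multi-label case (assuming the `np.asarray(..., dtype='int32')` line is removed, as the patch above does, and that `nb_classes` is passed explicitly):
``` python
import numpy as np

def to_categorical_patched(y, nb_classes):
    Y = np.zeros((len(y), nb_classes))
    for i in range(len(y)):
        Y[i, y[i]] = 1.  # y[i] may be a single index or a list of indices
    return Y

print(to_categorical_patched([[0, 2], [1], [0, 1, 3]], nb_classes=4))
# rows have ones at columns {0, 2}, {1} and {0, 1, 3}
```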
| 2016-04-12T05:07:48 |
|
keras-team/keras | 2,300 | keras-team__keras-2300 | [
"2287"
] | 1206120d1084cbe45dc2876f002cb572a97e3844 | diff --git a/keras/engine/training.py b/keras/engine/training.py
--- a/keras/engine/training.py
+++ b/keras/engine/training.py
@@ -369,7 +369,7 @@ def standardize_weights(y, sample_weight=None, class_weight=None,
def generator_queue(generator, max_q_size=10,
wait_time=0.05, nb_worker=1):
'''Builds a threading queue out of a data generator.
- Used in `fit_generator`, `evaluate_generator`.
+ Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
'''
q = queue.Queue()
_stop = threading.Event()
@@ -1184,7 +1184,7 @@ def predict_on_batch(self, x):
def fit_generator(self, generator, samples_per_epoch, nb_epoch,
verbose=1, callbacks=[],
validation_data=None, nb_val_samples=None,
- class_weight={}):
+ class_weight={}, max_q_size=10):
'''Fits the model on data generated batch-by-batch by
a Python generator.
The generator is run in parallel to the model, for efficiency.
@@ -1214,6 +1214,7 @@ def fit_generator(self, generator, samples_per_epoch, nb_epoch,
at the end of every epoch.
class_weight: dictionary mapping class indices to a weight
for the class.
+ max_q_size: maximum size for the generator queue
# Returns
A `History` object.
@@ -1287,7 +1288,7 @@ def generate_arrays_from_file(path):
self.validation_data = None
# start generator thread storing batches into a queue
- data_gen_queue, _stop = generator_queue(generator)
+ data_gen_queue, _stop = generator_queue(generator, max_q_size=max_q_size)
self.stop_training = False
while epoch < nb_epoch:
@@ -1358,7 +1359,8 @@ def generate_arrays_from_file(path):
if samples_seen >= samples_per_epoch and do_validation:
if val_gen:
val_outs = self.evaluate_generator(validation_data,
- nb_val_samples)
+ nb_val_samples,
+ max_q_size=max_q_size)
else:
# no need for try/except because
# data has already been validated
@@ -1380,7 +1382,7 @@ def generate_arrays_from_file(path):
callbacks.on_train_end()
return self.history
- def evaluate_generator(self, generator, val_samples):
+ def evaluate_generator(self, generator, val_samples, max_q_size=10):
'''Evaluates the model on a data generator. The generator should
return the same kind of data as accepted by `test_on_batch`.
@@ -1391,6 +1393,7 @@ def evaluate_generator(self, generator, val_samples):
val_samples:
total number of samples to generate from `generator`
before returning.
+ max_q_size: maximum size for the generator queue
# Returns
Scalar test loss (if the model has a single output and no metrics)
@@ -1404,7 +1407,7 @@ def evaluate_generator(self, generator, val_samples):
wait_time = 0.01
all_outs = []
weights = []
- data_gen_queue, _stop = generator_queue(generator)
+ data_gen_queue, _stop = generator_queue(generator, max_q_size=max_q_size)
while processed_samples < val_samples:
generator_output = None
@@ -1456,7 +1459,7 @@ def evaluate_generator(self, generator, val_samples):
weights=weights))
return averages
- def predict_generator(self, generator, val_samples):
+ def predict_generator(self, generator, val_samples, max_q_size=10):
'''Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
@@ -1465,6 +1468,7 @@ def predict_generator(self, generator, val_samples):
generator: generator yielding batches of input samples.
val_samples: total number of samples to generate from `generator`
before returning.
+ max_q_size: maximum size for the generator queue
# Returns
Numpy array(s) of predictions.
@@ -1474,7 +1478,7 @@ def predict_generator(self, generator, val_samples):
processed_samples = 0
wait_time = 0.01
all_outs = []
- data_gen_queue, _stop = generator_queue(generator)
+ data_gen_queue, _stop = generator_queue(generator, max_q_size=max_q_size)
while processed_samples < val_samples:
generator_output = None
diff --git a/keras/models.py b/keras/models.py
--- a/keras/models.py
+++ b/keras/models.py
@@ -562,8 +562,7 @@ def predict_classes(self, x, batch_size=32, verbose=1):
def fit_generator(self, generator, samples_per_epoch, nb_epoch,
verbose=1, callbacks=[],
validation_data=None, nb_val_samples=None,
- class_weight=None,
- **kwargs):
+ class_weight=None, max_q_size=10, **kwargs):
'''Fits the model on data generated batch-by-batch by
a Python generator.
The generator is run in parallel to the model, for efficiency.
@@ -593,6 +592,7 @@ def fit_generator(self, generator, samples_per_epoch, nb_epoch,
at the end of every epoch.
class_weight: dictionary mapping class indices to a weight
for the class.
+ max_q_size: maximum size for the generator queue
# Returns
A `History` object.
@@ -641,10 +641,10 @@ def generate_arrays_from_file(path):
callbacks=callbacks,
validation_data=validation_data,
nb_val_samples=nb_val_samples,
- class_weight=class_weight)
+ class_weight=class_weight,
+ max_q_size=max_q_size)
- def evaluate_generator(self, generator, val_samples,
- **kwargs):
+ def evaluate_generator(self, generator, val_samples, max_q_size=10, **kwargs):
'''Evaluates the model on a data generator. The generator should
return the same kind of data as accepted by `test_on_batch`.
@@ -655,6 +655,7 @@ def evaluate_generator(self, generator, val_samples,
val_samples:
total number of samples to generate from `generator`
before returning.
+ max_q_size: maximum size for the generator queue
'''
if self.model is None:
raise Exception('The model needs to be compiled before being used.')
@@ -672,9 +673,10 @@ def evaluate_generator(self, generator, val_samples,
raise Exception('Received unknown keyword arguments: ' +
str(kwargs))
return self.model.evaluate_generator(generator,
- val_samples)
+ val_samples,
+ max_q_size=max_q_size)
- def predict_generator(self, generator, val_samples):
+ def predict_generator(self, generator, val_samples, max_q_size=10):
'''Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
@@ -683,13 +685,15 @@ def predict_generator(self, generator, val_samples):
generator: generator yielding batches of input samples.
val_samples: total number of samples to generate from `generator`
before returning.
+ max_q_size: maximum size for the generator queue
# Returns
A Numpy array of predictions.
'''
if self.model is None:
raise Exception('The model needs to be compiled before being used.')
- return self.model.predict_generator(generator, val_samples)
+ return self.model.predict_generator(generator, val_samples,
+ max_q_size=max_q_size)
def get_config(self):
'''Returns the model configuration
| diff --git a/tests/keras/test_sequential_model.py b/tests/keras/test_sequential_model.py
--- a/tests/keras/test_sequential_model.py
+++ b/tests/keras/test_sequential_model.py
@@ -66,6 +66,7 @@ def data_generator(train):
model.fit_generator(data_generator(True), len(X_train), nb_epoch, validation_data=(X_test, y_test))
model.fit_generator(data_generator(True), len(X_train), nb_epoch,
validation_data=data_generator(False), nb_val_samples=batch_size * 3)
+ model.fit_generator(data_generator(True), len(X_train), nb_epoch, max_q_size=2)
loss = model.evaluate(X_train, y_train)
@@ -100,8 +101,8 @@ def data_generator(x, y, batch_size=50):
loss = model.evaluate(X_test, y_test)
- prediction = model.predict_generator(data_generator(X_test, y_test), X_test.shape[0])
- gen_loss = model.evaluate_generator(data_generator(X_test, y_test, 50), X_test.shape[0])
+ prediction = model.predict_generator(data_generator(X_test, y_test), X_test.shape[0], max_q_size=2)
+ gen_loss = model.evaluate_generator(data_generator(X_test, y_test, 50), X_test.shape[0], max_q_size=2)
pred_loss = K.eval(K.mean(objectives.get(model.loss)(K.variable(y_test), K.variable(prediction))))
assert(np.isclose(pred_loss, loss))
| Add option to change generator queue size in fit_generator
I'm training a 3D CNN model on video clips and my batch generator is eating up all the memory on my machine. I see that the `generator_queue` function has a `max_q_size` argument, but there's no way to set it in the `fit_generator` call.
Here's [a link](https://gist.github.com/jbencook/15cd39799dbefdccae9c4087647a5f91) to my code, which works until I run out of memory. I'm using the data described [here](https://github.com/gtoderici/sports-1m-dataset).
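With the patch above, the queue size can be capped directly; a minimal sketch (toy model and generator, not the linked gist):
``` python
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense

model = Sequential()
model.add(Dense(1, input_dim=8))
model.compile(loss='mse', optimizer='sgd')

def batches(batch_size=32):
    while True:
        X = np.random.random((batch_size, 8))
        yield X, X.sum(axis=1, keepdims=True)

# max_q_size is the new argument; the old behaviour (up to 10 queued batches) stays the default
model.fit_generator(batches(), samples_per_epoch=320, nb_epoch=2, max_q_size=2)
```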
| 2016-04-13T13:36:56 |
|
keras-team/keras | 2,303 | keras-team__keras-2303 | [
"2297"
] | 1206120d1084cbe45dc2876f002cb572a97e3844 | diff --git a/keras/callbacks.py b/keras/callbacks.py
--- a/keras/callbacks.py
+++ b/keras/callbacks.py
@@ -467,8 +467,14 @@ def on_epoch_end(self, epoch, logs={}):
if epoch % self.histogram_freq == 0:
# TODO: implement batched calls to sess.run
# (current call will likely go OOM on GPU)
- feed_dict = dict(zip(self.model.inputs,
- self.model.validation_data))
+ if self.model.uses_learning_phase:
+ cut_v_data = len(self.model.inputs)
+ val_data = self.model.validation_data[:cut_v_data] + [0]
+ tensors = self.model.inputs + [K.learning_phase()]
+ else:
+ val_data = self.model.validation_data
+ tensors = self.model.inputs
+ feed_dict = dict(zip(tensors, val_data))
result = self.sess.run([self.merged], feed_dict=feed_dict)
summary_str = result[0]
self.writer.add_summary(summary_str, epoch)
| diff --git a/tests/keras/test_callbacks.py b/tests/keras/test_callbacks.py
--- a/tests/keras/test_callbacks.py
+++ b/tests/keras/test_callbacks.py
@@ -126,7 +126,7 @@ def test_LearningRateScheduler():
assert (float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()
[email protected]((K._BACKEND != 'tensorflow') or (sys.version_info[0] == 3),
[email protected]((K._BACKEND != 'tensorflow'),
reason="Requires tensorflow backend")
def test_TensorBoard():
import shutil
@@ -252,8 +252,4 @@ def data_generator_graph(train):
KTF.set_session(old_session)
if __name__ == '__main__':
- # pytest.main([__file__])
- # test_ModelCheckpoint()
- # test_EarlyStopping()
- # test_LearningRateScheduler()
- test_TensorBoard()
+ pytest.main([__file__])
| TensorBoard with Histograms crashes with Dropout Layers
The most recent version of Keras crashes after the first epoch when a TensorBoard callback is added that records histograms.
Failing Example Code (`examples/mnist_mlp.py` with added callback) and Crash Log:
https://gist.github.com/jdoerrie/713a2d53e14b01470aa664494186d7c7
Judging by the output the issue is related to `keras_learning_phase` not being fed. The code works normally when `histogram_freq` is set to 0 or the Dropout layers are removed.
- Operating System: Mac OS X 10.11.3
- Python: 3.5.1
- Keras: 1.0.0 (Master Checkout)
- Tensorflow: 0.7.1
- Running on CPU.
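A minimal sketch of the kind of setup that triggered the crash (toy data, TensorFlow backend assumed); with the fix above, which also feeds `K.learning_phase()` in the histogram run, it gets past the first epoch:
``` python
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Dropout
from keras.callbacks import TensorBoard

X = np.random.random((200, 20))
y = np.random.randint(0, 2, (200, 1))

model = Sequential()
model.add(Dense(16, input_dim=20, activation='relu'))
model.add(Dropout(0.5))  # Dropout is what introduces the learning-phase placeholder
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='sgd')

model.fit(X, y, nb_epoch=2, validation_data=(X, y),
          callbacks=[TensorBoard(log_dir='./logs', histogram_freq=1)])
```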
| I'm on it, I will submit a pull request in a few minutes.
| 2016-04-13T16:59:26 |
keras-team/keras | 2,690 | keras-team__keras-2690 | [
"2498"
] | ed7a5a1418f9766d70523ad2d625085b070971d5 | diff --git a/keras/engine/topology.py b/keras/engine/topology.py
--- a/keras/engine/topology.py
+++ b/keras/engine/topology.py
@@ -2107,8 +2107,6 @@ def run_internal_graph(self, inputs, masks=None):
return output_tensors, output_masks, output_shapes
def get_config(self):
- '''TODO: add keras version information
- '''
config = {
'name': self.name,
}
@@ -2348,6 +2346,26 @@ def load_weights(self, filepath):
K.batch_set_value(weight_value_tuples)
f.close()
+ def _updated_config(self):
+ '''shared between different serialization methods'''
+ from keras import __version__ as keras_version
+
+ config = self.get_config()
+ model_config = {
+ 'class_name': self.__class__.__name__,
+ 'config': config,
+ 'keras_version': keras_version
+ }
+
+ if hasattr(self, 'optimizer'):
+ model_config['optimizer'] = self.optimizer.get_config()
+ model_config['loss'] = self.loss.__class__.__name__
+ model_config['sample_weight_mode'] = self.sample_weight_mode
+
+ if hasattr(self, 'loss_weights'):
+ model_config['loss_weights'] = self.loss_weights
+ return model_config
+
def to_json(self, **kwargs):
'''Returns a JSON string containing the network configuration.
@@ -2367,11 +2385,7 @@ def get_json_type(obj):
raise TypeError('Not JSON Serializable')
- config = self.get_config()
- model_config = {
- 'class_name': self.__class__.__name__,
- 'config': config,
- }
+ model_config = self._updated_config()
return json.dumps(model_config, default=get_json_type, **kwargs)
def to_yaml(self, **kwargs):
@@ -2385,12 +2399,7 @@ def to_yaml(self, **kwargs):
functions / classes.
'''
import yaml
- config = self.get_config()
- model_config = {
- 'class_name': self.__class__.__name__,
- 'config': config,
- }
- return yaml.dump(model_config, **kwargs)
+ return yaml.dump(self._updated_config(), **kwargs)
def summary(self):
from keras.utils.layer_utils import print_summary
diff --git a/keras/engine/training.py b/keras/engine/training.py
--- a/keras/engine/training.py
+++ b/keras/engine/training.py
@@ -452,6 +452,7 @@ def compile(self, optimizer, loss, metrics=[], loss_weights=None,
self.optimizer = optimizers.get(optimizer)
self.sample_weight_mode = sample_weight_mode
self.loss = loss
+ self.loss_weights = loss_weights
# prepare loss weights
if loss_weights is None:
| save optimizer and objective information to configure file
It seems that Keras 1 doesn't support saving optimizer and loss information to the configuration file any more. Is there some alternative way? Or any plan to add this function back again?
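With the patch above, a compiled model's serialized config carries the optimizer and loss information again; a minimal sketch (the exact set of keys is my expectation from the diff):
``` python
import json
from keras.layers import Input, Dense
from keras.models import Model

inputs = Input(shape=(4,))
outputs = Dense(1)(inputs)
model = Model(input=inputs, output=outputs)
model.compile(optimizer='rmsprop', loss='mse')

config = json.loads(model.to_json())
print('optimizer' in config, 'loss' in config, 'keras_version' in config)  # expected: True True True
```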
| 2016-05-10T18:58:36 |
||
keras-team/keras | 2,854 | keras-team__keras-2854 | [
"2818"
] | 7b5bab83f4836a23a8a27a5e11762c999bcdfb6d | diff --git a/keras/engine/topology.py b/keras/engine/topology.py
--- a/keras/engine/topology.py
+++ b/keras/engine/topology.py
@@ -1327,9 +1327,9 @@ def get_config(self):
if isinstance(self.mode, python_types.LambdaType):
if py3:
- mode = marshal.dumps(self.mode.__code__)
+ mode = marshal.dumps(self.mode.__code__).decode('raw_unicode_escape')
else:
- mode = marshal.dumps(self.mode.func_code)
+ mode = marshal.dumps(self.mode.func_code).decode('raw_unicode_escape')
mode_type = 'lambda'
elif callable(self.mode):
mode = self.mode.__name__
@@ -1365,7 +1365,7 @@ def from_config(cls, config):
if mode_type == 'function':
mode = globals()[config['mode']]
elif mode_type == 'lambda':
- mode = marshal.loads(config['mode'])
+ mode = marshal.loads(config['mode'].encode('raw_unicode_escape'))
mode = python_types.FunctionType(mode, globals())
else:
mode = config['mode']
| TypeError: Not JSON Serializable
Here is sample code that will reproduce the error:
https://gist.github.com/henry0312/c5e37cf219498a692520ddc01d2e41ba
Run: `python keras_save_json_bug.py`
```
Traceback (most recent call last):
File "keras_save_json_bug.py", line 59, in <module>
json_string = net.to_json()
File "/usr/local/var/pyenv/versions/3.5.1/lib/python3.5/site-packages/keras/engine/topology.py", line 2373, in to_json
return json.dumps(model_config, default=get_json_type, **kwargs)
File "/usr/local/var/pyenv/versions/3.5.1/lib/python3.5/json/__init__.py", line 237, in dumps
**kw).encode(obj)
File "/usr/local/var/pyenv/versions/3.5.1/lib/python3.5/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/local/var/pyenv/versions/3.5.1/lib/python3.5/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/usr/local/var/pyenv/versions/3.5.1/lib/python3.5/site-packages/keras/engine/topology.py", line 2366, in get_json_type
raise TypeError('Not JSON Serializable')
TypeError: Not JSON Serializable
```
| What is your environment? I use Keras 1.0.3 with both tensorflow / theano backend and no error was produced with your code.
My environment:
- OS: OS X 10.11.5 and Ubuntu 14.04.4
- Python: 3.5.1
- numpy: 1.11.0
- scipy: 0.17.1
- Theano: 0.8.2
- Keras: 1.0.3
The serialization works fine... in Python 2.
> The serialization works fine... in Python 2.
oh... I also confirmed the sample code worked fine in Python 2.
There is something wrong with Python 3.
I think the issue is related to https://github.com/fchollet/keras/issues/2582, but `to_yaml` works fine in Python 3.
And I found that the changed sample below (i.e. not using a lambda) worked fine in Python 3.
``` python
# before
merged_vector = merge([y1, y2], mode=lambda x: x[0] - x[1], output_shape=(None, 64))
# after
merged_vector = merge([y1, y2], mode='sum', output_shape=(None, 64))
```
this code raises the same error:
``` python
import json
import marshal
func = lambda x, y: x + y
dump = marshal.dumps(func.__code__)
json.dumps({'test': dump})
```
so I think there are something wrong around https://github.com/fchollet/keras/blob/master/keras/engine/topology.py#L1330
| 2016-05-31T02:44:31 |
|
keras-team/keras | 2,883 | keras-team__keras-2883 | [
"2871"
] | 80bfec725315e68762155cd47e8e9abf45c644f8 | diff --git a/keras/regularizers.py b/keras/regularizers.py
--- a/keras/regularizers.py
+++ b/keras/regularizers.py
@@ -41,8 +41,8 @@ def __call__(self, loss):
def get_config(self):
return {'name': self.__class__.__name__,
- 'l1': self.l1,
- 'l2': self.l2}
+ 'l1': float(self.l1),
+ 'l2': float(self.l2)}
class ActivityRegularizer(Regularizer):
@@ -68,8 +68,8 @@ def __call__(self, loss):
def get_config(self):
return {'name': self.__class__.__name__,
- 'l1': self.l1,
- 'l2': self.l2}
+ 'l1': float(self.l1),
+ 'l2': float(self.l2)}
def l1(l=0.01):
| YAML serialization throws exception with Regularizers
If a model contains a regularizer, YAML serialization throws an exception. Using latest Keras 1.0.3.
Compare:
``` python
>>> model = Sequential([LSTM(20, input_shape=(2,3))])
>>> model.to_yaml()
'class_name: Sequential\nconfig:\n- class_name: LSTM\n config:\n U_regularizer: null\n W_regularizer: null\n activation: tanh\n b_regularizer: null\n batch_input_shape: !!python/tuple [null, 2, 3]\n consume_less: cpu\n dropout_U: 0.0\n dropout_W: 0.0\n forget_bias_init: one\n go_backwards: false\n init: glorot_uniform\n inner_activation: hard_sigmoid\n inner_init: orthogonal\n input_dim: 3\n input_dtype: float32\n input_length: null\n name: lstm_3\n output_dim: 20\n return_sequences: false\n stateful: false\n trainable: true\n unroll: false\n'
>>> model = Sequential([LSTM(20, input_shape=(2,3), W_regularizer=l2())])
>>> model.to_yaml()
/usr/lib64/python3.4/site-packages/yaml/representer.py:135: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.
if data in [None, ()]:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib64/python3.4/site-packages/keras/engine/topology.py", line 2391, in to_yaml
return yaml.dump(model_config, **kwargs)
File "/usr/lib64/python3.4/site-packages/yaml/__init__.py", line 200, in dump
return dump_all([data], stream, Dumper=Dumper, **kwds)
File "/usr/lib64/python3.4/site-packages/yaml/__init__.py", line 188, in dump_all
dumper.represent(data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 26, in represent
node = self.represent_data(data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 47, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 203, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 116, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 47, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 195, in represent_list
return self.represent_sequence('tag:yaml.org,2002:seq', data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 91, in represent_sequence
node_item = self.represent_data(item)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 47, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 203, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 116, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 47, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 203, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 116, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 47, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 203, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 116, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 51, in represent_data
node = self.yaml_multi_representers[data_type](self, data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 352, in represent_object
return self.represent_mapping(tag+function_name, value)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 116, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 47, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 282, in represent_tuple
return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 91, in represent_sequence
node_item = self.represent_data(item)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 33, in represent_data
if self.ignore_aliases(data):
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 135, in ignore_aliases
if data in [None, ()]:
TypeError: data type not understood
```
Tested with LSTM & Embedding layer, W_regularizer and U_regularizer, l1 and l2. Always throws. `.to_json()` works fine. Is there some serialization code missing for l1/l2?
| Hum... I also confirmed the same problem in both Python 3.5 and 2.7.11.
``` python
from keras.layers import LSTM
from keras.models import Sequential
from keras.regularizers import l2
model = Sequential([LSTM(20, input_shape=(2,3), W_regularizer=l2())])
model.to_yaml()
```
| 2016-06-02T02:32:15 |
|
keras-team/keras | 2,924 | keras-team__keras-2924 | [
"2864"
] | af5c5b6a55528a255500b733439a66d25b5647ec | diff --git a/keras/wrappers/scikit_learn.py b/keras/wrappers/scikit_learn.py
--- a/keras/wrappers/scikit_learn.py
+++ b/keras/wrappers/scikit_learn.py
@@ -2,6 +2,7 @@
import copy
import inspect
import types
+import numpy
from ..utils.np_utils import to_categorical
from ..models import Sequential
@@ -202,9 +203,17 @@ def predict_proba(self, X, **kwargs):
# Returns
proba: array-like, shape `(n_samples, n_outputs)`
Class probability estimates.
+ In the case of binary classification (i.e. 1 output of 0 or 1)
+ will return '(n_samples, 2)'
'''
kwargs = self.filter_sk_params(Sequential.predict_proba, kwargs)
- return self.model.predict_proba(X, **kwargs)
+ probs = self.model.predict_proba(X, **kwargs)
+
+ # check if binary classification
+ if probs.shape[1] == 1:
+ # first column is probability of class 0 and second is of class 1
+ probs = numpy.hstack([1 - probs, probs])
+ return probs
def score(self, X, y, **kwargs):
'''Returns the mean accuracy on the given test data and labels.
| KerasClassifier does not return the same thing as scikit learn in the case of binary classification
For binary classification using a stock classifier from scikit-learn, running predict_proba will return a matrix of shape (n_samples, n_classes); KerasClassifier, however, returns (n_samples, n_outputs).
This makes it impossible to use the out-of-the-box cross_val_score with 'roc_auc', which fails with the error:
```
site-packages\sklearn\metrics\scorer.pyc in __call__(self, clf, X, y, sample_weight)
173
174 if y_type == "binary":
--> 175 y_pred = y_pred[:, 1]
176 elif isinstance(y_pred, list):
177 y_pred = np.vstack([p[:, -1] for p in y_pred]).T
IndexError: index 1 is out of bounds for axis 1 with size 1
```
| Changing the return statement of KerasClassifier's predict_proba method to the following would fix:
```
probs = self.model.predict_proba(X, **kwargs)
if(probs.shape[1] == 1):
probs = np.hstack([1-probs,probs])
return probs
```
Would anyone have a problem with me making this change? I am a bit worried it may break people's code, but given that KerasClassifier is supposed to mimic a scikit-learn classifier, I think it is an important change to make.
Sample code for the problem can be found in the post of the Stack Overflow user who reported it:
http://stackoverflow.com/questions/37523882/keras-wrappers-for-scikit-learn-auc-scorer-is-not-working/37569823#37569823
I asked the person to try my solution and he confirmed that it fixed the issue.
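A tiny illustration of the shape change in plain NumPy (the probability values are made up; this only demonstrates the hstack step above, not the wrapper itself):
```python
import numpy as np

# A Keras binary classifier emits a single sigmoid column, e.g. P(class 1):
probs = np.array([[0.9], [0.2], [0.6]])

# scikit-learn scorers such as 'roc_auc' index column 1, so expand to
# (n_samples, 2) with P(class 0) in the first column:
if probs.shape[1] == 1:
    probs = np.hstack([1 - probs, probs])

print(probs)
# [[ 0.1  0.9]
#  [ 0.8  0.2]
#  [ 0.4  0.6]]
```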
| 2016-06-07T16:08:53 |
|
keras-team/keras | 2,961 | keras-team__keras-2961 | [
"2960"
] | 3b83a1b1ac69d3bb3c2ac55e61878df1fd392768 | diff --git a/keras/backend/tensorflow_backend.py b/keras/backend/tensorflow_backend.py
--- a/keras/backend/tensorflow_backend.py
+++ b/keras/backend/tensorflow_backend.py
@@ -314,17 +314,23 @@ def prod(x, axis=None, keepdims=False):
return tf.reduce_prod(x, reduction_indices=axis, keep_dims=keepdims)
-def std(x, axis=None, keepdims=False):
- '''Standard deviation of a tensor, alongside the specificied axis.
+def var(x, axis=None, keepdims=False):
+ '''Variance of a tensor, alongside the specified axis.
'''
axis = _normalize_axis(axis, ndim(x))
if x.dtype.base_dtype == tf.bool:
x = tf.cast(x, _FLOATX)
m = tf.reduce_mean(x, reduction_indices=axis, keep_dims=True)
devs_squared = tf.square(x - m)
- return tf.sqrt(tf.reduce_mean(devs_squared,
- reduction_indices=axis,
- keep_dims=keepdims))
+ return tf.reduce_mean(devs_squared,
+ reduction_indices=axis,
+ keep_dims=keepdims)
+
+
+def std(x, axis=None, keepdims=False):
+ '''Standard deviation of a tensor, alongside the specified axis.
+ '''
+ return tf.sqrt(var(x, axis=axis, keepdims=keepdims))
def mean(x, axis=None, keepdims=False):
diff --git a/keras/backend/theano_backend.py b/keras/backend/theano_backend.py
--- a/keras/backend/theano_backend.py
+++ b/keras/backend/theano_backend.py
@@ -200,6 +200,10 @@ def std(x, axis=None, keepdims=False):
return T.std(x, axis=axis, keepdims=keepdims)
+def var(x, axis=None, keepdims=False):
+ return T.var(x, axis=axis, keepdims=keepdims)
+
+
def any(x, axis=None, keepdims=False):
'''Bitwise reduction (logical OR).
'''
diff --git a/keras/layers/normalization.py b/keras/layers/normalization.py
--- a/keras/layers/normalization.py
+++ b/keras/layers/normalization.py
@@ -139,7 +139,7 @@ def call(self, x, mask=None):
elif self.mode == 1:
# sample-wise normalization
m = K.mean(x, axis=-1, keepdims=True)
- std = K.std(x, axis=-1, keepdims=True)
+ std = K.sqrt(K.var(x, axis=-1, keepdims=True) + self.epsilon)
x_normed = (x - m) / (std + self.epsilon)
out = self.gamma * x_normed + self.beta
return out
| K.std results in NaN if input is zero vector
Since `K.std(x)` is just `sqrt(var(x))` under the hood, the gradient will be `inf` if the input vector `x` is 0.
This is problematic when using the `BatchNormalization` layer and having a model that sometimes produces zero vectors.
Wouldn't it be better to expose `K.var` in the backends and use that instead of `K.std` so that
the code can do `K.sqrt(K.var(x) + eps)` and does not have to add `eps` to `x`?
| That sounds reasonable. Do you want to make a PR?
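A rough numerical illustration of why the epsilon belongs inside the square root (plain NumPy, not the backend code itself): the derivative of sqrt(v) is 1/(2*sqrt(v)), which blows up as the variance v goes to 0, while sqrt(v + eps) stays bounded.
```python
import numpy as np

eps = 1e-6
for v in [1e-2, 1e-6, 1e-12, 0.0]:
    grad_plain = np.inf if v == 0 else 1.0 / (2.0 * np.sqrt(v))  # d/dv sqrt(v)
    grad_eps = 1.0 / (2.0 * np.sqrt(v + eps))                    # d/dv sqrt(v + eps)
    print(v, grad_plain, grad_eps)
# The plain gradient diverges as v -> 0; the stabilised one is capped
# near 1 / (2 * sqrt(eps)).
```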
| 2016-06-13T00:15:15 |
|
keras-team/keras | 2,980 | keras-team__keras-2980 | [
"2974"
] | dc569e952d32b2c441593e25201f22f170d76098 | diff --git a/keras/preprocessing/text.py b/keras/preprocessing/text.py
--- a/keras/preprocessing/text.py
+++ b/keras/preprocessing/text.py
@@ -206,8 +206,10 @@ def sequences_to_matrix(self, sequences, mode='binary'):
elif mode == 'binary':
X[i][j] = 1
elif mode == 'tfidf':
- tf = np.log(c / len(seq))
- df = (1 + np.log(1 + self.index_docs.get(j, 0) / (1 + self.document_count)))
+ # Use weighting scheme 2 in
+ # https://en.wikipedia.org/wiki/Tf%E2%80%93idf
+ tf = 1 + np.log(c)
+ df = np.log(1 + self.index_docs.get(j, 0) / (1 + self.document_count))
X[i][j] = tf / df
else:
raise Exception('Unknown vectorization mode: ' + str(mode))
| Why TF-IDF matrix generated by keras.preprocessing.text.Tokenizer() has negative values?
Say, if I run the following script:
>>> import keras
>>> tk = keras.preprocessing.text.Tokenizer()
>>> texts = ['I love you.', 'I love you, too.']
>>> tk.fit_on_texts(texts)
>>> tk.texts_to_matrix(texts, mode='tfidf')
The output will be:
array([[ 0. , -1.09861229, -1.09861229, -1.09861229, 0. ],
       [ 0. , -1.38629436, -1.38629436, -1.38629436, -1.38629436]])
But tf-idf values should be non-negative, shouldn't they?
By the way, is there a neat way to get the word by its index, or the vocabulary (in the order of word indices) of the Tokenizer() class? Say, sometimes I want to know what the most frequent word in the documents is, and then I want to access the word with index 1.
I can do it by running:
>>> vocab = tk.word_index.items()
>>> vocab.sort(key=lambda x:x[1])
This gives:
>>> vocab
[('i', 1), ('you', 2), ('love', 3), ('too', 4)]
But is it somewhat hacky?
Thank you!
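A small sketch for the side question about looking up a word by its index — this assumes the Tokenizer is fitted as in the snippet above and that lower indices correspond to more frequent words, as the report implies:
```python
from keras.preprocessing.text import Tokenizer

tk = Tokenizer()
tk.fit_on_texts(['I love you.', 'I love you, too.'])

# Invert word_index once instead of sorting the items by hand;
# index 1 is then the most frequent word.
index_word = {idx: word for word, idx in tk.word_index.items()}
print(index_word[1])   # the report above shows 'i' here
```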
| 2016-06-14T16:01:11 |
||
keras-team/keras | 2,986 | keras-team__keras-2986 | [
"2974"
] | c53c64d7fa8d80990d18d662093227f2fbe55ed7 | diff --git a/keras/preprocessing/text.py b/keras/preprocessing/text.py
--- a/keras/preprocessing/text.py
+++ b/keras/preprocessing/text.py
@@ -209,8 +209,8 @@ def sequences_to_matrix(self, sequences, mode='binary'):
# Use weighting scheme 2 in
# https://en.wikipedia.org/wiki/Tf%E2%80%93idf
tf = 1 + np.log(c)
- df = np.log(1 + self.index_docs.get(j, 0) / (1 + self.document_count))
- X[i][j] = tf / df
+ idf = np.log(1 + self.document_count / (1 + self.index_docs.get(j, 0)))
+ X[i][j] = tf * idf
else:
raise Exception('Unknown vectorization mode: ' + str(mode))
return X
| Why TF-IDF matrix generated by keras.preprocessing.text.Tokenizer() has negative values?
Say, if I run the following script:
>>> import keras
>>> tk = keras.preprocessing.text.Tokenizer()
>>> texts = ['I love you.', 'I love you, too.']
>>> tk.fit_on_texts(texts)
>>> tk.texts_to_matrix(texts, mode='tfidf')
The output will be:
array([[ 0. , -1.09861229, -1.09861229, -1.09861229, 0. ],
       [ 0. , -1.38629436, -1.38629436, -1.38629436, -1.38629436]])
But tf-idf values should be non-negative, shouldn't they?
By the way, is there a neat way to get the word by its index, or the vocabulary (in the order of word indices) of the Tokenizer() class? Say, sometimes I want to know what the most frequent word in the documents is, and then I want to access the word with index 1.
I can do it by running:
>>> vocab = tk.word_index.items()
>>> vocab.sort(key=lambda x:x[1])
This gives:
>>> vocab
[('i', 1), ('you', 2), ('love', 3), ('too', 4)]
But is it somewhat hacky?
Thank you!
| Thanks for the quick fix.
Though I'm afraid this bug has **NOT** been fixed, because idf is not equivalent to 1/df.
Maybe the following code is more reasonable:
```
idf = np.log(1 + self.document_count / self.index_docs.get(j, self.document_count))
X[i][j] = tf * idf
```
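A quick sanity check of the merged formula in plain Python — this mirrors the patch above (tf = 1 + log(c), idf = log(1 + N / (1 + df))) with made-up counts, just to show the result comes out non-negative:
```python
import numpy as np

document_count = 2   # total number of documents seen by the Tokenizer
c = 1                # occurrences of the term in the current document
doc_freq = 2         # number of documents containing the term

tf = 1 + np.log(c)                                 # weighting scheme 2
idf = np.log(1 + document_count / (1 + doc_freq))  # smoothed inverse document frequency
# Note: this relies on Python 3 (true) division; Python 2 would need
# `from __future__ import division` here as well.
print(tf * idf)   # ~0.51, non-negative, unlike the tf/df variant above
```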
| 2016-06-15T05:48:48 |
|
keras-team/keras | 2,992 | keras-team__keras-2992 | [
"2974"
] | 936360020cbd9e9e174c47554c49bfc9c96898f3 | diff --git a/keras/preprocessing/text.py b/keras/preprocessing/text.py
--- a/keras/preprocessing/text.py
+++ b/keras/preprocessing/text.py
@@ -3,6 +3,7 @@
from a fast Cython rewrite.
'''
from __future__ import absolute_import
+from __future__ import division
import string
import sys
| Why TF-IDF matrix generated by keras.preprocessing.text.Tokenizer() has negative values?
Say, if I run the following script:
>>> import keras
>>> tk = keras.preprocessing.text.Tokenizer()
>>> texts = ['I love you.', 'I love you, too.']
>>> tk.fit_on_texts(texts)
>>> tk.texts_to_matrix(texts, mode='tfidf')
The output will be:
array([[ 0. , -1.09861229, -1.09861229, -1.09861229, 0. ],
       [ 0. , -1.38629436, -1.38629436, -1.38629436, -1.38629436]])
But tf-idf values should be non-negative, shouldn't they?
By the way, is there a neat way to get the word by its index, or the vocabulary (in the order of word indices) of the Tokenizer() class? Say, sometimes I want to know what the most frequent word in the documents is, and then I want to access the word with index 1.
I can do it by running:
>>> vocab = tk.word_index.items()
>>> vocab.sort(key=lambda x:x[1])
This gives:
>>> vocab
[('i', 1), ('you', 2), ('love', 3), ('too', 4)]
But is it somewhat hacky?
Thank you!
| Thanks for the quick fix.
Though I'm afraid this bug has **NOT** been fixed, because idf is not equivalent to 1/df.
Maybe the following code is more reasonable:
```
idf = np.log(1 + self.document_count / self.index_docs.get(j, self.document_count))
X[i][j] = tf * idf
```
I'm sorry.
you're right.
lol~
@soloice please check whether the latest commit has fixed the issue.
Now the logic is correct.
But one more (maybe stupid) question: does this code work for both Python 2 and Python 3?
Will `self.document_count / (1 + self.index_docs.get(j, 0))` fall back to integer division (rather than float division) in Python 2?
Should there be something like `from __future__ import division`, or should the 1 be replaced with 1.0?
I'm using Python 2.7 on Ubuntu and have updated Keras to the bleeding-edge version; the truncating problem does occur.
(I don't know too much about the compatibility issue. Feel sorry if this comment is too stupid.)
Thank you!
oh my gosh!
I forgot Python 2 completely since I use Python 3.
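For completeness, a tiny illustration of the truncation concern (my own snippet, not from the patch):
```python
# Without the __future__ import, Python 2 truncates 2 / 3 to 0, which turns
# every idf into log(1 + 0) = 0; with it, division behaves as in Python 3.
from __future__ import division

document_count = 2
doc_freq = 2
print(document_count / (1 + doc_freq))   # 0.666..., not 0
```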
| 2016-06-16T07:31:54 |
|
keras-team/keras | 3,011 | keras-team__keras-3011 | [
"3008"
] | f42160021872c8d327336995e877f5d63fcf7038 | diff --git a/keras/engine/topology.py b/keras/engine/topology.py
--- a/keras/engine/topology.py
+++ b/keras/engine/topology.py
@@ -1342,9 +1342,9 @@ def get_config(self):
if isinstance(self._output_shape, python_types.LambdaType):
if py3:
- output_shape = marshal.dumps(self._output_shape.__code__)
+ output_shape = marshal.dumps(self._output_shape.__code__).decode('raw_unicode_escape')
else:
- output_shape = marshal.dumps(self._output_shape.func_code)
+ output_shape = marshal.dumps(self._output_shape.func_code).decode('raw_unicode_escape')
output_shape_type = 'lambda'
elif callable(self._output_shape):
output_shape = self._output_shape.__name__
@@ -1376,7 +1376,7 @@ def from_config(cls, config):
if output_shape_type == 'function':
output_shape = globals()[config['output_shape']]
elif output_shape_type == 'lambda':
- output_shape = marshal.loads(config['output_shape'])
+ output_shape = marshal.loads(config['output_shape'].encode('raw_unicode_escape'))
output_shape = python_types.FunctionType(output_shape, globals())
else:
output_shape = config['output_shape']
| JSON/YAML loading not working with Lambda merge layers
It doesn't seem possible to save and load models to/from JSON or YAML if there's a Merge layer with a lambda.
| Similar to https://github.com/fchollet/keras/issues/3001
Have you updated Keras to master? Have you posted a code snippet to reproduce your issue?
Still an issue in #2582 in head version as well.
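For context, a minimal standard-library sketch of the encoding trick the patch relies on — illustrative only; it mirrors the raw_unicode_escape round-trip for a marshalled lambda and is not guaranteed byte-safe for every possible code object:
```python
import json
import marshal
import types

fn = lambda shapes: (shapes[0][0], 1)      # stand-in for an output_shape lambda

# marshal produces raw bytes, which json.dumps cannot serialize directly...
raw = marshal.dumps(fn.__code__)
# ...so the bytes are round-tripped through a str via 'raw_unicode_escape'.
payload = json.dumps({'output_shape': raw.decode('raw_unicode_escape')})

config = json.loads(payload)
code = marshal.loads(config['output_shape'].encode('raw_unicode_escape'))
restored = types.FunctionType(code, globals())
print(restored([(32, 10), (32, 10)]))      # (32, 1)
```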
| 2016-06-18T02:16:53 |
|
keras-team/keras | 3,012 | keras-team__keras-3012 | [
"3001"
] | f42160021872c8d327336995e877f5d63fcf7038 | diff --git a/keras/layers/core.py b/keras/layers/core.py
--- a/keras/layers/core.py
+++ b/keras/layers/core.py
@@ -460,9 +460,9 @@ def get_config(self):
if isinstance(self._output_shape, python_types.LambdaType):
if py3:
- output_shape = marshal.dumps(self._output_shape.__code__)
+ output_shape = marshal.dumps(self._output_shape.__code__).decode('raw_unicode_escape')
else:
- output_shape = marshal.dumps(self._output_shape.func_code)
+ output_shape = marshal.dumps(self._output_shape.func_code).decode('raw_unicode_escape')
output_shape_type = 'lambda'
elif callable(self._output_shape):
output_shape = self._output_shape.__name__
@@ -494,7 +494,7 @@ def from_config(cls, config):
if output_shape_type == 'function':
output_shape = globals()[config['output_shape']]
elif output_shape_type == 'lambda':
- output_shape = marshal.loads(config['output_shape'])
+ output_shape = marshal.loads(config['output_shape'].encode('raw_unicode_escape'))
output_shape = python_types.FunctionType(output_shape, globals())
else:
output_shape = config['output_shape']
| JSON Serialization broken with auxiliary output
I have a vehicle identification problem, and designed a model which:
1) uses triplet loss to generate a deep hash of an image,
2) uses another auxiliary output to predict the vehicle ID given an image.
For your reference, triplet loss means inputting 3 images (1 anchor, 1 positive, 1 negative, where the anchor and positive images are of the same car, while the negative is an image of another car) and generating 3 deep hashes of them using a shared vision model. The deep hashes are expected to satisfy: images of the same car have similar deep hashes, while deep hashes of different cars are as far apart as possible.
Here is my model:
```
def __init__(self, deep_id_dim=3000, aux_weight=0.1, nb_epoch=20, nb_classes=5043, model_name='my_model1'):
self.batch_size = 100
self.nb_epoch = nb_epoch
self.vision_model = Sequential()
self.model = None
self.hash_len = deep_id_dim
self.aux_weight = aux_weight
self.nb_classes = nb_classes
self.model_name = model_name
self.build_model2()
def build_model2(self):
def euclidean_distance(vecs):
x, y = vecs
return K.sum(K.square(x - y), axis=1, keepdims=True)
def euclidean_dist_output_shape(shapes):
shape1, _ = shapes
return shape1[0], 1
def triplet_loss(y_true, y_pred):
# Use y_true as alpha
mse0, mse1 = y_pred[:, 0], y_pred[:, 1]
return K.maximum(0.0, mse0 - mse1 + y_true[:, 0])
# input image dimensions
img_rows, img_cols, img_channel = 50, 50, 3
# number of convolutional filters to use
nb_filters = 10
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 5
# build a vision model
self.vision_model.add(Convolution2D(nb_filters, nb_conv, nb_conv, activation='relu',
input_shape=(img_channel, img_rows, img_cols)))
self.vision_model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
self.vision_model.add(Flatten())
self.vision_model.add(Dense(self.hash_len))
img1 = Input(shape=(img_channel, img_rows, img_cols), name='X1')
img2 = Input(shape=(img_channel, img_rows, img_cols), name='X2')
img3 = Input(shape=(img_channel, img_rows, img_cols), name='X3')
hash1, hash2 = self.vision_model(img1), self.vision_model(img2)
hash3 = self.vision_model(img3)
vid = Dense(self.nb_classes, activation='softmax', name='aux_output')(hash1)
distance_layer = Lambda(euclidean_distance, output_shape=euclidean_dist_output_shape)
dist12 = distance_layer([hash1, hash2])
dist13 = distance_layer([hash1, hash3])
merged_out = merge([dist12, dist13], mode='concat', name='main_output')
self.model = Model(input=[img1, img2, img3], output=[merged_out, vid])
self.model.summary()
print(self.model.output_shape)
print('DeepID dim:', self.hash_len)
self.model.compile(optimizer='adadelta',
loss={'main_output': triplet_loss, 'aux_output': 'categorical_crossentropy'},
loss_weights={'main_output': 1., 'aux_output': self.aux_weight})
```
In the code above, self.vision_model is used to generate the deep hash, and the Lambda layer is used to calculate the distance between (anchor, positive) and that between (anchor, negative). The main_output expects there to be a margin of y_true[0] between d(anchor, negative) and d(anchor, positive).
An auxiliary output is also added to the deep hash layer, using a softmax layer to predict the vehicle ID of the anchor image.
When I save the model with
```
def save_model(self, overwrite=True):
model_path = '../model/'
if not os.path.exists(model_path):
os.mkdir(model_path)
# save the wrapper distance model
yaml_string = self.model.to_yaml()
open(os.path.join(model_path, self.model_name + '_arch.yaml'), 'w').write(yaml_string)
self.model.save_weights(os.path.join(model_path, self.model_name + '_weights.h5'), overwrite)
# save the inner vision model
model_name = self.model_name + '_vision'
yaml_string = self.vision_model.to_yaml()
open(os.path.join(model_path, model_name + '_arch.yaml'), 'w').write(yaml_string)
self.vision_model.save_weights(os.path.join(model_path, model_name + '_weights.h5'), overwrite)
```
it works well. But, when I use JSON, it reports:
```
File "main.py", line 53, in <module>
model.save_model()
File "/media/mmr6-raid5/test1/group2/vehicles/src6/nn_model.py", line 101, in save_model
json_string = self.model.to_json()
File "/home/test1/.local/lib/python2.7/site-packages/keras/engine/topology.py", line 2412, in to_json
return json.dumps(model_config, default=get_json_type, **kwargs)
File "/usr/lib/python2.7/json/__init__.py", line 250, in dumps
sort_keys=sort_keys, **kw).encode(obj)
File "/usr/lib/python2.7/json/encoder.py", line 207, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/lib/python2.7/json/encoder.py", line 270, in iterencode
return _iterencode(o, 0)
File "/home/test1/.local/lib/python2.7/site-packages/keras/engine/topology.py", line 2409, in get_json_type
raise TypeError('Not JSON Serializable')
TypeError: Not JSON Serializable
```
**However, I found that if I remove the auxiliary output, JSON also works fine.** Is it a known failure with multiple outputs? Or is there anything wrong with my model?
Thanks for your patience.
| Have you updated Keras to master? Have you posted a code snippet to reproduce your issue?
| 2016-06-18T02:46:44 |
|
keras-team/keras | 3,142 | keras-team__keras-3142 | [
"3141"
] | b4adce34dcf026679ca7b19ccede9104719e544a | diff --git a/keras/backend/tensorflow_backend.py b/keras/backend/tensorflow_backend.py
--- a/keras/backend/tensorflow_backend.py
+++ b/keras/backend/tensorflow_backend.py
@@ -224,8 +224,7 @@ def batch_dot(x, y, axes=None):
make sure that ndim is at least 2.
# Example
- Assume x = [[1, 2] and y = [[5, 6]
- [3, 4]] [7, 8]]
+ Assume x = [[1, 2], [3, 4]] and y = [[5, 6], [7, 8]]
batch_dot(x, y, axes=1) = [[17, 53]] which is the main diagonal
of x.dot(y.T), although we never have to calculate the off-diagonal
elements.
diff --git a/keras/backend/theano_backend.py b/keras/backend/theano_backend.py
--- a/keras/backend/theano_backend.py
+++ b/keras/backend/theano_backend.py
@@ -128,8 +128,7 @@ def batch_dot(x, y, axes=None):
make sure that ndim is at least 2.
# Example
- Assume x = [[1, 2] and y = [[5, 6]
- [3, 4]] [7, 8]]
+ Assume x = [[1, 2], [3, 4]] and y = [[5, 6], [7, 8]]
batch_dot(x, y, axes=1) = [[17, 53]] which is the main diagonal
of x.dot(y.T), although we never have to calculate the off-diagonal
elements.
| A tiny bug in http://keras.io/backend/#batch_dot
In the `example` section of http://keras.io/backend/#batch_dot, it reads:
Assume x = [[1, 2] and y = [[5, 6] [3, 4]] [7, 8]] batch_dot(x, y, axes=1) = [[17, 53]] ...
It confused me for a while, and later I referred to the comment in the source (https://github.com/fchollet/keras/blob/master/keras/backend/theano_backend.py, L130),
and it should look like:
```
Assume x = [[1, 2] and y = [[5, 6]
[3, 4]] [7, 8]]
```
I think mkdocs just treats it as two lines and then combines them into a single line, so the matrix should be written on a single line.
Could you fix it and watch out for other potential bugs like this? Thanks.
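For what it's worth, the numbers in the docstring check out; a quick NumPy verification of the diagonal claim (illustrative, not Keras code):
```python
import numpy as np

x = np.array([[1, 2], [3, 4]])
y = np.array([[5, 6], [7, 8]])

# batch_dot(x, y, axes=1) pairs row i of x with row i of y, i.e. the main
# diagonal of x.dot(y.T), without forming the off-diagonal entries.
print(np.sum(x * y, axis=1))     # [17 53]
print(np.diag(x.dot(y.T)))       # [17 53]
```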
| 2016-07-04T17:51:54 |