problem_id (stringlengths 18-22) | source (stringclasses, 1 value) | task_type (stringclasses, 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-25.4k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 582-39.1k) | num_tokens (int64 271-4.1k) | num_tokens_diff (int64 47-1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_24405
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-14591
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove the TODO "7 days" restriction for edit and move topics
Right now we have a restriction to move just the messages from the last week in the method:
`update_messages_for_topic_edit` in the file `zerver/lib/topic.py`
```
# We only change messages up to 7 days in the past, to avoid hammering our
# DB by changing an unbounded amount of messages
#
# TODO: Look at removing this restriction and/or add a "change_last_week"
# option; this behavior feels buggy.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/lib/topic.py`
Content:
```
1 import datetime
2
3 from django.db import connection
4 from django.db.models.query import QuerySet, Q
5 from django.utils.timezone import now as timezone_now
6
7 from sqlalchemy.sql import (
8 column,
9 literal,
10 func,
11 )
12
13 from zerver.lib.request import REQ
14 from zerver.models import (
15 Message,
16 Recipient,
17 Stream,
18 UserMessage,
19 UserProfile,
20 )
21
22 from typing import Any, Dict, List, Optional, Tuple
23
24 # Only use these constants for events.
25 ORIG_TOPIC = "orig_subject"
26 TOPIC_NAME = "subject"
27 TOPIC_LINKS = "topic_links"
28 MATCH_TOPIC = "match_subject"
29
30 # This constant is actually embedded into
31 # the JSON data for message edit history,
32 # so we'll always need to handle legacy data
33 # unless we do a pretty tricky migration.
34 LEGACY_PREV_TOPIC = "prev_subject"
35
36 # This constant is pretty closely coupled to the
37 # database, but it's the JSON field.
38 EXPORT_TOPIC_NAME = "subject"
39
40 '''
41 The following functions are for user-facing APIs
42 where we'll want to support "subject" for a while.
43 '''
44
45 def get_topic_from_message_info(message_info: Dict[str, Any]) -> str:
46 '''
47 Use this where you are getting dicts that are based off of messages
48 that may come from the outside world, especially from third party
49 APIs and bots.
50
51 We prefer 'topic' to 'subject' here. We expect at least one field
52 to be present (or the caller must know how to handle KeyError).
53 '''
54 if 'topic' in message_info:
55 return message_info['topic']
56
57 return message_info['subject']
58
59 def REQ_topic() -> Optional[str]:
60 # REQ handlers really return a REQ, but we
61 # lie to make the rest of the type matching work.
62 return REQ(
63 whence='topic',
64 aliases=['subject'],
65 converter=lambda x: x.strip(),
66 default=None,
67 )
68
69 '''
70 TRY TO KEEP THIS DIVIDING LINE.
71
72 Below this line we want to make it so that functions are only
73 using "subject" in the DB sense, and nothing customer facing.
74
75 '''
76
77 # This is used in low-level message functions in
78 # zerver/lib/message.py, and it's not user facing.
79 DB_TOPIC_NAME = "subject"
80 MESSAGE__TOPIC = 'message__subject'
81
82 def topic_match_sa(topic_name: str) -> Any:
83 # _sa is short for Sql Alchemy, which we use mostly for
84 # queries that search messages
85 topic_cond = func.upper(column("subject")) == func.upper(literal(topic_name))
86 return topic_cond
87
88 def topic_column_sa() -> Any:
89 return column("subject")
90
91 def filter_by_exact_message_topic(query: QuerySet, message: Message) -> QuerySet:
92 topic_name = message.topic_name()
93 return query.filter(subject=topic_name)
94
95 def filter_by_topic_name_via_message(query: QuerySet, topic_name: str) -> QuerySet:
96 return query.filter(message__subject__iexact=topic_name)
97
98 def messages_for_topic(stream_recipient_id: int, topic_name: str) -> QuerySet:
99 return Message.objects.filter(
100 recipient_id=stream_recipient_id,
101 subject__iexact=topic_name,
102 )
103
104 def save_message_for_edit_use_case(message: Message) -> None:
105 message.save(update_fields=[TOPIC_NAME, "content", "rendered_content",
106 "rendered_content_version", "last_edit_time",
107 "edit_history", "has_attachment", "has_image",
108 "has_link", "recipient_id"])
109
110
111 def user_message_exists_for_topic(user_profile: UserProfile,
112 recipient: Recipient,
113 topic_name: str) -> bool:
114 return UserMessage.objects.filter(
115 user_profile=user_profile,
116 message__recipient=recipient,
117 message__subject__iexact=topic_name,
118 ).exists()
119
120 def update_messages_for_topic_edit(message: Message,
121 propagate_mode: str,
122 orig_topic_name: str,
123 topic_name: Optional[str],
124 new_stream: Optional[Stream]) -> List[Message]:
125 propagate_query = Q(recipient = message.recipient, subject = orig_topic_name)
126 if propagate_mode == 'change_all':
127 # We only change messages up to 7 days in the past, to avoid hammering our
128 # DB by changing an unbounded amount of messages
129 #
130 # TODO: Look at removing this restriction and/or add a "change_last_week"
131 # option; this behavior feels buggy.
132 before_bound = timezone_now() - datetime.timedelta(days=7)
133
134 propagate_query = (propagate_query & ~Q(id = message.id) &
135 Q(date_sent__range=(before_bound, timezone_now())))
136 if propagate_mode == 'change_later':
137 propagate_query = propagate_query & Q(id__gt = message.id)
138
139 messages = Message.objects.filter(propagate_query).select_related()
140
141 update_fields = {}
142
143 # Evaluate the query before running the update
144 messages_list = list(messages)
145
146 # The cached ORM objects are not changed by the upcoming
147 # messages.update(), and the remote cache update (done by the
148 # caller) requires the new value, so we manually update the
149 # objects in addition to sending a bulk query to the database.
150 if new_stream is not None:
151 update_fields["recipient"] = new_stream.recipient
152 for m in messages_list:
153 m.recipient = new_stream.recipient
154 if topic_name is not None:
155 update_fields["subject"] = topic_name
156 for m in messages_list:
157 m.set_topic_name(topic_name)
158
159 messages.update(**update_fields)
160
161 return messages_list
162
163 def generate_topic_history_from_db_rows(rows: List[Tuple[str, int]]) -> List[Dict[str, Any]]:
164 canonical_topic_names: Dict[str, Tuple[int, str]] = {}
165
166 # Sort rows by max_message_id so that if a topic
167 # has many different casings, we use the most
168 # recent row.
169 rows = sorted(rows, key=lambda tup: tup[1])
170
171 for (topic_name, max_message_id) in rows:
172 canonical_name = topic_name.lower()
173 canonical_topic_names[canonical_name] = (max_message_id, topic_name)
174
175 history = []
176 for canonical_topic, (max_message_id, topic_name) in canonical_topic_names.items():
177 history.append(dict(
178 name=topic_name,
179 max_id=max_message_id)
180 )
181 return sorted(history, key=lambda x: -x['max_id'])
182
183 def get_topic_history_for_stream(user_profile: UserProfile,
184 recipient: Recipient,
185 public_history: bool) -> List[Dict[str, Any]]:
186 cursor = connection.cursor()
187 if public_history:
188 query = '''
189 SELECT
190 "zerver_message"."subject" as topic,
191 max("zerver_message".id) as max_message_id
192 FROM "zerver_message"
193 WHERE (
194 "zerver_message"."recipient_id" = %s
195 )
196 GROUP BY (
197 "zerver_message"."subject"
198 )
199 ORDER BY max("zerver_message".id) DESC
200 '''
201 cursor.execute(query, [recipient.id])
202 else:
203 query = '''
204 SELECT
205 "zerver_message"."subject" as topic,
206 max("zerver_message".id) as max_message_id
207 FROM "zerver_message"
208 INNER JOIN "zerver_usermessage" ON (
209 "zerver_usermessage"."message_id" = "zerver_message"."id"
210 )
211 WHERE (
212 "zerver_usermessage"."user_profile_id" = %s AND
213 "zerver_message"."recipient_id" = %s
214 )
215 GROUP BY (
216 "zerver_message"."subject"
217 )
218 ORDER BY max("zerver_message".id) DESC
219 '''
220 cursor.execute(query, [user_profile.id, recipient.id])
221 rows = cursor.fetchall()
222 cursor.close()
223
224 return generate_topic_history_from_db_rows(rows)
225
226 def get_topic_history_for_web_public_stream(recipient: Recipient) -> List[Dict[str, Any]]:
227 cursor = connection.cursor()
228 query = '''
229 SELECT
230 "zerver_message"."subject" as topic,
231 max("zerver_message".id) as max_message_id
232 FROM "zerver_message"
233 WHERE (
234 "zerver_message"."recipient_id" = %s
235 )
236 GROUP BY (
237 "zerver_message"."subject"
238 )
239 ORDER BY max("zerver_message".id) DESC
240 '''
241 cursor.execute(query, [recipient.id])
242 rows = cursor.fetchall()
243 cursor.close()
244
245 return generate_topic_history_from_db_rows(rows)
246
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zerver/lib/topic.py b/zerver/lib/topic.py
--- a/zerver/lib/topic.py
+++ b/zerver/lib/topic.py
@@ -1,8 +1,5 @@
-import datetime
-
from django.db import connection
from django.db.models.query import QuerySet, Q
-from django.utils.timezone import now as timezone_now
from sqlalchemy.sql import (
column,
@@ -124,15 +121,7 @@
new_stream: Optional[Stream]) -> List[Message]:
propagate_query = Q(recipient = message.recipient, subject = orig_topic_name)
if propagate_mode == 'change_all':
- # We only change messages up to 7 days in the past, to avoid hammering our
- # DB by changing an unbounded amount of messages
- #
- # TODO: Look at removing this restriction and/or add a "change_last_week"
- # option; this behavior feels buggy.
- before_bound = timezone_now() - datetime.timedelta(days=7)
-
- propagate_query = (propagate_query & ~Q(id = message.id) &
- Q(date_sent__range=(before_bound, timezone_now())))
+ propagate_query = propagate_query & ~Q(id = message.id)
if propagate_mode == 'change_later':
propagate_query = propagate_query & Q(id__gt = message.id)
|
{"golden_diff": "diff --git a/zerver/lib/topic.py b/zerver/lib/topic.py\n--- a/zerver/lib/topic.py\n+++ b/zerver/lib/topic.py\n@@ -1,8 +1,5 @@\n-import datetime\n-\n from django.db import connection\n from django.db.models.query import QuerySet, Q\n-from django.utils.timezone import now as timezone_now\n \n from sqlalchemy.sql import (\n column,\n@@ -124,15 +121,7 @@\n new_stream: Optional[Stream]) -> List[Message]:\n propagate_query = Q(recipient = message.recipient, subject = orig_topic_name)\n if propagate_mode == 'change_all':\n- # We only change messages up to 7 days in the past, to avoid hammering our\n- # DB by changing an unbounded amount of messages\n- #\n- # TODO: Look at removing this restriction and/or add a \"change_last_week\"\n- # option; this behavior feels buggy.\n- before_bound = timezone_now() - datetime.timedelta(days=7)\n-\n- propagate_query = (propagate_query & ~Q(id = message.id) &\n- Q(date_sent__range=(before_bound, timezone_now())))\n+ propagate_query = propagate_query & ~Q(id = message.id)\n if propagate_mode == 'change_later':\n propagate_query = propagate_query & Q(id__gt = message.id)\n", "issue": "Remove the TODO \"7 days\" restriction for edit and move topics\nRight now we have a restriction to move just the messages in the last week in method:\r\n`update_messages_for_topic_edit` file `zerver/lib/topic.py`\r\n```\r\n # We only change messages up to 7 days in the past, to avoid hammering our\r\n # DB by changing an unbounded amount of messages\r\n #\r\n # TODO: Look at removing this restriction and/or add a \"change_last_week\"\r\n # option; this behavior feels buggy.\r\n```\n", "before_files": [{"content": "import datetime\n\nfrom django.db import connection\nfrom django.db.models.query import QuerySet, Q\nfrom django.utils.timezone import now as timezone_now\n\nfrom sqlalchemy.sql import (\n column,\n literal,\n func,\n)\n\nfrom zerver.lib.request import REQ\nfrom zerver.models import (\n Message,\n Recipient,\n Stream,\n UserMessage,\n UserProfile,\n)\n\nfrom typing import Any, Dict, List, Optional, Tuple\n\n# Only use these constants for events.\nORIG_TOPIC = \"orig_subject\"\nTOPIC_NAME = \"subject\"\nTOPIC_LINKS = \"topic_links\"\nMATCH_TOPIC = \"match_subject\"\n\n# This constant is actually embedded into\n# the JSON data for message edit history,\n# so we'll always need to handle legacy data\n# unless we do a pretty tricky migration.\nLEGACY_PREV_TOPIC = \"prev_subject\"\n\n# This constant is pretty closely coupled to the\n# database, but it's the JSON field.\nEXPORT_TOPIC_NAME = \"subject\"\n\n'''\nThe following functions are for user-facing APIs\nwhere we'll want to support \"subject\" for a while.\n'''\n\ndef get_topic_from_message_info(message_info: Dict[str, Any]) -> str:\n '''\n Use this where you are getting dicts that are based off of messages\n that may come from the outside world, especially from third party\n APIs and bots.\n\n We prefer 'topic' to 'subject' here. 
We expect at least one field\n to be present (or the caller must know how to handle KeyError).\n '''\n if 'topic' in message_info:\n return message_info['topic']\n\n return message_info['subject']\n\ndef REQ_topic() -> Optional[str]:\n # REQ handlers really return a REQ, but we\n # lie to make the rest of the type matching work.\n return REQ(\n whence='topic',\n aliases=['subject'],\n converter=lambda x: x.strip(),\n default=None,\n )\n\n'''\nTRY TO KEEP THIS DIVIDING LINE.\n\nBelow this line we want to make it so that functions are only\nusing \"subject\" in the DB sense, and nothing customer facing.\n\n'''\n\n# This is used in low-level message functions in\n# zerver/lib/message.py, and it's not user facing.\nDB_TOPIC_NAME = \"subject\"\nMESSAGE__TOPIC = 'message__subject'\n\ndef topic_match_sa(topic_name: str) -> Any:\n # _sa is short for Sql Alchemy, which we use mostly for\n # queries that search messages\n topic_cond = func.upper(column(\"subject\")) == func.upper(literal(topic_name))\n return topic_cond\n\ndef topic_column_sa() -> Any:\n return column(\"subject\")\n\ndef filter_by_exact_message_topic(query: QuerySet, message: Message) -> QuerySet:\n topic_name = message.topic_name()\n return query.filter(subject=topic_name)\n\ndef filter_by_topic_name_via_message(query: QuerySet, topic_name: str) -> QuerySet:\n return query.filter(message__subject__iexact=topic_name)\n\ndef messages_for_topic(stream_recipient_id: int, topic_name: str) -> QuerySet:\n return Message.objects.filter(\n recipient_id=stream_recipient_id,\n subject__iexact=topic_name,\n )\n\ndef save_message_for_edit_use_case(message: Message) -> None:\n message.save(update_fields=[TOPIC_NAME, \"content\", \"rendered_content\",\n \"rendered_content_version\", \"last_edit_time\",\n \"edit_history\", \"has_attachment\", \"has_image\",\n \"has_link\", \"recipient_id\"])\n\n\ndef user_message_exists_for_topic(user_profile: UserProfile,\n recipient: Recipient,\n topic_name: str) -> bool:\n return UserMessage.objects.filter(\n user_profile=user_profile,\n message__recipient=recipient,\n message__subject__iexact=topic_name,\n ).exists()\n\ndef update_messages_for_topic_edit(message: Message,\n propagate_mode: str,\n orig_topic_name: str,\n topic_name: Optional[str],\n new_stream: Optional[Stream]) -> List[Message]:\n propagate_query = Q(recipient = message.recipient, subject = orig_topic_name)\n if propagate_mode == 'change_all':\n # We only change messages up to 7 days in the past, to avoid hammering our\n # DB by changing an unbounded amount of messages\n #\n # TODO: Look at removing this restriction and/or add a \"change_last_week\"\n # option; this behavior feels buggy.\n before_bound = timezone_now() - datetime.timedelta(days=7)\n\n propagate_query = (propagate_query & ~Q(id = message.id) &\n Q(date_sent__range=(before_bound, timezone_now())))\n if propagate_mode == 'change_later':\n propagate_query = propagate_query & Q(id__gt = message.id)\n\n messages = Message.objects.filter(propagate_query).select_related()\n\n update_fields = {}\n\n # Evaluate the query before running the update\n messages_list = list(messages)\n\n # The cached ORM objects are not changed by the upcoming\n # messages.update(), and the remote cache update (done by the\n # caller) requires the new value, so we manually update the\n # objects in addition to sending a bulk query to the database.\n if new_stream is not None:\n update_fields[\"recipient\"] = new_stream.recipient\n for m in messages_list:\n m.recipient = new_stream.recipient\n if topic_name 
is not None:\n update_fields[\"subject\"] = topic_name\n for m in messages_list:\n m.set_topic_name(topic_name)\n\n messages.update(**update_fields)\n\n return messages_list\n\ndef generate_topic_history_from_db_rows(rows: List[Tuple[str, int]]) -> List[Dict[str, Any]]:\n canonical_topic_names: Dict[str, Tuple[int, str]] = {}\n\n # Sort rows by max_message_id so that if a topic\n # has many different casings, we use the most\n # recent row.\n rows = sorted(rows, key=lambda tup: tup[1])\n\n for (topic_name, max_message_id) in rows:\n canonical_name = topic_name.lower()\n canonical_topic_names[canonical_name] = (max_message_id, topic_name)\n\n history = []\n for canonical_topic, (max_message_id, topic_name) in canonical_topic_names.items():\n history.append(dict(\n name=topic_name,\n max_id=max_message_id)\n )\n return sorted(history, key=lambda x: -x['max_id'])\n\ndef get_topic_history_for_stream(user_profile: UserProfile,\n recipient: Recipient,\n public_history: bool) -> List[Dict[str, Any]]:\n cursor = connection.cursor()\n if public_history:\n query = '''\n SELECT\n \"zerver_message\".\"subject\" as topic,\n max(\"zerver_message\".id) as max_message_id\n FROM \"zerver_message\"\n WHERE (\n \"zerver_message\".\"recipient_id\" = %s\n )\n GROUP BY (\n \"zerver_message\".\"subject\"\n )\n ORDER BY max(\"zerver_message\".id) DESC\n '''\n cursor.execute(query, [recipient.id])\n else:\n query = '''\n SELECT\n \"zerver_message\".\"subject\" as topic,\n max(\"zerver_message\".id) as max_message_id\n FROM \"zerver_message\"\n INNER JOIN \"zerver_usermessage\" ON (\n \"zerver_usermessage\".\"message_id\" = \"zerver_message\".\"id\"\n )\n WHERE (\n \"zerver_usermessage\".\"user_profile_id\" = %s AND\n \"zerver_message\".\"recipient_id\" = %s\n )\n GROUP BY (\n \"zerver_message\".\"subject\"\n )\n ORDER BY max(\"zerver_message\".id) DESC\n '''\n cursor.execute(query, [user_profile.id, recipient.id])\n rows = cursor.fetchall()\n cursor.close()\n\n return generate_topic_history_from_db_rows(rows)\n\ndef get_topic_history_for_web_public_stream(recipient: Recipient) -> List[Dict[str, Any]]:\n cursor = connection.cursor()\n query = '''\n SELECT\n \"zerver_message\".\"subject\" as topic,\n max(\"zerver_message\".id) as max_message_id\n FROM \"zerver_message\"\n WHERE (\n \"zerver_message\".\"recipient_id\" = %s\n )\n GROUP BY (\n \"zerver_message\".\"subject\"\n )\n ORDER BY max(\"zerver_message\".id) DESC\n '''\n cursor.execute(query, [recipient.id])\n rows = cursor.fetchall()\n cursor.close()\n\n return generate_topic_history_from_db_rows(rows)\n", "path": "zerver/lib/topic.py"}], "after_files": [{"content": "from django.db import connection\nfrom django.db.models.query import QuerySet, Q\n\nfrom sqlalchemy.sql import (\n column,\n literal,\n func,\n)\n\nfrom zerver.lib.request import REQ\nfrom zerver.models import (\n Message,\n Recipient,\n Stream,\n UserMessage,\n UserProfile,\n)\n\nfrom typing import Any, Dict, List, Optional, Tuple\n\n# Only use these constants for events.\nORIG_TOPIC = \"orig_subject\"\nTOPIC_NAME = \"subject\"\nTOPIC_LINKS = \"topic_links\"\nMATCH_TOPIC = \"match_subject\"\n\n# This constant is actually embedded into\n# the JSON data for message edit history,\n# so we'll always need to handle legacy data\n# unless we do a pretty tricky migration.\nLEGACY_PREV_TOPIC = \"prev_subject\"\n\n# This constant is pretty closely coupled to the\n# database, but it's the JSON field.\nEXPORT_TOPIC_NAME = \"subject\"\n\n'''\nThe following functions are for user-facing APIs\nwhere we'll want 
to support \"subject\" for a while.\n'''\n\ndef get_topic_from_message_info(message_info: Dict[str, Any]) -> str:\n '''\n Use this where you are getting dicts that are based off of messages\n that may come from the outside world, especially from third party\n APIs and bots.\n\n We prefer 'topic' to 'subject' here. We expect at least one field\n to be present (or the caller must know how to handle KeyError).\n '''\n if 'topic' in message_info:\n return message_info['topic']\n\n return message_info['subject']\n\ndef REQ_topic() -> Optional[str]:\n # REQ handlers really return a REQ, but we\n # lie to make the rest of the type matching work.\n return REQ(\n whence='topic',\n aliases=['subject'],\n converter=lambda x: x.strip(),\n default=None,\n )\n\n'''\nTRY TO KEEP THIS DIVIDING LINE.\n\nBelow this line we want to make it so that functions are only\nusing \"subject\" in the DB sense, and nothing customer facing.\n\n'''\n\n# This is used in low-level message functions in\n# zerver/lib/message.py, and it's not user facing.\nDB_TOPIC_NAME = \"subject\"\nMESSAGE__TOPIC = 'message__subject'\n\ndef topic_match_sa(topic_name: str) -> Any:\n # _sa is short for Sql Alchemy, which we use mostly for\n # queries that search messages\n topic_cond = func.upper(column(\"subject\")) == func.upper(literal(topic_name))\n return topic_cond\n\ndef topic_column_sa() -> Any:\n return column(\"subject\")\n\ndef filter_by_exact_message_topic(query: QuerySet, message: Message) -> QuerySet:\n topic_name = message.topic_name()\n return query.filter(subject=topic_name)\n\ndef filter_by_topic_name_via_message(query: QuerySet, topic_name: str) -> QuerySet:\n return query.filter(message__subject__iexact=topic_name)\n\ndef messages_for_topic(stream_recipient_id: int, topic_name: str) -> QuerySet:\n return Message.objects.filter(\n recipient_id=stream_recipient_id,\n subject__iexact=topic_name,\n )\n\ndef save_message_for_edit_use_case(message: Message) -> None:\n message.save(update_fields=[TOPIC_NAME, \"content\", \"rendered_content\",\n \"rendered_content_version\", \"last_edit_time\",\n \"edit_history\", \"has_attachment\", \"has_image\",\n \"has_link\", \"recipient_id\"])\n\n\ndef user_message_exists_for_topic(user_profile: UserProfile,\n recipient: Recipient,\n topic_name: str) -> bool:\n return UserMessage.objects.filter(\n user_profile=user_profile,\n message__recipient=recipient,\n message__subject__iexact=topic_name,\n ).exists()\n\ndef update_messages_for_topic_edit(message: Message,\n propagate_mode: str,\n orig_topic_name: str,\n topic_name: Optional[str],\n new_stream: Optional[Stream]) -> List[Message]:\n propagate_query = Q(recipient = message.recipient, subject = orig_topic_name)\n if propagate_mode == 'change_all':\n propagate_query = propagate_query & ~Q(id = message.id)\n if propagate_mode == 'change_later':\n propagate_query = propagate_query & Q(id__gt = message.id)\n\n messages = Message.objects.filter(propagate_query).select_related()\n\n update_fields = {}\n\n # Evaluate the query before running the update\n messages_list = list(messages)\n\n # The cached ORM objects are not changed by the upcoming\n # messages.update(), and the remote cache update (done by the\n # caller) requires the new value, so we manually update the\n # objects in addition to sending a bulk query to the database.\n if new_stream is not None:\n update_fields[\"recipient\"] = new_stream.recipient\n for m in messages_list:\n m.recipient = new_stream.recipient\n if topic_name is not None:\n update_fields[\"subject\"] = 
topic_name\n for m in messages_list:\n m.set_topic_name(topic_name)\n\n messages.update(**update_fields)\n\n return messages_list\n\ndef generate_topic_history_from_db_rows(rows: List[Tuple[str, int]]) -> List[Dict[str, Any]]:\n canonical_topic_names = {} # type: Dict[str, Tuple[int, str]]\n\n # Sort rows by max_message_id so that if a topic\n # has many different casings, we use the most\n # recent row.\n rows = sorted(rows, key=lambda tup: tup[1])\n\n for (topic_name, max_message_id) in rows:\n canonical_name = topic_name.lower()\n canonical_topic_names[canonical_name] = (max_message_id, topic_name)\n\n history = []\n for canonical_topic, (max_message_id, topic_name) in canonical_topic_names.items():\n history.append(dict(\n name=topic_name,\n max_id=max_message_id)\n )\n return sorted(history, key=lambda x: -x['max_id'])\n\ndef get_topic_history_for_stream(user_profile: UserProfile,\n recipient: Recipient,\n public_history: bool) -> List[Dict[str, Any]]:\n cursor = connection.cursor()\n if public_history:\n query = '''\n SELECT\n \"zerver_message\".\"subject\" as topic,\n max(\"zerver_message\".id) as max_message_id\n FROM \"zerver_message\"\n WHERE (\n \"zerver_message\".\"recipient_id\" = %s\n )\n GROUP BY (\n \"zerver_message\".\"subject\"\n )\n ORDER BY max(\"zerver_message\".id) DESC\n '''\n cursor.execute(query, [recipient.id])\n else:\n query = '''\n SELECT\n \"zerver_message\".\"subject\" as topic,\n max(\"zerver_message\".id) as max_message_id\n FROM \"zerver_message\"\n INNER JOIN \"zerver_usermessage\" ON (\n \"zerver_usermessage\".\"message_id\" = \"zerver_message\".\"id\"\n )\n WHERE (\n \"zerver_usermessage\".\"user_profile_id\" = %s AND\n \"zerver_message\".\"recipient_id\" = %s\n )\n GROUP BY (\n \"zerver_message\".\"subject\"\n )\n ORDER BY max(\"zerver_message\".id) DESC\n '''\n cursor.execute(query, [user_profile.id, recipient.id])\n rows = cursor.fetchall()\n cursor.close()\n\n return generate_topic_history_from_db_rows(rows)\n\ndef get_topic_history_for_web_public_stream(recipient: Recipient) -> List[Dict[str, Any]]:\n cursor = connection.cursor()\n query = '''\n SELECT\n \"zerver_message\".\"subject\" as topic,\n max(\"zerver_message\".id) as max_message_id\n FROM \"zerver_message\"\n WHERE (\n \"zerver_message\".\"recipient_id\" = %s\n )\n GROUP BY (\n \"zerver_message\".\"subject\"\n )\n ORDER BY max(\"zerver_message\".id) DESC\n '''\n cursor.execute(query, [recipient.id])\n rows = cursor.fetchall()\n cursor.close()\n\n return generate_topic_history_from_db_rows(rows)\n", "path": "zerver/lib/topic.py"}]}
| 2,846 | 291 |
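The golden diff for this row simply drops the 7-day window from the `change_all` branch, so propagation is limited only by the conversation itself. Below is a minimal sketch of the resulting query construction; it uses Django's `Q` objects, but the function name and standalone form are illustrative, not code from the Zulip repository.

```python
from django.db.models import Q

def build_propagate_query(message, propagate_mode, orig_topic_name):
    # All messages in the same conversation: same recipient and same topic.
    propagate_query = Q(recipient=message.recipient, subject=orig_topic_name)
    if propagate_mode == 'change_all':
        # After the patch, exclude only the edited message itself; there is
        # no longer a date_sent bound limiting how far back changes propagate.
        propagate_query &= ~Q(id=message.id)
    if propagate_mode == 'change_later':
        propagate_query &= Q(id__gt=message.id)
    return propagate_query
```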
gh_patches_debug_26087
|
rasdani/github-patches
|
git_diff
|
nvaccess__nvda-9208
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
NVDA hangs up in the terminal when a large piece of text is loaded
I know that a normal user doesn't have this problem.
But developers, mainly developers working in a terminal, could have it.
When a large piece of text is loaded into the terminal at the same time, for example 10,000 characters or more, NVDA hangs up. Also, after a moment, the system hangs up.
The only way is to wait.
NVDA is not reading the whole text; it reads a piece of text, then a moment of silence, a different piece, silence...
For example, I can trigger this in Ruby by writing
for i in 1..100000
print("A fragment number #{i.to_s} ")
end
Also, we can see this error when using a terminal app which writes big pieces of text.
In console commands like tree, we won't observe this error, because the text isn't loaded all at once; there is a pause between printing new files.
What is interesting...
The problem hangs up the whole system; you cannot open Task Manager or other apps.
Thank you for help
Greetings,
Dawid Pieper
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `source/winInputHook.py`
Content:
```
1 #winInputHook.py
2 #A part of NonVisual Desktop Access (NVDA)
3 #Copyright (C) 2006-2008 NVDA Contributors <http://www.nvda-project.org/>
4 #This file is covered by the GNU General Public License.
5 #See the file COPYING for more details.
6
7 import threading
8 import comtypes.client
9 import time
10 from ctypes import *
11 from ctypes.wintypes import *
12 from win32con import WM_QUIT, HC_ACTION, WH_KEYBOARD_LL, LLKHF_UP, LLKHF_EXTENDED, LLKHF_INJECTED, WH_MOUSE_LL, LLMHF_INJECTED
13
14 class KBDLLHOOKSTRUCT(Structure):
15 _fields_=[
16 ('vkCode',DWORD),
17 ('scanCode',DWORD),
18 ('flags',DWORD),
19 ('time',DWORD),
20 ('dwExtraInfo',DWORD),
21 ]
22
23 class MSLLHOOKSTRUCT(Structure):
24 _fields_=[
25 ('pt',POINT),
26 ('mouseData',DWORD),
27 ('flags',DWORD),
28 ('time',DWORD),
29 ('dwExtraInfo',DWORD),
30 ]
31
32 keyDownCallback=None
33 keyUpCallback=None
34 mouseCallback=None
35
36 @WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)
37 def keyboardHook(code,wParam,lParam):
38 if code!=HC_ACTION:
39 return windll.user32.CallNextHookEx(0,code,wParam,lParam)
40 kbd=KBDLLHOOKSTRUCT.from_address(lParam)
41 if keyUpCallback and kbd.flags&LLKHF_UP:
42 if not keyUpCallback(kbd.vkCode,kbd.scanCode,bool(kbd.flags&LLKHF_EXTENDED),bool(kbd.flags&LLKHF_INJECTED)):
43 return 1
44 elif keyDownCallback:
45 if not keyDownCallback(kbd.vkCode,kbd.scanCode,bool(kbd.flags&LLKHF_EXTENDED),bool(kbd.flags&LLKHF_INJECTED)):
46 return 1
47 return windll.user32.CallNextHookEx(0,code,wParam,lParam)
48
49 @WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)
50 def mouseHook(code,wParam,lParam):
51 if code!=HC_ACTION:
52 return windll.user32.CallNextHookEx(0,code,wParam,lParam)
53 msll=MSLLHOOKSTRUCT.from_address(lParam)
54 if mouseCallback:
55 if not mouseCallback(wParam,msll.pt.x,msll.pt.y,msll.flags&LLMHF_INJECTED):
56 return 1
57 return windll.user32.CallNextHookEx(0,code,wParam,lParam)
58
59 hookThread=None
60 hookThreadRefCount=0
61
62 def hookThreadFunc():
63 keyHookID=windll.user32.SetWindowsHookExW(WH_KEYBOARD_LL,keyboardHook,windll.kernel32.GetModuleHandleW(None),0)
64 if keyHookID==0:
65 raise OSError("Could not register keyboard hook")
66 mouseHookID=windll.user32.SetWindowsHookExW(WH_MOUSE_LL,mouseHook,windll.kernel32.GetModuleHandleW(None),0)
67 if mouseHookID==0:
68 raise OSError("Could not register mouse hook")
69 msg=MSG()
70 while windll.user32.GetMessageW(byref(msg),None,0,0):
71 pass
72 if windll.user32.UnhookWindowsHookEx(keyHookID)==0:
73 raise OSError("could not unregister key hook %s"%keyHookID)
74 if windll.user32.UnhookWindowsHookEx(mouseHookID)==0:
75 raise OSError("could not unregister mouse hook %s"%mouseHookID)
76
77 def initialize():
78 global hookThread, hookThreadRefCount
79 hookThreadRefCount+=1
80 if hookThreadRefCount==1:
81 hookThread=threading.Thread(target=hookThreadFunc)
82 hookThread.start()
83
84 def setCallbacks(keyUp=None,keyDown=None,mouse=None):
85 global keyUpCallback, keyDownCallback, mouseCallback
86 if keyUp:
87 keyUpCallback=keyUp
88 if keyDown:
89 keyDownCallback=keyDown
90 if mouse:
91 mouseCallback=mouse
92
93 def terminate():
94 global hookThread, hookThreadRefCount
95 if not hookThread:
96 raise RuntimeError("winInputHook not running")
97 hookThreadRefCount-=1
98 if hookThreadRefCount==0:
99 windll.user32.PostThreadMessageW(hookThread.ident,WM_QUIT,0,0)
100 hookThread.join()
101 hookThread=None
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/source/winInputHook.py b/source/winInputHook.py
--- a/source/winInputHook.py
+++ b/source/winInputHook.py
@@ -10,6 +10,7 @@
from ctypes import *
from ctypes.wintypes import *
from win32con import WM_QUIT, HC_ACTION, WH_KEYBOARD_LL, LLKHF_UP, LLKHF_EXTENDED, LLKHF_INJECTED, WH_MOUSE_LL, LLMHF_INJECTED
+import watchdog
class KBDLLHOOKSTRUCT(Structure):
_fields_=[
@@ -35,7 +36,7 @@
@WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)
def keyboardHook(code,wParam,lParam):
- if code!=HC_ACTION:
+ if watchdog.isAttemptingRecovery or code!=HC_ACTION:
return windll.user32.CallNextHookEx(0,code,wParam,lParam)
kbd=KBDLLHOOKSTRUCT.from_address(lParam)
if keyUpCallback and kbd.flags&LLKHF_UP:
@@ -48,7 +49,7 @@
@WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)
def mouseHook(code,wParam,lParam):
- if code!=HC_ACTION:
+ if watchdog.isAttemptingRecovery or code!=HC_ACTION:
return windll.user32.CallNextHookEx(0,code,wParam,lParam)
msll=MSLLHOOKSTRUCT.from_address(lParam)
if mouseCallback:
|
{"golden_diff": "diff --git a/source/winInputHook.py b/source/winInputHook.py\n--- a/source/winInputHook.py\n+++ b/source/winInputHook.py\n@@ -10,6 +10,7 @@\n from ctypes import *\r\n from ctypes.wintypes import *\r\n from win32con import WM_QUIT, HC_ACTION, WH_KEYBOARD_LL, LLKHF_UP, LLKHF_EXTENDED, LLKHF_INJECTED, WH_MOUSE_LL, LLMHF_INJECTED\r\n+import watchdog\r\n \r\n class KBDLLHOOKSTRUCT(Structure):\r\n \t_fields_=[\r\n@@ -35,7 +36,7 @@\n \r\n @WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)\r\n def keyboardHook(code,wParam,lParam):\r\n-\tif code!=HC_ACTION:\r\n+\tif watchdog.isAttemptingRecovery or code!=HC_ACTION:\r\n \t\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n \tkbd=KBDLLHOOKSTRUCT.from_address(lParam)\r\n \tif keyUpCallback and kbd.flags&LLKHF_UP:\r\n@@ -48,7 +49,7 @@\n \r\n @WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)\r\n def mouseHook(code,wParam,lParam):\r\n-\tif code!=HC_ACTION:\r\n+\tif watchdog.isAttemptingRecovery or code!=HC_ACTION:\r\n \t\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n \tmsll=MSLLHOOKSTRUCT.from_address(lParam)\r\n \tif mouseCallback:\n", "issue": "NVDA hangs up in terminal, when a large piece of text is loaded\nI know that normal user doesn't have this problem.\nBut developers, mainly developers working in terminal, could have.\n\nWhen large piece of text is loaded to the terminal at the same time, for example, 10000 characters of more, NVDA is hanging up. Also, after a moment, the system hangs up.\nThe only way is to wait.\nNVDA is not reading the text, it's reading pieces of text, then a moment of silence, different piece, silence...\n\nFor example, I can call this in ruby by writing\n\nfor i in 1..100000\nprint(\"A fragment number #{i.to_s} \")\nend\n\nAlso, we can find this error, when we'll using in terminal app, which writes big pieces of text.\nIn console commands, like tree, we won't observe this eror, because it isn't loading of text at the same time, there's a while between printing new files.\n\nWhat is interesting...\nThe problem is hanging up the all system, you can not open task manager or other apps.\n\nThank you for help\nGreetings,\nDawid Pieper\n\n", "before_files": [{"content": "#winInputHook.py\r\n#A part of NonVisual Desktop Access (NVDA)\r\n#Copyright (C) 2006-2008 NVDA Contributors <http://www.nvda-project.org/>\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n\r\nimport threading\r\nimport comtypes.client\r\nimport time\r\nfrom ctypes import *\r\nfrom ctypes.wintypes import *\r\nfrom win32con import WM_QUIT, HC_ACTION, WH_KEYBOARD_LL, LLKHF_UP, LLKHF_EXTENDED, LLKHF_INJECTED, WH_MOUSE_LL, LLMHF_INJECTED\r\n\r\nclass KBDLLHOOKSTRUCT(Structure):\r\n\t_fields_=[\r\n\t\t('vkCode',DWORD),\r\n\t\t('scanCode',DWORD),\r\n\t\t('flags',DWORD),\r\n\t\t('time',DWORD),\r\n\t\t('dwExtraInfo',DWORD),\r\n\t]\r\n\r\nclass MSLLHOOKSTRUCT(Structure):\r\n\t_fields_=[\r\n\t\t('pt',POINT),\r\n\t\t('mouseData',DWORD),\r\n\t\t('flags',DWORD),\r\n\t\t('time',DWORD),\r\n\t\t('dwExtraInfo',DWORD),\r\n\t]\r\n\r\nkeyDownCallback=None\r\nkeyUpCallback=None\r\nmouseCallback=None\r\n\r\n@WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)\r\ndef keyboardHook(code,wParam,lParam):\r\n\tif code!=HC_ACTION:\r\n\t\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n\tkbd=KBDLLHOOKSTRUCT.from_address(lParam)\r\n\tif keyUpCallback and kbd.flags&LLKHF_UP:\r\n\t\tif not keyUpCallback(kbd.vkCode,kbd.scanCode,bool(kbd.flags&LLKHF_EXTENDED),bool(kbd.flags&LLKHF_INJECTED)):\r\n\t\t\treturn 
1\r\n\telif keyDownCallback:\r\n\t\tif not keyDownCallback(kbd.vkCode,kbd.scanCode,bool(kbd.flags&LLKHF_EXTENDED),bool(kbd.flags&LLKHF_INJECTED)):\r\n\t\t\treturn 1\r\n\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n\r\n@WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)\r\ndef mouseHook(code,wParam,lParam):\r\n\tif code!=HC_ACTION:\r\n\t\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n\tmsll=MSLLHOOKSTRUCT.from_address(lParam)\r\n\tif mouseCallback:\r\n\t\tif not mouseCallback(wParam,msll.pt.x,msll.pt.y,msll.flags&LLMHF_INJECTED):\r\n\t\t\treturn 1\r\n\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n\r\nhookThread=None\r\nhookThreadRefCount=0\r\n\r\ndef hookThreadFunc():\r\n\tkeyHookID=windll.user32.SetWindowsHookExW(WH_KEYBOARD_LL,keyboardHook,windll.kernel32.GetModuleHandleW(None),0)\r\n\tif keyHookID==0:\r\n\t\traise OSError(\"Could not register keyboard hook\")\r\n\tmouseHookID=windll.user32.SetWindowsHookExW(WH_MOUSE_LL,mouseHook,windll.kernel32.GetModuleHandleW(None),0)\r\n\tif mouseHookID==0:\r\n\t\traise OSError(\"Could not register mouse hook\")\r\n\tmsg=MSG()\r\n\twhile windll.user32.GetMessageW(byref(msg),None,0,0):\r\n\t\tpass\r\n\tif windll.user32.UnhookWindowsHookEx(keyHookID)==0:\r\n\t\traise OSError(\"could not unregister key hook %s\"%keyHookID)\r\n\tif windll.user32.UnhookWindowsHookEx(mouseHookID)==0:\r\n\t\traise OSError(\"could not unregister mouse hook %s\"%mouseHookID)\r\n\r\ndef initialize():\r\n\tglobal hookThread, hookThreadRefCount\r\n\thookThreadRefCount+=1\r\n\tif hookThreadRefCount==1:\r\n\t\thookThread=threading.Thread(target=hookThreadFunc)\r\n\t\thookThread.start()\r\n\r\ndef setCallbacks(keyUp=None,keyDown=None,mouse=None):\r\n\tglobal keyUpCallback, keyDownCallback, mouseCallback\r\n\tif keyUp:\r\n\t\tkeyUpCallback=keyUp\r\n\tif keyDown:\r\n\t\tkeyDownCallback=keyDown\r\n\tif mouse:\r\n\t\tmouseCallback=mouse\r\n\r\ndef terminate():\r\n\tglobal hookThread, hookThreadRefCount\r\n\tif not hookThread:\r\n\t\traise RuntimeError(\"winInputHook not running\")\r\n\thookThreadRefCount-=1\r\n\tif hookThreadRefCount==0:\r\n\t\twindll.user32.PostThreadMessageW(hookThread.ident,WM_QUIT,0,0)\r\n\t\thookThread.join()\r\n\t\thookThread=None\r\n", "path": "source/winInputHook.py"}], "after_files": [{"content": "#winInputHook.py\r\n#A part of NonVisual Desktop Access (NVDA)\r\n#Copyright (C) 2006-2008 NVDA Contributors <http://www.nvda-project.org/>\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n\r\nimport threading\r\nimport comtypes.client\r\nimport time\r\nfrom ctypes import *\r\nfrom ctypes.wintypes import *\r\nfrom win32con import WM_QUIT, HC_ACTION, WH_KEYBOARD_LL, LLKHF_UP, LLKHF_EXTENDED, LLKHF_INJECTED, WH_MOUSE_LL, LLMHF_INJECTED\r\nimport watchdog\r\n\r\nclass KBDLLHOOKSTRUCT(Structure):\r\n\t_fields_=[\r\n\t\t('vkCode',DWORD),\r\n\t\t('scanCode',DWORD),\r\n\t\t('flags',DWORD),\r\n\t\t('time',DWORD),\r\n\t\t('dwExtraInfo',DWORD),\r\n\t]\r\n\r\nclass MSLLHOOKSTRUCT(Structure):\r\n\t_fields_=[\r\n\t\t('pt',POINT),\r\n\t\t('mouseData',DWORD),\r\n\t\t('flags',DWORD),\r\n\t\t('time',DWORD),\r\n\t\t('dwExtraInfo',DWORD),\r\n\t]\r\n\r\nkeyDownCallback=None\r\nkeyUpCallback=None\r\nmouseCallback=None\r\n\r\n@WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)\r\ndef keyboardHook(code,wParam,lParam):\r\n\tif watchdog.isAttemptingRecovery or code!=HC_ACTION:\r\n\t\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n\tkbd=KBDLLHOOKSTRUCT.from_address(lParam)\r\n\tif keyUpCallback and 
kbd.flags&LLKHF_UP:\r\n\t\tif not keyUpCallback(kbd.vkCode,kbd.scanCode,bool(kbd.flags&LLKHF_EXTENDED),bool(kbd.flags&LLKHF_INJECTED)):\r\n\t\t\treturn 1\r\n\telif keyDownCallback:\r\n\t\tif not keyDownCallback(kbd.vkCode,kbd.scanCode,bool(kbd.flags&LLKHF_EXTENDED),bool(kbd.flags&LLKHF_INJECTED)):\r\n\t\t\treturn 1\r\n\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n\r\n@WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)\r\ndef mouseHook(code,wParam,lParam):\r\n\tif watchdog.isAttemptingRecovery or code!=HC_ACTION:\r\n\t\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n\tmsll=MSLLHOOKSTRUCT.from_address(lParam)\r\n\tif mouseCallback:\r\n\t\tif not mouseCallback(wParam,msll.pt.x,msll.pt.y,msll.flags&LLMHF_INJECTED):\r\n\t\t\treturn 1\r\n\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n\r\nhookThread=None\r\nhookThreadRefCount=0\r\n\r\ndef hookThreadFunc():\r\n\tkeyHookID=windll.user32.SetWindowsHookExW(WH_KEYBOARD_LL,keyboardHook,windll.kernel32.GetModuleHandleW(None),0)\r\n\tif keyHookID==0:\r\n\t\traise OSError(\"Could not register keyboard hook\")\r\n\tmouseHookID=windll.user32.SetWindowsHookExW(WH_MOUSE_LL,mouseHook,windll.kernel32.GetModuleHandleW(None),0)\r\n\tif mouseHookID==0:\r\n\t\traise OSError(\"Could not register mouse hook\")\r\n\tmsg=MSG()\r\n\twhile windll.user32.GetMessageW(byref(msg),None,0,0):\r\n\t\tpass\r\n\tif windll.user32.UnhookWindowsHookEx(keyHookID)==0:\r\n\t\traise OSError(\"could not unregister key hook %s\"%keyHookID)\r\n\tif windll.user32.UnhookWindowsHookEx(mouseHookID)==0:\r\n\t\traise OSError(\"could not unregister mouse hook %s\"%mouseHookID)\r\n\r\ndef initialize():\r\n\tglobal hookThread, hookThreadRefCount\r\n\thookThreadRefCount+=1\r\n\tif hookThreadRefCount==1:\r\n\t\thookThread=threading.Thread(target=hookThreadFunc)\r\n\t\thookThread.start()\r\n\r\ndef setCallbacks(keyUp=None,keyDown=None,mouse=None):\r\n\tglobal keyUpCallback, keyDownCallback, mouseCallback\r\n\tif keyUp:\r\n\t\tkeyUpCallback=keyUp\r\n\tif keyDown:\r\n\t\tkeyDownCallback=keyDown\r\n\tif mouse:\r\n\t\tmouseCallback=mouse\r\n\r\ndef terminate():\r\n\tglobal hookThread, hookThreadRefCount\r\n\tif not hookThread:\r\n\t\traise RuntimeError(\"winInputHook not running\")\r\n\thookThreadRefCount-=1\r\n\tif hookThreadRefCount==0:\r\n\t\twindll.user32.PostThreadMessageW(hookThread.ident,WM_QUIT,0,0)\r\n\t\thookThread.join()\r\n\t\thookThread=None\r\n", "path": "source/winInputHook.py"}]}
| 1,689 | 315 |
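The patch for this row guards NVDA's low-level keyboard and mouse hooks with `watchdog.isAttemptingRecovery`, so that while the core is frozen the hooks forward events to the OS instead of calling into blocked Python callbacks. The sketch below illustrates that guard pattern with stand-in names (`is_attempting_recovery`, `call_next_hook`); it is not NVDA's actual API.

```python
import threading

# Stand-in for NVDA's watchdog.isAttemptingRecovery flag: set while the core
# is frozen and the watchdog thread is trying to recover it.
is_attempting_recovery = threading.Event()

def call_next_hook(event):
    """Stand-in for CallNextHookEx: hand the event back to the OS untouched."""
    return event

def keyboard_hook(event, callback):
    # Mirrors the patched check: while recovery is in progress, never enter
    # the (possibly blocked) Python callback; just forward the event.
    if is_attempting_recovery.is_set():
        return call_next_hook(event)
    return callback(event)
```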
gh_patches_debug_3947
|
rasdani/github-patches
|
git_diff
|
openai__gym-558
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Box2d won't find some RAND_LIMIT_swigconstant
Hello!
It's probably some silly mistake on my side, but I wasn't able to fix it by random lever pulling, as usual.
Installing Box2D as in the instructions (using `pip install -e .[all]`) will throw an error when trying to use some of the Box2D examples.
Code that reproduces the issue:
```
import gym
atari = gym.make('LunarLander-v0')
atari.reset()
```
```
[2016-05-16 02:14:25,430] Making new env: LunarLander-v0
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-1-f89e78f4410b> in <module>()
1 import gym
----> 2 atari = gym.make('LunarLander-v0')
3 atari.reset()
4 #plt.imshow(atari.render('rgb_array'))
/home/jheuristic/yozhik/gym/gym/envs/registration.pyc in make(self, id)
77 logger.info('Making new env: %s', id)
78 spec = self.spec(id)
---> 79 return spec.make()
80
81 def all(self):
/home/jheuristic/yozhik/gym/gym/envs/registration.pyc in make(self)
52 raise error.Error('Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'.format(self.id))
53
---> 54 cls = load(self._entry_point)
55 env = cls(**self._kwargs)
56
/home/jheuristic/yozhik/gym/gym/envs/registration.pyc in load(name)
11 def load(name):
12 entry_point = pkg_resources.EntryPoint.parse('x={}'.format(name))
---> 13 result = entry_point.load(False)
14 return result
15
/home/jheuristic/thenv/local/lib/python2.7/site-packages/pkg_resources/__init__.pyc in load(self, require, *args, **kwargs)
2378 if require:
2379 self.require(*args, **kwargs)
-> 2380 return self.resolve()
2381
2382 def resolve(self):
/home/jheuristic/thenv/local/lib/python2.7/site-packages/pkg_resources/__init__.pyc in resolve(self)
2384 Resolve the entry point from its module and attrs.
2385 """
-> 2386 module = __import__(self.module_name, fromlist=['__name__'], level=0)
2387 try:
2388 return functools.reduce(getattr, self.attrs, module)
/home/jheuristic/yozhik/gym/gym/envs/box2d/__init__.py in <module>()
----> 1 from gym.envs.box2d.lunar_lander import LunarLander
2 from gym.envs.box2d.bipedal_walker import BipedalWalker, BipedalWalkerHardcore
/home/jheuristic/yozhik/gym/gym/envs/box2d/lunar_lander.py in <module>()
3 from six.moves import xrange
4
----> 5 import Box2D
6 from Box2D.b2 import (edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, contactListener)
7
/home/jheuristic/thenv/local/lib/python2.7/site-packages/Box2D/__init__.py in <module>()
18 # 3. This notice may not be removed or altered from any source distribution.
19 #
---> 20 from .Box2D import *
21 __author__ = '$Date$'
22 __version__ = '2.3.1'
/home/jheuristic/thenv/local/lib/python2.7/site-packages/Box2D/Box2D.py in <module>()
433 return _Box2D.b2CheckPolygon(shape, additional_checks)
434
--> 435 _Box2D.RAND_LIMIT_swigconstant(_Box2D)
436 RAND_LIMIT = _Box2D.RAND_LIMIT
437
AttributeError: 'module' object has no attribute 'RAND_LIMIT_swigconstant'
```
What didn't help:
```
pip uninstall gym
apt-get install -y python-numpy python-dev cmake zlib1g-dev libjpeg-dev xvfb libav-tools xorg-dev python-opengl
git clone https://github.com/openai/gym
cd gym
pip install -e .[all] --upgrade
```
The OS is Ubuntu 14.04 Server x64
It may be a clue that I am running the thing from inside a Python 2 virtualenv (with numpy, etc. installed)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2 import sys, os.path
3
4 # Don't import gym module here, since deps may not be installed
5 sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'gym'))
6 from version import VERSION
7
8 # Environment-specific dependencies.
9 extras = {
10 'atari': ['atari_py>=0.0.21', 'Pillow', 'PyOpenGL'],
11 'board_game' : ['pachi-py>=0.0.19'],
12 'box2d': ['box2d-py'],
13 'classic_control': ['PyOpenGL'],
14 'mujoco': ['mujoco_py>=0.4.3', 'imageio'],
15 'parameter_tuning': ['keras', 'theano'],
16 }
17
18 # Meta dependency groups.
19 all_deps = []
20 for group_name in extras:
21 all_deps += extras[group_name]
22 extras['all'] = all_deps
23
24 setup(name='gym',
25 version=VERSION,
26 description='The OpenAI Gym: A toolkit for developing and comparing your reinforcement learning agents.',
27 url='https://github.com/openai/gym',
28 author='OpenAI',
29 author_email='[email protected]',
30 license='',
31 packages=[package for package in find_packages()
32 if package.startswith('gym')],
33 zip_safe=False,
34 install_requires=[
35 'numpy>=1.10.4', 'requests>=2.0', 'six', 'pyglet>=1.2.0',
36 ],
37 extras_require=extras,
38 package_data={'gym': ['envs/mujoco/assets/*.xml', 'envs/classic_control/assets/*.png']},
39 tests_require=['pytest', 'mock'],
40 )
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@
extras = {
'atari': ['atari_py>=0.0.21', 'Pillow', 'PyOpenGL'],
'board_game' : ['pachi-py>=0.0.19'],
- 'box2d': ['box2d-py'],
+ 'box2d': ['Box2D-kengz'],
'classic_control': ['PyOpenGL'],
'mujoco': ['mujoco_py>=0.4.3', 'imageio'],
'parameter_tuning': ['keras', 'theano'],
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,7 +9,7 @@\n extras = {\n 'atari': ['atari_py>=0.0.21', 'Pillow', 'PyOpenGL'],\n 'board_game' : ['pachi-py>=0.0.19'],\n- 'box2d': ['box2d-py'],\n+ 'box2d': ['Box2D-kengz'],\n 'classic_control': ['PyOpenGL'],\n 'mujoco': ['mujoco_py>=0.4.3', 'imageio'],\n 'parameter_tuning': ['keras', 'theano'],\n", "issue": "Box2d won't find some RAND_LIMIT_swigconstant\nHello!\n\nIt's probably some silly mistake on my side, but i wasn't able to fix by random lever pulling, as usual.\n\nInstalling Box2d as in instuctions (using `pip install -e .[all]`) will throw error when trying to use some of Box2D examples.\n\nCode that reproduces the issue:\n\n```\nimport gym\natari = gym.make('LunarLander-v0')\natari.reset()\n```\n\n```\n[2016-05-16 02:14:25,430] Making new env: LunarLander-v0\n\n---------------------------------------------------------------------------\nAttributeError Traceback (most recent call last)\n<ipython-input-1-f89e78f4410b> in <module>()\n 1 import gym\n----> 2 atari = gym.make('LunarLander-v0')\n 3 atari.reset()\n 4 #plt.imshow(atari.render('rgb_array'))\n\n/home/jheuristic/yozhik/gym/gym/envs/registration.pyc in make(self, id)\n 77 logger.info('Making new env: %s', id)\n 78 spec = self.spec(id)\n---> 79 return spec.make()\n 80 \n 81 def all(self):\n\n/home/jheuristic/yozhik/gym/gym/envs/registration.pyc in make(self)\n 52 raise error.Error('Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'.format(self.id))\n 53 \n---> 54 cls = load(self._entry_point)\n 55 env = cls(**self._kwargs)\n 56 \n\n/home/jheuristic/yozhik/gym/gym/envs/registration.pyc in load(name)\n 11 def load(name):\n 12 entry_point = pkg_resources.EntryPoint.parse('x={}'.format(name))\n---> 13 result = entry_point.load(False)\n 14 return result\n 15 \n\n/home/jheuristic/thenv/local/lib/python2.7/site-packages/pkg_resources/__init__.pyc in load(self, require, *args, **kwargs)\n 2378 if require:\n 2379 self.require(*args, **kwargs)\n-> 2380 return self.resolve()\n 2381 \n 2382 def resolve(self):\n\n/home/jheuristic/thenv/local/lib/python2.7/site-packages/pkg_resources/__init__.pyc in resolve(self)\n 2384 Resolve the entry point from its module and attrs.\n 2385 \"\"\"\n-> 2386 module = __import__(self.module_name, fromlist=['__name__'], level=0)\n 2387 try:\n 2388 return functools.reduce(getattr, self.attrs, module)\n\n/home/jheuristic/yozhik/gym/gym/envs/box2d/__init__.py in <module>()\n----> 1 from gym.envs.box2d.lunar_lander import LunarLander\n 2 from gym.envs.box2d.bipedal_walker import BipedalWalker, BipedalWalkerHardcore\n\n/home/jheuristic/yozhik/gym/gym/envs/box2d/lunar_lander.py in <module>()\n 3 from six.moves import xrange\n 4 \n----> 5 import Box2D\n 6 from Box2D.b2 import (edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, contactListener)\n 7 \n\n/home/jheuristic/thenv/local/lib/python2.7/site-packages/Box2D/__init__.py in <module>()\n 18 # 3. 
This notice may not be removed or altered from any source distribution.\n 19 #\n---> 20 from .Box2D import *\n 21 __author__ = '$Date$'\n 22 __version__ = '2.3.1'\n\n/home/jheuristic/thenv/local/lib/python2.7/site-packages/Box2D/Box2D.py in <module>()\n 433 return _Box2D.b2CheckPolygon(shape, additional_checks)\n 434 \n--> 435 _Box2D.RAND_LIMIT_swigconstant(_Box2D)\n 436 RAND_LIMIT = _Box2D.RAND_LIMIT\n 437 \n\nAttributeError: 'module' object has no attribute 'RAND_LIMIT_swigconstant'\n\n```\n\nWhat didn't help:\n\n```\npip uninstall gym\napt-get install -y python-numpy python-dev cmake zlib1g-dev libjpeg-dev xvfb libav-tools xorg-dev python-opengl\ngit clone https://github.com/openai/gym\ncd gym\npip install -e .[all] --upgrade\n```\n\nThe OS is Ubuntu 14.04 Server x64\nIt may be a clue that i am running the thing from inside python2 virtualenv (with all numpys, etc. installed)\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport sys, os.path\n\n# Don't import gym module here, since deps may not be installed\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), 'gym'))\nfrom version import VERSION\n\n# Environment-specific dependencies.\nextras = {\n 'atari': ['atari_py>=0.0.21', 'Pillow', 'PyOpenGL'],\n 'board_game' : ['pachi-py>=0.0.19'],\n 'box2d': ['box2d-py'],\n 'classic_control': ['PyOpenGL'],\n 'mujoco': ['mujoco_py>=0.4.3', 'imageio'],\n 'parameter_tuning': ['keras', 'theano'],\n}\n\n# Meta dependency groups.\nall_deps = []\nfor group_name in extras:\n all_deps += extras[group_name]\nextras['all'] = all_deps\n\nsetup(name='gym',\n version=VERSION,\n description='The OpenAI Gym: A toolkit for developing and comparing your reinforcement learning agents.',\n url='https://github.com/openai/gym',\n author='OpenAI',\n author_email='[email protected]',\n license='',\n packages=[package for package in find_packages()\n if package.startswith('gym')],\n zip_safe=False,\n install_requires=[\n 'numpy>=1.10.4', 'requests>=2.0', 'six', 'pyglet>=1.2.0',\n ],\n extras_require=extras,\n package_data={'gym': ['envs/mujoco/assets/*.xml', 'envs/classic_control/assets/*.png']},\n tests_require=['pytest', 'mock'],\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nimport sys, os.path\n\n# Don't import gym module here, since deps may not be installed\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), 'gym'))\nfrom version import VERSION\n\n# Environment-specific dependencies.\nextras = {\n 'atari': ['atari_py>=0.0.21', 'Pillow', 'PyOpenGL'],\n 'board_game' : ['pachi-py>=0.0.19'],\n 'box2d': ['Box2D-kengz'],\n 'classic_control': ['PyOpenGL'],\n 'mujoco': ['mujoco_py>=0.4.3', 'imageio'],\n 'parameter_tuning': ['keras', 'theano'],\n}\n\n# Meta dependency groups.\nall_deps = []\nfor group_name in extras:\n all_deps += extras[group_name]\nextras['all'] = all_deps\n\nsetup(name='gym',\n version=VERSION,\n description='The OpenAI Gym: A toolkit for developing and comparing your reinforcement learning agents.',\n url='https://github.com/openai/gym',\n author='OpenAI',\n author_email='[email protected]',\n license='',\n packages=[package for package in find_packages()\n if package.startswith('gym')],\n zip_safe=False,\n install_requires=[\n 'numpy>=1.10.4', 'requests>=2.0', 'six', 'pyglet>=1.2.0',\n ],\n extras_require=extras,\n package_data={'gym': ['envs/mujoco/assets/*.xml', 'envs/classic_control/assets/*.png']},\n tests_require=['pytest', 'mock'],\n)\n", "path": "setup.py"}]}
| 1,825 | 151 |
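The patch for this row only swaps the broken `box2d-py` requirement for `Box2D-kengz` in gym's `extras_require`. As a follow-up, a hypothetical smoke test (not part of the gym repository) could confirm that the replacement wheel imports and exposes the `RAND_LIMIT` constant mentioned in the traceback:

```python
def box2d_available():
    try:
        import Box2D  # provided by the Box2D-kengz wheel after the patch
    except ImportError:
        return False
    # The reported failure happened before RAND_LIMIT was defined, so its
    # presence is a reasonable smoke test for a working build.
    return hasattr(Box2D, "RAND_LIMIT")

if __name__ == "__main__":
    print("Box2D usable:", box2d_available())
```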
gh_patches_debug_33666
|
rasdani/github-patches
|
git_diff
|
google__fuzzbench-776
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move/publish reports of experimental experiments under fuzzbench.com/reports/experimental
Experimental experiments are experiments with fuzzers not in this list:
https://github.com/google/fuzzbench/blob/master/service/core-fuzzers.yaml
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `experiment/reporter.py`
Content:
```
1 #!/usr/bin/env python3
2 # Copyright 2020 Google LLC
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """A module containing the interface used by an experiment for generating
16 reports."""
17 import os
18 import posixpath
19
20 from common import experiment_utils
21 from common import experiment_path as exp_path
22 from common import filesystem
23 from common import filestore_utils
24 from common import logs
25 from common import yaml_utils
26 from analysis import generate_report
27 from analysis import data_utils
28
29 CORE_FUZZERS_YAML = os.path.join(os.path.dirname(__file__), '..', 'service',
30 'core-fuzzers.yaml')
31
32 logger = logs.Logger('reporter') # pylint: disable=invalid-name
33
34
35 def get_reports_dir():
36 """Return reports directory."""
37 return exp_path.path('reports')
38
39
40 def output_report(experiment_config: dict,
41 in_progress=False,
42 coverage_report=False):
43 """Generate the HTML report and write it to |web_bucket|."""
44 experiment_name = experiment_utils.get_experiment_name()
45 web_filestore_path = posixpath.join(experiment_config['report_filestore'],
46 experiment_name)
47
48 reports_dir = get_reports_dir()
49
50 core_fuzzers = yaml_utils.read(CORE_FUZZERS_YAML)['fuzzers']
51 fuzzers = sorted(set(experiment_config['fuzzers']).union(set(core_fuzzers)))
52
53 # Don't merge with nonprivate experiments until the very end as doing it
54 # while the experiment is in progress will produce unusable realtime
55 # results.
56 merge_with_nonprivate = (not in_progress and experiment_config.get(
57 'merge_with_nonprivate', False))
58
59 try:
60 logger.debug('Generating report.')
61 filesystem.recreate_directory(reports_dir)
62 generate_report.generate_report(
63 [experiment_name],
64 str(reports_dir),
65 report_name=experiment_name,
66 fuzzers=fuzzers,
67 in_progress=in_progress,
68 merge_with_clobber_nonprivate=merge_with_nonprivate,
69 coverage_report=coverage_report)
70 filestore_utils.rsync(
71 str(reports_dir),
72 web_filestore_path,
73 delete=False, # Don't remove existing coverage jsons.
74 gsutil_options=[
75 '-h', 'Cache-Control:public,max-age=0,no-transform'
76 ])
77 logger.debug('Done generating report.')
78 except data_utils.EmptyDataError:
79 logs.warning('No snapshot data.')
80 except Exception: # pylint: disable=broad-except
81 logger.error('Error generating HTML report.')
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/experiment/reporter.py b/experiment/reporter.py
--- a/experiment/reporter.py
+++ b/experiment/reporter.py
@@ -26,8 +26,9 @@
from analysis import generate_report
from analysis import data_utils
-CORE_FUZZERS_YAML = os.path.join(os.path.dirname(__file__), '..', 'service',
- 'core-fuzzers.yaml')
+CORE_FUZZERS_YAML = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '..', 'service',
+ 'core-fuzzers.yaml'))
logger = logs.Logger('reporter') # pylint: disable=invalid-name
@@ -37,18 +38,29 @@
return exp_path.path('reports')
+def get_core_fuzzers():
+ """Return list of core fuzzers to be used for merging experiment data."""
+ return yaml_utils.read(CORE_FUZZERS_YAML)['fuzzers']
+
+
def output_report(experiment_config: dict,
in_progress=False,
coverage_report=False):
"""Generate the HTML report and write it to |web_bucket|."""
experiment_name = experiment_utils.get_experiment_name()
- web_filestore_path = posixpath.join(experiment_config['report_filestore'],
- experiment_name)
-
reports_dir = get_reports_dir()
- core_fuzzers = yaml_utils.read(CORE_FUZZERS_YAML)['fuzzers']
- fuzzers = sorted(set(experiment_config['fuzzers']).union(set(core_fuzzers)))
+ core_fuzzers = set(get_core_fuzzers())
+ experiment_fuzzers = set(experiment_config['fuzzers'])
+ fuzzers = experiment_fuzzers.union(core_fuzzers)
+
+ # Calculate path to store report files in filestore.
+ web_filestore_path = experiment_config['report_filestore']
+ if not fuzzers.issubset(core_fuzzers):
+ # This means that we are running an experimental report with fuzzers
+ # not in the core list. So, store these in |experimental| sub-directory.
+ web_filestore_path = os.path.join(web_filestore_path, 'experimental')
+ web_filestore_path = posixpath.join(web_filestore_path, experiment_name)
# Don't merge with nonprivate experiments until the very end as doing it
# while the experiment is in progress will produce unusable realtime
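For readers skimming the record, the essence of the patch above is a pure path computation: if any fuzzer in the experiment falls outside the core list, the report lands under an `experimental/` subdirectory of the report filestore. A simplified, self-contained sketch of that rule (plain Python; the bucket URL and fuzzer names below are made-up illustrative values, and the core list is passed in directly instead of being read from `core-fuzzers.yaml`):

```python
import posixpath


def report_filestore_path(report_root, experiment_name, experiment_fuzzers, core_fuzzers):
    """Return where an experiment's report should be published.

    Experiments that only use core fuzzers publish directly under
    |report_root|; anything with a non-core fuzzer goes to 'experimental/'.
    """
    fuzzers = set(experiment_fuzzers) | set(core_fuzzers)
    if not fuzzers.issubset(set(core_fuzzers)):
        report_root = posixpath.join(report_root, 'experimental')
    return posixpath.join(report_root, experiment_name)


# A new fuzzer that is not in the core list pushes the report to experimental/.
print(report_filestore_path('gs://example-bucket/reports', '2023-01-01-demo',
                            ['afl', 'my_new_fuzzer'], ['afl', 'libfuzzer']))
# gs://example-bucket/reports/experimental/2023-01-01-demo
```

Checking the union of experiment and core fuzzers mirrors the patched `output_report`, where that merged list is also what gets passed on to report generation.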
|
{"golden_diff": "diff --git a/experiment/reporter.py b/experiment/reporter.py\n--- a/experiment/reporter.py\n+++ b/experiment/reporter.py\n@@ -26,8 +26,9 @@\n from analysis import generate_report\n from analysis import data_utils\n \n-CORE_FUZZERS_YAML = os.path.join(os.path.dirname(__file__), '..', 'service',\n- 'core-fuzzers.yaml')\n+CORE_FUZZERS_YAML = os.path.abspath(\n+ os.path.join(os.path.dirname(__file__), '..', 'service',\n+ 'core-fuzzers.yaml'))\n \n logger = logs.Logger('reporter') # pylint: disable=invalid-name\n \n@@ -37,18 +38,29 @@\n return exp_path.path('reports')\n \n \n+def get_core_fuzzers():\n+ \"\"\"Return list of core fuzzers to be used for merging experiment data.\"\"\"\n+ return yaml_utils.read(CORE_FUZZERS_YAML)['fuzzers']\n+\n+\n def output_report(experiment_config: dict,\n in_progress=False,\n coverage_report=False):\n \"\"\"Generate the HTML report and write it to |web_bucket|.\"\"\"\n experiment_name = experiment_utils.get_experiment_name()\n- web_filestore_path = posixpath.join(experiment_config['report_filestore'],\n- experiment_name)\n-\n reports_dir = get_reports_dir()\n \n- core_fuzzers = yaml_utils.read(CORE_FUZZERS_YAML)['fuzzers']\n- fuzzers = sorted(set(experiment_config['fuzzers']).union(set(core_fuzzers)))\n+ core_fuzzers = set(get_core_fuzzers())\n+ experiment_fuzzers = set(experiment_config['fuzzers'])\n+ fuzzers = experiment_fuzzers.union(core_fuzzers)\n+\n+ # Calculate path to store report files in filestore.\n+ web_filestore_path = experiment_config['report_filestore']\n+ if not fuzzers.issubset(core_fuzzers):\n+ # This means that we are running an experimental report with fuzzers\n+ # not in the core list. So, store these in |experimental| sub-directory.\n+ web_filestore_path = os.path.join(web_filestore_path, 'experimental')\n+ web_filestore_path = posixpath.join(web_filestore_path, experiment_name)\n \n # Don't merge with nonprivate experiments until the very end as doing it\n # while the experiment is in progress will produce unusable realtime\n", "issue": "Move/publish reports of experimental experiments under fuzzbench.com/reports/experimental\nExperimental experiments are experiments with fuzzers not in this list:\r\nhttps://github.com/google/fuzzbench/blob/master/service/core-fuzzers.yaml\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A module containing the interface used by an experiment for generating\nreports.\"\"\"\nimport os\nimport posixpath\n\nfrom common import experiment_utils\nfrom common import experiment_path as exp_path\nfrom common import filesystem\nfrom common import filestore_utils\nfrom common import logs\nfrom common import yaml_utils\nfrom analysis import generate_report\nfrom analysis import data_utils\n\nCORE_FUZZERS_YAML = os.path.join(os.path.dirname(__file__), '..', 'service',\n 'core-fuzzers.yaml')\n\nlogger = logs.Logger('reporter') # pylint: disable=invalid-name\n\n\ndef get_reports_dir():\n \"\"\"Return reports 
directory.\"\"\"\n return exp_path.path('reports')\n\n\ndef output_report(experiment_config: dict,\n in_progress=False,\n coverage_report=False):\n \"\"\"Generate the HTML report and write it to |web_bucket|.\"\"\"\n experiment_name = experiment_utils.get_experiment_name()\n web_filestore_path = posixpath.join(experiment_config['report_filestore'],\n experiment_name)\n\n reports_dir = get_reports_dir()\n\n core_fuzzers = yaml_utils.read(CORE_FUZZERS_YAML)['fuzzers']\n fuzzers = sorted(set(experiment_config['fuzzers']).union(set(core_fuzzers)))\n\n # Don't merge with nonprivate experiments until the very end as doing it\n # while the experiment is in progress will produce unusable realtime\n # results.\n merge_with_nonprivate = (not in_progress and experiment_config.get(\n 'merge_with_nonprivate', False))\n\n try:\n logger.debug('Generating report.')\n filesystem.recreate_directory(reports_dir)\n generate_report.generate_report(\n [experiment_name],\n str(reports_dir),\n report_name=experiment_name,\n fuzzers=fuzzers,\n in_progress=in_progress,\n merge_with_clobber_nonprivate=merge_with_nonprivate,\n coverage_report=coverage_report)\n filestore_utils.rsync(\n str(reports_dir),\n web_filestore_path,\n delete=False, # Don't remove existing coverage jsons.\n gsutil_options=[\n '-h', 'Cache-Control:public,max-age=0,no-transform'\n ])\n logger.debug('Done generating report.')\n except data_utils.EmptyDataError:\n logs.warning('No snapshot data.')\n except Exception: # pylint: disable=broad-except\n logger.error('Error generating HTML report.')\n", "path": "experiment/reporter.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A module containing the interface used by an experiment for generating\nreports.\"\"\"\nimport os\nimport posixpath\n\nfrom common import experiment_utils\nfrom common import experiment_path as exp_path\nfrom common import filesystem\nfrom common import filestore_utils\nfrom common import logs\nfrom common import yaml_utils\nfrom analysis import generate_report\nfrom analysis import data_utils\n\nCORE_FUZZERS_YAML = os.path.abspath(\n os.path.join(os.path.dirname(__file__), '..', 'service',\n 'core-fuzzers.yaml'))\n\nlogger = logs.Logger('reporter') # pylint: disable=invalid-name\n\n\ndef get_reports_dir():\n \"\"\"Return reports directory.\"\"\"\n return exp_path.path('reports')\n\n\ndef get_core_fuzzers():\n \"\"\"Return list of core fuzzers to be used for merging experiment data.\"\"\"\n return yaml_utils.read(CORE_FUZZERS_YAML)['fuzzers']\n\n\ndef output_report(experiment_config: dict,\n in_progress=False,\n coverage_report=False):\n \"\"\"Generate the HTML report and write it to |web_bucket|.\"\"\"\n experiment_name = experiment_utils.get_experiment_name()\n reports_dir = get_reports_dir()\n\n core_fuzzers = set(get_core_fuzzers())\n experiment_fuzzers = set(experiment_config['fuzzers'])\n fuzzers = experiment_fuzzers.union(core_fuzzers)\n\n # Calculate path to store report 
files in filestore.\n web_filestore_path = experiment_config['report_filestore']\n if not fuzzers.issubset(core_fuzzers):\n # This means that we are running an experimental report with fuzzers\n # not in the core list. So, store these in |experimental| sub-directory.\n web_filestore_path = os.path.join(web_filestore_path, 'experimental')\n web_filestore_path = posixpath.join(web_filestore_path, experiment_name)\n\n # Don't merge with nonprivate experiments until the very end as doing it\n # while the experiment is in progress will produce unusable realtime\n # results.\n merge_with_nonprivate = (not in_progress and experiment_config.get(\n 'merge_with_nonprivate', False))\n\n try:\n logger.debug('Generating report.')\n filesystem.recreate_directory(reports_dir)\n generate_report.generate_report(\n [experiment_name],\n str(reports_dir),\n report_name=experiment_name,\n fuzzers=fuzzers,\n in_progress=in_progress,\n merge_with_clobber_nonprivate=merge_with_nonprivate,\n coverage_report=coverage_report)\n filestore_utils.rsync(\n str(reports_dir),\n web_filestore_path,\n delete=False, # Don't remove existing coverage jsons.\n gsutil_options=[\n '-h', 'Cache-Control:public,max-age=0,no-transform'\n ])\n logger.debug('Done generating report.')\n except data_utils.EmptyDataError:\n logs.warning('No snapshot data.')\n except Exception: # pylint: disable=broad-except\n logger.error('Error generating HTML report.')\n", "path": "experiment/reporter.py"}]}
| 1,098 | 525 |
gh_patches_debug_8982
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-4778
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Documentation example fails with `proxy URL with no authority`
Running the [example](https://doc.scrapy.org/en/1.5/intro/overview.html#walk-through-of-an-example-spider) from the documentation yields this:
```
10:11 $ scrapy runspider quotes.py
2018-07-11 10:12:04 [scrapy.utils.log] INFO: Scrapy 1.5.0 started (bot: scrapybot)
2018-07-11 10:12:04 [scrapy.utils.log] INFO: Versions: lxml 3.5.0.0, libxml2 2.9.3, cssselect 0.9.1, parsel 1.5.0, w3lib 1.19.0, Twisted 16.0.0, Python 2.7.12 (default, Dec 4 2017, 14:50:18) - [GCC 5.4.0 20160609], pyOpenSSL 0.15.1 (OpenSSL 1.0.2g 1 Mar 2016), cryptography 1.2.3, Platform Linux-4.4.0-130-generic-x86_64-with-Ubuntu-16.04-xenial
2018-07-11 10:12:04 [scrapy.crawler] INFO: Overridden settings: {'SPIDER_LOADER_WARN_ONLY': True}
2018-07-11 10:12:04 [scrapy.middleware] INFO: Enabled extensions:
['scrapy.extensions.memusage.MemoryUsage',
'scrapy.extensions.logstats.LogStats',
'scrapy.extensions.telnet.TelnetConsole',
'scrapy.extensions.corestats.CoreStats']
Unhandled error in Deferred:
2018-07-11 10:12:04 [twisted] CRITICAL: Unhandled error in Deferred:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/scrapy/commands/runspider.py", line 88, in run
self.crawler_process.crawl(spidercls, **opts.spargs)
File "/usr/local/lib/python2.7/dist-packages/scrapy/crawler.py", line 171, in crawl
return self._crawl(crawler, *args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/scrapy/crawler.py", line 175, in _crawl
d = crawler.crawl(*args, **kwargs)
File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 1274, in unwindGenerator
return _inlineCallbacks(None, gen, Deferred())
--- <exception caught here> ---
File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 1128, in _inlineCallbacks
result = g.send(result)
File "/usr/local/lib/python2.7/dist-packages/scrapy/crawler.py", line 98, in crawl
six.reraise(*exc_info)
File "/usr/local/lib/python2.7/dist-packages/scrapy/crawler.py", line 80, in crawl
self.engine = self._create_engine()
File "/usr/local/lib/python2.7/dist-packages/scrapy/crawler.py", line 105, in _create_engine
return ExecutionEngine(self, lambda _: self.stop())
File "/usr/local/lib/python2.7/dist-packages/scrapy/core/engine.py", line 69, in __init__
self.downloader = downloader_cls(crawler)
File "/usr/local/lib/python2.7/dist-packages/scrapy/core/downloader/__init__.py", line 88, in __init__
self.middleware = DownloaderMiddlewareManager.from_crawler(crawler)
File "/usr/local/lib/python2.7/dist-packages/scrapy/middleware.py", line 58, in from_crawler
return cls.from_settings(crawler.settings, crawler)
File "/usr/local/lib/python2.7/dist-packages/scrapy/middleware.py", line 36, in from_settings
mw = mwcls.from_crawler(crawler)
File "/usr/local/lib/python2.7/dist-packages/scrapy/downloadermiddlewares/httpproxy.py", line 29, in from_crawler
return cls(auth_encoding)
File "/usr/local/lib/python2.7/dist-packages/scrapy/downloadermiddlewares/httpproxy.py", line 22, in __init__
self.proxies[type] = self._get_proxy(url, type)
File "/usr/local/lib/python2.7/dist-packages/scrapy/downloadermiddlewares/httpproxy.py", line 39, in _get_proxy
proxy_type, user, password, hostport = _parse_proxy(url)
File "/usr/lib/python2.7/urllib2.py", line 721, in _parse_proxy
raise ValueError("proxy URL with no authority: %r" % proxy)
exceptions.ValueError: proxy URL with no authority: '/var/run/docker.sock'
2018-07-11 10:12:04 [twisted] CRITICAL:
```
Looks like proxy code does not handle `no_proxy` correctly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/downloadermiddlewares/httpproxy.py`
Content:
```
1 import base64
2 from urllib.parse import unquote, urlunparse
3 from urllib.request import getproxies, proxy_bypass, _parse_proxy
4
5 from scrapy.exceptions import NotConfigured
6 from scrapy.utils.httpobj import urlparse_cached
7 from scrapy.utils.python import to_bytes
8
9
10 class HttpProxyMiddleware:
11
12 def __init__(self, auth_encoding='latin-1'):
13 self.auth_encoding = auth_encoding
14 self.proxies = {}
15 for type_, url in getproxies().items():
16 self.proxies[type_] = self._get_proxy(url, type_)
17
18 @classmethod
19 def from_crawler(cls, crawler):
20 if not crawler.settings.getbool('HTTPPROXY_ENABLED'):
21 raise NotConfigured
22 auth_encoding = crawler.settings.get('HTTPPROXY_AUTH_ENCODING')
23 return cls(auth_encoding)
24
25 def _basic_auth_header(self, username, password):
26 user_pass = to_bytes(
27 f'{unquote(username)}:{unquote(password)}',
28 encoding=self.auth_encoding)
29 return base64.b64encode(user_pass)
30
31 def _get_proxy(self, url, orig_type):
32 proxy_type, user, password, hostport = _parse_proxy(url)
33 proxy_url = urlunparse((proxy_type or orig_type, hostport, '', '', '', ''))
34
35 if user:
36 creds = self._basic_auth_header(user, password)
37 else:
38 creds = None
39
40 return creds, proxy_url
41
42 def process_request(self, request, spider):
43 # ignore if proxy is already set
44 if 'proxy' in request.meta:
45 if request.meta['proxy'] is None:
46 return
47 # extract credentials if present
48 creds, proxy_url = self._get_proxy(request.meta['proxy'], '')
49 request.meta['proxy'] = proxy_url
50 if creds and not request.headers.get('Proxy-Authorization'):
51 request.headers['Proxy-Authorization'] = b'Basic ' + creds
52 return
53 elif not self.proxies:
54 return
55
56 parsed = urlparse_cached(request)
57 scheme = parsed.scheme
58
59 # 'no_proxy' is only supported by http schemes
60 if scheme in ('http', 'https') and proxy_bypass(parsed.hostname):
61 return
62
63 if scheme in self.proxies:
64 self._set_proxy(request, scheme)
65
66 def _set_proxy(self, request, scheme):
67 creds, proxy = self.proxies[scheme]
68 request.meta['proxy'] = proxy
69 if creds:
70 request.headers['Proxy-Authorization'] = b'Basic ' + creds
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/downloadermiddlewares/httpproxy.py b/scrapy/downloadermiddlewares/httpproxy.py
--- a/scrapy/downloadermiddlewares/httpproxy.py
+++ b/scrapy/downloadermiddlewares/httpproxy.py
@@ -13,7 +13,12 @@
self.auth_encoding = auth_encoding
self.proxies = {}
for type_, url in getproxies().items():
- self.proxies[type_] = self._get_proxy(url, type_)
+ try:
+ self.proxies[type_] = self._get_proxy(url, type_)
+ # some values such as '/var/run/docker.sock' can't be parsed
+ # by _parse_proxy and as such should be skipped
+ except ValueError:
+ continue
@classmethod
def from_crawler(cls, crawler):
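The whole fix above is a defensive `try/except` around values coming from the environment, because `urllib`'s `_parse_proxy` refuses anything without an authority. A small standard-library-only sketch of that filtering step (the proxy values are made-up; it assumes Python 3, where `_parse_proxy` is a private helper in `urllib.request`, used here only to mirror the middleware):

```python
from urllib.request import _parse_proxy


def usable_proxies(env_proxies):
    """Drop proxy settings that _parse_proxy cannot handle.

    Entries like '/var/run/docker.sock' have no authority part and raise
    ValueError; skipping them keeps middleware start-up from crashing.
    """
    parsed = {}
    for scheme, url in env_proxies.items():
        try:
            parsed[scheme] = _parse_proxy(url)
        except ValueError:
            continue  # unparsable value: ignore it, as the patched middleware does
    return parsed


print(usable_proxies({
    'http': 'http://user:[email protected]:8080',
    'no': '/var/run/docker.sock',  # the kind of value that used to abort the crawl
}))
# only the 'http' entry survives
```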
|
{"golden_diff": "diff --git a/scrapy/downloadermiddlewares/httpproxy.py b/scrapy/downloadermiddlewares/httpproxy.py\n--- a/scrapy/downloadermiddlewares/httpproxy.py\n+++ b/scrapy/downloadermiddlewares/httpproxy.py\n@@ -13,7 +13,12 @@\n self.auth_encoding = auth_encoding\n self.proxies = {}\n for type_, url in getproxies().items():\n- self.proxies[type_] = self._get_proxy(url, type_)\n+ try:\n+ self.proxies[type_] = self._get_proxy(url, type_)\n+ # some values such as '/var/run/docker.sock' can't be parsed\n+ # by _parse_proxy and as such should be skipped\n+ except ValueError:\n+ continue\n \n @classmethod\n def from_crawler(cls, crawler):\n", "issue": "Documentation example fails with `proxy URL with no authority`\nRunning the [example](https://doc.scrapy.org/en/1.5/intro/overview.html#walk-through-of-an-example-spider) from the documentation yields this:\r\n```\r\n10:11 $ scrapy runspider quotes.py \r\n2018-07-11 10:12:04 [scrapy.utils.log] INFO: Scrapy 1.5.0 started (bot: scrapybot)\r\n2018-07-11 10:12:04 [scrapy.utils.log] INFO: Versions: lxml 3.5.0.0, libxml2 2.9.3, cssselect 0.9.1, parsel 1.5.0, w3lib 1.19.0, Twisted 16.0.0, Python 2.7.12 (default, Dec 4 2017, 14:50:18) - [GCC 5.4.0 20160609], pyOpenSSL 0.15.1 (OpenSSL 1.0.2g 1 Mar 2016), cryptography 1.2.3, Platform Linux-4.4.0-130-generic-x86_64-with-Ubuntu-16.04-xenial\r\n2018-07-11 10:12:04 [scrapy.crawler] INFO: Overridden settings: {'SPIDER_LOADER_WARN_ONLY': True}\r\n2018-07-11 10:12:04 [scrapy.middleware] INFO: Enabled extensions:\r\n['scrapy.extensions.memusage.MemoryUsage',\r\n 'scrapy.extensions.logstats.LogStats',\r\n 'scrapy.extensions.telnet.TelnetConsole',\r\n 'scrapy.extensions.corestats.CoreStats']\r\nUnhandled error in Deferred:\r\n2018-07-11 10:12:04 [twisted] CRITICAL: Unhandled error in Deferred:\r\n\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python2.7/dist-packages/scrapy/commands/runspider.py\", line 88, in run\r\n self.crawler_process.crawl(spidercls, **opts.spargs)\r\n File \"/usr/local/lib/python2.7/dist-packages/scrapy/crawler.py\", line 171, in crawl\r\n return self._crawl(crawler, *args, **kwargs)\r\n File \"/usr/local/lib/python2.7/dist-packages/scrapy/crawler.py\", line 175, in _crawl\r\n d = crawler.crawl(*args, **kwargs)\r\n File \"/usr/lib/python2.7/dist-packages/twisted/internet/defer.py\", line 1274, in unwindGenerator\r\n return _inlineCallbacks(None, gen, Deferred())\r\n--- <exception caught here> ---\r\n File \"/usr/lib/python2.7/dist-packages/twisted/internet/defer.py\", line 1128, in _inlineCallbacks\r\n result = g.send(result)\r\n File \"/usr/local/lib/python2.7/dist-packages/scrapy/crawler.py\", line 98, in crawl\r\n six.reraise(*exc_info)\r\n File \"/usr/local/lib/python2.7/dist-packages/scrapy/crawler.py\", line 80, in crawl\r\n self.engine = self._create_engine()\r\n File \"/usr/local/lib/python2.7/dist-packages/scrapy/crawler.py\", line 105, in _create_engine\r\n return ExecutionEngine(self, lambda _: self.stop())\r\n File \"/usr/local/lib/python2.7/dist-packages/scrapy/core/engine.py\", line 69, in __init__\r\n self.downloader = downloader_cls(crawler)\r\n File \"/usr/local/lib/python2.7/dist-packages/scrapy/core/downloader/__init__.py\", line 88, in __init__\r\n self.middleware = DownloaderMiddlewareManager.from_crawler(crawler)\r\n File \"/usr/local/lib/python2.7/dist-packages/scrapy/middleware.py\", line 58, in from_crawler\r\n return cls.from_settings(crawler.settings, crawler)\r\n File \"/usr/local/lib/python2.7/dist-packages/scrapy/middleware.py\", line 
36, in from_settings\r\n mw = mwcls.from_crawler(crawler)\r\n File \"/usr/local/lib/python2.7/dist-packages/scrapy/downloadermiddlewares/httpproxy.py\", line 29, in from_crawler\r\n return cls(auth_encoding)\r\n File \"/usr/local/lib/python2.7/dist-packages/scrapy/downloadermiddlewares/httpproxy.py\", line 22, in __init__\r\n self.proxies[type] = self._get_proxy(url, type)\r\n File \"/usr/local/lib/python2.7/dist-packages/scrapy/downloadermiddlewares/httpproxy.py\", line 39, in _get_proxy\r\n proxy_type, user, password, hostport = _parse_proxy(url)\r\n File \"/usr/lib/python2.7/urllib2.py\", line 721, in _parse_proxy\r\n raise ValueError(\"proxy URL with no authority: %r\" % proxy)\r\nexceptions.ValueError: proxy URL with no authority: '/var/run/docker.sock'\r\n2018-07-11 10:12:04 [twisted] CRITICAL:\r\n```\r\nLooks like proxy code does not handle `no_proxy` correctly.\n", "before_files": [{"content": "import base64\nfrom urllib.parse import unquote, urlunparse\nfrom urllib.request import getproxies, proxy_bypass, _parse_proxy\n\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.utils.httpobj import urlparse_cached\nfrom scrapy.utils.python import to_bytes\n\n\nclass HttpProxyMiddleware:\n\n def __init__(self, auth_encoding='latin-1'):\n self.auth_encoding = auth_encoding\n self.proxies = {}\n for type_, url in getproxies().items():\n self.proxies[type_] = self._get_proxy(url, type_)\n\n @classmethod\n def from_crawler(cls, crawler):\n if not crawler.settings.getbool('HTTPPROXY_ENABLED'):\n raise NotConfigured\n auth_encoding = crawler.settings.get('HTTPPROXY_AUTH_ENCODING')\n return cls(auth_encoding)\n\n def _basic_auth_header(self, username, password):\n user_pass = to_bytes(\n f'{unquote(username)}:{unquote(password)}',\n encoding=self.auth_encoding)\n return base64.b64encode(user_pass)\n\n def _get_proxy(self, url, orig_type):\n proxy_type, user, password, hostport = _parse_proxy(url)\n proxy_url = urlunparse((proxy_type or orig_type, hostport, '', '', '', ''))\n\n if user:\n creds = self._basic_auth_header(user, password)\n else:\n creds = None\n\n return creds, proxy_url\n\n def process_request(self, request, spider):\n # ignore if proxy is already set\n if 'proxy' in request.meta:\n if request.meta['proxy'] is None:\n return\n # extract credentials if present\n creds, proxy_url = self._get_proxy(request.meta['proxy'], '')\n request.meta['proxy'] = proxy_url\n if creds and not request.headers.get('Proxy-Authorization'):\n request.headers['Proxy-Authorization'] = b'Basic ' + creds\n return\n elif not self.proxies:\n return\n\n parsed = urlparse_cached(request)\n scheme = parsed.scheme\n\n # 'no_proxy' is only supported by http schemes\n if scheme in ('http', 'https') and proxy_bypass(parsed.hostname):\n return\n\n if scheme in self.proxies:\n self._set_proxy(request, scheme)\n\n def _set_proxy(self, request, scheme):\n creds, proxy = self.proxies[scheme]\n request.meta['proxy'] = proxy\n if creds:\n request.headers['Proxy-Authorization'] = b'Basic ' + creds\n", "path": "scrapy/downloadermiddlewares/httpproxy.py"}], "after_files": [{"content": "import base64\nfrom urllib.parse import unquote, urlunparse\nfrom urllib.request import getproxies, proxy_bypass, _parse_proxy\n\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.utils.httpobj import urlparse_cached\nfrom scrapy.utils.python import to_bytes\n\n\nclass HttpProxyMiddleware:\n\n def __init__(self, auth_encoding='latin-1'):\n self.auth_encoding = auth_encoding\n self.proxies = {}\n for type_, url in 
getproxies().items():\n try:\n self.proxies[type_] = self._get_proxy(url, type_)\n # some values such as '/var/run/docker.sock' can't be parsed\n # by _parse_proxy and as such should be skipped\n except ValueError:\n continue\n\n @classmethod\n def from_crawler(cls, crawler):\n if not crawler.settings.getbool('HTTPPROXY_ENABLED'):\n raise NotConfigured\n auth_encoding = crawler.settings.get('HTTPPROXY_AUTH_ENCODING')\n return cls(auth_encoding)\n\n def _basic_auth_header(self, username, password):\n user_pass = to_bytes(\n f'{unquote(username)}:{unquote(password)}',\n encoding=self.auth_encoding)\n return base64.b64encode(user_pass)\n\n def _get_proxy(self, url, orig_type):\n proxy_type, user, password, hostport = _parse_proxy(url)\n proxy_url = urlunparse((proxy_type or orig_type, hostport, '', '', '', ''))\n\n if user:\n creds = self._basic_auth_header(user, password)\n else:\n creds = None\n\n return creds, proxy_url\n\n def process_request(self, request, spider):\n # ignore if proxy is already set\n if 'proxy' in request.meta:\n if request.meta['proxy'] is None:\n return\n # extract credentials if present\n creds, proxy_url = self._get_proxy(request.meta['proxy'], '')\n request.meta['proxy'] = proxy_url\n if creds and not request.headers.get('Proxy-Authorization'):\n request.headers['Proxy-Authorization'] = b'Basic ' + creds\n return\n elif not self.proxies:\n return\n\n parsed = urlparse_cached(request)\n scheme = parsed.scheme\n\n # 'no_proxy' is only supported by http schemes\n if scheme in ('http', 'https') and proxy_bypass(parsed.hostname):\n return\n\n if scheme in self.proxies:\n self._set_proxy(request, scheme)\n\n def _set_proxy(self, request, scheme):\n creds, proxy = self.proxies[scheme]\n request.meta['proxy'] = proxy\n if creds:\n request.headers['Proxy-Authorization'] = b'Basic ' + creds\n", "path": "scrapy/downloadermiddlewares/httpproxy.py"}]}
| 2,133 | 185 |
gh_patches_debug_31036
|
rasdani/github-patches
|
git_diff
|
goauthentik__authentik-6325
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
docker compose run --rm server ldap_sync - doesn't work anymore to debug
**Describe the bug**
this command doesn't work anymore as described here (https://goauthentik.io/docs/troubleshooting/ldap_source)
```
docker compose run --rm server ldap_sync nxnet
```
it will just create a background task instead of running it in the foreground!
**To Reproduce**
```
docker compose run --rm server ldap_sync SLUGofLDAPsource
```
**Expected behavior**
it should run an LDAP synchronization in the foreground so that any errors or warnings that might happen are visible directly
**Logs**
{"event": "Task published", "level": "info", "logger": "authentik.root.celery", "pid": 7, "task_id": "17af668f-1d9f-4732-a0eb-044c4a16beed", "task_name": "authentik.sources.ldap.tasks.ldap_sync", "timestamp": "2023-07-20T15:10:12.575247"}
**Version and Deployment (please complete the following information):**
- authentik version: 2023.6.1
- Deployment: docker compose
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/sources/ldap/management/commands/ldap_sync.py`
Content:
```
1 """LDAP Sync"""
2 from django.core.management.base import BaseCommand
3 from structlog.stdlib import get_logger
4
5 from authentik.sources.ldap.models import LDAPSource
6 from authentik.sources.ldap.tasks import ldap_sync_single
7
8 LOGGER = get_logger()
9
10
11 class Command(BaseCommand):
12 """Run sync for an LDAP Source"""
13
14 def add_arguments(self, parser):
15 parser.add_argument("source_slugs", nargs="+", type=str)
16
17 def handle(self, **options):
18 for source_slug in options["source_slugs"]:
19 source = LDAPSource.objects.filter(slug=source_slug).first()
20 if not source:
21 LOGGER.warning("Source does not exist", slug=source_slug)
22 continue
23 ldap_sync_single(source)
24
```
Path: `authentik/sources/ldap/sync/users.py`
Content:
```
1 """Sync LDAP Users into authentik"""
2 from typing import Generator
3
4 from django.core.exceptions import FieldError
5 from django.db.utils import IntegrityError
6 from ldap3 import ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES, SUBTREE
7
8 from authentik.core.models import User
9 from authentik.events.models import Event, EventAction
10 from authentik.sources.ldap.sync.base import LDAP_UNIQUENESS, BaseLDAPSynchronizer
11 from authentik.sources.ldap.sync.vendor.freeipa import FreeIPA
12 from authentik.sources.ldap.sync.vendor.ms_ad import MicrosoftActiveDirectory
13
14
15 class UserLDAPSynchronizer(BaseLDAPSynchronizer):
16 """Sync LDAP Users into authentik"""
17
18 @staticmethod
19 def name() -> str:
20 return "users"
21
22 def get_objects(self, **kwargs) -> Generator:
23 return self.search_paginator(
24 search_base=self.base_dn_users,
25 search_filter=self._source.user_object_filter,
26 search_scope=SUBTREE,
27 attributes=[ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES],
28 **kwargs,
29 )
30
31 def sync(self, page_data: list) -> int:
32 """Iterate over all LDAP Users and create authentik_core.User instances"""
33 if not self._source.sync_users:
34 self.message("User syncing is disabled for this Source")
35 return -1
36 user_count = 0
37 for user in page_data:
38 if "attributes" not in user:
39 continue
40 attributes = user.get("attributes", {})
41 user_dn = self._flatten(user.get("entryDN", user.get("dn")))
42 if self._source.object_uniqueness_field not in attributes:
43 self.message(
44 f"Cannot find uniqueness field in attributes: '{user_dn}'",
45 attributes=attributes.keys(),
46 dn=user_dn,
47 )
48 continue
49 uniq = self._flatten(attributes[self._source.object_uniqueness_field])
50 try:
51 defaults = self.build_user_properties(user_dn, **attributes)
52 self._logger.debug("Creating user with attributes", **defaults)
53 if "username" not in defaults:
54 raise IntegrityError("Username was not set by propertymappings")
55 ak_user, created = self.update_or_create_attributes(
56 User, {f"attributes__{LDAP_UNIQUENESS}": uniq}, defaults
57 )
58 except (IntegrityError, FieldError, TypeError, AttributeError) as exc:
59 Event.new(
60 EventAction.CONFIGURATION_ERROR,
61 message=(
62 f"Failed to create user: {str(exc)} "
63 "To merge new user with existing user, set the user's "
64 f"Attribute '{LDAP_UNIQUENESS}' to '{uniq}'"
65 ),
66 source=self._source,
67 dn=user_dn,
68 ).save()
69 else:
70 self._logger.debug("Synced User", user=ak_user.username, created=created)
71 user_count += 1
72 MicrosoftActiveDirectory(self._source).sync(attributes, ak_user, created)
73 FreeIPA(self._source).sync(attributes, ak_user, created)
74 return user_count
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/authentik/sources/ldap/management/commands/ldap_sync.py b/authentik/sources/ldap/management/commands/ldap_sync.py
--- a/authentik/sources/ldap/management/commands/ldap_sync.py
+++ b/authentik/sources/ldap/management/commands/ldap_sync.py
@@ -3,7 +3,10 @@
from structlog.stdlib import get_logger
from authentik.sources.ldap.models import LDAPSource
-from authentik.sources.ldap.tasks import ldap_sync_single
+from authentik.sources.ldap.sync.groups import GroupLDAPSynchronizer
+from authentik.sources.ldap.sync.membership import MembershipLDAPSynchronizer
+from authentik.sources.ldap.sync.users import UserLDAPSynchronizer
+from authentik.sources.ldap.tasks import ldap_sync_paginator
LOGGER = get_logger()
@@ -20,4 +23,10 @@
if not source:
LOGGER.warning("Source does not exist", slug=source_slug)
continue
- ldap_sync_single(source)
+ tasks = (
+ ldap_sync_paginator(source, UserLDAPSynchronizer)
+ + ldap_sync_paginator(source, GroupLDAPSynchronizer)
+ + ldap_sync_paginator(source, MembershipLDAPSynchronizer)
+ )
+ for task in tasks:
+ task()
diff --git a/authentik/sources/ldap/sync/users.py b/authentik/sources/ldap/sync/users.py
--- a/authentik/sources/ldap/sync/users.py
+++ b/authentik/sources/ldap/sync/users.py
@@ -49,7 +49,7 @@
uniq = self._flatten(attributes[self._source.object_uniqueness_field])
try:
defaults = self.build_user_properties(user_dn, **attributes)
- self._logger.debug("Creating user with attributes", **defaults)
+ self._logger.debug("Writing user with attributes", **defaults)
if "username" not in defaults:
raise IntegrityError("Username was not set by propertymappings")
ak_user, created = self.update_or_create_attributes(
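The patched management command works because `ldap_sync_paginator()` hands back Celery task signatures and the loop calls each one directly; calling a signature runs the task in the current process instead of publishing it to the queue, which is what makes errors show up in the terminal again. A generic illustration of that distinction (stand-alone Celery, not authentik's real task definitions; requires `celery` to be installed):

```python
from celery import Celery

app = Celery('demo', broker='memory://', backend='cache+memory://')


@app.task
def sync_page(source_slug, page):
    # Stand-in for syncing one page of an LDAP source.
    return f'synced {source_slug} page {page}'


# Something like what ldap_sync_paginator() returns: one signature per page.
tasks = [sync_page.si('nxnet', page) for page in range(3)]

# task.apply_async() would only publish to the queue ("Task published" and
# nothing else in the foreground). Calling the signature runs it right here:
for task in tasks:
    print(task())
```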
|
{"golden_diff": "diff --git a/authentik/sources/ldap/management/commands/ldap_sync.py b/authentik/sources/ldap/management/commands/ldap_sync.py\n--- a/authentik/sources/ldap/management/commands/ldap_sync.py\n+++ b/authentik/sources/ldap/management/commands/ldap_sync.py\n@@ -3,7 +3,10 @@\n from structlog.stdlib import get_logger\n \n from authentik.sources.ldap.models import LDAPSource\n-from authentik.sources.ldap.tasks import ldap_sync_single\n+from authentik.sources.ldap.sync.groups import GroupLDAPSynchronizer\n+from authentik.sources.ldap.sync.membership import MembershipLDAPSynchronizer\n+from authentik.sources.ldap.sync.users import UserLDAPSynchronizer\n+from authentik.sources.ldap.tasks import ldap_sync_paginator\n \n LOGGER = get_logger()\n \n@@ -20,4 +23,10 @@\n if not source:\n LOGGER.warning(\"Source does not exist\", slug=source_slug)\n continue\n- ldap_sync_single(source)\n+ tasks = (\n+ ldap_sync_paginator(source, UserLDAPSynchronizer)\n+ + ldap_sync_paginator(source, GroupLDAPSynchronizer)\n+ + ldap_sync_paginator(source, MembershipLDAPSynchronizer)\n+ )\n+ for task in tasks:\n+ task()\ndiff --git a/authentik/sources/ldap/sync/users.py b/authentik/sources/ldap/sync/users.py\n--- a/authentik/sources/ldap/sync/users.py\n+++ b/authentik/sources/ldap/sync/users.py\n@@ -49,7 +49,7 @@\n uniq = self._flatten(attributes[self._source.object_uniqueness_field])\n try:\n defaults = self.build_user_properties(user_dn, **attributes)\n- self._logger.debug(\"Creating user with attributes\", **defaults)\n+ self._logger.debug(\"Writing user with attributes\", **defaults)\n if \"username\" not in defaults:\n raise IntegrityError(\"Username was not set by propertymappings\")\n ak_user, created = self.update_or_create_attributes(\n", "issue": "docker compose run --rm server ldap_sync - doesn't work anymore to debug\n**Describe the bug**\r\nthis command doesnt work anymore as described here (https://goauthentik.io/docs/troubleshooting/ldap_source)\r\n```\r\ndocker compose run --rm server ldap_sync nxnet\r\n```\r\nit will just create a backgorund taks instead of running it in foreground!\r\n\r\n**To Reproduce**\r\n```\r\ndocker compose run --rm server ldap_sync SLUGofLDAPsource\r\n```\r\n\r\n**Expected behavior**\r\nit will run an LDAP- synchronization in the foreground and see any errors or warnings that might happen directly\r\n\r\n**Logs**\r\n{\"event\": \"Task published\", \"level\": \"info\", \"logger\": \"authentik.root.celery\", \"pid\": 7, \"task_id\": \"17af668f-1d9f-4732-a0eb-044c4a16beed\", \"task_name\": \"authentik.sources.ldap.tasks.ldap_sync\", \"timestamp\": \"2023-07-20T15:10:12.575247\"}\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: 2023.6.1\r\n- Deployment: docker compose\r\n\r\n\n", "before_files": [{"content": "\"\"\"LDAP Sync\"\"\"\nfrom django.core.management.base import BaseCommand\nfrom structlog.stdlib import get_logger\n\nfrom authentik.sources.ldap.models import LDAPSource\nfrom authentik.sources.ldap.tasks import ldap_sync_single\n\nLOGGER = get_logger()\n\n\nclass Command(BaseCommand):\n \"\"\"Run sync for an LDAP Source\"\"\"\n\n def add_arguments(self, parser):\n parser.add_argument(\"source_slugs\", nargs=\"+\", type=str)\n\n def handle(self, **options):\n for source_slug in options[\"source_slugs\"]:\n source = LDAPSource.objects.filter(slug=source_slug).first()\n if not source:\n LOGGER.warning(\"Source does not exist\", slug=source_slug)\n continue\n ldap_sync_single(source)\n", "path": 
"authentik/sources/ldap/management/commands/ldap_sync.py"}, {"content": "\"\"\"Sync LDAP Users into authentik\"\"\"\nfrom typing import Generator\n\nfrom django.core.exceptions import FieldError\nfrom django.db.utils import IntegrityError\nfrom ldap3 import ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES, SUBTREE\n\nfrom authentik.core.models import User\nfrom authentik.events.models import Event, EventAction\nfrom authentik.sources.ldap.sync.base import LDAP_UNIQUENESS, BaseLDAPSynchronizer\nfrom authentik.sources.ldap.sync.vendor.freeipa import FreeIPA\nfrom authentik.sources.ldap.sync.vendor.ms_ad import MicrosoftActiveDirectory\n\n\nclass UserLDAPSynchronizer(BaseLDAPSynchronizer):\n \"\"\"Sync LDAP Users into authentik\"\"\"\n\n @staticmethod\n def name() -> str:\n return \"users\"\n\n def get_objects(self, **kwargs) -> Generator:\n return self.search_paginator(\n search_base=self.base_dn_users,\n search_filter=self._source.user_object_filter,\n search_scope=SUBTREE,\n attributes=[ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES],\n **kwargs,\n )\n\n def sync(self, page_data: list) -> int:\n \"\"\"Iterate over all LDAP Users and create authentik_core.User instances\"\"\"\n if not self._source.sync_users:\n self.message(\"User syncing is disabled for this Source\")\n return -1\n user_count = 0\n for user in page_data:\n if \"attributes\" not in user:\n continue\n attributes = user.get(\"attributes\", {})\n user_dn = self._flatten(user.get(\"entryDN\", user.get(\"dn\")))\n if self._source.object_uniqueness_field not in attributes:\n self.message(\n f\"Cannot find uniqueness field in attributes: '{user_dn}'\",\n attributes=attributes.keys(),\n dn=user_dn,\n )\n continue\n uniq = self._flatten(attributes[self._source.object_uniqueness_field])\n try:\n defaults = self.build_user_properties(user_dn, **attributes)\n self._logger.debug(\"Creating user with attributes\", **defaults)\n if \"username\" not in defaults:\n raise IntegrityError(\"Username was not set by propertymappings\")\n ak_user, created = self.update_or_create_attributes(\n User, {f\"attributes__{LDAP_UNIQUENESS}\": uniq}, defaults\n )\n except (IntegrityError, FieldError, TypeError, AttributeError) as exc:\n Event.new(\n EventAction.CONFIGURATION_ERROR,\n message=(\n f\"Failed to create user: {str(exc)} \"\n \"To merge new user with existing user, set the user's \"\n f\"Attribute '{LDAP_UNIQUENESS}' to '{uniq}'\"\n ),\n source=self._source,\n dn=user_dn,\n ).save()\n else:\n self._logger.debug(\"Synced User\", user=ak_user.username, created=created)\n user_count += 1\n MicrosoftActiveDirectory(self._source).sync(attributes, ak_user, created)\n FreeIPA(self._source).sync(attributes, ak_user, created)\n return user_count\n", "path": "authentik/sources/ldap/sync/users.py"}], "after_files": [{"content": "\"\"\"LDAP Sync\"\"\"\nfrom django.core.management.base import BaseCommand\nfrom structlog.stdlib import get_logger\n\nfrom authentik.sources.ldap.models import LDAPSource\nfrom authentik.sources.ldap.sync.groups import GroupLDAPSynchronizer\nfrom authentik.sources.ldap.sync.membership import MembershipLDAPSynchronizer\nfrom authentik.sources.ldap.sync.users import UserLDAPSynchronizer\nfrom authentik.sources.ldap.tasks import ldap_sync_paginator\n\nLOGGER = get_logger()\n\n\nclass Command(BaseCommand):\n \"\"\"Run sync for an LDAP Source\"\"\"\n\n def add_arguments(self, parser):\n parser.add_argument(\"source_slugs\", nargs=\"+\", type=str)\n\n def handle(self, **options):\n for source_slug in options[\"source_slugs\"]:\n source = 
LDAPSource.objects.filter(slug=source_slug).first()\n if not source:\n LOGGER.warning(\"Source does not exist\", slug=source_slug)\n continue\n tasks = (\n ldap_sync_paginator(source, UserLDAPSynchronizer)\n + ldap_sync_paginator(source, GroupLDAPSynchronizer)\n + ldap_sync_paginator(source, MembershipLDAPSynchronizer)\n )\n for task in tasks:\n task()\n", "path": "authentik/sources/ldap/management/commands/ldap_sync.py"}, {"content": "\"\"\"Sync LDAP Users into authentik\"\"\"\nfrom typing import Generator\n\nfrom django.core.exceptions import FieldError\nfrom django.db.utils import IntegrityError\nfrom ldap3 import ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES, SUBTREE\n\nfrom authentik.core.models import User\nfrom authentik.events.models import Event, EventAction\nfrom authentik.sources.ldap.sync.base import LDAP_UNIQUENESS, BaseLDAPSynchronizer\nfrom authentik.sources.ldap.sync.vendor.freeipa import FreeIPA\nfrom authentik.sources.ldap.sync.vendor.ms_ad import MicrosoftActiveDirectory\n\n\nclass UserLDAPSynchronizer(BaseLDAPSynchronizer):\n \"\"\"Sync LDAP Users into authentik\"\"\"\n\n @staticmethod\n def name() -> str:\n return \"users\"\n\n def get_objects(self, **kwargs) -> Generator:\n return self.search_paginator(\n search_base=self.base_dn_users,\n search_filter=self._source.user_object_filter,\n search_scope=SUBTREE,\n attributes=[ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES],\n **kwargs,\n )\n\n def sync(self, page_data: list) -> int:\n \"\"\"Iterate over all LDAP Users and create authentik_core.User instances\"\"\"\n if not self._source.sync_users:\n self.message(\"User syncing is disabled for this Source\")\n return -1\n user_count = 0\n for user in page_data:\n if \"attributes\" not in user:\n continue\n attributes = user.get(\"attributes\", {})\n user_dn = self._flatten(user.get(\"entryDN\", user.get(\"dn\")))\n if self._source.object_uniqueness_field not in attributes:\n self.message(\n f\"Cannot find uniqueness field in attributes: '{user_dn}'\",\n attributes=attributes.keys(),\n dn=user_dn,\n )\n continue\n uniq = self._flatten(attributes[self._source.object_uniqueness_field])\n try:\n defaults = self.build_user_properties(user_dn, **attributes)\n self._logger.debug(\"Writing user with attributes\", **defaults)\n if \"username\" not in defaults:\n raise IntegrityError(\"Username was not set by propertymappings\")\n ak_user, created = self.update_or_create_attributes(\n User, {f\"attributes__{LDAP_UNIQUENESS}\": uniq}, defaults\n )\n except (IntegrityError, FieldError, TypeError, AttributeError) as exc:\n Event.new(\n EventAction.CONFIGURATION_ERROR,\n message=(\n f\"Failed to create user: {str(exc)} \"\n \"To merge new user with existing user, set the user's \"\n f\"Attribute '{LDAP_UNIQUENESS}' to '{uniq}'\"\n ),\n source=self._source,\n dn=user_dn,\n ).save()\n else:\n self._logger.debug(\"Synced User\", user=ak_user.username, created=created)\n user_count += 1\n MicrosoftActiveDirectory(self._source).sync(attributes, ak_user, created)\n FreeIPA(self._source).sync(attributes, ak_user, created)\n return user_count\n", "path": "authentik/sources/ldap/sync/users.py"}]}
| 1,556 | 452 |
gh_patches_debug_43971
|
rasdani/github-patches
|
git_diff
|
pyca__cryptography-272
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Check IV lengths are correct for cipher
OpenSSL can't be bothered to check these itself, because crypto is real easy and not important.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cryptography/hazmat/primitives/ciphers/base.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
10 # implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from __future__ import absolute_import, division, print_function
15
16 from cryptography import utils
17 from cryptography.exceptions import (
18 AlreadyFinalized, NotYetFinalized, AlreadyUpdated,
19 )
20 from cryptography.hazmat.primitives import interfaces
21
22
23 class Cipher(object):
24 def __init__(self, algorithm, mode, backend):
25 if not isinstance(algorithm, interfaces.CipherAlgorithm):
26 raise TypeError("Expected interface of interfaces.CipherAlgorithm")
27
28 self.algorithm = algorithm
29 self.mode = mode
30 self._backend = backend
31
32 def encryptor(self):
33 if isinstance(self.mode, interfaces.ModeWithAuthenticationTag):
34 if self.mode.tag is not None:
35 raise ValueError(
36 "Authentication tag must be None when encrypting"
37 )
38 ctx = self._backend.create_symmetric_encryption_ctx(
39 self.algorithm, self.mode
40 )
41 return self._wrap_ctx(ctx, encrypt=True)
42
43 def decryptor(self):
44 if isinstance(self.mode, interfaces.ModeWithAuthenticationTag):
45 if self.mode.tag is None:
46 raise ValueError(
47 "Authentication tag must be provided when decrypting"
48 )
49 ctx = self._backend.create_symmetric_decryption_ctx(
50 self.algorithm, self.mode
51 )
52 return self._wrap_ctx(ctx, encrypt=False)
53
54 def _wrap_ctx(self, ctx, encrypt):
55 if isinstance(self.mode, interfaces.ModeWithAuthenticationTag):
56 if encrypt:
57 return _AEADEncryptionContext(ctx)
58 else:
59 return _AEADCipherContext(ctx)
60 else:
61 return _CipherContext(ctx)
62
63
64 @utils.register_interface(interfaces.CipherContext)
65 class _CipherContext(object):
66 def __init__(self, ctx):
67 self._ctx = ctx
68
69 def update(self, data):
70 if self._ctx is None:
71 raise AlreadyFinalized("Context was already finalized")
72 return self._ctx.update(data)
73
74 def finalize(self):
75 if self._ctx is None:
76 raise AlreadyFinalized("Context was already finalized")
77 data = self._ctx.finalize()
78 self._ctx = None
79 return data
80
81
82 @utils.register_interface(interfaces.AEADCipherContext)
83 @utils.register_interface(interfaces.CipherContext)
84 class _AEADCipherContext(object):
85 def __init__(self, ctx):
86 self._ctx = ctx
87 self._tag = None
88 self._updated = False
89
90 def update(self, data):
91 if self._ctx is None:
92 raise AlreadyFinalized("Context was already finalized")
93 self._updated = True
94 return self._ctx.update(data)
95
96 def finalize(self):
97 if self._ctx is None:
98 raise AlreadyFinalized("Context was already finalized")
99 data = self._ctx.finalize()
100 self._tag = self._ctx.tag
101 self._ctx = None
102 return data
103
104 def authenticate_additional_data(self, data):
105 if self._ctx is None:
106 raise AlreadyFinalized("Context was already finalized")
107 if self._updated:
108 raise AlreadyUpdated("Update has been called on this context")
109 self._ctx.authenticate_additional_data(data)
110
111
112 @utils.register_interface(interfaces.AEADEncryptionContext)
113 class _AEADEncryptionContext(_AEADCipherContext):
114 @property
115 def tag(self):
116 if self._ctx is not None:
117 raise NotYetFinalized("You must finalize encryption before "
118 "getting the tag")
119 return self._tag
120
```
Path: `cryptography/hazmat/primitives/interfaces.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
10 # implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from __future__ import absolute_import, division, print_function
15
16 import abc
17
18 import six
19
20
21 class CipherAlgorithm(six.with_metaclass(abc.ABCMeta)):
22 @abc.abstractproperty
23 def name(self):
24 """
25 A string naming this mode (e.g. "AES", "Camellia").
26 """
27
28 @abc.abstractproperty
29 def key_size(self):
30 """
31 The size of the key being used as an integer in bits (e.g. 128, 256).
32 """
33
34
35 class BlockCipherAlgorithm(six.with_metaclass(abc.ABCMeta)):
36 @abc.abstractproperty
37 def block_size(self):
38 """
39 The size of a block as an integer in bits (e.g. 64, 128).
40 """
41
42
43 class Mode(six.with_metaclass(abc.ABCMeta)):
44 @abc.abstractproperty
45 def name(self):
46 """
47 A string naming this mode (e.g. "ECB", "CBC").
48 """
49
50
51 class ModeWithInitializationVector(six.with_metaclass(abc.ABCMeta)):
52 @abc.abstractproperty
53 def initialization_vector(self):
54 """
55 The value of the initialization vector for this mode as bytes.
56 """
57
58
59 class ModeWithNonce(six.with_metaclass(abc.ABCMeta)):
60 @abc.abstractproperty
61 def nonce(self):
62 """
63 The value of the nonce for this mode as bytes.
64 """
65
66
67 class ModeWithAuthenticationTag(six.with_metaclass(abc.ABCMeta)):
68 @abc.abstractproperty
69 def tag(self):
70 """
71 The value of the tag supplied to the constructor of this mode.
72 """
73
74
75 class CipherContext(six.with_metaclass(abc.ABCMeta)):
76 @abc.abstractmethod
77 def update(self, data):
78 """
79 Processes the provided bytes through the cipher and returns the results
80 as bytes.
81 """
82
83 @abc.abstractmethod
84 def finalize(self):
85 """
86 Returns the results of processing the final block as bytes.
87 """
88
89
90 class AEADCipherContext(six.with_metaclass(abc.ABCMeta)):
91 @abc.abstractmethod
92 def authenticate_additional_data(self, data):
93 """
94 Authenticates the provided bytes.
95 """
96
97
98 class AEADEncryptionContext(six.with_metaclass(abc.ABCMeta)):
99 @abc.abstractproperty
100 def tag(self):
101 """
102 Returns tag bytes. This is only available after encryption is
103 finalized.
104 """
105
106
107 class PaddingContext(six.with_metaclass(abc.ABCMeta)):
108 @abc.abstractmethod
109 def update(self, data):
110 """
111 Pads the provided bytes and returns any available data as bytes.
112 """
113
114 @abc.abstractmethod
115 def finalize(self):
116 """
117 Finalize the padding, returns bytes.
118 """
119
120
121 class HashAlgorithm(six.with_metaclass(abc.ABCMeta)):
122 @abc.abstractproperty
123 def name(self):
124 """
125 A string naming this algorithm (e.g. "sha256", "md5").
126 """
127
128 @abc.abstractproperty
129 def digest_size(self):
130 """
131 The size of the resulting digest in bytes.
132 """
133
134 @abc.abstractproperty
135 def block_size(self):
136 """
137 The internal block size of the hash algorithm in bytes.
138 """
139
140
141 class HashContext(six.with_metaclass(abc.ABCMeta)):
142 @abc.abstractproperty
143 def algorithm(self):
144 """
145 A HashAlgorithm that will be used by this context.
146 """
147
148 @abc.abstractmethod
149 def update(self, data):
150 """
151 Processes the provided bytes through the hash.
152 """
153
154 @abc.abstractmethod
155 def finalize(self):
156 """
157 Finalizes the hash context and returns the hash digest as bytes.
158 """
159
160 @abc.abstractmethod
161 def copy(self):
162 """
163 Return a HashContext that is a copy of the current context.
164 """
165
```
Path: `cryptography/hazmat/primitives/ciphers/modes.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
10 # implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from __future__ import absolute_import, division, print_function
15
16 from cryptography import utils
17 from cryptography.hazmat.primitives import interfaces
18
19
20 @utils.register_interface(interfaces.Mode)
21 @utils.register_interface(interfaces.ModeWithInitializationVector)
22 class CBC(object):
23 name = "CBC"
24
25 def __init__(self, initialization_vector):
26 self.initialization_vector = initialization_vector
27
28
29 @utils.register_interface(interfaces.Mode)
30 class ECB(object):
31 name = "ECB"
32
33
34 @utils.register_interface(interfaces.Mode)
35 @utils.register_interface(interfaces.ModeWithInitializationVector)
36 class OFB(object):
37 name = "OFB"
38
39 def __init__(self, initialization_vector):
40 self.initialization_vector = initialization_vector
41
42
43 @utils.register_interface(interfaces.Mode)
44 @utils.register_interface(interfaces.ModeWithInitializationVector)
45 class CFB(object):
46 name = "CFB"
47
48 def __init__(self, initialization_vector):
49 self.initialization_vector = initialization_vector
50
51
52 @utils.register_interface(interfaces.Mode)
53 @utils.register_interface(interfaces.ModeWithNonce)
54 class CTR(object):
55 name = "CTR"
56
57 def __init__(self, nonce):
58 self.nonce = nonce
59
60
61 @utils.register_interface(interfaces.Mode)
62 @utils.register_interface(interfaces.ModeWithInitializationVector)
63 @utils.register_interface(interfaces.ModeWithAuthenticationTag)
64 class GCM(object):
65 name = "GCM"
66
67 def __init__(self, initialization_vector, tag=None):
68 if tag is not None and len(tag) < 4:
69 raise ValueError(
70 "Authentication tag must be 4 bytes or longer"
71 )
72
73 self.initialization_vector = initialization_vector
74 self.tag = tag
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cryptography/hazmat/primitives/ciphers/base.py b/cryptography/hazmat/primitives/ciphers/base.py
--- a/cryptography/hazmat/primitives/ciphers/base.py
+++ b/cryptography/hazmat/primitives/ciphers/base.py
@@ -25,6 +25,9 @@
if not isinstance(algorithm, interfaces.CipherAlgorithm):
raise TypeError("Expected interface of interfaces.CipherAlgorithm")
+ if mode is not None:
+ mode.validate_for_algorithm(algorithm)
+
self.algorithm = algorithm
self.mode = mode
self._backend = backend
diff --git a/cryptography/hazmat/primitives/ciphers/modes.py b/cryptography/hazmat/primitives/ciphers/modes.py
--- a/cryptography/hazmat/primitives/ciphers/modes.py
+++ b/cryptography/hazmat/primitives/ciphers/modes.py
@@ -25,11 +25,20 @@
def __init__(self, initialization_vector):
self.initialization_vector = initialization_vector
+ def validate_for_algorithm(self, algorithm):
+ if len(self.initialization_vector) * 8 != algorithm.block_size:
+ raise ValueError("Invalid iv size ({0}) for {1}".format(
+ len(self.initialization_vector), self.name
+ ))
+
@utils.register_interface(interfaces.Mode)
class ECB(object):
name = "ECB"
+ def validate_for_algorithm(self, algorithm):
+ pass
+
@utils.register_interface(interfaces.Mode)
@utils.register_interface(interfaces.ModeWithInitializationVector)
@@ -39,6 +48,12 @@
def __init__(self, initialization_vector):
self.initialization_vector = initialization_vector
+ def validate_for_algorithm(self, algorithm):
+ if len(self.initialization_vector) * 8 != algorithm.block_size:
+ raise ValueError("Invalid iv size ({0}) for {1}".format(
+ len(self.initialization_vector), self.name
+ ))
+
@utils.register_interface(interfaces.Mode)
@utils.register_interface(interfaces.ModeWithInitializationVector)
@@ -48,6 +63,12 @@
def __init__(self, initialization_vector):
self.initialization_vector = initialization_vector
+ def validate_for_algorithm(self, algorithm):
+ if len(self.initialization_vector) * 8 != algorithm.block_size:
+ raise ValueError("Invalid iv size ({0}) for {1}".format(
+ len(self.initialization_vector), self.name
+ ))
+
@utils.register_interface(interfaces.Mode)
@utils.register_interface(interfaces.ModeWithNonce)
@@ -57,6 +78,12 @@
def __init__(self, nonce):
self.nonce = nonce
+ def validate_for_algorithm(self, algorithm):
+ if len(self.nonce) * 8 != algorithm.block_size:
+ raise ValueError("Invalid nonce size ({0}) for {1}".format(
+ len(self.nonce), self.name
+ ))
+
@utils.register_interface(interfaces.Mode)
@utils.register_interface(interfaces.ModeWithInitializationVector)
@@ -65,6 +92,9 @@
name = "GCM"
def __init__(self, initialization_vector, tag=None):
+ # len(initialization_vector) must in [1, 2 ** 64), but it's impossible
+ # to actually construct a bytes object that large, so we don't check
+ # for it
if tag is not None and len(tag) < 4:
raise ValueError(
"Authentication tag must be 4 bytes or longer"
@@ -72,3 +102,6 @@
self.initialization_vector = initialization_vector
self.tag = tag
+
+ def validate_for_algorithm(self, algorithm):
+ pass
diff --git a/cryptography/hazmat/primitives/interfaces.py b/cryptography/hazmat/primitives/interfaces.py
--- a/cryptography/hazmat/primitives/interfaces.py
+++ b/cryptography/hazmat/primitives/interfaces.py
@@ -47,6 +47,13 @@
A string naming this mode (e.g. "ECB", "CBC").
"""
+ @abc.abstractmethod
+ def validate_for_algorithm(self, algorithm):
+ """
+ Checks that all the necessary invariants of this (mode, algorithm)
+ combination are met.
+ """
+
class ModeWithInitializationVector(six.with_metaclass(abc.ABCMeta)):
@abc.abstractproperty
|
{"golden_diff": "diff --git a/cryptography/hazmat/primitives/ciphers/base.py b/cryptography/hazmat/primitives/ciphers/base.py\n--- a/cryptography/hazmat/primitives/ciphers/base.py\n+++ b/cryptography/hazmat/primitives/ciphers/base.py\n@@ -25,6 +25,9 @@\n if not isinstance(algorithm, interfaces.CipherAlgorithm):\n raise TypeError(\"Expected interface of interfaces.CipherAlgorithm\")\n \n+ if mode is not None:\n+ mode.validate_for_algorithm(algorithm)\n+\n self.algorithm = algorithm\n self.mode = mode\n self._backend = backend\ndiff --git a/cryptography/hazmat/primitives/ciphers/modes.py b/cryptography/hazmat/primitives/ciphers/modes.py\n--- a/cryptography/hazmat/primitives/ciphers/modes.py\n+++ b/cryptography/hazmat/primitives/ciphers/modes.py\n@@ -25,11 +25,20 @@\n def __init__(self, initialization_vector):\n self.initialization_vector = initialization_vector\n \n+ def validate_for_algorithm(self, algorithm):\n+ if len(self.initialization_vector) * 8 != algorithm.block_size:\n+ raise ValueError(\"Invalid iv size ({0}) for {1}\".format(\n+ len(self.initialization_vector), self.name\n+ ))\n+\n \n @utils.register_interface(interfaces.Mode)\n class ECB(object):\n name = \"ECB\"\n \n+ def validate_for_algorithm(self, algorithm):\n+ pass\n+\n \n @utils.register_interface(interfaces.Mode)\n @utils.register_interface(interfaces.ModeWithInitializationVector)\n@@ -39,6 +48,12 @@\n def __init__(self, initialization_vector):\n self.initialization_vector = initialization_vector\n \n+ def validate_for_algorithm(self, algorithm):\n+ if len(self.initialization_vector) * 8 != algorithm.block_size:\n+ raise ValueError(\"Invalid iv size ({0}) for {1}\".format(\n+ len(self.initialization_vector), self.name\n+ ))\n+\n \n @utils.register_interface(interfaces.Mode)\n @utils.register_interface(interfaces.ModeWithInitializationVector)\n@@ -48,6 +63,12 @@\n def __init__(self, initialization_vector):\n self.initialization_vector = initialization_vector\n \n+ def validate_for_algorithm(self, algorithm):\n+ if len(self.initialization_vector) * 8 != algorithm.block_size:\n+ raise ValueError(\"Invalid iv size ({0}) for {1}\".format(\n+ len(self.initialization_vector), self.name\n+ ))\n+\n \n @utils.register_interface(interfaces.Mode)\n @utils.register_interface(interfaces.ModeWithNonce)\n@@ -57,6 +78,12 @@\n def __init__(self, nonce):\n self.nonce = nonce\n \n+ def validate_for_algorithm(self, algorithm):\n+ if len(self.nonce) * 8 != algorithm.block_size:\n+ raise ValueError(\"Invalid nonce size ({0}) for {1}\".format(\n+ len(self.nonce), self.name\n+ ))\n+\n \n @utils.register_interface(interfaces.Mode)\n @utils.register_interface(interfaces.ModeWithInitializationVector)\n@@ -65,6 +92,9 @@\n name = \"GCM\"\n \n def __init__(self, initialization_vector, tag=None):\n+ # len(initialization_vector) must in [1, 2 ** 64), but it's impossible\n+ # to actually construct a bytes object that large, so we don't check\n+ # for it\n if tag is not None and len(tag) < 4:\n raise ValueError(\n \"Authentication tag must be 4 bytes or longer\"\n@@ -72,3 +102,6 @@\n \n self.initialization_vector = initialization_vector\n self.tag = tag\n+\n+ def validate_for_algorithm(self, algorithm):\n+ pass\ndiff --git a/cryptography/hazmat/primitives/interfaces.py b/cryptography/hazmat/primitives/interfaces.py\n--- a/cryptography/hazmat/primitives/interfaces.py\n+++ b/cryptography/hazmat/primitives/interfaces.py\n@@ -47,6 +47,13 @@\n A string naming this mode (e.g. 
\"ECB\", \"CBC\").\n \"\"\"\n \n+ @abc.abstractmethod\n+ def validate_for_algorithm(self, algorithm):\n+ \"\"\"\n+ Checks that all the necessary invariants of this (mode, algorithm)\n+ combination are met.\n+ \"\"\"\n+\n \n class ModeWithInitializationVector(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractproperty\n", "issue": "Check IV lengths are correct for cipher\nOpenSSL can't be bothered to check these itself, because crypto is real easy and not important.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n AlreadyFinalized, NotYetFinalized, AlreadyUpdated,\n)\nfrom cryptography.hazmat.primitives import interfaces\n\n\nclass Cipher(object):\n def __init__(self, algorithm, mode, backend):\n if not isinstance(algorithm, interfaces.CipherAlgorithm):\n raise TypeError(\"Expected interface of interfaces.CipherAlgorithm\")\n\n self.algorithm = algorithm\n self.mode = mode\n self._backend = backend\n\n def encryptor(self):\n if isinstance(self.mode, interfaces.ModeWithAuthenticationTag):\n if self.mode.tag is not None:\n raise ValueError(\n \"Authentication tag must be None when encrypting\"\n )\n ctx = self._backend.create_symmetric_encryption_ctx(\n self.algorithm, self.mode\n )\n return self._wrap_ctx(ctx, encrypt=True)\n\n def decryptor(self):\n if isinstance(self.mode, interfaces.ModeWithAuthenticationTag):\n if self.mode.tag is None:\n raise ValueError(\n \"Authentication tag must be provided when decrypting\"\n )\n ctx = self._backend.create_symmetric_decryption_ctx(\n self.algorithm, self.mode\n )\n return self._wrap_ctx(ctx, encrypt=False)\n\n def _wrap_ctx(self, ctx, encrypt):\n if isinstance(self.mode, interfaces.ModeWithAuthenticationTag):\n if encrypt:\n return _AEADEncryptionContext(ctx)\n else:\n return _AEADCipherContext(ctx)\n else:\n return _CipherContext(ctx)\n\n\[email protected]_interface(interfaces.CipherContext)\nclass _CipherContext(object):\n def __init__(self, ctx):\n self._ctx = ctx\n\n def update(self, data):\n if self._ctx is None:\n raise AlreadyFinalized(\"Context was already finalized\")\n return self._ctx.update(data)\n\n def finalize(self):\n if self._ctx is None:\n raise AlreadyFinalized(\"Context was already finalized\")\n data = self._ctx.finalize()\n self._ctx = None\n return data\n\n\[email protected]_interface(interfaces.AEADCipherContext)\[email protected]_interface(interfaces.CipherContext)\nclass _AEADCipherContext(object):\n def __init__(self, ctx):\n self._ctx = ctx\n self._tag = None\n self._updated = False\n\n def update(self, data):\n if self._ctx is None:\n raise AlreadyFinalized(\"Context was already finalized\")\n self._updated = True\n return self._ctx.update(data)\n\n def finalize(self):\n if self._ctx is None:\n raise AlreadyFinalized(\"Context was already finalized\")\n data = self._ctx.finalize()\n self._tag = self._ctx.tag\n self._ctx = None\n return data\n\n def 
authenticate_additional_data(self, data):\n if self._ctx is None:\n raise AlreadyFinalized(\"Context was already finalized\")\n if self._updated:\n raise AlreadyUpdated(\"Update has been called on this context\")\n self._ctx.authenticate_additional_data(data)\n\n\[email protected]_interface(interfaces.AEADEncryptionContext)\nclass _AEADEncryptionContext(_AEADCipherContext):\n @property\n def tag(self):\n if self._ctx is not None:\n raise NotYetFinalized(\"You must finalize encryption before \"\n \"getting the tag\")\n return self._tag\n", "path": "cryptography/hazmat/primitives/ciphers/base.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\n\nimport six\n\n\nclass CipherAlgorithm(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractproperty\n def name(self):\n \"\"\"\n A string naming this mode (e.g. \"AES\", \"Camellia\").\n \"\"\"\n\n @abc.abstractproperty\n def key_size(self):\n \"\"\"\n The size of the key being used as an integer in bits (e.g. 128, 256).\n \"\"\"\n\n\nclass BlockCipherAlgorithm(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractproperty\n def block_size(self):\n \"\"\"\n The size of a block as an integer in bits (e.g. 64, 128).\n \"\"\"\n\n\nclass Mode(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractproperty\n def name(self):\n \"\"\"\n A string naming this mode (e.g. \"ECB\", \"CBC\").\n \"\"\"\n\n\nclass ModeWithInitializationVector(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractproperty\n def initialization_vector(self):\n \"\"\"\n The value of the initialization vector for this mode as bytes.\n \"\"\"\n\n\nclass ModeWithNonce(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractproperty\n def nonce(self):\n \"\"\"\n The value of the nonce for this mode as bytes.\n \"\"\"\n\n\nclass ModeWithAuthenticationTag(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractproperty\n def tag(self):\n \"\"\"\n The value of the tag supplied to the constructor of this mode.\n \"\"\"\n\n\nclass CipherContext(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractmethod\n def update(self, data):\n \"\"\"\n Processes the provided bytes through the cipher and returns the results\n as bytes.\n \"\"\"\n\n @abc.abstractmethod\n def finalize(self):\n \"\"\"\n Returns the results of processing the final block as bytes.\n \"\"\"\n\n\nclass AEADCipherContext(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractmethod\n def authenticate_additional_data(self, data):\n \"\"\"\n Authenticates the provided bytes.\n \"\"\"\n\n\nclass AEADEncryptionContext(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractproperty\n def tag(self):\n \"\"\"\n Returns tag bytes. 
This is only available after encryption is\n finalized.\n \"\"\"\n\n\nclass PaddingContext(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractmethod\n def update(self, data):\n \"\"\"\n Pads the provided bytes and returns any available data as bytes.\n \"\"\"\n\n @abc.abstractmethod\n def finalize(self):\n \"\"\"\n Finalize the padding, returns bytes.\n \"\"\"\n\n\nclass HashAlgorithm(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractproperty\n def name(self):\n \"\"\"\n A string naming this algorithm (e.g. \"sha256\", \"md5\").\n \"\"\"\n\n @abc.abstractproperty\n def digest_size(self):\n \"\"\"\n The size of the resulting digest in bytes.\n \"\"\"\n\n @abc.abstractproperty\n def block_size(self):\n \"\"\"\n The internal block size of the hash algorithm in bytes.\n \"\"\"\n\n\nclass HashContext(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractproperty\n def algorithm(self):\n \"\"\"\n A HashAlgorithm that will be used by this context.\n \"\"\"\n\n @abc.abstractmethod\n def update(self, data):\n \"\"\"\n Processes the provided bytes through the hash.\n \"\"\"\n\n @abc.abstractmethod\n def finalize(self):\n \"\"\"\n Finalizes the hash context and returns the hash digest as bytes.\n \"\"\"\n\n @abc.abstractmethod\n def copy(self):\n \"\"\"\n Return a HashContext that is a copy of the current context.\n \"\"\"\n", "path": "cryptography/hazmat/primitives/interfaces.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom cryptography import utils\nfrom cryptography.hazmat.primitives import interfaces\n\n\[email protected]_interface(interfaces.Mode)\[email protected]_interface(interfaces.ModeWithInitializationVector)\nclass CBC(object):\n name = \"CBC\"\n\n def __init__(self, initialization_vector):\n self.initialization_vector = initialization_vector\n\n\[email protected]_interface(interfaces.Mode)\nclass ECB(object):\n name = \"ECB\"\n\n\[email protected]_interface(interfaces.Mode)\[email protected]_interface(interfaces.ModeWithInitializationVector)\nclass OFB(object):\n name = \"OFB\"\n\n def __init__(self, initialization_vector):\n self.initialization_vector = initialization_vector\n\n\[email protected]_interface(interfaces.Mode)\[email protected]_interface(interfaces.ModeWithInitializationVector)\nclass CFB(object):\n name = \"CFB\"\n\n def __init__(self, initialization_vector):\n self.initialization_vector = initialization_vector\n\n\[email protected]_interface(interfaces.Mode)\[email protected]_interface(interfaces.ModeWithNonce)\nclass CTR(object):\n name = \"CTR\"\n\n def __init__(self, nonce):\n self.nonce = nonce\n\n\[email protected]_interface(interfaces.Mode)\[email protected]_interface(interfaces.ModeWithInitializationVector)\[email protected]_interface(interfaces.ModeWithAuthenticationTag)\nclass GCM(object):\n name = \"GCM\"\n\n def __init__(self, initialization_vector, tag=None):\n if tag is not None and len(tag) < 4:\n raise ValueError(\n \"Authentication tag must be 4 bytes or longer\"\n )\n\n 
self.initialization_vector = initialization_vector\n self.tag = tag\n", "path": "cryptography/hazmat/primitives/ciphers/modes.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n AlreadyFinalized, NotYetFinalized, AlreadyUpdated,\n)\nfrom cryptography.hazmat.primitives import interfaces\n\n\nclass Cipher(object):\n def __init__(self, algorithm, mode, backend):\n if not isinstance(algorithm, interfaces.CipherAlgorithm):\n raise TypeError(\"Expected interface of interfaces.CipherAlgorithm\")\n\n if mode is not None:\n mode.validate_for_algorithm(algorithm)\n\n self.algorithm = algorithm\n self.mode = mode\n self._backend = backend\n\n def encryptor(self):\n if isinstance(self.mode, interfaces.ModeWithAuthenticationTag):\n if self.mode.tag is not None:\n raise ValueError(\n \"Authentication tag must be None when encrypting\"\n )\n ctx = self._backend.create_symmetric_encryption_ctx(\n self.algorithm, self.mode\n )\n return self._wrap_ctx(ctx, encrypt=True)\n\n def decryptor(self):\n if isinstance(self.mode, interfaces.ModeWithAuthenticationTag):\n if self.mode.tag is None:\n raise ValueError(\n \"Authentication tag must be provided when decrypting\"\n )\n ctx = self._backend.create_symmetric_decryption_ctx(\n self.algorithm, self.mode\n )\n return self._wrap_ctx(ctx, encrypt=False)\n\n def _wrap_ctx(self, ctx, encrypt):\n if isinstance(self.mode, interfaces.ModeWithAuthenticationTag):\n if encrypt:\n return _AEADEncryptionContext(ctx)\n else:\n return _AEADCipherContext(ctx)\n else:\n return _CipherContext(ctx)\n\n\[email protected]_interface(interfaces.CipherContext)\nclass _CipherContext(object):\n def __init__(self, ctx):\n self._ctx = ctx\n\n def update(self, data):\n if self._ctx is None:\n raise AlreadyFinalized(\"Context was already finalized\")\n return self._ctx.update(data)\n\n def finalize(self):\n if self._ctx is None:\n raise AlreadyFinalized(\"Context was already finalized\")\n data = self._ctx.finalize()\n self._ctx = None\n return data\n\n\[email protected]_interface(interfaces.AEADCipherContext)\[email protected]_interface(interfaces.CipherContext)\nclass _AEADCipherContext(object):\n def __init__(self, ctx):\n self._ctx = ctx\n self._tag = None\n self._updated = False\n\n def update(self, data):\n if self._ctx is None:\n raise AlreadyFinalized(\"Context was already finalized\")\n self._updated = True\n return self._ctx.update(data)\n\n def finalize(self):\n if self._ctx is None:\n raise AlreadyFinalized(\"Context was already finalized\")\n data = self._ctx.finalize()\n self._tag = self._ctx.tag\n self._ctx = None\n return data\n\n def authenticate_additional_data(self, data):\n if self._ctx is None:\n raise AlreadyFinalized(\"Context was already finalized\")\n if self._updated:\n raise AlreadyUpdated(\"Update has been called on this context\")\n self._ctx.authenticate_additional_data(data)\n\n\[email 
protected]_interface(interfaces.AEADEncryptionContext)\nclass _AEADEncryptionContext(_AEADCipherContext):\n @property\n def tag(self):\n if self._ctx is not None:\n raise NotYetFinalized(\"You must finalize encryption before \"\n \"getting the tag\")\n return self._tag\n", "path": "cryptography/hazmat/primitives/ciphers/base.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\n\nimport six\n\n\nclass CipherAlgorithm(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractproperty\n def name(self):\n \"\"\"\n A string naming this mode (e.g. \"AES\", \"Camellia\").\n \"\"\"\n\n @abc.abstractproperty\n def key_size(self):\n \"\"\"\n The size of the key being used as an integer in bits (e.g. 128, 256).\n \"\"\"\n\n\nclass BlockCipherAlgorithm(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractproperty\n def block_size(self):\n \"\"\"\n The size of a block as an integer in bits (e.g. 64, 128).\n \"\"\"\n\n\nclass Mode(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractproperty\n def name(self):\n \"\"\"\n A string naming this mode (e.g. \"ECB\", \"CBC\").\n \"\"\"\n\n @abc.abstractmethod\n def validate_for_algorithm(self, algorithm):\n \"\"\"\n Checks that all the necessary invariants of this (mode, algorithm)\n combination are met.\n \"\"\"\n\n\nclass ModeWithInitializationVector(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractproperty\n def initialization_vector(self):\n \"\"\"\n The value of the initialization vector for this mode as bytes.\n \"\"\"\n\n\nclass ModeWithNonce(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractproperty\n def nonce(self):\n \"\"\"\n The value of the nonce for this mode as bytes.\n \"\"\"\n\n\nclass ModeWithAuthenticationTag(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractproperty\n def tag(self):\n \"\"\"\n The value of the tag supplied to the constructor of this mode.\n \"\"\"\n\n\nclass CipherContext(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractmethod\n def update(self, data):\n \"\"\"\n Processes the provided bytes through the cipher and returns the results\n as bytes.\n \"\"\"\n\n @abc.abstractmethod\n def finalize(self):\n \"\"\"\n Returns the results of processing the final block as bytes.\n \"\"\"\n\n\nclass AEADCipherContext(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractmethod\n def authenticate_additional_data(self, data):\n \"\"\"\n Authenticates the provided bytes.\n \"\"\"\n\n\nclass AEADEncryptionContext(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractproperty\n def tag(self):\n \"\"\"\n Returns tag bytes. 
This is only available after encryption is\n finalized.\n \"\"\"\n\n\nclass PaddingContext(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractmethod\n def update(self, data):\n \"\"\"\n Pads the provided bytes and returns any available data as bytes.\n \"\"\"\n\n @abc.abstractmethod\n def finalize(self):\n \"\"\"\n Finalize the padding, returns bytes.\n \"\"\"\n\n\nclass HashAlgorithm(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractproperty\n def name(self):\n \"\"\"\n A string naming this algorithm (e.g. \"sha256\", \"md5\").\n \"\"\"\n\n @abc.abstractproperty\n def digest_size(self):\n \"\"\"\n The size of the resulting digest in bytes.\n \"\"\"\n\n @abc.abstractproperty\n def block_size(self):\n \"\"\"\n The internal block size of the hash algorithm in bytes.\n \"\"\"\n\n\nclass HashContext(six.with_metaclass(abc.ABCMeta)):\n @abc.abstractproperty\n def algorithm(self):\n \"\"\"\n A HashAlgorithm that will be used by this context.\n \"\"\"\n\n @abc.abstractmethod\n def update(self, data):\n \"\"\"\n Processes the provided bytes through the hash.\n \"\"\"\n\n @abc.abstractmethod\n def finalize(self):\n \"\"\"\n Finalizes the hash context and returns the hash digest as bytes.\n \"\"\"\n\n @abc.abstractmethod\n def copy(self):\n \"\"\"\n Return a HashContext that is a copy of the current context.\n \"\"\"\n", "path": "cryptography/hazmat/primitives/interfaces.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom cryptography import utils\nfrom cryptography.hazmat.primitives import interfaces\n\n\[email protected]_interface(interfaces.Mode)\[email protected]_interface(interfaces.ModeWithInitializationVector)\nclass CBC(object):\n name = \"CBC\"\n\n def __init__(self, initialization_vector):\n self.initialization_vector = initialization_vector\n\n def validate_for_algorithm(self, algorithm):\n if len(self.initialization_vector) * 8 != algorithm.block_size:\n raise ValueError(\"Invalid iv size ({0}) for {1}\".format(\n len(self.initialization_vector), self.name\n ))\n\n\[email protected]_interface(interfaces.Mode)\nclass ECB(object):\n name = \"ECB\"\n\n def validate_for_algorithm(self, algorithm):\n pass\n\n\[email protected]_interface(interfaces.Mode)\[email protected]_interface(interfaces.ModeWithInitializationVector)\nclass OFB(object):\n name = \"OFB\"\n\n def __init__(self, initialization_vector):\n self.initialization_vector = initialization_vector\n\n def validate_for_algorithm(self, algorithm):\n if len(self.initialization_vector) * 8 != algorithm.block_size:\n raise ValueError(\"Invalid iv size ({0}) for {1}\".format(\n len(self.initialization_vector), self.name\n ))\n\n\[email protected]_interface(interfaces.Mode)\[email protected]_interface(interfaces.ModeWithInitializationVector)\nclass CFB(object):\n name = \"CFB\"\n\n def __init__(self, initialization_vector):\n self.initialization_vector = initialization_vector\n\n def validate_for_algorithm(self, algorithm):\n if 
len(self.initialization_vector) * 8 != algorithm.block_size:\n raise ValueError(\"Invalid iv size ({0}) for {1}\".format(\n len(self.initialization_vector), self.name\n ))\n\n\[email protected]_interface(interfaces.Mode)\[email protected]_interface(interfaces.ModeWithNonce)\nclass CTR(object):\n name = \"CTR\"\n\n def __init__(self, nonce):\n self.nonce = nonce\n\n def validate_for_algorithm(self, algorithm):\n if len(self.nonce) * 8 != algorithm.block_size:\n raise ValueError(\"Invalid nonce size ({0}) for {1}\".format(\n len(self.nonce), self.name\n ))\n\n\[email protected]_interface(interfaces.Mode)\[email protected]_interface(interfaces.ModeWithInitializationVector)\[email protected]_interface(interfaces.ModeWithAuthenticationTag)\nclass GCM(object):\n name = \"GCM\"\n\n def __init__(self, initialization_vector, tag=None):\n # len(initialization_vector) must in [1, 2 ** 64), but it's impossible\n # to actually construct a bytes object that large, so we don't check\n # for it\n if tag is not None and len(tag) < 4:\n raise ValueError(\n \"Authentication tag must be 4 bytes or longer\"\n )\n\n self.initialization_vector = initialization_vector\n self.tag = tag\n\n def validate_for_algorithm(self, algorithm):\n pass\n", "path": "cryptography/hazmat/primitives/ciphers/modes.py"}]}
| 3,408 | 971 |
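Editorial sketch (not part of the dataset row above): the central idea of that row's golden diff is that every mode gains a `validate_for_algorithm` hook which the `Cipher` constructor calls, so an IV or nonce whose bit length does not match the algorithm's block size is rejected up front instead of being handed to OpenSSL. The stand-alone snippet below mimics that check; `StubAES` and `StubCBC` are illustrative stand-ins, not the real `cryptography` classes.

```python
class StubAES:
    """Stand-in for a block cipher algorithm; 128 is AES's block size in bits."""
    name = "AES"
    block_size = 128


class StubCBC:
    """Stand-in for the CBC mode, carrying the check the patch introduces."""
    name = "CBC"

    def __init__(self, initialization_vector):
        self.initialization_vector = initialization_vector

    def validate_for_algorithm(self, algorithm):
        # Reject IVs whose bit length differs from the cipher's block size.
        if len(self.initialization_vector) * 8 != algorithm.block_size:
            raise ValueError("Invalid iv size ({0}) for {1}".format(
                len(self.initialization_vector), self.name))


try:
    # A 64-bit IV against a 128-bit block cipher fails the new check.
    StubCBC(b"\x00" * 8).validate_for_algorithm(StubAES())
except ValueError as exc:
    print(exc)  # Invalid iv size (8) for CBC
```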
gh_patches_debug_4904
|
rasdani/github-patches
|
git_diff
|
pypi__warehouse-2846
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
duplicate footer appears in package page only when not logged in
When viewing a package page (appears to apply to any package), the (blue) footer ("Get Help", "About PyPI", "Contributing to PyPI") occurs twice: once below a login prompt and a search text box that appear below the package's title (with the package details below these and the footer), and once at the bottom of the page (the latter as expected).
This behavior occurs only when logged out. In contrast, when logged in, the layout seems as intended: a search text box at the top, and the package title and details contiguously below that search box, with a single footer at the bottom.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/views.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import collections
14
15 from pyramid.httpexceptions import (
16 HTTPException, HTTPSeeOther, HTTPMovedPermanently, HTTPNotFound,
17 HTTPBadRequest, exception_response,
18 )
19 from pyramid.exceptions import PredicateMismatch
20 from pyramid.renderers import render_to_response
21 from pyramid.response import Response
22 from pyramid.view import (
23 notfound_view_config, forbidden_view_config, exception_view_config,
24 view_config,
25 )
26 from elasticsearch_dsl import Q
27 from sqlalchemy import func
28 from sqlalchemy.orm import aliased, joinedload
29 from sqlalchemy.sql import exists
30
31 from warehouse.accounts import REDIRECT_FIELD_NAME
32 from warehouse.accounts.models import User
33 from warehouse.cache.origin import origin_cache
34 from warehouse.cache.http import cache_control
35 from warehouse.classifiers.models import Classifier
36 from warehouse.packaging.models import (
37 Project, Release, File, release_classifiers,
38 )
39 from warehouse.utils.row_counter import RowCount
40 from warehouse.utils.paginate import ElasticsearchPage, paginate_url_factory
41
42
43 SEARCH_FIELDS = [
44 "author", "author_email", "description", "download_url", "home_page",
45 "keywords", "license", "maintainer", "maintainer_email", "normalized_name",
46 "platform", "summary",
47 ]
48 SEARCH_BOOSTS = {
49 "normalized_name": 10,
50 "description": 5,
51 "keywords": 5,
52 "summary": 5,
53 }
54 SEARCH_FILTER_ORDER = (
55 "Programming Language",
56 "License",
57 "Framework",
58 "Topic",
59 "Intended Audience",
60 "Environment",
61 "Operating System",
62 "Natural Language",
63 "Development Status",
64 )
65
66
67 # 403, 404, 410, 500,
68
69
70 @view_config(context=HTTPException)
71 @notfound_view_config(append_slash=HTTPMovedPermanently)
72 def httpexception_view(exc, request):
73 # This special case exists for the easter egg that appears on the 404
74     # response page. We don't generally allow youtube embeds, but we make an
75     # exception for this one.
76 if isinstance(exc, HTTPNotFound):
77 request.find_service(name="csp").merge({
78 "frame-src": ["https://www.youtube-nocookie.com"],
79 "script-src": ["https://www.youtube.com", "https://s.ytimg.com"],
80 })
81 try:
82 # Lightweight version of 404 page for `/simple/`
83 if (isinstance(exc, HTTPNotFound) and
84 request.path.startswith("/simple/")):
85 response = Response(
86 body="404 Not Found",
87 content_type="text/plain"
88 )
89 else:
90 response = render_to_response(
91 "{}.html".format(exc.status_code),
92 {},
93 request=request,
94 )
95 except LookupError:
96 # We don't have a customized template for this error, so we'll just let
97 # the default happen instead.
98 return exc
99
100 # Copy over the important values from our HTTPException to our new response
101 # object.
102 response.status = exc.status
103 response.headers.extend(
104 (k, v) for k, v in exc.headers.items()
105 if k not in response.headers
106 )
107
108 return response
109
110
111 @forbidden_view_config()
112 @exception_view_config(PredicateMismatch)
113 def forbidden(exc, request, redirect_to="accounts.login"):
114 # If the forbidden error is because the user isn't logged in, then we'll
115 # redirect them to the log in page.
116 if request.authenticated_userid is None:
117 url = request.route_url(
118 redirect_to,
119 _query={REDIRECT_FIELD_NAME: request.path_qs},
120 )
121 return HTTPSeeOther(url)
122
123 # If we've reached here, then the user is logged in and they are genuinely
124 # not allowed to access this page.
125 return httpexception_view(exc, request)
126
127
128 @view_config(
129 route_name="robots.txt",
130 renderer="robots.txt",
131 decorator=[
132 cache_control(1 * 24 * 60 * 60), # 1 day
133 origin_cache(
134 1 * 24 * 60 * 60, # 1 day
135 stale_while_revalidate=6 * 60 * 60, # 6 hours
136 stale_if_error=1 * 24 * 60 * 60, # 1 day
137 ),
138 ],
139 )
140 def robotstxt(request):
141 request.response.content_type = "text/plain"
142 return {}
143
144
145 @view_config(
146 route_name="opensearch.xml",
147 renderer="opensearch.xml",
148 decorator=[
149 cache_control(1 * 24 * 60 * 60), # 1 day
150 origin_cache(
151 1 * 24 * 60 * 60, # 1 day
152 stale_while_revalidate=6 * 60 * 60, # 6 hours
153 stale_if_error=1 * 24 * 60 * 60, # 1 day
154 )
155 ]
156 )
157 def opensearchxml(request):
158 request.response.content_type = "text/xml"
159 return {}
160
161
162 @view_config(
163 route_name="index",
164 renderer="index.html",
165 decorator=[
166 origin_cache(
167 1 * 60 * 60, # 1 hour
168 stale_while_revalidate=10 * 60, # 10 minutes
169 stale_if_error=1 * 24 * 60 * 60, # 1 day
170 keys=["all-projects", "trending"],
171 ),
172 ]
173 )
174 def index(request):
175 project_names = [
176 r[0] for r in (
177 request.db.query(Project.name)
178 .order_by(Project.zscore.desc().nullslast(),
179 func.random())
180 .limit(5)
181 .all())
182 ]
183 release_a = aliased(
184 Release,
185 request.db.query(Release)
186 .distinct(Release.name)
187 .filter(Release.name.in_(project_names))
188 .order_by(Release.name,
189 Release.is_prerelease.nullslast(),
190 Release._pypi_ordering.desc())
191 .subquery(),
192 )
193 trending_projects = (
194 request.db.query(release_a)
195 .options(joinedload(release_a.project))
196 .order_by(func.array_idx(project_names, release_a.name))
197 .all()
198 )
199
200 latest_releases = (
201 request.db.query(Release)
202 .options(joinedload(Release.project))
203 .order_by(Release.created.desc())
204 .limit(5)
205 .all()
206 )
207
208 counts = dict(
209 request.db.query(RowCount.table_name, RowCount.count)
210 .filter(
211 RowCount.table_name.in_([
212 Project.__tablename__,
213 Release.__tablename__,
214 File.__tablename__,
215 User.__tablename__,
216 ]))
217 .all()
218 )
219
220 return {
221 "latest_releases": latest_releases,
222 "trending_projects": trending_projects,
223 "num_projects": counts.get(Project.__tablename__, 0),
224 "num_releases": counts.get(Release.__tablename__, 0),
225 "num_files": counts.get(File.__tablename__, 0),
226 "num_users": counts.get(User.__tablename__, 0),
227 }
228
229
230 @view_config(
231 route_name="search",
232 renderer="search/results.html",
233 decorator=[
234 origin_cache(
235 1 * 60 * 60, # 1 hour
236 stale_while_revalidate=10 * 60, # 10 minutes
237 stale_if_error=1 * 24 * 60 * 60, # 1 day
238 keys=["all-projects"],
239 )
240 ],
241 )
242 def search(request):
243
244 q = request.params.get("q", '')
245
246 if q:
247 should = []
248 for field in SEARCH_FIELDS:
249 kw = {"query": q}
250 if field in SEARCH_BOOSTS:
251 kw["boost"] = SEARCH_BOOSTS[field]
252 should.append(Q("match", **{field: kw}))
253
254 # Add a prefix query if ``q`` is longer than one character.
255 if len(q) > 1:
256 should.append(Q('prefix', normalized_name=q))
257
258 query = request.es.query("dis_max", queries=should)
259 query = query.suggest("name_suggestion", q, term={"field": "name"})
260 else:
261 query = request.es.query()
262
263 if request.params.get("o"):
264 sort_key = request.params["o"]
265 if sort_key.startswith("-"):
266 sort = {
267 sort_key[1:]: {
268 "order": "desc",
269 "unmapped_type": "long",
270 },
271 }
272 else:
273 sort = {
274 sort_key: {
275 "unmapped_type": "long",
276 }
277 }
278
279 query = query.sort(sort)
280
281 # Require match to all specified classifiers
282 for classifier in request.params.getall("c"):
283 query = query.filter("terms", classifiers=[classifier])
284
285 try:
286 page_num = int(request.params.get("page", 1))
287 except ValueError:
288 raise HTTPBadRequest("'page' must be an integer.")
289
290 page = ElasticsearchPage(
291 query,
292 page=page_num,
293 url_maker=paginate_url_factory(request),
294 )
295
296 if page.page_count and page_num > page.page_count:
297 return HTTPNotFound()
298
299 available_filters = collections.defaultdict(list)
300
301 classifiers_q = (
302 request.db.query(Classifier)
303 .with_entities(Classifier.classifier)
304 .filter(
305 exists([release_classifiers.c.trove_id])
306 .where(release_classifiers.c.trove_id == Classifier.id)
307 )
308 .order_by(Classifier.classifier)
309 )
310
311 for cls in classifiers_q:
312 first, *_ = cls.classifier.split(' :: ')
313 available_filters[first].append(cls.classifier)
314
315 def filter_key(item):
316 try:
317 return 0, SEARCH_FILTER_ORDER.index(item[0]), item[0]
318 except ValueError:
319 return 1, 0, item[0]
320
321 return {
322 "page": page,
323 "term": q,
324 "order": request.params.get("o", ''),
325 "available_filters": sorted(available_filters.items(), key=filter_key),
326 "applied_filters": request.params.getall("c"),
327 }
328
329
330 @view_config(
331 route_name="includes.current-user-indicator",
332 renderer="includes/current-user-indicator.html",
333 uses_session=True,
334 )
335 def current_user_indicator(request):
336 return {}
337
338
339 @view_config(
340 route_name="includes.flash-messages",
341 renderer="includes/flash-messages.html",
342 uses_session=True,
343 )
344 def flash_messages(request):
345 return {}
346
347
348 @view_config(route_name="health", renderer="string")
349 def health(request):
350 # This will ensure that we can access the database and run queries against
351 # it without doing anything that will take a lock or block other queries.
352 request.db.execute("SELECT 1")
353
354 # Nothing will actually check this, but it's a little nicer to have
355 # something to return besides an empty body.
356 return "OK"
357
358
359 @view_config(route_name="force-status")
360 def force_status(request):
361 try:
362 raise exception_response(int(request.matchdict["status"]))
363 except KeyError:
364 raise exception_response(404) from None
365
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/warehouse/views.py b/warehouse/views.py
--- a/warehouse/views.py
+++ b/warehouse/views.py
@@ -125,6 +125,14 @@
return httpexception_view(exc, request)
+@forbidden_view_config(path_info=r"^/_includes/")
+@exception_view_config(PredicateMismatch, path_info=r"^/_includes/")
+def forbidden_include(exc, request):
+ # If the forbidden error is for a client-side-include, just return an empty
+ # response instead of redirecting
+ return Response()
+
+
@view_config(
route_name="robots.txt",
renderer="robots.txt",
|
{"golden_diff": "diff --git a/warehouse/views.py b/warehouse/views.py\n--- a/warehouse/views.py\n+++ b/warehouse/views.py\n@@ -125,6 +125,14 @@\n return httpexception_view(exc, request)\n \n \n+@forbidden_view_config(path_info=r\"^/_includes/\")\n+@exception_view_config(PredicateMismatch, path_info=r\"^/_includes/\")\n+def forbidden_include(exc, request):\n+ # If the forbidden error is for a client-side-include, just return an empty\n+ # response instead of redirecting\n+ return Response()\n+\n+\n @view_config(\n route_name=\"robots.txt\",\n renderer=\"robots.txt\",\n", "issue": "duplicate footer appears in package page only when not logged in\nWhen viewing a package page (appears to apply to any package), the (blue) footer (\"Get Help\", \"About PyPI\", \"Contributing to PyPI\") occurs twice: once below a login prompt and a search text box that appear below the package's title (with the package details below these and the footer), and once at the bottom of the page (the latter as expected).\r\n\r\nThis behavior occurs only when logged out. In contrast, when logged in, the layout seems as intended: a search text box at the top, and the package title and details contiguously below that search box, with a single footer at the bottom.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\n\nfrom pyramid.httpexceptions import (\n HTTPException, HTTPSeeOther, HTTPMovedPermanently, HTTPNotFound,\n HTTPBadRequest, exception_response,\n)\nfrom pyramid.exceptions import PredicateMismatch\nfrom pyramid.renderers import render_to_response\nfrom pyramid.response import Response\nfrom pyramid.view import (\n notfound_view_config, forbidden_view_config, exception_view_config,\n view_config,\n)\nfrom elasticsearch_dsl import Q\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import aliased, joinedload\nfrom sqlalchemy.sql import exists\n\nfrom warehouse.accounts import REDIRECT_FIELD_NAME\nfrom warehouse.accounts.models import User\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.cache.http import cache_control\nfrom warehouse.classifiers.models import Classifier\nfrom warehouse.packaging.models import (\n Project, Release, File, release_classifiers,\n)\nfrom warehouse.utils.row_counter import RowCount\nfrom warehouse.utils.paginate import ElasticsearchPage, paginate_url_factory\n\n\nSEARCH_FIELDS = [\n \"author\", \"author_email\", \"description\", \"download_url\", \"home_page\",\n \"keywords\", \"license\", \"maintainer\", \"maintainer_email\", \"normalized_name\",\n \"platform\", \"summary\",\n]\nSEARCH_BOOSTS = {\n \"normalized_name\": 10,\n \"description\": 5,\n \"keywords\": 5,\n \"summary\": 5,\n}\nSEARCH_FILTER_ORDER = (\n \"Programming Language\",\n \"License\",\n \"Framework\",\n \"Topic\",\n \"Intended Audience\",\n \"Environment\",\n \"Operating System\",\n \"Natural Language\",\n \"Development Status\",\n)\n\n\n# 403, 404, 410, 
500,\n\n\n@view_config(context=HTTPException)\n@notfound_view_config(append_slash=HTTPMovedPermanently)\ndef httpexception_view(exc, request):\n # This special case exists for the easter egg that appears on the 404\n # response page. We don't generally allow youtube embeds, but we make an\n # except for this one.\n if isinstance(exc, HTTPNotFound):\n request.find_service(name=\"csp\").merge({\n \"frame-src\": [\"https://www.youtube-nocookie.com\"],\n \"script-src\": [\"https://www.youtube.com\", \"https://s.ytimg.com\"],\n })\n try:\n # Lightweight version of 404 page for `/simple/`\n if (isinstance(exc, HTTPNotFound) and\n request.path.startswith(\"/simple/\")):\n response = Response(\n body=\"404 Not Found\",\n content_type=\"text/plain\"\n )\n else:\n response = render_to_response(\n \"{}.html\".format(exc.status_code),\n {},\n request=request,\n )\n except LookupError:\n # We don't have a customized template for this error, so we'll just let\n # the default happen instead.\n return exc\n\n # Copy over the important values from our HTTPException to our new response\n # object.\n response.status = exc.status\n response.headers.extend(\n (k, v) for k, v in exc.headers.items()\n if k not in response.headers\n )\n\n return response\n\n\n@forbidden_view_config()\n@exception_view_config(PredicateMismatch)\ndef forbidden(exc, request, redirect_to=\"accounts.login\"):\n # If the forbidden error is because the user isn't logged in, then we'll\n # redirect them to the log in page.\n if request.authenticated_userid is None:\n url = request.route_url(\n redirect_to,\n _query={REDIRECT_FIELD_NAME: request.path_qs},\n )\n return HTTPSeeOther(url)\n\n # If we've reached here, then the user is logged in and they are genuinely\n # not allowed to access this page.\n return httpexception_view(exc, request)\n\n\n@view_config(\n route_name=\"robots.txt\",\n renderer=\"robots.txt\",\n decorator=[\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=6 * 60 * 60, # 6 hours\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef robotstxt(request):\n request.response.content_type = \"text/plain\"\n return {}\n\n\n@view_config(\n route_name=\"opensearch.xml\",\n renderer=\"opensearch.xml\",\n decorator=[\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=6 * 60 * 60, # 6 hours\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n )\n ]\n)\ndef opensearchxml(request):\n request.response.content_type = \"text/xml\"\n return {}\n\n\n@view_config(\n route_name=\"index\",\n renderer=\"index.html\",\n decorator=[\n origin_cache(\n 1 * 60 * 60, # 1 hour\n stale_while_revalidate=10 * 60, # 10 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n keys=[\"all-projects\", \"trending\"],\n ),\n ]\n)\ndef index(request):\n project_names = [\n r[0] for r in (\n request.db.query(Project.name)\n .order_by(Project.zscore.desc().nullslast(),\n func.random())\n .limit(5)\n .all())\n ]\n release_a = aliased(\n Release,\n request.db.query(Release)\n .distinct(Release.name)\n .filter(Release.name.in_(project_names))\n .order_by(Release.name,\n Release.is_prerelease.nullslast(),\n Release._pypi_ordering.desc())\n .subquery(),\n )\n trending_projects = (\n request.db.query(release_a)\n .options(joinedload(release_a.project))\n .order_by(func.array_idx(project_names, release_a.name))\n .all()\n )\n\n latest_releases = (\n request.db.query(Release)\n .options(joinedload(Release.project))\n .order_by(Release.created.desc())\n 
.limit(5)\n .all()\n )\n\n counts = dict(\n request.db.query(RowCount.table_name, RowCount.count)\n .filter(\n RowCount.table_name.in_([\n Project.__tablename__,\n Release.__tablename__,\n File.__tablename__,\n User.__tablename__,\n ]))\n .all()\n )\n\n return {\n \"latest_releases\": latest_releases,\n \"trending_projects\": trending_projects,\n \"num_projects\": counts.get(Project.__tablename__, 0),\n \"num_releases\": counts.get(Release.__tablename__, 0),\n \"num_files\": counts.get(File.__tablename__, 0),\n \"num_users\": counts.get(User.__tablename__, 0),\n }\n\n\n@view_config(\n route_name=\"search\",\n renderer=\"search/results.html\",\n decorator=[\n origin_cache(\n 1 * 60 * 60, # 1 hour\n stale_while_revalidate=10 * 60, # 10 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n keys=[\"all-projects\"],\n )\n ],\n)\ndef search(request):\n\n q = request.params.get(\"q\", '')\n\n if q:\n should = []\n for field in SEARCH_FIELDS:\n kw = {\"query\": q}\n if field in SEARCH_BOOSTS:\n kw[\"boost\"] = SEARCH_BOOSTS[field]\n should.append(Q(\"match\", **{field: kw}))\n\n # Add a prefix query if ``q`` is longer than one character.\n if len(q) > 1:\n should.append(Q('prefix', normalized_name=q))\n\n query = request.es.query(\"dis_max\", queries=should)\n query = query.suggest(\"name_suggestion\", q, term={\"field\": \"name\"})\n else:\n query = request.es.query()\n\n if request.params.get(\"o\"):\n sort_key = request.params[\"o\"]\n if sort_key.startswith(\"-\"):\n sort = {\n sort_key[1:]: {\n \"order\": \"desc\",\n \"unmapped_type\": \"long\",\n },\n }\n else:\n sort = {\n sort_key: {\n \"unmapped_type\": \"long\",\n }\n }\n\n query = query.sort(sort)\n\n # Require match to all specified classifiers\n for classifier in request.params.getall(\"c\"):\n query = query.filter(\"terms\", classifiers=[classifier])\n\n try:\n page_num = int(request.params.get(\"page\", 1))\n except ValueError:\n raise HTTPBadRequest(\"'page' must be an integer.\")\n\n page = ElasticsearchPage(\n query,\n page=page_num,\n url_maker=paginate_url_factory(request),\n )\n\n if page.page_count and page_num > page.page_count:\n return HTTPNotFound()\n\n available_filters = collections.defaultdict(list)\n\n classifiers_q = (\n request.db.query(Classifier)\n .with_entities(Classifier.classifier)\n .filter(\n exists([release_classifiers.c.trove_id])\n .where(release_classifiers.c.trove_id == Classifier.id)\n )\n .order_by(Classifier.classifier)\n )\n\n for cls in classifiers_q:\n first, *_ = cls.classifier.split(' :: ')\n available_filters[first].append(cls.classifier)\n\n def filter_key(item):\n try:\n return 0, SEARCH_FILTER_ORDER.index(item[0]), item[0]\n except ValueError:\n return 1, 0, item[0]\n\n return {\n \"page\": page,\n \"term\": q,\n \"order\": request.params.get(\"o\", ''),\n \"available_filters\": sorted(available_filters.items(), key=filter_key),\n \"applied_filters\": request.params.getall(\"c\"),\n }\n\n\n@view_config(\n route_name=\"includes.current-user-indicator\",\n renderer=\"includes/current-user-indicator.html\",\n uses_session=True,\n)\ndef current_user_indicator(request):\n return {}\n\n\n@view_config(\n route_name=\"includes.flash-messages\",\n renderer=\"includes/flash-messages.html\",\n uses_session=True,\n)\ndef flash_messages(request):\n return {}\n\n\n@view_config(route_name=\"health\", renderer=\"string\")\ndef health(request):\n # This will ensure that we can access the database and run queries against\n # it without doing anything that will take a lock or block other queries.\n 
request.db.execute(\"SELECT 1\")\n\n # Nothing will actually check this, but it's a little nicer to have\n # something to return besides an empty body.\n return \"OK\"\n\n\n@view_config(route_name=\"force-status\")\ndef force_status(request):\n try:\n raise exception_response(int(request.matchdict[\"status\"]))\n except KeyError:\n raise exception_response(404) from None\n", "path": "warehouse/views.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\n\nfrom pyramid.httpexceptions import (\n HTTPException, HTTPSeeOther, HTTPMovedPermanently, HTTPNotFound,\n HTTPBadRequest, exception_response,\n)\nfrom pyramid.exceptions import PredicateMismatch\nfrom pyramid.renderers import render_to_response\nfrom pyramid.response import Response\nfrom pyramid.view import (\n notfound_view_config, forbidden_view_config, exception_view_config,\n view_config,\n)\nfrom elasticsearch_dsl import Q\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import aliased, joinedload\nfrom sqlalchemy.sql import exists\n\nfrom warehouse.accounts import REDIRECT_FIELD_NAME\nfrom warehouse.accounts.models import User\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.cache.http import cache_control\nfrom warehouse.classifiers.models import Classifier\nfrom warehouse.packaging.models import (\n Project, Release, File, release_classifiers,\n)\nfrom warehouse.utils.row_counter import RowCount\nfrom warehouse.utils.paginate import ElasticsearchPage, paginate_url_factory\n\n\nSEARCH_FIELDS = [\n \"author\", \"author_email\", \"description\", \"download_url\", \"home_page\",\n \"keywords\", \"license\", \"maintainer\", \"maintainer_email\", \"normalized_name\",\n \"platform\", \"summary\",\n]\nSEARCH_BOOSTS = {\n \"normalized_name\": 10,\n \"description\": 5,\n \"keywords\": 5,\n \"summary\": 5,\n}\nSEARCH_FILTER_ORDER = (\n \"Programming Language\",\n \"License\",\n \"Framework\",\n \"Topic\",\n \"Intended Audience\",\n \"Environment\",\n \"Operating System\",\n \"Natural Language\",\n \"Development Status\",\n)\n\n\n# 403, 404, 410, 500,\n\n\n@view_config(context=HTTPException)\n@notfound_view_config(append_slash=HTTPMovedPermanently)\ndef httpexception_view(exc, request):\n # This special case exists for the easter egg that appears on the 404\n # response page. 
We don't generally allow youtube embeds, but we make an\n # except for this one.\n if isinstance(exc, HTTPNotFound):\n request.find_service(name=\"csp\").merge({\n \"frame-src\": [\"https://www.youtube-nocookie.com\"],\n \"script-src\": [\"https://www.youtube.com\", \"https://s.ytimg.com\"],\n })\n try:\n # Lightweight version of 404 page for `/simple/`\n if (isinstance(exc, HTTPNotFound) and\n request.path.startswith(\"/simple/\")):\n response = Response(\n body=\"404 Not Found\",\n content_type=\"text/plain\"\n )\n else:\n response = render_to_response(\n \"{}.html\".format(exc.status_code),\n {},\n request=request,\n )\n except LookupError:\n # We don't have a customized template for this error, so we'll just let\n # the default happen instead.\n return exc\n\n # Copy over the important values from our HTTPException to our new response\n # object.\n response.status = exc.status\n response.headers.extend(\n (k, v) for k, v in exc.headers.items()\n if k not in response.headers\n )\n\n return response\n\n\n@forbidden_view_config()\n@exception_view_config(PredicateMismatch)\ndef forbidden(exc, request, redirect_to=\"accounts.login\"):\n # If the forbidden error is because the user isn't logged in, then we'll\n # redirect them to the log in page.\n if request.authenticated_userid is None:\n url = request.route_url(\n redirect_to,\n _query={REDIRECT_FIELD_NAME: request.path_qs},\n )\n return HTTPSeeOther(url)\n\n # If we've reached here, then the user is logged in and they are genuinely\n # not allowed to access this page.\n return httpexception_view(exc, request)\n\n\n@forbidden_view_config(path_info=r\"^/_includes/\")\n@exception_view_config(PredicateMismatch, path_info=r\"^/_includes/\")\ndef forbidden_include(exc, request):\n # If the forbidden error is for a client-side-include, just return an empty\n # response instead of redirecting\n return Response()\n\n\n@view_config(\n route_name=\"robots.txt\",\n renderer=\"robots.txt\",\n decorator=[\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=6 * 60 * 60, # 6 hours\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef robotstxt(request):\n request.response.content_type = \"text/plain\"\n return {}\n\n\n@view_config(\n route_name=\"opensearch.xml\",\n renderer=\"opensearch.xml\",\n decorator=[\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=6 * 60 * 60, # 6 hours\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n )\n ]\n)\ndef opensearchxml(request):\n request.response.content_type = \"text/xml\"\n return {}\n\n\n@view_config(\n route_name=\"index\",\n renderer=\"index.html\",\n decorator=[\n origin_cache(\n 1 * 60 * 60, # 1 hour\n stale_while_revalidate=10 * 60, # 10 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n keys=[\"all-projects\", \"trending\"],\n ),\n ]\n)\ndef index(request):\n project_names = [\n r[0] for r in (\n request.db.query(Project.name)\n .order_by(Project.zscore.desc().nullslast(),\n func.random())\n .limit(5)\n .all())\n ]\n release_a = aliased(\n Release,\n request.db.query(Release)\n .distinct(Release.name)\n .filter(Release.name.in_(project_names))\n .order_by(Release.name,\n Release.is_prerelease.nullslast(),\n Release._pypi_ordering.desc())\n .subquery(),\n )\n trending_projects = (\n request.db.query(release_a)\n .options(joinedload(release_a.project))\n .order_by(func.array_idx(project_names, release_a.name))\n .all()\n )\n\n latest_releases = (\n request.db.query(Release)\n 
.options(joinedload(Release.project))\n .order_by(Release.created.desc())\n .limit(5)\n .all()\n )\n\n counts = dict(\n request.db.query(RowCount.table_name, RowCount.count)\n .filter(\n RowCount.table_name.in_([\n Project.__tablename__,\n Release.__tablename__,\n File.__tablename__,\n User.__tablename__,\n ]))\n .all()\n )\n\n return {\n \"latest_releases\": latest_releases,\n \"trending_projects\": trending_projects,\n \"num_projects\": counts.get(Project.__tablename__, 0),\n \"num_releases\": counts.get(Release.__tablename__, 0),\n \"num_files\": counts.get(File.__tablename__, 0),\n \"num_users\": counts.get(User.__tablename__, 0),\n }\n\n\n@view_config(\n route_name=\"search\",\n renderer=\"search/results.html\",\n decorator=[\n origin_cache(\n 1 * 60 * 60, # 1 hour\n stale_while_revalidate=10 * 60, # 10 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n keys=[\"all-projects\"],\n )\n ],\n)\ndef search(request):\n\n q = request.params.get(\"q\", '')\n\n if q:\n should = []\n for field in SEARCH_FIELDS:\n kw = {\"query\": q}\n if field in SEARCH_BOOSTS:\n kw[\"boost\"] = SEARCH_BOOSTS[field]\n should.append(Q(\"match\", **{field: kw}))\n\n # Add a prefix query if ``q`` is longer than one character.\n if len(q) > 1:\n should.append(Q('prefix', normalized_name=q))\n\n query = request.es.query(\"dis_max\", queries=should)\n query = query.suggest(\"name_suggestion\", q, term={\"field\": \"name\"})\n else:\n query = request.es.query()\n\n if request.params.get(\"o\"):\n sort_key = request.params[\"o\"]\n if sort_key.startswith(\"-\"):\n sort = {\n sort_key[1:]: {\n \"order\": \"desc\",\n \"unmapped_type\": \"long\",\n },\n }\n else:\n sort = {\n sort_key: {\n \"unmapped_type\": \"long\",\n }\n }\n\n query = query.sort(sort)\n\n # Require match to all specified classifiers\n for classifier in request.params.getall(\"c\"):\n query = query.filter(\"terms\", classifiers=[classifier])\n\n try:\n page_num = int(request.params.get(\"page\", 1))\n except ValueError:\n raise HTTPBadRequest(\"'page' must be an integer.\")\n\n page = ElasticsearchPage(\n query,\n page=page_num,\n url_maker=paginate_url_factory(request),\n )\n\n if page.page_count and page_num > page.page_count:\n return HTTPNotFound()\n\n available_filters = collections.defaultdict(list)\n\n classifiers_q = (\n request.db.query(Classifier)\n .with_entities(Classifier.classifier)\n .filter(\n exists([release_classifiers.c.trove_id])\n .where(release_classifiers.c.trove_id == Classifier.id)\n )\n .order_by(Classifier.classifier)\n )\n\n for cls in classifiers_q:\n first, *_ = cls.classifier.split(' :: ')\n available_filters[first].append(cls.classifier)\n\n def filter_key(item):\n try:\n return 0, SEARCH_FILTER_ORDER.index(item[0]), item[0]\n except ValueError:\n return 1, 0, item[0]\n\n return {\n \"page\": page,\n \"term\": q,\n \"order\": request.params.get(\"o\", ''),\n \"available_filters\": sorted(available_filters.items(), key=filter_key),\n \"applied_filters\": request.params.getall(\"c\"),\n }\n\n\n@view_config(\n route_name=\"includes.current-user-indicator\",\n renderer=\"includes/current-user-indicator.html\",\n uses_session=True,\n)\ndef current_user_indicator(request):\n return {}\n\n\n@view_config(\n route_name=\"includes.flash-messages\",\n renderer=\"includes/flash-messages.html\",\n uses_session=True,\n)\ndef flash_messages(request):\n return {}\n\n\n@view_config(route_name=\"health\", renderer=\"string\")\ndef health(request):\n # This will ensure that we can access the database and run queries against\n # it without 
doing anything that will take a lock or block other queries.\n request.db.execute(\"SELECT 1\")\n\n # Nothing will actually check this, but it's a little nicer to have\n # something to return besides an empty body.\n return \"OK\"\n\n\n@view_config(route_name=\"force-status\")\ndef force_status(request):\n try:\n raise exception_response(int(request.matchdict[\"status\"]))\n except KeyError:\n raise exception_response(404) from None\n", "path": "warehouse/views.py"}]}
| 3,955 | 143 |
gh_patches_debug_58129 | rasdani/github-patches | git_diff | alibaba__FederatedScope-496 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The message asking for local pretraining is missing the "content" parameter when training a graph model in distributed mode.
If there is no "content" parameter, ValueError('The data type {} has not been supported.'.format(type(value))) is raised in the Message.create_by_type() function.
--- END ISSUE ---
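(Editor's note: a minimal, self-contained sketch of the failure mode described above; `dispatch_scalar` is an illustrative stand-in for the scalar branch of `Message.create_by_type()` and is not part of FederatedScope.)

```python
import numpy as np

def dispatch_scalar(value):
    # Mirrors the final branch of Message.create_by_type(): only int/str/float
    # scalars are accepted, so a content of None falls through to ValueError.
    if type(value) in [int, np.int32]:
        return 'int', value
    elif type(value) in [str]:
        return 'str', value
    elif type(value) in [float, np.float32]:
        return 'float', value
    raise ValueError('The data type {} has not been supported.'.format(type(value)))

print(dispatch_scalar('None'))   # ('str', 'None') -- a plain string serializes fine
try:
    dispatch_scalar(None)        # the missing "content" case from the issue
except ValueError as exc:
    print(exc)                   # The data type <class 'NoneType'> has not been supported.
```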
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `federatedscope/core/message.py`
Content:
```
1 import json
2 import numpy as np
3 from federatedscope.core.proto import gRPC_comm_manager_pb2
4
5
6 class Message(object):
7 """
8 The data exchanged during an FL course are abstracted as 'Message' in
9 FederatedScope.
10 A message object includes:
11 msg_type: The type of message, which is used to trigger the
12 corresponding handlers of server/client
13 sender: The sender's ID
14 receiver: The receiver's ID
15 state: The training round of the message, which is determined by
16 the sender and used to filter out the outdated messages.
17 strategy: redundant attribute
18 """
19 def __init__(self,
20 msg_type=None,
21 sender=0,
22 receiver=0,
23 state=0,
24 content=None,
25 timestamp=0,
26 strategy=None):
27 self._msg_type = msg_type
28 self._sender = sender
29 self._receiver = receiver
30 self._state = state
31 self._content = content
32 self._timestamp = timestamp
33 self._strategy = strategy
34
35 @property
36 def msg_type(self):
37 return self._msg_type
38
39 @msg_type.setter
40 def msg_type(self, value):
41 self._msg_type = value
42
43 @property
44 def sender(self):
45 return self._sender
46
47 @sender.setter
48 def sender(self, value):
49 self._sender = value
50
51 @property
52 def receiver(self):
53 return self._receiver
54
55 @receiver.setter
56 def receiver(self, value):
57 self._receiver = value
58
59 @property
60 def state(self):
61 return self._state
62
63 @state.setter
64 def state(self, value):
65 self._state = value
66
67 @property
68 def content(self):
69 return self._content
70
71 @content.setter
72 def content(self, value):
73 self._content = value
74
75 @property
76 def timestamp(self):
77 return self._timestamp
78
79 @timestamp.setter
80 def timestamp(self, value):
81 assert isinstance(value, int) or isinstance(value, float), \
82 "We only support an int or a float value for timestamp"
83 self._timestamp = value
84
85 @property
86 def strategy(self):
87 return self._strategy
88
89 @strategy.setter
90 def strategy(self, value):
91 self._strategy = value
92
93 def __lt__(self, other):
94 if self.timestamp != other.timestamp:
95 return self.timestamp < other.timestamp
96 else:
97 return self.state < other.state
98
99 def transform_to_list(self, x):
100 if isinstance(x, list) or isinstance(x, tuple):
101 return [self.transform_to_list(each_x) for each_x in x]
102 elif isinstance(x, dict):
103 for key in x.keys():
104 x[key] = self.transform_to_list(x[key])
105 return x
106 else:
107 if hasattr(x, 'tolist'):
108 return x.tolist()
109 else:
110 return x
111
112 def msg_to_json(self, to_list=False):
113 if to_list:
114 self.content = self.transform_to_list(self.content)
115
116 json_msg = {
117 'msg_type': self.msg_type,
118 'sender': self.sender,
119 'receiver': self.receiver,
120 'state': self.state,
121 'content': self.content,
122 'timestamp': self.timestamp,
123 'strategy': self.strategy,
124 }
125 return json.dumps(json_msg)
126
127 def json_to_msg(self, json_string):
128 json_msg = json.loads(json_string)
129 self.msg_type = json_msg['msg_type']
130 self.sender = json_msg['sender']
131 self.receiver = json_msg['receiver']
132 self.state = json_msg['state']
133 self.content = json_msg['content']
134 self.timestamp = json_msg['timestamp']
135 self.strategy = json_msg['strategy']
136
137 def create_by_type(self, value, nested=False):
138 if isinstance(value, dict):
139 if isinstance(list(value.keys())[0], str):
140 m_dict = gRPC_comm_manager_pb2.mDict_keyIsString()
141 key_type = 'string'
142 else:
143 m_dict = gRPC_comm_manager_pb2.mDict_keyIsInt()
144 key_type = 'int'
145
146 for key in value.keys():
147 m_dict.dict_value[key].MergeFrom(
148 self.create_by_type(value[key], nested=True))
149 if nested:
150 msg_value = gRPC_comm_manager_pb2.MsgValue()
151 if key_type == 'string':
152 msg_value.dict_msg_stringkey.MergeFrom(m_dict)
153 else:
154 msg_value.dict_msg_intkey.MergeFrom(m_dict)
155 return msg_value
156 else:
157 return m_dict
158 elif isinstance(value, list) or isinstance(value, tuple):
159 m_list = gRPC_comm_manager_pb2.mList()
160 for each in value:
161 m_list.list_value.append(self.create_by_type(each,
162 nested=True))
163 if nested:
164 msg_value = gRPC_comm_manager_pb2.MsgValue()
165 msg_value.list_msg.MergeFrom(m_list)
166 return msg_value
167 else:
168 return m_list
169 else:
170 m_single = gRPC_comm_manager_pb2.mSingle()
171 if type(value) in [int, np.int32]:
172 m_single.int_value = value
173 elif type(value) in [str]:
174 m_single.str_value = value
175 elif type(value) in [float, np.float32]:
176 m_single.float_value = value
177 else:
178 raise ValueError(
179 'The data type {} has not been supported.'.format(
180 type(value)))
181
182 if nested:
183 msg_value = gRPC_comm_manager_pb2.MsgValue()
184 msg_value.single_msg.MergeFrom(m_single)
185 return msg_value
186 else:
187 return m_single
188
189 def build_msg_value(self, value):
190 msg_value = gRPC_comm_manager_pb2.MsgValue()
191
192 if isinstance(value, list) or isinstance(value, tuple):
193 msg_value.list_msg.MergeFrom(self.create_by_type(value))
194 elif isinstance(value, dict):
195 if isinstance(list(value.keys())[0], str):
196 msg_value.dict_msg_stringkey.MergeFrom(
197 self.create_by_type(value))
198 else:
199 msg_value.dict_msg_intkey.MergeFrom(self.create_by_type(value))
200 else:
201 msg_value.single_msg.MergeFrom(self.create_by_type(value))
202
203 return msg_value
204
205 def transform(self, to_list=False):
206 if to_list:
207 self.content = self.transform_to_list(self.content)
208
209 splited_msg = gRPC_comm_manager_pb2.MessageRequest() # map/dict
210 splited_msg.msg['sender'].MergeFrom(self.build_msg_value(self.sender))
211 splited_msg.msg['receiver'].MergeFrom(
212 self.build_msg_value(self.receiver))
213 splited_msg.msg['state'].MergeFrom(self.build_msg_value(self.state))
214 splited_msg.msg['msg_type'].MergeFrom(
215 self.build_msg_value(self.msg_type))
216 splited_msg.msg['content'].MergeFrom(self.build_msg_value(
217 self.content))
218 splited_msg.msg['timestamp'].MergeFrom(
219 self.build_msg_value(self.timestamp))
220 return splited_msg
221
222 def _parse_msg(self, value):
223 if isinstance(value, gRPC_comm_manager_pb2.MsgValue) or isinstance(
224 value, gRPC_comm_manager_pb2.mSingle):
225 return self._parse_msg(getattr(value, value.WhichOneof("type")))
226 elif isinstance(value, gRPC_comm_manager_pb2.mList):
227 return [self._parse_msg(each) for each in value.list_value]
228 elif isinstance(value, gRPC_comm_manager_pb2.mDict_keyIsString) or \
229 isinstance(value, gRPC_comm_manager_pb2.mDict_keyIsInt):
230 return {
231 k: self._parse_msg(value.dict_value[k])
232 for k in value.dict_value
233 }
234 else:
235 return value
236
237 def parse(self, received_msg):
238 self.sender = self._parse_msg(received_msg['sender'])
239 self.receiver = self._parse_msg(received_msg['receiver'])
240 self.msg_type = self._parse_msg(received_msg['msg_type'])
241 self.state = self._parse_msg(received_msg['state'])
242 self.content = self._parse_msg(received_msg['content'])
243 self.timestamp = self._parse_msg(received_msg['timestamp'])
244
245 def count_bytes(self):
246 """
247 calculate the message bytes to be sent/received
248 :return: tuple of bytes of the message to be sent and received
249 """
250 from pympler import asizeof
251 download_bytes = asizeof.asizeof(self.content)
252 upload_cnt = len(self.receiver) if isinstance(self.receiver,
253 list) else 1
254 upload_bytes = download_bytes * upload_cnt
255 return download_bytes, upload_bytes
256
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/federatedscope/core/message.py b/federatedscope/core/message.py
--- a/federatedscope/core/message.py
+++ b/federatedscope/core/message.py
@@ -21,7 +21,7 @@
sender=0,
receiver=0,
state=0,
- content=None,
+ content='None',
timestamp=0,
strategy=None):
self._msg_type = msg_type
|
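(Editor's note: a hedged illustration of the effect of the one-line default change above; the message type string is arbitrary, and this assumes a FederatedScope checkout with the patch applied and its gRPC/protobuf dependencies installed.)

```python
from federatedscope.core.message import Message

msg = Message(msg_type='ping', sender=0, receiver=1, state=0)
print(repr(msg.content))  # "'None'" after the patch; it was None before
# With a str content, the scalar branch of create_by_type() uses str_value,
# so building the gRPC request no longer raises ValueError.
_ = msg.transform()
```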
{"golden_diff": "diff --git a/federatedscope/core/message.py b/federatedscope/core/message.py\n--- a/federatedscope/core/message.py\n+++ b/federatedscope/core/message.py\n@@ -21,7 +21,7 @@\n sender=0,\n receiver=0,\n state=0,\n- content=None,\n+ content='None',\n timestamp=0,\n strategy=None):\n self._msg_type = msg_type\n", "issue": "Message asked for local pretraining is missing the \"content\" para when train a graph model in distributed mode?\n\r\nIf no \"content\" para, there will raise ValueError('The data type {} has not been supported.'.format(type(value))) in Message.create_by_type() function.\n", "before_files": [{"content": "import json\nimport numpy as np\nfrom federatedscope.core.proto import gRPC_comm_manager_pb2\n\n\nclass Message(object):\n \"\"\"\n The data exchanged during an FL course are abstracted as 'Message' in\n FederatedScope.\n A message object includes:\n msg_type: The type of message, which is used to trigger the\n corresponding handlers of server/client\n sender: The sender's ID\n receiver: The receiver's ID\n state: The training round of the message, which is determined by\n the sender and used to filter out the outdated messages.\n strategy: redundant attribute\n \"\"\"\n def __init__(self,\n msg_type=None,\n sender=0,\n receiver=0,\n state=0,\n content=None,\n timestamp=0,\n strategy=None):\n self._msg_type = msg_type\n self._sender = sender\n self._receiver = receiver\n self._state = state\n self._content = content\n self._timestamp = timestamp\n self._strategy = strategy\n\n @property\n def msg_type(self):\n return self._msg_type\n\n @msg_type.setter\n def msg_type(self, value):\n self._msg_type = value\n\n @property\n def sender(self):\n return self._sender\n\n @sender.setter\n def sender(self, value):\n self._sender = value\n\n @property\n def receiver(self):\n return self._receiver\n\n @receiver.setter\n def receiver(self, value):\n self._receiver = value\n\n @property\n def state(self):\n return self._state\n\n @state.setter\n def state(self, value):\n self._state = value\n\n @property\n def content(self):\n return self._content\n\n @content.setter\n def content(self, value):\n self._content = value\n\n @property\n def timestamp(self):\n return self._timestamp\n\n @timestamp.setter\n def timestamp(self, value):\n assert isinstance(value, int) or isinstance(value, float), \\\n \"We only support an int or a float value for timestamp\"\n self._timestamp = value\n\n @property\n def strategy(self):\n return self._strategy\n\n @strategy.setter\n def strategy(self, value):\n self._strategy = value\n\n def __lt__(self, other):\n if self.timestamp != other.timestamp:\n return self.timestamp < other.timestamp\n else:\n return self.state < other.state\n\n def transform_to_list(self, x):\n if isinstance(x, list) or isinstance(x, tuple):\n return [self.transform_to_list(each_x) for each_x in x]\n elif isinstance(x, dict):\n for key in x.keys():\n x[key] = self.transform_to_list(x[key])\n return x\n else:\n if hasattr(x, 'tolist'):\n return x.tolist()\n else:\n return x\n\n def msg_to_json(self, to_list=False):\n if to_list:\n self.content = self.transform_to_list(self.content)\n\n json_msg = {\n 'msg_type': self.msg_type,\n 'sender': self.sender,\n 'receiver': self.receiver,\n 'state': self.state,\n 'content': self.content,\n 'timestamp': self.timestamp,\n 'strategy': self.strategy,\n }\n return json.dumps(json_msg)\n\n def json_to_msg(self, json_string):\n json_msg = json.loads(json_string)\n self.msg_type = json_msg['msg_type']\n self.sender = 
json_msg['sender']\n self.receiver = json_msg['receiver']\n self.state = json_msg['state']\n self.content = json_msg['content']\n self.timestamp = json_msg['timestamp']\n self.strategy = json_msg['strategy']\n\n def create_by_type(self, value, nested=False):\n if isinstance(value, dict):\n if isinstance(list(value.keys())[0], str):\n m_dict = gRPC_comm_manager_pb2.mDict_keyIsString()\n key_type = 'string'\n else:\n m_dict = gRPC_comm_manager_pb2.mDict_keyIsInt()\n key_type = 'int'\n\n for key in value.keys():\n m_dict.dict_value[key].MergeFrom(\n self.create_by_type(value[key], nested=True))\n if nested:\n msg_value = gRPC_comm_manager_pb2.MsgValue()\n if key_type == 'string':\n msg_value.dict_msg_stringkey.MergeFrom(m_dict)\n else:\n msg_value.dict_msg_intkey.MergeFrom(m_dict)\n return msg_value\n else:\n return m_dict\n elif isinstance(value, list) or isinstance(value, tuple):\n m_list = gRPC_comm_manager_pb2.mList()\n for each in value:\n m_list.list_value.append(self.create_by_type(each,\n nested=True))\n if nested:\n msg_value = gRPC_comm_manager_pb2.MsgValue()\n msg_value.list_msg.MergeFrom(m_list)\n return msg_value\n else:\n return m_list\n else:\n m_single = gRPC_comm_manager_pb2.mSingle()\n if type(value) in [int, np.int32]:\n m_single.int_value = value\n elif type(value) in [str]:\n m_single.str_value = value\n elif type(value) in [float, np.float32]:\n m_single.float_value = value\n else:\n raise ValueError(\n 'The data type {} has not been supported.'.format(\n type(value)))\n\n if nested:\n msg_value = gRPC_comm_manager_pb2.MsgValue()\n msg_value.single_msg.MergeFrom(m_single)\n return msg_value\n else:\n return m_single\n\n def build_msg_value(self, value):\n msg_value = gRPC_comm_manager_pb2.MsgValue()\n\n if isinstance(value, list) or isinstance(value, tuple):\n msg_value.list_msg.MergeFrom(self.create_by_type(value))\n elif isinstance(value, dict):\n if isinstance(list(value.keys())[0], str):\n msg_value.dict_msg_stringkey.MergeFrom(\n self.create_by_type(value))\n else:\n msg_value.dict_msg_intkey.MergeFrom(self.create_by_type(value))\n else:\n msg_value.single_msg.MergeFrom(self.create_by_type(value))\n\n return msg_value\n\n def transform(self, to_list=False):\n if to_list:\n self.content = self.transform_to_list(self.content)\n\n splited_msg = gRPC_comm_manager_pb2.MessageRequest() # map/dict\n splited_msg.msg['sender'].MergeFrom(self.build_msg_value(self.sender))\n splited_msg.msg['receiver'].MergeFrom(\n self.build_msg_value(self.receiver))\n splited_msg.msg['state'].MergeFrom(self.build_msg_value(self.state))\n splited_msg.msg['msg_type'].MergeFrom(\n self.build_msg_value(self.msg_type))\n splited_msg.msg['content'].MergeFrom(self.build_msg_value(\n self.content))\n splited_msg.msg['timestamp'].MergeFrom(\n self.build_msg_value(self.timestamp))\n return splited_msg\n\n def _parse_msg(self, value):\n if isinstance(value, gRPC_comm_manager_pb2.MsgValue) or isinstance(\n value, gRPC_comm_manager_pb2.mSingle):\n return self._parse_msg(getattr(value, value.WhichOneof(\"type\")))\n elif isinstance(value, gRPC_comm_manager_pb2.mList):\n return [self._parse_msg(each) for each in value.list_value]\n elif isinstance(value, gRPC_comm_manager_pb2.mDict_keyIsString) or \\\n isinstance(value, gRPC_comm_manager_pb2.mDict_keyIsInt):\n return {\n k: self._parse_msg(value.dict_value[k])\n for k in value.dict_value\n }\n else:\n return value\n\n def parse(self, received_msg):\n self.sender = self._parse_msg(received_msg['sender'])\n self.receiver = 
self._parse_msg(received_msg['receiver'])\n self.msg_type = self._parse_msg(received_msg['msg_type'])\n self.state = self._parse_msg(received_msg['state'])\n self.content = self._parse_msg(received_msg['content'])\n self.timestamp = self._parse_msg(received_msg['timestamp'])\n\n def count_bytes(self):\n \"\"\"\n calculate the message bytes to be sent/received\n :return: tuple of bytes of the message to be sent and received\n \"\"\"\n from pympler import asizeof\n download_bytes = asizeof.asizeof(self.content)\n upload_cnt = len(self.receiver) if isinstance(self.receiver,\n list) else 1\n upload_bytes = download_bytes * upload_cnt\n return download_bytes, upload_bytes\n", "path": "federatedscope/core/message.py"}], "after_files": [{"content": "import json\nimport numpy as np\nfrom federatedscope.core.proto import gRPC_comm_manager_pb2\n\n\nclass Message(object):\n \"\"\"\n The data exchanged during an FL course are abstracted as 'Message' in\n FederatedScope.\n A message object includes:\n msg_type: The type of message, which is used to trigger the\n corresponding handlers of server/client\n sender: The sender's ID\n receiver: The receiver's ID\n state: The training round of the message, which is determined by\n the sender and used to filter out the outdated messages.\n strategy: redundant attribute\n \"\"\"\n def __init__(self,\n msg_type=None,\n sender=0,\n receiver=0,\n state=0,\n content='None',\n timestamp=0,\n strategy=None):\n self._msg_type = msg_type\n self._sender = sender\n self._receiver = receiver\n self._state = state\n self._content = content\n self._timestamp = timestamp\n self._strategy = strategy\n\n @property\n def msg_type(self):\n return self._msg_type\n\n @msg_type.setter\n def msg_type(self, value):\n self._msg_type = value\n\n @property\n def sender(self):\n return self._sender\n\n @sender.setter\n def sender(self, value):\n self._sender = value\n\n @property\n def receiver(self):\n return self._receiver\n\n @receiver.setter\n def receiver(self, value):\n self._receiver = value\n\n @property\n def state(self):\n return self._state\n\n @state.setter\n def state(self, value):\n self._state = value\n\n @property\n def content(self):\n return self._content\n\n @content.setter\n def content(self, value):\n self._content = value\n\n @property\n def timestamp(self):\n return self._timestamp\n\n @timestamp.setter\n def timestamp(self, value):\n assert isinstance(value, int) or isinstance(value, float), \\\n \"We only support an int or a float value for timestamp\"\n self._timestamp = value\n\n @property\n def strategy(self):\n return self._strategy\n\n @strategy.setter\n def strategy(self, value):\n self._strategy = value\n\n def __lt__(self, other):\n if self.timestamp != other.timestamp:\n return self.timestamp < other.timestamp\n else:\n return self.state < other.state\n\n def transform_to_list(self, x):\n if isinstance(x, list) or isinstance(x, tuple):\n return [self.transform_to_list(each_x) for each_x in x]\n elif isinstance(x, dict):\n for key in x.keys():\n x[key] = self.transform_to_list(x[key])\n return x\n else:\n if hasattr(x, 'tolist'):\n return x.tolist()\n else:\n return x\n\n def msg_to_json(self, to_list=False):\n if to_list:\n self.content = self.transform_to_list(self.content)\n\n json_msg = {\n 'msg_type': self.msg_type,\n 'sender': self.sender,\n 'receiver': self.receiver,\n 'state': self.state,\n 'content': self.content,\n 'timestamp': self.timestamp,\n 'strategy': self.strategy,\n }\n return json.dumps(json_msg)\n\n def json_to_msg(self, 
json_string):\n json_msg = json.loads(json_string)\n self.msg_type = json_msg['msg_type']\n self.sender = json_msg['sender']\n self.receiver = json_msg['receiver']\n self.state = json_msg['state']\n self.content = json_msg['content']\n self.timestamp = json_msg['timestamp']\n self.strategy = json_msg['strategy']\n\n def create_by_type(self, value, nested=False):\n if isinstance(value, dict):\n if isinstance(list(value.keys())[0], str):\n m_dict = gRPC_comm_manager_pb2.mDict_keyIsString()\n key_type = 'string'\n else:\n m_dict = gRPC_comm_manager_pb2.mDict_keyIsInt()\n key_type = 'int'\n\n for key in value.keys():\n m_dict.dict_value[key].MergeFrom(\n self.create_by_type(value[key], nested=True))\n if nested:\n msg_value = gRPC_comm_manager_pb2.MsgValue()\n if key_type == 'string':\n msg_value.dict_msg_stringkey.MergeFrom(m_dict)\n else:\n msg_value.dict_msg_intkey.MergeFrom(m_dict)\n return msg_value\n else:\n return m_dict\n elif isinstance(value, list) or isinstance(value, tuple):\n m_list = gRPC_comm_manager_pb2.mList()\n for each in value:\n m_list.list_value.append(self.create_by_type(each,\n nested=True))\n if nested:\n msg_value = gRPC_comm_manager_pb2.MsgValue()\n msg_value.list_msg.MergeFrom(m_list)\n return msg_value\n else:\n return m_list\n else:\n m_single = gRPC_comm_manager_pb2.mSingle()\n if type(value) in [int, np.int32]:\n m_single.int_value = value\n elif type(value) in [str]:\n m_single.str_value = value\n elif type(value) in [float, np.float32]:\n m_single.float_value = value\n else:\n raise ValueError(\n 'The data type {} has not been supported.'.format(\n type(value)))\n\n if nested:\n msg_value = gRPC_comm_manager_pb2.MsgValue()\n msg_value.single_msg.MergeFrom(m_single)\n return msg_value\n else:\n return m_single\n\n def build_msg_value(self, value):\n msg_value = gRPC_comm_manager_pb2.MsgValue()\n\n if isinstance(value, list) or isinstance(value, tuple):\n msg_value.list_msg.MergeFrom(self.create_by_type(value))\n elif isinstance(value, dict):\n if isinstance(list(value.keys())[0], str):\n msg_value.dict_msg_stringkey.MergeFrom(\n self.create_by_type(value))\n else:\n msg_value.dict_msg_intkey.MergeFrom(self.create_by_type(value))\n else:\n msg_value.single_msg.MergeFrom(self.create_by_type(value))\n\n return msg_value\n\n def transform(self, to_list=False):\n if to_list:\n self.content = self.transform_to_list(self.content)\n\n splited_msg = gRPC_comm_manager_pb2.MessageRequest() # map/dict\n splited_msg.msg['sender'].MergeFrom(self.build_msg_value(self.sender))\n splited_msg.msg['receiver'].MergeFrom(\n self.build_msg_value(self.receiver))\n splited_msg.msg['state'].MergeFrom(self.build_msg_value(self.state))\n splited_msg.msg['msg_type'].MergeFrom(\n self.build_msg_value(self.msg_type))\n splited_msg.msg['content'].MergeFrom(self.build_msg_value(\n self.content))\n splited_msg.msg['timestamp'].MergeFrom(\n self.build_msg_value(self.timestamp))\n return splited_msg\n\n def _parse_msg(self, value):\n if isinstance(value, gRPC_comm_manager_pb2.MsgValue) or isinstance(\n value, gRPC_comm_manager_pb2.mSingle):\n return self._parse_msg(getattr(value, value.WhichOneof(\"type\")))\n elif isinstance(value, gRPC_comm_manager_pb2.mList):\n return [self._parse_msg(each) for each in value.list_value]\n elif isinstance(value, gRPC_comm_manager_pb2.mDict_keyIsString) or \\\n isinstance(value, gRPC_comm_manager_pb2.mDict_keyIsInt):\n return {\n k: self._parse_msg(value.dict_value[k])\n for k in value.dict_value\n }\n else:\n return value\n\n def parse(self, received_msg):\n 
self.sender = self._parse_msg(received_msg['sender'])\n self.receiver = self._parse_msg(received_msg['receiver'])\n self.msg_type = self._parse_msg(received_msg['msg_type'])\n self.state = self._parse_msg(received_msg['state'])\n self.content = self._parse_msg(received_msg['content'])\n self.timestamp = self._parse_msg(received_msg['timestamp'])\n\n def count_bytes(self):\n \"\"\"\n calculate the message bytes to be sent/received\n :return: tuple of bytes of the message to be sent and received\n \"\"\"\n from pympler import asizeof\n download_bytes = asizeof.asizeof(self.content)\n upload_cnt = len(self.receiver) if isinstance(self.receiver,\n list) else 1\n upload_bytes = download_bytes * upload_cnt\n return download_bytes, upload_bytes\n", "path": "federatedscope/core/message.py"}]}
| 2,834 | 96 |
gh_patches_debug_7599 | rasdani/github-patches | git_diff | ansible__ansible-modules-core-3257 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
source_control/subversion.py needs to reset LC_MESSAGES
##### Issue Type:
<!-- Please pick one and delete the rest: -->
- Bug Report
##### Plugin Name:
<!-- Name of the plugin/module/task -->
subversion
##### Ansible Version:
```
ansible-playbook 2.1.0 (devel 86080fbaa9) last updated 2016/03/15 19:12:05 (GMT +200)
lib/ansible/modules/core: (detached HEAD a8841e6834) last updated 2016/03/15 19:40:46 (GMT +200)
lib/ansible/modules/extras: (detached HEAD 45bba8ec64) last updated 2016/03/15 19:40:46 (GMT +200)
config file = /var/tmp/subversion.bug/ansible.cfg
configured module search path = Default w/o overrides
```
##### Ansible Configuration:
<!-- Please mention any settings you've changed/added/removed in ansible.cfg
(or using the ANSIBLE_* environment variables). -->
Default
##### Environment:
<!-- Please mention the OS you are running Ansible from, and the OS you are managing,
or say “N/A” for anything that isn't platform-specific. -->
Debian GNU Linux 8.3
##### Summary:
<!-- Please explain the problem briefly. -->
Subversion module needs to reset LC_MESSAGES, not just LANG, since it screenscrapes the output of the svn(1) client. #2462 was the same issue for git.
##### Steps To Reproduce:
<!-- For bugs, please show exactly how to reproduce the problem.
For new features, show how the feature would be used. -->
```
- hosts: all
tasks:
- shell: rm -fr {{ playbook_dir }}/repo {{ playbook_dir }}/working && svnadmin create {{ playbook_dir }}/repo
- subversion: repo=file://{{ playbook_dir }}/repo dest={{ playbook_dir }}/working
EOF
```
<!-- You can also paste gist.github.com links for larger files. -->
##### Expected Results:
<!-- What did you expect to happen when running the steps above? -->
Success
##### Actual Results:
<!-- What actually happened? If possible run with high verbosity (-vvvv) -->
```
$ LANG=fr_FR.utf8 ansible-playbook -i localhost, svn_co.yml
PLAY [all] *********************************************************************
TASK [setup] *******************************************************************
ok: [localhost]
TASK [command] *****************************************************************
changed: [localhost]
[WARNING]: Consider using file module with state=absent rather than running rm
TASK [subversion] **************************************************************
fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "module_stderr": "", "module_stdout": "Traceback (most recent call last):\r\n File \"/home/quinot/.ansible/tmp/ansible-tmp-1458067626.47-249928368830364/subversion\", line 2323, in <module>\r\n main()\r\n File \"/home/quinot/.ansible/tmp/ansible-tmp-1458067626.47-249928368830364/subversion\", line 259, in main\r\n after = svn.get_revision()\r\n File \"/home/quinot/.ansible/tmp/ansible-tmp-1458067626.47-249928368830364/subversion\", line 167, in get_revision\r\n rev = re.search(r'^Revision:.*$', text, re.MULTILINE).group(0)\r\nAttributeError: 'NoneType' object has no attribute 'group'\r\n", "msg": "MODULE FAILURE", "parsed": false}
NO MORE HOSTS LEFT *************************************************************
to retry, use: --limit @svn_co.retry
PLAY RECAP *********************************************************************
localhost : ok=2 changed=1 unreachable=0 failed=1
```
--- END ISSUE ---
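(Editor's note: a short, self-contained sketch of the locale problem described above, outside of Ansible; it assumes an `svn` binary on PATH. glibc resolves the message locale as LC_ALL > LC_MESSAGES > LANG, so overriding LANG alone can still yield translated output whenever LC_ALL or LC_MESSAGES is set, and regexes such as `^Revision:` then fail to match.)

```python
import os
import subprocess

def svn_info(dest):
    # Force the C locale for every variable that can affect svn's messages,
    # so the output below is safe to screen-scrape.
    env = dict(os.environ)
    env.update(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
    return subprocess.check_output(['svn', 'info', dest], env=env,
                                   universal_newlines=True)

print(svn_info('.'))  # lines like "Revision: 42" regardless of the caller's locale
```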
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `source_control/subversion.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # (c) 2012, Michael DeHaan <[email protected]>
5 #
6 # This file is part of Ansible
7 #
8 # Ansible is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # Ansible is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
20
21 DOCUMENTATION = '''
22 ---
23 module: subversion
24 short_description: Deploys a subversion repository.
25 description:
26 - Deploy given repository URL / revision to dest. If dest exists, update to the specified revision, otherwise perform a checkout.
27 version_added: "0.7"
28 author: "Dane Summers (@dsummersl) <[email protected]>"
29 notes:
30 - Requires I(svn) to be installed on the client.
31 requirements: []
32 options:
33 repo:
34 description:
35 - The subversion URL to the repository.
36 required: true
37 aliases: [ name, repository ]
38 default: null
39 dest:
40 description:
41 - Absolute path where the repository should be deployed.
42 required: true
43 default: null
44 revision:
45 description:
46 - Specific revision to checkout.
47 required: false
48 default: HEAD
49 aliases: [ version ]
50 force:
51 description:
52 - If C(yes), modified files will be discarded. If C(no), module will fail if it encounters modified files.
53 Prior to 1.9 the default was `yes`.
54 required: false
55 default: "no"
56 choices: [ "yes", "no" ]
57 username:
58 description:
59 - --username parameter passed to svn.
60 required: false
61 default: null
62 password:
63 description:
64 - --password parameter passed to svn.
65 required: false
66 default: null
67 executable:
68 required: false
69 default: null
70 version_added: "1.4"
71 description:
72 - Path to svn executable to use. If not supplied,
73 the normal mechanism for resolving binary paths will be used.
74 export:
75 required: false
76 default: "no"
77 choices: [ "yes", "no" ]
78 version_added: "1.6"
79 description:
80 - If C(yes), do export instead of checkout/update.
81 switch:
82 required: false
83 default: "yes"
84 choices: [ "yes", "no" ]
85 version_added: "2.0"
86 description:
87 - If C(no), do not call svn switch before update.
88 '''
89
90 EXAMPLES = '''
91 # Checkout subversion repository to specified folder.
92 - subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/checkout
93
94 # Export subversion directory to folder
95 - subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/export export=True
96 '''
97
98 import re
99 import tempfile
100
101
102 class Subversion(object):
103 def __init__(
104 self, module, dest, repo, revision, username, password, svn_path):
105 self.module = module
106 self.dest = dest
107 self.repo = repo
108 self.revision = revision
109 self.username = username
110 self.password = password
111 self.svn_path = svn_path
112
113 def _exec(self, args, check_rc=True):
114 '''Execute a subversion command, and return output. If check_rc is False, returns the return code instead of the output.'''
115 bits = [
116 self.svn_path,
117 '--non-interactive',
118 '--trust-server-cert',
119 '--no-auth-cache',
120 ]
121 if self.username:
122 bits.extend(["--username", self.username])
123 if self.password:
124 bits.extend(["--password", self.password])
125 bits.extend(args)
126 rc, out, err = self.module.run_command(bits, check_rc)
127 if check_rc:
128 return out.splitlines()
129 else:
130 return rc
131
132 def is_svn_repo(self):
133 '''Checks if path is a SVN Repo.'''
134 rc = self._exec(["info", self.dest], check_rc=False)
135 return rc == 0
136
137 def checkout(self):
138 '''Creates new svn working directory if it does not already exist.'''
139 self._exec(["checkout", "-r", self.revision, self.repo, self.dest])
140
141 def export(self, force=False):
142 '''Export svn repo to directory'''
143 cmd = ["export"]
144 if force:
145 cmd.append("--force")
146 cmd.extend(["-r", self.revision, self.repo, self.dest])
147
148 self._exec(cmd)
149
150 def switch(self):
151 '''Change working directory's repo.'''
152 # switch to ensure we are pointing at correct repo.
153 self._exec(["switch", self.repo, self.dest])
154
155 def update(self):
156 '''Update existing svn working directory.'''
157 self._exec(["update", "-r", self.revision, self.dest])
158
159 def revert(self):
160 '''Revert svn working directory.'''
161 self._exec(["revert", "-R", self.dest])
162
163 def get_revision(self):
164 '''Revision and URL of subversion working directory.'''
165 text = '\n'.join(self._exec(["info", self.dest]))
166 rev = re.search(r'^Revision:.*$', text, re.MULTILINE).group(0)
167 url = re.search(r'^URL:.*$', text, re.MULTILINE).group(0)
168 return rev, url
169
170 def has_local_mods(self):
171 '''True if revisioned files have been added or modified. Unrevisioned files are ignored.'''
172 lines = self._exec(["status", "--quiet", "--ignore-externals", self.dest])
173 # The --quiet option will return only modified files.
174 # Match only revisioned files, i.e. ignore status '?'.
175 regex = re.compile(r'^[^?X]')
176 # Has local mods if more than 0 modifed revisioned files.
177 return len(filter(regex.match, lines)) > 0
178
179 def needs_update(self):
180 curr, url = self.get_revision()
181 out2 = '\n'.join(self._exec(["info", "-r", "HEAD", self.dest]))
182 head = re.search(r'^Revision:.*$', out2, re.MULTILINE).group(0)
183 rev1 = int(curr.split(':')[1].strip())
184 rev2 = int(head.split(':')[1].strip())
185 change = False
186 if rev1 < rev2:
187 change = True
188 return change, curr, head
189
190
191 # ===========================================
192
193 def main():
194 module = AnsibleModule(
195 argument_spec=dict(
196 dest=dict(required=True, type='path'),
197 repo=dict(required=True, aliases=['name', 'repository']),
198 revision=dict(default='HEAD', aliases=['rev', 'version']),
199 force=dict(default='no', type='bool'),
200 username=dict(required=False),
201 password=dict(required=False),
202 executable=dict(default=None, type='path'),
203 export=dict(default=False, required=False, type='bool'),
204 switch=dict(default=True, required=False, type='bool'),
205 ),
206 supports_check_mode=True
207 )
208
209 dest = module.params['dest']
210 repo = module.params['repo']
211 revision = module.params['revision']
212 force = module.params['force']
213 username = module.params['username']
214 password = module.params['password']
215 svn_path = module.params['executable'] or module.get_bin_path('svn', True)
216 export = module.params['export']
217 switch = module.params['switch']
218
219 os.environ['LANG'] = 'C'
220 svn = Subversion(module, dest, repo, revision, username, password, svn_path)
221
222 if export or not os.path.exists(dest):
223 before = None
224 local_mods = False
225 if module.check_mode:
226 module.exit_json(changed=True)
227 if not export:
228 svn.checkout()
229 else:
230 svn.export(force=force)
231 elif svn.is_svn_repo():
232 # Order matters. Need to get local mods before switch to avoid false
233 # positives. Need to switch before revert to ensure we are reverting to
234 # correct repo.
235 if module.check_mode:
236 check, before, after = svn.needs_update()
237 module.exit_json(changed=check, before=before, after=after)
238 before = svn.get_revision()
239 local_mods = svn.has_local_mods()
240 if switch:
241 svn.switch()
242 if local_mods:
243 if force:
244 svn.revert()
245 else:
246 module.fail_json(msg="ERROR: modified files exist in the repository.")
247 svn.update()
248 else:
249 module.fail_json(msg="ERROR: %s folder already exists, but its not a subversion repository." % (dest, ))
250
251 if export:
252 module.exit_json(changed=True)
253 else:
254 after = svn.get_revision()
255 changed = before != after or local_mods
256 module.exit_json(changed=changed, before=before, after=after)
257
258 # import module snippets
259 from ansible.module_utils.basic import *
260 main()
261
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/source_control/subversion.py b/source_control/subversion.py
--- a/source_control/subversion.py
+++ b/source_control/subversion.py
@@ -216,7 +216,10 @@
export = module.params['export']
switch = module.params['switch']
- os.environ['LANG'] = 'C'
+ # We screenscrape a huge amount of svn commands so use C locale anytime we
+ # call run_command()
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
svn = Subversion(module, dest, repo, revision, username, password, svn_path)
if export or not os.path.exists(dest):
|
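(Editor's note: a sketch of how the fix above is meant to be used; `run_command_environ_update` is merged into the environment of every `run_command()` call. The snippet only runs inside a module invoked by Ansible, since `AnsibleModule` reads its arguments from the controller.)

```python
from ansible.module_utils.basic import AnsibleModule

def main():
    module = AnsibleModule(argument_spec=dict(dest=dict(required=True, type='path')))
    # Every subsequent run_command() call now executes with a C locale.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C',
                                             LC_MESSAGES='C', LC_CTYPE='C')
    rc, out, err = module.run_command(['svn', 'info', module.params['dest']])
    module.exit_json(changed=False, stdout=out)
```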
{"golden_diff": "diff --git a/source_control/subversion.py b/source_control/subversion.py\n--- a/source_control/subversion.py\n+++ b/source_control/subversion.py\n@@ -216,7 +216,10 @@\n export = module.params['export']\n switch = module.params['switch']\n \n- os.environ['LANG'] = 'C'\n+ # We screenscrape a huge amount of svn commands so use C locale anytime we\n+ # call run_command()\n+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')\n+\n svn = Subversion(module, dest, repo, revision, username, password, svn_path)\n \n if export or not os.path.exists(dest):\n", "issue": "source_control/subversion.py needs to reset LC_MESSAGES\n##### Issue Type:\n\n<!-- Please pick one and delete the rest: -->\n- Bug Report\n##### Plugin Name:\n\n<!-- Name of the plugin/module/task -->\n\nsubversion\n##### Ansible Version:\n\n```\nansible-playbook 2.1.0 (devel 86080fbaa9) last updated 2016/03/15 19:12:05 (GMT +200)\n lib/ansible/modules/core: (detached HEAD a8841e6834) last updated 2016/03/15 19:40:46 (GMT +200)\n lib/ansible/modules/extras: (detached HEAD 45bba8ec64) last updated 2016/03/15 19:40:46 (GMT +200)\n config file = /var/tmp/subversion.bug/ansible.cfg\n configured module search path = Default w/o overrides\n```\n##### Ansible Configuration:\n\n<!-- Please mention any settings you've changed/added/removed in ansible.cfg\n(or using the ANSIBLE_* environment variables). -->\n\nDefault\n##### Environment:\n\n<!-- Please mention the OS you are running Ansible from, and the OS you are managing,\nor say \u201cN/A\u201d for anything that isn't platform-specific. -->\n\nDebian GNU Linux 8.3\n##### Summary:\n\n<!-- Please explain the problem briefly. -->\n\nSubversion module needs to reset LC_MESSAGES, not just LANG, since it screenscrapes the output of the svn(1) client. #2462 was the same issue for git.\n##### Steps To Reproduce:\n\n<!-- For bugs, please show exactly how to reproduce the problem.\nFor new features, show how the feature would be used. -->\n\n```\n- hosts: all\n tasks:\n - shell: rm -fr {{ playbook_dir }}/repo {{ playbook_dir }}/working && svnadmin create {{ playbook_dir }}/repo\n - subversion: repo=file://{{ playbook_dir }}/repo dest={{ playbook_dir }}/working\nEOF\n```\n\n<!-- You can also paste gist.github.com links for larger files. -->\n##### Expected Results:\n\n<!-- What did you expect to happen when running the steps above? -->\n\nSuccess\n##### Actual Results:\n\n<!-- What actually happened? If possible run with high verbosity (-vvvv) -->\n\n```\n$ LANG=fr_FR.utf8 ansible-playbook -i localhost, svn_co.yml \n\nPLAY [all] *********************************************************************\n\nTASK [setup] *******************************************************************\nok: [localhost]\n\nTASK [command] *****************************************************************\nchanged: [localhost]\n [WARNING]: Consider using file module with state=absent rather than running rm\n\n\nTASK [subversion] **************************************************************\nfatal: [localhost]: FAILED! 
=> {\"changed\": false, \"failed\": true, \"module_stderr\": \"\", \"module_stdout\": \"Traceback (most recent call last):\\r\\n File \\\"/home/quinot/.ansible/tmp/ansible-tmp-1458067626.47-249928368830364/subversion\\\", line 2323, in <module>\\r\\n main()\\r\\n File \\\"/home/quinot/.ansible/tmp/ansible-tmp-1458067626.47-249928368830364/subversion\\\", line 259, in main\\r\\n after = svn.get_revision()\\r\\n File \\\"/home/quinot/.ansible/tmp/ansible-tmp-1458067626.47-249928368830364/subversion\\\", line 167, in get_revision\\r\\n rev = re.search(r'^Revision:.*$', text, re.MULTILINE).group(0)\\r\\nAttributeError: 'NoneType' object has no attribute 'group'\\r\\n\", \"msg\": \"MODULE FAILURE\", \"parsed\": false}\n\nNO MORE HOSTS LEFT *************************************************************\n to retry, use: --limit @svn_co.retry\n\nPLAY RECAP *********************************************************************\nlocalhost : ok=2 changed=1 unreachable=0 failed=1\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Michael DeHaan <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: subversion\nshort_description: Deploys a subversion repository.\ndescription:\n - Deploy given repository URL / revision to dest. If dest exists, update to the specified revision, otherwise perform a checkout.\nversion_added: \"0.7\"\nauthor: \"Dane Summers (@dsummersl) <[email protected]>\"\nnotes:\n - Requires I(svn) to be installed on the client.\nrequirements: []\noptions:\n repo:\n description:\n - The subversion URL to the repository.\n required: true\n aliases: [ name, repository ]\n default: null\n dest:\n description:\n - Absolute path where the repository should be deployed.\n required: true\n default: null\n revision:\n description:\n - Specific revision to checkout.\n required: false\n default: HEAD\n aliases: [ version ]\n force:\n description:\n - If C(yes), modified files will be discarded. If C(no), module will fail if it encounters modified files.\n Prior to 1.9 the default was `yes`.\n required: false\n default: \"no\"\n choices: [ \"yes\", \"no\" ]\n username:\n description:\n - --username parameter passed to svn.\n required: false\n default: null\n password:\n description:\n - --password parameter passed to svn.\n required: false\n default: null\n executable:\n required: false\n default: null\n version_added: \"1.4\"\n description:\n - Path to svn executable to use. 
If not supplied,\n the normal mechanism for resolving binary paths will be used.\n export:\n required: false\n default: \"no\"\n choices: [ \"yes\", \"no\" ]\n version_added: \"1.6\"\n description:\n - If C(yes), do export instead of checkout/update.\n switch:\n required: false\n default: \"yes\"\n choices: [ \"yes\", \"no\" ]\n version_added: \"2.0\"\n description:\n - If C(no), do not call svn switch before update.\n'''\n\nEXAMPLES = '''\n# Checkout subversion repository to specified folder.\n- subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/checkout\n\n# Export subversion directory to folder\n- subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/export export=True\n'''\n\nimport re\nimport tempfile\n\n\nclass Subversion(object):\n def __init__(\n self, module, dest, repo, revision, username, password, svn_path):\n self.module = module\n self.dest = dest\n self.repo = repo\n self.revision = revision\n self.username = username\n self.password = password\n self.svn_path = svn_path\n\n def _exec(self, args, check_rc=True):\n '''Execute a subversion command, and return output. If check_rc is False, returns the return code instead of the output.'''\n bits = [\n self.svn_path,\n '--non-interactive',\n '--trust-server-cert',\n '--no-auth-cache',\n ]\n if self.username:\n bits.extend([\"--username\", self.username])\n if self.password:\n bits.extend([\"--password\", self.password])\n bits.extend(args)\n rc, out, err = self.module.run_command(bits, check_rc)\n if check_rc:\n return out.splitlines()\n else:\n return rc\n\n def is_svn_repo(self):\n '''Checks if path is a SVN Repo.'''\n rc = self._exec([\"info\", self.dest], check_rc=False)\n return rc == 0\n\n def checkout(self):\n '''Creates new svn working directory if it does not already exist.'''\n self._exec([\"checkout\", \"-r\", self.revision, self.repo, self.dest])\n\n def export(self, force=False):\n '''Export svn repo to directory'''\n cmd = [\"export\"]\n if force:\n cmd.append(\"--force\")\n cmd.extend([\"-r\", self.revision, self.repo, self.dest])\n\n self._exec(cmd)\n\n def switch(self):\n '''Change working directory's repo.'''\n # switch to ensure we are pointing at correct repo.\n self._exec([\"switch\", self.repo, self.dest])\n\n def update(self):\n '''Update existing svn working directory.'''\n self._exec([\"update\", \"-r\", self.revision, self.dest])\n\n def revert(self):\n '''Revert svn working directory.'''\n self._exec([\"revert\", \"-R\", self.dest])\n\n def get_revision(self):\n '''Revision and URL of subversion working directory.'''\n text = '\\n'.join(self._exec([\"info\", self.dest]))\n rev = re.search(r'^Revision:.*$', text, re.MULTILINE).group(0)\n url = re.search(r'^URL:.*$', text, re.MULTILINE).group(0)\n return rev, url\n\n def has_local_mods(self):\n '''True if revisioned files have been added or modified. Unrevisioned files are ignored.'''\n lines = self._exec([\"status\", \"--quiet\", \"--ignore-externals\", self.dest])\n # The --quiet option will return only modified files.\n # Match only revisioned files, i.e. 
ignore status '?'.\n regex = re.compile(r'^[^?X]')\n # Has local mods if more than 0 modifed revisioned files.\n return len(filter(regex.match, lines)) > 0\n\n def needs_update(self):\n curr, url = self.get_revision()\n out2 = '\\n'.join(self._exec([\"info\", \"-r\", \"HEAD\", self.dest]))\n head = re.search(r'^Revision:.*$', out2, re.MULTILINE).group(0)\n rev1 = int(curr.split(':')[1].strip())\n rev2 = int(head.split(':')[1].strip())\n change = False\n if rev1 < rev2:\n change = True\n return change, curr, head\n\n\n# ===========================================\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n dest=dict(required=True, type='path'),\n repo=dict(required=True, aliases=['name', 'repository']),\n revision=dict(default='HEAD', aliases=['rev', 'version']),\n force=dict(default='no', type='bool'),\n username=dict(required=False),\n password=dict(required=False),\n executable=dict(default=None, type='path'),\n export=dict(default=False, required=False, type='bool'),\n switch=dict(default=True, required=False, type='bool'),\n ),\n supports_check_mode=True\n )\n\n dest = module.params['dest']\n repo = module.params['repo']\n revision = module.params['revision']\n force = module.params['force']\n username = module.params['username']\n password = module.params['password']\n svn_path = module.params['executable'] or module.get_bin_path('svn', True)\n export = module.params['export']\n switch = module.params['switch']\n\n os.environ['LANG'] = 'C'\n svn = Subversion(module, dest, repo, revision, username, password, svn_path)\n\n if export or not os.path.exists(dest):\n before = None\n local_mods = False\n if module.check_mode:\n module.exit_json(changed=True)\n if not export:\n svn.checkout()\n else:\n svn.export(force=force)\n elif svn.is_svn_repo():\n # Order matters. Need to get local mods before switch to avoid false\n # positives. Need to switch before revert to ensure we are reverting to\n # correct repo.\n if module.check_mode:\n check, before, after = svn.needs_update()\n module.exit_json(changed=check, before=before, after=after)\n before = svn.get_revision()\n local_mods = svn.has_local_mods()\n if switch:\n svn.switch()\n if local_mods:\n if force:\n svn.revert()\n else:\n module.fail_json(msg=\"ERROR: modified files exist in the repository.\")\n svn.update()\n else:\n module.fail_json(msg=\"ERROR: %s folder already exists, but its not a subversion repository.\" % (dest, ))\n\n if export:\n module.exit_json(changed=True)\n else:\n after = svn.get_revision()\n changed = before != after or local_mods\n module.exit_json(changed=changed, before=before, after=after)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nmain()\n", "path": "source_control/subversion.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Michael DeHaan <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: subversion\nshort_description: Deploys a subversion repository.\ndescription:\n - Deploy given repository URL / revision to dest. If dest exists, update to the specified revision, otherwise perform a checkout.\nversion_added: \"0.7\"\nauthor: \"Dane Summers (@dsummersl) <[email protected]>\"\nnotes:\n - Requires I(svn) to be installed on the client.\nrequirements: []\noptions:\n repo:\n description:\n - The subversion URL to the repository.\n required: true\n aliases: [ name, repository ]\n default: null\n dest:\n description:\n - Absolute path where the repository should be deployed.\n required: true\n default: null\n revision:\n description:\n - Specific revision to checkout.\n required: false\n default: HEAD\n aliases: [ version ]\n force:\n description:\n - If C(yes), modified files will be discarded. If C(no), module will fail if it encounters modified files.\n Prior to 1.9 the default was `yes`.\n required: false\n default: \"no\"\n choices: [ \"yes\", \"no\" ]\n username:\n description:\n - --username parameter passed to svn.\n required: false\n default: null\n password:\n description:\n - --password parameter passed to svn.\n required: false\n default: null\n executable:\n required: false\n default: null\n version_added: \"1.4\"\n description:\n - Path to svn executable to use. If not supplied,\n the normal mechanism for resolving binary paths will be used.\n export:\n required: false\n default: \"no\"\n choices: [ \"yes\", \"no\" ]\n version_added: \"1.6\"\n description:\n - If C(yes), do export instead of checkout/update.\n switch:\n required: false\n default: \"yes\"\n choices: [ \"yes\", \"no\" ]\n version_added: \"2.0\"\n description:\n - If C(no), do not call svn switch before update.\n'''\n\nEXAMPLES = '''\n# Checkout subversion repository to specified folder.\n- subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/checkout\n\n# Export subversion directory to folder\n- subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/export export=True\n'''\n\nimport re\nimport tempfile\n\n\nclass Subversion(object):\n def __init__(\n self, module, dest, repo, revision, username, password, svn_path):\n self.module = module\n self.dest = dest\n self.repo = repo\n self.revision = revision\n self.username = username\n self.password = password\n self.svn_path = svn_path\n\n def _exec(self, args, check_rc=True):\n '''Execute a subversion command, and return output. 
If check_rc is False, returns the return code instead of the output.'''\n bits = [\n self.svn_path,\n '--non-interactive',\n '--trust-server-cert',\n '--no-auth-cache',\n ]\n if self.username:\n bits.extend([\"--username\", self.username])\n if self.password:\n bits.extend([\"--password\", self.password])\n bits.extend(args)\n rc, out, err = self.module.run_command(bits, check_rc)\n if check_rc:\n return out.splitlines()\n else:\n return rc\n\n def is_svn_repo(self):\n '''Checks if path is a SVN Repo.'''\n rc = self._exec([\"info\", self.dest], check_rc=False)\n return rc == 0\n\n def checkout(self):\n '''Creates new svn working directory if it does not already exist.'''\n self._exec([\"checkout\", \"-r\", self.revision, self.repo, self.dest])\n\n def export(self, force=False):\n '''Export svn repo to directory'''\n cmd = [\"export\"]\n if force:\n cmd.append(\"--force\")\n cmd.extend([\"-r\", self.revision, self.repo, self.dest])\n\n self._exec(cmd)\n\n def switch(self):\n '''Change working directory's repo.'''\n # switch to ensure we are pointing at correct repo.\n self._exec([\"switch\", self.repo, self.dest])\n\n def update(self):\n '''Update existing svn working directory.'''\n self._exec([\"update\", \"-r\", self.revision, self.dest])\n\n def revert(self):\n '''Revert svn working directory.'''\n self._exec([\"revert\", \"-R\", self.dest])\n\n def get_revision(self):\n '''Revision and URL of subversion working directory.'''\n text = '\\n'.join(self._exec([\"info\", self.dest]))\n rev = re.search(r'^Revision:.*$', text, re.MULTILINE).group(0)\n url = re.search(r'^URL:.*$', text, re.MULTILINE).group(0)\n return rev, url\n\n def has_local_mods(self):\n '''True if revisioned files have been added or modified. Unrevisioned files are ignored.'''\n lines = self._exec([\"status\", \"--quiet\", \"--ignore-externals\", self.dest])\n # The --quiet option will return only modified files.\n # Match only revisioned files, i.e. 
ignore status '?'.\n regex = re.compile(r'^[^?X]')\n # Has local mods if more than 0 modifed revisioned files.\n return len(filter(regex.match, lines)) > 0\n\n def needs_update(self):\n curr, url = self.get_revision()\n out2 = '\\n'.join(self._exec([\"info\", \"-r\", \"HEAD\", self.dest]))\n head = re.search(r'^Revision:.*$', out2, re.MULTILINE).group(0)\n rev1 = int(curr.split(':')[1].strip())\n rev2 = int(head.split(':')[1].strip())\n change = False\n if rev1 < rev2:\n change = True\n return change, curr, head\n\n\n# ===========================================\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n dest=dict(required=True, type='path'),\n repo=dict(required=True, aliases=['name', 'repository']),\n revision=dict(default='HEAD', aliases=['rev', 'version']),\n force=dict(default='no', type='bool'),\n username=dict(required=False),\n password=dict(required=False),\n executable=dict(default=None, type='path'),\n export=dict(default=False, required=False, type='bool'),\n switch=dict(default=True, required=False, type='bool'),\n ),\n supports_check_mode=True\n )\n\n dest = module.params['dest']\n repo = module.params['repo']\n revision = module.params['revision']\n force = module.params['force']\n username = module.params['username']\n password = module.params['password']\n svn_path = module.params['executable'] or module.get_bin_path('svn', True)\n export = module.params['export']\n switch = module.params['switch']\n\n # We screenscrape a huge amount of svn commands so use C locale anytime we\n # call run_command()\n module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')\n\n svn = Subversion(module, dest, repo, revision, username, password, svn_path)\n\n if export or not os.path.exists(dest):\n before = None\n local_mods = False\n if module.check_mode:\n module.exit_json(changed=True)\n if not export:\n svn.checkout()\n else:\n svn.export(force=force)\n elif svn.is_svn_repo():\n # Order matters. Need to get local mods before switch to avoid false\n # positives. Need to switch before revert to ensure we are reverting to\n # correct repo.\n if module.check_mode:\n check, before, after = svn.needs_update()\n module.exit_json(changed=check, before=before, after=after)\n before = svn.get_revision()\n local_mods = svn.has_local_mods()\n if switch:\n svn.switch()\n if local_mods:\n if force:\n svn.revert()\n else:\n module.fail_json(msg=\"ERROR: modified files exist in the repository.\")\n svn.update()\n else:\n module.fail_json(msg=\"ERROR: %s folder already exists, but its not a subversion repository.\" % (dest, ))\n\n if export:\n module.exit_json(changed=True)\n else:\n after = svn.get_revision()\n changed = before != after or local_mods\n module.exit_json(changed=changed, before=before, after=after)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nmain()\n", "path": "source_control/subversion.py"}]}
| 3,878 | 165 |
gh_patches_debug_51991
|
rasdani/github-patches
|
git_diff
|
pydantic__pydantic-391
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Include a PEP 561 marker file
# Feature Request
Hi,
The new version 0.19 has improved typing support, which is great, but it looks like it doesn't work out of the box. I had problems similar to those described in #245, but after adding the installation to MYPYPATH it works fine.
I think a PEP 561 marker file `py.typed` should be added so that tools like mypy can use the inline type information without any configuration. Reading the mypy docs, it looks like there is a downside: `zip_safe` must be disabled for this.
https://mypy.readthedocs.io/en/latest/installed_packages.html
https://www.python.org/dev/peps/pep-0561/
Include a PEP 561 marker file
# Feature Request
Hi,
The new version 0.19 has improved typing support, which is great, but it looks like it doesn't work out of the box. I had problems similar to those described in #245, but after adding the installation to MYPYPATH it works fine.
I think a PEP 561 marker file `py.typed` should be added so that tools like mypy can use the inline type information without any configuration. Reading the mypy docs, it looks like there is a downside: `zip_safe` must be disabled for this.
https://mypy.readthedocs.io/en/latest/installed_packages.html
https://www.python.org/dev/peps/pep-0561/
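For illustration, a minimal PEP 561 setup usually just ships an empty `py.typed` file inside the package and registers it as package data, with `zip_safe` disabled so type checkers can read the installed files. The snippet below is only a generic sketch with placeholder names, not this project's actual `setup.py`:

```python
# Generic PEP 561 sketch (placeholder names, not pydantic's real setup.py).
# An empty marker file lives at example_package/py.typed.
from setuptools import setup

setup(
    name='example-package',                           # placeholder project name
    version='0.1.0',
    packages=['example_package'],
    package_data={'example_package': ['py.typed']},   # ship the PEP 561 marker
    zip_safe=False,  # required so mypy can read the package files from disk
)
```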
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import re
2 from importlib.machinery import SourceFileLoader
3 from pathlib import Path
4 from setuptools import setup
5
6
7 class ReplaceLinks:
8 def __init__(self):
9 self.links = set()
10
11 def replace_issues(self, m):
12 id = m.group(1)
13 self.links.add(f'.. _#{id}: https://github.com/samuelcolvin/pydantic/issues/{id}')
14 return f'`#{id}`_'
15
16 def replace_users(self, m):
17 name = m.group(2)
18 self.links.add(f'.. _@{name}: https://github.com/{name}')
19 return f'{m.group(1)}`@{name}`_'
20
21 def extra(self):
22 return '\n\n' + '\n'.join(self.links) + '\n'
23
24
25 description = 'Data validation and settings management using python 3.6 type hinting'
26 THIS_DIR = Path(__file__).resolve().parent
27 try:
28 history = THIS_DIR.joinpath('HISTORY.rst').read_text()
29
30 replacer = ReplaceLinks()
31 history = re.sub(r'#(\d+)', replacer.replace_issues, history)
32 history = re.sub(r'( +)@(\w+)', replacer.replace_users, history, flags=re.I)
33 history = re.sub(r'@@', '@', history)
34 history += replacer.extra()
35
36 long_description = '\n\n'.join([THIS_DIR.joinpath('README.rst').read_text(), history])
37 except FileNotFoundError:
38 long_description = description + '.\n\nSee https://pydantic-docs.helpmanual.io/ for documentation.'
39
40 # avoid loading the package before requirements are installed:
41 version = SourceFileLoader('version', 'pydantic/version.py').load_module()
42
43 setup(
44 name='pydantic',
45 version=str(version.VERSION),
46 description=description,
47 long_description=long_description,
48 classifiers=[
49 'Development Status :: 5 - Production/Stable',
50 'Programming Language :: Python',
51 'Programming Language :: Python :: 3',
52 'Programming Language :: Python :: 3 :: Only',
53 'Programming Language :: Python :: 3.6',
54 'Programming Language :: Python :: 3.7',
55 'Intended Audience :: Developers',
56 'Intended Audience :: Information Technology',
57 'Intended Audience :: System Administrators',
58 'License :: OSI Approved :: MIT License',
59 'Operating System :: Unix',
60 'Operating System :: POSIX :: Linux',
61 'Environment :: Console',
62 'Environment :: MacOS X',
63 'Topic :: Software Development :: Libraries :: Python Modules',
64 'Topic :: Internet',
65 ],
66 author='Samuel Colvin',
67 author_email='[email protected]',
68 url='https://github.com/samuelcolvin/pydantic',
69 license='MIT',
70 packages=['pydantic'],
71 python_requires='>=3.6',
72 zip_safe=True,
73 install_requires=[
74 'dataclasses>=0.6;python_version<"3.7"'
75 ],
76 extras_require={
77 'ujson': ['ujson>=1.35'],
78 'email': ['email-validator>=1.0.3'],
79 }
80 )
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -68,8 +68,9 @@
url='https://github.com/samuelcolvin/pydantic',
license='MIT',
packages=['pydantic'],
+ package_data={'pydantic': ['py.typed']},
python_requires='>=3.6',
- zip_safe=True,
+ zip_safe=False, # https://mypy.readthedocs.io/en/latest/installed_packages.html
install_requires=[
'dataclasses>=0.6;python_version<"3.7"'
],
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -68,8 +68,9 @@\n url='https://github.com/samuelcolvin/pydantic',\n license='MIT',\n packages=['pydantic'],\n+ package_data={'pydantic': ['py.typed']},\n python_requires='>=3.6',\n- zip_safe=True,\n+ zip_safe=False, # https://mypy.readthedocs.io/en/latest/installed_packages.html\n install_requires=[\n 'dataclasses>=0.6;python_version<\"3.7\"'\n ],\n", "issue": "Include a PEP 561 marker file\n# Feature Request\r\n\r\nHi,\r\n\r\nThe new version 0.19 has improved typing support which is great, but looks like it doesn't work out of the box. I had similar problems as described in #245 , but after adding the installation to MYPYPATH it works fine.\r\n\r\nI think a PEP 561 marker file `py.typed` should be added so that tools like mypy can utilize the inline type information without any configuration. Reading mypy docs looks like there is a downside that `zip_safe` must be disabled for this.\r\n\r\nhttps://mypy.readthedocs.io/en/latest/installed_packages.html\r\nhttps://www.python.org/dev/peps/pep-0561/\nInclude a PEP 561 marker file\n# Feature Request\r\n\r\nHi,\r\n\r\nThe new version 0.19 has improved typing support which is great, but looks like it doesn't work out of the box. I had similar problems as described in #245 , but after adding the installation to MYPYPATH it works fine.\r\n\r\nI think a PEP 561 marker file `py.typed` should be added so that tools like mypy can utilize the inline type information without any configuration. Reading mypy docs looks like there is a downside that `zip_safe` must be disabled for this.\r\n\r\nhttps://mypy.readthedocs.io/en/latest/installed_packages.html\r\nhttps://www.python.org/dev/peps/pep-0561/\n", "before_files": [{"content": "import re\nfrom importlib.machinery import SourceFileLoader\nfrom pathlib import Path\nfrom setuptools import setup\n\n\nclass ReplaceLinks:\n def __init__(self):\n self.links = set()\n\n def replace_issues(self, m):\n id = m.group(1)\n self.links.add(f'.. _#{id}: https://github.com/samuelcolvin/pydantic/issues/{id}')\n return f'`#{id}`_'\n\n def replace_users(self, m):\n name = m.group(2)\n self.links.add(f'.. 
_@{name}: https://github.com/{name}')\n return f'{m.group(1)}`@{name}`_'\n\n def extra(self):\n return '\\n\\n' + '\\n'.join(self.links) + '\\n'\n\n\ndescription = 'Data validation and settings management using python 3.6 type hinting'\nTHIS_DIR = Path(__file__).resolve().parent\ntry:\n history = THIS_DIR.joinpath('HISTORY.rst').read_text()\n\n replacer = ReplaceLinks()\n history = re.sub(r'#(\\d+)', replacer.replace_issues, history)\n history = re.sub(r'( +)@(\\w+)', replacer.replace_users, history, flags=re.I)\n history = re.sub(r'@@', '@', history)\n history += replacer.extra()\n\n long_description = '\\n\\n'.join([THIS_DIR.joinpath('README.rst').read_text(), history])\nexcept FileNotFoundError:\n long_description = description + '.\\n\\nSee https://pydantic-docs.helpmanual.io/ for documentation.'\n\n# avoid loading the package before requirements are installed:\nversion = SourceFileLoader('version', 'pydantic/version.py').load_module()\n\nsetup(\n name='pydantic',\n version=str(version.VERSION),\n description=description,\n long_description=long_description,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Unix',\n 'Operating System :: POSIX :: Linux',\n 'Environment :: Console',\n 'Environment :: MacOS X',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet',\n ],\n author='Samuel Colvin',\n author_email='[email protected]',\n url='https://github.com/samuelcolvin/pydantic',\n license='MIT',\n packages=['pydantic'],\n python_requires='>=3.6',\n zip_safe=True,\n install_requires=[\n 'dataclasses>=0.6;python_version<\"3.7\"'\n ],\n extras_require={\n 'ujson': ['ujson>=1.35'],\n 'email': ['email-validator>=1.0.3'],\n }\n)\n", "path": "setup.py"}], "after_files": [{"content": "import re\nfrom importlib.machinery import SourceFileLoader\nfrom pathlib import Path\nfrom setuptools import setup\n\n\nclass ReplaceLinks:\n def __init__(self):\n self.links = set()\n\n def replace_issues(self, m):\n id = m.group(1)\n self.links.add(f'.. _#{id}: https://github.com/samuelcolvin/pydantic/issues/{id}')\n return f'`#{id}`_'\n\n def replace_users(self, m):\n name = m.group(2)\n self.links.add(f'.. 
_@{name}: https://github.com/{name}')\n return f'{m.group(1)}`@{name}`_'\n\n def extra(self):\n return '\\n\\n' + '\\n'.join(self.links) + '\\n'\n\n\ndescription = 'Data validation and settings management using python 3.6 type hinting'\nTHIS_DIR = Path(__file__).resolve().parent\ntry:\n history = THIS_DIR.joinpath('HISTORY.rst').read_text()\n\n replacer = ReplaceLinks()\n history = re.sub(r'#(\\d+)', replacer.replace_issues, history)\n history = re.sub(r'( +)@(\\w+)', replacer.replace_users, history, flags=re.I)\n history = re.sub(r'@@', '@', history)\n history += replacer.extra()\n\n long_description = '\\n\\n'.join([THIS_DIR.joinpath('README.rst').read_text(), history])\nexcept FileNotFoundError:\n long_description = description + '.\\n\\nSee https://pydantic-docs.helpmanual.io/ for documentation.'\n\n# avoid loading the package before requirements are installed:\nversion = SourceFileLoader('version', 'pydantic/version.py').load_module()\n\nsetup(\n name='pydantic',\n version=str(version.VERSION),\n description=description,\n long_description=long_description,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Unix',\n 'Operating System :: POSIX :: Linux',\n 'Environment :: Console',\n 'Environment :: MacOS X',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet',\n ],\n author='Samuel Colvin',\n author_email='[email protected]',\n url='https://github.com/samuelcolvin/pydantic',\n license='MIT',\n packages=['pydantic'],\n package_data={'pydantic': ['py.typed']},\n python_requires='>=3.6',\n zip_safe=False, # https://mypy.readthedocs.io/en/latest/installed_packages.html\n install_requires=[\n 'dataclasses>=0.6;python_version<\"3.7\"'\n ],\n extras_require={\n 'ujson': ['ujson>=1.35'],\n 'email': ['email-validator>=1.0.3'],\n }\n)\n", "path": "setup.py"}]}
| 1,403 | 134 |
gh_patches_debug_9907
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-13028
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sporadically broken HTML formatting since at least 3.5.0
### NetBox version
v3.5.1
### Python version
3.10
### Steps to Reproduce
Spontaneously observed behavior:
1. Open any list
2. Click any object
3. Press "Go back" (or ALT+←) in the browser
### Expected Behavior
The list will be rendered as usual.
### Observed Behavior
Only part of the page is loaded, without the sidebar and with an empty head tag:
[screenshot: partially rendered page with an empty head element]
--- END ISSUE ---
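The symptom above is typical of the browser cache replaying a cached HTMX partial response for a full-page navigation (back/forward), so only the fragment is rendered. A common general-purpose mitigation is to make caches key on the `HX-Request` header; the middleware below is only a generic sketch of that idea using Django's `patch_vary_headers`, not NetBox's actual code:

```python
# Generic sketch (not NetBox's fix): vary cached responses on HX-Request so a
# cached HTMX partial is never reused for a normal full-page navigation.
from django.utils.cache import patch_vary_headers


class HtmxVaryMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        response = self.get_response(request)
        patch_vary_headers(response, ("HX-Request",))  # appends to any existing Vary
        return response
```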
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/netbox/middleware.py`
Content:
```
1 import logging
2 import uuid
3 from urllib import parse
4
5 from django.conf import settings
6 from django.contrib import auth, messages
7 from django.contrib.auth.middleware import RemoteUserMiddleware as RemoteUserMiddleware_
8 from django.core.exceptions import ImproperlyConfigured
9 from django.db import connection, ProgrammingError
10 from django.db.utils import InternalError
11 from django.http import Http404, HttpResponseRedirect
12
13 from extras.context_managers import change_logging
14 from netbox.config import clear_config, get_config
15 from netbox.views import handler_500
16 from utilities.api import is_api_request, rest_api_server_error
17
18 __all__ = (
19 'CoreMiddleware',
20 'MaintenanceModeMiddleware',
21 'RemoteUserMiddleware',
22 )
23
24
25 class CoreMiddleware:
26
27 def __init__(self, get_response):
28 self.get_response = get_response
29
30 def __call__(self, request):
31
32 # Assign a random unique ID to the request. This will be used for change logging.
33 request.id = uuid.uuid4()
34
35 # Enforce the LOGIN_REQUIRED config parameter. If true, redirect all non-exempt unauthenticated requests
36 # to the login page.
37 if (
38 settings.LOGIN_REQUIRED and
39 not request.user.is_authenticated and
40 not request.path_info.startswith(settings.AUTH_EXEMPT_PATHS)
41 ):
42 login_url = f'{settings.LOGIN_URL}?next={parse.quote(request.get_full_path_info())}'
43 return HttpResponseRedirect(login_url)
44
45 # Enable the change_logging context manager and process the request.
46 with change_logging(request):
47 response = self.get_response(request)
48
49 # Attach the unique request ID as an HTTP header.
50 response['X-Request-ID'] = request.id
51
52 # If this is an API request, attach an HTTP header annotating the API version (e.g. '3.5').
53 if is_api_request(request):
54 response['API-Version'] = settings.REST_FRAMEWORK_VERSION
55
56 # Clear any cached dynamic config parameters after each request.
57 clear_config()
58
59 return response
60
61 def process_exception(self, request, exception):
62 """
63 Implement custom error handling logic for production deployments.
64 """
65 # Don't catch exceptions when in debug mode
66 if settings.DEBUG:
67 return
68
69 # Cleanly handle exceptions that occur from REST API requests
70 if is_api_request(request):
71 return rest_api_server_error(request)
72
73 # Ignore Http404s (defer to Django's built-in 404 handling)
74 if isinstance(exception, Http404):
75 return
76
77 # Determine the type of exception. If it's a common issue, return a custom error page with instructions.
78 custom_template = None
79 if isinstance(exception, ProgrammingError):
80 custom_template = 'exceptions/programming_error.html'
81 elif isinstance(exception, ImportError):
82 custom_template = 'exceptions/import_error.html'
83 elif isinstance(exception, PermissionError):
84 custom_template = 'exceptions/permission_error.html'
85
86 # Return a custom error message, or fall back to Django's default 500 error handling
87 if custom_template:
88 return handler_500(request, template_name=custom_template)
89
90
91 class RemoteUserMiddleware(RemoteUserMiddleware_):
92 """
93 Custom implementation of Django's RemoteUserMiddleware which allows for a user-configurable HTTP header name.
94 """
95 force_logout_if_no_header = False
96
97 @property
98 def header(self):
99 return settings.REMOTE_AUTH_HEADER
100
101 def process_request(self, request):
102 logger = logging.getLogger(
103 'netbox.authentication.RemoteUserMiddleware')
104 # Bypass middleware if remote authentication is not enabled
105 if not settings.REMOTE_AUTH_ENABLED:
106 return
107 # AuthenticationMiddleware is required so that request.user exists.
108 if not hasattr(request, 'user'):
109 raise ImproperlyConfigured(
110 "The Django remote user auth middleware requires the"
111 " authentication middleware to be installed. Edit your"
112 " MIDDLEWARE setting to insert"
113 " 'django.contrib.auth.middleware.AuthenticationMiddleware'"
114 " before the RemoteUserMiddleware class.")
115 try:
116 username = request.META[self.header]
117 except KeyError:
118 # If specified header doesn't exist then remove any existing
119 # authenticated remote-user, or return (leaving request.user set to
120 # AnonymousUser by the AuthenticationMiddleware).
121 if self.force_logout_if_no_header and request.user.is_authenticated:
122 self._remove_invalid_user(request)
123 return
124 # If the user is already authenticated and that user is the user we are
125 # getting passed in the headers, then the correct user is already
126 # persisted in the session and we don't need to continue.
127 if request.user.is_authenticated:
128 if request.user.get_username() == self.clean_username(username, request):
129 return
130 else:
131 # An authenticated user is associated with the request, but
132 # it does not match the authorized user in the header.
133 self._remove_invalid_user(request)
134
135 # We are seeing this user for the first time in this session, attempt
136 # to authenticate the user.
137 if settings.REMOTE_AUTH_GROUP_SYNC_ENABLED:
138 logger.debug("Trying to sync Groups")
139 user = auth.authenticate(
140 request, remote_user=username, remote_groups=self._get_groups(request))
141 else:
142 user = auth.authenticate(request, remote_user=username)
143 if user:
144 # User is valid.
145 # Update the User's Profile if set by request headers
146 if settings.REMOTE_AUTH_USER_FIRST_NAME in request.META:
147 user.first_name = request.META[settings.REMOTE_AUTH_USER_FIRST_NAME]
148 if settings.REMOTE_AUTH_USER_LAST_NAME in request.META:
149 user.last_name = request.META[settings.REMOTE_AUTH_USER_LAST_NAME]
150 if settings.REMOTE_AUTH_USER_EMAIL in request.META:
151 user.email = request.META[settings.REMOTE_AUTH_USER_EMAIL]
152 user.save()
153
154 # Set request.user and persist user in the session
155 # by logging the user in.
156 request.user = user
157 auth.login(request, user)
158
159 def _get_groups(self, request):
160 logger = logging.getLogger(
161 'netbox.authentication.RemoteUserMiddleware')
162
163 groups_string = request.META.get(
164 settings.REMOTE_AUTH_GROUP_HEADER, None)
165 if groups_string:
166 groups = groups_string.split(settings.REMOTE_AUTH_GROUP_SEPARATOR)
167 else:
168 groups = []
169 logger.debug(f"Groups are {groups}")
170 return groups
171
172
173 class MaintenanceModeMiddleware:
174 """
175 Middleware that checks if the application is in maintenance mode
176 and restricts write-related operations to the database.
177 """
178
179 def __init__(self, get_response):
180 self.get_response = get_response
181
182 def __call__(self, request):
183 if get_config().MAINTENANCE_MODE:
184 self._set_session_type(
185 allow_write=request.path_info.startswith(settings.MAINTENANCE_EXEMPT_PATHS)
186 )
187
188 return self.get_response(request)
189
190 @staticmethod
191 def _set_session_type(allow_write):
192 """
193 Prevent any write-related database operations.
194
195 Args:
196 allow_write (bool): If True, write operations will be permitted.
197 """
198 with connection.cursor() as cursor:
199 mode = 'READ WRITE' if allow_write else 'READ ONLY'
200 cursor.execute(f'SET SESSION CHARACTERISTICS AS TRANSACTION {mode};')
201
202 def process_exception(self, request, exception):
203 """
204 Prevent any write-related database operations if an exception is raised.
205 """
206 if get_config().MAINTENANCE_MODE and isinstance(exception, InternalError):
207 error_message = 'NetBox is currently operating in maintenance mode and is unable to perform write ' \
208 'operations. Please try again later.'
209
210 if is_api_request(request):
211 return rest_api_server_error(request, error=error_message)
212
213 messages.error(request, error_message)
214 return HttpResponseRedirect(request.path_info)
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netbox/netbox/middleware.py b/netbox/netbox/middleware.py
--- a/netbox/netbox/middleware.py
+++ b/netbox/netbox/middleware.py
@@ -49,6 +49,9 @@
# Attach the unique request ID as an HTTP header.
response['X-Request-ID'] = request.id
+ # Enable the Vary header to help with caching of HTMX responses
+ response['Vary'] = 'HX-Request'
+
# If this is an API request, attach an HTTP header annotating the API version (e.g. '3.5').
if is_api_request(request):
response['API-Version'] = settings.REST_FRAMEWORK_VERSION
|
{"golden_diff": "diff --git a/netbox/netbox/middleware.py b/netbox/netbox/middleware.py\n--- a/netbox/netbox/middleware.py\n+++ b/netbox/netbox/middleware.py\n@@ -49,6 +49,9 @@\n # Attach the unique request ID as an HTTP header.\n response['X-Request-ID'] = request.id\n \n+ # Enable the Vary header to help with caching of HTMX responses\n+ response['Vary'] = 'HX-Request'\n+\n # If this is an API request, attach an HTTP header annotating the API version (e.g. '3.5').\n if is_api_request(request):\n response['API-Version'] = settings.REST_FRAMEWORK_VERSION\n", "issue": "Sporadically broken HTML formatting since at least 3.5.0\n### NetBox version\n\nv3.5.1\n\n### Python version\n\n3.10\n\n### Steps to Reproduce\n\nSpontaneously observed behavior:\r\n1. Open any list\r\n2. Click to any object\r\n3. Press \"Go back\" (or ALT+\u2190) in browser\n\n### Expected Behavior\n\nList will be rendered as usual.\n\n### Observed Behavior\n\nLoaded only part of page, without sidebar, with empty head tag:\r\n\r\n\r\n\n", "before_files": [{"content": "import logging\nimport uuid\nfrom urllib import parse\n\nfrom django.conf import settings\nfrom django.contrib import auth, messages\nfrom django.contrib.auth.middleware import RemoteUserMiddleware as RemoteUserMiddleware_\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import connection, ProgrammingError\nfrom django.db.utils import InternalError\nfrom django.http import Http404, HttpResponseRedirect\n\nfrom extras.context_managers import change_logging\nfrom netbox.config import clear_config, get_config\nfrom netbox.views import handler_500\nfrom utilities.api import is_api_request, rest_api_server_error\n\n__all__ = (\n 'CoreMiddleware',\n 'MaintenanceModeMiddleware',\n 'RemoteUserMiddleware',\n)\n\n\nclass CoreMiddleware:\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n\n # Assign a random unique ID to the request. This will be used for change logging.\n request.id = uuid.uuid4()\n\n # Enforce the LOGIN_REQUIRED config parameter. If true, redirect all non-exempt unauthenticated requests\n # to the login page.\n if (\n settings.LOGIN_REQUIRED and\n not request.user.is_authenticated and\n not request.path_info.startswith(settings.AUTH_EXEMPT_PATHS)\n ):\n login_url = f'{settings.LOGIN_URL}?next={parse.quote(request.get_full_path_info())}'\n return HttpResponseRedirect(login_url)\n\n # Enable the change_logging context manager and process the request.\n with change_logging(request):\n response = self.get_response(request)\n\n # Attach the unique request ID as an HTTP header.\n response['X-Request-ID'] = request.id\n\n # If this is an API request, attach an HTTP header annotating the API version (e.g. '3.5').\n if is_api_request(request):\n response['API-Version'] = settings.REST_FRAMEWORK_VERSION\n\n # Clear any cached dynamic config parameters after each request.\n clear_config()\n\n return response\n\n def process_exception(self, request, exception):\n \"\"\"\n Implement custom error handling logic for production deployments.\n \"\"\"\n # Don't catch exceptions when in debug mode\n if settings.DEBUG:\n return\n\n # Cleanly handle exceptions that occur from REST API requests\n if is_api_request(request):\n return rest_api_server_error(request)\n\n # Ignore Http404s (defer to Django's built-in 404 handling)\n if isinstance(exception, Http404):\n return\n\n # Determine the type of exception. 
If it's a common issue, return a custom error page with instructions.\n custom_template = None\n if isinstance(exception, ProgrammingError):\n custom_template = 'exceptions/programming_error.html'\n elif isinstance(exception, ImportError):\n custom_template = 'exceptions/import_error.html'\n elif isinstance(exception, PermissionError):\n custom_template = 'exceptions/permission_error.html'\n\n # Return a custom error message, or fall back to Django's default 500 error handling\n if custom_template:\n return handler_500(request, template_name=custom_template)\n\n\nclass RemoteUserMiddleware(RemoteUserMiddleware_):\n \"\"\"\n Custom implementation of Django's RemoteUserMiddleware which allows for a user-configurable HTTP header name.\n \"\"\"\n force_logout_if_no_header = False\n\n @property\n def header(self):\n return settings.REMOTE_AUTH_HEADER\n\n def process_request(self, request):\n logger = logging.getLogger(\n 'netbox.authentication.RemoteUserMiddleware')\n # Bypass middleware if remote authentication is not enabled\n if not settings.REMOTE_AUTH_ENABLED:\n return\n # AuthenticationMiddleware is required so that request.user exists.\n if not hasattr(request, 'user'):\n raise ImproperlyConfigured(\n \"The Django remote user auth middleware requires the\"\n \" authentication middleware to be installed. Edit your\"\n \" MIDDLEWARE setting to insert\"\n \" 'django.contrib.auth.middleware.AuthenticationMiddleware'\"\n \" before the RemoteUserMiddleware class.\")\n try:\n username = request.META[self.header]\n except KeyError:\n # If specified header doesn't exist then remove any existing\n # authenticated remote-user, or return (leaving request.user set to\n # AnonymousUser by the AuthenticationMiddleware).\n if self.force_logout_if_no_header and request.user.is_authenticated:\n self._remove_invalid_user(request)\n return\n # If the user is already authenticated and that user is the user we are\n # getting passed in the headers, then the correct user is already\n # persisted in the session and we don't need to continue.\n if request.user.is_authenticated:\n if request.user.get_username() == self.clean_username(username, request):\n return\n else:\n # An authenticated user is associated with the request, but\n # it does not match the authorized user in the header.\n self._remove_invalid_user(request)\n\n # We are seeing this user for the first time in this session, attempt\n # to authenticate the user.\n if settings.REMOTE_AUTH_GROUP_SYNC_ENABLED:\n logger.debug(\"Trying to sync Groups\")\n user = auth.authenticate(\n request, remote_user=username, remote_groups=self._get_groups(request))\n else:\n user = auth.authenticate(request, remote_user=username)\n if user:\n # User is valid.\n # Update the User's Profile if set by request headers\n if settings.REMOTE_AUTH_USER_FIRST_NAME in request.META:\n user.first_name = request.META[settings.REMOTE_AUTH_USER_FIRST_NAME]\n if settings.REMOTE_AUTH_USER_LAST_NAME in request.META:\n user.last_name = request.META[settings.REMOTE_AUTH_USER_LAST_NAME]\n if settings.REMOTE_AUTH_USER_EMAIL in request.META:\n user.email = request.META[settings.REMOTE_AUTH_USER_EMAIL]\n user.save()\n\n # Set request.user and persist user in the session\n # by logging the user in.\n request.user = user\n auth.login(request, user)\n\n def _get_groups(self, request):\n logger = logging.getLogger(\n 'netbox.authentication.RemoteUserMiddleware')\n\n groups_string = request.META.get(\n settings.REMOTE_AUTH_GROUP_HEADER, None)\n if groups_string:\n groups = 
groups_string.split(settings.REMOTE_AUTH_GROUP_SEPARATOR)\n else:\n groups = []\n logger.debug(f\"Groups are {groups}\")\n return groups\n\n\nclass MaintenanceModeMiddleware:\n \"\"\"\n Middleware that checks if the application is in maintenance mode\n and restricts write-related operations to the database.\n \"\"\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n if get_config().MAINTENANCE_MODE:\n self._set_session_type(\n allow_write=request.path_info.startswith(settings.MAINTENANCE_EXEMPT_PATHS)\n )\n\n return self.get_response(request)\n\n @staticmethod\n def _set_session_type(allow_write):\n \"\"\"\n Prevent any write-related database operations.\n\n Args:\n allow_write (bool): If True, write operations will be permitted.\n \"\"\"\n with connection.cursor() as cursor:\n mode = 'READ WRITE' if allow_write else 'READ ONLY'\n cursor.execute(f'SET SESSION CHARACTERISTICS AS TRANSACTION {mode};')\n\n def process_exception(self, request, exception):\n \"\"\"\n Prevent any write-related database operations if an exception is raised.\n \"\"\"\n if get_config().MAINTENANCE_MODE and isinstance(exception, InternalError):\n error_message = 'NetBox is currently operating in maintenance mode and is unable to perform write ' \\\n 'operations. Please try again later.'\n\n if is_api_request(request):\n return rest_api_server_error(request, error=error_message)\n\n messages.error(request, error_message)\n return HttpResponseRedirect(request.path_info)\n", "path": "netbox/netbox/middleware.py"}], "after_files": [{"content": "import logging\nimport uuid\nfrom urllib import parse\n\nfrom django.conf import settings\nfrom django.contrib import auth, messages\nfrom django.contrib.auth.middleware import RemoteUserMiddleware as RemoteUserMiddleware_\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import connection, ProgrammingError\nfrom django.db.utils import InternalError\nfrom django.http import Http404, HttpResponseRedirect\n\nfrom extras.context_managers import change_logging\nfrom netbox.config import clear_config, get_config\nfrom netbox.views import handler_500\nfrom utilities.api import is_api_request, rest_api_server_error\n\n__all__ = (\n 'CoreMiddleware',\n 'MaintenanceModeMiddleware',\n 'RemoteUserMiddleware',\n)\n\n\nclass CoreMiddleware:\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n\n # Assign a random unique ID to the request. This will be used for change logging.\n request.id = uuid.uuid4()\n\n # Enforce the LOGIN_REQUIRED config parameter. If true, redirect all non-exempt unauthenticated requests\n # to the login page.\n if (\n settings.LOGIN_REQUIRED and\n not request.user.is_authenticated and\n not request.path_info.startswith(settings.AUTH_EXEMPT_PATHS)\n ):\n login_url = f'{settings.LOGIN_URL}?next={parse.quote(request.get_full_path_info())}'\n return HttpResponseRedirect(login_url)\n\n # Enable the change_logging context manager and process the request.\n with change_logging(request):\n response = self.get_response(request)\n\n # Attach the unique request ID as an HTTP header.\n response['X-Request-ID'] = request.id\n\n # Enable the Vary header to help with caching of HTMX responses\n response['Vary'] = 'HX-Request'\n\n # If this is an API request, attach an HTTP header annotating the API version (e.g. 
'3.5').\n if is_api_request(request):\n response['API-Version'] = settings.REST_FRAMEWORK_VERSION\n\n # Clear any cached dynamic config parameters after each request.\n clear_config()\n\n return response\n\n def process_exception(self, request, exception):\n \"\"\"\n Implement custom error handling logic for production deployments.\n \"\"\"\n # Don't catch exceptions when in debug mode\n if settings.DEBUG:\n return\n\n # Cleanly handle exceptions that occur from REST API requests\n if is_api_request(request):\n return rest_api_server_error(request)\n\n # Ignore Http404s (defer to Django's built-in 404 handling)\n if isinstance(exception, Http404):\n return\n\n # Determine the type of exception. If it's a common issue, return a custom error page with instructions.\n custom_template = None\n if isinstance(exception, ProgrammingError):\n custom_template = 'exceptions/programming_error.html'\n elif isinstance(exception, ImportError):\n custom_template = 'exceptions/import_error.html'\n elif isinstance(exception, PermissionError):\n custom_template = 'exceptions/permission_error.html'\n\n # Return a custom error message, or fall back to Django's default 500 error handling\n if custom_template:\n return handler_500(request, template_name=custom_template)\n\n\nclass RemoteUserMiddleware(RemoteUserMiddleware_):\n \"\"\"\n Custom implementation of Django's RemoteUserMiddleware which allows for a user-configurable HTTP header name.\n \"\"\"\n force_logout_if_no_header = False\n\n @property\n def header(self):\n return settings.REMOTE_AUTH_HEADER\n\n def process_request(self, request):\n logger = logging.getLogger(\n 'netbox.authentication.RemoteUserMiddleware')\n # Bypass middleware if remote authentication is not enabled\n if not settings.REMOTE_AUTH_ENABLED:\n return\n # AuthenticationMiddleware is required so that request.user exists.\n if not hasattr(request, 'user'):\n raise ImproperlyConfigured(\n \"The Django remote user auth middleware requires the\"\n \" authentication middleware to be installed. 
Edit your\"\n \" MIDDLEWARE setting to insert\"\n \" 'django.contrib.auth.middleware.AuthenticationMiddleware'\"\n \" before the RemoteUserMiddleware class.\")\n try:\n username = request.META[self.header]\n except KeyError:\n # If specified header doesn't exist then remove any existing\n # authenticated remote-user, or return (leaving request.user set to\n # AnonymousUser by the AuthenticationMiddleware).\n if self.force_logout_if_no_header and request.user.is_authenticated:\n self._remove_invalid_user(request)\n return\n # If the user is already authenticated and that user is the user we are\n # getting passed in the headers, then the correct user is already\n # persisted in the session and we don't need to continue.\n if request.user.is_authenticated:\n if request.user.get_username() == self.clean_username(username, request):\n return\n else:\n # An authenticated user is associated with the request, but\n # it does not match the authorized user in the header.\n self._remove_invalid_user(request)\n\n # We are seeing this user for the first time in this session, attempt\n # to authenticate the user.\n if settings.REMOTE_AUTH_GROUP_SYNC_ENABLED:\n logger.debug(\"Trying to sync Groups\")\n user = auth.authenticate(\n request, remote_user=username, remote_groups=self._get_groups(request))\n else:\n user = auth.authenticate(request, remote_user=username)\n if user:\n # User is valid.\n # Update the User's Profile if set by request headers\n if settings.REMOTE_AUTH_USER_FIRST_NAME in request.META:\n user.first_name = request.META[settings.REMOTE_AUTH_USER_FIRST_NAME]\n if settings.REMOTE_AUTH_USER_LAST_NAME in request.META:\n user.last_name = request.META[settings.REMOTE_AUTH_USER_LAST_NAME]\n if settings.REMOTE_AUTH_USER_EMAIL in request.META:\n user.email = request.META[settings.REMOTE_AUTH_USER_EMAIL]\n user.save()\n\n # Set request.user and persist user in the session\n # by logging the user in.\n request.user = user\n auth.login(request, user)\n\n def _get_groups(self, request):\n logger = logging.getLogger(\n 'netbox.authentication.RemoteUserMiddleware')\n\n groups_string = request.META.get(\n settings.REMOTE_AUTH_GROUP_HEADER, None)\n if groups_string:\n groups = groups_string.split(settings.REMOTE_AUTH_GROUP_SEPARATOR)\n else:\n groups = []\n logger.debug(f\"Groups are {groups}\")\n return groups\n\n\nclass MaintenanceModeMiddleware:\n \"\"\"\n Middleware that checks if the application is in maintenance mode\n and restricts write-related operations to the database.\n \"\"\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n if get_config().MAINTENANCE_MODE:\n self._set_session_type(\n allow_write=request.path_info.startswith(settings.MAINTENANCE_EXEMPT_PATHS)\n )\n\n return self.get_response(request)\n\n @staticmethod\n def _set_session_type(allow_write):\n \"\"\"\n Prevent any write-related database operations.\n\n Args:\n allow_write (bool): If True, write operations will be permitted.\n \"\"\"\n with connection.cursor() as cursor:\n mode = 'READ WRITE' if allow_write else 'READ ONLY'\n cursor.execute(f'SET SESSION CHARACTERISTICS AS TRANSACTION {mode};')\n\n def process_exception(self, request, exception):\n \"\"\"\n Prevent any write-related database operations if an exception is raised.\n \"\"\"\n if get_config().MAINTENANCE_MODE and isinstance(exception, InternalError):\n error_message = 'NetBox is currently operating in maintenance mode and is unable to perform write ' \\\n 'operations. 
Please try again later.'\n\n if is_api_request(request):\n return rest_api_server_error(request, error=error_message)\n\n messages.error(request, error_message)\n return HttpResponseRedirect(request.path_info)\n", "path": "netbox/netbox/middleware.py"}]}
| 2,625 | 155 |
gh_patches_debug_4719
|
rasdani/github-patches
|
git_diff
|
OpenNMT__OpenNMT-py-946
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
valid_batch_size
In the train_single.py file , lines 120 to 124
```
def train_iter_fct(): return build_dataset_iter(
lazily_load_dataset("train", opt), fields, opt)
def valid_iter_fct(): return build_dataset_iter(
lazily_load_dataset("valid", opt), fields)
```
should be changed to:
```
def train_iter_fct(): return build_dataset_iter(
lazily_load_dataset("train", opt), fields, opt)
def valid_iter_fct(): return build_dataset_iter(
lazily_load_dataset("valid", opt), fields, opt, is_train=False)
```
If this change is not made, the `valid_batch_size` option will not be used.
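For reference, the batch size used by the iterator follows the `is_train` flag, so leaving it at the default `True` makes the validation iterator silently reuse `opt.batch_size`. Roughly, as an illustrative paraphrase rather than the verbatim OpenNMT-py source:

```python
# Illustrative paraphrase of the relevant selection logic, not the real function.
def _select_batch_size(opt, is_train=True):
    return opt.batch_size if is_train else opt.valid_batch_size
```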
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `onmt/train_single.py`
Content:
```
1 #!/usr/bin/env python
2 """
3 Training on a single process
4 """
5 from __future__ import division
6
7 import argparse
8 import os
9 import random
10 import torch
11
12 import onmt.opts as opts
13
14 from onmt.inputters.inputter import build_dataset_iter, lazily_load_dataset, \
15 _load_fields, _collect_report_features
16 from onmt.model_builder import build_model
17 from onmt.utils.optimizers import build_optim
18 from onmt.trainer import build_trainer
19 from onmt.models import build_model_saver
20 from onmt.utils.logging import init_logger, logger
21
22
23 def _check_save_model_path(opt):
24 save_model_path = os.path.abspath(opt.save_model)
25 model_dirname = os.path.dirname(save_model_path)
26 if not os.path.exists(model_dirname):
27 os.makedirs(model_dirname)
28
29
30 def _tally_parameters(model):
31 n_params = sum([p.nelement() for p in model.parameters()])
32 enc = 0
33 dec = 0
34 for name, param in model.named_parameters():
35 if 'encoder' in name:
36 enc += param.nelement()
37 elif 'decoder' or 'generator' in name:
38 dec += param.nelement()
39 return n_params, enc, dec
40
41
42 def training_opt_postprocessing(opt):
43 if opt.word_vec_size != -1:
44 opt.src_word_vec_size = opt.word_vec_size
45 opt.tgt_word_vec_size = opt.word_vec_size
46
47 if opt.layers != -1:
48 opt.enc_layers = opt.layers
49 opt.dec_layers = opt.layers
50
51 opt.brnn = (opt.encoder_type == "brnn")
52
53 if opt.rnn_type == "SRU" and not opt.gpuid:
54 raise AssertionError("Using SRU requires -gpuid set.")
55
56 if torch.cuda.is_available() and not opt.gpuid:
57 logger.info("WARNING: You have a CUDA device, should run with -gpuid")
58
59 if opt.gpuid:
60 torch.cuda.set_device(opt.device_id)
61 if opt.seed > 0:
62 # this one is needed for torchtext random call (shuffled iterator)
63 # in multi gpu it ensures datasets are read in the same order
64 random.seed(opt.seed)
65 # These ensure same initialization in multi gpu mode
66 torch.manual_seed(opt.seed)
67 torch.cuda.manual_seed(opt.seed)
68
69 return opt
70
71
72 def main(opt):
73 opt = training_opt_postprocessing(opt)
74 init_logger(opt.log_file)
75 # Load checkpoint if we resume from a previous training.
76 if opt.train_from:
77 logger.info('Loading checkpoint from %s' % opt.train_from)
78 checkpoint = torch.load(opt.train_from,
79 map_location=lambda storage, loc: storage)
80 model_opt = checkpoint['opt']
81 else:
82 checkpoint = None
83 model_opt = opt
84
85 # Peek the first dataset to determine the data_type.
86 # (All datasets have the same data_type).
87 first_dataset = next(lazily_load_dataset("train", opt))
88 data_type = first_dataset.data_type
89
90 # Load fields generated from preprocess phase.
91 fields = _load_fields(first_dataset, data_type, opt, checkpoint)
92
93 # Report src/tgt features.
94
95 src_features, tgt_features = _collect_report_features(fields)
96 for j, feat in enumerate(src_features):
97 logger.info(' * src feature %d size = %d'
98 % (j, len(fields[feat].vocab)))
99 for j, feat in enumerate(tgt_features):
100 logger.info(' * tgt feature %d size = %d'
101 % (j, len(fields[feat].vocab)))
102
103 # Build model.
104 model = build_model(model_opt, opt, fields, checkpoint)
105 n_params, enc, dec = _tally_parameters(model)
106 logger.info('encoder: %d' % enc)
107 logger.info('decoder: %d' % dec)
108 logger.info('* number of parameters: %d' % n_params)
109 _check_save_model_path(opt)
110
111 # Build optimizer.
112 optim = build_optim(model, opt, checkpoint)
113
114 # Build model saver
115 model_saver = build_model_saver(model_opt, opt, model, fields, optim)
116
117 trainer = build_trainer(
118 opt, model, fields, optim, data_type, model_saver=model_saver)
119
120 def train_iter_fct(): return build_dataset_iter(
121 lazily_load_dataset("train", opt), fields, opt)
122
123 def valid_iter_fct(): return build_dataset_iter(
124 lazily_load_dataset("valid", opt), fields, opt)
125
126 # Do training.
127 trainer.train(train_iter_fct, valid_iter_fct, opt.train_steps,
128 opt.valid_steps)
129
130 if opt.tensorboard:
131 trainer.report_manager.tensorboard_writer.close()
132
133
134 if __name__ == "__main__":
135 parser = argparse.ArgumentParser(
136 description='train.py',
137 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
138
139 opts.add_md_help_argument(parser)
140 opts.model_opts(parser)
141 opts.train_opts(parser)
142
143 opt = parser.parse_args()
144 main(opt)
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/onmt/train_single.py b/onmt/train_single.py
--- a/onmt/train_single.py
+++ b/onmt/train_single.py
@@ -121,7 +121,7 @@
lazily_load_dataset("train", opt), fields, opt)
def valid_iter_fct(): return build_dataset_iter(
- lazily_load_dataset("valid", opt), fields, opt)
+ lazily_load_dataset("valid", opt), fields, opt, is_train=False)
# Do training.
trainer.train(train_iter_fct, valid_iter_fct, opt.train_steps,
|
{"golden_diff": "diff --git a/onmt/train_single.py b/onmt/train_single.py\n--- a/onmt/train_single.py\n+++ b/onmt/train_single.py\n@@ -121,7 +121,7 @@\n lazily_load_dataset(\"train\", opt), fields, opt)\n \n def valid_iter_fct(): return build_dataset_iter(\n- lazily_load_dataset(\"valid\", opt), fields, opt)\n+ lazily_load_dataset(\"valid\", opt), fields, opt, is_train=False)\n \n # Do training.\n trainer.train(train_iter_fct, valid_iter_fct, opt.train_steps,\n", "issue": "valid_batch_size\nIn the train_single.py file , lines 120 to 124\r\n```\r\ndef train_iter_fct(): return build_dataset_iter(\r\n lazily_load_dataset(\"train\", opt), fields, opt)\r\n\r\n def valid_iter_fct(): return build_dataset_iter(\r\n lazily_load_dataset(\"valid\", opt), fields)\r\n```\r\nshould be changed\r\n```\r\ndef train_iter_fct(): return build_dataset_iter(\r\n lazily_load_dataset(\"train\", opt), fields, opt)\r\n\r\n def valid_iter_fct(): return build_dataset_iter(\r\n lazily_load_dataset(\"valid\", opt), fields, opt, is_train=False)\r\n```\r\nIf it doesn't, it will not use `valid_batch_size`.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\n Training on a single process\n\"\"\"\nfrom __future__ import division\n\nimport argparse\nimport os\nimport random\nimport torch\n\nimport onmt.opts as opts\n\nfrom onmt.inputters.inputter import build_dataset_iter, lazily_load_dataset, \\\n _load_fields, _collect_report_features\nfrom onmt.model_builder import build_model\nfrom onmt.utils.optimizers import build_optim\nfrom onmt.trainer import build_trainer\nfrom onmt.models import build_model_saver\nfrom onmt.utils.logging import init_logger, logger\n\n\ndef _check_save_model_path(opt):\n save_model_path = os.path.abspath(opt.save_model)\n model_dirname = os.path.dirname(save_model_path)\n if not os.path.exists(model_dirname):\n os.makedirs(model_dirname)\n\n\ndef _tally_parameters(model):\n n_params = sum([p.nelement() for p in model.parameters()])\n enc = 0\n dec = 0\n for name, param in model.named_parameters():\n if 'encoder' in name:\n enc += param.nelement()\n elif 'decoder' or 'generator' in name:\n dec += param.nelement()\n return n_params, enc, dec\n\n\ndef training_opt_postprocessing(opt):\n if opt.word_vec_size != -1:\n opt.src_word_vec_size = opt.word_vec_size\n opt.tgt_word_vec_size = opt.word_vec_size\n\n if opt.layers != -1:\n opt.enc_layers = opt.layers\n opt.dec_layers = opt.layers\n\n opt.brnn = (opt.encoder_type == \"brnn\")\n\n if opt.rnn_type == \"SRU\" and not opt.gpuid:\n raise AssertionError(\"Using SRU requires -gpuid set.\")\n\n if torch.cuda.is_available() and not opt.gpuid:\n logger.info(\"WARNING: You have a CUDA device, should run with -gpuid\")\n\n if opt.gpuid:\n torch.cuda.set_device(opt.device_id)\n if opt.seed > 0:\n # this one is needed for torchtext random call (shuffled iterator)\n # in multi gpu it ensures datasets are read in the same order\n random.seed(opt.seed)\n # These ensure same initialization in multi gpu mode\n torch.manual_seed(opt.seed)\n torch.cuda.manual_seed(opt.seed)\n\n return opt\n\n\ndef main(opt):\n opt = training_opt_postprocessing(opt)\n init_logger(opt.log_file)\n # Load checkpoint if we resume from a previous training.\n if opt.train_from:\n logger.info('Loading checkpoint from %s' % opt.train_from)\n checkpoint = torch.load(opt.train_from,\n map_location=lambda storage, loc: storage)\n model_opt = checkpoint['opt']\n else:\n checkpoint = None\n model_opt = opt\n\n # Peek the first dataset to determine the data_type.\n # (All datasets 
have the same data_type).\n first_dataset = next(lazily_load_dataset(\"train\", opt))\n data_type = first_dataset.data_type\n\n # Load fields generated from preprocess phase.\n fields = _load_fields(first_dataset, data_type, opt, checkpoint)\n\n # Report src/tgt features.\n\n src_features, tgt_features = _collect_report_features(fields)\n for j, feat in enumerate(src_features):\n logger.info(' * src feature %d size = %d'\n % (j, len(fields[feat].vocab)))\n for j, feat in enumerate(tgt_features):\n logger.info(' * tgt feature %d size = %d'\n % (j, len(fields[feat].vocab)))\n\n # Build model.\n model = build_model(model_opt, opt, fields, checkpoint)\n n_params, enc, dec = _tally_parameters(model)\n logger.info('encoder: %d' % enc)\n logger.info('decoder: %d' % dec)\n logger.info('* number of parameters: %d' % n_params)\n _check_save_model_path(opt)\n\n # Build optimizer.\n optim = build_optim(model, opt, checkpoint)\n\n # Build model saver\n model_saver = build_model_saver(model_opt, opt, model, fields, optim)\n\n trainer = build_trainer(\n opt, model, fields, optim, data_type, model_saver=model_saver)\n\n def train_iter_fct(): return build_dataset_iter(\n lazily_load_dataset(\"train\", opt), fields, opt)\n\n def valid_iter_fct(): return build_dataset_iter(\n lazily_load_dataset(\"valid\", opt), fields, opt)\n\n # Do training.\n trainer.train(train_iter_fct, valid_iter_fct, opt.train_steps,\n opt.valid_steps)\n\n if opt.tensorboard:\n trainer.report_manager.tensorboard_writer.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='train.py',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n opts.add_md_help_argument(parser)\n opts.model_opts(parser)\n opts.train_opts(parser)\n\n opt = parser.parse_args()\n main(opt)\n", "path": "onmt/train_single.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\n Training on a single process\n\"\"\"\nfrom __future__ import division\n\nimport argparse\nimport os\nimport random\nimport torch\n\nimport onmt.opts as opts\n\nfrom onmt.inputters.inputter import build_dataset_iter, lazily_load_dataset, \\\n _load_fields, _collect_report_features\nfrom onmt.model_builder import build_model\nfrom onmt.utils.optimizers import build_optim\nfrom onmt.trainer import build_trainer\nfrom onmt.models import build_model_saver\nfrom onmt.utils.logging import init_logger, logger\n\n\ndef _check_save_model_path(opt):\n save_model_path = os.path.abspath(opt.save_model)\n model_dirname = os.path.dirname(save_model_path)\n if not os.path.exists(model_dirname):\n os.makedirs(model_dirname)\n\n\ndef _tally_parameters(model):\n n_params = sum([p.nelement() for p in model.parameters()])\n enc = 0\n dec = 0\n for name, param in model.named_parameters():\n if 'encoder' in name:\n enc += param.nelement()\n elif 'decoder' or 'generator' in name:\n dec += param.nelement()\n return n_params, enc, dec\n\n\ndef training_opt_postprocessing(opt):\n if opt.word_vec_size != -1:\n opt.src_word_vec_size = opt.word_vec_size\n opt.tgt_word_vec_size = opt.word_vec_size\n\n if opt.layers != -1:\n opt.enc_layers = opt.layers\n opt.dec_layers = opt.layers\n\n opt.brnn = (opt.encoder_type == \"brnn\")\n\n if opt.rnn_type == \"SRU\" and not opt.gpuid:\n raise AssertionError(\"Using SRU requires -gpuid set.\")\n\n if torch.cuda.is_available() and not opt.gpuid:\n logger.info(\"WARNING: You have a CUDA device, should run with -gpuid\")\n\n if opt.gpuid:\n torch.cuda.set_device(opt.device_id)\n if opt.seed > 0:\n # this one is needed for 
torchtext random call (shuffled iterator)\n # in multi gpu it ensures datasets are read in the same order\n random.seed(opt.seed)\n # These ensure same initialization in multi gpu mode\n torch.manual_seed(opt.seed)\n torch.cuda.manual_seed(opt.seed)\n\n return opt\n\n\ndef main(opt):\n opt = training_opt_postprocessing(opt)\n init_logger(opt.log_file)\n # Load checkpoint if we resume from a previous training.\n if opt.train_from:\n logger.info('Loading checkpoint from %s' % opt.train_from)\n checkpoint = torch.load(opt.train_from,\n map_location=lambda storage, loc: storage)\n model_opt = checkpoint['opt']\n else:\n checkpoint = None\n model_opt = opt\n\n # Peek the first dataset to determine the data_type.\n # (All datasets have the same data_type).\n first_dataset = next(lazily_load_dataset(\"train\", opt))\n data_type = first_dataset.data_type\n\n # Load fields generated from preprocess phase.\n fields = _load_fields(first_dataset, data_type, opt, checkpoint)\n\n # Report src/tgt features.\n\n src_features, tgt_features = _collect_report_features(fields)\n for j, feat in enumerate(src_features):\n logger.info(' * src feature %d size = %d'\n % (j, len(fields[feat].vocab)))\n for j, feat in enumerate(tgt_features):\n logger.info(' * tgt feature %d size = %d'\n % (j, len(fields[feat].vocab)))\n\n # Build model.\n model = build_model(model_opt, opt, fields, checkpoint)\n n_params, enc, dec = _tally_parameters(model)\n logger.info('encoder: %d' % enc)\n logger.info('decoder: %d' % dec)\n logger.info('* number of parameters: %d' % n_params)\n _check_save_model_path(opt)\n\n # Build optimizer.\n optim = build_optim(model, opt, checkpoint)\n\n # Build model saver\n model_saver = build_model_saver(model_opt, opt, model, fields, optim)\n\n trainer = build_trainer(\n opt, model, fields, optim, data_type, model_saver=model_saver)\n\n def train_iter_fct(): return build_dataset_iter(\n lazily_load_dataset(\"train\", opt), fields, opt)\n\n def valid_iter_fct(): return build_dataset_iter(\n lazily_load_dataset(\"valid\", opt), fields, opt, is_train=False)\n\n # Do training.\n trainer.train(train_iter_fct, valid_iter_fct, opt.train_steps,\n opt.valid_steps)\n\n if opt.tensorboard:\n trainer.report_manager.tensorboard_writer.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='train.py',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n opts.add_md_help_argument(parser)\n opts.model_opts(parser)\n opts.train_opts(parser)\n\n opt = parser.parse_args()\n main(opt)\n", "path": "onmt/train_single.py"}]}
| 1,830 | 130 |
gh_patches_debug_38208
|
rasdani/github-patches
|
git_diff
|
lk-geimfari__mimesis-772
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Restructure Numbers provider
# Feature request
## Thesis
While I was implementing the ``matrix()`` function in the ``Numbers`` provider, I was thinking about some changes that we could make in this provider:

- Add a function ``complex(start, end, length)`` that returns a random array of complex numbers
- Make the API uniform, so that every function in the ``Numbers`` provider has the arguments ``start, end, length`` (where possible). Maybe in the ``complex()`` function we can add ``start_real, end_real, start_imaginary, end_imaginary``?
- Remove the function ``rating()`` and add an argument ``decimal_digits`` in the function ``floats()`` to specify the number of decimal digits to keep.
## Reasoning
I think these changes would make the provider more uniform and easy to use.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mimesis/providers/numbers.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Provides data related to numbers."""
4
5 from typing import List, Union
6
7 from mimesis.providers.base import BaseProvider
8
9 __all__ = ['Numbers']
10
11
12 class Numbers(BaseProvider):
13 """Class for generating numbers."""
14
15 class Meta:
16 """Class for metadata."""
17
18 name = 'numbers'
19
20 def floats(self, n: int = 2) -> List[float]:
21 """Generate a list of random float numbers.
22
23 :param n: Raise 10 to the 'n' power.
24 :return: The list of floating-point numbers.
25 """
26 nums = [self.random.random()
27 for _ in range(10 ** int(n))]
28 return nums
29
30 def integers(self, start: int = 0, end: int = 10,
31 length: int = 10) -> List[int]:
32 """Generate a list of random integers.
33
34 Integers can be negative or positive numbers.
35 .. note: You can use both positive and negative numbers.
36
37 :param start: Start.
38 :param end: End.
39 :param length: Length of list.
40 :return: List of integers.
41
42 :Example:
43 [-20, -19, -18, -17]
44 """
45 return self.random.randints(
46 length, start, end)
47
48 @staticmethod
49 def primes(start: int = 1, end: int = 999) -> List[int]:
50 """Generate a list of prime numbers.
51
52 :param start: First value of range.
53 :param end: Last value of range.
54 :return: A list of prime numbers from start to end.
55 """
56 # TODO: It should generate random primes with passed length.
57 sieve_size = (end // 2 - 1) if end % 2 == 0 else (end // 2)
58 sieve = [True] * sieve_size
59
60 primes = [] # list of primes
61 # add 2 to the list if it's in the given range
62 if end >= 2:
63 primes.append(2)
64 for i in range(sieve_size):
65 if sieve[i]:
66 value_at_i = i * 2 + 3
67 primes.append(value_at_i)
68 for j in range(i, sieve_size, value_at_i):
69 sieve[j] = False
70
71 chop_index = 0
72 for i in range(len(primes)):
73 if primes[i] >= start:
74 chop_index = i
75 break
76 return primes[chop_index:]
77
78 def digit(self, to_bin: bool = False) -> Union[str, int]:
79 """Get a random digit.
80
81 :param to_bin: If True then convert to binary.
82 :return: Digit.
83
84 :Example:
85 4.
86 """
87 digit = self.random.randint(0, 9)
88
89 if to_bin:
90 return bin(digit)
91
92 return digit
93
94 def between(self, minimum: int = 1, maximum: int = 1000) -> int:
95 """Generate a random number between minimum and maximum.
96
97 :param minimum: Minimum of range.
98 :param maximum: Maximum of range.
99 :return: Number.
100 """
101 return self.random.randint(minimum, maximum)
102
103 def rating(self, maximum: float = 5.0) -> float:
104 """Generate a random rating for something.
105
106 :param maximum: Maximum value (default is 5.0).
107 :return: Rating.
108
109 :Example:
110 4.7
111 """
112 return self.random.uniform(0, maximum, 1)
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mimesis/providers/numbers.py b/mimesis/providers/numbers.py
--- a/mimesis/providers/numbers.py
+++ b/mimesis/providers/numbers.py
@@ -17,18 +17,20 @@
name = 'numbers'
- def floats(self, n: int = 2) -> List[float]:
+ def floats(self, start: float = 0, end: float = 1, n: int = 10,
+ rounding: int = 15) -> List[float]:
"""Generate a list of random float numbers.
- :param n: Raise 10 to the 'n' power.
+ :param start: Start range.
+ :param end: End range.
+ :param n: Length of the list.
+ :param rounding: Max number of decimal digits.
:return: The list of floating-point numbers.
"""
- nums = [self.random.random()
- for _ in range(10 ** int(n))]
- return nums
+ return [self.random.uniform(start, end, rounding) for _ in range(n)]
def integers(self, start: int = 0, end: int = 10,
- length: int = 10) -> List[int]:
+ n: int = 10) -> List[int]:
"""Generate a list of random integers.
Integers can be negative or positive numbers.
@@ -36,14 +38,33 @@
:param start: Start.
:param end: End.
- :param length: Length of list.
+ :param n: Length of list.
:return: List of integers.
:Example:
[-20, -19, -18, -17]
"""
- return self.random.randints(
- length, start, end)
+ return self.random.randints(n, start, end)
+
+ def complexes(self, start_real: float = 0, end_real: float = 1,
+ start_imag: float = 0, end_imag: float = 1,
+ rounding_real: int = 15, rounding_imag: int = 15,
+ n: int = 10) -> List[complex]:
+ """Generate a list of random complex numbers.
+
+ :param start_real: Start real range.
+ :param end_real: End real range.
+ :param start_imag: Start imaginary range.
+ :param end_imag: End imaginary range.
+ :param rounding_real: Rounding real part.
+ :param rounding_imag: Roungind imaginary part.
+ :param n: Length of the list.
+ :return: A list of random complex numbers.
+ """
+ return [
+ complex(self.random.uniform(start_real, end_real, rounding_real),
+ self.random.uniform(start_imag, end_imag, rounding_imag))
+ for _ in range(n)]
@staticmethod
def primes(start: int = 1, end: int = 999) -> List[int]:
@@ -99,14 +120,3 @@
:return: Number.
"""
return self.random.randint(minimum, maximum)
-
- def rating(self, maximum: float = 5.0) -> float:
- """Generate a random rating for something.
-
- :param maximum: Maximum value (default is 5.0).
- :return: Rating.
-
- :Example:
- 4.7
- """
- return self.random.uniform(0, maximum, 1)
|
{"golden_diff": "diff --git a/mimesis/providers/numbers.py b/mimesis/providers/numbers.py\n--- a/mimesis/providers/numbers.py\n+++ b/mimesis/providers/numbers.py\n@@ -17,18 +17,20 @@\n \n name = 'numbers'\n \n- def floats(self, n: int = 2) -> List[float]:\n+ def floats(self, start: float = 0, end: float = 1, n: int = 10,\n+ rounding: int = 15) -> List[float]:\n \"\"\"Generate a list of random float numbers.\n \n- :param n: Raise 10 to the 'n' power.\n+ :param start: Start range.\n+ :param end: End range.\n+ :param n: Length of the list.\n+ :param rounding: Max number of decimal digits.\n :return: The list of floating-point numbers.\n \"\"\"\n- nums = [self.random.random()\n- for _ in range(10 ** int(n))]\n- return nums\n+ return [self.random.uniform(start, end, rounding) for _ in range(n)]\n \n def integers(self, start: int = 0, end: int = 10,\n- length: int = 10) -> List[int]:\n+ n: int = 10) -> List[int]:\n \"\"\"Generate a list of random integers.\n \n Integers can be negative or positive numbers.\n@@ -36,14 +38,33 @@\n \n :param start: Start.\n :param end: End.\n- :param length: Length of list.\n+ :param n: Length of list.\n :return: List of integers.\n \n :Example:\n [-20, -19, -18, -17]\n \"\"\"\n- return self.random.randints(\n- length, start, end)\n+ return self.random.randints(n, start, end)\n+\n+ def complexes(self, start_real: float = 0, end_real: float = 1,\n+ start_imag: float = 0, end_imag: float = 1,\n+ rounding_real: int = 15, rounding_imag: int = 15,\n+ n: int = 10) -> List[complex]:\n+ \"\"\"Generate a list of random complex numbers.\n+\n+ :param start_real: Start real range.\n+ :param end_real: End real range.\n+ :param start_imag: Start imaginary range.\n+ :param end_imag: End imaginary range.\n+ :param rounding_real: Rounding real part.\n+ :param rounding_imag: Roungind imaginary part.\n+ :param n: Length of the list.\n+ :return: A list of random complex numbers.\n+ \"\"\"\n+ return [\n+ complex(self.random.uniform(start_real, end_real, rounding_real),\n+ self.random.uniform(start_imag, end_imag, rounding_imag))\n+ for _ in range(n)]\n \n @staticmethod\n def primes(start: int = 1, end: int = 999) -> List[int]:\n@@ -99,14 +120,3 @@\n :return: Number.\n \"\"\"\n return self.random.randint(minimum, maximum)\n-\n- def rating(self, maximum: float = 5.0) -> float:\n- \"\"\"Generate a random rating for something.\n-\n- :param maximum: Maximum value (default is 5.0).\n- :return: Rating.\n-\n- :Example:\n- 4.7\n- \"\"\"\n- return self.random.uniform(0, maximum, 1)\n", "issue": "Restructure Numbers provider\n# Feature request\r\n\r\n## Thesis\r\n\r\nWhile I was implementing the ``matrix()`` function in the ``Numbers`` provider and I was thinking about some changes that we could make in this provider:\r\n\r\n- Add a function ``complex(start, end, length)`` that return a random array of complex numbers\r\n- Make the API uniform, so that every function in the ``Numbers`` provider has the arguments ``start, end, length`` (where possible). 
Maybe in the ``complex()`` function we can add ``start_real, end_real, start_imaginary, end_imaginary`` ?\r\n- Remove the function ``ranting()`` and add an argument ``decimal_digits`` in the function ``floats()`` to specify the number of decimal digits to keep.\r\n\r\n## Reasoning\r\n\r\nI think these changes would make the provider more uniform and easy to use.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Provides data related to numbers.\"\"\"\n\nfrom typing import List, Union\n\nfrom mimesis.providers.base import BaseProvider\n\n__all__ = ['Numbers']\n\n\nclass Numbers(BaseProvider):\n \"\"\"Class for generating numbers.\"\"\"\n\n class Meta:\n \"\"\"Class for metadata.\"\"\"\n\n name = 'numbers'\n\n def floats(self, n: int = 2) -> List[float]:\n \"\"\"Generate a list of random float numbers.\n\n :param n: Raise 10 to the 'n' power.\n :return: The list of floating-point numbers.\n \"\"\"\n nums = [self.random.random()\n for _ in range(10 ** int(n))]\n return nums\n\n def integers(self, start: int = 0, end: int = 10,\n length: int = 10) -> List[int]:\n \"\"\"Generate a list of random integers.\n\n Integers can be negative or positive numbers.\n .. note: You can use both positive and negative numbers.\n\n :param start: Start.\n :param end: End.\n :param length: Length of list.\n :return: List of integers.\n\n :Example:\n [-20, -19, -18, -17]\n \"\"\"\n return self.random.randints(\n length, start, end)\n\n @staticmethod\n def primes(start: int = 1, end: int = 999) -> List[int]:\n \"\"\"Generate a list of prime numbers.\n\n :param start: First value of range.\n :param end: Last value of range.\n :return: A list of prime numbers from start to end.\n \"\"\"\n # TODO: It should generate random primes with passed length.\n sieve_size = (end // 2 - 1) if end % 2 == 0 else (end // 2)\n sieve = [True] * sieve_size\n\n primes = [] # list of primes\n # add 2 to the list if it's in the given range\n if end >= 2:\n primes.append(2)\n for i in range(sieve_size):\n if sieve[i]:\n value_at_i = i * 2 + 3\n primes.append(value_at_i)\n for j in range(i, sieve_size, value_at_i):\n sieve[j] = False\n\n chop_index = 0\n for i in range(len(primes)):\n if primes[i] >= start:\n chop_index = i\n break\n return primes[chop_index:]\n\n def digit(self, to_bin: bool = False) -> Union[str, int]:\n \"\"\"Get a random digit.\n\n :param to_bin: If True then convert to binary.\n :return: Digit.\n\n :Example:\n 4.\n \"\"\"\n digit = self.random.randint(0, 9)\n\n if to_bin:\n return bin(digit)\n\n return digit\n\n def between(self, minimum: int = 1, maximum: int = 1000) -> int:\n \"\"\"Generate a random number between minimum and maximum.\n\n :param minimum: Minimum of range.\n :param maximum: Maximum of range.\n :return: Number.\n \"\"\"\n return self.random.randint(minimum, maximum)\n\n def rating(self, maximum: float = 5.0) -> float:\n \"\"\"Generate a random rating for something.\n\n :param maximum: Maximum value (default is 5.0).\n :return: Rating.\n\n :Example:\n 4.7\n \"\"\"\n return self.random.uniform(0, maximum, 1)\n", "path": "mimesis/providers/numbers.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Provides data related to numbers.\"\"\"\n\nfrom typing import List, Union\n\nfrom mimesis.providers.base import BaseProvider\n\n__all__ = ['Numbers']\n\n\nclass Numbers(BaseProvider):\n \"\"\"Class for generating numbers.\"\"\"\n\n class Meta:\n \"\"\"Class for metadata.\"\"\"\n\n name = 'numbers'\n\n def floats(self, start: float = 0, end: float = 1, n: int = 10,\n 
rounding: int = 15) -> List[float]:\n \"\"\"Generate a list of random float numbers.\n\n :param start: Start range.\n :param end: End range.\n :param n: Length of the list.\n :param rounding: Max number of decimal digits.\n :return: The list of floating-point numbers.\n \"\"\"\n return [self.random.uniform(start, end, rounding) for _ in range(n)]\n\n def integers(self, start: int = 0, end: int = 10,\n n: int = 10) -> List[int]:\n \"\"\"Generate a list of random integers.\n\n Integers can be negative or positive numbers.\n .. note: You can use both positive and negative numbers.\n\n :param start: Start.\n :param end: End.\n :param n: Length of list.\n :return: List of integers.\n\n :Example:\n [-20, -19, -18, -17]\n \"\"\"\n return self.random.randints(n, start, end)\n\n def complexes(self, start_real: float = 0, end_real: float = 1,\n start_imag: float = 0, end_imag: float = 1,\n rounding_real: int = 15, rounding_imag: int = 15,\n n: int = 10) -> List[complex]:\n \"\"\"Generate a list of random complex numbers.\n\n :param start_real: Start real range.\n :param end_real: End real range.\n :param start_imag: Start imaginary range.\n :param end_imag: End imaginary range.\n :param rounding_real: Rounding real part.\n :param rounding_imag: Roungind imaginary part.\n :param n: Length of the list.\n :return: A list of random complex numbers.\n \"\"\"\n return [\n complex(self.random.uniform(start_real, end_real, rounding_real),\n self.random.uniform(start_imag, end_imag, rounding_imag))\n for _ in range(n)]\n\n @staticmethod\n def primes(start: int = 1, end: int = 999) -> List[int]:\n \"\"\"Generate a list of prime numbers.\n\n :param start: First value of range.\n :param end: Last value of range.\n :return: A list of prime numbers from start to end.\n \"\"\"\n # TODO: It should generate random primes with passed length.\n sieve_size = (end // 2 - 1) if end % 2 == 0 else (end // 2)\n sieve = [True] * sieve_size\n\n primes = [] # list of primes\n # add 2 to the list if it's in the given range\n if end >= 2:\n primes.append(2)\n for i in range(sieve_size):\n if sieve[i]:\n value_at_i = i * 2 + 3\n primes.append(value_at_i)\n for j in range(i, sieve_size, value_at_i):\n sieve[j] = False\n\n chop_index = 0\n for i in range(len(primes)):\n if primes[i] >= start:\n chop_index = i\n break\n return primes[chop_index:]\n\n def digit(self, to_bin: bool = False) -> Union[str, int]:\n \"\"\"Get a random digit.\n\n :param to_bin: If True then convert to binary.\n :return: Digit.\n\n :Example:\n 4.\n \"\"\"\n digit = self.random.randint(0, 9)\n\n if to_bin:\n return bin(digit)\n\n return digit\n\n def between(self, minimum: int = 1, maximum: int = 1000) -> int:\n \"\"\"Generate a random number between minimum and maximum.\n\n :param minimum: Minimum of range.\n :param maximum: Maximum of range.\n :return: Number.\n \"\"\"\n return self.random.randint(minimum, maximum)\n", "path": "mimesis/providers/numbers.py"}]}
| 1,461 | 795 |
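For reference, a minimal usage sketch of the ``Numbers`` provider after the golden diff above is applied. The method names and signatures come from that diff; the top-level import path and the printed sample calls are assumptions for illustration only.

```python
from mimesis import Numbers  # assumes this mimesis revision exposes Numbers at the package top level

numbers = Numbers()

# floats() now takes a range, a list length and a maximum number of decimal digits
print(numbers.floats(start=0, end=1, n=5, rounding=3))

# integers() keeps start/end, but the list-length argument is renamed to n
print(numbers.integers(start=-20, end=-17, n=4))

# complexes() is the new method that pairs two uniform ranges into complex numbers
print(numbers.complexes(start_real=0, end_real=1, start_imag=0, end_imag=1, n=3))
```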
gh_patches_debug_29784
|
rasdani/github-patches
|
git_diff
|
spectrochempy__spectrochempy-77
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
read_opus() shifts the xaxis
Author: @atravert (Arnaud TRAVERT)
Redmine Issue: 75, https://redmine.spectrochempy.fr/issues/75
---
A bug in the brukeropusreader module leads to a shift of the xaxis.
It has been corrected on the spectrochempy/brukeropusreader fork (PR #1, "FIX wrong setting of wavenumbers axis"),
but a change in read_opus() is also needed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `spectrochempy/core/readers/readopus.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # ======================================================================================================================
4 # Copyright (©) 2015-2020 LCS
5 # Laboratoire Catalyse et Spectrochimie, Caen, France.
6 # CeCILL-B FREE SOFTWARE LICENSE AGREEMENT
7 # See full LICENSE agreement in the root directory
8 # ======================================================================================================================
9
10 """This module to extend NDDataset with the import methods method.
11
12 """
13 __all__ = ['read_opus']
14
15 __dataset_methods__ = __all__
16
17 # ----------------------------------------------------------------------------------------------------------------------
18 # standard imports
19 # ----------------------------------------------------------------------------------------------------------------------
20
21
22 from brukeropusreader import read_file
23 from warnings import warn
24 from datetime import datetime, timezone, timedelta
25 from numpy import linspace
26
27 # ----------------------------------------------------------------------------------------------------------------------
28 # third party imports
29 # ----------------------------------------------------------------------------------------------------------------------
30 # ----------------------------------------------------------------------------------------------------------------------
31 # local imports
32 # ----------------------------------------------------------------------------------------------------------------------
33 from spectrochempy.core import debug_
34 from spectrochempy.core.dataset.nddataset import NDDataset
35 from spectrochempy.core.dataset.ndcoord import Coord
36 from spectrochempy.utils import readfilename
37
38
39 # ======================================================================================================================
40 # Public functions
41 # ======================================================================================================================
42
43 # .............................................................................
44 def read_opus(dataset=None, **kwargs):
45 """Open Bruker Opus file(s) and group them in a single dataset. Only the spectrum is
46 extracted ("AB" field). Returns an error if dimensions are incompatibles.
47
48 Parameters
49 ----------
50 filename : `None`, `str`, or list of `str`
51 Filename of the file(s) to load. If `None` : opens a dialog box to select
52 files. If `str` : a single filename. It list of str :
53 a list of filenames.
54 directory : str, optional, default="".
55 From where to read the specified filename. If not specified, read in
56 the defaults datadir.
57
58 Returns
59 -------
60 dataset : |NDDataset|
61 A dataset corresponding to the (set of) bruker file(s).
62
63 Examples
64 --------
65 >>> A = NDDataset.read_opus('irdata\\spectrum.0001')
66 >>> print(A)
67 NDDataset: [float64] a.u. (shape: (y:1, x:2568))
68 """
69 debug_("reading bruker opus files")
70
71 # filename will be given by a keyword parameter except if the first parameters is already
72 # the filename
73 filename = kwargs.get('filename', None)
74
75 # check if the first parameter is a dataset because we allow not to pass it
76 if not isinstance(dataset, NDDataset):
77 # probably did not specify a dataset
78 # so the first parameters must be the filename
79 if isinstance(dataset, (str, list)) and dataset != '':
80 filename = dataset
81
82 # check if directory was specified
83 directory = kwargs.get("directory", None)
84 sortbydate = kwargs.get("sortbydate", True)
85
86 # returns a list of files to read
87 files = readfilename(filename,
88 directory=directory,
89 filetypes=['Bruker files (*.*)',
90 'all files (*)'],
91 dictionary=False)
92 # todo: see how to use regular expression in Qt filters
93
94 if not files:
95 # there is no files, return nothing
96 return None
97
98 xaxis = None
99 intensities = []
100 names = []
101 acquisitiondates = []
102 timestamps = []
103 for file in files:
104 opus_data = read_file(file)
105 try:
106 opus_data["AB"]
107 except KeyError: # not an absorbance spectrum
108 warn("opus file {} could not be read".format(file))
109 continue
110
111 npt = opus_data['AB Data Parameter']['NPT']
112 fxv = opus_data['AB Data Parameter']['FXV']
113 lxv = opus_data['AB Data Parameter']['LXV']
114 xdata = linspace(fxv, lxv, npt)
115
116 if not xaxis:
117 xaxis = Coord(x=xdata, title='Wavenumbers', units='cm^-1')
118
119 elif (xdata != xaxis.data).any():
120 raise ValueError("spectra have incompatible dimensions (xaxis)")
121
122 intensities.append(opus_data["AB"][:npt])
123 names.append(opus_data["Sample"]['SNM'])
124 acqdate = opus_data["AB Data Parameter"]["DAT"]
125 acqtime = opus_data["AB Data Parameter"]["TIM"]
126 GMT_offset_hour = float(acqtime.split('GMT')[1].split(')')[0])
127 date_time = datetime.strptime(acqdate + '_' + acqtime.split()[0],
128 '%d/%m/%Y_%H:%M:%S.%f')
129 UTC_date_time = date_time - timedelta(hours=GMT_offset_hour)
130 UTC_date_time = UTC_date_time.replace(tzinfo=timezone.utc)
131 # Transform to timestamp for storage in the Coord object
132 # use datetime.fromtimestamp(d, timezone.utc)) to transform back to datetime
133 timestamp = UTC_date_time.timestamp()
134 acquisitiondates.append(UTC_date_time)
135 timestamps.append(timestamp)
136
137 # return if none of the files could be read:
138 if not xaxis:
139 return
140
141 yaxis = Coord(timestamps,
142 title='Acquisition timestamp (GMT)',
143 units='s',
144 labels=(acquisitiondates, names))
145
146 dataset = NDDataset(intensities)
147 dataset.set_coords(y=yaxis, x=xaxis)
148 dataset.units = 'absorbance'
149 dataset.title = 'Absorbance'
150
151 # Set origin, description and history
152 dataset.origin = "opus"
153 dataset.description = ('Dataset from opus files. \n')
154
155 if sortbydate:
156 dataset.sort(dim='y', inplace=True)
157
158 dataset.history = str(datetime.now()) + ':import from opus files \n'
159
160 # Set the NDDataset date
161 dataset._date = datetime.now()
162 dataset._modified = dataset.date
163 # debug_("end of reading")
164
165 return dataset
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/spectrochempy/core/readers/readopus.py b/spectrochempy/core/readers/readopus.py
--- a/spectrochempy/core/readers/readopus.py
+++ b/spectrochempy/core/readers/readopus.py
@@ -65,7 +65,10 @@
>>> A = NDDataset.read_opus('irdata\\spectrum.0001')
>>> print(A)
NDDataset: [float64] a.u. (shape: (y:1, x:2568))
+
+
"""
+
debug_("reading bruker opus files")
# filename will be given by a keyword parameter except if the first parameters is already
@@ -114,7 +117,7 @@
xdata = linspace(fxv, lxv, npt)
if not xaxis:
- xaxis = Coord(x=xdata, title='Wavenumbers', units='cm^-1')
+ xaxis = Coord(xdata, title='Wavenumbers', units='cm^-1')
elif (xdata != xaxis.data).any():
raise ValueError("spectra have incompatible dimensions (xaxis)")
@@ -150,16 +153,13 @@
# Set origin, description and history
dataset.origin = "opus"
- dataset.description = ('Dataset from opus files. \n')
+ dataset.description = 'Dataset from opus files. \n'
if sortbydate:
dataset.sort(dim='y', inplace=True)
- dataset.history = str(datetime.now()) + ':import from opus files \n'
-
- # Set the NDDataset date
+ dataset.history = str(datetime.now()) + ': import from opus files \n'
dataset._date = datetime.now()
dataset._modified = dataset.date
- # debug_("end of reading")
return dataset
|
{"golden_diff": "diff --git a/spectrochempy/core/readers/readopus.py b/spectrochempy/core/readers/readopus.py\n--- a/spectrochempy/core/readers/readopus.py\n+++ b/spectrochempy/core/readers/readopus.py\n@@ -65,7 +65,10 @@\n >>> A = NDDataset.read_opus('irdata\\\\spectrum.0001')\n >>> print(A)\n NDDataset: [float64] a.u. (shape: (y:1, x:2568))\n+\n+\n \"\"\"\n+\n debug_(\"reading bruker opus files\")\n \n # filename will be given by a keyword parameter except if the first parameters is already\n@@ -114,7 +117,7 @@\n xdata = linspace(fxv, lxv, npt)\n \n if not xaxis:\n- xaxis = Coord(x=xdata, title='Wavenumbers', units='cm^-1')\n+ xaxis = Coord(xdata, title='Wavenumbers', units='cm^-1')\n \n elif (xdata != xaxis.data).any():\n raise ValueError(\"spectra have incompatible dimensions (xaxis)\")\n@@ -150,16 +153,13 @@\n \n # Set origin, description and history\n dataset.origin = \"opus\"\n- dataset.description = ('Dataset from opus files. \\n')\n+ dataset.description = 'Dataset from opus files. \\n'\n \n if sortbydate:\n dataset.sort(dim='y', inplace=True)\n \n- dataset.history = str(datetime.now()) + ':import from opus files \\n'\n-\n- # Set the NDDataset date\n+ dataset.history = str(datetime.now()) + ': import from opus files \\n'\n dataset._date = datetime.now()\n dataset._modified = dataset.date\n- # debug_(\"end of reading\")\n \n return dataset\n", "issue": "read_opus() shifts the xaxis\nAuthor: @atravert (Arnaud TRAVERT)\n\nRedmine Issue: 75, https://redmine.spectrochempy.fr/issues/75\n\n---\n\nA bug in brukeropusreader module leads to a shift of the xaxis.\r\nIt has been corrected on the spectrochempy/brukeropusreader fork (PR FIX wrong setting of wavenumbers axis #1) \r\nbut a change in read_opus() is also needed.\n\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# ======================================================================================================================\n# Copyright (\u00a9) 2015-2020 LCS\n# Laboratoire Catalyse et Spectrochimie, Caen, France.\n# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT\n# See full LICENSE agreement in the root directory\n# ======================================================================================================================\n\n\"\"\"This module to extend NDDataset with the import methods method.\n\n\"\"\"\n__all__ = ['read_opus']\n\n__dataset_methods__ = __all__\n\n# ----------------------------------------------------------------------------------------------------------------------\n# standard imports\n# ----------------------------------------------------------------------------------------------------------------------\n\n\nfrom brukeropusreader import read_file\nfrom warnings import warn\nfrom datetime import datetime, timezone, timedelta\nfrom numpy import linspace\n\n# ----------------------------------------------------------------------------------------------------------------------\n# third party imports\n# ----------------------------------------------------------------------------------------------------------------------\n# ----------------------------------------------------------------------------------------------------------------------\n# local imports\n# ----------------------------------------------------------------------------------------------------------------------\nfrom spectrochempy.core import debug_\nfrom spectrochempy.core.dataset.nddataset import NDDataset\nfrom spectrochempy.core.dataset.ndcoord import Coord\nfrom spectrochempy.utils import readfilename\n\n\n# 
======================================================================================================================\n# Public functions\n# ======================================================================================================================\n\n# .............................................................................\ndef read_opus(dataset=None, **kwargs):\n \"\"\"Open Bruker Opus file(s) and group them in a single dataset. Only the spectrum is\n extracted (\"AB\" field). Returns an error if dimensions are incompatibles.\n\n Parameters\n ----------\n filename : `None`, `str`, or list of `str`\n Filename of the file(s) to load. If `None` : opens a dialog box to select\n files. If `str` : a single filename. It list of str :\n a list of filenames.\n directory : str, optional, default=\"\".\n From where to read the specified filename. If not specified, read in\n the defaults datadir.\n\n Returns\n -------\n dataset : |NDDataset|\n A dataset corresponding to the (set of) bruker file(s).\n\n Examples\n --------\n >>> A = NDDataset.read_opus('irdata\\\\spectrum.0001')\n >>> print(A)\n NDDataset: [float64] a.u. (shape: (y:1, x:2568))\n \"\"\"\n debug_(\"reading bruker opus files\")\n\n # filename will be given by a keyword parameter except if the first parameters is already\n # the filename\n filename = kwargs.get('filename', None)\n\n # check if the first parameter is a dataset because we allow not to pass it\n if not isinstance(dataset, NDDataset):\n # probably did not specify a dataset\n # so the first parameters must be the filename\n if isinstance(dataset, (str, list)) and dataset != '':\n filename = dataset\n\n # check if directory was specified\n directory = kwargs.get(\"directory\", None)\n sortbydate = kwargs.get(\"sortbydate\", True)\n\n # returns a list of files to read\n files = readfilename(filename,\n directory=directory,\n filetypes=['Bruker files (*.*)',\n 'all files (*)'],\n dictionary=False)\n # todo: see how to use regular expression in Qt filters\n\n if not files:\n # there is no files, return nothing\n return None\n\n xaxis = None\n intensities = []\n names = []\n acquisitiondates = []\n timestamps = []\n for file in files:\n opus_data = read_file(file)\n try:\n opus_data[\"AB\"]\n except KeyError: # not an absorbance spectrum\n warn(\"opus file {} could not be read\".format(file))\n continue\n\n npt = opus_data['AB Data Parameter']['NPT']\n fxv = opus_data['AB Data Parameter']['FXV']\n lxv = opus_data['AB Data Parameter']['LXV']\n xdata = linspace(fxv, lxv, npt)\n\n if not xaxis:\n xaxis = Coord(x=xdata, title='Wavenumbers', units='cm^-1')\n\n elif (xdata != xaxis.data).any():\n raise ValueError(\"spectra have incompatible dimensions (xaxis)\")\n\n intensities.append(opus_data[\"AB\"][:npt])\n names.append(opus_data[\"Sample\"]['SNM'])\n acqdate = opus_data[\"AB Data Parameter\"][\"DAT\"]\n acqtime = opus_data[\"AB Data Parameter\"][\"TIM\"]\n GMT_offset_hour = float(acqtime.split('GMT')[1].split(')')[0])\n date_time = datetime.strptime(acqdate + '_' + acqtime.split()[0],\n '%d/%m/%Y_%H:%M:%S.%f')\n UTC_date_time = date_time - timedelta(hours=GMT_offset_hour)\n UTC_date_time = UTC_date_time.replace(tzinfo=timezone.utc)\n # Transform to timestamp for storage in the Coord object\n # use datetime.fromtimestamp(d, timezone.utc)) to transform back to datetime\n timestamp = UTC_date_time.timestamp()\n acquisitiondates.append(UTC_date_time)\n timestamps.append(timestamp)\n\n # return if none of the files could be read:\n if not xaxis:\n return\n\n yaxis = 
Coord(timestamps,\n title='Acquisition timestamp (GMT)',\n units='s',\n labels=(acquisitiondates, names))\n\n dataset = NDDataset(intensities)\n dataset.set_coords(y=yaxis, x=xaxis)\n dataset.units = 'absorbance'\n dataset.title = 'Absorbance'\n\n # Set origin, description and history\n dataset.origin = \"opus\"\n dataset.description = ('Dataset from opus files. \\n')\n\n if sortbydate:\n dataset.sort(dim='y', inplace=True)\n\n dataset.history = str(datetime.now()) + ':import from opus files \\n'\n\n # Set the NDDataset date\n dataset._date = datetime.now()\n dataset._modified = dataset.date\n # debug_(\"end of reading\")\n\n return dataset\n", "path": "spectrochempy/core/readers/readopus.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# ======================================================================================================================\n# Copyright (\u00a9) 2015-2020 LCS\n# Laboratoire Catalyse et Spectrochimie, Caen, France.\n# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT\n# See full LICENSE agreement in the root directory\n# ======================================================================================================================\n\n\"\"\"This module to extend NDDataset with the import methods method.\n\n\"\"\"\n__all__ = ['read_opus']\n\n__dataset_methods__ = __all__\n\n# ----------------------------------------------------------------------------------------------------------------------\n# standard imports\n# ----------------------------------------------------------------------------------------------------------------------\n\n\nfrom brukeropusreader import read_file\nfrom warnings import warn\nfrom datetime import datetime, timezone, timedelta\nfrom numpy import linspace\n\n# ----------------------------------------------------------------------------------------------------------------------\n# third party imports\n# ----------------------------------------------------------------------------------------------------------------------\n# ----------------------------------------------------------------------------------------------------------------------\n# local imports\n# ----------------------------------------------------------------------------------------------------------------------\nfrom spectrochempy.core import debug_\nfrom spectrochempy.core.dataset.nddataset import NDDataset\nfrom spectrochempy.core.dataset.ndcoord import Coord\nfrom spectrochempy.utils import readfilename\n\n\n# ======================================================================================================================\n# Public functions\n# ======================================================================================================================\n\n# .............................................................................\ndef read_opus(dataset=None, **kwargs):\n \"\"\"Open Bruker Opus file(s) and group them in a single dataset. Only the spectrum is\n extracted (\"AB\" field). Returns an error if dimensions are incompatibles.\n\n Parameters\n ----------\n filename : `None`, `str`, or list of `str`\n Filename of the file(s) to load. If `None` : opens a dialog box to select\n files. If `str` : a single filename. It list of str :\n a list of filenames.\n directory : str, optional, default=\"\".\n From where to read the specified filename. 
If not specified, read in\n the defaults datadir.\n\n Returns\n -------\n dataset : |NDDataset|\n A dataset corresponding to the (set of) bruker file(s).\n\n Examples\n --------\n >>> A = NDDataset.read_opus('irdata\\\\spectrum.0001')\n >>> print(A)\n NDDataset: [float64] a.u. (shape: (y:1, x:2568))\n\n\n \"\"\"\n\n debug_(\"reading bruker opus files\")\n\n # filename will be given by a keyword parameter except if the first parameters is already\n # the filename\n filename = kwargs.get('filename', None)\n\n # check if the first parameter is a dataset because we allow not to pass it\n if not isinstance(dataset, NDDataset):\n # probably did not specify a dataset\n # so the first parameters must be the filename\n if isinstance(dataset, (str, list)) and dataset != '':\n filename = dataset\n\n # check if directory was specified\n directory = kwargs.get(\"directory\", None)\n sortbydate = kwargs.get(\"sortbydate\", True)\n\n # returns a list of files to read\n files = readfilename(filename,\n directory=directory,\n filetypes=['Bruker files (*.*)',\n 'all files (*)'],\n dictionary=False)\n # todo: see how to use regular expression in Qt filters\n\n if not files:\n # there is no files, return nothing\n return None\n\n xaxis = None\n intensities = []\n names = []\n acquisitiondates = []\n timestamps = []\n for file in files:\n opus_data = read_file(file)\n try:\n opus_data[\"AB\"]\n except KeyError: # not an absorbance spectrum\n warn(\"opus file {} could not be read\".format(file))\n continue\n\n npt = opus_data['AB Data Parameter']['NPT']\n fxv = opus_data['AB Data Parameter']['FXV']\n lxv = opus_data['AB Data Parameter']['LXV']\n xdata = linspace(fxv, lxv, npt)\n\n if not xaxis:\n xaxis = Coord(xdata, title='Wavenumbers', units='cm^-1')\n\n elif (xdata != xaxis.data).any():\n raise ValueError(\"spectra have incompatible dimensions (xaxis)\")\n\n intensities.append(opus_data[\"AB\"][:npt])\n names.append(opus_data[\"Sample\"]['SNM'])\n acqdate = opus_data[\"AB Data Parameter\"][\"DAT\"]\n acqtime = opus_data[\"AB Data Parameter\"][\"TIM\"]\n GMT_offset_hour = float(acqtime.split('GMT')[1].split(')')[0])\n date_time = datetime.strptime(acqdate + '_' + acqtime.split()[0],\n '%d/%m/%Y_%H:%M:%S.%f')\n UTC_date_time = date_time - timedelta(hours=GMT_offset_hour)\n UTC_date_time = UTC_date_time.replace(tzinfo=timezone.utc)\n # Transform to timestamp for storage in the Coord object\n # use datetime.fromtimestamp(d, timezone.utc)) to transform back to datetime\n timestamp = UTC_date_time.timestamp()\n acquisitiondates.append(UTC_date_time)\n timestamps.append(timestamp)\n\n # return if none of the files could be read:\n if not xaxis:\n return\n\n yaxis = Coord(timestamps,\n title='Acquisition timestamp (GMT)',\n units='s',\n labels=(acquisitiondates, names))\n\n dataset = NDDataset(intensities)\n dataset.set_coords(y=yaxis, x=xaxis)\n dataset.units = 'absorbance'\n dataset.title = 'Absorbance'\n\n # Set origin, description and history\n dataset.origin = \"opus\"\n dataset.description = 'Dataset from opus files. \\n'\n\n if sortbydate:\n dataset.sort(dim='y', inplace=True)\n\n dataset.history = str(datetime.now()) + ': import from opus files \\n'\n dataset._date = datetime.now()\n dataset._modified = dataset.date\n\n return dataset\n", "path": "spectrochempy/core/readers/readopus.py"}]}
| 2,048 | 425 |
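As a side note on the axis handling in the row above: the wavenumber axis is rebuilt from the OPUS `AB Data Parameter` block with `numpy.linspace`. A small self-contained sketch follows; the FXV/LXV/NPT values are made up for illustration.

```python
import numpy as np

# Hypothetical values as read from an OPUS file:
# FXV = first x value, LXV = last x value, NPT = number of points.
npt, fxv, lxv = 2568, 3999.6, 799.8

xdata = np.linspace(fxv, lxv, npt)  # wavenumber axis in cm^-1

# The reconstructed axis must start and end exactly on FXV and LXV;
# any extra shift, as in the reported bug, would show up here.
assert np.isclose(xdata[0], fxv) and np.isclose(xdata[-1], lxv)
```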
gh_patches_debug_7357
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-1793
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PY3: error decoding Content-Disposition header
This request
```
scrapy shell 'http://npe.com.cn/plus/save_to_doc.php?id=1666'
```
raises this error:
```
Traceback (most recent call last):
File "/Users/kmike/envs/dl/bin/scrapy", line 9, in <module>
load_entry_point('Scrapy', 'console_scripts', 'scrapy')()
File "/Users/kmike/svn/scrapy/scrapy/cmdline.py", line 142, in execute
_run_print_help(parser, _run_command, cmd, args, opts)
File "/Users/kmike/svn/scrapy/scrapy/cmdline.py", line 88, in _run_print_help
func(*a, **kw)
File "/Users/kmike/svn/scrapy/scrapy/cmdline.py", line 149, in _run_command
cmd.run(args, opts)
File "/Users/kmike/svn/scrapy/scrapy/commands/shell.py", line 71, in run
shell.start(url=url)
File "/Users/kmike/svn/scrapy/scrapy/shell.py", line 47, in start
self.fetch(url, spider)
File "/Users/kmike/svn/scrapy/scrapy/shell.py", line 112, in fetch
reactor, self._schedule, request, spider)
File "/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/internet/threads.py", line 122, in blockingCallFromThread
result.raiseException()
File "/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/python/failure.py", line 368, in raiseException
raise self.value.with_traceback(self.tb)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xb8 in position 25: invalid start byte
```
The error points to a wrong location (similar to #1760); the real traceback is
```
Traceback (most recent call last):
File "/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/internet/defer.py", line 1126, in _inlineCallbacks
result = result.throwExceptionIntoGenerator(g)
File "/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/python/failure.py", line 389, in throwExceptionIntoGenerator
return g.throw(self.type, self.value, self.tb)
File "/Users/kmike/svn/scrapy/scrapy/core/downloader/middleware.py", line 43, in process_request
defer.returnValue((yield download_func(request=request,spider=spider)))
File "/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/internet/defer.py", line 588, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/Users/kmike/svn/scrapy/scrapy/core/downloader/handlers/http11.py", line 272, in _cb_bodydone
respcls = responsetypes.from_args(headers=headers, url=url)
File "/Users/kmike/svn/scrapy/scrapy/responsetypes.py", line 110, in from_args
cls = self.from_headers(headers)
File "/Users/kmike/svn/scrapy/scrapy/responsetypes.py", line 78, in from_headers
cls = self.from_content_disposition(headers[b'Content-Disposition'])
File "/Users/kmike/svn/scrapy/scrapy/responsetypes.py", line 62, in from_content_disposition
filename = to_native_str(content_disposition).split(';')[1].split('=')[1]
File "/Users/kmike/svn/scrapy/scrapy/utils/python.py", line 129, in to_native_str
return to_unicode(text, encoding, errors)
File "/Users/kmike/svn/scrapy/scrapy/utils/python.py", line 107, in to_unicode
return text.decode(encoding, errors)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xb8 in position 25: invalid start byte
```
It looks like Content-Disposition is decoded using utf-8, but the encoding was not UTF-8.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/responsetypes.py`
Content:
```
1 """
2 This module implements a class which returns the appropriate Response class
3 based on different criteria.
4 """
5 from __future__ import absolute_import
6 from mimetypes import MimeTypes
7 from pkgutil import get_data
8 from io import StringIO
9 import six
10
11 from scrapy.http import Response
12 from scrapy.utils.misc import load_object
13 from scrapy.utils.python import isbinarytext, to_bytes, to_native_str
14
15
16 class ResponseTypes(object):
17
18 CLASSES = {
19 'text/html': 'scrapy.http.HtmlResponse',
20 'application/atom+xml': 'scrapy.http.XmlResponse',
21 'application/rdf+xml': 'scrapy.http.XmlResponse',
22 'application/rss+xml': 'scrapy.http.XmlResponse',
23 'application/xhtml+xml': 'scrapy.http.HtmlResponse',
24 'application/vnd.wap.xhtml+xml': 'scrapy.http.HtmlResponse',
25 'application/xml': 'scrapy.http.XmlResponse',
26 'application/json': 'scrapy.http.TextResponse',
27 'application/x-json': 'scrapy.http.TextResponse',
28 'application/javascript': 'scrapy.http.TextResponse',
29 'application/x-javascript': 'scrapy.http.TextResponse',
30 'text/xml': 'scrapy.http.XmlResponse',
31 'text/*': 'scrapy.http.TextResponse',
32 }
33
34 def __init__(self):
35 self.classes = {}
36 self.mimetypes = MimeTypes()
37 mimedata = get_data('scrapy', 'mime.types').decode('utf8')
38 self.mimetypes.readfp(StringIO(mimedata))
39 for mimetype, cls in six.iteritems(self.CLASSES):
40 self.classes[mimetype] = load_object(cls)
41
42 def from_mimetype(self, mimetype):
43 """Return the most appropriate Response class for the given mimetype"""
44 if mimetype is None:
45 return Response
46 elif mimetype in self.classes:
47 return self.classes[mimetype]
48 else:
49 basetype = "%s/*" % mimetype.split('/')[0]
50 return self.classes.get(basetype, Response)
51
52 def from_content_type(self, content_type, content_encoding=None):
53 """Return the most appropriate Response class from an HTTP Content-Type
54 header """
55 if content_encoding:
56 return Response
57 mimetype = to_native_str(content_type).split(';')[0].strip().lower()
58 return self.from_mimetype(mimetype)
59
60 def from_content_disposition(self, content_disposition):
61 try:
62 filename = to_native_str(content_disposition).split(';')[1].split('=')[1]
63 filename = filename.strip('"\'')
64 return self.from_filename(filename)
65 except IndexError:
66 return Response
67
68 def from_headers(self, headers):
69 """Return the most appropriate Response class by looking at the HTTP
70 headers"""
71 cls = Response
72 if b'Content-Type' in headers:
73 cls = self.from_content_type(
74 content_type=headers[b'Content-type'],
75 content_encoding=headers.get(b'Content-Encoding')
76 )
77 if cls is Response and b'Content-Disposition' in headers:
78 cls = self.from_content_disposition(headers[b'Content-Disposition'])
79 return cls
80
81 def from_filename(self, filename):
82 """Return the most appropriate Response class from a file name"""
83 mimetype, encoding = self.mimetypes.guess_type(filename)
84 if mimetype and not encoding:
85 return self.from_mimetype(mimetype)
86 else:
87 return Response
88
89 def from_body(self, body):
90 """Try to guess the appropriate response based on the body content.
91 This method is a bit magic and could be improved in the future, but
92 it's not meant to be used except for special cases where response types
93 cannot be guess using more straightforward methods."""
94 chunk = body[:5000]
95 chunk = to_bytes(chunk)
96 if isbinarytext(chunk):
97 return self.from_mimetype('application/octet-stream')
98 elif b"<html>" in chunk.lower():
99 return self.from_mimetype('text/html')
100 elif b"<?xml" in chunk.lower():
101 return self.from_mimetype('text/xml')
102 else:
103 return self.from_mimetype('text')
104
105 def from_args(self, headers=None, url=None, filename=None, body=None):
106 """Guess the most appropriate Response class based on
107 the given arguments."""
108 cls = Response
109 if headers is not None:
110 cls = self.from_headers(headers)
111 if cls is Response and url is not None:
112 cls = self.from_filename(url)
113 if cls is Response and filename is not None:
114 cls = self.from_filename(filename)
115 if cls is Response and body is not None:
116 cls = self.from_body(body)
117 return cls
118
119 responsetypes = ResponseTypes()
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/responsetypes.py b/scrapy/responsetypes.py
--- a/scrapy/responsetypes.py
+++ b/scrapy/responsetypes.py
@@ -59,7 +59,8 @@
def from_content_disposition(self, content_disposition):
try:
- filename = to_native_str(content_disposition).split(';')[1].split('=')[1]
+ filename = to_native_str(content_disposition,
+ encoding='latin-1', errors='replace').split(';')[1].split('=')[1]
filename = filename.strip('"\'')
return self.from_filename(filename)
except IndexError:
|
{"golden_diff": "diff --git a/scrapy/responsetypes.py b/scrapy/responsetypes.py\n--- a/scrapy/responsetypes.py\n+++ b/scrapy/responsetypes.py\n@@ -59,7 +59,8 @@\n \n def from_content_disposition(self, content_disposition):\n try:\n- filename = to_native_str(content_disposition).split(';')[1].split('=')[1]\n+ filename = to_native_str(content_disposition,\n+ encoding='latin-1', errors='replace').split(';')[1].split('=')[1]\n filename = filename.strip('\"\\'')\n return self.from_filename(filename)\n except IndexError:\n", "issue": "PY3: error decoding Content-Disposition header\nThis request\n\n```\nscrapy shell 'http://npe.com.cn/plus/save_to_doc.php?id=1666'\n```\n\nraises this error:\n\n```\nTraceback (most recent call last):\n File \"/Users/kmike/envs/dl/bin/scrapy\", line 9, in <module>\n load_entry_point('Scrapy', 'console_scripts', 'scrapy')()\n File \"/Users/kmike/svn/scrapy/scrapy/cmdline.py\", line 142, in execute\n _run_print_help(parser, _run_command, cmd, args, opts)\n File \"/Users/kmike/svn/scrapy/scrapy/cmdline.py\", line 88, in _run_print_help\n func(*a, **kw)\n File \"/Users/kmike/svn/scrapy/scrapy/cmdline.py\", line 149, in _run_command\n cmd.run(args, opts)\n File \"/Users/kmike/svn/scrapy/scrapy/commands/shell.py\", line 71, in run\n shell.start(url=url)\n File \"/Users/kmike/svn/scrapy/scrapy/shell.py\", line 47, in start\n self.fetch(url, spider)\n File \"/Users/kmike/svn/scrapy/scrapy/shell.py\", line 112, in fetch\n reactor, self._schedule, request, spider)\n File \"/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/internet/threads.py\", line 122, in blockingCallFromThread\n result.raiseException()\n File \"/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/python/failure.py\", line 368, in raiseException\n raise self.value.with_traceback(self.tb)\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0xb8 in position 25: invalid start byte\n```\n\nThe error points to a wrong location (similar to #1760); the real traceback is\n\n```\nTraceback (most recent call last):\n File \"/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/internet/defer.py\", line 1126, in _inlineCallbacks\n result = result.throwExceptionIntoGenerator(g)\n File \"/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/python/failure.py\", line 389, in throwExceptionIntoGenerator\n return g.throw(self.type, self.value, self.tb)\n File \"/Users/kmike/svn/scrapy/scrapy/core/downloader/middleware.py\", line 43, in process_request\n defer.returnValue((yield download_func(request=request,spider=spider)))\n File \"/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/internet/defer.py\", line 588, in _runCallbacks\n current.result = callback(current.result, *args, **kw)\n File \"/Users/kmike/svn/scrapy/scrapy/core/downloader/handlers/http11.py\", line 272, in _cb_bodydone\n respcls = responsetypes.from_args(headers=headers, url=url)\n File \"/Users/kmike/svn/scrapy/scrapy/responsetypes.py\", line 110, in from_args\n cls = self.from_headers(headers)\n File \"/Users/kmike/svn/scrapy/scrapy/responsetypes.py\", line 78, in from_headers\n cls = self.from_content_disposition(headers[b'Content-Disposition'])\n File \"/Users/kmike/svn/scrapy/scrapy/responsetypes.py\", line 62, in from_content_disposition\n filename = to_native_str(content_disposition).split(';')[1].split('=')[1]\n File \"/Users/kmike/svn/scrapy/scrapy/utils/python.py\", line 129, in to_native_str\n 
return to_unicode(text, encoding, errors)\n File \"/Users/kmike/svn/scrapy/scrapy/utils/python.py\", line 107, in to_unicode\n return text.decode(encoding, errors)\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0xb8 in position 25: invalid start byte\n```\n\nIt looks like Content-Disposition is decoded using utf-8, but the encoding was not UTF-8.\n\n", "before_files": [{"content": "\"\"\"\nThis module implements a class which returns the appropriate Response class\nbased on different criteria.\n\"\"\"\nfrom __future__ import absolute_import\nfrom mimetypes import MimeTypes\nfrom pkgutil import get_data\nfrom io import StringIO\nimport six\n\nfrom scrapy.http import Response\nfrom scrapy.utils.misc import load_object\nfrom scrapy.utils.python import isbinarytext, to_bytes, to_native_str\n\n\nclass ResponseTypes(object):\n\n CLASSES = {\n 'text/html': 'scrapy.http.HtmlResponse',\n 'application/atom+xml': 'scrapy.http.XmlResponse',\n 'application/rdf+xml': 'scrapy.http.XmlResponse',\n 'application/rss+xml': 'scrapy.http.XmlResponse',\n 'application/xhtml+xml': 'scrapy.http.HtmlResponse',\n 'application/vnd.wap.xhtml+xml': 'scrapy.http.HtmlResponse',\n 'application/xml': 'scrapy.http.XmlResponse',\n 'application/json': 'scrapy.http.TextResponse',\n 'application/x-json': 'scrapy.http.TextResponse',\n 'application/javascript': 'scrapy.http.TextResponse',\n 'application/x-javascript': 'scrapy.http.TextResponse',\n 'text/xml': 'scrapy.http.XmlResponse',\n 'text/*': 'scrapy.http.TextResponse',\n }\n\n def __init__(self):\n self.classes = {}\n self.mimetypes = MimeTypes()\n mimedata = get_data('scrapy', 'mime.types').decode('utf8')\n self.mimetypes.readfp(StringIO(mimedata))\n for mimetype, cls in six.iteritems(self.CLASSES):\n self.classes[mimetype] = load_object(cls)\n\n def from_mimetype(self, mimetype):\n \"\"\"Return the most appropriate Response class for the given mimetype\"\"\"\n if mimetype is None:\n return Response\n elif mimetype in self.classes:\n return self.classes[mimetype]\n else:\n basetype = \"%s/*\" % mimetype.split('/')[0]\n return self.classes.get(basetype, Response)\n\n def from_content_type(self, content_type, content_encoding=None):\n \"\"\"Return the most appropriate Response class from an HTTP Content-Type\n header \"\"\"\n if content_encoding:\n return Response\n mimetype = to_native_str(content_type).split(';')[0].strip().lower()\n return self.from_mimetype(mimetype)\n\n def from_content_disposition(self, content_disposition):\n try:\n filename = to_native_str(content_disposition).split(';')[1].split('=')[1]\n filename = filename.strip('\"\\'')\n return self.from_filename(filename)\n except IndexError:\n return Response\n\n def from_headers(self, headers):\n \"\"\"Return the most appropriate Response class by looking at the HTTP\n headers\"\"\"\n cls = Response\n if b'Content-Type' in headers:\n cls = self.from_content_type(\n content_type=headers[b'Content-type'],\n content_encoding=headers.get(b'Content-Encoding')\n )\n if cls is Response and b'Content-Disposition' in headers:\n cls = self.from_content_disposition(headers[b'Content-Disposition'])\n return cls\n\n def from_filename(self, filename):\n \"\"\"Return the most appropriate Response class from a file name\"\"\"\n mimetype, encoding = self.mimetypes.guess_type(filename)\n if mimetype and not encoding:\n return self.from_mimetype(mimetype)\n else:\n return Response\n\n def from_body(self, body):\n \"\"\"Try to guess the appropriate response based on the body content.\n This method is a bit magic and could 
be improved in the future, but\n it's not meant to be used except for special cases where response types\n cannot be guess using more straightforward methods.\"\"\"\n chunk = body[:5000]\n chunk = to_bytes(chunk)\n if isbinarytext(chunk):\n return self.from_mimetype('application/octet-stream')\n elif b\"<html>\" in chunk.lower():\n return self.from_mimetype('text/html')\n elif b\"<?xml\" in chunk.lower():\n return self.from_mimetype('text/xml')\n else:\n return self.from_mimetype('text')\n\n def from_args(self, headers=None, url=None, filename=None, body=None):\n \"\"\"Guess the most appropriate Response class based on\n the given arguments.\"\"\"\n cls = Response\n if headers is not None:\n cls = self.from_headers(headers)\n if cls is Response and url is not None:\n cls = self.from_filename(url)\n if cls is Response and filename is not None:\n cls = self.from_filename(filename)\n if cls is Response and body is not None:\n cls = self.from_body(body)\n return cls\n\nresponsetypes = ResponseTypes()\n", "path": "scrapy/responsetypes.py"}], "after_files": [{"content": "\"\"\"\nThis module implements a class which returns the appropriate Response class\nbased on different criteria.\n\"\"\"\nfrom __future__ import absolute_import\nfrom mimetypes import MimeTypes\nfrom pkgutil import get_data\nfrom io import StringIO\nimport six\n\nfrom scrapy.http import Response\nfrom scrapy.utils.misc import load_object\nfrom scrapy.utils.python import isbinarytext, to_bytes, to_native_str\n\n\nclass ResponseTypes(object):\n\n CLASSES = {\n 'text/html': 'scrapy.http.HtmlResponse',\n 'application/atom+xml': 'scrapy.http.XmlResponse',\n 'application/rdf+xml': 'scrapy.http.XmlResponse',\n 'application/rss+xml': 'scrapy.http.XmlResponse',\n 'application/xhtml+xml': 'scrapy.http.HtmlResponse',\n 'application/vnd.wap.xhtml+xml': 'scrapy.http.HtmlResponse',\n 'application/xml': 'scrapy.http.XmlResponse',\n 'application/json': 'scrapy.http.TextResponse',\n 'application/x-json': 'scrapy.http.TextResponse',\n 'application/javascript': 'scrapy.http.TextResponse',\n 'application/x-javascript': 'scrapy.http.TextResponse',\n 'text/xml': 'scrapy.http.XmlResponse',\n 'text/*': 'scrapy.http.TextResponse',\n }\n\n def __init__(self):\n self.classes = {}\n self.mimetypes = MimeTypes()\n mimedata = get_data('scrapy', 'mime.types').decode('utf8')\n self.mimetypes.readfp(StringIO(mimedata))\n for mimetype, cls in six.iteritems(self.CLASSES):\n self.classes[mimetype] = load_object(cls)\n\n def from_mimetype(self, mimetype):\n \"\"\"Return the most appropriate Response class for the given mimetype\"\"\"\n if mimetype is None:\n return Response\n elif mimetype in self.classes:\n return self.classes[mimetype]\n else:\n basetype = \"%s/*\" % mimetype.split('/')[0]\n return self.classes.get(basetype, Response)\n\n def from_content_type(self, content_type, content_encoding=None):\n \"\"\"Return the most appropriate Response class from an HTTP Content-Type\n header \"\"\"\n if content_encoding:\n return Response\n mimetype = to_native_str(content_type).split(';')[0].strip().lower()\n return self.from_mimetype(mimetype)\n\n def from_content_disposition(self, content_disposition):\n try:\n filename = to_native_str(content_disposition,\n encoding='latin-1', errors='replace').split(';')[1].split('=')[1]\n filename = filename.strip('\"\\'')\n return self.from_filename(filename)\n except IndexError:\n return Response\n\n def from_headers(self, headers):\n \"\"\"Return the most appropriate Response class by looking at the HTTP\n headers\"\"\"\n 
cls = Response\n if b'Content-Type' in headers:\n cls = self.from_content_type(\n content_type=headers[b'Content-type'],\n content_encoding=headers.get(b'Content-Encoding')\n )\n if cls is Response and b'Content-Disposition' in headers:\n cls = self.from_content_disposition(headers[b'Content-Disposition'])\n return cls\n\n def from_filename(self, filename):\n \"\"\"Return the most appropriate Response class from a file name\"\"\"\n mimetype, encoding = self.mimetypes.guess_type(filename)\n if mimetype and not encoding:\n return self.from_mimetype(mimetype)\n else:\n return Response\n\n def from_body(self, body):\n \"\"\"Try to guess the appropriate response based on the body content.\n This method is a bit magic and could be improved in the future, but\n it's not meant to be used except for special cases where response types\n cannot be guess using more straightforward methods.\"\"\"\n chunk = body[:5000]\n chunk = to_bytes(chunk)\n if isbinarytext(chunk):\n return self.from_mimetype('application/octet-stream')\n elif b\"<html>\" in chunk.lower():\n return self.from_mimetype('text/html')\n elif b\"<?xml\" in chunk.lower():\n return self.from_mimetype('text/xml')\n else:\n return self.from_mimetype('text')\n\n def from_args(self, headers=None, url=None, filename=None, body=None):\n \"\"\"Guess the most appropriate Response class based on\n the given arguments.\"\"\"\n cls = Response\n if headers is not None:\n cls = self.from_headers(headers)\n if cls is Response and url is not None:\n cls = self.from_filename(url)\n if cls is Response and filename is not None:\n cls = self.from_filename(filename)\n if cls is Response and body is not None:\n cls = self.from_body(body)\n return cls\n\nresponsetypes = ResponseTypes()\n", "path": "scrapy/responsetypes.py"}]}
| 2,541 | 141 |
gh_patches_debug_40197 | rasdani/github-patches | git_diff | fossasia__open-event-server-8379 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Organizer video tab shows errors if there are video rooms not using BBB
Organizer video tab shows errors if there are video rooms not using BBB.


Compare https://eventyay.com/events/3ea940a8/video/all (only for organizer accessible)
Related to https://github.com/fossasia/open-event-frontend/pull/7927
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/video_recordings.py`
Content:
```
1 from datetime import datetime
2
3 from flask_rest_jsonapi import ResourceDetail, ResourceList
4 from flask_rest_jsonapi.resource import ResourceRelationship
5
6 from app.api.helpers.db import get_or_create, safe_query_kwargs
7 from app.api.helpers.errors import ForbiddenError, UnprocessableEntityError
8 from app.api.helpers.permission_manager import has_access
9 from app.api.helpers.permissions import jwt_required
10 from app.api.schema.video_recordings import VideoRecordingSchema
11 from app.api.video_channels.bbb import BigBlueButton
12 from app.models import db
13 from app.models.video_recording import VideoRecording
14 from app.models.video_stream import VideoStream
15
16
17 class VideoRecordingList(ResourceList):
18 def before_get(self, args, kwargs):
19 if kwargs.get('video_stream_id'):
20 stream = safe_query_kwargs(VideoStream, kwargs, 'video_stream_id', 'id')
21
22 if not has_access('is_organizer', event_id=stream.event_id):
23 raise ForbiddenError(
24 {'pointer': 'event_id'},
25 'You need to be the event organizer to access video recordings.',
26 )
27
28 params = dict(
29 meetingID=stream.extra['response']['meetingID'],
30 )
31 channel = stream.channel
32 bbb = BigBlueButton(channel.api_url, channel.api_key)
33 result = bbb.request('getRecordings', params)
34
35 if result.data['response']['recordings']:
36 recordings = []
37 if type(result.data['response']['recordings']['recording']) is list:
38 recordings = result.data['response']['recordings']['recording']
39 else:
40 recordings.append(result.data['response']['recordings']['recording'])
41 for recording in recordings:
42 get_or_create(
43 VideoRecording,
44 bbb_record_id=recording['recordID'],
45 participants=recording['participants'],
46 url=recording['playback']['format']['url'],
47 start_time=datetime.fromtimestamp(
48 int(int(recording['startTime']) / 1000)
49 ),
50 end_time=datetime.fromtimestamp(
51 int(int(recording['endTime']) / 1000)
52 ),
53 video_stream=stream,
54 )
55
56 def query(self, view_kwargs):
57 query_ = VideoRecording.query
58 if view_kwargs.get('video_stream_id'):
59 stream = safe_query_kwargs(VideoStream, view_kwargs, 'video_stream_id')
60 query_ = VideoRecording.query.filter(
61 VideoRecording.video_stream_id == stream.id
62 )
63 else:
64 if not has_access('is_admin'):
65 raise ForbiddenError(
66 {'pointer': 'user'},
67 'You need to be the admin to access video recordings.',
68 )
69
70 return query_
71
72 methods = ['GET']
73 view_kwargs = True
74 decorators = (jwt_required,)
75 schema = VideoRecordingSchema
76 data_layer = {
77 'session': db.session,
78 'model': VideoRecording,
79 'methods': {
80 'query': query,
81 'before_get': before_get,
82 },
83 }
84
85
86 class VideoRecordingDetail(ResourceDetail):
87 def before_get_object(self, view_kwargs):
88 if view_kwargs.get('video_stream_id'):
89 video_stream = safe_query_kwargs(
90 VideoStream,
91 view_kwargs,
92 'video_stream_id',
93 )
94 view_kwargs['id'] = video_stream.id
95
96 def after_get_object(self, video_recording, view_kwargs):
97 if not has_access('is_organizer', event_id=video_recording.video_stream.event_id):
98 raise ForbiddenError(
99 {'pointer': 'event_id'},
100 'You need to be the event organizer to access video recordings.',
101 )
102
103 def before_delete_object(self, video_recording, kwargs):
104 """
105 before delete object method for recording detail
106 :param obj:
107 :param kwargs:
108 :return:
109 """
110 if not has_access('is_admin'):
111 raise ForbiddenError(
112 {'source': 'User'}, 'You are not authorized to access this.'
113 )
114 stream = video_recording.video_stream
115 params = dict(
116 recordID=video_recording.bbb_record_id,
117 )
118 channel = stream.channel
119 bbb = BigBlueButton(channel.api_url, channel.api_key)
120 result = bbb.request('deleteRecordings', params)
121
122 if not result.success:
123 raise UnprocessableEntityError(
124 {'source': 'recording_id'}, 'error while deleting recording'
125 )
126
127 methods = ['GET', 'DELETE']
128 schema = VideoRecordingSchema
129 decorators = (jwt_required,)
130 data_layer = {
131 'session': db.session,
132 'model': VideoRecording,
133 'methods': {
134 'before_get_object': before_get_object,
135 'after_get_object': after_get_object,
136 'before_delete_object': before_delete_object,
137 },
138 }
139
140
141 class VideoRecordingRelationship(ResourceRelationship):
142 schema = VideoRecordingSchema
143 methods = ['GET']
144 data_layer = {'session': db.session, 'model': VideoRecording}
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/api/video_recordings.py b/app/api/video_recordings.py
--- a/app/api/video_recordings.py
+++ b/app/api/video_recordings.py
@@ -19,39 +19,48 @@
if kwargs.get('video_stream_id'):
stream = safe_query_kwargs(VideoStream, kwargs, 'video_stream_id', 'id')
- if not has_access('is_organizer', event_id=stream.event_id):
- raise ForbiddenError(
- {'pointer': 'event_id'},
- 'You need to be the event organizer to access video recordings.',
- )
+ if stream.channel and stream.channel.provider == 'bbb':
+ if not has_access('is_organizer', event_id=stream.event_id):
+ raise ForbiddenError(
+ {'pointer': 'event_id'},
+ 'You need to be the event organizer to access video recordings.',
+ )
- params = dict(
- meetingID=stream.extra['response']['meetingID'],
- )
- channel = stream.channel
- bbb = BigBlueButton(channel.api_url, channel.api_key)
- result = bbb.request('getRecordings', params)
-
- if result.data['response']['recordings']:
- recordings = []
- if type(result.data['response']['recordings']['recording']) is list:
- recordings = result.data['response']['recordings']['recording']
- else:
- recordings.append(result.data['response']['recordings']['recording'])
- for recording in recordings:
- get_or_create(
- VideoRecording,
- bbb_record_id=recording['recordID'],
- participants=recording['participants'],
- url=recording['playback']['format']['url'],
- start_time=datetime.fromtimestamp(
- int(int(recording['startTime']) / 1000)
- ),
- end_time=datetime.fromtimestamp(
- int(int(recording['endTime']) / 1000)
- ),
- video_stream=stream,
+ if stream.extra is not None:
+ params = dict(
+ meetingID=stream.extra['response']['meetingID'],
)
+ channel = stream.channel
+ bbb = BigBlueButton(channel.api_url, channel.api_key)
+ result = bbb.request('getRecordings', params)
+
+ if result.data['response']['recordings']:
+ recordings = []
+ if (
+ type(result.data['response']['recordings']['recording'])
+ is list
+ ):
+ recordings = result.data['response']['recordings'][
+ 'recording'
+ ]
+ else:
+ recordings.append(
+ result.data['response']['recordings']['recording']
+ )
+ for recording in recordings:
+ get_or_create(
+ VideoRecording,
+ bbb_record_id=recording['recordID'],
+ participants=recording['participants'],
+ url=recording['playback']['format']['url'],
+ start_time=datetime.fromtimestamp(
+ int(int(recording['startTime']) / 1000)
+ ),
+ end_time=datetime.fromtimestamp(
+ int(int(recording['endTime']) / 1000)
+ ),
+ video_stream=stream,
+ )
def query(self, view_kwargs):
query_ = VideoRecording.query
|
{"golden_diff": "diff --git a/app/api/video_recordings.py b/app/api/video_recordings.py\n--- a/app/api/video_recordings.py\n+++ b/app/api/video_recordings.py\n@@ -19,39 +19,48 @@\n if kwargs.get('video_stream_id'):\n stream = safe_query_kwargs(VideoStream, kwargs, 'video_stream_id', 'id')\n \n- if not has_access('is_organizer', event_id=stream.event_id):\n- raise ForbiddenError(\n- {'pointer': 'event_id'},\n- 'You need to be the event organizer to access video recordings.',\n- )\n+ if stream.channel and stream.channel.provider == 'bbb':\n+ if not has_access('is_organizer', event_id=stream.event_id):\n+ raise ForbiddenError(\n+ {'pointer': 'event_id'},\n+ 'You need to be the event organizer to access video recordings.',\n+ )\n \n- params = dict(\n- meetingID=stream.extra['response']['meetingID'],\n- )\n- channel = stream.channel\n- bbb = BigBlueButton(channel.api_url, channel.api_key)\n- result = bbb.request('getRecordings', params)\n-\n- if result.data['response']['recordings']:\n- recordings = []\n- if type(result.data['response']['recordings']['recording']) is list:\n- recordings = result.data['response']['recordings']['recording']\n- else:\n- recordings.append(result.data['response']['recordings']['recording'])\n- for recording in recordings:\n- get_or_create(\n- VideoRecording,\n- bbb_record_id=recording['recordID'],\n- participants=recording['participants'],\n- url=recording['playback']['format']['url'],\n- start_time=datetime.fromtimestamp(\n- int(int(recording['startTime']) / 1000)\n- ),\n- end_time=datetime.fromtimestamp(\n- int(int(recording['endTime']) / 1000)\n- ),\n- video_stream=stream,\n+ if stream.extra is not None:\n+ params = dict(\n+ meetingID=stream.extra['response']['meetingID'],\n )\n+ channel = stream.channel\n+ bbb = BigBlueButton(channel.api_url, channel.api_key)\n+ result = bbb.request('getRecordings', params)\n+\n+ if result.data['response']['recordings']:\n+ recordings = []\n+ if (\n+ type(result.data['response']['recordings']['recording'])\n+ is list\n+ ):\n+ recordings = result.data['response']['recordings'][\n+ 'recording'\n+ ]\n+ else:\n+ recordings.append(\n+ result.data['response']['recordings']['recording']\n+ )\n+ for recording in recordings:\n+ get_or_create(\n+ VideoRecording,\n+ bbb_record_id=recording['recordID'],\n+ participants=recording['participants'],\n+ url=recording['playback']['format']['url'],\n+ start_time=datetime.fromtimestamp(\n+ int(int(recording['startTime']) / 1000)\n+ ),\n+ end_time=datetime.fromtimestamp(\n+ int(int(recording['endTime']) / 1000)\n+ ),\n+ video_stream=stream,\n+ )\n \n def query(self, view_kwargs):\n query_ = VideoRecording.query\n", "issue": "Organizer video tab shows errors if there are video rooms not using BBB\nOrganizer video tab shows errors if there are video rooms not using BBB.\r\n\r\n\r\n\r\n\r\nCompare https://eventyay.com/events/3ea940a8/video/all (only for organizer accessible)\r\n\r\nRelated to https://github.com/fossasia/open-event-frontend/pull/7927\n", "before_files": [{"content": "from datetime import datetime\n\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList\nfrom flask_rest_jsonapi.resource import ResourceRelationship\n\nfrom app.api.helpers.db import get_or_create, safe_query_kwargs\nfrom app.api.helpers.errors import ForbiddenError, UnprocessableEntityError\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.schema.video_recordings import VideoRecordingSchema\nfrom app.api.video_channels.bbb import 
BigBlueButton\nfrom app.models import db\nfrom app.models.video_recording import VideoRecording\nfrom app.models.video_stream import VideoStream\n\n\nclass VideoRecordingList(ResourceList):\n def before_get(self, args, kwargs):\n if kwargs.get('video_stream_id'):\n stream = safe_query_kwargs(VideoStream, kwargs, 'video_stream_id', 'id')\n\n if not has_access('is_organizer', event_id=stream.event_id):\n raise ForbiddenError(\n {'pointer': 'event_id'},\n 'You need to be the event organizer to access video recordings.',\n )\n\n params = dict(\n meetingID=stream.extra['response']['meetingID'],\n )\n channel = stream.channel\n bbb = BigBlueButton(channel.api_url, channel.api_key)\n result = bbb.request('getRecordings', params)\n\n if result.data['response']['recordings']:\n recordings = []\n if type(result.data['response']['recordings']['recording']) is list:\n recordings = result.data['response']['recordings']['recording']\n else:\n recordings.append(result.data['response']['recordings']['recording'])\n for recording in recordings:\n get_or_create(\n VideoRecording,\n bbb_record_id=recording['recordID'],\n participants=recording['participants'],\n url=recording['playback']['format']['url'],\n start_time=datetime.fromtimestamp(\n int(int(recording['startTime']) / 1000)\n ),\n end_time=datetime.fromtimestamp(\n int(int(recording['endTime']) / 1000)\n ),\n video_stream=stream,\n )\n\n def query(self, view_kwargs):\n query_ = VideoRecording.query\n if view_kwargs.get('video_stream_id'):\n stream = safe_query_kwargs(VideoStream, view_kwargs, 'video_stream_id')\n query_ = VideoRecording.query.filter(\n VideoRecording.video_stream_id == stream.id\n )\n else:\n if not has_access('is_admin'):\n raise ForbiddenError(\n {'pointer': 'user'},\n 'You need to be the admin to access video recordings.',\n )\n\n return query_\n\n methods = ['GET']\n view_kwargs = True\n decorators = (jwt_required,)\n schema = VideoRecordingSchema\n data_layer = {\n 'session': db.session,\n 'model': VideoRecording,\n 'methods': {\n 'query': query,\n 'before_get': before_get,\n },\n }\n\n\nclass VideoRecordingDetail(ResourceDetail):\n def before_get_object(self, view_kwargs):\n if view_kwargs.get('video_stream_id'):\n video_stream = safe_query_kwargs(\n VideoStream,\n view_kwargs,\n 'video_stream_id',\n )\n view_kwargs['id'] = video_stream.id\n\n def after_get_object(self, video_recording, view_kwargs):\n if not has_access('is_organizer', event_id=video_recording.video_stream.event_id):\n raise ForbiddenError(\n {'pointer': 'event_id'},\n 'You need to be the event organizer to access video recordings.',\n )\n\n def before_delete_object(self, video_recording, kwargs):\n \"\"\"\n before delete object method for recording detail\n :param obj:\n :param kwargs:\n :return:\n \"\"\"\n if not has_access('is_admin'):\n raise ForbiddenError(\n {'source': 'User'}, 'You are not authorized to access this.'\n )\n stream = video_recording.video_stream\n params = dict(\n recordID=video_recording.bbb_record_id,\n )\n channel = stream.channel\n bbb = BigBlueButton(channel.api_url, channel.api_key)\n result = bbb.request('deleteRecordings', params)\n\n if not result.success:\n raise UnprocessableEntityError(\n {'source': 'recording_id'}, 'error while deleting recording'\n )\n\n methods = ['GET', 'DELETE']\n schema = VideoRecordingSchema\n decorators = (jwt_required,)\n data_layer = {\n 'session': db.session,\n 'model': VideoRecording,\n 'methods': {\n 'before_get_object': before_get_object,\n 'after_get_object': after_get_object,\n 
'before_delete_object': before_delete_object,\n },\n }\n\n\nclass VideoRecordingRelationship(ResourceRelationship):\n schema = VideoRecordingSchema\n methods = ['GET']\n data_layer = {'session': db.session, 'model': VideoRecording}\n", "path": "app/api/video_recordings.py"}], "after_files": [{"content": "from datetime import datetime\n\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList\nfrom flask_rest_jsonapi.resource import ResourceRelationship\n\nfrom app.api.helpers.db import get_or_create, safe_query_kwargs\nfrom app.api.helpers.errors import ForbiddenError, UnprocessableEntityError\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.schema.video_recordings import VideoRecordingSchema\nfrom app.api.video_channels.bbb import BigBlueButton\nfrom app.models import db\nfrom app.models.video_recording import VideoRecording\nfrom app.models.video_stream import VideoStream\n\n\nclass VideoRecordingList(ResourceList):\n def before_get(self, args, kwargs):\n if kwargs.get('video_stream_id'):\n stream = safe_query_kwargs(VideoStream, kwargs, 'video_stream_id', 'id')\n\n if stream.channel and stream.channel.provider == 'bbb':\n if not has_access('is_organizer', event_id=stream.event_id):\n raise ForbiddenError(\n {'pointer': 'event_id'},\n 'You need to be the event organizer to access video recordings.',\n )\n\n if stream.extra is not None:\n params = dict(\n meetingID=stream.extra['response']['meetingID'],\n )\n channel = stream.channel\n bbb = BigBlueButton(channel.api_url, channel.api_key)\n result = bbb.request('getRecordings', params)\n\n if result.data['response']['recordings']:\n recordings = []\n if (\n type(result.data['response']['recordings']['recording'])\n is list\n ):\n recordings = result.data['response']['recordings'][\n 'recording'\n ]\n else:\n recordings.append(\n result.data['response']['recordings']['recording']\n )\n for recording in recordings:\n get_or_create(\n VideoRecording,\n bbb_record_id=recording['recordID'],\n participants=recording['participants'],\n url=recording['playback']['format']['url'],\n start_time=datetime.fromtimestamp(\n int(int(recording['startTime']) / 1000)\n ),\n end_time=datetime.fromtimestamp(\n int(int(recording['endTime']) / 1000)\n ),\n video_stream=stream,\n )\n\n def query(self, view_kwargs):\n query_ = VideoRecording.query\n if view_kwargs.get('video_stream_id'):\n stream = safe_query_kwargs(VideoStream, view_kwargs, 'video_stream_id')\n query_ = VideoRecording.query.filter(\n VideoRecording.video_stream_id == stream.id\n )\n else:\n if not has_access('is_admin'):\n raise ForbiddenError(\n {'pointer': 'user'},\n 'You need to be the admin to access video recordings.',\n )\n\n return query_\n\n methods = ['GET']\n view_kwargs = True\n decorators = (jwt_required,)\n schema = VideoRecordingSchema\n data_layer = {\n 'session': db.session,\n 'model': VideoRecording,\n 'methods': {\n 'query': query,\n 'before_get': before_get,\n },\n }\n\n\nclass VideoRecordingDetail(ResourceDetail):\n def before_get_object(self, view_kwargs):\n if view_kwargs.get('video_stream_id'):\n video_stream = safe_query_kwargs(\n VideoStream,\n view_kwargs,\n 'video_stream_id',\n )\n view_kwargs['id'] = video_stream.id\n\n def after_get_object(self, video_recording, view_kwargs):\n if not has_access('is_organizer', event_id=video_recording.video_stream.event_id):\n raise ForbiddenError(\n {'pointer': 'event_id'},\n 'You need to be the event organizer to access video recordings.',\n )\n\n 
def before_delete_object(self, video_recording, kwargs):\n \"\"\"\n before delete object method for recording detail\n :param obj:\n :param kwargs:\n :return:\n \"\"\"\n if not has_access('is_admin'):\n raise ForbiddenError(\n {'source': 'User'}, 'You are not authorized to access this.'\n )\n stream = video_recording.video_stream\n params = dict(\n recordID=video_recording.bbb_record_id,\n )\n channel = stream.channel\n bbb = BigBlueButton(channel.api_url, channel.api_key)\n result = bbb.request('deleteRecordings', params)\n\n if not result.success:\n raise UnprocessableEntityError(\n {'source': 'recording_id'}, 'error while deleting recording'\n )\n\n methods = ['GET', 'DELETE']\n schema = VideoRecordingSchema\n decorators = (jwt_required,)\n data_layer = {\n 'session': db.session,\n 'model': VideoRecording,\n 'methods': {\n 'before_get_object': before_get_object,\n 'after_get_object': after_get_object,\n 'before_delete_object': before_delete_object,\n },\n }\n\n\nclass VideoRecordingRelationship(ResourceRelationship):\n schema = VideoRecordingSchema\n methods = ['GET']\n data_layer = {'session': db.session, 'model': VideoRecording}\n", "path": "app/api/video_recordings.py"}]}
| 1,869 | 733 |
gh_patches_debug_39683 | rasdani/github-patches | git_diff | qutip__qutip-1754 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
hardware_info fails (again) on M1 MacBook Pro running Big Sur 11.4
**Describe the bug**
qutip fails to import with the following error:
```
Python 3.9.6 (default, Jun 28 2021, 19:24:41)
[Clang 12.0.5 (clang-1205.0.22.9)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import qutip
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/homebrew/lib/python3.9/site-packages/qutip/__init__.py", line 115, in <module>
info = qutip.hardware_info.hardware_info()
File "/opt/homebrew/lib/python3.9/site-packages/qutip/hardware_info.py", line 133, in hardware_info
out = _mac_hardware_info()
File "/opt/homebrew/lib/python3.9/site-packages/qutip/hardware_info.py", line 50, in _mac_hardware_info
results.update({'cpu_freq': int(float(os.popen('sysctl hw.cpufrequency')
IndexError: list index out of range
```
This appears to be caused by Apple having removed hw.cpufrequency from the list of sysctl's (see below)
**To Reproduce**
Installed qutip using homebrew/pip3 (after using the workaround of specifying OPENBLAS properly before building scipy etc... so that part is all sorted). Then python3 and import qutip.
```python
from qutip import identity
print(identity(2))
```
The terminal output (aftrer I hacked up a workaround for this issue):
```
Quantum object: dims = [[2], [2]], shape = (2, 2), type = oper, isherm = True
Qobj data =
[[1. 0.]
[0. 1.]]
```
**Expected behavior**
qutip successfully initializes :-)
**Your Environment**
Please use `qutip.about()` to get the information about your environment and paste it here.
```
>>> qutip.about()
QuTiP: Quantum Toolbox in Python
================================
Copyright (c) QuTiP team 2011 and later.
Current admin team: Alexander Pitchford, Nathan Shammah, Shahnawaz Ahmed, Neill Lambert, Eric Giguère, Boxi Li, Jake Lishman and Simon Cross.
Board members: Daniel Burgarth, Robert Johansson, Anton F. Kockum, Franco Nori and Will Zeng.
Original developers: R. J. Johansson & P. D. Nation.
Previous lead developers: Chris Granade & A. Grimsmo.
Currently developed through wide collaboration. See https://github.com/qutip for details.
QuTiP Version: 4.6.2
Numpy Version: 1.21.0
Scipy Version: 1.7.0
Cython Version: 0.29.23
Matplotlib Version: 3.4.2
Python Version: 3.9.6
Number of CPUs: 8
BLAS Info: OPENBLAS
OPENMP Installed: False
INTEL MKL Ext: False
Platform Info: Darwin (arm64)
Installation path: /opt/homebrew/lib/python3.9/site-packages/qutip
================================================================================
Please cite QuTiP in your publication.
================================================================================
```
**Additional context**
No sysctl hw.cpufrequency at all on this machine, so it blows up (some error handling in that function would be good :-)
```$ sysctl hw
hw.ncpu: 8
hw.byteorder: 1234
hw.memsize: 17179869184
hw.activecpu: 8
hw.optional.amx_version: 2
hw.optional.arm64: 1
hw.optional.armv8_1_atomics: 1
hw.optional.armv8_2_fhm: 1
hw.optional.armv8_2_sha3: 1
hw.optional.armv8_2_sha512: 1
hw.optional.armv8_crc32: 1
hw.optional.breakpoint: 6
hw.optional.floatingpoint: 1
hw.optional.neon: 1
hw.optional.neon_fp16: 1
hw.optional.neon_hpfp: 1
hw.optional.ucnormal_mem: 1
hw.optional.watchpoint: 4
hw.cacheconfig: 8 1 1 0 0 0 0 0 0 0
hw.cachelinesize: 128
hw.cachesize: 3616980992 65536 4194304 0 0 0 0 0 0 0
hw.cpu64bit_capable: 1
hw.cpufamily: 458787763
hw.cpusubfamily: 2
hw.cpusubtype: 2
hw.cputype: 16777228
hw.ephemeral_storage: 0
hw.l1dcachesize: 65536
hw.l1icachesize: 131072
hw.l2cachesize: 4194304
hw.logicalcpu: 8
hw.logicalcpu_max: 8
hw.osenvironment:
hw.packages: 1
hw.pagesize: 16384
hw.pagesize32: 16384
hw.physicalcpu: 8
hw.physicalcpu_max: 8
hw.serialdebugmode: 0
hw.tbfrequency: 24000000
hw.use_kernelmanagerd: 1
hw.use_recovery_securityd: 0
hw.targettype: J293
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qutip/hardware_info.py`
Content:
```
1 __all__ = ['hardware_info']
2
3 import multiprocessing
4 import os
5 import sys
6
7 import numpy as np
8
9
10 def _mac_hardware_info():
11 info = dict()
12 results = dict()
13 for l in [l.split(':') for l in os.popen('sysctl hw').readlines()[1:]]:
14 info[l[0].strip(' "').replace(' ', '_').lower().strip('hw.')] = \
15 l[1].strip('.\n ')
16 results.update({'cpus': int(info['physicalcpu'])})
17 results.update({'cpu_freq': int(float(os.popen('sysctl hw.cpufrequency')
18 .readlines()[0].split(':')[
19 1]) / 1000000)})
20 results.update({'memsize': int(int(info['memsize']) / (1024 ** 2))})
21 # add OS information
22 results.update({'os': 'Mac OSX'})
23 return results
24
25
26 def _linux_hardware_info():
27 results = {}
28 # get cpu number
29 sockets = 0
30 cores_per_socket = 0
31 frequency = 0.0
32 with open("/proc/cpuinfo") as f:
33 for l in [l.split(':') for l in f.readlines()]:
34 if (l[0].strip() == "physical id"):
35 sockets = np.maximum(sockets,int(l[1].strip())+1)
36 if (l[0].strip() == "cpu cores"):
37 cores_per_socket = int(l[1].strip())
38 if (l[0].strip() == "cpu MHz"):
39 frequency = float(l[1].strip()) / 1000.
40 results.update({'cpus': sockets * cores_per_socket})
41 # get cpu frequency directly (bypasses freq scaling)
42 try:
43 with open(
44 "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq") as f:
45 line = f.readlines()[0]
46 frequency = float(line.strip('\n')) / 1000000.
47 except:
48 pass
49 results.update({'cpu_freq': frequency})
50
51 # get total amount of memory
52 mem_info = dict()
53 with open("/proc/meminfo") as f:
54 for l in [l.split(':') for l in f.readlines()]:
55 mem_info[l[0]] = l[1].strip('.\n ').strip('kB')
56 results.update({'memsize': int(mem_info['MemTotal']) / 1024})
57 # add OS information
58 results.update({'os': 'Linux'})
59 return results
60
61
62 def _freebsd_hardware_info():
63 results = {}
64 results.update({'cpus': int(os.popen('sysctl -n hw.ncpu').readlines()[0])})
65 results.update(
66 {'cpu_freq': int(os.popen('sysctl -n dev.cpu.0.freq').readlines()[0])})
67 results.update({'memsize': int(
68 os.popen('sysctl -n hw.realmem').readlines()[0]) / 1024})
69 results.update({'os': 'FreeBSD'})
70 return results
71
72
73 def _win_hardware_info():
74 try:
75 from comtypes.client import CoGetObject
76 winmgmts_root = CoGetObject(r"winmgmts:root\cimv2")
77 cpus = winmgmts_root.ExecQuery("Select * from Win32_Processor")
78 ncpus = 0
79 for cpu in cpus:
80 ncpus += int(cpu.Properties_['NumberOfCores'].Value)
81 except:
82 ncpus = int(multiprocessing.cpu_count())
83 return {'os': 'Windows', 'cpus': ncpus}
84
85
86 def hardware_info():
87 """
88 Returns basic hardware information about the computer.
89
90 Gives actual number of CPU's in the machine, even when hyperthreading is
91 turned on.
92
93 Returns
94 -------
95 info : dict
96 Dictionary containing cpu and memory information.
97
98 """
99 if sys.platform == 'darwin':
100 out = _mac_hardware_info()
101 elif sys.platform == 'win32':
102 out = _win_hardware_info()
103 elif sys.platform in ['linux', 'linux2']:
104 out = _linux_hardware_info()
105 elif sys.platform.startswith('freebsd'):
106 out = _freebsd_hardware_info()
107 else:
108 out = {}
109 return out
110
111
112 if __name__ == '__main__':
113 print(hardware_info())
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qutip/hardware_info.py b/qutip/hardware_info.py
--- a/qutip/hardware_info.py
+++ b/qutip/hardware_info.py
@@ -8,15 +8,27 @@
def _mac_hardware_info():
- info = dict()
- results = dict()
- for l in [l.split(':') for l in os.popen('sysctl hw').readlines()[1:]]:
- info[l[0].strip(' "').replace(' ', '_').lower().strip('hw.')] = \
- l[1].strip('.\n ')
+ info = {}
+ results = {}
+ with os.popen('sysctl hw') as f:
+ lines = f.readlines()
+ for line in lines[1:]:
+ key, _, value = line.partition(':')
+ key = key.strip(' "').replace(' ', '_').lower().strip('hw.')
+ value = value.strip('.\n ')
+ info[key] = value
results.update({'cpus': int(info['physicalcpu'])})
- results.update({'cpu_freq': int(float(os.popen('sysctl hw.cpufrequency')
- .readlines()[0].split(':')[
- 1]) / 1000000)})
+ # Mac OS currently doesn't not provide hw.cpufrequency on the M1
+ with os.popen('sysctl hw.cpufrequency') as f:
+ cpu_freq_lines = f.readlines()
+ if cpu_freq_lines:
+ # Yay, hw.cpufrequency present
+ results.update({
+ 'cpu_freq': float(cpu_freq_lines[0].split(':')[1]) / 1000000,
+ })
+ else:
+ # No hw.cpufrequency, assume Apple M1 CPU (all are 3.2 GHz currently)
+ results['cpu_freq'] = 3.2
results.update({'memsize': int(int(info['memsize']) / (1024 ** 2))})
# add OS information
results.update({'os': 'Mac OSX'})
@@ -32,19 +44,19 @@
with open("/proc/cpuinfo") as f:
for l in [l.split(':') for l in f.readlines()]:
if (l[0].strip() == "physical id"):
- sockets = np.maximum(sockets,int(l[1].strip())+1)
+ sockets = np.maximum(sockets, int(l[1].strip()) + 1)
if (l[0].strip() == "cpu cores"):
cores_per_socket = int(l[1].strip())
if (l[0].strip() == "cpu MHz"):
frequency = float(l[1].strip()) / 1000.
- results.update({'cpus': sockets * cores_per_socket})
+ results.update({'cpus': int(sockets * cores_per_socket)})
# get cpu frequency directly (bypasses freq scaling)
try:
with open(
"/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq") as f:
line = f.readlines()[0]
frequency = float(line.strip('\n')) / 1000000.
- except:
+ except Exception:
pass
results.update({'cpu_freq': frequency})
@@ -78,7 +90,7 @@
ncpus = 0
for cpu in cpus:
ncpus += int(cpu.Properties_['NumberOfCores'].Value)
- except:
+ except Exception:
ncpus = int(multiprocessing.cpu_count())
return {'os': 'Windows', 'cpus': ncpus}
|
{"golden_diff": "diff --git a/qutip/hardware_info.py b/qutip/hardware_info.py\n--- a/qutip/hardware_info.py\n+++ b/qutip/hardware_info.py\n@@ -8,15 +8,27 @@\n \n \n def _mac_hardware_info():\n- info = dict()\n- results = dict()\n- for l in [l.split(':') for l in os.popen('sysctl hw').readlines()[1:]]:\n- info[l[0].strip(' \"').replace(' ', '_').lower().strip('hw.')] = \\\n- l[1].strip('.\\n ')\n+ info = {}\n+ results = {}\n+ with os.popen('sysctl hw') as f:\n+ lines = f.readlines()\n+ for line in lines[1:]:\n+ key, _, value = line.partition(':')\n+ key = key.strip(' \"').replace(' ', '_').lower().strip('hw.')\n+ value = value.strip('.\\n ')\n+ info[key] = value\n results.update({'cpus': int(info['physicalcpu'])})\n- results.update({'cpu_freq': int(float(os.popen('sysctl hw.cpufrequency')\n- .readlines()[0].split(':')[\n- 1]) / 1000000)})\n+ # Mac OS currently doesn't not provide hw.cpufrequency on the M1\n+ with os.popen('sysctl hw.cpufrequency') as f:\n+ cpu_freq_lines = f.readlines()\n+ if cpu_freq_lines:\n+ # Yay, hw.cpufrequency present\n+ results.update({\n+ 'cpu_freq': float(cpu_freq_lines[0].split(':')[1]) / 1000000,\n+ })\n+ else:\n+ # No hw.cpufrequency, assume Apple M1 CPU (all are 3.2 GHz currently)\n+ results['cpu_freq'] = 3.2\n results.update({'memsize': int(int(info['memsize']) / (1024 ** 2))})\n # add OS information\n results.update({'os': 'Mac OSX'})\n@@ -32,19 +44,19 @@\n with open(\"/proc/cpuinfo\") as f:\n for l in [l.split(':') for l in f.readlines()]:\n if (l[0].strip() == \"physical id\"):\n- sockets = np.maximum(sockets,int(l[1].strip())+1)\n+ sockets = np.maximum(sockets, int(l[1].strip()) + 1)\n if (l[0].strip() == \"cpu cores\"):\n cores_per_socket = int(l[1].strip())\n if (l[0].strip() == \"cpu MHz\"):\n frequency = float(l[1].strip()) / 1000.\n- results.update({'cpus': sockets * cores_per_socket})\n+ results.update({'cpus': int(sockets * cores_per_socket)})\n # get cpu frequency directly (bypasses freq scaling)\n try:\n with open(\n \"/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq\") as f:\n line = f.readlines()[0]\n frequency = float(line.strip('\\n')) / 1000000.\n- except:\n+ except Exception:\n pass\n results.update({'cpu_freq': frequency})\n \n@@ -78,7 +90,7 @@\n ncpus = 0\n for cpu in cpus:\n ncpus += int(cpu.Properties_['NumberOfCores'].Value)\n- except:\n+ except Exception:\n ncpus = int(multiprocessing.cpu_count())\n return {'os': 'Windows', 'cpus': ncpus}\n", "issue": "hardware_info fails (again) on M1 MacBook Pro running Big Sur 11.4\n**Describe the bug**\r\nqutip fails to import with the following error:\r\n```\r\nPython 3.9.6 (default, Jun 28 2021, 19:24:41) \r\n[Clang 12.0.5 (clang-1205.0.22.9)] on darwin\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import qutip\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/opt/homebrew/lib/python3.9/site-packages/qutip/__init__.py\", line 115, in <module>\r\n info = qutip.hardware_info.hardware_info()\r\n File \"/opt/homebrew/lib/python3.9/site-packages/qutip/hardware_info.py\", line 133, in hardware_info\r\n out = _mac_hardware_info()\r\n File \"/opt/homebrew/lib/python3.9/site-packages/qutip/hardware_info.py\", line 50, in _mac_hardware_info\r\n results.update({'cpu_freq': int(float(os.popen('sysctl hw.cpufrequency')\r\nIndexError: list index out of range\r\n```\r\n\r\nThis appears to be caused by Apple having removed hw.cpufrequency from the list of sysctl's (see below)\r\n\r\n**To Reproduce**\r\nInstalled qutip 
using homebrew/pip3 (after using the workaround of specifying OPENBLAS properly before building scipy etc... so that part is all sorted). Then python3 and import qutip.\r\n\r\n```python\r\nfrom qutip import identity\r\nprint(identity(2))\r\n```\r\nThe terminal output (aftrer I hacked up a workaround for this issue):\r\n```\r\nQuantum object: dims = [[2], [2]], shape = (2, 2), type = oper, isherm = True\r\nQobj data =\r\n[[1. 0.]\r\n [0. 1.]]\r\n```\r\n\r\n**Expected behavior**\r\nqutip successfully initializes :-)\r\n\r\n**Your Environment**\r\nPlease use `qutip.about()` to get the information about your environment and paste it here.\r\n```\r\n>>> qutip.about()\r\n\r\nQuTiP: Quantum Toolbox in Python\r\n================================\r\nCopyright (c) QuTiP team 2011 and later.\r\nCurrent admin team: Alexander Pitchford, Nathan Shammah, Shahnawaz Ahmed, Neill Lambert, Eric Gigu\u00e8re, Boxi Li, Jake Lishman and Simon Cross.\r\nBoard members: Daniel Burgarth, Robert Johansson, Anton F. Kockum, Franco Nori and Will Zeng.\r\nOriginal developers: R. J. Johansson & P. D. Nation.\r\nPrevious lead developers: Chris Granade & A. Grimsmo.\r\nCurrently developed through wide collaboration. See https://github.com/qutip for details.\r\n\r\nQuTiP Version: 4.6.2\r\nNumpy Version: 1.21.0\r\nScipy Version: 1.7.0\r\nCython Version: 0.29.23\r\nMatplotlib Version: 3.4.2\r\nPython Version: 3.9.6\r\nNumber of CPUs: 8\r\nBLAS Info: OPENBLAS\r\nOPENMP Installed: False\r\nINTEL MKL Ext: False\r\nPlatform Info: Darwin (arm64)\r\nInstallation path: /opt/homebrew/lib/python3.9/site-packages/qutip\r\n================================================================================\r\nPlease cite QuTiP in your publication.\r\n================================================================================\r\n```\r\n\r\n**Additional context**\r\n No sysctl hw.cpufrequency at all on this machine, so it blows up (some error handling in that function would be good :-)\r\n\r\n```$ sysctl hw\r\nhw.ncpu: 8\r\nhw.byteorder: 1234\r\nhw.memsize: 17179869184\r\nhw.activecpu: 8\r\nhw.optional.amx_version: 2\r\nhw.optional.arm64: 1\r\nhw.optional.armv8_1_atomics: 1\r\nhw.optional.armv8_2_fhm: 1\r\nhw.optional.armv8_2_sha3: 1\r\nhw.optional.armv8_2_sha512: 1\r\nhw.optional.armv8_crc32: 1\r\nhw.optional.breakpoint: 6\r\nhw.optional.floatingpoint: 1\r\nhw.optional.neon: 1\r\nhw.optional.neon_fp16: 1\r\nhw.optional.neon_hpfp: 1\r\nhw.optional.ucnormal_mem: 1\r\nhw.optional.watchpoint: 4\r\nhw.cacheconfig: 8 1 1 0 0 0 0 0 0 0\r\nhw.cachelinesize: 128\r\nhw.cachesize: 3616980992 65536 4194304 0 0 0 0 0 0 0\r\nhw.cpu64bit_capable: 1\r\nhw.cpufamily: 458787763\r\nhw.cpusubfamily: 2\r\nhw.cpusubtype: 2\r\nhw.cputype: 16777228\r\nhw.ephemeral_storage: 0\r\nhw.l1dcachesize: 65536\r\nhw.l1icachesize: 131072\r\nhw.l2cachesize: 4194304\r\nhw.logicalcpu: 8\r\nhw.logicalcpu_max: 8\r\nhw.osenvironment: \r\nhw.packages: 1\r\nhw.pagesize: 16384\r\nhw.pagesize32: 16384\r\nhw.physicalcpu: 8\r\nhw.physicalcpu_max: 8\r\nhw.serialdebugmode: 0\r\nhw.tbfrequency: 24000000\r\nhw.use_kernelmanagerd: 1\r\nhw.use_recovery_securityd: 0\r\nhw.targettype: J293\r\n```\r\n\r\n\n", "before_files": [{"content": "__all__ = ['hardware_info']\n\nimport multiprocessing\nimport os\nimport sys\n\nimport numpy as np\n\n\ndef _mac_hardware_info():\n info = dict()\n results = dict()\n for l in [l.split(':') for l in os.popen('sysctl hw').readlines()[1:]]:\n info[l[0].strip(' \"').replace(' ', '_').lower().strip('hw.')] = \\\n l[1].strip('.\\n ')\n results.update({'cpus': 
int(info['physicalcpu'])})\n results.update({'cpu_freq': int(float(os.popen('sysctl hw.cpufrequency')\n .readlines()[0].split(':')[\n 1]) / 1000000)})\n results.update({'memsize': int(int(info['memsize']) / (1024 ** 2))})\n # add OS information\n results.update({'os': 'Mac OSX'})\n return results\n\n\ndef _linux_hardware_info():\n results = {}\n # get cpu number\n sockets = 0\n cores_per_socket = 0\n frequency = 0.0\n with open(\"/proc/cpuinfo\") as f:\n for l in [l.split(':') for l in f.readlines()]:\n if (l[0].strip() == \"physical id\"):\n sockets = np.maximum(sockets,int(l[1].strip())+1)\n if (l[0].strip() == \"cpu cores\"):\n cores_per_socket = int(l[1].strip())\n if (l[0].strip() == \"cpu MHz\"):\n frequency = float(l[1].strip()) / 1000.\n results.update({'cpus': sockets * cores_per_socket})\n # get cpu frequency directly (bypasses freq scaling)\n try:\n with open(\n \"/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq\") as f:\n line = f.readlines()[0]\n frequency = float(line.strip('\\n')) / 1000000.\n except:\n pass\n results.update({'cpu_freq': frequency})\n\n # get total amount of memory\n mem_info = dict()\n with open(\"/proc/meminfo\") as f:\n for l in [l.split(':') for l in f.readlines()]:\n mem_info[l[0]] = l[1].strip('.\\n ').strip('kB')\n results.update({'memsize': int(mem_info['MemTotal']) / 1024})\n # add OS information\n results.update({'os': 'Linux'})\n return results\n\n\ndef _freebsd_hardware_info():\n results = {}\n results.update({'cpus': int(os.popen('sysctl -n hw.ncpu').readlines()[0])})\n results.update(\n {'cpu_freq': int(os.popen('sysctl -n dev.cpu.0.freq').readlines()[0])})\n results.update({'memsize': int(\n os.popen('sysctl -n hw.realmem').readlines()[0]) / 1024})\n results.update({'os': 'FreeBSD'})\n return results\n\n\ndef _win_hardware_info():\n try:\n from comtypes.client import CoGetObject\n winmgmts_root = CoGetObject(r\"winmgmts:root\\cimv2\")\n cpus = winmgmts_root.ExecQuery(\"Select * from Win32_Processor\")\n ncpus = 0\n for cpu in cpus:\n ncpus += int(cpu.Properties_['NumberOfCores'].Value)\n except:\n ncpus = int(multiprocessing.cpu_count())\n return {'os': 'Windows', 'cpus': ncpus}\n\n\ndef hardware_info():\n \"\"\"\n Returns basic hardware information about the computer.\n\n Gives actual number of CPU's in the machine, even when hyperthreading is\n turned on.\n\n Returns\n -------\n info : dict\n Dictionary containing cpu and memory information.\n\n \"\"\"\n if sys.platform == 'darwin':\n out = _mac_hardware_info()\n elif sys.platform == 'win32':\n out = _win_hardware_info()\n elif sys.platform in ['linux', 'linux2']:\n out = _linux_hardware_info()\n elif sys.platform.startswith('freebsd'):\n out = _freebsd_hardware_info()\n else:\n out = {}\n return out\n\n\nif __name__ == '__main__':\n print(hardware_info())\n", "path": "qutip/hardware_info.py"}], "after_files": [{"content": "__all__ = ['hardware_info']\n\nimport multiprocessing\nimport os\nimport sys\n\nimport numpy as np\n\n\ndef _mac_hardware_info():\n info = {}\n results = {}\n with os.popen('sysctl hw') as f:\n lines = f.readlines()\n for line in lines[1:]:\n key, _, value = line.partition(':')\n key = key.strip(' \"').replace(' ', '_').lower().strip('hw.')\n value = value.strip('.\\n ')\n info[key] = value\n results.update({'cpus': int(info['physicalcpu'])})\n # Mac OS currently doesn't not provide hw.cpufrequency on the M1\n with os.popen('sysctl hw.cpufrequency') as f:\n cpu_freq_lines = f.readlines()\n if cpu_freq_lines:\n # Yay, hw.cpufrequency present\n results.update({\n 
'cpu_freq': float(cpu_freq_lines[0].split(':')[1]) / 1000000,\n })\n else:\n # No hw.cpufrequency, assume Apple M1 CPU (all are 3.2 GHz currently)\n results['cpu_freq'] = 3.2\n results.update({'memsize': int(int(info['memsize']) / (1024 ** 2))})\n # add OS information\n results.update({'os': 'Mac OSX'})\n return results\n\n\ndef _linux_hardware_info():\n results = {}\n # get cpu number\n sockets = 0\n cores_per_socket = 0\n frequency = 0.0\n with open(\"/proc/cpuinfo\") as f:\n for l in [l.split(':') for l in f.readlines()]:\n if (l[0].strip() == \"physical id\"):\n sockets = np.maximum(sockets, int(l[1].strip()) + 1)\n if (l[0].strip() == \"cpu cores\"):\n cores_per_socket = int(l[1].strip())\n if (l[0].strip() == \"cpu MHz\"):\n frequency = float(l[1].strip()) / 1000.\n results.update({'cpus': int(sockets * cores_per_socket)})\n # get cpu frequency directly (bypasses freq scaling)\n try:\n with open(\n \"/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq\") as f:\n line = f.readlines()[0]\n frequency = float(line.strip('\\n')) / 1000000.\n except Exception:\n pass\n results.update({'cpu_freq': frequency})\n\n # get total amount of memory\n mem_info = dict()\n with open(\"/proc/meminfo\") as f:\n for l in [l.split(':') for l in f.readlines()]:\n mem_info[l[0]] = l[1].strip('.\\n ').strip('kB')\n results.update({'memsize': int(mem_info['MemTotal']) / 1024})\n # add OS information\n results.update({'os': 'Linux'})\n return results\n\n\ndef _freebsd_hardware_info():\n results = {}\n results.update({'cpus': int(os.popen('sysctl -n hw.ncpu').readlines()[0])})\n results.update(\n {'cpu_freq': int(os.popen('sysctl -n dev.cpu.0.freq').readlines()[0])})\n results.update({'memsize': int(\n os.popen('sysctl -n hw.realmem').readlines()[0]) / 1024})\n results.update({'os': 'FreeBSD'})\n return results\n\n\ndef _win_hardware_info():\n try:\n from comtypes.client import CoGetObject\n winmgmts_root = CoGetObject(r\"winmgmts:root\\cimv2\")\n cpus = winmgmts_root.ExecQuery(\"Select * from Win32_Processor\")\n ncpus = 0\n for cpu in cpus:\n ncpus += int(cpu.Properties_['NumberOfCores'].Value)\n except Exception:\n ncpus = int(multiprocessing.cpu_count())\n return {'os': 'Windows', 'cpus': ncpus}\n\n\ndef hardware_info():\n \"\"\"\n Returns basic hardware information about the computer.\n\n Gives actual number of CPU's in the machine, even when hyperthreading is\n turned on.\n\n Returns\n -------\n info : dict\n Dictionary containing cpu and memory information.\n\n \"\"\"\n if sys.platform == 'darwin':\n out = _mac_hardware_info()\n elif sys.platform == 'win32':\n out = _win_hardware_info()\n elif sys.platform in ['linux', 'linux2']:\n out = _linux_hardware_info()\n elif sys.platform.startswith('freebsd'):\n out = _freebsd_hardware_info()\n else:\n out = {}\n return out\n\n\nif __name__ == '__main__':\n print(hardware_info())\n", "path": "qutip/hardware_info.py"}]}
| 2,787 | 814 |
gh_patches_debug_3599 | rasdani/github-patches | git_diff | certbot__certbot-606 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
nginx plugin destroys config
I have a config file called webp.conf in /etc/nginx/conf.d/ which works great.
After running letsencrypt -d example.org run the webp.conf is broken because it's missing a closing }
https://pastebin.mozilla.org/8837365
Line 18 gets removed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `letsencrypt-nginx/letsencrypt_nginx/nginxparser.py`
Content:
```
1 """Very low-level nginx config parser based on pyparsing."""
2 import string
3
4 from pyparsing import (
5 Literal, White, Word, alphanums, CharsNotIn, Forward, Group,
6 Optional, OneOrMore, Regex, ZeroOrMore)
7 from pyparsing import stringEnd
8 from pyparsing import restOfLine
9
10 class RawNginxParser(object):
11 # pylint: disable=expression-not-assigned
12 """A class that parses nginx configuration with pyparsing."""
13
14 # constants
15 left_bracket = Literal("{").suppress()
16 right_bracket = Literal("}").suppress()
17 semicolon = Literal(";").suppress()
18 space = White().suppress()
19 key = Word(alphanums + "_/")
20 # Matches anything that is not a special character AND any chars in single
21 # or double quotes
22 value = Regex(r"((\".*\")?(\'.*\')?[^\{\};,]?)+")
23 location = CharsNotIn("{};," + string.whitespace)
24 # modifier for location uri [ = | ~ | ~* | ^~ ]
25 modifier = Literal("=") | Literal("~*") | Literal("~") | Literal("^~")
26
27 # rules
28 comment = Literal('#') + restOfLine()
29 assignment = (key + Optional(space + value, default=None) + semicolon)
30 location_statement = Optional(space + modifier) + Optional(space + location)
31 if_statement = Literal("if") + space + Regex(r"\(.+\)") + space
32 block = Forward()
33
34 block << Group(
35 (Group(key + location_statement) ^ Group(if_statement))
36 + left_bracket
37 + Group(ZeroOrMore(Group(comment | assignment) | block))
38 + right_bracket)
39
40 script = OneOrMore(Group(comment | assignment) | block) + stringEnd
41
42 def __init__(self, source):
43 self.source = source
44
45 def parse(self):
46 """Returns the parsed tree."""
47 return self.script.parseString(self.source)
48
49 def as_list(self):
50 """Returns the parsed tree as a list."""
51 return self.parse().asList()
52
53
54 class RawNginxDumper(object):
55 # pylint: disable=too-few-public-methods
56 """A class that dumps nginx configuration from the provided tree."""
57 def __init__(self, blocks, indentation=4):
58 self.blocks = blocks
59 self.indentation = indentation
60
61 def __iter__(self, blocks=None, current_indent=0, spacer=' '):
62 """Iterates the dumped nginx content."""
63 blocks = blocks or self.blocks
64 for key, values in blocks:
65 indentation = spacer * current_indent
66 if isinstance(key, list):
67 if current_indent:
68 yield ''
69 yield indentation + spacer.join(key) + ' {'
70
71 for parameter in values:
72 dumped = self.__iter__([parameter], current_indent + self.indentation)
73 for line in dumped:
74 yield line
75
76 yield indentation + '}'
77 else:
78 if key == '#':
79 yield spacer * current_indent + key + values
80 else:
81 if values is None:
82 yield spacer * current_indent + key + ';'
83 else:
84 yield spacer * current_indent + key + spacer + values + ';'
85
86 def as_string(self):
87 """Return the parsed block as a string."""
88 return '\n'.join(self) + '\n'
89
90
91 # Shortcut functions to respect Python's serialization interface
92 # (like pyyaml, picker or json)
93
94 def loads(source):
95 """Parses from a string.
96
97 :param str souce: The string to parse
98 :returns: The parsed tree
99 :rtype: list
100
101 """
102 return RawNginxParser(source).as_list()
103
104
105 def load(_file):
106 """Parses from a file.
107
108 :param file _file: The file to parse
109 :returns: The parsed tree
110 :rtype: list
111
112 """
113 return loads(_file.read())
114
115
116 def dumps(blocks, indentation=4):
117 """Dump to a string.
118
119 :param list block: The parsed tree
120 :param int indentation: The number of spaces to indent
121 :rtype: str
122
123 """
124 return RawNginxDumper(blocks, indentation).as_string()
125
126
127 def dump(blocks, _file, indentation=4):
128 """Dump to a file.
129
130 :param list block: The parsed tree
131 :param file _file: The file to dump to
132 :param int indentation: The number of spaces to indent
133 :rtype: NoneType
134
135 """
136 return _file.write(dumps(blocks, indentation))
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py b/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py
--- a/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py
+++ b/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py
@@ -37,7 +37,7 @@
+ Group(ZeroOrMore(Group(comment | assignment) | block))
+ right_bracket)
- script = OneOrMore(Group(comment | assignment) | block) + stringEnd
+ script = OneOrMore(Group(comment | assignment) ^ block) + stringEnd
def __init__(self, source):
self.source = source
|
{"golden_diff": "diff --git a/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py b/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py\n--- a/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py\n+++ b/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py\n@@ -37,7 +37,7 @@\n + Group(ZeroOrMore(Group(comment | assignment) | block))\n + right_bracket)\n \n- script = OneOrMore(Group(comment | assignment) | block) + stringEnd\n+ script = OneOrMore(Group(comment | assignment) ^ block) + stringEnd\n \n def __init__(self, source):\n self.source = source\n", "issue": "nginx plugin destroys config\nI have a config file called webp.conf in /etc/nginx/conf.d/ which works great.\nAfter running letsencrypt -d example.org run the webp.conf is broken because it's missing a closing }\nhttps://pastebin.mozilla.org/8837365\nLine 18 gets removed.\n\n", "before_files": [{"content": "\"\"\"Very low-level nginx config parser based on pyparsing.\"\"\"\nimport string\n\nfrom pyparsing import (\n Literal, White, Word, alphanums, CharsNotIn, Forward, Group,\n Optional, OneOrMore, Regex, ZeroOrMore)\nfrom pyparsing import stringEnd\nfrom pyparsing import restOfLine\n\nclass RawNginxParser(object):\n # pylint: disable=expression-not-assigned\n \"\"\"A class that parses nginx configuration with pyparsing.\"\"\"\n\n # constants\n left_bracket = Literal(\"{\").suppress()\n right_bracket = Literal(\"}\").suppress()\n semicolon = Literal(\";\").suppress()\n space = White().suppress()\n key = Word(alphanums + \"_/\")\n # Matches anything that is not a special character AND any chars in single\n # or double quotes\n value = Regex(r\"((\\\".*\\\")?(\\'.*\\')?[^\\{\\};,]?)+\")\n location = CharsNotIn(\"{};,\" + string.whitespace)\n # modifier for location uri [ = | ~ | ~* | ^~ ]\n modifier = Literal(\"=\") | Literal(\"~*\") | Literal(\"~\") | Literal(\"^~\")\n\n # rules\n comment = Literal('#') + restOfLine()\n assignment = (key + Optional(space + value, default=None) + semicolon)\n location_statement = Optional(space + modifier) + Optional(space + location)\n if_statement = Literal(\"if\") + space + Regex(r\"\\(.+\\)\") + space\n block = Forward()\n\n block << Group(\n (Group(key + location_statement) ^ Group(if_statement))\n + left_bracket\n + Group(ZeroOrMore(Group(comment | assignment) | block))\n + right_bracket)\n\n script = OneOrMore(Group(comment | assignment) | block) + stringEnd\n\n def __init__(self, source):\n self.source = source\n\n def parse(self):\n \"\"\"Returns the parsed tree.\"\"\"\n return self.script.parseString(self.source)\n\n def as_list(self):\n \"\"\"Returns the parsed tree as a list.\"\"\"\n return self.parse().asList()\n\n\nclass RawNginxDumper(object):\n # pylint: disable=too-few-public-methods\n \"\"\"A class that dumps nginx configuration from the provided tree.\"\"\"\n def __init__(self, blocks, indentation=4):\n self.blocks = blocks\n self.indentation = indentation\n\n def __iter__(self, blocks=None, current_indent=0, spacer=' '):\n \"\"\"Iterates the dumped nginx content.\"\"\"\n blocks = blocks or self.blocks\n for key, values in blocks:\n indentation = spacer * current_indent\n if isinstance(key, list):\n if current_indent:\n yield ''\n yield indentation + spacer.join(key) + ' {'\n\n for parameter in values:\n dumped = self.__iter__([parameter], current_indent + self.indentation)\n for line in dumped:\n yield line\n\n yield indentation + '}'\n else:\n if key == '#':\n yield spacer * current_indent + key + values\n else:\n if values is None:\n yield spacer * current_indent + key + ';'\n else:\n 
yield spacer * current_indent + key + spacer + values + ';'\n\n def as_string(self):\n \"\"\"Return the parsed block as a string.\"\"\"\n return '\\n'.join(self) + '\\n'\n\n\n# Shortcut functions to respect Python's serialization interface\n# (like pyyaml, picker or json)\n\ndef loads(source):\n \"\"\"Parses from a string.\n\n :param str souce: The string to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return RawNginxParser(source).as_list()\n\n\ndef load(_file):\n \"\"\"Parses from a file.\n\n :param file _file: The file to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return loads(_file.read())\n\n\ndef dumps(blocks, indentation=4):\n \"\"\"Dump to a string.\n\n :param list block: The parsed tree\n :param int indentation: The number of spaces to indent\n :rtype: str\n\n \"\"\"\n return RawNginxDumper(blocks, indentation).as_string()\n\n\ndef dump(blocks, _file, indentation=4):\n \"\"\"Dump to a file.\n\n :param list block: The parsed tree\n :param file _file: The file to dump to\n :param int indentation: The number of spaces to indent\n :rtype: NoneType\n\n \"\"\"\n return _file.write(dumps(blocks, indentation))\n", "path": "letsencrypt-nginx/letsencrypt_nginx/nginxparser.py"}], "after_files": [{"content": "\"\"\"Very low-level nginx config parser based on pyparsing.\"\"\"\nimport string\n\nfrom pyparsing import (\n Literal, White, Word, alphanums, CharsNotIn, Forward, Group,\n Optional, OneOrMore, Regex, ZeroOrMore)\nfrom pyparsing import stringEnd\nfrom pyparsing import restOfLine\n\nclass RawNginxParser(object):\n # pylint: disable=expression-not-assigned\n \"\"\"A class that parses nginx configuration with pyparsing.\"\"\"\n\n # constants\n left_bracket = Literal(\"{\").suppress()\n right_bracket = Literal(\"}\").suppress()\n semicolon = Literal(\";\").suppress()\n space = White().suppress()\n key = Word(alphanums + \"_/\")\n # Matches anything that is not a special character AND any chars in single\n # or double quotes\n value = Regex(r\"((\\\".*\\\")?(\\'.*\\')?[^\\{\\};,]?)+\")\n location = CharsNotIn(\"{};,\" + string.whitespace)\n # modifier for location uri [ = | ~ | ~* | ^~ ]\n modifier = Literal(\"=\") | Literal(\"~*\") | Literal(\"~\") | Literal(\"^~\")\n\n # rules\n comment = Literal('#') + restOfLine()\n assignment = (key + Optional(space + value, default=None) + semicolon)\n location_statement = Optional(space + modifier) + Optional(space + location)\n if_statement = Literal(\"if\") + space + Regex(r\"\\(.+\\)\") + space\n block = Forward()\n\n block << Group(\n (Group(key + location_statement) ^ Group(if_statement))\n + left_bracket\n + Group(ZeroOrMore(Group(comment | assignment) | block))\n + right_bracket)\n\n script = OneOrMore(Group(comment | assignment) ^ block) + stringEnd\n\n def __init__(self, source):\n self.source = source\n\n def parse(self):\n \"\"\"Returns the parsed tree.\"\"\"\n return self.script.parseString(self.source)\n\n def as_list(self):\n \"\"\"Returns the parsed tree as a list.\"\"\"\n return self.parse().asList()\n\n\nclass RawNginxDumper(object):\n # pylint: disable=too-few-public-methods\n \"\"\"A class that dumps nginx configuration from the provided tree.\"\"\"\n def __init__(self, blocks, indentation=4):\n self.blocks = blocks\n self.indentation = indentation\n\n def __iter__(self, blocks=None, current_indent=0, spacer=' '):\n \"\"\"Iterates the dumped nginx content.\"\"\"\n blocks = blocks or self.blocks\n for key, values in blocks:\n indentation = spacer * current_indent\n if isinstance(key, list):\n if 
current_indent:\n yield ''\n yield indentation + spacer.join(key) + ' {'\n\n for parameter in values:\n dumped = self.__iter__([parameter], current_indent + self.indentation)\n for line in dumped:\n yield line\n\n yield indentation + '}'\n else:\n if key == '#':\n yield spacer * current_indent + key + values\n else:\n if values is None:\n yield spacer * current_indent + key + ';'\n else:\n yield spacer * current_indent + key + spacer + values + ';'\n\n def as_string(self):\n \"\"\"Return the parsed block as a string.\"\"\"\n return '\\n'.join(self) + '\\n'\n\n\n# Shortcut functions to respect Python's serialization interface\n# (like pyyaml, picker or json)\n\ndef loads(source):\n \"\"\"Parses from a string.\n\n :param str souce: The string to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return RawNginxParser(source).as_list()\n\n\ndef load(_file):\n \"\"\"Parses from a file.\n\n :param file _file: The file to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return loads(_file.read())\n\n\ndef dumps(blocks, indentation=4):\n \"\"\"Dump to a string.\n\n :param list block: The parsed tree\n :param int indentation: The number of spaces to indent\n :rtype: str\n\n \"\"\"\n return RawNginxDumper(blocks, indentation).as_string()\n\n\ndef dump(blocks, _file, indentation=4):\n \"\"\"Dump to a file.\n\n :param list block: The parsed tree\n :param file _file: The file to dump to\n :param int indentation: The number of spaces to indent\n :rtype: NoneType\n\n \"\"\"\n return _file.write(dumps(blocks, indentation))\n", "path": "letsencrypt-nginx/letsencrypt_nginx/nginxparser.py"}]}
| 1,638 | 151 |
gh_patches_debug_3934
|
rasdani/github-patches
|
git_diff
|
DDMAL__CantusDB-771
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
should editors get automatic proofreader access to their own sources?
It seems weird to me that on sources you (an editor) create, you can always access the Volpiano editor (assuming the source is non-empty!) but you have to be added as an editor to access the proofreading form.
<img width="969" alt="image" src="https://user-images.githubusercontent.com/67451875/209043248-1a0a8e13-0196-498d-a835-081fddc3ee13.png">
<img width="666" alt="image" src="https://user-images.githubusercontent.com/67451875/209043188-94b4b649-c1e7-41cc-9692-fd2a6947c28c.png">
If something were intentionally preventing you from proofreading your own source as part of the workflow, this might be somewhat useful, but as it is, it just adds an extra step to get permissions you should in theory already have.
New source created doesn't show up in home page
I just created a new source from my account. It doesn't show up immediately on the home page in the My Sources sidebar (even after refreshing the page and logging in again). It does, however, appear in the database as well as on the my-sources page.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/main_app/views/source.py`
Content:
```
1 from django.views.generic import DetailView, ListView, CreateView, UpdateView
2 from django.db.models import Q, Prefetch
3 from main_app.models import Source, Provenance, Century
4 from main_app.forms import SourceCreateForm, SourceEditForm
5 from django.contrib import messages
6 from django.urls import reverse
7 from django.contrib.auth.mixins import LoginRequiredMixin
8 from django.http import HttpResponseRedirect
9 from django.contrib.auth.mixins import UserPassesTestMixin
10 from django.core.exceptions import PermissionDenied
11 from django.shortcuts import get_object_or_404
12 from main_app.views.chant import get_feast_selector_options
13
14
15 class SourceDetailView(DetailView):
16 model = Source
17 context_object_name = "source"
18 template_name = "source_detail.html"
19
20 def get_context_data(self, **kwargs):
21 source = self.get_object()
22 display_unpublished = self.request.user.is_authenticated
23 if (source.published is False) and (not display_unpublished):
24 raise PermissionDenied()
25
26 context = super().get_context_data(**kwargs)
27
28 if source.segment and source.segment.id == 4064:
29 # if this is a sequence source
30 context["sequences"] = source.sequence_set.order_by("s_sequence")
31 context["folios"] = (
32 source.sequence_set.values_list("folio", flat=True)
33 .distinct()
34 .order_by("folio")
35 )
36 else:
37 # if this is a chant source
38 folios = (
39 source.chant_set.values_list("folio", flat=True)
40 .distinct()
41 .order_by("folio")
42 )
43 context["folios"] = folios
44 # the options for the feast selector on the right, only chant sources have this
45 context["feasts_with_folios"] = get_feast_selector_options(source, folios)
46 return context
47
48
49 class SourceListView(ListView):
50 paginate_by = 100
51 context_object_name = "sources"
52 template_name = "source_list.html"
53
54 def get_context_data(self, **kwargs):
55 context = super().get_context_data(**kwargs)
56 context["provenances"] = (
57 Provenance.objects.all().order_by("name").values("id", "name")
58 )
59 context["centuries"] = (
60 Century.objects.all().order_by("name").values("id", "name")
61 )
62 return context
63
64 def get_queryset(self):
65 # use select_related() for foreign keys to reduce DB queries
66 queryset = Source.objects.select_related(
67 "rism_siglum", "segment", "provenance"
68 ).order_by("siglum")
69
70 display_unpublished = self.request.user.is_authenticated
71 if display_unpublished:
72 q_obj_filter = Q()
73 else:
74 q_obj_filter = Q(published=True)
75
76 if self.request.GET.get("century"):
77 century_name = Century.objects.get(id=self.request.GET.get("century")).name
78 q_obj_filter &= Q(century__name__icontains=century_name)
79
80 if self.request.GET.get("provenance"):
81 provenance_id = int(self.request.GET.get("provenance"))
82 q_obj_filter &= Q(provenance__id=provenance_id)
83 if self.request.GET.get("segment"):
84 segment_id = int(self.request.GET.get("segment"))
85 q_obj_filter &= Q(segment__id=segment_id)
86 if self.request.GET.get("fullSource") in ["true", "false"]:
87 full_source_str = self.request.GET.get("fullSource")
88 if full_source_str == "true":
89 full_source_q = Q(full_source=True) | Q(full_source=None)
90 q_obj_filter &= full_source_q
91 else:
92 q_obj_filter &= Q(full_source=False)
93
94 if self.request.GET.get("general"):
95 # Strip spaces at the beginning and end. Then make list of terms split on spaces
96 general_search_terms = self.request.GET.get("general").strip(" ").split(" ")
97 # We need a Q Object for each field we're gonna look into
98 title_q = Q()
99 siglum_q = Q()
100 rism_siglum_q = Q()
101 description_q = Q()
102 # it seems that old cantus don't look into title and provenance for the general search terms
103 # cantus.uwaterloo.ca/source/123901 this source cannot be found by searching its provenance 'Kremsmünster' in the general search field
104 # provenance_q = Q()
105 summary_q = Q()
106
107 # For each term, add it to the Q object of each field with an OR operation.
108 # We split the terms so that the words can be separated in the actual
109 # field, allowing for a more flexible search, and a field needs
110 # to match only one of the terms
111 for term in general_search_terms:
112 title_q |= Q(title__icontains=term)
113 siglum_q |= Q(siglum__icontains=term)
114 rism_siglum_q |= Q(rism_siglum__name__icontains=term) | Q(
115 rism_siglum__description__icontains=term
116 )
117 description_q |= Q(description__icontains=term)
118 summary_q |= Q(summary__icontains=term)
119 # provenance_q |= Q(provenance__name__icontains=term)
120 # All the Q objects are put together with OR.
121 # The end result is that at least one term has to match in at least one
122 # field
123 # general_search_q = (
124 # title_q | siglum_q | rism_siglum_q | description_q | provenance_q
125 # )
126 general_search_q = (
127 title_q | siglum_q | rism_siglum_q | description_q | summary_q
128 )
129 q_obj_filter &= general_search_q
130
131 # For the indexing notes search we follow the same procedure as above but with
132 # different fields
133 if self.request.GET.get("indexing"):
134 # Make list of terms split on spaces
135 indexing_search_terms = self.request.GET.get("indexing").split(" ")
136 # We need a Q Object for each field we're gonna look into
137 inventoried_by_q = Q()
138 full_text_entered_by_q = Q()
139 melodies_entered_by_q = Q()
140 proofreaders_q = Q()
141 other_editors_q = Q()
142 indexing_notes_q = Q()
143 # For each term, add it to the Q object of each field with an OR operation.
144 # We split the terms so that the words can be separated in the actual
145 # field, allowing for a more flexible search, and a field needs
146 # to match only one of the terms
147 for term in indexing_search_terms:
148 inventoried_by_q |= Q(inventoried_by__full_name__icontains=term)
149 full_text_entered_by_q |= Q(
150 full_text_entered_by__full_name__icontains=term
151 )
152 melodies_entered_by_q |= Q(
153 melodies_entered_by__full_name__icontains=term
154 )
155 proofreaders_q |= Q(proofreaders__full_name__icontains=term)
156 other_editors_q |= Q(other_editors__full_name__icontains=term)
157 indexing_notes_q |= Q(indexing_notes__icontains=term)
158 # All the Q objects are put together with OR.
159 # The end result is that at least one term has to match in at least one
160 # field
161 indexing_search_q = (
162 inventoried_by_q
163 | full_text_entered_by_q
164 | melodies_entered_by_q
165 | proofreaders_q
166 | other_editors_q
167 | indexing_notes_q
168 )
169 q_obj_filter &= indexing_search_q
170
171 return queryset.filter(q_obj_filter).prefetch_related(
172 Prefetch("century", queryset=Century.objects.all().order_by("id"))
173 )
174
175
176 class SourceCreateView(LoginRequiredMixin, UserPassesTestMixin, CreateView):
177 model = Source
178 template_name = "source_create_form.html"
179 form_class = SourceCreateForm
180
181 def test_func(self):
182 user = self.request.user
183 # checks if the user is allowed to create sources
184 is_authorized = user.groups.filter(
185 Q(name="project manager") | Q(name="editor") | Q(name="contributor")
186 ).exists()
187
188 if is_authorized:
189 return True
190 else:
191 return False
192
193 def get_success_url(self):
194 return reverse("source-create")
195
196 def form_valid(self, form):
197 form.instance.created_by = self.request.user
198 source = form.save()
199
200 # assign this source to the "current_editors"
201 current_editors = source.current_editors.all()
202
203 for editor in current_editors:
204 editor.sources_user_can_edit.add(source)
205
206 messages.success(
207 self.request,
208 "Source created successfully!",
209 )
210
211 return HttpResponseRedirect(self.get_success_url())
212
213
214 class SourceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
215 template_name = "source_edit.html"
216 model = Source
217 form_class = SourceEditForm
218 pk_url_kwarg = "source_id"
219
220 def test_func(self):
221 user = self.request.user
222 source_id = self.kwargs.get(self.pk_url_kwarg)
223 source = get_object_or_404(Source, id=source_id)
224
225 assigned_to_source = user.sources_user_can_edit.filter(id=source_id)
226
227 # checks if the user is a project manager
228 is_project_manager = user.groups.filter(name="project manager").exists()
229 # checks if the user is an editor
230 is_editor = user.groups.filter(name="editor").exists()
231 # checks if the user is a contributor
232 is_contributor = user.groups.filter(name="contributor").exists()
233
234 if (
235 (is_project_manager)
236 or (is_editor and assigned_to_source)
237 or (is_editor and source.created_by == user)
238 or (is_contributor and source.created_by == user)
239 ):
240 return True
241 else:
242 return False
243
244 def form_valid(self, form):
245 form.instance.last_updated_by = self.request.user
246
247 # remove this source from the old "current_editors"
248 # assign this source to the new "current_editors"
249
250 old_current_editors = list(
251 Source.objects.get(id=form.instance.id).current_editors.all()
252 )
253 new_current_editors = form.cleaned_data["current_editors"]
254 source = form.save()
255
256 for old_editor in old_current_editors:
257 old_editor.sources_user_can_edit.remove(source)
258
259 for new_editor in new_current_editors:
260 new_editor.sources_user_can_edit.add(source)
261
262 return HttpResponseRedirect(self.get_success_url())
263
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/django/cantusdb_project/main_app/views/source.py b/django/cantusdb_project/main_app/views/source.py
--- a/django/cantusdb_project/main_app/views/source.py
+++ b/django/cantusdb_project/main_app/views/source.py
@@ -199,6 +199,7 @@
# assign this source to the "current_editors"
current_editors = source.current_editors.all()
+ self.request.user.sources_user_can_edit.add(source)
for editor in current_editors:
editor.sources_user_can_edit.add(source)
|
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/views/source.py b/django/cantusdb_project/main_app/views/source.py\n--- a/django/cantusdb_project/main_app/views/source.py\n+++ b/django/cantusdb_project/main_app/views/source.py\n@@ -199,6 +199,7 @@\n \n # assign this source to the \"current_editors\"\n current_editors = source.current_editors.all()\n+ self.request.user.sources_user_can_edit.add(source)\n \n for editor in current_editors:\n editor.sources_user_can_edit.add(source)\n", "issue": "should editors get automatic proofreader access to their own sources?\nIt seems weird to me that on sources you (an editor) create, you can always access the Volpiano editor (assuming the source is non-empty!) but you have to be added as an editor to access the proofreading form. \r\n<img width=\"969\" alt=\"image\" src=\"https://user-images.githubusercontent.com/67451875/209043248-1a0a8e13-0196-498d-a835-081fddc3ee13.png\">\r\n<img width=\"666\" alt=\"image\" src=\"https://user-images.githubusercontent.com/67451875/209043188-94b4b649-c1e7-41cc-9692-fd2a6947c28c.png\">\r\n\r\nIf there were something preventing you from proofreading your own source as an intentional part of the workflow, this might be sort of useful (?) but as it is it just adds an extra step to get permissions you should in theory already have. \r\n\nNew source created doesn't show up in home page\nI just created a new source from my account. It doesn't show up immediately on the home page in the My Sources sidebar (even after refreshing the page and re-logging in). It does however appear in the database as well as in the my-sources page.\n", "before_files": [{"content": "from django.views.generic import DetailView, ListView, CreateView, UpdateView\nfrom django.db.models import Q, Prefetch\nfrom main_app.models import Source, Provenance, Century\nfrom main_app.forms import SourceCreateForm, SourceEditForm\nfrom django.contrib import messages\nfrom django.urls import reverse\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404\nfrom main_app.views.chant import get_feast_selector_options\n\n\nclass SourceDetailView(DetailView):\n model = Source\n context_object_name = \"source\"\n template_name = \"source_detail.html\"\n\n def get_context_data(self, **kwargs):\n source = self.get_object()\n display_unpublished = self.request.user.is_authenticated\n if (source.published is False) and (not display_unpublished):\n raise PermissionDenied()\n\n context = super().get_context_data(**kwargs)\n\n if source.segment and source.segment.id == 4064:\n # if this is a sequence source\n context[\"sequences\"] = source.sequence_set.order_by(\"s_sequence\")\n context[\"folios\"] = (\n source.sequence_set.values_list(\"folio\", flat=True)\n .distinct()\n .order_by(\"folio\")\n )\n else:\n # if this is a chant source\n folios = (\n source.chant_set.values_list(\"folio\", flat=True)\n .distinct()\n .order_by(\"folio\")\n )\n context[\"folios\"] = folios\n # the options for the feast selector on the right, only chant sources have this\n context[\"feasts_with_folios\"] = get_feast_selector_options(source, folios)\n return context\n\n\nclass SourceListView(ListView):\n paginate_by = 100\n context_object_name = \"sources\"\n template_name = \"source_list.html\"\n\n def get_context_data(self, **kwargs):\n context = 
super().get_context_data(**kwargs)\n context[\"provenances\"] = (\n Provenance.objects.all().order_by(\"name\").values(\"id\", \"name\")\n )\n context[\"centuries\"] = (\n Century.objects.all().order_by(\"name\").values(\"id\", \"name\")\n )\n return context\n\n def get_queryset(self):\n # use select_related() for foreign keys to reduce DB queries\n queryset = Source.objects.select_related(\n \"rism_siglum\", \"segment\", \"provenance\"\n ).order_by(\"siglum\")\n\n display_unpublished = self.request.user.is_authenticated\n if display_unpublished:\n q_obj_filter = Q()\n else:\n q_obj_filter = Q(published=True)\n\n if self.request.GET.get(\"century\"):\n century_name = Century.objects.get(id=self.request.GET.get(\"century\")).name\n q_obj_filter &= Q(century__name__icontains=century_name)\n\n if self.request.GET.get(\"provenance\"):\n provenance_id = int(self.request.GET.get(\"provenance\"))\n q_obj_filter &= Q(provenance__id=provenance_id)\n if self.request.GET.get(\"segment\"):\n segment_id = int(self.request.GET.get(\"segment\"))\n q_obj_filter &= Q(segment__id=segment_id)\n if self.request.GET.get(\"fullSource\") in [\"true\", \"false\"]:\n full_source_str = self.request.GET.get(\"fullSource\")\n if full_source_str == \"true\":\n full_source_q = Q(full_source=True) | Q(full_source=None)\n q_obj_filter &= full_source_q\n else:\n q_obj_filter &= Q(full_source=False)\n\n if self.request.GET.get(\"general\"):\n # Strip spaces at the beginning and end. Then make list of terms split on spaces\n general_search_terms = self.request.GET.get(\"general\").strip(\" \").split(\" \")\n # We need a Q Object for each field we're gonna look into\n title_q = Q()\n siglum_q = Q()\n rism_siglum_q = Q()\n description_q = Q()\n # it seems that old cantus don't look into title and provenance for the general search terms\n # cantus.uwaterloo.ca/source/123901 this source cannot be found by searching its provenance 'Kremsm\u00fcnster' in the general search field\n # provenance_q = Q()\n summary_q = Q()\n\n # For each term, add it to the Q object of each field with an OR operation.\n # We split the terms so that the words can be separated in the actual\n # field, allowing for a more flexible search, and a field needs\n # to match only one of the terms\n for term in general_search_terms:\n title_q |= Q(title__icontains=term)\n siglum_q |= Q(siglum__icontains=term)\n rism_siglum_q |= Q(rism_siglum__name__icontains=term) | Q(\n rism_siglum__description__icontains=term\n )\n description_q |= Q(description__icontains=term)\n summary_q |= Q(summary__icontains=term)\n # provenance_q |= Q(provenance__name__icontains=term)\n # All the Q objects are put together with OR.\n # The end result is that at least one term has to match in at least one\n # field\n # general_search_q = (\n # title_q | siglum_q | rism_siglum_q | description_q | provenance_q\n # )\n general_search_q = (\n title_q | siglum_q | rism_siglum_q | description_q | summary_q\n )\n q_obj_filter &= general_search_q\n\n # For the indexing notes search we follow the same procedure as above but with\n # different fields\n if self.request.GET.get(\"indexing\"):\n # Make list of terms split on spaces\n indexing_search_terms = self.request.GET.get(\"indexing\").split(\" \")\n # We need a Q Object for each field we're gonna look into\n inventoried_by_q = Q()\n full_text_entered_by_q = Q()\n melodies_entered_by_q = Q()\n proofreaders_q = Q()\n other_editors_q = Q()\n indexing_notes_q = Q()\n # For each term, add it to the Q object of each field with an OR operation.\n # 
We split the terms so that the words can be separated in the actual\n # field, allowing for a more flexible search, and a field needs\n # to match only one of the terms\n for term in indexing_search_terms:\n inventoried_by_q |= Q(inventoried_by__full_name__icontains=term)\n full_text_entered_by_q |= Q(\n full_text_entered_by__full_name__icontains=term\n )\n melodies_entered_by_q |= Q(\n melodies_entered_by__full_name__icontains=term\n )\n proofreaders_q |= Q(proofreaders__full_name__icontains=term)\n other_editors_q |= Q(other_editors__full_name__icontains=term)\n indexing_notes_q |= Q(indexing_notes__icontains=term)\n # All the Q objects are put together with OR.\n # The end result is that at least one term has to match in at least one\n # field\n indexing_search_q = (\n inventoried_by_q\n | full_text_entered_by_q\n | melodies_entered_by_q\n | proofreaders_q\n | other_editors_q\n | indexing_notes_q\n )\n q_obj_filter &= indexing_search_q\n\n return queryset.filter(q_obj_filter).prefetch_related(\n Prefetch(\"century\", queryset=Century.objects.all().order_by(\"id\"))\n )\n\n\nclass SourceCreateView(LoginRequiredMixin, UserPassesTestMixin, CreateView):\n model = Source\n template_name = \"source_create_form.html\"\n form_class = SourceCreateForm\n\n def test_func(self):\n user = self.request.user\n # checks if the user is allowed to create sources\n is_authorized = user.groups.filter(\n Q(name=\"project manager\") | Q(name=\"editor\") | Q(name=\"contributor\")\n ).exists()\n\n if is_authorized:\n return True\n else:\n return False\n\n def get_success_url(self):\n return reverse(\"source-create\")\n\n def form_valid(self, form):\n form.instance.created_by = self.request.user\n source = form.save()\n\n # assign this source to the \"current_editors\"\n current_editors = source.current_editors.all()\n\n for editor in current_editors:\n editor.sources_user_can_edit.add(source)\n\n messages.success(\n self.request,\n \"Source created successfully!\",\n )\n\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass SourceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n template_name = \"source_edit.html\"\n model = Source\n form_class = SourceEditForm\n pk_url_kwarg = \"source_id\"\n\n def test_func(self):\n user = self.request.user\n source_id = self.kwargs.get(self.pk_url_kwarg)\n source = get_object_or_404(Source, id=source_id)\n\n assigned_to_source = user.sources_user_can_edit.filter(id=source_id)\n\n # checks if the user is a project manager\n is_project_manager = user.groups.filter(name=\"project manager\").exists()\n # checks if the user is an editor\n is_editor = user.groups.filter(name=\"editor\").exists()\n # checks if the user is a contributor\n is_contributor = user.groups.filter(name=\"contributor\").exists()\n\n if (\n (is_project_manager)\n or (is_editor and assigned_to_source)\n or (is_editor and source.created_by == user)\n or (is_contributor and source.created_by == user)\n ):\n return True\n else:\n return False\n\n def form_valid(self, form):\n form.instance.last_updated_by = self.request.user\n\n # remove this source from the old \"current_editors\"\n # assign this source to the new \"current_editors\"\n\n old_current_editors = list(\n Source.objects.get(id=form.instance.id).current_editors.all()\n )\n new_current_editors = form.cleaned_data[\"current_editors\"]\n source = form.save()\n\n for old_editor in old_current_editors:\n old_editor.sources_user_can_edit.remove(source)\n\n for new_editor in new_current_editors:\n 
new_editor.sources_user_can_edit.add(source)\n\n return HttpResponseRedirect(self.get_success_url())\n", "path": "django/cantusdb_project/main_app/views/source.py"}], "after_files": [{"content": "from django.views.generic import DetailView, ListView, CreateView, UpdateView\nfrom django.db.models import Q, Prefetch\nfrom main_app.models import Source, Provenance, Century\nfrom main_app.forms import SourceCreateForm, SourceEditForm\nfrom django.contrib import messages\nfrom django.urls import reverse\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404\nfrom main_app.views.chant import get_feast_selector_options\n\n\nclass SourceDetailView(DetailView):\n model = Source\n context_object_name = \"source\"\n template_name = \"source_detail.html\"\n\n def get_context_data(self, **kwargs):\n source = self.get_object()\n display_unpublished = self.request.user.is_authenticated\n if (source.published is False) and (not display_unpublished):\n raise PermissionDenied()\n\n context = super().get_context_data(**kwargs)\n\n if source.segment and source.segment.id == 4064:\n # if this is a sequence source\n context[\"sequences\"] = source.sequence_set.order_by(\"s_sequence\")\n context[\"folios\"] = (\n source.sequence_set.values_list(\"folio\", flat=True)\n .distinct()\n .order_by(\"folio\")\n )\n else:\n # if this is a chant source\n folios = (\n source.chant_set.values_list(\"folio\", flat=True)\n .distinct()\n .order_by(\"folio\")\n )\n context[\"folios\"] = folios\n # the options for the feast selector on the right, only chant sources have this\n context[\"feasts_with_folios\"] = get_feast_selector_options(source, folios)\n return context\n\n\nclass SourceListView(ListView):\n paginate_by = 100\n context_object_name = \"sources\"\n template_name = \"source_list.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"provenances\"] = (\n Provenance.objects.all().order_by(\"name\").values(\"id\", \"name\")\n )\n context[\"centuries\"] = (\n Century.objects.all().order_by(\"name\").values(\"id\", \"name\")\n )\n return context\n\n def get_queryset(self):\n # use select_related() for foreign keys to reduce DB queries\n queryset = Source.objects.select_related(\n \"rism_siglum\", \"segment\", \"provenance\"\n ).order_by(\"siglum\")\n\n display_unpublished = self.request.user.is_authenticated\n if display_unpublished:\n q_obj_filter = Q()\n else:\n q_obj_filter = Q(published=True)\n\n if self.request.GET.get(\"century\"):\n century_name = Century.objects.get(id=self.request.GET.get(\"century\")).name\n q_obj_filter &= Q(century__name__icontains=century_name)\n\n if self.request.GET.get(\"provenance\"):\n provenance_id = int(self.request.GET.get(\"provenance\"))\n q_obj_filter &= Q(provenance__id=provenance_id)\n if self.request.GET.get(\"segment\"):\n segment_id = int(self.request.GET.get(\"segment\"))\n q_obj_filter &= Q(segment__id=segment_id)\n if self.request.GET.get(\"fullSource\") in [\"true\", \"false\"]:\n full_source_str = self.request.GET.get(\"fullSource\")\n if full_source_str == \"true\":\n full_source_q = Q(full_source=True) | Q(full_source=None)\n q_obj_filter &= full_source_q\n else:\n q_obj_filter &= Q(full_source=False)\n\n if self.request.GET.get(\"general\"):\n # Strip spaces at the beginning and end. 
Then make list of terms split on spaces\n general_search_terms = self.request.GET.get(\"general\").strip(\" \").split(\" \")\n # We need a Q Object for each field we're gonna look into\n title_q = Q()\n siglum_q = Q()\n rism_siglum_q = Q()\n description_q = Q()\n # it seems that old cantus don't look into title and provenance for the general search terms\n # cantus.uwaterloo.ca/source/123901 this source cannot be found by searching its provenance 'Kremsm\u00fcnster' in the general search field\n # provenance_q = Q()\n summary_q = Q()\n\n # For each term, add it to the Q object of each field with an OR operation.\n # We split the terms so that the words can be separated in the actual\n # field, allowing for a more flexible search, and a field needs\n # to match only one of the terms\n for term in general_search_terms:\n title_q |= Q(title__icontains=term)\n siglum_q |= Q(siglum__icontains=term)\n rism_siglum_q |= Q(rism_siglum__name__icontains=term) | Q(\n rism_siglum__description__icontains=term\n )\n description_q |= Q(description__icontains=term)\n summary_q |= Q(summary__icontains=term)\n # provenance_q |= Q(provenance__name__icontains=term)\n # All the Q objects are put together with OR.\n # The end result is that at least one term has to match in at least one\n # field\n # general_search_q = (\n # title_q | siglum_q | rism_siglum_q | description_q | provenance_q\n # )\n general_search_q = (\n title_q | siglum_q | rism_siglum_q | description_q | summary_q\n )\n q_obj_filter &= general_search_q\n\n # For the indexing notes search we follow the same procedure as above but with\n # different fields\n if self.request.GET.get(\"indexing\"):\n # Make list of terms split on spaces\n indexing_search_terms = self.request.GET.get(\"indexing\").split(\" \")\n # We need a Q Object for each field we're gonna look into\n inventoried_by_q = Q()\n full_text_entered_by_q = Q()\n melodies_entered_by_q = Q()\n proofreaders_q = Q()\n other_editors_q = Q()\n indexing_notes_q = Q()\n # For each term, add it to the Q object of each field with an OR operation.\n # We split the terms so that the words can be separated in the actual\n # field, allowing for a more flexible search, and a field needs\n # to match only one of the terms\n for term in indexing_search_terms:\n inventoried_by_q |= Q(inventoried_by__full_name__icontains=term)\n full_text_entered_by_q |= Q(\n full_text_entered_by__full_name__icontains=term\n )\n melodies_entered_by_q |= Q(\n melodies_entered_by__full_name__icontains=term\n )\n proofreaders_q |= Q(proofreaders__full_name__icontains=term)\n other_editors_q |= Q(other_editors__full_name__icontains=term)\n indexing_notes_q |= Q(indexing_notes__icontains=term)\n # All the Q objects are put together with OR.\n # The end result is that at least one term has to match in at least one\n # field\n indexing_search_q = (\n inventoried_by_q\n | full_text_entered_by_q\n | melodies_entered_by_q\n | proofreaders_q\n | other_editors_q\n | indexing_notes_q\n )\n q_obj_filter &= indexing_search_q\n\n return queryset.filter(q_obj_filter).prefetch_related(\n Prefetch(\"century\", queryset=Century.objects.all().order_by(\"id\"))\n )\n\n\nclass SourceCreateView(LoginRequiredMixin, UserPassesTestMixin, CreateView):\n model = Source\n template_name = \"source_create_form.html\"\n form_class = SourceCreateForm\n\n def test_func(self):\n user = self.request.user\n # checks if the user is allowed to create sources\n is_authorized = user.groups.filter(\n Q(name=\"project manager\") | Q(name=\"editor\") | 
Q(name=\"contributor\")\n ).exists()\n\n if is_authorized:\n return True\n else:\n return False\n\n def get_success_url(self):\n return reverse(\"source-create\")\n\n def form_valid(self, form):\n form.instance.created_by = self.request.user\n source = form.save()\n\n # assign this source to the \"current_editors\"\n current_editors = source.current_editors.all()\n self.request.user.sources_user_can_edit.add(source)\n\n for editor in current_editors:\n editor.sources_user_can_edit.add(source)\n\n messages.success(\n self.request,\n \"Source created successfully!\",\n )\n\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass SourceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n template_name = \"source_edit.html\"\n model = Source\n form_class = SourceEditForm\n pk_url_kwarg = \"source_id\"\n\n def test_func(self):\n user = self.request.user\n source_id = self.kwargs.get(self.pk_url_kwarg)\n source = get_object_or_404(Source, id=source_id)\n\n assigned_to_source = user.sources_user_can_edit.filter(id=source_id)\n\n # checks if the user is a project manager\n is_project_manager = user.groups.filter(name=\"project manager\").exists()\n # checks if the user is an editor\n is_editor = user.groups.filter(name=\"editor\").exists()\n # checks if the user is a contributor\n is_contributor = user.groups.filter(name=\"contributor\").exists()\n\n if (\n (is_project_manager)\n or (is_editor and assigned_to_source)\n or (is_editor and source.created_by == user)\n or (is_contributor and source.created_by == user)\n ):\n return True\n else:\n return False\n\n def form_valid(self, form):\n form.instance.last_updated_by = self.request.user\n\n # remove this source from the old \"current_editors\"\n # assign this source to the new \"current_editors\"\n\n old_current_editors = list(\n Source.objects.get(id=form.instance.id).current_editors.all()\n )\n new_current_editors = form.cleaned_data[\"current_editors\"]\n source = form.save()\n\n for old_editor in old_current_editors:\n old_editor.sources_user_can_edit.remove(source)\n\n for new_editor in new_current_editors:\n new_editor.sources_user_can_edit.add(source)\n\n return HttpResponseRedirect(self.get_success_url())\n", "path": "django/cantusdb_project/main_app/views/source.py"}]}
| 3,577 | 128 |
gh_patches_debug_34189
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-4356
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Enable enter to send in dev env
In the dev environment, every time we log in fresh we have to set ```Enter to send``` to true. We should enable it by default for the dev environment only, to speed up testing.
```git grep enter_sends``` will help you.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/lib/create_user.py`
Content:
```
1 from __future__ import absolute_import
2
3 from django.contrib.auth.models import UserManager
4 from django.utils import timezone
5 from zerver.models import UserProfile, Recipient, Subscription, Realm, Stream
6 import base64
7 import ujson
8 import os
9 import string
10 from six.moves import range
11
12 from typing import Optional, Text
13
14 def random_api_key():
15 # type: () -> Text
16 choices = string.ascii_letters + string.digits
17 altchars = ''.join([choices[ord(os.urandom(1)) % 62] for _ in range(2)]).encode("utf-8")
18 return base64.b64encode(os.urandom(24), altchars=altchars).decode("utf-8")
19
20 # create_user_profile is based on Django's User.objects.create_user,
21 # except that we don't save to the database so it can used in
22 # bulk_creates
23 #
24 # Only use this for bulk_create -- for normal usage one should use
25 # create_user (below) which will also make the Subscription and
26 # Recipient objects
27 def create_user_profile(realm, email, password, active, bot_type, full_name,
28 short_name, bot_owner, is_mirror_dummy, tos_version,
29 tutorial_status=UserProfile.TUTORIAL_WAITING):
30 # type: (Realm, Text, Optional[Text], bool, Optional[int], Text, Text, Optional[UserProfile], bool, Optional[Text], Optional[Text]) -> UserProfile
31 now = timezone.now()
32 email = UserManager.normalize_email(email)
33
34 user_profile = UserProfile(email=email, is_staff=False, is_active=active,
35 full_name=full_name, short_name=short_name,
36 last_login=now, date_joined=now, realm=realm,
37 pointer=-1, is_bot=bool(bot_type), bot_type=bot_type,
38 bot_owner=bot_owner, is_mirror_dummy=is_mirror_dummy,
39 tos_version=tos_version,
40 tutorial_status=tutorial_status,
41 onboarding_steps=ujson.dumps([]),
42 default_language=realm.default_language)
43
44 if bot_type or not active:
45 password = None
46
47 user_profile.set_password(password)
48
49 user_profile.api_key = random_api_key()
50 return user_profile
51
52 def create_user(email, password, realm, full_name, short_name,
53 active=True, bot_type=None, bot_owner=None, tos_version=None,
54 avatar_source=UserProfile.AVATAR_FROM_GRAVATAR,
55 is_mirror_dummy=False, default_sending_stream=None,
56 default_events_register_stream=None,
57 default_all_public_streams=None, user_profile_id=None):
58 # type: (Text, Text, Realm, Text, Text, bool, Optional[int], Optional[UserProfile], Optional[Text], Text, bool, Optional[Stream], Optional[Stream], Optional[bool], Optional[int]) -> UserProfile
59 user_profile = create_user_profile(realm, email, password, active, bot_type,
60 full_name, short_name, bot_owner,
61 is_mirror_dummy, tos_version)
62 user_profile.avatar_source = avatar_source
63 user_profile.default_sending_stream = default_sending_stream
64 user_profile.default_events_register_stream = default_events_register_stream
65 # Allow the ORM default to be used if not provided
66 if default_all_public_streams is not None:
67 user_profile.default_all_public_streams = default_all_public_streams
68
69 if user_profile_id is not None:
70 user_profile.id = user_profile_id
71
72 user_profile.save()
73 recipient = Recipient.objects.create(type_id=user_profile.id,
74 type=Recipient.PERSONAL)
75 Subscription.objects.create(user_profile=user_profile, recipient=recipient)
76 return user_profile
77
```
Path: `zerver/lib/bulk_create.py`
Content:
```
1 from __future__ import absolute_import
2 from typing import Any, Dict, Iterable, List, Mapping, Optional, Set, Tuple, Text
3
4 from zerver.lib.initial_password import initial_password
5 from zerver.models import Realm, Stream, UserProfile, Huddle, \
6 Subscription, Recipient, Client, RealmAuditLog, get_huddle_hash
7 from zerver.lib.create_user import create_user_profile
8
9 def bulk_create_users(realm, users_raw, bot_type=None, tos_version=None):
10 # type: (Realm, Set[Tuple[Text, Text, Text, bool]], Optional[int], Optional[Text]) -> None
11 """
12 Creates and saves a UserProfile with the given email.
13 Has some code based off of UserManage.create_user, but doesn't .save()
14 """
15 existing_users = frozenset(UserProfile.objects.values_list('email', flat=True))
16 users = sorted([user_raw for user_raw in users_raw if user_raw[0] not in existing_users])
17
18 # Now create user_profiles
19 profiles_to_create = [] # type: List[UserProfile]
20 for (email, full_name, short_name, active) in users:
21 profile = create_user_profile(realm, email,
22 initial_password(email), active, bot_type,
23 full_name, short_name, None, False, tos_version,
24 tutorial_status=UserProfile.TUTORIAL_FINISHED)
25 profiles_to_create.append(profile)
26 UserProfile.objects.bulk_create(profiles_to_create)
27
28 RealmAuditLog.objects.bulk_create(
29 [RealmAuditLog(realm=profile_.realm, modified_user=profile_,
30 event_type='user_created', event_time=profile_.date_joined)
31 for profile_ in profiles_to_create])
32
33 profiles_by_email = {} # type: Dict[Text, UserProfile]
34 profiles_by_id = {} # type: Dict[int, UserProfile]
35 for profile in UserProfile.objects.select_related().all():
36 profiles_by_email[profile.email] = profile
37 profiles_by_id[profile.id] = profile
38
39 recipients_to_create = [] # type: List[Recipient]
40 for (email, full_name, short_name, active) in users:
41 recipients_to_create.append(Recipient(type_id=profiles_by_email[email].id,
42 type=Recipient.PERSONAL))
43 Recipient.objects.bulk_create(recipients_to_create)
44
45 recipients_by_email = {} # type: Dict[Text, Recipient]
46 for recipient in Recipient.objects.filter(type=Recipient.PERSONAL):
47 recipients_by_email[profiles_by_id[recipient.type_id].email] = recipient
48
49 subscriptions_to_create = [] # type: List[Subscription]
50 for (email, full_name, short_name, active) in users:
51 subscriptions_to_create.append(
52 Subscription(user_profile_id=profiles_by_email[email].id,
53 recipient=recipients_by_email[email]))
54 Subscription.objects.bulk_create(subscriptions_to_create)
55
56 def bulk_create_streams(realm, stream_dict):
57 # type: (Realm, Dict[Text, Dict[Text, Any]]) -> None
58 existing_streams = frozenset([name.lower() for name in
59 Stream.objects.filter(realm=realm)
60 .values_list('name', flat=True)])
61 streams_to_create = [] # type: List[Stream]
62 for name, options in stream_dict.items():
63 if name.lower() not in existing_streams:
64 streams_to_create.append(
65 Stream(
66 realm=realm, name=name, description=options["description"],
67 invite_only=options["invite_only"]
68 )
69 )
70 Stream.objects.bulk_create(streams_to_create)
71
72 recipients_to_create = [] # type: List[Recipient]
73 for stream in Stream.objects.filter(realm=realm).values('id', 'name'):
74 if stream['name'].lower() not in existing_streams:
75 recipients_to_create.append(Recipient(type_id=stream['id'],
76 type=Recipient.STREAM))
77 Recipient.objects.bulk_create(recipients_to_create)
78
79 def bulk_create_clients(client_list):
80 # type: (Iterable[Text]) -> None
81 existing_clients = set(client.name for client in Client.objects.select_related().all()) # type: Set[Text]
82
83 clients_to_create = [] # type: List[Client]
84 for name in client_list:
85 if name not in existing_clients:
86 clients_to_create.append(Client(name=name))
87 existing_clients.add(name)
88 Client.objects.bulk_create(clients_to_create)
89
90 def bulk_create_huddles(users, huddle_user_list):
91 # type: (Dict[Text, UserProfile], Iterable[Iterable[Text]]) -> None
92 huddles = {} # type: Dict[Text, Huddle]
93 huddles_by_id = {} # type: Dict[int, Huddle]
94 huddle_set = set() # type: Set[Tuple[Text, Tuple[int, ...]]]
95 existing_huddles = set() # type: Set[Text]
96 for huddle in Huddle.objects.all():
97 existing_huddles.add(huddle.huddle_hash)
98 for huddle_users in huddle_user_list:
99 user_ids = [users[email].id for email in huddle_users] # type: List[int]
100 huddle_hash = get_huddle_hash(user_ids)
101 if huddle_hash in existing_huddles:
102 continue
103 huddle_set.add((huddle_hash, tuple(sorted(user_ids))))
104
105 huddles_to_create = [] # type: List[Huddle]
106 for (huddle_hash, _) in huddle_set:
107 huddles_to_create.append(Huddle(huddle_hash=huddle_hash))
108 Huddle.objects.bulk_create(huddles_to_create)
109
110 for huddle in Huddle.objects.all():
111 huddles[huddle.huddle_hash] = huddle
112 huddles_by_id[huddle.id] = huddle
113
114 recipients_to_create = [] # type: List[Recipient]
115 for (huddle_hash, _) in huddle_set:
116 recipients_to_create.append(Recipient(type_id=huddles[huddle_hash].id, type=Recipient.HUDDLE))
117 Recipient.objects.bulk_create(recipients_to_create)
118
119 huddle_recipients = {} # type: Dict[Text, Recipient]
120 for recipient in Recipient.objects.filter(type=Recipient.HUDDLE):
121 huddle_recipients[huddles_by_id[recipient.type_id].huddle_hash] = recipient
122
123 subscriptions_to_create = [] # type: List[Subscription]
124 for (huddle_hash, huddle_user_ids) in huddle_set:
125 for user_id in huddle_user_ids:
126 subscriptions_to_create.append(Subscription(active=True, user_profile_id=user_id,
127 recipient=huddle_recipients[huddle_hash]))
128 Subscription.objects.bulk_create(subscriptions_to_create)
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zerver/lib/bulk_create.py b/zerver/lib/bulk_create.py
--- a/zerver/lib/bulk_create.py
+++ b/zerver/lib/bulk_create.py
@@ -21,7 +21,8 @@
profile = create_user_profile(realm, email,
initial_password(email), active, bot_type,
full_name, short_name, None, False, tos_version,
- tutorial_status=UserProfile.TUTORIAL_FINISHED)
+ tutorial_status=UserProfile.TUTORIAL_FINISHED,
+ enter_sends=True)
profiles_to_create.append(profile)
UserProfile.objects.bulk_create(profiles_to_create)
diff --git a/zerver/lib/create_user.py b/zerver/lib/create_user.py
--- a/zerver/lib/create_user.py
+++ b/zerver/lib/create_user.py
@@ -26,8 +26,9 @@
# Recipient objects
def create_user_profile(realm, email, password, active, bot_type, full_name,
short_name, bot_owner, is_mirror_dummy, tos_version,
- tutorial_status=UserProfile.TUTORIAL_WAITING):
- # type: (Realm, Text, Optional[Text], bool, Optional[int], Text, Text, Optional[UserProfile], bool, Optional[Text], Optional[Text]) -> UserProfile
+ tutorial_status=UserProfile.TUTORIAL_WAITING,
+ enter_sends=False):
+ # type: (Realm, Text, Optional[Text], bool, Optional[int], Text, Text, Optional[UserProfile], bool, Optional[Text], Optional[Text], bool) -> UserProfile
now = timezone.now()
email = UserManager.normalize_email(email)
@@ -38,6 +39,7 @@
bot_owner=bot_owner, is_mirror_dummy=is_mirror_dummy,
tos_version=tos_version,
tutorial_status=tutorial_status,
+ enter_sends=enter_sends,
onboarding_steps=ujson.dumps([]),
default_language=realm.default_language)
|
{"golden_diff": "diff --git a/zerver/lib/bulk_create.py b/zerver/lib/bulk_create.py\n--- a/zerver/lib/bulk_create.py\n+++ b/zerver/lib/bulk_create.py\n@@ -21,7 +21,8 @@\n profile = create_user_profile(realm, email,\n initial_password(email), active, bot_type,\n full_name, short_name, None, False, tos_version,\n- tutorial_status=UserProfile.TUTORIAL_FINISHED)\n+ tutorial_status=UserProfile.TUTORIAL_FINISHED,\n+ enter_sends=True)\n profiles_to_create.append(profile)\n UserProfile.objects.bulk_create(profiles_to_create)\n \ndiff --git a/zerver/lib/create_user.py b/zerver/lib/create_user.py\n--- a/zerver/lib/create_user.py\n+++ b/zerver/lib/create_user.py\n@@ -26,8 +26,9 @@\n # Recipient objects\n def create_user_profile(realm, email, password, active, bot_type, full_name,\n short_name, bot_owner, is_mirror_dummy, tos_version,\n- tutorial_status=UserProfile.TUTORIAL_WAITING):\n- # type: (Realm, Text, Optional[Text], bool, Optional[int], Text, Text, Optional[UserProfile], bool, Optional[Text], Optional[Text]) -> UserProfile\n+ tutorial_status=UserProfile.TUTORIAL_WAITING,\n+ enter_sends=False):\n+ # type: (Realm, Text, Optional[Text], bool, Optional[int], Text, Text, Optional[UserProfile], bool, Optional[Text], Optional[Text], bool) -> UserProfile\n now = timezone.now()\n email = UserManager.normalize_email(email)\n \n@@ -38,6 +39,7 @@\n bot_owner=bot_owner, is_mirror_dummy=is_mirror_dummy,\n tos_version=tos_version,\n tutorial_status=tutorial_status,\n+ enter_sends=enter_sends,\n onboarding_steps=ujson.dumps([]),\n default_language=realm.default_language)\n", "issue": "Enable enter to send in dev env\nIn dev environment every time we fresh login we have to check ```Enter to send``` to true. We should enable it by default for dev env only to increase testing speed.\r\n\r\n```git grep enter_sends``` will help you.\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom django.contrib.auth.models import UserManager\nfrom django.utils import timezone\nfrom zerver.models import UserProfile, Recipient, Subscription, Realm, Stream\nimport base64\nimport ujson\nimport os\nimport string\nfrom six.moves import range\n\nfrom typing import Optional, Text\n\ndef random_api_key():\n # type: () -> Text\n choices = string.ascii_letters + string.digits\n altchars = ''.join([choices[ord(os.urandom(1)) % 62] for _ in range(2)]).encode(\"utf-8\")\n return base64.b64encode(os.urandom(24), altchars=altchars).decode(\"utf-8\")\n\n# create_user_profile is based on Django's User.objects.create_user,\n# except that we don't save to the database so it can used in\n# bulk_creates\n#\n# Only use this for bulk_create -- for normal usage one should use\n# create_user (below) which will also make the Subscription and\n# Recipient objects\ndef create_user_profile(realm, email, password, active, bot_type, full_name,\n short_name, bot_owner, is_mirror_dummy, tos_version,\n tutorial_status=UserProfile.TUTORIAL_WAITING):\n # type: (Realm, Text, Optional[Text], bool, Optional[int], Text, Text, Optional[UserProfile], bool, Optional[Text], Optional[Text]) -> UserProfile\n now = timezone.now()\n email = UserManager.normalize_email(email)\n\n user_profile = UserProfile(email=email, is_staff=False, is_active=active,\n full_name=full_name, short_name=short_name,\n last_login=now, date_joined=now, realm=realm,\n pointer=-1, is_bot=bool(bot_type), bot_type=bot_type,\n bot_owner=bot_owner, is_mirror_dummy=is_mirror_dummy,\n tos_version=tos_version,\n tutorial_status=tutorial_status,\n 
onboarding_steps=ujson.dumps([]),\n default_language=realm.default_language)\n\n if bot_type or not active:\n password = None\n\n user_profile.set_password(password)\n\n user_profile.api_key = random_api_key()\n return user_profile\n\ndef create_user(email, password, realm, full_name, short_name,\n active=True, bot_type=None, bot_owner=None, tos_version=None,\n avatar_source=UserProfile.AVATAR_FROM_GRAVATAR,\n is_mirror_dummy=False, default_sending_stream=None,\n default_events_register_stream=None,\n default_all_public_streams=None, user_profile_id=None):\n # type: (Text, Text, Realm, Text, Text, bool, Optional[int], Optional[UserProfile], Optional[Text], Text, bool, Optional[Stream], Optional[Stream], Optional[bool], Optional[int]) -> UserProfile\n user_profile = create_user_profile(realm, email, password, active, bot_type,\n full_name, short_name, bot_owner,\n is_mirror_dummy, tos_version)\n user_profile.avatar_source = avatar_source\n user_profile.default_sending_stream = default_sending_stream\n user_profile.default_events_register_stream = default_events_register_stream\n # Allow the ORM default to be used if not provided\n if default_all_public_streams is not None:\n user_profile.default_all_public_streams = default_all_public_streams\n\n if user_profile_id is not None:\n user_profile.id = user_profile_id\n\n user_profile.save()\n recipient = Recipient.objects.create(type_id=user_profile.id,\n type=Recipient.PERSONAL)\n Subscription.objects.create(user_profile=user_profile, recipient=recipient)\n return user_profile\n", "path": "zerver/lib/create_user.py"}, {"content": "from __future__ import absolute_import\nfrom typing import Any, Dict, Iterable, List, Mapping, Optional, Set, Tuple, Text\n\nfrom zerver.lib.initial_password import initial_password\nfrom zerver.models import Realm, Stream, UserProfile, Huddle, \\\n Subscription, Recipient, Client, RealmAuditLog, get_huddle_hash\nfrom zerver.lib.create_user import create_user_profile\n\ndef bulk_create_users(realm, users_raw, bot_type=None, tos_version=None):\n # type: (Realm, Set[Tuple[Text, Text, Text, bool]], Optional[int], Optional[Text]) -> None\n \"\"\"\n Creates and saves a UserProfile with the given email.\n Has some code based off of UserManage.create_user, but doesn't .save()\n \"\"\"\n existing_users = frozenset(UserProfile.objects.values_list('email', flat=True))\n users = sorted([user_raw for user_raw in users_raw if user_raw[0] not in existing_users])\n\n # Now create user_profiles\n profiles_to_create = [] # type: List[UserProfile]\n for (email, full_name, short_name, active) in users:\n profile = create_user_profile(realm, email,\n initial_password(email), active, bot_type,\n full_name, short_name, None, False, tos_version,\n tutorial_status=UserProfile.TUTORIAL_FINISHED)\n profiles_to_create.append(profile)\n UserProfile.objects.bulk_create(profiles_to_create)\n\n RealmAuditLog.objects.bulk_create(\n [RealmAuditLog(realm=profile_.realm, modified_user=profile_,\n event_type='user_created', event_time=profile_.date_joined)\n for profile_ in profiles_to_create])\n\n profiles_by_email = {} # type: Dict[Text, UserProfile]\n profiles_by_id = {} # type: Dict[int, UserProfile]\n for profile in UserProfile.objects.select_related().all():\n profiles_by_email[profile.email] = profile\n profiles_by_id[profile.id] = profile\n\n recipients_to_create = [] # type: List[Recipient]\n for (email, full_name, short_name, active) in users:\n recipients_to_create.append(Recipient(type_id=profiles_by_email[email].id,\n 
type=Recipient.PERSONAL))\n Recipient.objects.bulk_create(recipients_to_create)\n\n recipients_by_email = {} # type: Dict[Text, Recipient]\n for recipient in Recipient.objects.filter(type=Recipient.PERSONAL):\n recipients_by_email[profiles_by_id[recipient.type_id].email] = recipient\n\n subscriptions_to_create = [] # type: List[Subscription]\n for (email, full_name, short_name, active) in users:\n subscriptions_to_create.append(\n Subscription(user_profile_id=profiles_by_email[email].id,\n recipient=recipients_by_email[email]))\n Subscription.objects.bulk_create(subscriptions_to_create)\n\ndef bulk_create_streams(realm, stream_dict):\n # type: (Realm, Dict[Text, Dict[Text, Any]]) -> None\n existing_streams = frozenset([name.lower() for name in\n Stream.objects.filter(realm=realm)\n .values_list('name', flat=True)])\n streams_to_create = [] # type: List[Stream]\n for name, options in stream_dict.items():\n if name.lower() not in existing_streams:\n streams_to_create.append(\n Stream(\n realm=realm, name=name, description=options[\"description\"],\n invite_only=options[\"invite_only\"]\n )\n )\n Stream.objects.bulk_create(streams_to_create)\n\n recipients_to_create = [] # type: List[Recipient]\n for stream in Stream.objects.filter(realm=realm).values('id', 'name'):\n if stream['name'].lower() not in existing_streams:\n recipients_to_create.append(Recipient(type_id=stream['id'],\n type=Recipient.STREAM))\n Recipient.objects.bulk_create(recipients_to_create)\n\ndef bulk_create_clients(client_list):\n # type: (Iterable[Text]) -> None\n existing_clients = set(client.name for client in Client.objects.select_related().all()) # type: Set[Text]\n\n clients_to_create = [] # type: List[Client]\n for name in client_list:\n if name not in existing_clients:\n clients_to_create.append(Client(name=name))\n existing_clients.add(name)\n Client.objects.bulk_create(clients_to_create)\n\ndef bulk_create_huddles(users, huddle_user_list):\n # type: (Dict[Text, UserProfile], Iterable[Iterable[Text]]) -> None\n huddles = {} # type: Dict[Text, Huddle]\n huddles_by_id = {} # type: Dict[int, Huddle]\n huddle_set = set() # type: Set[Tuple[Text, Tuple[int, ...]]]\n existing_huddles = set() # type: Set[Text]\n for huddle in Huddle.objects.all():\n existing_huddles.add(huddle.huddle_hash)\n for huddle_users in huddle_user_list:\n user_ids = [users[email].id for email in huddle_users] # type: List[int]\n huddle_hash = get_huddle_hash(user_ids)\n if huddle_hash in existing_huddles:\n continue\n huddle_set.add((huddle_hash, tuple(sorted(user_ids))))\n\n huddles_to_create = [] # type: List[Huddle]\n for (huddle_hash, _) in huddle_set:\n huddles_to_create.append(Huddle(huddle_hash=huddle_hash))\n Huddle.objects.bulk_create(huddles_to_create)\n\n for huddle in Huddle.objects.all():\n huddles[huddle.huddle_hash] = huddle\n huddles_by_id[huddle.id] = huddle\n\n recipients_to_create = [] # type: List[Recipient]\n for (huddle_hash, _) in huddle_set:\n recipients_to_create.append(Recipient(type_id=huddles[huddle_hash].id, type=Recipient.HUDDLE))\n Recipient.objects.bulk_create(recipients_to_create)\n\n huddle_recipients = {} # type: Dict[Text, Recipient]\n for recipient in Recipient.objects.filter(type=Recipient.HUDDLE):\n huddle_recipients[huddles_by_id[recipient.type_id].huddle_hash] = recipient\n\n subscriptions_to_create = [] # type: List[Subscription]\n for (huddle_hash, huddle_user_ids) in huddle_set:\n for user_id in huddle_user_ids:\n subscriptions_to_create.append(Subscription(active=True, user_profile_id=user_id,\n 
recipient=huddle_recipients[huddle_hash]))\n Subscription.objects.bulk_create(subscriptions_to_create)\n", "path": "zerver/lib/bulk_create.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nfrom django.contrib.auth.models import UserManager\nfrom django.utils import timezone\nfrom zerver.models import UserProfile, Recipient, Subscription, Realm, Stream\nimport base64\nimport ujson\nimport os\nimport string\nfrom six.moves import range\n\nfrom typing import Optional, Text\n\ndef random_api_key():\n # type: () -> Text\n choices = string.ascii_letters + string.digits\n altchars = ''.join([choices[ord(os.urandom(1)) % 62] for _ in range(2)]).encode(\"utf-8\")\n return base64.b64encode(os.urandom(24), altchars=altchars).decode(\"utf-8\")\n\n# create_user_profile is based on Django's User.objects.create_user,\n# except that we don't save to the database so it can used in\n# bulk_creates\n#\n# Only use this for bulk_create -- for normal usage one should use\n# create_user (below) which will also make the Subscription and\n# Recipient objects\ndef create_user_profile(realm, email, password, active, bot_type, full_name,\n short_name, bot_owner, is_mirror_dummy, tos_version,\n tutorial_status=UserProfile.TUTORIAL_WAITING,\n enter_sends=False):\n # type: (Realm, Text, Optional[Text], bool, Optional[int], Text, Text, Optional[UserProfile], bool, Optional[Text], Optional[Text], bool) -> UserProfile\n now = timezone.now()\n email = UserManager.normalize_email(email)\n\n user_profile = UserProfile(email=email, is_staff=False, is_active=active,\n full_name=full_name, short_name=short_name,\n last_login=now, date_joined=now, realm=realm,\n pointer=-1, is_bot=bool(bot_type), bot_type=bot_type,\n bot_owner=bot_owner, is_mirror_dummy=is_mirror_dummy,\n tos_version=tos_version,\n tutorial_status=tutorial_status,\n enter_sends=enter_sends,\n onboarding_steps=ujson.dumps([]),\n default_language=realm.default_language)\n\n if bot_type or not active:\n password = None\n\n user_profile.set_password(password)\n\n user_profile.api_key = random_api_key()\n return user_profile\n\ndef create_user(email, password, realm, full_name, short_name,\n active=True, bot_type=None, bot_owner=None, tos_version=None,\n avatar_source=UserProfile.AVATAR_FROM_GRAVATAR,\n is_mirror_dummy=False, default_sending_stream=None,\n default_events_register_stream=None,\n default_all_public_streams=None, user_profile_id=None):\n # type: (Text, Text, Realm, Text, Text, bool, Optional[int], Optional[UserProfile], Optional[Text], Text, bool, Optional[Stream], Optional[Stream], Optional[bool], Optional[int]) -> UserProfile\n user_profile = create_user_profile(realm, email, password, active, bot_type,\n full_name, short_name, bot_owner,\n is_mirror_dummy, tos_version)\n user_profile.avatar_source = avatar_source\n user_profile.default_sending_stream = default_sending_stream\n user_profile.default_events_register_stream = default_events_register_stream\n # Allow the ORM default to be used if not provided\n if default_all_public_streams is not None:\n user_profile.default_all_public_streams = default_all_public_streams\n\n if user_profile_id is not None:\n user_profile.id = user_profile_id\n\n user_profile.save()\n recipient = Recipient.objects.create(type_id=user_profile.id,\n type=Recipient.PERSONAL)\n Subscription.objects.create(user_profile=user_profile, recipient=recipient)\n return user_profile\n", "path": "zerver/lib/create_user.py"}, {"content": "from __future__ import absolute_import\nfrom typing import Any, Dict, 
Iterable, List, Mapping, Optional, Set, Tuple, Text\n\nfrom zerver.lib.initial_password import initial_password\nfrom zerver.models import Realm, Stream, UserProfile, Huddle, \\\n Subscription, Recipient, Client, RealmAuditLog, get_huddle_hash\nfrom zerver.lib.create_user import create_user_profile\n\ndef bulk_create_users(realm, users_raw, bot_type=None, tos_version=None):\n # type: (Realm, Set[Tuple[Text, Text, Text, bool]], Optional[int], Optional[Text]) -> None\n \"\"\"\n Creates and saves a UserProfile with the given email.\n Has some code based off of UserManage.create_user, but doesn't .save()\n \"\"\"\n existing_users = frozenset(UserProfile.objects.values_list('email', flat=True))\n users = sorted([user_raw for user_raw in users_raw if user_raw[0] not in existing_users])\n\n # Now create user_profiles\n profiles_to_create = [] # type: List[UserProfile]\n for (email, full_name, short_name, active) in users:\n profile = create_user_profile(realm, email,\n initial_password(email), active, bot_type,\n full_name, short_name, None, False, tos_version,\n tutorial_status=UserProfile.TUTORIAL_FINISHED,\n enter_sends=True)\n profiles_to_create.append(profile)\n UserProfile.objects.bulk_create(profiles_to_create)\n\n RealmAuditLog.objects.bulk_create(\n [RealmAuditLog(realm=profile_.realm, modified_user=profile_,\n event_type='user_created', event_time=profile_.date_joined)\n for profile_ in profiles_to_create])\n\n profiles_by_email = {} # type: Dict[Text, UserProfile]\n profiles_by_id = {} # type: Dict[int, UserProfile]\n for profile in UserProfile.objects.select_related().all():\n profiles_by_email[profile.email] = profile\n profiles_by_id[profile.id] = profile\n\n recipients_to_create = [] # type: List[Recipient]\n for (email, full_name, short_name, active) in users:\n recipients_to_create.append(Recipient(type_id=profiles_by_email[email].id,\n type=Recipient.PERSONAL))\n Recipient.objects.bulk_create(recipients_to_create)\n\n recipients_by_email = {} # type: Dict[Text, Recipient]\n for recipient in Recipient.objects.filter(type=Recipient.PERSONAL):\n recipients_by_email[profiles_by_id[recipient.type_id].email] = recipient\n\n subscriptions_to_create = [] # type: List[Subscription]\n for (email, full_name, short_name, active) in users:\n subscriptions_to_create.append(\n Subscription(user_profile_id=profiles_by_email[email].id,\n recipient=recipients_by_email[email]))\n Subscription.objects.bulk_create(subscriptions_to_create)\n\ndef bulk_create_streams(realm, stream_dict):\n # type: (Realm, Dict[Text, Dict[Text, Any]]) -> None\n existing_streams = frozenset([name.lower() for name in\n Stream.objects.filter(realm=realm)\n .values_list('name', flat=True)])\n streams_to_create = [] # type: List[Stream]\n for name, options in stream_dict.items():\n if name.lower() not in existing_streams:\n streams_to_create.append(\n Stream(\n realm=realm, name=name, description=options[\"description\"],\n invite_only=options[\"invite_only\"]\n )\n )\n Stream.objects.bulk_create(streams_to_create)\n\n recipients_to_create = [] # type: List[Recipient]\n for stream in Stream.objects.filter(realm=realm).values('id', 'name'):\n if stream['name'].lower() not in existing_streams:\n recipients_to_create.append(Recipient(type_id=stream['id'],\n type=Recipient.STREAM))\n Recipient.objects.bulk_create(recipients_to_create)\n\ndef bulk_create_clients(client_list):\n # type: (Iterable[Text]) -> None\n existing_clients = set(client.name for client in Client.objects.select_related().all()) # type: Set[Text]\n\n 
clients_to_create = [] # type: List[Client]\n for name in client_list:\n if name not in existing_clients:\n clients_to_create.append(Client(name=name))\n existing_clients.add(name)\n Client.objects.bulk_create(clients_to_create)\n\ndef bulk_create_huddles(users, huddle_user_list):\n # type: (Dict[Text, UserProfile], Iterable[Iterable[Text]]) -> None\n huddles = {} # type: Dict[Text, Huddle]\n huddles_by_id = {} # type: Dict[int, Huddle]\n huddle_set = set() # type: Set[Tuple[Text, Tuple[int, ...]]]\n existing_huddles = set() # type: Set[Text]\n for huddle in Huddle.objects.all():\n existing_huddles.add(huddle.huddle_hash)\n for huddle_users in huddle_user_list:\n user_ids = [users[email].id for email in huddle_users] # type: List[int]\n huddle_hash = get_huddle_hash(user_ids)\n if huddle_hash in existing_huddles:\n continue\n huddle_set.add((huddle_hash, tuple(sorted(user_ids))))\n\n huddles_to_create = [] # type: List[Huddle]\n for (huddle_hash, _) in huddle_set:\n huddles_to_create.append(Huddle(huddle_hash=huddle_hash))\n Huddle.objects.bulk_create(huddles_to_create)\n\n for huddle in Huddle.objects.all():\n huddles[huddle.huddle_hash] = huddle\n huddles_by_id[huddle.id] = huddle\n\n recipients_to_create = [] # type: List[Recipient]\n for (huddle_hash, _) in huddle_set:\n recipients_to_create.append(Recipient(type_id=huddles[huddle_hash].id, type=Recipient.HUDDLE))\n Recipient.objects.bulk_create(recipients_to_create)\n\n huddle_recipients = {} # type: Dict[Text, Recipient]\n for recipient in Recipient.objects.filter(type=Recipient.HUDDLE):\n huddle_recipients[huddles_by_id[recipient.type_id].huddle_hash] = recipient\n\n subscriptions_to_create = [] # type: List[Subscription]\n for (huddle_hash, huddle_user_ids) in huddle_set:\n for user_id in huddle_user_ids:\n subscriptions_to_create.append(Subscription(active=True, user_profile_id=user_id,\n recipient=huddle_recipients[huddle_hash]))\n Subscription.objects.bulk_create(subscriptions_to_create)\n", "path": "zerver/lib/bulk_create.py"}]}
| 2,946 | 420 |
gh_patches_debug_19447
|
rasdani/github-patches
|
git_diff
|
pypa__setuptools-1704
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
build_meta breaks some setup.py scripts (e.g. numpy and pygame)
Setup:
* Setuptools 40.6.2
* Python 3.7.2
Some build scripts use `sys.argv[0]` to change the working directory to the parent directory of setup.py. For example, the setup.py script of numpy-1.15.4 contains the following code:
```
src_path = os.path.dirname(os.path.abspath(sys.argv[0]))
old_path = os.getcwd()
os.chdir(src_path)
```
However, `sys.argv[0]` is an empty string when setup.py is called from setuptools.build_meta. Then `os.path.abspath()` returns the working directory and `os.path.dirname()` its parent directory. This changes the current directory to the parent of the current path and breaks relative paths in the setup.py script.
While manually running `python3 setup.py bdist_wheel` succeeds, calling `setuptools.build_meta.build_wheel()` in the Python REPL then gives the following error:
```
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.7/site-packages/setuptools/build_meta.py", line 158, in build_wheel
_run_setup()
File "/usr/local/lib/python3.7/site-packages/setuptools/build_meta.py", line 85, in _run_setup
exec(compile(code, __file__, 'exec'), locals())
File "setup.py", line 403, in <module>
File "setup.py", line 349, in setup_package
File "setup.py", line 147, in write_version_py
FileNotFoundError: [Errno 2] No such file or directory: 'numpy/version.py'
```
A similar error occurs for pygame-1.9.4. Maybe I overlooked something, but it seems that the problem originates from the way setuptools.build_meta calls setup.py.
build_meta breaks some setup.py scripts (e.g. numpy and pygame)
Setup:
* Setuptools 40.6.2
* Python 3.7.2
Some build scripts use `sys.argv[0]` to change the working directory to the parent directory of setup.py. For example, the setup.py script of numpy-1.15.4 contains the following code:
```
src_path = os.path.dirname(os.path.abspath(sys.argv[0]))
old_path = os.getcwd()
os.chdir(src_path)
```
However, `sys.argv[0]` is an empty string when setup.py is called from setuptools.build_meta. Then `os.path.abspath()` returns the working directory and `os.path.dirname()` its parent directory. This changes the current directory to the parent of the current path and breaks relative paths in the setup.py script.
While manually running `python3 setup.py bdist_wheel` succeeds, calling `setuptools.build_meta.build_wheel()` in the Python REPL then gives the following error:
```
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.7/site-packages/setuptools/build_meta.py", line 158, in build_wheel
_run_setup()
File "/usr/local/lib/python3.7/site-packages/setuptools/build_meta.py", line 85, in _run_setup
exec(compile(code, __file__, 'exec'), locals())
File "setup.py", line 403, in <module>
File "setup.py", line 349, in setup_package
File "setup.py", line 147, in write_version_py
FileNotFoundError: [Errno 2] No such file or directory: 'numpy/version.py'
```
A similar error occurs for pygame-1.9.4. Maybe I overlooked something, but it seems that the problem originates from the way setuptools.build_meta calls setup.py.
--- END ISSUE ---
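The path behaviour described in the issue can be reproduced with a few lines of standalone Python (a hypothetical snippet, independent of numpy or pygame): with an empty `sys.argv[0]`, the computed "source" directory ends up being the parent of the current working directory instead of the directory itself.
```python
import os

# What a numpy-style setup.py computes when sys.argv[0] == "" (the build_meta
# case) versus when it is "setup.py" (direct command-line invocation).
cwd = os.getcwd()
print(os.path.dirname(os.path.abspath("")))          # parent of cwd -> wrong
print(os.path.dirname(os.path.abspath("setup.py")))  # cwd itself    -> expected
assert os.path.dirname(os.path.abspath("")) == os.path.dirname(cwd)
```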
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setuptools/build_meta.py`
Content:
```
1 """A PEP 517 interface to setuptools
2
3 Previously, when a user or a command line tool (let's call it a "frontend")
4 needed to make a request of setuptools to take a certain action, for
5 example, generating a list of installation requirements, the frontend would
6 would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line.
7
8 PEP 517 defines a different method of interfacing with setuptools. Rather
9 than calling "setup.py" directly, the frontend should:
10
11 1. Set the current directory to the directory with a setup.py file
12 2. Import this module into a safe python interpreter (one in which
13 setuptools can potentially set global variables or crash hard).
14 3. Call one of the functions defined in PEP 517.
15
16 What each function does is defined in PEP 517. However, here is a "casual"
17 definition of the functions (this definition should not be relied on for
18 bug reports or API stability):
19
20 - `build_wheel`: build a wheel in the folder and return the basename
21 - `get_requires_for_build_wheel`: get the `setup_requires` to build
22 - `prepare_metadata_for_build_wheel`: get the `install_requires`
23 - `build_sdist`: build an sdist in the folder and return the basename
24 - `get_requires_for_build_sdist`: get the `setup_requires` to build
25
26 Again, this is not a formal definition! Just a "taste" of the module.
27 """
28
29 import io
30 import os
31 import sys
32 import tokenize
33 import shutil
34 import contextlib
35
36 import setuptools
37 import distutils
38 from setuptools.py31compat import TemporaryDirectory
39
40 from pkg_resources import parse_requirements
41 from pkg_resources.py31compat import makedirs
42
43 __all__ = ['get_requires_for_build_sdist',
44 'get_requires_for_build_wheel',
45 'prepare_metadata_for_build_wheel',
46 'build_wheel',
47 'build_sdist',
48 '__legacy__',
49 'SetupRequirementsError']
50
51 class SetupRequirementsError(BaseException):
52 def __init__(self, specifiers):
53 self.specifiers = specifiers
54
55
56 class Distribution(setuptools.dist.Distribution):
57 def fetch_build_eggs(self, specifiers):
58 specifier_list = list(map(str, parse_requirements(specifiers)))
59
60 raise SetupRequirementsError(specifier_list)
61
62 @classmethod
63 @contextlib.contextmanager
64 def patch(cls):
65 """
66 Replace
67 distutils.dist.Distribution with this class
68 for the duration of this context.
69 """
70 orig = distutils.core.Distribution
71 distutils.core.Distribution = cls
72 try:
73 yield
74 finally:
75 distutils.core.Distribution = orig
76
77
78 def _to_str(s):
79 """
80 Convert a filename to a string (on Python 2, explicitly
81 a byte string, not Unicode) as distutils checks for the
82 exact type str.
83 """
84 if sys.version_info[0] == 2 and not isinstance(s, str):
85 # Assume it's Unicode, as that's what the PEP says
86 # should be provided.
87 return s.encode(sys.getfilesystemencoding())
88 return s
89
90
91 def _get_immediate_subdirectories(a_dir):
92 return [name for name in os.listdir(a_dir)
93 if os.path.isdir(os.path.join(a_dir, name))]
94
95
96 def _file_with_extension(directory, extension):
97 matching = (
98 f for f in os.listdir(directory)
99 if f.endswith(extension)
100 )
101 file, = matching
102 return file
103
104
105 def _open_setup_script(setup_script):
106 if not os.path.exists(setup_script):
107 # Supply a default setup.py
108 return io.StringIO(u"from setuptools import setup; setup()")
109
110 return getattr(tokenize, 'open', open)(setup_script)
111
112
113 class _BuildMetaBackend(object):
114
115 def _fix_config(self, config_settings):
116 config_settings = config_settings or {}
117 config_settings.setdefault('--global-option', [])
118 return config_settings
119
120 def _get_build_requires(self, config_settings, requirements):
121 config_settings = self._fix_config(config_settings)
122
123 sys.argv = sys.argv[:1] + ['egg_info'] + \
124 config_settings["--global-option"]
125 try:
126 with Distribution.patch():
127 self.run_setup()
128 except SetupRequirementsError as e:
129 requirements += e.specifiers
130
131 return requirements
132
133 def run_setup(self, setup_script='setup.py'):
134 # Note that we can reuse our build directory between calls
135 # Correctness comes first, then optimization later
136 __file__ = setup_script
137 __name__ = '__main__'
138
139 with _open_setup_script(__file__) as f:
140 code = f.read().replace(r'\r\n', r'\n')
141
142 exec(compile(code, __file__, 'exec'), locals())
143
144 def get_requires_for_build_wheel(self, config_settings=None):
145 config_settings = self._fix_config(config_settings)
146 return self._get_build_requires(config_settings, requirements=['wheel'])
147
148 def get_requires_for_build_sdist(self, config_settings=None):
149 config_settings = self._fix_config(config_settings)
150 return self._get_build_requires(config_settings, requirements=[])
151
152 def prepare_metadata_for_build_wheel(self, metadata_directory,
153 config_settings=None):
154 sys.argv = sys.argv[:1] + ['dist_info', '--egg-base',
155 _to_str(metadata_directory)]
156 self.run_setup()
157
158 dist_info_directory = metadata_directory
159 while True:
160 dist_infos = [f for f in os.listdir(dist_info_directory)
161 if f.endswith('.dist-info')]
162
163 if (len(dist_infos) == 0 and
164 len(_get_immediate_subdirectories(dist_info_directory)) == 1):
165
166 dist_info_directory = os.path.join(
167 dist_info_directory, os.listdir(dist_info_directory)[0])
168 continue
169
170 assert len(dist_infos) == 1
171 break
172
173 # PEP 517 requires that the .dist-info directory be placed in the
174 # metadata_directory. To comply, we MUST copy the directory to the root
175 if dist_info_directory != metadata_directory:
176 shutil.move(
177 os.path.join(dist_info_directory, dist_infos[0]),
178 metadata_directory)
179 shutil.rmtree(dist_info_directory, ignore_errors=True)
180
181 return dist_infos[0]
182
183 def _build_with_temp_dir(self, setup_command, result_extension,
184 result_directory, config_settings):
185 config_settings = self._fix_config(config_settings)
186 result_directory = os.path.abspath(result_directory)
187
188 # Build in a temporary directory, then copy to the target.
189 makedirs(result_directory, exist_ok=True)
190 with TemporaryDirectory(dir=result_directory) as tmp_dist_dir:
191 sys.argv = (sys.argv[:1] + setup_command +
192 ['--dist-dir', tmp_dist_dir] +
193 config_settings["--global-option"])
194 self.run_setup()
195
196 result_basename = _file_with_extension(tmp_dist_dir, result_extension)
197 result_path = os.path.join(result_directory, result_basename)
198 if os.path.exists(result_path):
199 # os.rename will fail overwriting on non-Unix.
200 os.remove(result_path)
201 os.rename(os.path.join(tmp_dist_dir, result_basename), result_path)
202
203 return result_basename
204
205
206 def build_wheel(self, wheel_directory, config_settings=None,
207 metadata_directory=None):
208 return self._build_with_temp_dir(['bdist_wheel'], '.whl',
209 wheel_directory, config_settings)
210
211 def build_sdist(self, sdist_directory, config_settings=None):
212 return self._build_with_temp_dir(['sdist', '--formats', 'gztar'],
213 '.tar.gz', sdist_directory,
214 config_settings)
215
216
217 class _BuildMetaLegacyBackend(_BuildMetaBackend):
218 """Compatibility backend for setuptools
219
220 This is a version of setuptools.build_meta that endeavors to maintain backwards
221 compatibility with pre-PEP 517 modes of invocation. It exists as a temporary
222 bridge between the old packaging mechanism and the new packaging mechanism,
223 and will eventually be removed.
224 """
225 def run_setup(self, setup_script='setup.py'):
226 # In order to maintain compatibility with scripts assuming that
227 # the setup.py script is in a directory on the PYTHONPATH, inject
228 # '' into sys.path. (pypa/setuptools#1642)
229 sys_path = list(sys.path) # Save the original path
230
231 script_dir = os.path.dirname(os.path.abspath(setup_script))
232 if script_dir not in sys.path:
233 sys.path.insert(0, script_dir)
234
235 try:
236 super(_BuildMetaLegacyBackend,
237 self).run_setup(setup_script=setup_script)
238 finally:
239 # While PEP 517 frontends should be calling each hook in a fresh
240 # subprocess according to the standard (and thus it should not be
241 # strictly necessary to restore the old sys.path), we'll restore
242 # the original path so that the path manipulation does not persist
243 # within the hook after run_setup is called.
244 sys.path[:] = sys_path
245
246 # The primary backend
247 _BACKEND = _BuildMetaBackend()
248
249 get_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel
250 get_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist
251 prepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel
252 build_wheel = _BACKEND.build_wheel
253 build_sdist = _BACKEND.build_sdist
254
255
256 # The legacy backend
257 __legacy__ = _BuildMetaLegacyBackend()
258
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setuptools/build_meta.py b/setuptools/build_meta.py
--- a/setuptools/build_meta.py
+++ b/setuptools/build_meta.py
@@ -232,6 +232,12 @@
if script_dir not in sys.path:
sys.path.insert(0, script_dir)
+ # Some setup.py scripts (e.g. in pygame and numpy) use sys.argv[0] to
+ # get the directory of the source code. They expect it to refer to the
+ # setup.py script.
+ sys_argv_0 = sys.argv[0]
+ sys.argv[0] = setup_script
+
try:
super(_BuildMetaLegacyBackend,
self).run_setup(setup_script=setup_script)
@@ -242,6 +248,7 @@
# the original path so that the path manipulation does not persist
# within the hook after run_setup is called.
sys.path[:] = sys_path
+ sys.argv[0] = sys_argv_0
# The primary backend
_BACKEND = _BuildMetaBackend()
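As a sanity check, the patched legacy backend can be exercised against a throwaway project that uses the numpy-style `sys.argv[0]` trick. This is a hedged sketch, not part of the setuptools test suite; it assumes a setuptools build containing the patch above and the `wheel` package are installed, and the project name/version are made up for illustration.
```python
import os
import sys
import tempfile
import textwrap

from setuptools.build_meta import __legacy__

project = tempfile.mkdtemp()
with open(os.path.join(project, "setup.py"), "w") as f:
    f.write(textwrap.dedent("""\
        import os, sys
        from setuptools import setup

        # Mirrors the numpy/pygame pattern from the issue.
        src_path = os.path.dirname(os.path.abspath(sys.argv[0]))
        os.chdir(src_path)

        setup(name="demo", version="0.1")
    """))

original_argv0 = sys.argv[0]
os.chdir(project)  # PEP 517 frontends run hooks from the project directory
wheel_name = __legacy__.build_wheel(os.path.join(project, "dist"))

print(wheel_name)                     # e.g. demo-0.1-py3-none-any.whl
print(sys.argv[0] == original_argv0)  # True: argv[0] is restored afterwards
```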
|
{"golden_diff": "diff --git a/setuptools/build_meta.py b/setuptools/build_meta.py\n--- a/setuptools/build_meta.py\n+++ b/setuptools/build_meta.py\n@@ -232,6 +232,12 @@\n if script_dir not in sys.path:\n sys.path.insert(0, script_dir)\n \n+ # Some setup.py scripts (e.g. in pygame and numpy) use sys.argv[0] to\n+ # get the directory of the source code. They expect it to refer to the\n+ # setup.py script.\n+ sys_argv_0 = sys.argv[0]\n+ sys.argv[0] = setup_script\n+\n try:\n super(_BuildMetaLegacyBackend,\n self).run_setup(setup_script=setup_script)\n@@ -242,6 +248,7 @@\n # the original path so that the path manipulation does not persist\n # within the hook after run_setup is called.\n sys.path[:] = sys_path\n+ sys.argv[0] = sys_argv_0\n \n # The primary backend\n _BACKEND = _BuildMetaBackend()\n", "issue": "build_meta breaks some setup.py scripts (e.g. numpy and pygame)\nSetup:\r\n* Setuptools 40.6.2\r\n* Python 3.7.2\r\n\r\nSome build scripts use `sys.argv[0]` to change the working directory to the parent directory of setup.py. For example, the setup.py script of numpy-1.15.4 contains the following code: \r\n\r\n```\r\nsrc_path = os.path.dirname(os.path.abspath(sys.argv[0]))\r\nold_path = os.getcwd()\r\nos.chdir(src_path)\r\n```\r\n\r\nHowever,`sys.argv[0]` is an empty string, setup.py is called from setuptools.build_meta. Then `os.path.abspath()`, is the working directory and `os.path.dirname()` its parent directory. This changes the current directory to the parent directory of the current path and breaks relative paths in the setup.py script.\r\n\r\nWhile manually running `python3 setup.py bdist_wheel` succedes, calling `setuptools.build_meta.build_wheel()` in the Python REPL then gives the following error:\r\n```\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/usr/local/lib/python3.7/site-packages/setuptools/build_meta.py\", line 158, in build_wheel\r\n _run_setup()\r\n File \"/usr/local/lib/python3.7/site-packages/setuptools/build_meta.py\", line 85, in _run_setup\r\n exec(compile(code, __file__, 'exec'), locals())\r\n File \"setup.py\", line 403, in <module>\r\n File \"setup.py\", line 349, in setup_package\r\n File \"setup.py\", line 147, in write_version_py\r\nFileNotFoundError: [Errno 2] No such file or directory: 'numpy/version.py'\r\n``` \r\n\r\nA similar error occurs for pygame-1.9.4. Maybe I overlooked something, but it seems that the problem originates from the way setuptools.build_meta calls setup.py. \nbuild_meta breaks some setup.py scripts (e.g. numpy and pygame)\nSetup:\r\n* Setuptools 40.6.2\r\n* Python 3.7.2\r\n\r\nSome build scripts use `sys.argv[0]` to change the working directory to the parent directory of setup.py. For example, the setup.py script of numpy-1.15.4 contains the following code: \r\n\r\n```\r\nsrc_path = os.path.dirname(os.path.abspath(sys.argv[0]))\r\nold_path = os.getcwd()\r\nos.chdir(src_path)\r\n```\r\n\r\nHowever,`sys.argv[0]` is an empty string, setup.py is called from setuptools.build_meta. Then `os.path.abspath()`, is the working directory and `os.path.dirname()` its parent directory. 
This changes the current directory to the parent directory of the current path and breaks relative paths in the setup.py script.\r\n\r\nWhile manually running `python3 setup.py bdist_wheel` succedes, calling `setuptools.build_meta.build_wheel()` in the Python REPL then gives the following error:\r\n```\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/usr/local/lib/python3.7/site-packages/setuptools/build_meta.py\", line 158, in build_wheel\r\n _run_setup()\r\n File \"/usr/local/lib/python3.7/site-packages/setuptools/build_meta.py\", line 85, in _run_setup\r\n exec(compile(code, __file__, 'exec'), locals())\r\n File \"setup.py\", line 403, in <module>\r\n File \"setup.py\", line 349, in setup_package\r\n File \"setup.py\", line 147, in write_version_py\r\nFileNotFoundError: [Errno 2] No such file or directory: 'numpy/version.py'\r\n``` \r\n\r\nA similar error occurs for pygame-1.9.4. Maybe I overlooked something, but it seems that the problem originates from the way setuptools.build_meta calls setup.py. \n", "before_files": [{"content": "\"\"\"A PEP 517 interface to setuptools\n\nPreviously, when a user or a command line tool (let's call it a \"frontend\")\nneeded to make a request of setuptools to take a certain action, for\nexample, generating a list of installation requirements, the frontend would\nwould call \"setup.py egg_info\" or \"setup.py bdist_wheel\" on the command line.\n\nPEP 517 defines a different method of interfacing with setuptools. Rather\nthan calling \"setup.py\" directly, the frontend should:\n\n 1. Set the current directory to the directory with a setup.py file\n 2. Import this module into a safe python interpreter (one in which\n setuptools can potentially set global variables or crash hard).\n 3. Call one of the functions defined in PEP 517.\n\nWhat each function does is defined in PEP 517. However, here is a \"casual\"\ndefinition of the functions (this definition should not be relied on for\nbug reports or API stability):\n\n - `build_wheel`: build a wheel in the folder and return the basename\n - `get_requires_for_build_wheel`: get the `setup_requires` to build\n - `prepare_metadata_for_build_wheel`: get the `install_requires`\n - `build_sdist`: build an sdist in the folder and return the basename\n - `get_requires_for_build_sdist`: get the `setup_requires` to build\n\nAgain, this is not a formal definition! 
Just a \"taste\" of the module.\n\"\"\"\n\nimport io\nimport os\nimport sys\nimport tokenize\nimport shutil\nimport contextlib\n\nimport setuptools\nimport distutils\nfrom setuptools.py31compat import TemporaryDirectory\n\nfrom pkg_resources import parse_requirements\nfrom pkg_resources.py31compat import makedirs\n\n__all__ = ['get_requires_for_build_sdist',\n 'get_requires_for_build_wheel',\n 'prepare_metadata_for_build_wheel',\n 'build_wheel',\n 'build_sdist',\n '__legacy__',\n 'SetupRequirementsError']\n\nclass SetupRequirementsError(BaseException):\n def __init__(self, specifiers):\n self.specifiers = specifiers\n\n\nclass Distribution(setuptools.dist.Distribution):\n def fetch_build_eggs(self, specifiers):\n specifier_list = list(map(str, parse_requirements(specifiers)))\n\n raise SetupRequirementsError(specifier_list)\n\n @classmethod\n @contextlib.contextmanager\n def patch(cls):\n \"\"\"\n Replace\n distutils.dist.Distribution with this class\n for the duration of this context.\n \"\"\"\n orig = distutils.core.Distribution\n distutils.core.Distribution = cls\n try:\n yield\n finally:\n distutils.core.Distribution = orig\n\n\ndef _to_str(s):\n \"\"\"\n Convert a filename to a string (on Python 2, explicitly\n a byte string, not Unicode) as distutils checks for the\n exact type str.\n \"\"\"\n if sys.version_info[0] == 2 and not isinstance(s, str):\n # Assume it's Unicode, as that's what the PEP says\n # should be provided.\n return s.encode(sys.getfilesystemencoding())\n return s\n\n\ndef _get_immediate_subdirectories(a_dir):\n return [name for name in os.listdir(a_dir)\n if os.path.isdir(os.path.join(a_dir, name))]\n\n\ndef _file_with_extension(directory, extension):\n matching = (\n f for f in os.listdir(directory)\n if f.endswith(extension)\n )\n file, = matching\n return file\n\n\ndef _open_setup_script(setup_script):\n if not os.path.exists(setup_script):\n # Supply a default setup.py\n return io.StringIO(u\"from setuptools import setup; setup()\")\n\n return getattr(tokenize, 'open', open)(setup_script)\n\n\nclass _BuildMetaBackend(object):\n\n def _fix_config(self, config_settings):\n config_settings = config_settings or {}\n config_settings.setdefault('--global-option', [])\n return config_settings\n\n def _get_build_requires(self, config_settings, requirements):\n config_settings = self._fix_config(config_settings)\n\n sys.argv = sys.argv[:1] + ['egg_info'] + \\\n config_settings[\"--global-option\"]\n try:\n with Distribution.patch():\n self.run_setup()\n except SetupRequirementsError as e:\n requirements += e.specifiers\n\n return requirements\n\n def run_setup(self, setup_script='setup.py'):\n # Note that we can reuse our build directory between calls\n # Correctness comes first, then optimization later\n __file__ = setup_script\n __name__ = '__main__'\n\n with _open_setup_script(__file__) as f:\n code = f.read().replace(r'\\r\\n', r'\\n')\n\n exec(compile(code, __file__, 'exec'), locals())\n\n def get_requires_for_build_wheel(self, config_settings=None):\n config_settings = self._fix_config(config_settings)\n return self._get_build_requires(config_settings, requirements=['wheel'])\n\n def get_requires_for_build_sdist(self, config_settings=None):\n config_settings = self._fix_config(config_settings)\n return self._get_build_requires(config_settings, requirements=[])\n\n def prepare_metadata_for_build_wheel(self, metadata_directory,\n config_settings=None):\n sys.argv = sys.argv[:1] + ['dist_info', '--egg-base',\n _to_str(metadata_directory)]\n self.run_setup()\n\n 
dist_info_directory = metadata_directory\n while True:\n dist_infos = [f for f in os.listdir(dist_info_directory)\n if f.endswith('.dist-info')]\n\n if (len(dist_infos) == 0 and\n len(_get_immediate_subdirectories(dist_info_directory)) == 1):\n\n dist_info_directory = os.path.join(\n dist_info_directory, os.listdir(dist_info_directory)[0])\n continue\n\n assert len(dist_infos) == 1\n break\n\n # PEP 517 requires that the .dist-info directory be placed in the\n # metadata_directory. To comply, we MUST copy the directory to the root\n if dist_info_directory != metadata_directory:\n shutil.move(\n os.path.join(dist_info_directory, dist_infos[0]),\n metadata_directory)\n shutil.rmtree(dist_info_directory, ignore_errors=True)\n\n return dist_infos[0]\n\n def _build_with_temp_dir(self, setup_command, result_extension,\n result_directory, config_settings):\n config_settings = self._fix_config(config_settings)\n result_directory = os.path.abspath(result_directory)\n\n # Build in a temporary directory, then copy to the target.\n makedirs(result_directory, exist_ok=True)\n with TemporaryDirectory(dir=result_directory) as tmp_dist_dir:\n sys.argv = (sys.argv[:1] + setup_command +\n ['--dist-dir', tmp_dist_dir] +\n config_settings[\"--global-option\"])\n self.run_setup()\n\n result_basename = _file_with_extension(tmp_dist_dir, result_extension)\n result_path = os.path.join(result_directory, result_basename)\n if os.path.exists(result_path):\n # os.rename will fail overwriting on non-Unix.\n os.remove(result_path)\n os.rename(os.path.join(tmp_dist_dir, result_basename), result_path)\n\n return result_basename\n\n\n def build_wheel(self, wheel_directory, config_settings=None,\n metadata_directory=None):\n return self._build_with_temp_dir(['bdist_wheel'], '.whl',\n wheel_directory, config_settings)\n\n def build_sdist(self, sdist_directory, config_settings=None):\n return self._build_with_temp_dir(['sdist', '--formats', 'gztar'],\n '.tar.gz', sdist_directory,\n config_settings)\n\n\nclass _BuildMetaLegacyBackend(_BuildMetaBackend):\n \"\"\"Compatibility backend for setuptools\n\n This is a version of setuptools.build_meta that endeavors to maintain backwards\n compatibility with pre-PEP 517 modes of invocation. It exists as a temporary\n bridge between the old packaging mechanism and the new packaging mechanism,\n and will eventually be removed.\n \"\"\"\n def run_setup(self, setup_script='setup.py'):\n # In order to maintain compatibility with scripts assuming that\n # the setup.py script is in a directory on the PYTHONPATH, inject\n # '' into sys.path. 
(pypa/setuptools#1642)\n sys_path = list(sys.path) # Save the original path\n\n script_dir = os.path.dirname(os.path.abspath(setup_script))\n if script_dir not in sys.path:\n sys.path.insert(0, script_dir)\n\n try:\n super(_BuildMetaLegacyBackend,\n self).run_setup(setup_script=setup_script)\n finally:\n # While PEP 517 frontends should be calling each hook in a fresh\n # subprocess according to the standard (and thus it should not be\n # strictly necessary to restore the old sys.path), we'll restore\n # the original path so that the path manipulation does not persist\n # within the hook after run_setup is called.\n sys.path[:] = sys_path\n\n# The primary backend\n_BACKEND = _BuildMetaBackend()\n\nget_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel\nget_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist\nprepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel\nbuild_wheel = _BACKEND.build_wheel\nbuild_sdist = _BACKEND.build_sdist\n\n\n# The legacy backend\n__legacy__ = _BuildMetaLegacyBackend()\n", "path": "setuptools/build_meta.py"}], "after_files": [{"content": "\"\"\"A PEP 517 interface to setuptools\n\nPreviously, when a user or a command line tool (let's call it a \"frontend\")\nneeded to make a request of setuptools to take a certain action, for\nexample, generating a list of installation requirements, the frontend would\nwould call \"setup.py egg_info\" or \"setup.py bdist_wheel\" on the command line.\n\nPEP 517 defines a different method of interfacing with setuptools. Rather\nthan calling \"setup.py\" directly, the frontend should:\n\n 1. Set the current directory to the directory with a setup.py file\n 2. Import this module into a safe python interpreter (one in which\n setuptools can potentially set global variables or crash hard).\n 3. Call one of the functions defined in PEP 517.\n\nWhat each function does is defined in PEP 517. However, here is a \"casual\"\ndefinition of the functions (this definition should not be relied on for\nbug reports or API stability):\n\n - `build_wheel`: build a wheel in the folder and return the basename\n - `get_requires_for_build_wheel`: get the `setup_requires` to build\n - `prepare_metadata_for_build_wheel`: get the `install_requires`\n - `build_sdist`: build an sdist in the folder and return the basename\n - `get_requires_for_build_sdist`: get the `setup_requires` to build\n\nAgain, this is not a formal definition! 
Just a \"taste\" of the module.\n\"\"\"\n\nimport io\nimport os\nimport sys\nimport tokenize\nimport shutil\nimport contextlib\n\nimport setuptools\nimport distutils\nfrom setuptools.py31compat import TemporaryDirectory\n\nfrom pkg_resources import parse_requirements\nfrom pkg_resources.py31compat import makedirs\n\n__all__ = ['get_requires_for_build_sdist',\n 'get_requires_for_build_wheel',\n 'prepare_metadata_for_build_wheel',\n 'build_wheel',\n 'build_sdist',\n '__legacy__',\n 'SetupRequirementsError']\n\nclass SetupRequirementsError(BaseException):\n def __init__(self, specifiers):\n self.specifiers = specifiers\n\n\nclass Distribution(setuptools.dist.Distribution):\n def fetch_build_eggs(self, specifiers):\n specifier_list = list(map(str, parse_requirements(specifiers)))\n\n raise SetupRequirementsError(specifier_list)\n\n @classmethod\n @contextlib.contextmanager\n def patch(cls):\n \"\"\"\n Replace\n distutils.dist.Distribution with this class\n for the duration of this context.\n \"\"\"\n orig = distutils.core.Distribution\n distutils.core.Distribution = cls\n try:\n yield\n finally:\n distutils.core.Distribution = orig\n\n\ndef _to_str(s):\n \"\"\"\n Convert a filename to a string (on Python 2, explicitly\n a byte string, not Unicode) as distutils checks for the\n exact type str.\n \"\"\"\n if sys.version_info[0] == 2 and not isinstance(s, str):\n # Assume it's Unicode, as that's what the PEP says\n # should be provided.\n return s.encode(sys.getfilesystemencoding())\n return s\n\n\ndef _get_immediate_subdirectories(a_dir):\n return [name for name in os.listdir(a_dir)\n if os.path.isdir(os.path.join(a_dir, name))]\n\n\ndef _file_with_extension(directory, extension):\n matching = (\n f for f in os.listdir(directory)\n if f.endswith(extension)\n )\n file, = matching\n return file\n\n\ndef _open_setup_script(setup_script):\n if not os.path.exists(setup_script):\n # Supply a default setup.py\n return io.StringIO(u\"from setuptools import setup; setup()\")\n\n return getattr(tokenize, 'open', open)(setup_script)\n\n\nclass _BuildMetaBackend(object):\n\n def _fix_config(self, config_settings):\n config_settings = config_settings or {}\n config_settings.setdefault('--global-option', [])\n return config_settings\n\n def _get_build_requires(self, config_settings, requirements):\n config_settings = self._fix_config(config_settings)\n\n sys.argv = sys.argv[:1] + ['egg_info'] + \\\n config_settings[\"--global-option\"]\n try:\n with Distribution.patch():\n self.run_setup()\n except SetupRequirementsError as e:\n requirements += e.specifiers\n\n return requirements\n\n def run_setup(self, setup_script='setup.py'):\n # Note that we can reuse our build directory between calls\n # Correctness comes first, then optimization later\n __file__ = setup_script\n __name__ = '__main__'\n\n with _open_setup_script(__file__) as f:\n code = f.read().replace(r'\\r\\n', r'\\n')\n\n exec(compile(code, __file__, 'exec'), locals())\n\n def get_requires_for_build_wheel(self, config_settings=None):\n config_settings = self._fix_config(config_settings)\n return self._get_build_requires(config_settings, requirements=['wheel'])\n\n def get_requires_for_build_sdist(self, config_settings=None):\n config_settings = self._fix_config(config_settings)\n return self._get_build_requires(config_settings, requirements=[])\n\n def prepare_metadata_for_build_wheel(self, metadata_directory,\n config_settings=None):\n sys.argv = sys.argv[:1] + ['dist_info', '--egg-base',\n _to_str(metadata_directory)]\n self.run_setup()\n\n 
dist_info_directory = metadata_directory\n while True:\n dist_infos = [f for f in os.listdir(dist_info_directory)\n if f.endswith('.dist-info')]\n\n if (len(dist_infos) == 0 and\n len(_get_immediate_subdirectories(dist_info_directory)) == 1):\n\n dist_info_directory = os.path.join(\n dist_info_directory, os.listdir(dist_info_directory)[0])\n continue\n\n assert len(dist_infos) == 1\n break\n\n # PEP 517 requires that the .dist-info directory be placed in the\n # metadata_directory. To comply, we MUST copy the directory to the root\n if dist_info_directory != metadata_directory:\n shutil.move(\n os.path.join(dist_info_directory, dist_infos[0]),\n metadata_directory)\n shutil.rmtree(dist_info_directory, ignore_errors=True)\n\n return dist_infos[0]\n\n def _build_with_temp_dir(self, setup_command, result_extension,\n result_directory, config_settings):\n config_settings = self._fix_config(config_settings)\n result_directory = os.path.abspath(result_directory)\n\n # Build in a temporary directory, then copy to the target.\n makedirs(result_directory, exist_ok=True)\n with TemporaryDirectory(dir=result_directory) as tmp_dist_dir:\n sys.argv = (sys.argv[:1] + setup_command +\n ['--dist-dir', tmp_dist_dir] +\n config_settings[\"--global-option\"])\n self.run_setup()\n\n result_basename = _file_with_extension(tmp_dist_dir, result_extension)\n result_path = os.path.join(result_directory, result_basename)\n if os.path.exists(result_path):\n # os.rename will fail overwriting on non-Unix.\n os.remove(result_path)\n os.rename(os.path.join(tmp_dist_dir, result_basename), result_path)\n\n return result_basename\n\n\n def build_wheel(self, wheel_directory, config_settings=None,\n metadata_directory=None):\n return self._build_with_temp_dir(['bdist_wheel'], '.whl',\n wheel_directory, config_settings)\n\n def build_sdist(self, sdist_directory, config_settings=None):\n return self._build_with_temp_dir(['sdist', '--formats', 'gztar'],\n '.tar.gz', sdist_directory,\n config_settings)\n\n\nclass _BuildMetaLegacyBackend(_BuildMetaBackend):\n \"\"\"Compatibility backend for setuptools\n\n This is a version of setuptools.build_meta that endeavors to maintain backwards\n compatibility with pre-PEP 517 modes of invocation. It exists as a temporary\n bridge between the old packaging mechanism and the new packaging mechanism,\n and will eventually be removed.\n \"\"\"\n def run_setup(self, setup_script='setup.py'):\n # In order to maintain compatibility with scripts assuming that\n # the setup.py script is in a directory on the PYTHONPATH, inject\n # '' into sys.path. (pypa/setuptools#1642)\n sys_path = list(sys.path) # Save the original path\n\n script_dir = os.path.dirname(os.path.abspath(setup_script))\n if script_dir not in sys.path:\n sys.path.insert(0, script_dir)\n\n # Some setup.py scripts (e.g. in pygame and numpy) use sys.argv[0] to\n # get the directory of the source code. 
They expect it to refer to the\n # setup.py script.\n sys_argv_0 = sys.argv[0]\n sys.argv[0] = setup_script\n\n try:\n super(_BuildMetaLegacyBackend,\n self).run_setup(setup_script=setup_script)\n finally:\n # While PEP 517 frontends should be calling each hook in a fresh\n # subprocess according to the standard (and thus it should not be\n # strictly necessary to restore the old sys.path), we'll restore\n # the original path so that the path manipulation does not persist\n # within the hook after run_setup is called.\n sys.path[:] = sys_path\n sys.argv[0] = sys_argv_0\n\n# The primary backend\n_BACKEND = _BuildMetaBackend()\n\nget_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel\nget_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist\nprepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel\nbuild_wheel = _BACKEND.build_wheel\nbuild_sdist = _BACKEND.build_sdist\n\n\n# The legacy backend\n__legacy__ = _BuildMetaLegacyBackend()\n", "path": "setuptools/build_meta.py"}]}
| 3,800 | 238 |
gh_patches_debug_57165
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-922
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: Spanish translations on all error pages are not showing up.
## To Reproduce
Steps to reproduce the behavior:
1. Go to benefits.calitp.org/
2. Click on Spanish
3. Go to benefits.calitp.org/asfakljsfasdf
4. See error
<img width="705" alt="image" src="https://user-images.githubusercontent.com/3673236/190244616-0867bdbe-cd77-477f-9fd0-0cf8f3b8625a.png">
Happening for 404 and 500
## Expected behavior
All the text should be in Spanish, not just the Footer and the Button.
## Screenshots
<img width="705" alt="image" src="https://user-images.githubusercontent.com/3673236/190244616-0867bdbe-cd77-477f-9fd0-0cf8f3b8625a.png">
## Desktop (please complete the following information)
Both
## Smartphone (please complete the following information)
All
## Additional context
Fix translations on error pages (default arguments are evaluated only once; use None as the default and check for None inside the function instead)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/core/viewmodels.py`
Content:
```
1 """
2 The core application: view model definitions for the root of the webapp.
3 """
4 from django.utils.translation import pgettext, gettext as _
5 from django.urls import reverse
6
7 from benefits.core import models
8
9 from . import session
10
11
12 class Button:
13 """
14 Represents a clickable button as styled <a> element (with optional label, optional transparent fallback text):
15 * classes: str, str[]
16 * id: str
17 * fallback_text: str
18 * label: str
19 * text: str
20 * url: str
21 * target: str
22 * rel: str
23 """
24
25 def __init__(self, **kwargs):
26 classes = kwargs.get("classes", [])
27 if isinstance(classes, str):
28 classes = classes.split()
29
30 self.classes = ["btn", "btn-lg"]
31 self.classes.extend(classes)
32 self.id = kwargs.get("id")
33 self.fallback_text = kwargs.get("fallback_text")
34 self.label = kwargs.get("label")
35 self.text = kwargs.get("text", "Button")
36 self.url = kwargs.get("url")
37 self.target = kwargs.get("target")
38 self.rel = kwargs.get("rel")
39
40 @staticmethod
41 def agency_contact_links(agency):
42 """Create link buttons for agency contact information."""
43 return [
44 Button.link(classes="agency", label=agency.long_name, text=agency.phone, url=f"tel:{agency.phone}"),
45 Button.link(
46 classes="agency", text=agency.info_url, url=agency.info_url, target="_blank", rel="noopener noreferrer"
47 ),
48 ]
49
50 @staticmethod
51 def home(request, text=None):
52 """Create a button back to this session's origin."""
53 if text is None:
54 text = _("core.buttons.return_home")
55
56 return Button.primary(text=text, url=session.origin(request))
57
58 @staticmethod
59 def link(**kwargs):
60 classes = kwargs.pop("classes", [])
61 if isinstance(classes, str):
62 classes = classes.split(" ")
63 classes.insert(0, "btn-link")
64 return Button(classes=classes, **kwargs)
65
66 @staticmethod
67 def primary(**kwargs):
68 classes = kwargs.pop("classes", [])
69 if isinstance(classes, str):
70 classes = classes.split(" ")
71 classes.insert(0, "btn-primary")
72 return Button(classes=classes, **kwargs)
73
74 @staticmethod
75 def outline_primary(**kwargs):
76 classes = kwargs.pop("classes", [])
77 if isinstance(classes, str):
78 classes = classes.split(" ")
79 classes.insert(0, "btn-outline-primary")
80 return Button(classes=classes, **kwargs)
81
82 @staticmethod
83 def login(**kwargs):
84 """Create a login.gov button, with a login.gov logo and fallback text"""
85 btn = Button.primary(fallback_text="Login.gov", id="login", **kwargs)
86 return btn
87
88 @staticmethod
89 def logout(**kwargs):
90 """Create a button that logs user out, with a login.gov button, with a login.gov logo and fallback text"""
91 btn = Button.primary(fallback_text="Login.gov", id="login", url=reverse("oauth:logout"), text="", **kwargs)
92 return btn
93
94
95 class Icon:
96 """Represents an icon."""
97
98 def __init__(self, icon, alt):
99 self.src = f"img/icon/{icon}.svg"
100 self.alt = alt
101
102
103 class Page:
104 """
105 Represents a page of content:
106 * title: str
107 * noimage: bool
108 * icon: core.viewmodels.Icon
109 * content_title: str
110 * paragraphs: str[]
111 * form: django.forms.Form
112 * forms: django.forms.Form[]
113 * button: core.viewmodels.Button
114 * buttons: core.viewmodels.Button[]
115 * classes: str[]
116 """
117
118 def __init__(self, **kwargs):
119 self.title = kwargs.get("title")
120 if self.title is None:
121 self.title = _("core.pages.index.prefix")
122 else:
123 self.title = f"{_('core.pages.index.prefix')}: {self.title}"
124
125 self.noimage = kwargs.get("noimage", False)
126 self.icon = kwargs.get("icon")
127 self.content_title = kwargs.get("content_title")
128 self.paragraphs = kwargs.get("paragraphs", [])
129 self.steps = kwargs.get("steps")
130
131 self.forms = kwargs.get("forms", [])
132 if not isinstance(self.forms, list):
133 self.forms = [self.forms]
134 if "form" in kwargs:
135 self.forms.append(kwargs.get("form"))
136
137 self.buttons = kwargs.get("buttons", [])
138 if not isinstance(self.buttons, list):
139 self.buttons = [self.buttons]
140 if "button" in kwargs:
141 self.buttons.append(kwargs.get("button"))
142
143 self.classes = kwargs.get("classes", [])
144 if not isinstance(self.classes, list):
145 self.classes = self.classes.split(" ")
146 if not self.noimage:
147 self.classes.append("with-image")
148
149 def context_dict(self):
150 """Return a context dict for a Page."""
151 return {"page": self}
152
153
154 class ErrorPage(Page):
155 """
156 Represents an error page:
157 * title: str
158 * icon: core.viewmodels.Icon
159 * content_title: str
160 * paragraphs: str[]
161 * button: core.viewmodels.Button
162 """
163
164 def __init__(self, **kwargs):
165 super().__init__(
166 title=kwargs.get("title", _("core.pages.error.title")),
167 icon=kwargs.get("icon", Icon("sadbus", pgettext("image alt text", "core.icons.sadbus"))),
168 content_title=kwargs.get("content_title", _("core.pages.error.title")),
169 paragraphs=kwargs.get("paragraphs", [_("core.pages.server_error.content_title")]),
170 button=kwargs.get("button"),
171 )
172
173 @staticmethod
174 def error(
175 title=_("core.pages.server_error.title"),
176 content_title=_("core.pages.server_error.title"),
177 paragraphs=[_("core.pages.server_error.p[0]"), _("core.pages.server_error.p[1]")],
178 **kwargs,
179 ):
180 """Create a new core.viewmodels.ErrorPage instance with defaults for a generic error."""
181 return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs)
182
183 @staticmethod
184 def not_found(
185 title=_("core.pages.not_found.title"),
186 content_title=_("core.pages.not_found.content_title"),
187 paragraphs=[_("core.pages.not_found.p[0]")],
188 **kwargs,
189 ):
190 """Create a new core.viewmodels.ErrorPage with defaults for a 404."""
191 path = kwargs.pop("path", None)
192 if path and title:
193 title = f"{title}: {path}"
194 elif path and not title:
195 title = path
196 return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs)
197
198
199 class PaymentProcessor:
200 """
201 Represents a core.models.PaymentProcessor:
202 * model: core.models.PaymentProcessor
203 * access_token_url: str
204 * element_id: str
205 * color: str
206 * [name: str]
207 * [loading_text: str]
208 """
209
210 def __init__(self, model, access_token_url, element_id, color, name=None, loading_text=_("core.buttons.wait")):
211 if isinstance(model, models.PaymentProcessor):
212 self.access_token_url = access_token_url
213 self.element_id = element_id
214 self.color = color
215 self.name = name or model.name
216 self.loading_text = loading_text
217 self.card_tokenize_url = model.card_tokenize_url
218 self.card_tokenize_func = model.card_tokenize_func
219 self.card_tokenize_env = model.card_tokenize_env
220
221 def context_dict(self):
222 """Return a context dict for a PaymentProcessor."""
223 return {"payment_processor": self}
224
225
226 class TransitAgency:
227 """
228 Represents a core.models.TransitAgency:
229 * model: core.models.TransitAgency
230 """
231
232 def __init__(self, model):
233 if isinstance(model, models.TransitAgency):
234 self.slug = model.slug
235 self.short_name = model.short_name
236 self.long_name = model.long_name
237 self.agency_id = model.agency_id
238 self.merchant_id = model.merchant_id
239 self.info_url = model.info_url
240 self.phone = model.phone
241
242 def context_dict(self):
243 """Return a context dict for a TransitAgency."""
244 return {"agency": self}
245
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/benefits/core/viewmodels.py b/benefits/core/viewmodels.py
--- a/benefits/core/viewmodels.py
+++ b/benefits/core/viewmodels.py
@@ -1,7 +1,7 @@
"""
The core application: view model definitions for the root of the webapp.
"""
-from django.utils.translation import pgettext, gettext as _
+from django.utils.translation import pgettext, gettext_lazy as _
from django.urls import reverse
from benefits.core import models
|
{"golden_diff": "diff --git a/benefits/core/viewmodels.py b/benefits/core/viewmodels.py\n--- a/benefits/core/viewmodels.py\n+++ b/benefits/core/viewmodels.py\n@@ -1,7 +1,7 @@\n \"\"\"\n The core application: view model definitions for the root of the webapp.\n \"\"\"\n-from django.utils.translation import pgettext, gettext as _\n+from django.utils.translation import pgettext, gettext_lazy as _\n from django.urls import reverse\n \n from benefits.core import models\n", "issue": "Bug: Spanish translations on all error pages are not showing up.\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Go to benefits.calitp.org/\r\n2. Click on Spanish\r\n3. Go to benefits.calitp.org/asfakljsfasdf\r\n4. See error\r\n\r\n<img width=\"705\" alt=\"image\" src=\"https://user-images.githubusercontent.com/3673236/190244616-0867bdbe-cd77-477f-9fd0-0cf8f3b8625a.png\">\r\n\r\nHappening for 404 and 500\r\n\r\n## Expected behavior\r\n\r\nAll the text should be in Spanish, not just the Footer and the Button.\r\n\r\n## Screenshots\r\n\r\n<img width=\"705\" alt=\"image\" src=\"https://user-images.githubusercontent.com/3673236/190244616-0867bdbe-cd77-477f-9fd0-0cf8f3b8625a.png\">\r\n\r\n## Desktop (please complete the following information)\r\n\r\nBoth \r\n\r\n## Smartphone (please complete the following information)\r\n\r\nAll\r\n\r\n## Additional context\r\n\r\nFix translations on error pages (default arguments set once, need to use None and check for None instead)\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nThe core application: view model definitions for the root of the webapp.\n\"\"\"\nfrom django.utils.translation import pgettext, gettext as _\nfrom django.urls import reverse\n\nfrom benefits.core import models\n\nfrom . import session\n\n\nclass Button:\n \"\"\"\n Represents a clickable button as styled <a> element (with optional label, optional transparent fallback text):\n * classes: str, str[]\n * id: str\n * fallback_text: str\n * label: str\n * text: str\n * url: str\n * target: str\n * rel: str\n \"\"\"\n\n def __init__(self, **kwargs):\n classes = kwargs.get(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split()\n\n self.classes = [\"btn\", \"btn-lg\"]\n self.classes.extend(classes)\n self.id = kwargs.get(\"id\")\n self.fallback_text = kwargs.get(\"fallback_text\")\n self.label = kwargs.get(\"label\")\n self.text = kwargs.get(\"text\", \"Button\")\n self.url = kwargs.get(\"url\")\n self.target = kwargs.get(\"target\")\n self.rel = kwargs.get(\"rel\")\n\n @staticmethod\n def agency_contact_links(agency):\n \"\"\"Create link buttons for agency contact information.\"\"\"\n return [\n Button.link(classes=\"agency\", label=agency.long_name, text=agency.phone, url=f\"tel:{agency.phone}\"),\n Button.link(\n classes=\"agency\", text=agency.info_url, url=agency.info_url, target=\"_blank\", rel=\"noopener noreferrer\"\n ),\n ]\n\n @staticmethod\n def home(request, text=None):\n \"\"\"Create a button back to this session's origin.\"\"\"\n if text is None:\n text = _(\"core.buttons.return_home\")\n\n return Button.primary(text=text, url=session.origin(request))\n\n @staticmethod\n def link(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-link\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def primary(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-primary\")\n return 
Button(classes=classes, **kwargs)\n\n @staticmethod\n def outline_primary(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-outline-primary\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def login(**kwargs):\n \"\"\"Create a login.gov button, with a login.gov logo and fallback text\"\"\"\n btn = Button.primary(fallback_text=\"Login.gov\", id=\"login\", **kwargs)\n return btn\n\n @staticmethod\n def logout(**kwargs):\n \"\"\"Create a button that logs user out, with a login.gov button, with a login.gov logo and fallback text\"\"\"\n btn = Button.primary(fallback_text=\"Login.gov\", id=\"login\", url=reverse(\"oauth:logout\"), text=\"\", **kwargs)\n return btn\n\n\nclass Icon:\n \"\"\"Represents an icon.\"\"\"\n\n def __init__(self, icon, alt):\n self.src = f\"img/icon/{icon}.svg\"\n self.alt = alt\n\n\nclass Page:\n \"\"\"\n Represents a page of content:\n * title: str\n * noimage: bool\n * icon: core.viewmodels.Icon\n * content_title: str\n * paragraphs: str[]\n * form: django.forms.Form\n * forms: django.forms.Form[]\n * button: core.viewmodels.Button\n * buttons: core.viewmodels.Button[]\n * classes: str[]\n \"\"\"\n\n def __init__(self, **kwargs):\n self.title = kwargs.get(\"title\")\n if self.title is None:\n self.title = _(\"core.pages.index.prefix\")\n else:\n self.title = f\"{_('core.pages.index.prefix')}: {self.title}\"\n\n self.noimage = kwargs.get(\"noimage\", False)\n self.icon = kwargs.get(\"icon\")\n self.content_title = kwargs.get(\"content_title\")\n self.paragraphs = kwargs.get(\"paragraphs\", [])\n self.steps = kwargs.get(\"steps\")\n\n self.forms = kwargs.get(\"forms\", [])\n if not isinstance(self.forms, list):\n self.forms = [self.forms]\n if \"form\" in kwargs:\n self.forms.append(kwargs.get(\"form\"))\n\n self.buttons = kwargs.get(\"buttons\", [])\n if not isinstance(self.buttons, list):\n self.buttons = [self.buttons]\n if \"button\" in kwargs:\n self.buttons.append(kwargs.get(\"button\"))\n\n self.classes = kwargs.get(\"classes\", [])\n if not isinstance(self.classes, list):\n self.classes = self.classes.split(\" \")\n if not self.noimage:\n self.classes.append(\"with-image\")\n\n def context_dict(self):\n \"\"\"Return a context dict for a Page.\"\"\"\n return {\"page\": self}\n\n\nclass ErrorPage(Page):\n \"\"\"\n Represents an error page:\n * title: str\n * icon: core.viewmodels.Icon\n * content_title: str\n * paragraphs: str[]\n * button: core.viewmodels.Button\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(\n title=kwargs.get(\"title\", _(\"core.pages.error.title\")),\n icon=kwargs.get(\"icon\", Icon(\"sadbus\", pgettext(\"image alt text\", \"core.icons.sadbus\"))),\n content_title=kwargs.get(\"content_title\", _(\"core.pages.error.title\")),\n paragraphs=kwargs.get(\"paragraphs\", [_(\"core.pages.server_error.content_title\")]),\n button=kwargs.get(\"button\"),\n )\n\n @staticmethod\n def error(\n title=_(\"core.pages.server_error.title\"),\n content_title=_(\"core.pages.server_error.title\"),\n paragraphs=[_(\"core.pages.server_error.p[0]\"), _(\"core.pages.server_error.p[1]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage instance with defaults for a generic error.\"\"\"\n return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs)\n\n @staticmethod\n def not_found(\n title=_(\"core.pages.not_found.title\"),\n content_title=_(\"core.pages.not_found.content_title\"),\n 
paragraphs=[_(\"core.pages.not_found.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage with defaults for a 404.\"\"\"\n path = kwargs.pop(\"path\", None)\n if path and title:\n title = f\"{title}: {path}\"\n elif path and not title:\n title = path\n return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs)\n\n\nclass PaymentProcessor:\n \"\"\"\n Represents a core.models.PaymentProcessor:\n * model: core.models.PaymentProcessor\n * access_token_url: str\n * element_id: str\n * color: str\n * [name: str]\n * [loading_text: str]\n \"\"\"\n\n def __init__(self, model, access_token_url, element_id, color, name=None, loading_text=_(\"core.buttons.wait\")):\n if isinstance(model, models.PaymentProcessor):\n self.access_token_url = access_token_url\n self.element_id = element_id\n self.color = color\n self.name = name or model.name\n self.loading_text = loading_text\n self.card_tokenize_url = model.card_tokenize_url\n self.card_tokenize_func = model.card_tokenize_func\n self.card_tokenize_env = model.card_tokenize_env\n\n def context_dict(self):\n \"\"\"Return a context dict for a PaymentProcessor.\"\"\"\n return {\"payment_processor\": self}\n\n\nclass TransitAgency:\n \"\"\"\n Represents a core.models.TransitAgency:\n * model: core.models.TransitAgency\n \"\"\"\n\n def __init__(self, model):\n if isinstance(model, models.TransitAgency):\n self.slug = model.slug\n self.short_name = model.short_name\n self.long_name = model.long_name\n self.agency_id = model.agency_id\n self.merchant_id = model.merchant_id\n self.info_url = model.info_url\n self.phone = model.phone\n\n def context_dict(self):\n \"\"\"Return a context dict for a TransitAgency.\"\"\"\n return {\"agency\": self}\n", "path": "benefits/core/viewmodels.py"}], "after_files": [{"content": "\"\"\"\nThe core application: view model definitions for the root of the webapp.\n\"\"\"\nfrom django.utils.translation import pgettext, gettext_lazy as _\nfrom django.urls import reverse\n\nfrom benefits.core import models\n\nfrom . 
import session\n\n\nclass Button:\n \"\"\"\n Represents a clickable button as styled <a> element (with optional label, optional transparent fallback text):\n * classes: str, str[]\n * id: str\n * fallback_text: str\n * label: str\n * text: str\n * url: str\n * target: str\n * rel: str\n \"\"\"\n\n def __init__(self, **kwargs):\n classes = kwargs.get(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split()\n\n self.classes = [\"btn\", \"btn-lg\"]\n self.classes.extend(classes)\n self.id = kwargs.get(\"id\")\n self.fallback_text = kwargs.get(\"fallback_text\")\n self.label = kwargs.get(\"label\")\n self.text = kwargs.get(\"text\", \"Button\")\n self.url = kwargs.get(\"url\")\n self.target = kwargs.get(\"target\")\n self.rel = kwargs.get(\"rel\")\n\n @staticmethod\n def agency_contact_links(agency):\n \"\"\"Create link buttons for agency contact information.\"\"\"\n return [\n Button.link(classes=\"agency\", label=agency.long_name, text=agency.phone, url=f\"tel:{agency.phone}\"),\n Button.link(\n classes=\"agency\", text=agency.info_url, url=agency.info_url, target=\"_blank\", rel=\"noopener noreferrer\"\n ),\n ]\n\n @staticmethod\n def home(request, text=None):\n \"\"\"Create a button back to this session's origin.\"\"\"\n if text is None:\n text = _(\"core.buttons.return_home\")\n\n return Button.primary(text=text, url=session.origin(request))\n\n @staticmethod\n def link(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-link\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def primary(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-primary\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def outline_primary(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-outline-primary\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def login(**kwargs):\n \"\"\"Create a login.gov button, with a login.gov logo and fallback text\"\"\"\n btn = Button.primary(fallback_text=\"Login.gov\", id=\"login\", **kwargs)\n return btn\n\n @staticmethod\n def logout(**kwargs):\n \"\"\"Create a button that logs user out, with a login.gov button, with a login.gov logo and fallback text\"\"\"\n btn = Button.primary(fallback_text=\"Login.gov\", id=\"login\", url=reverse(\"oauth:logout\"), text=\"\", **kwargs)\n return btn\n\n\nclass Icon:\n \"\"\"Represents an icon.\"\"\"\n\n def __init__(self, icon, alt):\n self.src = f\"img/icon/{icon}.svg\"\n self.alt = alt\n\n\nclass Page:\n \"\"\"\n Represents a page of content:\n * title: str\n * noimage: bool\n * icon: core.viewmodels.Icon\n * content_title: str\n * paragraphs: str[]\n * form: django.forms.Form\n * forms: django.forms.Form[]\n * button: core.viewmodels.Button\n * buttons: core.viewmodels.Button[]\n * classes: str[]\n \"\"\"\n\n def __init__(self, **kwargs):\n self.title = kwargs.get(\"title\")\n if self.title is None:\n self.title = _(\"core.pages.index.prefix\")\n else:\n self.title = f\"{_('core.pages.index.prefix')}: {self.title}\"\n\n self.noimage = kwargs.get(\"noimage\", False)\n self.icon = kwargs.get(\"icon\")\n self.content_title = kwargs.get(\"content_title\")\n self.paragraphs = kwargs.get(\"paragraphs\", [])\n self.steps = kwargs.get(\"steps\")\n\n self.forms = kwargs.get(\"forms\", [])\n if not 
isinstance(self.forms, list):\n self.forms = [self.forms]\n if \"form\" in kwargs:\n self.forms.append(kwargs.get(\"form\"))\n\n self.buttons = kwargs.get(\"buttons\", [])\n if not isinstance(self.buttons, list):\n self.buttons = [self.buttons]\n if \"button\" in kwargs:\n self.buttons.append(kwargs.get(\"button\"))\n\n self.classes = kwargs.get(\"classes\", [])\n if not isinstance(self.classes, list):\n self.classes = self.classes.split(\" \")\n if not self.noimage:\n self.classes.append(\"with-image\")\n\n def context_dict(self):\n \"\"\"Return a context dict for a Page.\"\"\"\n return {\"page\": self}\n\n\nclass ErrorPage(Page):\n \"\"\"\n Represents an error page:\n * title: str\n * icon: core.viewmodels.Icon\n * content_title: str\n * paragraphs: str[]\n * button: core.viewmodels.Button\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(\n title=kwargs.get(\"title\", _(\"core.pages.error.title\")),\n icon=kwargs.get(\"icon\", Icon(\"sadbus\", pgettext(\"image alt text\", \"core.icons.sadbus\"))),\n content_title=kwargs.get(\"content_title\", _(\"core.pages.error.title\")),\n paragraphs=kwargs.get(\"paragraphs\", [_(\"core.pages.server_error.content_title\")]),\n button=kwargs.get(\"button\"),\n )\n\n @staticmethod\n def error(\n title=_(\"core.pages.server_error.title\"),\n content_title=_(\"core.pages.server_error.title\"),\n paragraphs=[_(\"core.pages.server_error.p[0]\"), _(\"core.pages.server_error.p[1]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage instance with defaults for a generic error.\"\"\"\n return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs)\n\n @staticmethod\n def not_found(\n title=_(\"core.pages.not_found.title\"),\n content_title=_(\"core.pages.not_found.content_title\"),\n paragraphs=[_(\"core.pages.not_found.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage with defaults for a 404.\"\"\"\n path = kwargs.pop(\"path\", None)\n if path and title:\n title = f\"{title}: {path}\"\n elif path and not title:\n title = path\n return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs)\n\n\nclass PaymentProcessor:\n \"\"\"\n Represents a core.models.PaymentProcessor:\n * model: core.models.PaymentProcessor\n * access_token_url: str\n * element_id: str\n * color: str\n * [name: str]\n * [loading_text: str]\n \"\"\"\n\n def __init__(self, model, access_token_url, element_id, color, name=None, loading_text=_(\"core.buttons.wait\")):\n if isinstance(model, models.PaymentProcessor):\n self.access_token_url = access_token_url\n self.element_id = element_id\n self.color = color\n self.name = name or model.name\n self.loading_text = loading_text\n self.card_tokenize_url = model.card_tokenize_url\n self.card_tokenize_func = model.card_tokenize_func\n self.card_tokenize_env = model.card_tokenize_env\n\n def context_dict(self):\n \"\"\"Return a context dict for a PaymentProcessor.\"\"\"\n return {\"payment_processor\": self}\n\n\nclass TransitAgency:\n \"\"\"\n Represents a core.models.TransitAgency:\n * model: core.models.TransitAgency\n \"\"\"\n\n def __init__(self, model):\n if isinstance(model, models.TransitAgency):\n self.slug = model.slug\n self.short_name = model.short_name\n self.long_name = model.long_name\n self.agency_id = model.agency_id\n self.merchant_id = model.merchant_id\n self.info_url = model.info_url\n self.phone = model.phone\n\n def context_dict(self):\n \"\"\"Return a context dict for a TransitAgency.\"\"\"\n return {\"agency\": 
self}\n", "path": "benefits/core/viewmodels.py"}]}
| 2,974 | 103 |
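The `gettext_lazy` swap in the record above matters because the error-page defaults (`title=_("core.pages.server_error.title")` and the other keyword defaults in `ErrorPage.error`/`not_found`) are evaluated only once, when `viewmodels.py` is imported, so an eager `gettext` pins them to whichever language is active at import time. `gettext_lazy` stores a proxy that is resolved each time the value is rendered, so the 404/500 pages follow the request language. A minimal sketch of the difference, assuming a configured Django project with a Spanish catalogue available and using a placeholder message id rather than the real `core.pages.*` keys:

```python
from django.utils.translation import activate, gettext, gettext_lazy

# Eager: looked up once, at import time, in whatever language is then active.
EAGER_TITLE = gettext("error-page-title")

# Lazy: a proxy object; the lookup happens every time the value is rendered.
LAZY_TITLE = gettext_lazy("error-page-title")

def render_titles() -> tuple[str, str]:
    activate("es")  # simulate a Spanish request
    # Only the lazy value re-resolves under the newly activated language.
    return str(EAGER_TITLE), str(LAZY_TITLE)
```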
gh_patches_debug_16124
|
rasdani/github-patches
|
git_diff
|
getnikola__nikola-3159
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Responsive youtube embed
I'm trying to set an embedded youtube link to 100% width in a .rst file. Is this possible?
I've tried:
```
.. youtube:: 3XsQCkF1SrE
:align: center
:width: 100%
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/compile/rest/youtube.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2018 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """YouTube directive for reStructuredText."""
28
29 from docutils import nodes
30 from docutils.parsers.rst import Directive, directives
31 from nikola.plugins.compile.rest import _align_choice, _align_options_base
32
33 from nikola.plugin_categories import RestExtension
34
35
36 class Plugin(RestExtension):
37 """Plugin for the youtube directive."""
38
39 name = "rest_youtube"
40
41 def set_site(self, site):
42 """Set Nikola site."""
43 self.site = site
44 directives.register_directive('youtube', Youtube)
45 return super(Plugin, self).set_site(site)
46
47
48 CODE = """\
49 <div class="youtube-video{align}">
50 <iframe width="{width}" height="{height}"
51 src="https://www.youtube-nocookie.com/embed/{yid}?rel=0&wmode=transparent"
52 frameborder="0" allow="encrypted-media" allowfullscreen
53 ></iframe>
54 </div>"""
55
56
57 class Youtube(Directive):
58 """reST extension for inserting youtube embedded videos.
59
60 Usage:
61 .. youtube:: lyViVmaBQDg
62 :height: 400
63 :width: 600
64
65 """
66
67 has_content = True
68 required_arguments = 1
69 option_spec = {
70 "width": directives.positive_int,
71 "height": directives.positive_int,
72 "align": _align_choice
73 }
74
75 def run(self):
76 """Run the youtube directive."""
77 self.check_content()
78 options = {
79 'yid': self.arguments[0],
80 'width': 560,
81 'height': 315,
82 }
83 options.update(self.options)
84 if self.options.get('align') in _align_options_base:
85 options['align'] = ' align-' + self.options['align']
86 else:
87 options['align'] = ''
88 return [nodes.raw('', CODE.format(**options), format='html')]
89
90 def check_content(self):
91 """Check if content exists."""
92 if self.content: # pragma: no cover
93 raise self.warning("This directive does not accept content. The "
94 "'key=value' format for options is deprecated, "
95 "use ':key: value' instead")
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nikola/plugins/compile/rest/youtube.py b/nikola/plugins/compile/rest/youtube.py
--- a/nikola/plugins/compile/rest/youtube.py
+++ b/nikola/plugins/compile/rest/youtube.py
@@ -67,8 +67,8 @@
has_content = True
required_arguments = 1
option_spec = {
- "width": directives.positive_int,
- "height": directives.positive_int,
+ "width": directives.unchanged,
+ "height": directives.unchanged,
"align": _align_choice
}
@@ -80,7 +80,7 @@
'width': 560,
'height': 315,
}
- options.update(self.options)
+ options.update({k: v for k, v in self.options.items() if v})
if self.options.get('align') in _align_options_base:
options['align'] = ' align-' + self.options['align']
else:
|
{"golden_diff": "diff --git a/nikola/plugins/compile/rest/youtube.py b/nikola/plugins/compile/rest/youtube.py\n--- a/nikola/plugins/compile/rest/youtube.py\n+++ b/nikola/plugins/compile/rest/youtube.py\n@@ -67,8 +67,8 @@\n has_content = True\n required_arguments = 1\n option_spec = {\n- \"width\": directives.positive_int,\n- \"height\": directives.positive_int,\n+ \"width\": directives.unchanged,\n+ \"height\": directives.unchanged,\n \"align\": _align_choice\n }\n \n@@ -80,7 +80,7 @@\n 'width': 560,\n 'height': 315,\n }\n- options.update(self.options)\n+ options.update({k: v for k, v in self.options.items() if v})\n if self.options.get('align') in _align_options_base:\n options['align'] = ' align-' + self.options['align']\n else:\n", "issue": "Responsive youtube embed\nI'm trying to set an embedded youtube link to 100% width in a .rst file. Is this possible?\r\n\r\nI've tried:\r\n\r\n```\r\n.. youtube:: 3XsQCkF1SrE\r\n :align: center\r\n :width: 100%\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2018 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"YouTube directive for reStructuredText.\"\"\"\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive, directives\nfrom nikola.plugins.compile.rest import _align_choice, _align_options_base\n\nfrom nikola.plugin_categories import RestExtension\n\n\nclass Plugin(RestExtension):\n \"\"\"Plugin for the youtube directive.\"\"\"\n\n name = \"rest_youtube\"\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n self.site = site\n directives.register_directive('youtube', Youtube)\n return super(Plugin, self).set_site(site)\n\n\nCODE = \"\"\"\\\n<div class=\"youtube-video{align}\">\n<iframe width=\"{width}\" height=\"{height}\"\nsrc=\"https://www.youtube-nocookie.com/embed/{yid}?rel=0&wmode=transparent\"\nframeborder=\"0\" allow=\"encrypted-media\" allowfullscreen\n></iframe>\n</div>\"\"\"\n\n\nclass Youtube(Directive):\n \"\"\"reST extension for inserting youtube embedded videos.\n\n Usage:\n .. 
youtube:: lyViVmaBQDg\n :height: 400\n :width: 600\n\n \"\"\"\n\n has_content = True\n required_arguments = 1\n option_spec = {\n \"width\": directives.positive_int,\n \"height\": directives.positive_int,\n \"align\": _align_choice\n }\n\n def run(self):\n \"\"\"Run the youtube directive.\"\"\"\n self.check_content()\n options = {\n 'yid': self.arguments[0],\n 'width': 560,\n 'height': 315,\n }\n options.update(self.options)\n if self.options.get('align') in _align_options_base:\n options['align'] = ' align-' + self.options['align']\n else:\n options['align'] = ''\n return [nodes.raw('', CODE.format(**options), format='html')]\n\n def check_content(self):\n \"\"\"Check if content exists.\"\"\"\n if self.content: # pragma: no cover\n raise self.warning(\"This directive does not accept content. The \"\n \"'key=value' format for options is deprecated, \"\n \"use ':key: value' instead\")\n", "path": "nikola/plugins/compile/rest/youtube.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2018 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"YouTube directive for reStructuredText.\"\"\"\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive, directives\nfrom nikola.plugins.compile.rest import _align_choice, _align_options_base\n\nfrom nikola.plugin_categories import RestExtension\n\n\nclass Plugin(RestExtension):\n \"\"\"Plugin for the youtube directive.\"\"\"\n\n name = \"rest_youtube\"\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n self.site = site\n directives.register_directive('youtube', Youtube)\n return super(Plugin, self).set_site(site)\n\n\nCODE = \"\"\"\\\n<div class=\"youtube-video{align}\">\n<iframe width=\"{width}\" height=\"{height}\"\nsrc=\"https://www.youtube-nocookie.com/embed/{yid}?rel=0&wmode=transparent\"\nframeborder=\"0\" allow=\"encrypted-media\" allowfullscreen\n></iframe>\n</div>\"\"\"\n\n\nclass Youtube(Directive):\n \"\"\"reST extension for inserting youtube embedded videos.\n\n Usage:\n .. 
youtube:: lyViVmaBQDg\n :height: 400\n :width: 600\n\n \"\"\"\n\n has_content = True\n required_arguments = 1\n option_spec = {\n \"width\": directives.unchanged,\n \"height\": directives.unchanged,\n \"align\": _align_choice\n }\n\n def run(self):\n \"\"\"Run the youtube directive.\"\"\"\n self.check_content()\n options = {\n 'yid': self.arguments[0],\n 'width': 560,\n 'height': 315,\n }\n options.update({k: v for k, v in self.options.items() if v})\n if self.options.get('align') in _align_options_base:\n options['align'] = ' align-' + self.options['align']\n else:\n options['align'] = ''\n return [nodes.raw('', CODE.format(**options), format='html')]\n\n def check_content(self):\n \"\"\"Check if content exists.\"\"\"\n if self.content: # pragma: no cover\n raise self.warning(\"This directive does not accept content. The \"\n \"'key=value' format for options is deprecated, \"\n \"use ':key: value' instead\")\n", "path": "nikola/plugins/compile/rest/youtube.py"}]}
| 1,238 | 224 |
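The youtube-directive fix above works in two parts: `directives.unchanged` passes the option value through as a raw string (where `directives.positive_int` would reject `100%`), and the filtered `options.update(...)` keeps the integer defaults whenever an option is empty. A small sketch of the merged options, with a hand-written dict standing in for what docutils would actually parse from the directive:

```python
# Hypothetical parse result for:
#   .. youtube:: 3XsQCkF1SrE
#      :align: center
#      :width: 100%
parsed_options = {"width": "100%", "align": "center", "height": ""}

options = {"yid": "3XsQCkF1SrE", "width": 560, "height": 315}
# Mirrors the patched line: only non-empty values override the defaults.
options.update({k: v for k, v in parsed_options.items() if v})

assert options["width"] == "100%"  # percentage widths now reach the iframe
assert options["height"] == 315    # empty or omitted options keep their default
```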
gh_patches_debug_8918
|
rasdani/github-patches
|
git_diff
|
localstack__localstack-7373
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: java.lang.IllegalArgumentException: argument type mismatch with RequestHandler
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
The following request handler:
```java
public class LegalDocPublisher implements RequestHandler<SQSEvent, Void> {
@Override
public Void handleRequest(final SQSEvent event, final Context context) {
return null;
}
}
```
causes
```
2022-10-10T06:38:23.362 INFO --- [ Thread-244] l.s.a.lambda_executors : Error executing Lambda "arn:aws:lambda:us-east-2:000000000000:function:LegalDocPublisher": InvocationException: Lambda process returned error status code: 1. Result: . Output:
Exception in thread "main" java.lang.IllegalArgumentException: argument type mismatch
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(Unknown Source)
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(Unknown Source)
at java.base/java.lang.reflect.Method.invoke(Unknown Source)
at cloud.localstack.LambdaExecutor.main(LambdaExecutor.java:117) File "/opt/code/localstack/localstack/services/awslambda/lambda_executors.py", line 1423, in do_execute
execute_result = lambda_function_callable(inv_context.event, context)
File "/opt/code/localstack/localstack/services/awslambda/lambda_api.py", line 579, in execute
result = lambda_executors.EXECUTOR_LOCAL.execute_java_lambda(
File "/opt/code/localstack/localstack/services/awslambda/lambda_executors.py", line 1532, in execute_java_lambda
invocation_result = self._execute_in_custom_runtime(cmd, lambda_function=lambda_function)
File "/opt/code/localstack/localstack/services/awslambda/lambda_executors.py", line 1366, in _execute_in_custom_runtime
raise InvocationException(
```
when execution is triggered.
This works fine until LocalStack 1.0.4.
### Expected Behavior
No exceptions.
### How are you starting LocalStack?
Custom (please describe below)
### Steps To Reproduce
LocalStack is started as part of integration tests run by Maven, via `docker-maven-plugin`.
### Environment
```markdown
- OS: 20.04
- LocalStack: 1.2.0
```
### Anything else?
AWS SDK version: 1.12.271
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `localstack/services/awslambda/packages.py`
Content:
```
1 import os
2 import platform
3 import stat
4 from typing import List
5
6 from localstack.packages import DownloadInstaller, InstallTarget, Package, PackageInstaller
7 from localstack.packages.core import ArchiveDownloadAndExtractInstaller, SystemNotSupportedException
8 from localstack.utils.platform import get_arch
9
10 LAMBDA_RUNTIME_INIT_URL = "https://github.com/localstack/lambda-runtime-init/releases/download/{version}/aws-lambda-rie-{arch}"
11
12 LAMBDA_RUNTIME_DEFAULT_VERSION = "v0.1.8-pre"
13
14 # GO Lambda runtime
15 GO_RUNTIME_VERSION = "0.4.0"
16 GO_RUNTIME_DOWNLOAD_URL_TEMPLATE = "https://github.com/localstack/awslamba-go-runtime/releases/download/v{version}/awslamba-go-runtime-{version}-{os}-{arch}.tar.gz"
17
18
19 class AWSLambdaRuntimePackage(Package):
20 def __init__(self, default_version: str = LAMBDA_RUNTIME_DEFAULT_VERSION):
21 super().__init__(name="AwsLambda", default_version=default_version)
22
23 def get_versions(self) -> List[str]:
24 return [
25 "v0.1.8-pre",
26 "v0.1.7-pre",
27 "v0.1.6-pre",
28 "v0.1.5-pre",
29 "v0.1.4-pre",
30 "v0.1.1-pre",
31 "v0.1-pre",
32 ]
33
34 def _get_installer(self, version: str) -> PackageInstaller:
35 return AWSLambdaRuntimePackageInstaller(name="awslambda-runtime", version=version)
36
37
38 class AWSLambdaRuntimePackageInstaller(DownloadInstaller):
39 def _get_download_url(self) -> str:
40 arch = get_arch()
41 arch = "x86_64" if arch == "amd64" else arch
42 return LAMBDA_RUNTIME_INIT_URL.format(version=self.version, arch=arch)
43
44 def _install(self, target: InstallTarget) -> None:
45 super()._install(target)
46 install_location = self.get_executable_path()
47 st = os.stat(install_location)
48 os.chmod(install_location, mode=st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
49
50
51 class AWSLambdaGoRuntimePackage(Package):
52 def __init__(self, default_version: str = GO_RUNTIME_VERSION):
53 super().__init__(name="AwsLambdaGo", default_version=default_version)
54
55 def get_versions(self) -> List[str]:
56 return [GO_RUNTIME_VERSION]
57
58 def _get_installer(self, version: str) -> PackageInstaller:
59 return AWSLambdaGoRuntimePackageInstaller(name="awslamba-go-runtime", version=version)
60
61
62 class AWSLambdaGoRuntimePackageInstaller(ArchiveDownloadAndExtractInstaller):
63 def _get_download_url(self) -> str:
64 system = platform.system().lower()
65 arch = get_arch()
66
67 if system not in ["linux"]:
68 raise SystemNotSupportedException(f"Unsupported os {system} for awslambda-go-runtime")
69 if arch not in ["amd64", "arm64"]:
70 raise SystemNotSupportedException(f"Unsupported arch {arch} for awslambda-go-runtime")
71
72 return GO_RUNTIME_DOWNLOAD_URL_TEMPLATE.format(
73 version=GO_RUNTIME_VERSION,
74 os=system,
75 arch=arch,
76 )
77
78 def _get_install_marker_path(self, install_dir: str) -> str:
79 return os.path.join(install_dir, "aws-lambda-mock")
80
81 def _install(self, target: InstallTarget) -> None:
82 super()._install(target)
83
84 install_dir = self._get_install_dir(target)
85 install_location = self._get_install_marker_path(install_dir)
86 st = os.stat(install_location)
87 os.chmod(install_location, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
88
89 go_lambda_mockserver = os.path.join(install_dir, "mockserver")
90 st = os.stat(go_lambda_mockserver)
91 os.chmod(go_lambda_mockserver, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
92
93
94 # version of the Maven dependency with Java utility code
95 LOCALSTACK_MAVEN_VERSION = "0.2.21"
96 MAVEN_REPO_URL = "https://repo1.maven.org/maven2"
97 URL_LOCALSTACK_FAT_JAR = (
98 "{mvn_repo}/cloud/localstack/localstack-utils/{ver}/localstack-utils-{ver}-fat.jar"
99 )
100
101
102 class AWSLambdaJavaPackage(Package):
103 def __init__(self):
104 super().__init__("LambdaJavaLibs", "0.2.21")
105
106 def get_versions(self) -> List[str]:
107 return ["0.2.21"]
108
109 def _get_installer(self, version: str) -> PackageInstaller:
110 return AWSLambdaJavaPackageInstaller("lambda-java-libs", version)
111
112
113 class AWSLambdaJavaPackageInstaller(DownloadInstaller):
114 def _get_download_url(self) -> str:
115 return URL_LOCALSTACK_FAT_JAR.format(ver=self.version, mvn_repo=MAVEN_REPO_URL)
116
117
118 awslambda_runtime_package = AWSLambdaRuntimePackage()
119 awslambda_go_runtime_package = AWSLambdaGoRuntimePackage()
120 lambda_java_libs_package = AWSLambdaJavaPackage()
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/localstack/services/awslambda/packages.py b/localstack/services/awslambda/packages.py
--- a/localstack/services/awslambda/packages.py
+++ b/localstack/services/awslambda/packages.py
@@ -101,10 +101,10 @@
class AWSLambdaJavaPackage(Package):
def __init__(self):
- super().__init__("LambdaJavaLibs", "0.2.21")
+ super().__init__("LambdaJavaLibs", "0.2.22")
def get_versions(self) -> List[str]:
- return ["0.2.21"]
+ return ["0.2.22", "0.2.21"]
def _get_installer(self, version: str) -> PackageInstaller:
return AWSLambdaJavaPackageInstaller("lambda-java-libs", version)
|
{"golden_diff": "diff --git a/localstack/services/awslambda/packages.py b/localstack/services/awslambda/packages.py\n--- a/localstack/services/awslambda/packages.py\n+++ b/localstack/services/awslambda/packages.py\n@@ -101,10 +101,10 @@\n \n class AWSLambdaJavaPackage(Package):\n def __init__(self):\n- super().__init__(\"LambdaJavaLibs\", \"0.2.21\")\n+ super().__init__(\"LambdaJavaLibs\", \"0.2.22\")\n \n def get_versions(self) -> List[str]:\n- return [\"0.2.21\"]\n+ return [\"0.2.22\", \"0.2.21\"]\n \n def _get_installer(self, version: str) -> PackageInstaller:\n return AWSLambdaJavaPackageInstaller(\"lambda-java-libs\", version)\n", "issue": "bug: java.lang.IllegalArgumentException: argument type mismatch with RequestHandler\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues\r\n\r\n### Current Behavior\r\n\r\nThe following request handler:\r\n\r\n```java\r\npublic class LegalDocPublisher implements RequestHandler<SQSEvent, Void> {\r\n @Override\r\n public Void handleRequest(final SQSEvent event, final Context context) {\r\n return null;\r\n }\r\n}\r\n```\r\n\r\ncauses \r\n\r\n```\r\n2022-10-10T06:38:23.362 INFO --- [ Thread-244] l.s.a.lambda_executors : Error executing Lambda \"arn:aws:lambda:us-east-2:000000000000:function:LegalDocPublisher\": InvocationException: Lambda process returned error status code: 1. Result: . Output:\r\nException in thread \"main\" java.lang.IllegalArgumentException: argument type mismatch\r\n at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\r\n at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(Unknown Source)\r\n at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(Unknown Source)\r\n at java.base/java.lang.reflect.Method.invoke(Unknown Source)\r\n at cloud.localstack.LambdaExecutor.main(LambdaExecutor.java:117) File \"/opt/code/localstack/localstack/services/awslambda/lambda_executors.py\", line 1423, in do_execute\r\n execute_result = lambda_function_callable(inv_context.event, context)\r\n File \"/opt/code/localstack/localstack/services/awslambda/lambda_api.py\", line 579, in execute\r\n result = lambda_executors.EXECUTOR_LOCAL.execute_java_lambda(\r\n File \"/opt/code/localstack/localstack/services/awslambda/lambda_executors.py\", line 1532, in execute_java_lambda\r\n invocation_result = self._execute_in_custom_runtime(cmd, lambda_function=lambda_function)\r\n File \"/opt/code/localstack/localstack/services/awslambda/lambda_executors.py\", line 1366, in _execute_in_custom_runtime\r\n raise InvocationException(\r\n```\r\n\r\nwhen execution is triggered.\r\n\r\nThis works fine until LocalStack 1.0.4.\r\n\r\n### Expected Behavior\r\n\r\nNo exceptions.\r\n\r\n### How are you starting LocalStack?\r\n\r\nCustom (please describe below)\r\n\r\n### Steps To Reproduce\r\n\r\nLocalStack is started as part of integration tests run by Maven, via `docker-maven-plugin`.\r\n\r\n### Environment\r\n\r\n```markdown\r\n- OS: 20.04\r\n- LocalStack: 1.2.0\r\n```\r\n\r\n\r\n### Anything else?\r\n\r\nAWS SDK version: 1.12.271\n", "before_files": [{"content": "import os\nimport platform\nimport stat\nfrom typing import List\n\nfrom localstack.packages import DownloadInstaller, InstallTarget, Package, PackageInstaller\nfrom localstack.packages.core import ArchiveDownloadAndExtractInstaller, SystemNotSupportedException\nfrom localstack.utils.platform import get_arch\n\nLAMBDA_RUNTIME_INIT_URL = 
\"https://github.com/localstack/lambda-runtime-init/releases/download/{version}/aws-lambda-rie-{arch}\"\n\nLAMBDA_RUNTIME_DEFAULT_VERSION = \"v0.1.8-pre\"\n\n# GO Lambda runtime\nGO_RUNTIME_VERSION = \"0.4.0\"\nGO_RUNTIME_DOWNLOAD_URL_TEMPLATE = \"https://github.com/localstack/awslamba-go-runtime/releases/download/v{version}/awslamba-go-runtime-{version}-{os}-{arch}.tar.gz\"\n\n\nclass AWSLambdaRuntimePackage(Package):\n def __init__(self, default_version: str = LAMBDA_RUNTIME_DEFAULT_VERSION):\n super().__init__(name=\"AwsLambda\", default_version=default_version)\n\n def get_versions(self) -> List[str]:\n return [\n \"v0.1.8-pre\",\n \"v0.1.7-pre\",\n \"v0.1.6-pre\",\n \"v0.1.5-pre\",\n \"v0.1.4-pre\",\n \"v0.1.1-pre\",\n \"v0.1-pre\",\n ]\n\n def _get_installer(self, version: str) -> PackageInstaller:\n return AWSLambdaRuntimePackageInstaller(name=\"awslambda-runtime\", version=version)\n\n\nclass AWSLambdaRuntimePackageInstaller(DownloadInstaller):\n def _get_download_url(self) -> str:\n arch = get_arch()\n arch = \"x86_64\" if arch == \"amd64\" else arch\n return LAMBDA_RUNTIME_INIT_URL.format(version=self.version, arch=arch)\n\n def _install(self, target: InstallTarget) -> None:\n super()._install(target)\n install_location = self.get_executable_path()\n st = os.stat(install_location)\n os.chmod(install_location, mode=st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n\nclass AWSLambdaGoRuntimePackage(Package):\n def __init__(self, default_version: str = GO_RUNTIME_VERSION):\n super().__init__(name=\"AwsLambdaGo\", default_version=default_version)\n\n def get_versions(self) -> List[str]:\n return [GO_RUNTIME_VERSION]\n\n def _get_installer(self, version: str) -> PackageInstaller:\n return AWSLambdaGoRuntimePackageInstaller(name=\"awslamba-go-runtime\", version=version)\n\n\nclass AWSLambdaGoRuntimePackageInstaller(ArchiveDownloadAndExtractInstaller):\n def _get_download_url(self) -> str:\n system = platform.system().lower()\n arch = get_arch()\n\n if system not in [\"linux\"]:\n raise SystemNotSupportedException(f\"Unsupported os {system} for awslambda-go-runtime\")\n if arch not in [\"amd64\", \"arm64\"]:\n raise SystemNotSupportedException(f\"Unsupported arch {arch} for awslambda-go-runtime\")\n\n return GO_RUNTIME_DOWNLOAD_URL_TEMPLATE.format(\n version=GO_RUNTIME_VERSION,\n os=system,\n arch=arch,\n )\n\n def _get_install_marker_path(self, install_dir: str) -> str:\n return os.path.join(install_dir, \"aws-lambda-mock\")\n\n def _install(self, target: InstallTarget) -> None:\n super()._install(target)\n\n install_dir = self._get_install_dir(target)\n install_location = self._get_install_marker_path(install_dir)\n st = os.stat(install_location)\n os.chmod(install_location, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n go_lambda_mockserver = os.path.join(install_dir, \"mockserver\")\n st = os.stat(go_lambda_mockserver)\n os.chmod(go_lambda_mockserver, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n\n# version of the Maven dependency with Java utility code\nLOCALSTACK_MAVEN_VERSION = \"0.2.21\"\nMAVEN_REPO_URL = \"https://repo1.maven.org/maven2\"\nURL_LOCALSTACK_FAT_JAR = (\n \"{mvn_repo}/cloud/localstack/localstack-utils/{ver}/localstack-utils-{ver}-fat.jar\"\n)\n\n\nclass AWSLambdaJavaPackage(Package):\n def __init__(self):\n super().__init__(\"LambdaJavaLibs\", \"0.2.21\")\n\n def get_versions(self) -> List[str]:\n return [\"0.2.21\"]\n\n def _get_installer(self, version: str) -> PackageInstaller:\n return 
AWSLambdaJavaPackageInstaller(\"lambda-java-libs\", version)\n\n\nclass AWSLambdaJavaPackageInstaller(DownloadInstaller):\n def _get_download_url(self) -> str:\n return URL_LOCALSTACK_FAT_JAR.format(ver=self.version, mvn_repo=MAVEN_REPO_URL)\n\n\nawslambda_runtime_package = AWSLambdaRuntimePackage()\nawslambda_go_runtime_package = AWSLambdaGoRuntimePackage()\nlambda_java_libs_package = AWSLambdaJavaPackage()\n", "path": "localstack/services/awslambda/packages.py"}], "after_files": [{"content": "import os\nimport platform\nimport stat\nfrom typing import List\n\nfrom localstack.packages import DownloadInstaller, InstallTarget, Package, PackageInstaller\nfrom localstack.packages.core import ArchiveDownloadAndExtractInstaller, SystemNotSupportedException\nfrom localstack.utils.platform import get_arch\n\nLAMBDA_RUNTIME_INIT_URL = \"https://github.com/localstack/lambda-runtime-init/releases/download/{version}/aws-lambda-rie-{arch}\"\n\nLAMBDA_RUNTIME_DEFAULT_VERSION = \"v0.1.8-pre\"\n\n# GO Lambda runtime\nGO_RUNTIME_VERSION = \"0.4.0\"\nGO_RUNTIME_DOWNLOAD_URL_TEMPLATE = \"https://github.com/localstack/awslamba-go-runtime/releases/download/v{version}/awslamba-go-runtime-{version}-{os}-{arch}.tar.gz\"\n\n\nclass AWSLambdaRuntimePackage(Package):\n def __init__(self, default_version: str = LAMBDA_RUNTIME_DEFAULT_VERSION):\n super().__init__(name=\"AwsLambda\", default_version=default_version)\n\n def get_versions(self) -> List[str]:\n return [\n \"v0.1.8-pre\",\n \"v0.1.7-pre\",\n \"v0.1.6-pre\",\n \"v0.1.5-pre\",\n \"v0.1.4-pre\",\n \"v0.1.1-pre\",\n \"v0.1-pre\",\n ]\n\n def _get_installer(self, version: str) -> PackageInstaller:\n return AWSLambdaRuntimePackageInstaller(name=\"awslambda-runtime\", version=version)\n\n\nclass AWSLambdaRuntimePackageInstaller(DownloadInstaller):\n def _get_download_url(self) -> str:\n arch = get_arch()\n arch = \"x86_64\" if arch == \"amd64\" else arch\n return LAMBDA_RUNTIME_INIT_URL.format(version=self.version, arch=arch)\n\n def _install(self, target: InstallTarget) -> None:\n super()._install(target)\n install_location = self.get_executable_path()\n st = os.stat(install_location)\n os.chmod(install_location, mode=st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n\nclass AWSLambdaGoRuntimePackage(Package):\n def __init__(self, default_version: str = GO_RUNTIME_VERSION):\n super().__init__(name=\"AwsLambdaGo\", default_version=default_version)\n\n def get_versions(self) -> List[str]:\n return [GO_RUNTIME_VERSION]\n\n def _get_installer(self, version: str) -> PackageInstaller:\n return AWSLambdaGoRuntimePackageInstaller(name=\"awslamba-go-runtime\", version=version)\n\n\nclass AWSLambdaGoRuntimePackageInstaller(ArchiveDownloadAndExtractInstaller):\n def _get_download_url(self) -> str:\n system = platform.system().lower()\n arch = get_arch()\n\n if system not in [\"linux\"]:\n raise SystemNotSupportedException(f\"Unsupported os {system} for awslambda-go-runtime\")\n if arch not in [\"amd64\", \"arm64\"]:\n raise SystemNotSupportedException(f\"Unsupported arch {arch} for awslambda-go-runtime\")\n\n return GO_RUNTIME_DOWNLOAD_URL_TEMPLATE.format(\n version=GO_RUNTIME_VERSION,\n os=system,\n arch=arch,\n )\n\n def _get_install_marker_path(self, install_dir: str) -> str:\n return os.path.join(install_dir, \"aws-lambda-mock\")\n\n def _install(self, target: InstallTarget) -> None:\n super()._install(target)\n\n install_dir = self._get_install_dir(target)\n install_location = self._get_install_marker_path(install_dir)\n st = os.stat(install_location)\n 
os.chmod(install_location, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n go_lambda_mockserver = os.path.join(install_dir, \"mockserver\")\n st = os.stat(go_lambda_mockserver)\n os.chmod(go_lambda_mockserver, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n\n# version of the Maven dependency with Java utility code\nLOCALSTACK_MAVEN_VERSION = \"0.2.21\"\nMAVEN_REPO_URL = \"https://repo1.maven.org/maven2\"\nURL_LOCALSTACK_FAT_JAR = (\n \"{mvn_repo}/cloud/localstack/localstack-utils/{ver}/localstack-utils-{ver}-fat.jar\"\n)\n\n\nclass AWSLambdaJavaPackage(Package):\n def __init__(self):\n super().__init__(\"LambdaJavaLibs\", \"0.2.22\")\n\n def get_versions(self) -> List[str]:\n return [\"0.2.22\", \"0.2.21\"]\n\n def _get_installer(self, version: str) -> PackageInstaller:\n return AWSLambdaJavaPackageInstaller(\"lambda-java-libs\", version)\n\n\nclass AWSLambdaJavaPackageInstaller(DownloadInstaller):\n def _get_download_url(self) -> str:\n return URL_LOCALSTACK_FAT_JAR.format(ver=self.version, mvn_repo=MAVEN_REPO_URL)\n\n\nawslambda_runtime_package = AWSLambdaRuntimePackage()\nawslambda_go_runtime_package = AWSLambdaGoRuntimePackage()\nlambda_java_libs_package = AWSLambdaJavaPackage()\n", "path": "localstack/services/awslambda/packages.py"}]}
| 2,218 | 191 |
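The localstack fix above is purely a dependency bump: `AWSLambdaJavaPackage` now defaults to `localstack-utils` 0.2.22 while keeping 0.2.21 selectable, on the assumption that the newer fat jar's `LambdaExecutor` deserializes `SQSEvent` payloads correctly instead of hitting the reflective `argument type mismatch`. A rough sketch of how the new default flows into the artifact that actually gets downloaded, reusing the URL template from `packages.py`:

```python
MAVEN_REPO_URL = "https://repo1.maven.org/maven2"
URL_LOCALSTACK_FAT_JAR = (
    "{mvn_repo}/cloud/localstack/localstack-utils/{ver}/localstack-utils-{ver}-fat.jar"
)

# "0.2.22" is the new default passed to Package.__init__; the installer's
# _get_download_url() formats it into the Maven URL below.
for ver in ("0.2.22", "0.2.21"):
    print(URL_LOCALSTACK_FAT_JAR.format(ver=ver, mvn_repo=MAVEN_REPO_URL))
```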
gh_patches_debug_334
|
rasdani/github-patches
|
git_diff
|
searx__searx-2391
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SUGGESTION: Contacting the instance's maintainer(s)
Hello, so I use searx, but I personally think that there should be some way to contact the maintainer(s) of a public instance (email, for example). It is harder to trust this awesome service if there is no way to contact the maintainer(s).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/brand.py`
Content:
```
1 GIT_URL = 'https://github.com/searx/searx'
2 GIT_BRANCH = 'master'
3 ISSUE_URL = 'https://github.com/searx/searx/issues'
4 SEARX_URL = 'https://searx.me'
5 DOCS_URL = 'https://searx.github.io/searx'
6 PUBLIC_INSTANCES = 'https://searx.space'
7
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/searx/brand.py b/searx/brand.py
--- a/searx/brand.py
+++ b/searx/brand.py
@@ -4,3 +4,4 @@
SEARX_URL = 'https://searx.me'
DOCS_URL = 'https://searx.github.io/searx'
PUBLIC_INSTANCES = 'https://searx.space'
+CONTACT_URL = 'mailto:[email protected]'
|
{"golden_diff": "diff --git a/searx/brand.py b/searx/brand.py\n--- a/searx/brand.py\n+++ b/searx/brand.py\n@@ -4,3 +4,4 @@\n SEARX_URL = 'https://searx.me'\n DOCS_URL = 'https://searx.github.io/searx'\n PUBLIC_INSTANCES = 'https://searx.space'\n+CONTACT_URL = 'mailto:[email protected]'\n", "issue": "SUGGESTION: Contacting the instance's maintainer(s)\nHello, so I use searx, but I personally think that there should be any way to contact the maintainer(s) of a public instance (email for example). It is harder to trust this awesome service if there is no way to contact the maintainer(s). \r\n\n", "before_files": [{"content": "GIT_URL = 'https://github.com/searx/searx'\nGIT_BRANCH = 'master'\nISSUE_URL = 'https://github.com/searx/searx/issues'\nSEARX_URL = 'https://searx.me'\nDOCS_URL = 'https://searx.github.io/searx'\nPUBLIC_INSTANCES = 'https://searx.space'\n", "path": "searx/brand.py"}], "after_files": [{"content": "GIT_URL = 'https://github.com/searx/searx'\nGIT_BRANCH = 'master'\nISSUE_URL = 'https://github.com/searx/searx/issues'\nSEARX_URL = 'https://searx.me'\nDOCS_URL = 'https://searx.github.io/searx'\nPUBLIC_INSTANCES = 'https://searx.space'\nCONTACT_URL = 'mailto:[email protected]'\n", "path": "searx/brand.py"}]}
| 417 | 98 |
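The searx patch above only introduces the `CONTACT_URL` brand constant; surfacing it in the UI is left to the templates. A hypothetical sketch of consuming it alongside the existing brand constants (the `footer_links` helper and its keys are illustrative, not part of searx):

```python
from searx import brand  # assumes the patched searx/brand.py

def footer_links() -> dict[str, str]:
    links = {
        "issues": brand.ISSUE_URL,
        "public instances": brand.PUBLIC_INSTANCES,
    }
    # Present only after the patch; instances can point it at any mailto:/URL.
    if getattr(brand, "CONTACT_URL", None):
        links["contact"] = brand.CONTACT_URL
    return links
```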
gh_patches_debug_33935
|
rasdani/github-patches
|
git_diff
|
systemd__mkosi-1771
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support libarchive cpio
Hi,
please support libarchive cpio (bsdcpio), which does not have `--reproducible`.
https://lists.gnu.org/archive/html/bug-cpio/2014-11/msg00000.html
https://github.com/systemd/mkosi/blob/2c45d0effb1871750a2e9f897510d2745cb6d6b9/mkosi/__init__.py#L3489
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkosi/archive.py`
Content:
```
1 # SPDX-License-Identifier: LGPL-2.1+
2
3 import os
4 from collections.abc import Iterable
5 from pathlib import Path
6 from typing import Optional
7
8 from mkosi.log import log_step
9 from mkosi.run import bwrap, finalize_passwd_mounts
10 from mkosi.util import tar_binary
11
12
13 def tar_exclude_apivfs_tmp() -> list[str]:
14 return [
15 "--exclude", "./dev/*",
16 "--exclude", "./proc/*",
17 "--exclude", "./sys/*",
18 "--exclude", "./tmp/*",
19 "--exclude", "./run/*",
20 "--exclude", "./var/tmp/*",
21 ]
22
23
24 def make_tar(src: Path, dst: Path) -> None:
25 log_step(f"Creating tar archive {dst}…")
26 bwrap(
27 [
28 tar_binary(),
29 "--create",
30 "--file", dst,
31 "--directory", src,
32 "--acls",
33 "--selinux",
34 "--xattrs",
35 "--sparse",
36 "--force-local",
37 *tar_exclude_apivfs_tmp(),
38 ".",
39 ],
40 # Make sure tar uses user/group information from the root directory instead of the host.
41 options=finalize_passwd_mounts(src) if (src / "etc/passwd").exists() else [],
42 )
43
44
45 def extract_tar(src: Path, dst: Path, log: bool = True) -> None:
46 if log:
47 log_step(f"Extracting tar archive {src}…")
48 bwrap(
49 [
50 tar_binary(),
51 "--extract",
52 "--file", src,
53 "--directory", dst,
54 "--keep-directory-symlink",
55 "--no-overwrite-dir",
56 "--same-permissions",
57 "--same-owner" if (dst / "etc/passwd").exists() else "--numeric-owner",
58 "--same-order",
59 "--acls",
60 "--selinux",
61 "--xattrs",
62 "--force-local",
63 *tar_exclude_apivfs_tmp(),
64 ],
65 # Make sure tar uses user/group information from the root directory instead of the host.
66 options=finalize_passwd_mounts(dst) if (dst / "etc/passwd").exists() else [],
67 )
68
69
70 def make_cpio(src: Path, dst: Path, files: Optional[Iterable[Path]] = None) -> None:
71 if not files:
72 files = src.rglob("*")
73
74 log_step(f"Creating cpio archive {dst}…")
75 bwrap(
76 [
77 "cpio",
78 "--create",
79 "--reproducible",
80 "--null",
81 "--format=newc",
82 "--quiet",
83 "--directory", src,
84 "-O", dst,
85 ],
86 input="\0".join(os.fspath(f.relative_to(src)) for f in files),
87 # Make sure tar uses user/group information from the root directory instead of the host.
88 options=finalize_passwd_mounts(dst),
89 )
90
```
Path: `mkosi/util.py`
Content:
```
1 # SPDX-License-Identifier: LGPL-2.1+
2
3 import ast
4 import contextlib
5 import copy
6 import enum
7 import errno
8 import fcntl
9 import functools
10 import importlib
11 import itertools
12 import logging
13 import os
14 import pwd
15 import re
16 import resource
17 import shutil
18 import stat
19 import sys
20 import tempfile
21 from collections.abc import Iterable, Iterator, Mapping, Sequence
22 from pathlib import Path
23 from typing import Any, Callable, Optional, TypeVar
24
25 T = TypeVar("T")
26 V = TypeVar("V")
27
28
29 def dictify(f: Callable[..., Iterator[tuple[T, V]]]) -> Callable[..., dict[T, V]]:
30 def wrapper(*args: Any, **kwargs: Any) -> dict[T, V]:
31 return dict(f(*args, **kwargs))
32
33 return functools.update_wrapper(wrapper, f)
34
35
36 @dictify
37 def read_os_release() -> Iterator[tuple[str, str]]:
38 try:
39 filename = "/etc/os-release"
40 f = open(filename)
41 except FileNotFoundError:
42 filename = "/usr/lib/os-release"
43 f = open(filename)
44
45 with f:
46 for line_number, line in enumerate(f, start=1):
47 line = line.rstrip()
48 if not line or line.startswith("#"):
49 continue
50 if (m := re.match(r"([A-Z][A-Z_0-9]+)=(.*)", line)):
51 name, val = m.groups()
52 if val and val[0] in "\"'":
53 val = ast.literal_eval(val)
54 yield name, val
55 else:
56 print(f"{filename}:{line_number}: bad line {line!r}", file=sys.stderr)
57
58
59 def format_rlimit(rlimit: int) -> str:
60 limits = resource.getrlimit(rlimit)
61 soft = "infinity" if limits[0] == resource.RLIM_INFINITY else str(limits[0])
62 hard = "infinity" if limits[1] == resource.RLIM_INFINITY else str(limits[1])
63 return f"{soft}:{hard}"
64
65
66 def sort_packages(packages: Iterable[str]) -> list[str]:
67 """Sorts packages: normal first, paths second, conditional third"""
68
69 m = {"(": 2, "/": 1}
70 sort = lambda name: (m.get(name[0], 0), name)
71 return sorted(packages, key=sort)
72
73
74 def flatten(lists: Iterable[Iterable[T]]) -> list[T]:
75 """Flatten a sequence of sequences into a single list."""
76 return list(itertools.chain.from_iterable(lists))
77
78
79 class InvokingUser:
80 @staticmethod
81 def _uid_from_env() -> Optional[int]:
82 uid = os.getenv("SUDO_UID") or os.getenv("PKEXEC_UID")
83 return int(uid) if uid is not None else None
84
85 @classmethod
86 def uid(cls) -> int:
87 return cls._uid_from_env() or os.getuid()
88
89 @classmethod
90 def uid_gid(cls) -> tuple[int, int]:
91 if (uid := cls._uid_from_env()) is not None:
92 gid = int(os.getenv("SUDO_GID", pwd.getpwuid(uid).pw_gid))
93 return uid, gid
94 return os.getuid(), os.getgid()
95
96 @classmethod
97 def name(cls) -> str:
98 return pwd.getpwuid(cls.uid()).pw_name
99
100 @classmethod
101 def home(cls) -> Path:
102 return Path(f"~{cls.name()}").expanduser()
103
104 @classmethod
105 def is_running_user(cls) -> bool:
106 return cls.uid() == os.getuid()
107
108
109 @contextlib.contextmanager
110 def chdir(directory: Path) -> Iterator[None]:
111 old = Path.cwd()
112
113 if old == directory:
114 yield
115 return
116
117 try:
118 os.chdir(directory)
119 yield
120 finally:
121 os.chdir(old)
122
123
124 def qemu_check_kvm_support(log: bool) -> bool:
125 # some CI runners may present a non-working KVM device
126 try:
127 os.close(os.open("/dev/kvm", os.O_RDWR|os.O_CLOEXEC))
128 except OSError as e:
129 if e.errno == errno.ENOENT:
130 if log:
131 logging.warning("/dev/kvm not found. Not using KVM acceleration.")
132 return False
133 elif e.errno in (errno.EPERM, errno.EACCES):
134 if log:
135 logging.warning("Permission denied to access /dev/kvm. Not using KVM acceleration")
136 return False
137
138 raise e
139
140 return True
141
142
143 def qemu_check_vsock_support(log: bool) -> bool:
144 try:
145 os.close(os.open("/dev/vhost-vsock", os.O_RDWR|os.O_CLOEXEC))
146 except OSError as e:
147 if e.errno == errno.ENOENT:
148 if log:
149 logging.warning("/dev/vhost-vsock not found. Not adding a vsock device to the virtual machine.")
150 return False
151 elif e.errno in (errno.EPERM, errno.EACCES):
152 if log:
153 logging.warning("Permission denied to access /dev/vhost-vsock. Not adding a vsock device to the virtual machine.")
154 return False
155
156 raise e
157
158 return True
159
160
161 def format_bytes(num_bytes: int) -> str:
162 if num_bytes >= 1024**3:
163 return f"{num_bytes/1024**3 :0.1f}G"
164 if num_bytes >= 1024**2:
165 return f"{num_bytes/1024**2 :0.1f}M"
166 if num_bytes >= 1024:
167 return f"{num_bytes/1024 :0.1f}K"
168
169 return f"{num_bytes}B"
170
171
172 def make_executable(path: Path) -> None:
173 st = path.stat()
174 os.chmod(path, st.st_mode | stat.S_IEXEC)
175
176
177 def try_import(module: str) -> None:
178 try:
179 importlib.import_module(module)
180 except ModuleNotFoundError:
181 pass
182
183
184 @contextlib.contextmanager
185 def flock(path: Path) -> Iterator[int]:
186 fd = os.open(path, os.O_CLOEXEC|os.O_RDONLY)
187 try:
188 fcntl.fcntl(fd, fcntl.FD_CLOEXEC)
189 fcntl.flock(fd, fcntl.LOCK_EX)
190 yield fd
191 finally:
192 os.close(fd)
193
194
195 @contextlib.contextmanager
196 def scopedenv(env: Mapping[str, Any]) -> Iterator[None]:
197 old = copy.copy(os.environ)
198 os.environ |= env
199
200 # python caches the default temporary directory so when we might modify TMPDIR we have to make sure it
201 # gets recalculated (see https://docs.python.org/3/library/tempfile.html#tempfile.tempdir).
202 tempfile.tempdir = None
203
204 try:
205 yield
206 finally:
207 os.environ = old
208 tempfile.tempdir = None
209
210
211 class StrEnum(enum.Enum):
212 def __str__(self) -> str:
213 assert isinstance(self.value, str)
214 return self.value
215
216 # Used by enum.auto() to get the next value.
217 @staticmethod
218 def _generate_next_value_(name: str, start: int, count: int, last_values: Sequence[str]) -> str:
219 return name.replace("_", "-")
220
221 @classmethod
222 def values(cls) -> list[str]:
223 return list(map(str, cls))
224
225
226 def tar_binary() -> str:
227 # Some distros (Mandriva) install BSD tar as "tar", hence prefer
228 # "gtar" if it exists, which should be GNU tar wherever it exists.
229 # We are interested in exposing same behaviour everywhere hence
230 # it's preferable to use the same implementation of tar
231 # everywhere. In particular given the limited/different SELinux
232 # support in BSD tar and the different command line syntax
233 # compared to GNU tar.
234 return "gtar" if shutil.which("gtar") else "tar"
235
236
237 def one_zero(b: bool) -> str:
238 return "1" if b else "0"
239
240
241 @contextlib.contextmanager
242 def umask(mask: int) -> Iterator[None]:
243 old = os.umask(mask)
244 try:
245 yield
246 finally:
247 os.umask(old)
248
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mkosi/archive.py b/mkosi/archive.py
--- a/mkosi/archive.py
+++ b/mkosi/archive.py
@@ -1,13 +1,28 @@
# SPDX-License-Identifier: LGPL-2.1+
import os
+import shutil
from collections.abc import Iterable
from pathlib import Path
from typing import Optional
from mkosi.log import log_step
from mkosi.run import bwrap, finalize_passwd_mounts
-from mkosi.util import tar_binary
+
+
+def tar_binary() -> str:
+ # Some distros (Mandriva) install BSD tar as "tar", hence prefer
+ # "gtar" if it exists, which should be GNU tar wherever it exists.
+ # We are interested in exposing same behaviour everywhere hence
+ # it's preferable to use the same implementation of tar
+ # everywhere. In particular given the limited/different SELinux
+ # support in BSD tar and the different command line syntax
+ # compared to GNU tar.
+ return "gtar" if shutil.which("gtar") else "tar"
+
+
+def cpio_binary() -> str:
+ return "gcpio" if shutil.which("gcpio") else "cpio"
def tar_exclude_apivfs_tmp() -> list[str]:
@@ -74,7 +89,7 @@
log_step(f"Creating cpio archive {dst}…")
bwrap(
[
- "cpio",
+ cpio_binary(),
"--create",
"--reproducible",
"--null",
diff --git a/mkosi/util.py b/mkosi/util.py
--- a/mkosi/util.py
+++ b/mkosi/util.py
@@ -14,7 +14,6 @@
import pwd
import re
import resource
-import shutil
import stat
import sys
import tempfile
@@ -223,17 +222,6 @@
return list(map(str, cls))
-def tar_binary() -> str:
- # Some distros (Mandriva) install BSD tar as "tar", hence prefer
- # "gtar" if it exists, which should be GNU tar wherever it exists.
- # We are interested in exposing same behaviour everywhere hence
- # it's preferable to use the same implementation of tar
- # everywhere. In particular given the limited/different SELinux
- # support in BSD tar and the different command line syntax
- # compared to GNU tar.
- return "gtar" if shutil.which("gtar") else "tar"
-
-
def one_zero(b: bool) -> str:
return "1" if b else "0"
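In short, the patch applies the existing GNU-tar preference to cpio as well, so that the GNU-only `--reproducible` flag is only ever passed to a GNU implementation. Below is a minimal standalone sketch of the two fallback helpers (taken from the diff above; running them outside mkosi is purely illustrative):

```python
import shutil

def tar_binary() -> str:
    # Prefer GNU tar ("gtar") when installed; otherwise assume plain "tar" is GNU tar.
    return "gtar" if shutil.which("gtar") else "tar"

def cpio_binary() -> str:
    # Same pattern for cpio: GNU cpio ("gcpio") supports --reproducible, BSD cpio does not.
    return "gcpio" if shutil.which("gcpio") else "cpio"

if __name__ == "__main__":
    print(tar_binary(), cpio_binary())
```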
|
{"golden_diff": "diff --git a/mkosi/archive.py b/mkosi/archive.py\n--- a/mkosi/archive.py\n+++ b/mkosi/archive.py\n@@ -1,13 +1,28 @@\n # SPDX-License-Identifier: LGPL-2.1+\n \n import os\n+import shutil\n from collections.abc import Iterable\n from pathlib import Path\n from typing import Optional\n \n from mkosi.log import log_step\n from mkosi.run import bwrap, finalize_passwd_mounts\n-from mkosi.util import tar_binary\n+\n+\n+def tar_binary() -> str:\n+ # Some distros (Mandriva) install BSD tar as \"tar\", hence prefer\n+ # \"gtar\" if it exists, which should be GNU tar wherever it exists.\n+ # We are interested in exposing same behaviour everywhere hence\n+ # it's preferable to use the same implementation of tar\n+ # everywhere. In particular given the limited/different SELinux\n+ # support in BSD tar and the different command line syntax\n+ # compared to GNU tar.\n+ return \"gtar\" if shutil.which(\"gtar\") else \"tar\"\n+\n+\n+def cpio_binary() -> str:\n+ return \"gcpio\" if shutil.which(\"gcpio\") else \"cpio\"\n \n \n def tar_exclude_apivfs_tmp() -> list[str]:\n@@ -74,7 +89,7 @@\n log_step(f\"Creating cpio archive {dst}\u2026\")\n bwrap(\n [\n- \"cpio\",\n+ cpio_binary(),\n \"--create\",\n \"--reproducible\",\n \"--null\",\ndiff --git a/mkosi/util.py b/mkosi/util.py\n--- a/mkosi/util.py\n+++ b/mkosi/util.py\n@@ -14,7 +14,6 @@\n import pwd\n import re\n import resource\n-import shutil\n import stat\n import sys\n import tempfile\n@@ -223,17 +222,6 @@\n return list(map(str, cls))\n \n \n-def tar_binary() -> str:\n- # Some distros (Mandriva) install BSD tar as \"tar\", hence prefer\n- # \"gtar\" if it exists, which should be GNU tar wherever it exists.\n- # We are interested in exposing same behaviour everywhere hence\n- # it's preferable to use the same implementation of tar\n- # everywhere. 
In particular given the limited/different SELinux\n- # support in BSD tar and the different command line syntax\n- # compared to GNU tar.\n- return \"gtar\" if shutil.which(\"gtar\") else \"tar\"\n-\n-\n def one_zero(b: bool) -> str:\n return \"1\" if b else \"0\"\n", "issue": "Support libarchive cpio\nHi,\r\n\r\nplease support libarchive cpio(bsdcpio), which does not have `--reproducible`\r\n\r\nhttps://lists.gnu.org/archive/html/bug-cpio/2014-11/msg00000.html\r\n\r\nhttps://github.com/systemd/mkosi/blob/2c45d0effb1871750a2e9f897510d2745cb6d6b9/mkosi/__init__.py#L3489\n", "before_files": [{"content": "# SPDX-License-Identifier: LGPL-2.1+\n\nimport os\nfrom collections.abc import Iterable\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom mkosi.log import log_step\nfrom mkosi.run import bwrap, finalize_passwd_mounts\nfrom mkosi.util import tar_binary\n\n\ndef tar_exclude_apivfs_tmp() -> list[str]:\n return [\n \"--exclude\", \"./dev/*\",\n \"--exclude\", \"./proc/*\",\n \"--exclude\", \"./sys/*\",\n \"--exclude\", \"./tmp/*\",\n \"--exclude\", \"./run/*\",\n \"--exclude\", \"./var/tmp/*\",\n ]\n\n\ndef make_tar(src: Path, dst: Path) -> None:\n log_step(f\"Creating tar archive {dst}\u2026\")\n bwrap(\n [\n tar_binary(),\n \"--create\",\n \"--file\", dst,\n \"--directory\", src,\n \"--acls\",\n \"--selinux\",\n \"--xattrs\",\n \"--sparse\",\n \"--force-local\",\n *tar_exclude_apivfs_tmp(),\n \".\",\n ],\n # Make sure tar uses user/group information from the root directory instead of the host.\n options=finalize_passwd_mounts(src) if (src / \"etc/passwd\").exists() else [],\n )\n\n\ndef extract_tar(src: Path, dst: Path, log: bool = True) -> None:\n if log:\n log_step(f\"Extracting tar archive {src}\u2026\")\n bwrap(\n [\n tar_binary(),\n \"--extract\",\n \"--file\", src,\n \"--directory\", dst,\n \"--keep-directory-symlink\",\n \"--no-overwrite-dir\",\n \"--same-permissions\",\n \"--same-owner\" if (dst / \"etc/passwd\").exists() else \"--numeric-owner\",\n \"--same-order\",\n \"--acls\",\n \"--selinux\",\n \"--xattrs\",\n \"--force-local\",\n *tar_exclude_apivfs_tmp(),\n ],\n # Make sure tar uses user/group information from the root directory instead of the host.\n options=finalize_passwd_mounts(dst) if (dst / \"etc/passwd\").exists() else [],\n )\n\n\ndef make_cpio(src: Path, dst: Path, files: Optional[Iterable[Path]] = None) -> None:\n if not files:\n files = src.rglob(\"*\")\n\n log_step(f\"Creating cpio archive {dst}\u2026\")\n bwrap(\n [\n \"cpio\",\n \"--create\",\n \"--reproducible\",\n \"--null\",\n \"--format=newc\",\n \"--quiet\",\n \"--directory\", src,\n \"-O\", dst,\n ],\n input=\"\\0\".join(os.fspath(f.relative_to(src)) for f in files),\n # Make sure tar uses user/group information from the root directory instead of the host.\n options=finalize_passwd_mounts(dst),\n )\n", "path": "mkosi/archive.py"}, {"content": "# SPDX-License-Identifier: LGPL-2.1+\n\nimport ast\nimport contextlib\nimport copy\nimport enum\nimport errno\nimport fcntl\nimport functools\nimport importlib\nimport itertools\nimport logging\nimport os\nimport pwd\nimport re\nimport resource\nimport shutil\nimport stat\nimport sys\nimport tempfile\nfrom collections.abc import Iterable, Iterator, Mapping, Sequence\nfrom pathlib import Path\nfrom typing import Any, Callable, Optional, TypeVar\n\nT = TypeVar(\"T\")\nV = TypeVar(\"V\")\n\n\ndef dictify(f: Callable[..., Iterator[tuple[T, V]]]) -> Callable[..., dict[T, V]]:\n def wrapper(*args: Any, **kwargs: Any) -> dict[T, V]:\n return dict(f(*args, 
**kwargs))\n\n return functools.update_wrapper(wrapper, f)\n\n\n@dictify\ndef read_os_release() -> Iterator[tuple[str, str]]:\n try:\n filename = \"/etc/os-release\"\n f = open(filename)\n except FileNotFoundError:\n filename = \"/usr/lib/os-release\"\n f = open(filename)\n\n with f:\n for line_number, line in enumerate(f, start=1):\n line = line.rstrip()\n if not line or line.startswith(\"#\"):\n continue\n if (m := re.match(r\"([A-Z][A-Z_0-9]+)=(.*)\", line)):\n name, val = m.groups()\n if val and val[0] in \"\\\"'\":\n val = ast.literal_eval(val)\n yield name, val\n else:\n print(f\"{filename}:{line_number}: bad line {line!r}\", file=sys.stderr)\n\n\ndef format_rlimit(rlimit: int) -> str:\n limits = resource.getrlimit(rlimit)\n soft = \"infinity\" if limits[0] == resource.RLIM_INFINITY else str(limits[0])\n hard = \"infinity\" if limits[1] == resource.RLIM_INFINITY else str(limits[1])\n return f\"{soft}:{hard}\"\n\n\ndef sort_packages(packages: Iterable[str]) -> list[str]:\n \"\"\"Sorts packages: normal first, paths second, conditional third\"\"\"\n\n m = {\"(\": 2, \"/\": 1}\n sort = lambda name: (m.get(name[0], 0), name)\n return sorted(packages, key=sort)\n\n\ndef flatten(lists: Iterable[Iterable[T]]) -> list[T]:\n \"\"\"Flatten a sequence of sequences into a single list.\"\"\"\n return list(itertools.chain.from_iterable(lists))\n\n\nclass InvokingUser:\n @staticmethod\n def _uid_from_env() -> Optional[int]:\n uid = os.getenv(\"SUDO_UID\") or os.getenv(\"PKEXEC_UID\")\n return int(uid) if uid is not None else None\n\n @classmethod\n def uid(cls) -> int:\n return cls._uid_from_env() or os.getuid()\n\n @classmethod\n def uid_gid(cls) -> tuple[int, int]:\n if (uid := cls._uid_from_env()) is not None:\n gid = int(os.getenv(\"SUDO_GID\", pwd.getpwuid(uid).pw_gid))\n return uid, gid\n return os.getuid(), os.getgid()\n\n @classmethod\n def name(cls) -> str:\n return pwd.getpwuid(cls.uid()).pw_name\n\n @classmethod\n def home(cls) -> Path:\n return Path(f\"~{cls.name()}\").expanduser()\n\n @classmethod\n def is_running_user(cls) -> bool:\n return cls.uid() == os.getuid()\n\n\[email protected]\ndef chdir(directory: Path) -> Iterator[None]:\n old = Path.cwd()\n\n if old == directory:\n yield\n return\n\n try:\n os.chdir(directory)\n yield\n finally:\n os.chdir(old)\n\n\ndef qemu_check_kvm_support(log: bool) -> bool:\n # some CI runners may present a non-working KVM device\n try:\n os.close(os.open(\"/dev/kvm\", os.O_RDWR|os.O_CLOEXEC))\n except OSError as e:\n if e.errno == errno.ENOENT:\n if log:\n logging.warning(\"/dev/kvm not found. Not using KVM acceleration.\")\n return False\n elif e.errno in (errno.EPERM, errno.EACCES):\n if log:\n logging.warning(\"Permission denied to access /dev/kvm. Not using KVM acceleration\")\n return False\n\n raise e\n\n return True\n\n\ndef qemu_check_vsock_support(log: bool) -> bool:\n try:\n os.close(os.open(\"/dev/vhost-vsock\", os.O_RDWR|os.O_CLOEXEC))\n except OSError as e:\n if e.errno == errno.ENOENT:\n if log:\n logging.warning(\"/dev/vhost-vsock not found. Not adding a vsock device to the virtual machine.\")\n return False\n elif e.errno in (errno.EPERM, errno.EACCES):\n if log:\n logging.warning(\"Permission denied to access /dev/vhost-vsock. 
Not adding a vsock device to the virtual machine.\")\n return False\n\n raise e\n\n return True\n\n\ndef format_bytes(num_bytes: int) -> str:\n if num_bytes >= 1024**3:\n return f\"{num_bytes/1024**3 :0.1f}G\"\n if num_bytes >= 1024**2:\n return f\"{num_bytes/1024**2 :0.1f}M\"\n if num_bytes >= 1024:\n return f\"{num_bytes/1024 :0.1f}K\"\n\n return f\"{num_bytes}B\"\n\n\ndef make_executable(path: Path) -> None:\n st = path.stat()\n os.chmod(path, st.st_mode | stat.S_IEXEC)\n\n\ndef try_import(module: str) -> None:\n try:\n importlib.import_module(module)\n except ModuleNotFoundError:\n pass\n\n\[email protected]\ndef flock(path: Path) -> Iterator[int]:\n fd = os.open(path, os.O_CLOEXEC|os.O_RDONLY)\n try:\n fcntl.fcntl(fd, fcntl.FD_CLOEXEC)\n fcntl.flock(fd, fcntl.LOCK_EX)\n yield fd\n finally:\n os.close(fd)\n\n\[email protected]\ndef scopedenv(env: Mapping[str, Any]) -> Iterator[None]:\n old = copy.copy(os.environ)\n os.environ |= env\n\n # python caches the default temporary directory so when we might modify TMPDIR we have to make sure it\n # gets recalculated (see https://docs.python.org/3/library/tempfile.html#tempfile.tempdir).\n tempfile.tempdir = None\n\n try:\n yield\n finally:\n os.environ = old\n tempfile.tempdir = None\n\n\nclass StrEnum(enum.Enum):\n def __str__(self) -> str:\n assert isinstance(self.value, str)\n return self.value\n\n # Used by enum.auto() to get the next value.\n @staticmethod\n def _generate_next_value_(name: str, start: int, count: int, last_values: Sequence[str]) -> str:\n return name.replace(\"_\", \"-\")\n\n @classmethod\n def values(cls) -> list[str]:\n return list(map(str, cls))\n\n\ndef tar_binary() -> str:\n # Some distros (Mandriva) install BSD tar as \"tar\", hence prefer\n # \"gtar\" if it exists, which should be GNU tar wherever it exists.\n # We are interested in exposing same behaviour everywhere hence\n # it's preferable to use the same implementation of tar\n # everywhere. In particular given the limited/different SELinux\n # support in BSD tar and the different command line syntax\n # compared to GNU tar.\n return \"gtar\" if shutil.which(\"gtar\") else \"tar\"\n\n\ndef one_zero(b: bool) -> str:\n return \"1\" if b else \"0\"\n\n\[email protected]\ndef umask(mask: int) -> Iterator[None]:\n old = os.umask(mask)\n try:\n yield\n finally:\n os.umask(old)\n", "path": "mkosi/util.py"}], "after_files": [{"content": "# SPDX-License-Identifier: LGPL-2.1+\n\nimport os\nimport shutil\nfrom collections.abc import Iterable\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom mkosi.log import log_step\nfrom mkosi.run import bwrap, finalize_passwd_mounts\n\n\ndef tar_binary() -> str:\n # Some distros (Mandriva) install BSD tar as \"tar\", hence prefer\n # \"gtar\" if it exists, which should be GNU tar wherever it exists.\n # We are interested in exposing same behaviour everywhere hence\n # it's preferable to use the same implementation of tar\n # everywhere. 
In particular given the limited/different SELinux\n # support in BSD tar and the different command line syntax\n # compared to GNU tar.\n return \"gtar\" if shutil.which(\"gtar\") else \"tar\"\n\n\ndef cpio_binary() -> str:\n return \"gcpio\" if shutil.which(\"gcpio\") else \"cpio\"\n\n\ndef tar_exclude_apivfs_tmp() -> list[str]:\n return [\n \"--exclude\", \"./dev/*\",\n \"--exclude\", \"./proc/*\",\n \"--exclude\", \"./sys/*\",\n \"--exclude\", \"./tmp/*\",\n \"--exclude\", \"./run/*\",\n \"--exclude\", \"./var/tmp/*\",\n ]\n\n\ndef make_tar(src: Path, dst: Path) -> None:\n log_step(f\"Creating tar archive {dst}\u2026\")\n bwrap(\n [\n tar_binary(),\n \"--create\",\n \"--file\", dst,\n \"--directory\", src,\n \"--acls\",\n \"--selinux\",\n \"--xattrs\",\n \"--sparse\",\n \"--force-local\",\n *tar_exclude_apivfs_tmp(),\n \".\",\n ],\n # Make sure tar uses user/group information from the root directory instead of the host.\n options=finalize_passwd_mounts(src) if (src / \"etc/passwd\").exists() else [],\n )\n\n\ndef extract_tar(src: Path, dst: Path, log: bool = True) -> None:\n if log:\n log_step(f\"Extracting tar archive {src}\u2026\")\n bwrap(\n [\n tar_binary(),\n \"--extract\",\n \"--file\", src,\n \"--directory\", dst,\n \"--keep-directory-symlink\",\n \"--no-overwrite-dir\",\n \"--same-permissions\",\n \"--same-owner\" if (dst / \"etc/passwd\").exists() else \"--numeric-owner\",\n \"--same-order\",\n \"--acls\",\n \"--selinux\",\n \"--xattrs\",\n \"--force-local\",\n *tar_exclude_apivfs_tmp(),\n ],\n # Make sure tar uses user/group information from the root directory instead of the host.\n options=finalize_passwd_mounts(dst) if (dst / \"etc/passwd\").exists() else [],\n )\n\n\ndef make_cpio(src: Path, dst: Path, files: Optional[Iterable[Path]] = None) -> None:\n if not files:\n files = src.rglob(\"*\")\n\n log_step(f\"Creating cpio archive {dst}\u2026\")\n bwrap(\n [\n cpio_binary(),\n \"--create\",\n \"--reproducible\",\n \"--null\",\n \"--format=newc\",\n \"--quiet\",\n \"--directory\", src,\n \"-O\", dst,\n ],\n input=\"\\0\".join(os.fspath(f.relative_to(src)) for f in files),\n # Make sure tar uses user/group information from the root directory instead of the host.\n options=finalize_passwd_mounts(dst),\n )\n", "path": "mkosi/archive.py"}, {"content": "# SPDX-License-Identifier: LGPL-2.1+\n\nimport ast\nimport contextlib\nimport copy\nimport enum\nimport errno\nimport fcntl\nimport functools\nimport importlib\nimport itertools\nimport logging\nimport os\nimport pwd\nimport re\nimport resource\nimport stat\nimport sys\nimport tempfile\nfrom collections.abc import Iterable, Iterator, Mapping, Sequence\nfrom pathlib import Path\nfrom typing import Any, Callable, Optional, TypeVar\n\nT = TypeVar(\"T\")\nV = TypeVar(\"V\")\n\n\ndef dictify(f: Callable[..., Iterator[tuple[T, V]]]) -> Callable[..., dict[T, V]]:\n def wrapper(*args: Any, **kwargs: Any) -> dict[T, V]:\n return dict(f(*args, **kwargs))\n\n return functools.update_wrapper(wrapper, f)\n\n\n@dictify\ndef read_os_release() -> Iterator[tuple[str, str]]:\n try:\n filename = \"/etc/os-release\"\n f = open(filename)\n except FileNotFoundError:\n filename = \"/usr/lib/os-release\"\n f = open(filename)\n\n with f:\n for line_number, line in enumerate(f, start=1):\n line = line.rstrip()\n if not line or line.startswith(\"#\"):\n continue\n if (m := re.match(r\"([A-Z][A-Z_0-9]+)=(.*)\", line)):\n name, val = m.groups()\n if val and val[0] in \"\\\"'\":\n val = ast.literal_eval(val)\n yield name, val\n else:\n 
print(f\"{filename}:{line_number}: bad line {line!r}\", file=sys.stderr)\n\n\ndef format_rlimit(rlimit: int) -> str:\n limits = resource.getrlimit(rlimit)\n soft = \"infinity\" if limits[0] == resource.RLIM_INFINITY else str(limits[0])\n hard = \"infinity\" if limits[1] == resource.RLIM_INFINITY else str(limits[1])\n return f\"{soft}:{hard}\"\n\n\ndef sort_packages(packages: Iterable[str]) -> list[str]:\n \"\"\"Sorts packages: normal first, paths second, conditional third\"\"\"\n\n m = {\"(\": 2, \"/\": 1}\n sort = lambda name: (m.get(name[0], 0), name)\n return sorted(packages, key=sort)\n\n\ndef flatten(lists: Iterable[Iterable[T]]) -> list[T]:\n \"\"\"Flatten a sequence of sequences into a single list.\"\"\"\n return list(itertools.chain.from_iterable(lists))\n\n\nclass InvokingUser:\n @staticmethod\n def _uid_from_env() -> Optional[int]:\n uid = os.getenv(\"SUDO_UID\") or os.getenv(\"PKEXEC_UID\")\n return int(uid) if uid is not None else None\n\n @classmethod\n def uid(cls) -> int:\n return cls._uid_from_env() or os.getuid()\n\n @classmethod\n def uid_gid(cls) -> tuple[int, int]:\n if (uid := cls._uid_from_env()) is not None:\n gid = int(os.getenv(\"SUDO_GID\", pwd.getpwuid(uid).pw_gid))\n return uid, gid\n return os.getuid(), os.getgid()\n\n @classmethod\n def name(cls) -> str:\n return pwd.getpwuid(cls.uid()).pw_name\n\n @classmethod\n def home(cls) -> Path:\n return Path(f\"~{cls.name()}\").expanduser()\n\n @classmethod\n def is_running_user(cls) -> bool:\n return cls.uid() == os.getuid()\n\n\[email protected]\ndef chdir(directory: Path) -> Iterator[None]:\n old = Path.cwd()\n\n if old == directory:\n yield\n return\n\n try:\n os.chdir(directory)\n yield\n finally:\n os.chdir(old)\n\n\ndef qemu_check_kvm_support(log: bool) -> bool:\n # some CI runners may present a non-working KVM device\n try:\n os.close(os.open(\"/dev/kvm\", os.O_RDWR|os.O_CLOEXEC))\n except OSError as e:\n if e.errno == errno.ENOENT:\n if log:\n logging.warning(\"/dev/kvm not found. Not using KVM acceleration.\")\n return False\n elif e.errno in (errno.EPERM, errno.EACCES):\n if log:\n logging.warning(\"Permission denied to access /dev/kvm. Not using KVM acceleration\")\n return False\n\n raise e\n\n return True\n\n\ndef qemu_check_vsock_support(log: bool) -> bool:\n try:\n os.close(os.open(\"/dev/vhost-vsock\", os.O_RDWR|os.O_CLOEXEC))\n except OSError as e:\n if e.errno == errno.ENOENT:\n if log:\n logging.warning(\"/dev/vhost-vsock not found. Not adding a vsock device to the virtual machine.\")\n return False\n elif e.errno in (errno.EPERM, errno.EACCES):\n if log:\n logging.warning(\"Permission denied to access /dev/vhost-vsock. 
Not adding a vsock device to the virtual machine.\")\n return False\n\n raise e\n\n return True\n\n\ndef format_bytes(num_bytes: int) -> str:\n if num_bytes >= 1024**3:\n return f\"{num_bytes/1024**3 :0.1f}G\"\n if num_bytes >= 1024**2:\n return f\"{num_bytes/1024**2 :0.1f}M\"\n if num_bytes >= 1024:\n return f\"{num_bytes/1024 :0.1f}K\"\n\n return f\"{num_bytes}B\"\n\n\ndef make_executable(path: Path) -> None:\n st = path.stat()\n os.chmod(path, st.st_mode | stat.S_IEXEC)\n\n\ndef try_import(module: str) -> None:\n try:\n importlib.import_module(module)\n except ModuleNotFoundError:\n pass\n\n\[email protected]\ndef flock(path: Path) -> Iterator[int]:\n fd = os.open(path, os.O_CLOEXEC|os.O_RDONLY)\n try:\n fcntl.fcntl(fd, fcntl.FD_CLOEXEC)\n fcntl.flock(fd, fcntl.LOCK_EX)\n yield fd\n finally:\n os.close(fd)\n\n\[email protected]\ndef scopedenv(env: Mapping[str, Any]) -> Iterator[None]:\n old = copy.copy(os.environ)\n os.environ |= env\n\n # python caches the default temporary directory so when we might modify TMPDIR we have to make sure it\n # gets recalculated (see https://docs.python.org/3/library/tempfile.html#tempfile.tempdir).\n tempfile.tempdir = None\n\n try:\n yield\n finally:\n os.environ = old\n tempfile.tempdir = None\n\n\nclass StrEnum(enum.Enum):\n def __str__(self) -> str:\n assert isinstance(self.value, str)\n return self.value\n\n # Used by enum.auto() to get the next value.\n @staticmethod\n def _generate_next_value_(name: str, start: int, count: int, last_values: Sequence[str]) -> str:\n return name.replace(\"_\", \"-\")\n\n @classmethod\n def values(cls) -> list[str]:\n return list(map(str, cls))\n\n\ndef one_zero(b: bool) -> str:\n return \"1\" if b else \"0\"\n\n\[email protected]\ndef umask(mask: int) -> Iterator[None]:\n old = os.umask(mask)\n try:\n yield\n finally:\n os.umask(old)\n", "path": "mkosi/util.py"}]}
| 3,604 | 587 |
gh_patches_debug_5972
|
rasdani/github-patches
|
git_diff
|
ansible__ansible-40819
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ios_config incorrectly claims success when commands fail
##### ISSUE TYPE
<!--- Pick one below and delete the rest: -->
- Bug Report
##### COMPONENT NAME
<!--- Name of the module/plugin/task/feature -->
ios_config
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes below -->
```
[ansible@localhost cmdauthz]$ ansible --version
ansible 2.3.1.0
config file = /etc/ansible/ansible.cfg
configured module search path = Default w/o overrides
python version = 2.7.5 (default, Nov 6 2016, 00:28:07) [GCC 4.8.5 20150623 (Red Hat 4.8.5-11)]
```
##### CONFIGURATION
<!---
If using Ansible 2.4 or above, paste the results of "ansible-config dump --only-changed"
Otherwise, mention any settings you have changed/added/removed in ansible.cfg
(or using the ANSIBLE_* environment variables).
-->
ansible.cfg modification:
forks = 20
gathering = explicit
host_key_checking = false
timeout = 60
vault_password_file = ~/.ansible/vault-pass.txt
retry_files_enabled = false
##### OS / ENVIRONMENT
<!---
Mention the OS you are running Ansible from, and the OS you are
managing, or say "N/A" for anything that is not platform-specific.
Also mention the specific version of what you are trying to control,
e.g. if this is a network bug the version of firmware on the network device.
-->
AWS AMI CentOS7
```
[ansible@localhost cmdauthz]$ uname -a
Linux localhost.localdomain 3.10.0-514.26.2.el7.x86_64 #1 SMP Tue Jul 4 15:04:05 UTC 2017 x86_64 x86_64 x86_64 GNU/Linux
```
##### SUMMARY
<!--- Explain the problem briefly -->
There is no stdout from "ios_config". When you issue commands that are TACACS-unauthorized, ansible still reports "changed", comes back and reports success, and ignores the fact that they were rejected. I assume the module is ignoring all CLI output and only looking for the next config prompt to claim success. This makes it difficult to validate that unauthorized commands were rejected and authorized commands were accepted. Such a playbook is useful as an AAA security posture checker.
##### STEPS TO REPRODUCE
<!---
For bugs, show exactly how to reproduce the problem, using a minimal test-case.
For new features, show how the feature would be used.
-->
Task sub-list shown below.
<!--- Paste example playbooks or commands between quotes below -->
```
---
- name: "SYS >> Capture current username"
set_fact:
USER: "{{ PARAM_CREDS.username }}"
- name: "IOS >> {{ USER }}: Issue unauthorized conf commands"
ios_config:
provider: "{{ PARAM_CREDS }}"
match: none
commands:
- "{{ item }}"
register: OUTPUT
with_items: "{{ unauth_conf_cmds_t2 }}"
- debug:
var: OUTPUT
...
```
Relevant variables included.
```
---
unauth_conf_cmds_t2:
- "ip bgp new-format"
- "interface Loopback12345"
...
```
<!--- You can also paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- What did you expect to happen when running the steps above? -->
When `ios_config` determines that the commands being issued are new (a change), or if `match: none` is set, the module should attempt to issue the command in question. It should also collect any parser output, including `Command authorization failed.`, so that the playbook writer can perform checks against it. I would recommend returning `stdout` and `stdout_lines` in much the same way that ios_command works, for consistency.
Note that if you don't want to set up a TACACS server, using a `do` statement to run a show command from config mode would probably be a valid test to ensure output is being collected.
##### ACTUAL RESULTS
<!--- What actually happened? If possible run with extra verbosity (-vvvv) -->
`ios_config` returns correct fields per the documentation, but the lack of seeing CLI output makes it hard to discern why commands were rejected.
<!--- Paste verbatim command output between quotes below -->
```
[ansible@localhost cmdauthz]$ ansible-playbook cmdauthz-playbook.yml
PLAY [localhost] ************************************************************************************************************
TASK [SYS >> Define string match facts] *************************************************************************************
ok: [localhost]
PLAY [Verify AAA command authorization functionality] ***********************************************************************
TASK [SYS >> Capture current username] **************************************************************************************
ok: [APG_6010_PER]
TASK [IOS >> ansible2: Issue unauthorized conf commands] ********************************************************************
changed: [APG_6010_PER] => (item=ip bgp new-format)
changed: [APG_6010_PER] => (item=interface Loopback12345)
TASK [debug] ****************************************************************************************************************
ok: [APG_6010_PER] => {
"OUTPUT": {
"changed": true,
"msg": "All items completed",
"results": [
{
"_ansible_item_result": true,
"_ansible_no_log": false,
"_ansible_parsed": true,
"banners": {},
"changed": true,
"commands": [
"ip bgp new-format"
],
"invocation": {
"module_args": {
"after": null,
"auth_pass": null,
"authorize": null,
"backup": false,
"before": null,
"commands": [
"ip bgp new-format"
],
"config": null,
"defaults": false,
"force": false,
"host": null,
"lines": [
"ip bgp new-format"
],
"match": "none",
"multiline_delimiter": "@",
"parents": null,
"password": null,
"port": null,
"provider": {
"auth_pass": null,
"authorize": null,
"host": "APG_6010_PER",
"password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER",
"port": 3022,
"ssh_keyfile": null,
"timeout": 30,
"username": "ansible2"
},
"replace": "line",
"save": false,
"src": null,
"ssh_keyfile": null,
"timeout": null,
"username": null
}
},
"item": "ip bgp new-format",
"updates": [
"ip bgp new-format"
]
},
{
"_ansible_item_result": true,
"_ansible_no_log": false,
"_ansible_parsed": true,
"banners": {},
"changed": true,
"commands": [
"interface Loopback12345"
],
"invocation": {
"module_args": {
"after": null,
"auth_pass": null,
"authorize": null,
"backup": false,
"before": null,
"commands": [
"interface Loopback12345"
],
"config": null,
"defaults": false,
"force": false,
"host": null,
"lines": [
"interface Loopback12345"
],
"match": "none",
"multiline_delimiter": "@",
"parents": null,
"password": null,
"port": null,
"provider": {
"auth_pass": null,
"authorize": null,
"host": "APG_6010_PER",
"password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER",
"port": 3022,
"ssh_keyfile": null,
"timeout": 30,
"username": "ansible2"
},
"replace": "line",
"save": false,
"src": null,
"ssh_keyfile": null,
"timeout": null,
"username": null
}
},
"item": "interface Loopback12345",
"updates": [
"interface Loopback12345"
]
}
]
}
}
```
The output from manually logging into the router and issuing the commands is shown below. The commands are rejected and no changes are observed.
```
APG_6010_XPER#conf t
Enter configuration commands, one per line. End with CNTL/Z.
APG_6010_XPER(config)#ip bgp new-format
Command authorization failed.
APG_6010_XPER(config)#interface Loopback12345
Command authorization failed.
APG_6010_XPER(config)#end
APG_6010_XPER#show run | include Loopback12345|^ip_bgp
APG_6010_XPER#
APG_6010_XPER#show archive config differences nvram:startup-config
!Contextual Config Diffs:
!No changes were found
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/ansible/plugins/terminal/ios.py`
Content:
```
1 #
2 # (c) 2016 Red Hat Inc.
3 #
4 # This file is part of Ansible
5 #
6 # Ansible is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
10 #
11 # Ansible is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
18 #
19 from __future__ import (absolute_import, division, print_function)
20 __metaclass__ = type
21
22 import json
23 import re
24
25 from ansible.errors import AnsibleConnectionFailure
26 from ansible.module_utils._text import to_text, to_bytes
27 from ansible.plugins.terminal import TerminalBase
28
29
30 class TerminalModule(TerminalBase):
31
32 terminal_stdout_re = [
33 re.compile(br"[\r\n]?[\w\+\-\.:\/\[\]]+(?:\([^\)]+\)){0,3}(?:[>#]) ?$")
34 ]
35
36 terminal_stderr_re = [
37 re.compile(br"% ?Error"),
38 # re.compile(br"^% \w+", re.M),
39 re.compile(br"% ?Bad secret"),
40 re.compile(br"[\r\n%] Bad passwords"),
41 re.compile(br"invalid input", re.I),
42 re.compile(br"(?:incomplete|ambiguous) command", re.I),
43 re.compile(br"connection timed out", re.I),
44 re.compile(br"[^\r\n]+ not found"),
45 re.compile(br"'[^']' +returned error code: ?\d+"),
46 re.compile(br"Bad mask", re.I),
47 re.compile(br"% ?(\S+) ?overlaps with ?(\S+)", re.I),
48 re.compile(br"[%\S] ?Error: ?[\s]+", re.I),
49 re.compile(br"[%\S] ?Informational: ?[\s]+", re.I)
50 ]
51
52 def on_open_shell(self):
53 try:
54 for cmd in (b'terminal length 0', b'terminal width 512'):
55 self._exec_cli_command(cmd)
56 except AnsibleConnectionFailure:
57 raise AnsibleConnectionFailure('unable to set terminal parameters')
58
59 def on_become(self, passwd=None):
60 if self._get_prompt().endswith(b'#'):
61 return
62
63 cmd = {u'command': u'enable'}
64 if passwd:
65 # Note: python-3.5 cannot combine u"" and r"" together. Thus make
66 # an r string and use to_text to ensure it's text on both py2 and py3.
67 cmd[u'prompt'] = to_text(r"[\r\n]password: ?$", errors='surrogate_or_strict')
68 cmd[u'answer'] = passwd
69 cmd[u'prompt_retry_check'] = True
70 try:
71 self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
72 prompt = self._get_prompt()
73 if prompt is None or not prompt.endswith(b'#'):
74 raise AnsibleConnectionFailure('failed to elevate privilege to enable mode still at prompt [%s]' % prompt)
75 except AnsibleConnectionFailure as e:
76 prompt = self._get_prompt()
77 raise AnsibleConnectionFailure('unable to elevate privilege to enable mode, at prompt [%s] with error: %s' % (prompt, e.message))
78
79 def on_unbecome(self):
80 prompt = self._get_prompt()
81 if prompt is None:
82 # if prompt is None most likely the terminal is hung up at a prompt
83 return
84
85 if b'(config' in prompt:
86 self._exec_cli_command(b'end')
87 self._exec_cli_command(b'disable')
88
89 elif prompt.endswith(b'#'):
90 self._exec_cli_command(b'disable')
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/ansible/plugins/terminal/ios.py b/lib/ansible/plugins/terminal/ios.py
--- a/lib/ansible/plugins/terminal/ios.py
+++ b/lib/ansible/plugins/terminal/ios.py
@@ -46,7 +46,8 @@
re.compile(br"Bad mask", re.I),
re.compile(br"% ?(\S+) ?overlaps with ?(\S+)", re.I),
re.compile(br"[%\S] ?Error: ?[\s]+", re.I),
- re.compile(br"[%\S] ?Informational: ?[\s]+", re.I)
+ re.compile(br"[%\S] ?Informational: ?[\s]+", re.I),
+ re.compile(br"Command authorization failed")
]
def on_open_shell(self):
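The effect of the added pattern can be checked in isolation: once `Command authorization failed` is part of `terminal_stderr_re`, a device response containing that text is classified as an error instead of being silently accepted. A small illustrative check of the regex logic (not Ansible code; the sample response is taken from the router transcript in the issue):

```python
import re

# Subset of the stderr patterns from the plugin, including the newly added one.
terminal_stderr_re = [
    re.compile(br"% ?Error"),
    re.compile(br"invalid input", re.I),
    re.compile(br"Command authorization failed"),
]

response = b"ip bgp new-format\r\nCommand authorization failed.\r\n"
is_error = any(pattern.search(response) for pattern in terminal_stderr_re)
print(is_error)  # True: the failure is now detected instead of reported as "changed"
```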
|
{"golden_diff": "diff --git a/lib/ansible/plugins/terminal/ios.py b/lib/ansible/plugins/terminal/ios.py\n--- a/lib/ansible/plugins/terminal/ios.py\n+++ b/lib/ansible/plugins/terminal/ios.py\n@@ -46,7 +46,8 @@\n re.compile(br\"Bad mask\", re.I),\n re.compile(br\"% ?(\\S+) ?overlaps with ?(\\S+)\", re.I),\n re.compile(br\"[%\\S] ?Error: ?[\\s]+\", re.I),\n- re.compile(br\"[%\\S] ?Informational: ?[\\s]+\", re.I)\n+ re.compile(br\"[%\\S] ?Informational: ?[\\s]+\", re.I),\n+ re.compile(br\"Command authorization failed\")\n ]\n \n def on_open_shell(self):\n", "issue": "ios_config incorrectly claims success when commands fail\n##### ISSUE TYPE\r\n<!--- Pick one below and delete the rest: -->\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\n<!--- Name of the module/plugin/task/feature -->\r\nios_config\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \"ansible --version\" between quotes below -->\r\n```\r\n[ansible@localhost cmdauthz]$ ansible --version\r\nansible 2.3.1.0\r\n config file = /etc/ansible/ansible.cfg\r\n configured module search path = Default w/o overrides\r\n python version = 2.7.5 (default, Nov 6 2016, 00:28:07) [GCC 4.8.5 20150623 (Red Hat 4.8.5-11)]\r\n```\r\n\r\n##### CONFIGURATION\r\n<!---\r\nIf using Ansible 2.4 or above, paste the results of \"ansible-config dump --only-changed\"\r\n\r\nOtherwise, mention any settings you have changed/added/removed in ansible.cfg\r\n(or using the ANSIBLE_* environment variables).\r\n\r\n-->\r\nansible.cfg modification:\r\nforks = 20\r\ngathering = explicit\r\nhost_key_checking = false\r\ntimeout = 60\r\nvault_password_file = ~/.ansible/vault-pass.txt\r\nretry_files_enabled = false\r\n\r\n##### OS / ENVIRONMENT\r\n<!---\r\nMention the OS you are running Ansible from, and the OS you are\r\nmanaging, or say \"N/A\" for anything that is not platform-specific.\r\nAlso mention the specific version of what you are trying to control,\r\ne.g. if this is a network bug the version of firmware on the network device.\r\n-->\r\nAWS AMI CentOS7\r\n```\r\n[ansible@localhost cmdauthz]$ uname -a\r\nLinux localhost.localdomain 3.10.0-514.26.2.el7.x86_64 #1 SMP Tue Jul 4 15:04:05 UTC 2017 x86_64 x86_64 x86_64 GNU/Linux\r\n```\r\n\r\n##### SUMMARY\r\n<!--- Explain the problem briefly -->\r\nThere is no stdout from \"ios_config\". When you issue commands that are TACACS-unauthorized, ansible still reports \"changed\", comes back and reports success, and ignores the fact they were rejected. I assume the module is ignoring all CLI output and only looking for the next config prompt to claim success. This makes it difficult to validate that unauthorized commands were rejected and authorized commands were accepted. 
Such a playbook is useful as a AAA security posture checker.\r\n\r\n##### STEPS TO REPRODUCE\r\n<!---\r\nFor bugs, show exactly how to reproduce the problem, using a minimal test-case.\r\nFor new features, show how the feature would be used.\r\n-->\r\nTask sub-list shown below.\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```\r\n---\r\n- name: \"SYS >> Capture current username\"\r\n set_fact:\r\n USER: \"{{ PARAM_CREDS.username }}\"\r\n\r\n- name: \"IOS >> {{ USER }}: Issue unauthorized conf commands\"\r\n ios_config:\r\n provider: \"{{ PARAM_CREDS }}\"\r\n match: none\r\n commands:\r\n - \"{{ item }}\"\r\n register: OUTPUT\r\n with_items: \"{{ unauth_conf_cmds_t2 }}\"\r\n\r\n- debug:\r\n var: OUTPUT\r\n...\r\n```\r\n\r\nRelevant variables included.\r\n\r\n```\r\n---\r\nunauth_conf_cmds_t2:\r\n- \"ip bgp new-format\"\r\n- \"interface Loopback12345\"\r\n...\r\n```\r\n\r\n<!--- You can also paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- What did you expect to happen when running the steps above? -->\r\nWhen `ios_config` determines that the commands being issued are new (a change), of if, `match: none` is set, the module should attempt to issue command in question. It should also collect any parser output, to include `Command authorization failed.`, so that the playbook writer can perform checks against it. I would recommend returning `stdout` and `stdout_lines` in much the same way that ios_command works, for consistency.\r\n\r\nNote that if you don't want to set up a TACACS server, using a `do` statement to run a show command from config mode would probably be a valid test to ensure output is being collected.\r\n\r\n##### ACTUAL RESULTS\r\n<!--- What actually happened? If possible run with extra verbosity (-vvvv) -->\r\n`ios_config` returns correct fields per the documentation, but the lack of seeing CLI output makes it hard to discern why commands were rejected.\r\n\r\n<!--- Paste verbatim command output between quotes below -->\r\n```\r\n[ansible@localhost cmdauthz]$ ansible-playbook cmdauthz-playbook.yml \r\n\r\nPLAY [localhost] ************************************************************************************************************\r\n\r\nTASK [SYS >> Define string match facts] *************************************************************************************\r\nok: [localhost]\r\n\r\nPLAY [Verify AAA command authorization functionality] ***********************************************************************\r\n\r\nTASK [SYS >> Capture current username] **************************************************************************************\r\nok: [APG_6010_PER]\r\n\r\nTASK [IOS >> ansible2: Issue unauthorized conf commands] ********************************************************************\r\nchanged: [APG_6010_PER] => (item=ip bgp new-format)\r\nchanged: [APG_6010_PER] => (item=interface Loopback12345)\r\n\r\nTASK [debug] ****************************************************************************************************************\r\nok: [APG_6010_PER] => {\r\n \"OUTPUT\": {\r\n \"changed\": true, \r\n \"msg\": \"All items completed\", \r\n \"results\": [\r\n {\r\n \"_ansible_item_result\": true, \r\n \"_ansible_no_log\": false, \r\n \"_ansible_parsed\": true, \r\n \"banners\": {}, \r\n \"changed\": true, \r\n \"commands\": [\r\n \"ip bgp new-format\"\r\n ], \r\n \"invocation\": {\r\n \"module_args\": {\r\n \"after\": null, \r\n \"auth_pass\": null, \r\n \"authorize\": null, \r\n \"backup\": false, \r\n 
\"before\": null, \r\n \"commands\": [\r\n \"ip bgp new-format\"\r\n ], \r\n \"config\": null, \r\n \"defaults\": false, \r\n \"force\": false, \r\n \"host\": null, \r\n \"lines\": [\r\n \"ip bgp new-format\"\r\n ], \r\n \"match\": \"none\", \r\n \"multiline_delimiter\": \"@\", \r\n \"parents\": null, \r\n \"password\": null, \r\n \"port\": null, \r\n \"provider\": {\r\n \"auth_pass\": null, \r\n \"authorize\": null, \r\n \"host\": \"APG_6010_PER\", \r\n \"password\": \"VALUE_SPECIFIED_IN_NO_LOG_PARAMETER\", \r\n \"port\": 3022, \r\n \"ssh_keyfile\": null, \r\n \"timeout\": 30, \r\n \"username\": \"ansible2\"\r\n }, \r\n \"replace\": \"line\", \r\n \"save\": false, \r\n \"src\": null, \r\n \"ssh_keyfile\": null, \r\n \"timeout\": null, \r\n \"username\": null\r\n }\r\n }, \r\n \"item\": \"ip bgp new-format\", \r\n \"updates\": [\r\n \"ip bgp new-format\"\r\n ]\r\n }, \r\n {\r\n \"_ansible_item_result\": true, \r\n \"_ansible_no_log\": false, \r\n \"_ansible_parsed\": true, \r\n \"banners\": {}, \r\n \"changed\": true, \r\n \"commands\": [\r\n \"interface Loopback12345\"\r\n ], \r\n \"invocation\": {\r\n \"module_args\": {\r\n \"after\": null, \r\n \"auth_pass\": null, \r\n \"authorize\": null, \r\n \"backup\": false, \r\n \"before\": null, \r\n \"commands\": [\r\n \"interface Loopback12345\"\r\n ], \r\n \"config\": null, \r\n \"defaults\": false, \r\n \"force\": false, \r\n \"host\": null, \r\n \"lines\": [\r\n \"interface Loopback12345\"\r\n ], \r\n \"match\": \"none\", \r\n \"multiline_delimiter\": \"@\", \r\n \"parents\": null, \r\n \"password\": null, \r\n \"port\": null, \r\n \"provider\": {\r\n \"auth_pass\": null, \r\n \"authorize\": null, \r\n \"host\": \"APG_6010_PER\", \r\n \"password\": \"VALUE_SPECIFIED_IN_NO_LOG_PARAMETER\", \r\n \"port\": 3022, \r\n \"ssh_keyfile\": null, \r\n \"timeout\": 30, \r\n \"username\": \"ansible2\"\r\n }, \r\n \"replace\": \"line\", \r\n \"save\": false, \r\n \"src\": null, \r\n \"ssh_keyfile\": null, \r\n \"timeout\": null, \r\n \"username\": null\r\n }\r\n }, \r\n \"item\": \"interface Loopback12345\", \r\n \"updates\": [\r\n \"interface Loopback12345\"\r\n ]\r\n }\r\n ]\r\n }\r\n}\r\n```\r\n\r\nManually logging into the router and issuing the commands is shown below. The commands are rejected and no changes are observed.\r\n\r\n```\r\nAPG_6010_XPER#conf t\r\nEnter configuration commands, one per line. End with CNTL/Z.\r\nAPG_6010_XPER(config)#ip bgp new-format\r\nCommand authorization failed.\r\n\r\nAPG_6010_XPER(config)#interface Loopback12345\r\nCommand authorization failed.\r\n\r\nAPG_6010_XPER(config)#end\r\n\r\nAPG_6010_XPER#show run | include Loopback12345|^ip_bgp\r\nAPG_6010_XPER#\r\nAPG_6010_XPER#show archive config differences nvram:startup-config\r\n\r\n!Contextual Config Diffs:\r\n!No changes were found\r\n```\n", "before_files": [{"content": "#\n# (c) 2016 Red Hat Inc.\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n#\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport json\nimport re\n\nfrom ansible.errors import AnsibleConnectionFailure\nfrom ansible.module_utils._text import to_text, to_bytes\nfrom ansible.plugins.terminal import TerminalBase\n\n\nclass TerminalModule(TerminalBase):\n\n terminal_stdout_re = [\n re.compile(br\"[\\r\\n]?[\\w\\+\\-\\.:\\/\\[\\]]+(?:\\([^\\)]+\\)){0,3}(?:[>#]) ?$\")\n ]\n\n terminal_stderr_re = [\n re.compile(br\"% ?Error\"),\n # re.compile(br\"^% \\w+\", re.M),\n re.compile(br\"% ?Bad secret\"),\n re.compile(br\"[\\r\\n%] Bad passwords\"),\n re.compile(br\"invalid input\", re.I),\n re.compile(br\"(?:incomplete|ambiguous) command\", re.I),\n re.compile(br\"connection timed out\", re.I),\n re.compile(br\"[^\\r\\n]+ not found\"),\n re.compile(br\"'[^']' +returned error code: ?\\d+\"),\n re.compile(br\"Bad mask\", re.I),\n re.compile(br\"% ?(\\S+) ?overlaps with ?(\\S+)\", re.I),\n re.compile(br\"[%\\S] ?Error: ?[\\s]+\", re.I),\n re.compile(br\"[%\\S] ?Informational: ?[\\s]+\", re.I)\n ]\n\n def on_open_shell(self):\n try:\n for cmd in (b'terminal length 0', b'terminal width 512'):\n self._exec_cli_command(cmd)\n except AnsibleConnectionFailure:\n raise AnsibleConnectionFailure('unable to set terminal parameters')\n\n def on_become(self, passwd=None):\n if self._get_prompt().endswith(b'#'):\n return\n\n cmd = {u'command': u'enable'}\n if passwd:\n # Note: python-3.5 cannot combine u\"\" and r\"\" together. Thus make\n # an r string and use to_text to ensure it's text on both py2 and py3.\n cmd[u'prompt'] = to_text(r\"[\\r\\n]password: ?$\", errors='surrogate_or_strict')\n cmd[u'answer'] = passwd\n cmd[u'prompt_retry_check'] = True\n try:\n self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))\n prompt = self._get_prompt()\n if prompt is None or not prompt.endswith(b'#'):\n raise AnsibleConnectionFailure('failed to elevate privilege to enable mode still at prompt [%s]' % prompt)\n except AnsibleConnectionFailure as e:\n prompt = self._get_prompt()\n raise AnsibleConnectionFailure('unable to elevate privilege to enable mode, at prompt [%s] with error: %s' % (prompt, e.message))\n\n def on_unbecome(self):\n prompt = self._get_prompt()\n if prompt is None:\n # if prompt is None most likely the terminal is hung up at a prompt\n return\n\n if b'(config' in prompt:\n self._exec_cli_command(b'end')\n self._exec_cli_command(b'disable')\n\n elif prompt.endswith(b'#'):\n self._exec_cli_command(b'disable')\n", "path": "lib/ansible/plugins/terminal/ios.py"}], "after_files": [{"content": "#\n# (c) 2016 Red Hat Inc.\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n#\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport json\nimport re\n\nfrom ansible.errors import AnsibleConnectionFailure\nfrom ansible.module_utils._text import to_text, to_bytes\nfrom ansible.plugins.terminal import TerminalBase\n\n\nclass TerminalModule(TerminalBase):\n\n terminal_stdout_re = [\n re.compile(br\"[\\r\\n]?[\\w\\+\\-\\.:\\/\\[\\]]+(?:\\([^\\)]+\\)){0,3}(?:[>#]) ?$\")\n ]\n\n terminal_stderr_re = [\n re.compile(br\"% ?Error\"),\n # re.compile(br\"^% \\w+\", re.M),\n re.compile(br\"% ?Bad secret\"),\n re.compile(br\"[\\r\\n%] Bad passwords\"),\n re.compile(br\"invalid input\", re.I),\n re.compile(br\"(?:incomplete|ambiguous) command\", re.I),\n re.compile(br\"connection timed out\", re.I),\n re.compile(br\"[^\\r\\n]+ not found\"),\n re.compile(br\"'[^']' +returned error code: ?\\d+\"),\n re.compile(br\"Bad mask\", re.I),\n re.compile(br\"% ?(\\S+) ?overlaps with ?(\\S+)\", re.I),\n re.compile(br\"[%\\S] ?Error: ?[\\s]+\", re.I),\n re.compile(br\"[%\\S] ?Informational: ?[\\s]+\", re.I),\n re.compile(br\"Command authorization failed\")\n ]\n\n def on_open_shell(self):\n try:\n for cmd in (b'terminal length 0', b'terminal width 512'):\n self._exec_cli_command(cmd)\n except AnsibleConnectionFailure:\n raise AnsibleConnectionFailure('unable to set terminal parameters')\n\n def on_become(self, passwd=None):\n if self._get_prompt().endswith(b'#'):\n return\n\n cmd = {u'command': u'enable'}\n if passwd:\n # Note: python-3.5 cannot combine u\"\" and r\"\" together. Thus make\n # an r string and use to_text to ensure it's text on both py2 and py3.\n cmd[u'prompt'] = to_text(r\"[\\r\\n]password: ?$\", errors='surrogate_or_strict')\n cmd[u'answer'] = passwd\n cmd[u'prompt_retry_check'] = True\n try:\n self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))\n prompt = self._get_prompt()\n if prompt is None or not prompt.endswith(b'#'):\n raise AnsibleConnectionFailure('failed to elevate privilege to enable mode still at prompt [%s]' % prompt)\n except AnsibleConnectionFailure as e:\n prompt = self._get_prompt()\n raise AnsibleConnectionFailure('unable to elevate privilege to enable mode, at prompt [%s] with error: %s' % (prompt, e.message))\n\n def on_unbecome(self):\n prompt = self._get_prompt()\n if prompt is None:\n # if prompt is None most likely the terminal is hung up at a prompt\n return\n\n if b'(config' in prompt:\n self._exec_cli_command(b'end')\n self._exec_cli_command(b'disable')\n\n elif prompt.endswith(b'#'):\n self._exec_cli_command(b'disable')\n", "path": "lib/ansible/plugins/terminal/ios.py"}]}
| 3,425 | 176 |
gh_patches_debug_5157
|
rasdani/github-patches
|
git_diff
|
python__peps-2090
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"PEP numbers never change" verbiage is not in PEP 1
## Problem
The introduction to PEP 0 references PEP 1 as a source for
> PEP numbers are assigned by the PEP editors, and once assigned are never changed [1].
But PEP 1 doesn't say anything about PEP numbers never changing.
## Research
While skimming PEP 1, I found information about the PEP editor assigning a number:
> Once the PEP is ready for the repository, a PEP editor will:
Assign a PEP number (almost always just the next available number, but sometimes it's a special/joke number, like 666 or 3141). (Clarification: For Python 3, numbers in the 3000s were used for Py3k-specific proposals. But now that all new features go into Python 3 only, the process is back to using numbers in the 100s again. Remember that numbers below 100 are meta-PEPs.)
and
> The PEP editors are individuals responsible for managing the administrative and editorial aspects of the PEP workflow (e.g. assigning PEP numbers and changing their status). See PEP Editor Responsibilities & Workflow for details.
But I didn't find any reference to that number never changing.
## Proposal:
Can we change PEP 0's introduction so that the reference is specific to assigning numbers?
```
PEP numbers are assigned by the PEP editors[1], and once assigned are never changed.
```
## Link
https://github.com/python/peps/blob/40ef5625b7d42655f49090ffd2c0860ecf8d1d9f/pep0/constants.py#L22-L27
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pep0/constants.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 text_type = str
3 title_length = 55
4 author_length = 40
5 table_separator = "== ==== " + "="*title_length + " " + "="*author_length
6 column_format = (
7 '%(type)1s%(status)1s %(number)4s %(title)-{title_length}s %(authors)-s'
8 ).format(title_length=title_length)
9
10 header = """\
11 PEP: 0
12 Title: Index of Python Enhancement Proposals (PEPs)
13 Version: N/A
14 Last-Modified: %s
15 Author: python-dev <[email protected]>
16 Status: Active
17 Type: Informational
18 Content-Type: text/x-rst
19 Created: 13-Jul-2000
20 """
21
22 intro = """\
23 This PEP contains the index of all Python Enhancement Proposals,
24 known as PEPs. PEP numbers are assigned by the PEP editors, and
25 once assigned are never changed [1_]. The version control history [2_] of
26 the PEP texts represent their historical record.
27 """
28
29 references = """\
30 .. [1] PEP 1: PEP Purpose and Guidelines
31 .. [2] View PEP history online: https://github.com/python/peps
32 """
33
34 footer = """\
35 ..
36 Local Variables:
37 mode: indented-text
38 indent-tabs-mode: nil
39 sentence-end-double-space: t
40 fill-column: 70
41 coding: utf-8
42 End:\
43 """
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pep0/constants.py b/pep0/constants.py
--- a/pep0/constants.py
+++ b/pep0/constants.py
@@ -21,8 +21,8 @@
intro = """\
This PEP contains the index of all Python Enhancement Proposals,
-known as PEPs. PEP numbers are assigned by the PEP editors, and
-once assigned are never changed [1_]. The version control history [2_] of
+known as PEPs. PEP numbers are assigned by the PEP editors[1_], and
+once assigned are never changed. The version control history [2_] of
the PEP texts represent their historical record.
"""
|
{"golden_diff": "diff --git a/pep0/constants.py b/pep0/constants.py\n--- a/pep0/constants.py\n+++ b/pep0/constants.py\n@@ -21,8 +21,8 @@\n \n intro = \"\"\"\\\n This PEP contains the index of all Python Enhancement Proposals,\n-known as PEPs. PEP numbers are assigned by the PEP editors, and\n-once assigned are never changed [1_]. The version control history [2_] of\n+known as PEPs. PEP numbers are assigned by the PEP editors[1_], and\n+once assigned are never changed. The version control history [2_] of\n the PEP texts represent their historical record.\n \"\"\"\n", "issue": "\"PEP numbers never change\" verbiage is not in PEP 1\n## Problem\r\n\r\nThe introduction to PEP 0 references PEP 1 as a source for\r\n\r\n> PEP numbers are assigned by the PEP editors, and once assigned are never changed [1].\r\n\r\nBut PEP 1 doesn't say anything about PEP numbers never changing.\r\n\r\n## Research\r\n\r\nWhile skimming PEP 1, I found information about the PEP editor assigning a number:\r\n\r\n> Once the PEP is ready for the repository, a PEP editor will:\r\nAssign a PEP number (almost always just the next available number, but sometimes it's a special/joke number, like 666 or 3141). (Clarification: For Python 3, numbers in the 3000s were used for Py3k-specific proposals. But now that all new features go into Python 3 only, the process is back to using numbers in the 100s again. Remember that numbers below 100 are meta-PEPs.)\r\n\r\nand\r\n\r\n> The PEP editors are individuals responsible for managing the administrative and editorial aspects of the PEP workflow (e.g. assigning PEP numbers and changing their status). See PEP Editor Responsibilities & Workflow for details.\r\n\r\nBut I didn't find any reference to that number never changing. \r\n\r\n## Proposal:\r\n\r\nCan we change PEP 0's introduction so that the reference is specific to assigning numbers?\r\n\r\n```\r\nPEP numbers are assigned by the PEP editors[1], and once assigned are never changed.\r\n```\r\n\r\n## Link\r\n\r\nhttps://github.com/python/peps/blob/40ef5625b7d42655f49090ffd2c0860ecf8d1d9f/pep0/constants.py#L22-L27\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\ntext_type = str\ntitle_length = 55\nauthor_length = 40\ntable_separator = \"== ==== \" + \"=\"*title_length + \" \" + \"=\"*author_length\ncolumn_format = (\n '%(type)1s%(status)1s %(number)4s %(title)-{title_length}s %(authors)-s'\n).format(title_length=title_length)\n\nheader = \"\"\"\\\nPEP: 0\nTitle: Index of Python Enhancement Proposals (PEPs)\nVersion: N/A\nLast-Modified: %s\nAuthor: python-dev <[email protected]>\nStatus: Active\nType: Informational\nContent-Type: text/x-rst\nCreated: 13-Jul-2000\n\"\"\"\n\nintro = \"\"\"\\\nThis PEP contains the index of all Python Enhancement Proposals,\nknown as PEPs. PEP numbers are assigned by the PEP editors, and\nonce assigned are never changed [1_]. The version control history [2_] of\nthe PEP texts represent their historical record.\n\"\"\"\n\nreferences = \"\"\"\\\n.. [1] PEP 1: PEP Purpose and Guidelines\n.. 
[2] View PEP history online: https://github.com/python/peps\n\"\"\"\n\nfooter = \"\"\"\f\\\n..\n Local Variables:\n mode: indented-text\n indent-tabs-mode: nil\n sentence-end-double-space: t\n fill-column: 70\n coding: utf-8\n End:\\\n\"\"\"\n", "path": "pep0/constants.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\ntext_type = str\ntitle_length = 55\nauthor_length = 40\ntable_separator = \"== ==== \" + \"=\"*title_length + \" \" + \"=\"*author_length\ncolumn_format = (\n '%(type)1s%(status)1s %(number)4s %(title)-{title_length}s %(authors)-s'\n).format(title_length=title_length)\n\nheader = \"\"\"\\\nPEP: 0\nTitle: Index of Python Enhancement Proposals (PEPs)\nVersion: N/A\nLast-Modified: %s\nAuthor: python-dev <[email protected]>\nStatus: Active\nType: Informational\nContent-Type: text/x-rst\nCreated: 13-Jul-2000\n\"\"\"\n\nintro = \"\"\"\\\nThis PEP contains the index of all Python Enhancement Proposals,\nknown as PEPs. PEP numbers are assigned by the PEP editors[1_], and\nonce assigned are never changed. The version control history [2_] of\nthe PEP texts represent their historical record.\n\"\"\"\n\nreferences = \"\"\"\\\n.. [1] PEP 1: PEP Purpose and Guidelines\n.. [2] View PEP history online: https://github.com/python/peps\n\"\"\"\n\nfooter = \"\"\"\f\\\n..\n Local Variables:\n mode: indented-text\n indent-tabs-mode: nil\n sentence-end-double-space: t\n fill-column: 70\n coding: utf-8\n End:\\\n\"\"\"\n", "path": "pep0/constants.py"}]}
| 1,055 | 157 |
gh_patches_debug_15177
|
rasdani/github-patches
|
git_diff
|
qtile__qtile-2439
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bad icon scaling in Systray widget
# Issue description
1. I added the system tray widget to my qtile bar with an icon size of 20.
2. I currently use the icon set Papirus Dark.
3. I launched the `nm-applet` through the autostart script and I obtained a really small icon for the `nm-applet` as you can see in the following image:
[screenshot: the nm-applet icon appears much smaller than the other icons in the bar]
The previous small icon also happened if I launched `nm-applet` through the terminal.
One way I found to correct it was to set the **icon size for the system tray to 16**. With this icon size, I obtained the following good results:

# Qtile version
`qtile 0.17.0`
# Configuration
My configuration can be found in the following [dotfiles](https://github.com/juanscr/dotfiles/tree/master/.config/qtile)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/widget/systray.py`
Content:
```
1 # Copyright (c) 2010 Aldo Cortesi
2 # Copyright (c) 2010-2011 dequis
3 # Copyright (c) 2010, 2012 roger
4 # Copyright (c) 2011 Mounier Florian
5 # Copyright (c) 2011-2012, 2014 Tycho Andersen
6 # Copyright (c) 2012 dmpayton
7 # Copyright (c) 2012-2013 Craig Barnes
8 # Copyright (c) 2013 hbc
9 # Copyright (c) 2013 Tao Sauvage
10 # Copyright (c) 2014 Sean Vig
11 #
12 # Permission is hereby granted, free of charge, to any person obtaining a copy
13 # of this software and associated documentation files (the "Software"), to deal
14 # in the Software without restriction, including without limitation the rights
15 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 # copies of the Software, and to permit persons to whom the Software is
17 # furnished to do so, subject to the following conditions:
18 #
19 # The above copyright notice and this permission notice shall be included in
20 # all copies or substantial portions of the Software.
21 #
22 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
28 # SOFTWARE.
29 import xcffib
30 from xcffib.xproto import (
31 ClientMessageData,
32 ClientMessageEvent,
33 EventMask,
34 SetMode,
35 )
36
37 from libqtile import bar
38 from libqtile.backend.x11 import window
39 from libqtile.widget import base
40
41 XEMBED_PROTOCOL_VERSION = 0
42
43
44 class Icon(window._Window):
45 _window_mask = EventMask.StructureNotify | \
46 EventMask.PropertyChange | \
47 EventMask.Exposure
48
49 def __init__(self, win, qtile, systray):
50 window._Window.__init__(self, win, qtile)
51 self.systray = systray
52 self.update_size()
53
54 def update_size(self):
55 icon_size = self.systray.icon_size
56 self.update_hints()
57
58 try:
59 width = self.hints["min_width"]
60 height = self.hints["min_height"]
61 except KeyError:
62 width = icon_size
63 height = icon_size
64
65 if height > icon_size:
66 width = width * icon_size // height
67 height = icon_size
68 if height <= 0:
69 width = icon_size
70 height = icon_size
71
72 self.width = width
73 self.height = height
74 return False
75
76 def handle_PropertyNotify(self, e): # noqa: N802
77 name = self.qtile.core.conn.atoms.get_name(e.atom)
78 if name == "_XEMBED_INFO":
79 info = self.window.get_property('_XEMBED_INFO', unpack=int)
80 if info and info[1]:
81 self.systray.bar.draw()
82
83 return False
84
85 def handle_DestroyNotify(self, event): # noqa: N802
86 wid = event.window
87 del(self.qtile.windows_map[wid])
88 del(self.systray.icons[wid])
89 self.systray.bar.draw()
90 return False
91
92 handle_UnmapNotify = handle_DestroyNotify # noqa: N815
93
94
95 class Systray(window._Window, base._Widget):
96 """A widget that manages system tray"""
97
98 _window_mask = EventMask.StructureNotify | \
99 EventMask.Exposure
100
101 orientations = base.ORIENTATION_HORIZONTAL
102
103 defaults = [
104 ('icon_size', 20, 'Icon width'),
105 ('padding', 5, 'Padding between icons'),
106 ]
107
108 def __init__(self, **config):
109 base._Widget.__init__(self, bar.CALCULATED, **config)
110 self.add_defaults(Systray.defaults)
111 self.icons = {}
112 self.screen = 0
113
114 def calculate_length(self):
115 width = sum(i.width for i in self.icons.values())
116 width += self.padding * len(self.icons)
117 return width
118
119 def _configure(self, qtile, bar):
120 base._Widget._configure(self, qtile, bar)
121
122 if self.configured:
123 return
124
125 self.conn = conn = qtile.core.conn
126 win = conn.create_window(-1, -1, 1, 1)
127 window._Window.__init__(self, window.XWindow(conn, win.wid), qtile)
128 qtile.windows_map[win.wid] = self
129
130 # Even when we have multiple "Screen"s, we are setting up as the system
131 # tray on a particular X display, that is the screen we need to
132 # reference in the atom
133 if qtile.current_screen:
134 self.screen = qtile.current_screen.index
135 self.bar = bar
136 atoms = conn.atoms
137
138 conn.conn.core.SetSelectionOwner(
139 win.wid,
140 atoms['_NET_SYSTEM_TRAY_S{:d}'.format(self.screen)],
141 xcffib.CurrentTime
142 )
143 data = [
144 xcffib.CurrentTime,
145 atoms['_NET_SYSTEM_TRAY_S{:d}'.format(self.screen)],
146 win.wid, 0, 0
147 ]
148 union = ClientMessageData.synthetic(data, "I" * 5)
149 event = ClientMessageEvent.synthetic(
150 format=32,
151 window=qtile.core._root.wid,
152 type=atoms['MANAGER'],
153 data=union
154 )
155 qtile.core._root.send_event(event, mask=EventMask.StructureNotify)
156
157 def handle_ClientMessage(self, event): # noqa: N802
158 atoms = self.conn.atoms
159
160 opcode = event.type
161 data = event.data.data32
162 message = data[1]
163 wid = data[2]
164
165 parent = self.bar.window.window
166
167 if opcode == atoms['_NET_SYSTEM_TRAY_OPCODE'] and message == 0:
168 w = window.XWindow(self.conn, wid)
169 icon = Icon(w, self.qtile, self)
170 self.icons[wid] = icon
171 self.qtile.windows_map[wid] = icon
172
173 self.conn.conn.core.ChangeSaveSet(SetMode.Insert, wid)
174 self.conn.conn.core.ReparentWindow(wid, parent.wid, 0, 0)
175 self.conn.conn.flush()
176
177 info = icon.window.get_property('_XEMBED_INFO', unpack=int)
178
179 if not info:
180 self.bar.draw()
181 return False
182
183 if info[1]:
184 self.bar.draw()
185
186 return False
187
188 def draw(self):
189 xoffset = self.padding
190 self.drawer.clear(self.background or self.bar.background)
191 self.drawer.draw(offsetx=self.offset, width=self.length)
192 for pos, icon in enumerate(self.icons.values()):
193 icon.window.set_attribute(backpixmap=self.drawer.pixmap)
194 icon.place(
195 self.offset + xoffset,
196 self.bar.height // 2 - self.icon_size // 2,
197 icon.width, self.icon_size,
198 0,
199 None
200 )
201 if icon.hidden:
202 icon.unhide()
203 data = [
204 self.conn.atoms["_XEMBED_EMBEDDED_NOTIFY"],
205 xcffib.xproto.Time.CurrentTime,
206 0,
207 self.bar.window.wid,
208 XEMBED_PROTOCOL_VERSION
209 ]
210 u = xcffib.xproto.ClientMessageData.synthetic(data, "I" * 5)
211 event = xcffib.xproto.ClientMessageEvent.synthetic(
212 format=32,
213 window=icon.wid,
214 type=self.conn.atoms["_XEMBED"],
215 data=u
216 )
217 self.window.send_event(event)
218
219 xoffset += icon.width + self.padding
220
221 def finalize(self):
222 base._Widget.finalize(self)
223 atoms = self.conn.atoms
224 self.conn.conn.core.SetSelectionOwner(
225 0,
226 atoms['_NET_SYSTEM_TRAY_S{:d}'.format(self.screen)],
227 xcffib.CurrentTime,
228 )
229 self.hide()
230
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/libqtile/widget/systray.py b/libqtile/widget/systray.py
--- a/libqtile/widget/systray.py
+++ b/libqtile/widget/systray.py
@@ -55,19 +55,15 @@
icon_size = self.systray.icon_size
self.update_hints()
- try:
- width = self.hints["min_width"]
- height = self.hints["min_height"]
- except KeyError:
- width = icon_size
- height = icon_size
+ width = self.hints.get("min_width", icon_size)
+ height = self.hints.get("min_height", icon_size)
+
+ width = max(width, icon_size)
+ height = max(height, icon_size)
if height > icon_size:
width = width * icon_size // height
height = icon_size
- if height <= 0:
- width = icon_size
- height = icon_size
self.width = width
self.height = height
|
{"golden_diff": "diff --git a/libqtile/widget/systray.py b/libqtile/widget/systray.py\n--- a/libqtile/widget/systray.py\n+++ b/libqtile/widget/systray.py\n@@ -55,19 +55,15 @@\n icon_size = self.systray.icon_size\n self.update_hints()\n \n- try:\n- width = self.hints[\"min_width\"]\n- height = self.hints[\"min_height\"]\n- except KeyError:\n- width = icon_size\n- height = icon_size\n+ width = self.hints.get(\"min_width\", icon_size)\n+ height = self.hints.get(\"min_height\", icon_size)\n+\n+ width = max(width, icon_size)\n+ height = max(height, icon_size)\n \n if height > icon_size:\n width = width * icon_size // height\n height = icon_size\n- if height <= 0:\n- width = icon_size\n- height = icon_size\n \n self.width = width\n self.height = height\n", "issue": "Bad icon scaling in Systray widget\n# Issue description\r\n1. I added the system tray widget to my qtile bar with an icon size of 20.\r\n2. I currently use the icon set Papirus Dark.\r\n3. I launched the `nm-applet` through the autostart script and I obtained a really small icon for the `nm-applet` as you can see in the following image: \r\n\r\n\r\n\r\nThe previous small icon also happened if I launched `nm-applet` through the terminal. \r\n\r\nOne way I found to correct it was to setting the **icon size for the system tray to 16**. With this icon size, I obtained the following good results:\r\n\r\n\r\n\r\n# Qtile version\r\n`qtile 0.17.0`\r\n\r\n# Configuration\r\n\r\nMy configuration can be found in the following [dotfiles](https://github.com/juanscr/dotfiles/tree/master/.config/qtile)\r\n\n", "before_files": [{"content": "# Copyright (c) 2010 Aldo Cortesi\n# Copyright (c) 2010-2011 dequis\n# Copyright (c) 2010, 2012 roger\n# Copyright (c) 2011 Mounier Florian\n# Copyright (c) 2011-2012, 2014 Tycho Andersen\n# Copyright (c) 2012 dmpayton\n# Copyright (c) 2012-2013 Craig Barnes\n# Copyright (c) 2013 hbc\n# Copyright (c) 2013 Tao Sauvage\n# Copyright (c) 2014 Sean Vig\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport xcffib\nfrom xcffib.xproto import (\n ClientMessageData,\n ClientMessageEvent,\n EventMask,\n SetMode,\n)\n\nfrom libqtile import bar\nfrom libqtile.backend.x11 import window\nfrom libqtile.widget import base\n\nXEMBED_PROTOCOL_VERSION = 0\n\n\nclass Icon(window._Window):\n _window_mask = EventMask.StructureNotify | \\\n EventMask.PropertyChange | \\\n EventMask.Exposure\n\n def __init__(self, win, qtile, systray):\n window._Window.__init__(self, win, qtile)\n self.systray = systray\n self.update_size()\n\n def update_size(self):\n icon_size = self.systray.icon_size\n self.update_hints()\n\n try:\n width = self.hints[\"min_width\"]\n height = self.hints[\"min_height\"]\n except KeyError:\n width = icon_size\n height = icon_size\n\n if height > icon_size:\n width = width * icon_size // height\n height = icon_size\n if height <= 0:\n width = icon_size\n height = icon_size\n\n self.width = width\n self.height = height\n return False\n\n def handle_PropertyNotify(self, e): # noqa: N802\n name = self.qtile.core.conn.atoms.get_name(e.atom)\n if name == \"_XEMBED_INFO\":\n info = self.window.get_property('_XEMBED_INFO', unpack=int)\n if info and info[1]:\n self.systray.bar.draw()\n\n return False\n\n def handle_DestroyNotify(self, event): # noqa: N802\n wid = event.window\n del(self.qtile.windows_map[wid])\n del(self.systray.icons[wid])\n self.systray.bar.draw()\n return False\n\n handle_UnmapNotify = handle_DestroyNotify # noqa: N815\n\n\nclass Systray(window._Window, base._Widget):\n \"\"\"A widget that manages system tray\"\"\"\n\n _window_mask = EventMask.StructureNotify | \\\n EventMask.Exposure\n\n orientations = base.ORIENTATION_HORIZONTAL\n\n defaults = [\n ('icon_size', 20, 'Icon width'),\n ('padding', 5, 'Padding between icons'),\n ]\n\n def __init__(self, **config):\n base._Widget.__init__(self, bar.CALCULATED, **config)\n self.add_defaults(Systray.defaults)\n self.icons = {}\n self.screen = 0\n\n def calculate_length(self):\n width = sum(i.width for i in self.icons.values())\n width += self.padding * len(self.icons)\n return width\n\n def _configure(self, qtile, bar):\n base._Widget._configure(self, qtile, bar)\n\n if self.configured:\n return\n\n self.conn = conn = qtile.core.conn\n win = conn.create_window(-1, -1, 1, 1)\n window._Window.__init__(self, window.XWindow(conn, win.wid), qtile)\n qtile.windows_map[win.wid] = self\n\n # Even when we have multiple \"Screen\"s, we are setting up as the system\n # tray on a particular X display, that is the screen we need to\n # reference in the atom\n if qtile.current_screen:\n self.screen = qtile.current_screen.index\n self.bar = bar\n atoms = conn.atoms\n\n conn.conn.core.SetSelectionOwner(\n win.wid,\n atoms['_NET_SYSTEM_TRAY_S{:d}'.format(self.screen)],\n xcffib.CurrentTime\n )\n data = [\n xcffib.CurrentTime,\n atoms['_NET_SYSTEM_TRAY_S{:d}'.format(self.screen)],\n win.wid, 0, 0\n ]\n union = ClientMessageData.synthetic(data, \"I\" * 5)\n event = ClientMessageEvent.synthetic(\n format=32,\n window=qtile.core._root.wid,\n type=atoms['MANAGER'],\n data=union\n )\n qtile.core._root.send_event(event, mask=EventMask.StructureNotify)\n\n def handle_ClientMessage(self, event): # noqa: N802\n atoms = self.conn.atoms\n\n opcode = event.type\n data = event.data.data32\n 
message = data[1]\n wid = data[2]\n\n parent = self.bar.window.window\n\n if opcode == atoms['_NET_SYSTEM_TRAY_OPCODE'] and message == 0:\n w = window.XWindow(self.conn, wid)\n icon = Icon(w, self.qtile, self)\n self.icons[wid] = icon\n self.qtile.windows_map[wid] = icon\n\n self.conn.conn.core.ChangeSaveSet(SetMode.Insert, wid)\n self.conn.conn.core.ReparentWindow(wid, parent.wid, 0, 0)\n self.conn.conn.flush()\n\n info = icon.window.get_property('_XEMBED_INFO', unpack=int)\n\n if not info:\n self.bar.draw()\n return False\n\n if info[1]:\n self.bar.draw()\n\n return False\n\n def draw(self):\n xoffset = self.padding\n self.drawer.clear(self.background or self.bar.background)\n self.drawer.draw(offsetx=self.offset, width=self.length)\n for pos, icon in enumerate(self.icons.values()):\n icon.window.set_attribute(backpixmap=self.drawer.pixmap)\n icon.place(\n self.offset + xoffset,\n self.bar.height // 2 - self.icon_size // 2,\n icon.width, self.icon_size,\n 0,\n None\n )\n if icon.hidden:\n icon.unhide()\n data = [\n self.conn.atoms[\"_XEMBED_EMBEDDED_NOTIFY\"],\n xcffib.xproto.Time.CurrentTime,\n 0,\n self.bar.window.wid,\n XEMBED_PROTOCOL_VERSION\n ]\n u = xcffib.xproto.ClientMessageData.synthetic(data, \"I\" * 5)\n event = xcffib.xproto.ClientMessageEvent.synthetic(\n format=32,\n window=icon.wid,\n type=self.conn.atoms[\"_XEMBED\"],\n data=u\n )\n self.window.send_event(event)\n\n xoffset += icon.width + self.padding\n\n def finalize(self):\n base._Widget.finalize(self)\n atoms = self.conn.atoms\n self.conn.conn.core.SetSelectionOwner(\n 0,\n atoms['_NET_SYSTEM_TRAY_S{:d}'.format(self.screen)],\n xcffib.CurrentTime,\n )\n self.hide()\n", "path": "libqtile/widget/systray.py"}], "after_files": [{"content": "# Copyright (c) 2010 Aldo Cortesi\n# Copyright (c) 2010-2011 dequis\n# Copyright (c) 2010, 2012 roger\n# Copyright (c) 2011 Mounier Florian\n# Copyright (c) 2011-2012, 2014 Tycho Andersen\n# Copyright (c) 2012 dmpayton\n# Copyright (c) 2012-2013 Craig Barnes\n# Copyright (c) 2013 hbc\n# Copyright (c) 2013 Tao Sauvage\n# Copyright (c) 2014 Sean Vig\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport xcffib\nfrom xcffib.xproto import (\n ClientMessageData,\n ClientMessageEvent,\n EventMask,\n SetMode,\n)\n\nfrom libqtile import bar\nfrom libqtile.backend.x11 import window\nfrom libqtile.widget import base\n\nXEMBED_PROTOCOL_VERSION = 0\n\n\nclass Icon(window._Window):\n _window_mask = EventMask.StructureNotify | \\\n EventMask.PropertyChange | \\\n EventMask.Exposure\n\n def __init__(self, win, qtile, systray):\n window._Window.__init__(self, win, qtile)\n self.systray = systray\n self.update_size()\n\n def update_size(self):\n icon_size = self.systray.icon_size\n self.update_hints()\n\n width = self.hints.get(\"min_width\", icon_size)\n height = self.hints.get(\"min_height\", icon_size)\n\n width = max(width, icon_size)\n height = max(height, icon_size)\n\n if height > icon_size:\n width = width * icon_size // height\n height = icon_size\n\n self.width = width\n self.height = height\n return False\n\n def handle_PropertyNotify(self, e): # noqa: N802\n name = self.qtile.core.conn.atoms.get_name(e.atom)\n if name == \"_XEMBED_INFO\":\n info = self.window.get_property('_XEMBED_INFO', unpack=int)\n if info and info[1]:\n self.systray.bar.draw()\n\n return False\n\n def handle_DestroyNotify(self, event): # noqa: N802\n wid = event.window\n del(self.qtile.windows_map[wid])\n del(self.systray.icons[wid])\n self.systray.bar.draw()\n return False\n\n handle_UnmapNotify = handle_DestroyNotify # noqa: N815\n\n\nclass Systray(window._Window, base._Widget):\n \"\"\"A widget that manages system tray\"\"\"\n\n _window_mask = EventMask.StructureNotify | \\\n EventMask.Exposure\n\n orientations = base.ORIENTATION_HORIZONTAL\n\n defaults = [\n ('icon_size', 20, 'Icon width'),\n ('padding', 5, 'Padding between icons'),\n ]\n\n def __init__(self, **config):\n base._Widget.__init__(self, bar.CALCULATED, **config)\n self.add_defaults(Systray.defaults)\n self.icons = {}\n self.screen = 0\n\n def calculate_length(self):\n width = sum(i.width for i in self.icons.values())\n width += self.padding * len(self.icons)\n return width\n\n def _configure(self, qtile, bar):\n base._Widget._configure(self, qtile, bar)\n\n if self.configured:\n return\n\n self.conn = conn = qtile.core.conn\n win = conn.create_window(-1, -1, 1, 1)\n window._Window.__init__(self, window.XWindow(conn, win.wid), qtile)\n qtile.windows_map[win.wid] = self\n\n # Even when we have multiple \"Screen\"s, we are setting up as the system\n # tray on a particular X display, that is the screen we need to\n # reference in the atom\n if qtile.current_screen:\n self.screen = qtile.current_screen.index\n self.bar = bar\n atoms = conn.atoms\n\n conn.conn.core.SetSelectionOwner(\n win.wid,\n atoms['_NET_SYSTEM_TRAY_S{:d}'.format(self.screen)],\n xcffib.CurrentTime\n )\n data = [\n xcffib.CurrentTime,\n atoms['_NET_SYSTEM_TRAY_S{:d}'.format(self.screen)],\n win.wid, 0, 0\n ]\n union = ClientMessageData.synthetic(data, \"I\" * 5)\n event = ClientMessageEvent.synthetic(\n format=32,\n window=qtile.core._root.wid,\n type=atoms['MANAGER'],\n data=union\n )\n qtile.core._root.send_event(event, mask=EventMask.StructureNotify)\n\n def handle_ClientMessage(self, event): # noqa: N802\n atoms = self.conn.atoms\n\n opcode = event.type\n data = event.data.data32\n message = data[1]\n wid = 
data[2]\n\n parent = self.bar.window.window\n\n if opcode == atoms['_NET_SYSTEM_TRAY_OPCODE'] and message == 0:\n w = window.XWindow(self.conn, wid)\n icon = Icon(w, self.qtile, self)\n self.icons[wid] = icon\n self.qtile.windows_map[wid] = icon\n\n self.conn.conn.core.ChangeSaveSet(SetMode.Insert, wid)\n self.conn.conn.core.ReparentWindow(wid, parent.wid, 0, 0)\n self.conn.conn.flush()\n\n info = icon.window.get_property('_XEMBED_INFO', unpack=int)\n\n if not info:\n self.bar.draw()\n return False\n\n if info[1]:\n self.bar.draw()\n\n return False\n\n def draw(self):\n xoffset = self.padding\n self.drawer.clear(self.background or self.bar.background)\n self.drawer.draw(offsetx=self.offset, width=self.length)\n for pos, icon in enumerate(self.icons.values()):\n icon.window.set_attribute(backpixmap=self.drawer.pixmap)\n icon.place(\n self.offset + xoffset,\n self.bar.height // 2 - self.icon_size // 2,\n icon.width, self.icon_size,\n 0,\n None\n )\n if icon.hidden:\n icon.unhide()\n data = [\n self.conn.atoms[\"_XEMBED_EMBEDDED_NOTIFY\"],\n xcffib.xproto.Time.CurrentTime,\n 0,\n self.bar.window.wid,\n XEMBED_PROTOCOL_VERSION\n ]\n u = xcffib.xproto.ClientMessageData.synthetic(data, \"I\" * 5)\n event = xcffib.xproto.ClientMessageEvent.synthetic(\n format=32,\n window=icon.wid,\n type=self.conn.atoms[\"_XEMBED\"],\n data=u\n )\n self.window.send_event(event)\n\n xoffset += icon.width + self.padding\n\n def finalize(self):\n base._Widget.finalize(self)\n atoms = self.conn.atoms\n self.conn.conn.core.SetSelectionOwner(\n 0,\n atoms['_NET_SYSTEM_TRAY_S{:d}'.format(self.screen)],\n xcffib.CurrentTime,\n )\n self.hide()\n", "path": "libqtile/widget/systray.py"}]}
| 2,997 | 234 |
gh_patches_debug_24167
|
rasdani/github-patches
|
git_diff
|
kartoza__prj.app-271
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sponsor urls should be plural
<project>/sponsor/??? should be <project>/sponsors/??? except in cases where the view references a single sponsor
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django_project/changes/urls.py`
Content:
```
1 # coding=utf-8
2 # flake8: noqa
3 """Urls for changelog application."""
4
5 from django.conf.urls import patterns, url, include # noqa
6
7 from django.conf import settings
8
9 from feeds.version import RssVersionFeed, AtomVersionFeed
10 from feeds.entry import RssEntryFeed, AtomEntryFeed
11 from views import (
12 # Category
13 CategoryDetailView,
14 CategoryDeleteView,
15 CategoryCreateView,
16 CategoryListView,
17 JSONCategoryListView,
18 CategoryUpdateView,
19 PendingCategoryListView,
20 ApproveCategoryView,
21 # Version
22 VersionMarkdownView,
23 VersionDetailView,
24 VersionThumbnailView,
25 VersionDeleteView,
26 VersionCreateView,
27 VersionListView,
28 VersionUpdateView,
29 PendingVersionListView,
30 ApproveVersionView,
31 VersionDownload,
32 VersionDownloadGnu,
33 # Entry
34 EntryDetailView,
35 EntryDeleteView,
36 EntryCreateView,
37 EntryListView,
38 EntryUpdateView,
39 PendingEntryListView,
40 ApproveEntryView,
41 # Sponsor
42 SponsorDetailView,
43 SponsorDeleteView,
44 SponsorCreateView,
45 SponsorListView,
46 SponsorWorldMapView,
47 JSONSponsorListView,
48 SponsorUpdateView,
49 PendingSponsorListView,
50 ApproveSponsorView,
51
52 # Sponsorship Level
53
54 SponsorshipLevelDetailView,
55 SponsorshipLevelDeleteView,
56 SponsorshipLevelCreateView,
57 SponsorshipLevelListView,
58 JSONSponsorshipLevelListView,
59 SponsorshipLevelUpdateView,
60 PendingSponsorshipLevelListView,
61 ApproveSponsorshipLevelView,
62
63 # Sponsorship Period
64
65 SponsorshipPeriodDetailView,
66 SponsorshipPeriodDeleteView,
67 SponsorshipPeriodCreateView,
68 SponsorshipPeriodListView,
69 JSONSponsorshipPeriodListView,
70 SponsorshipPeriodUpdateView,
71 PendingSponsorshipPeriodListView,
72 ApproveSponsorshipPeriodView,
73 )
74
75 urlpatterns = patterns(
76 '',
77 # Category management
78
79 # This view is only accessible via ajax
80 url(regex='^json-category/list/(?P<version>\d+)/$',
81 view=JSONCategoryListView.as_view(),
82 name='json-category-list'),
83 url(regex='^(?P<project_slug>[\w-]+)/pending-category/list/$',
84 view=PendingCategoryListView.as_view(),
85 name='pending-category-list'),
86 url(regex='^(?P<project_slug>[\w-]+)/approve-category/(?P<slug>[\w-]+)/$',
87 view=ApproveCategoryView.as_view(),
88 name='category-approve'),
89 url(regex='^(?P<project_slug>[\w-]+)/category/list/$',
90 view=CategoryListView.as_view(),
91 name='category-list'),
92 url(regex='^(?P<project_slug>[\w-]+)/category/(?P<slug>[\w-]+)/$',
93 view=CategoryDetailView.as_view(),
94 name='category-detail'),
95 url(regex='^(?P<project_slug>[\w-]+)/category/(?P<slug>[\w-]+)/delete/$',
96 view=CategoryDeleteView.as_view(),
97 name='category-delete'),
98 url(regex='^(?P<project_slug>[\w-]+)/create-category/$',
99 view=CategoryCreateView.as_view(),
100 name='category-create'),
101 url(regex='^(?P<project_slug>[\w-]+)/category/(?P<slug>[\w-]+)/update/$',
102 view=CategoryUpdateView.as_view(),
103 name='category-update'),
104
105 # Version management
106 url(regex='^(?P<project_slug>[\w-]+)/pending-versions/list/$',
107 view=PendingVersionListView.as_view(),
108 name='pending-version-list'),
109 url(regex='^(?P<project_slug>[\w-]+)/version/(?P<slug>[\w.-]+)/approve/$',
110 view=ApproveVersionView.as_view(),
111 name='version-approve'),
112 url(regex='^(?P<project_slug>[\w-]+)/version/list/$',
113 view=VersionListView.as_view(),
114 name='version-list'),
115 url(regex='^(?P<project_slug>[\w-]+)/version/(?P<slug>[\w.-]+)/markdown/$',
116 view=VersionMarkdownView.as_view(),
117 name='version-markdown'),
118 url(regex='^(?P<project_slug>[\w-]+)/version/(?P<slug>[\w.-]+)/$',
119 view=VersionDetailView.as_view(),
120 name='version-detail'),
121 url(regex='^(?P<project_slug>[\w-]+)/version/(?P<slug>[\w.-]+)/thumbs/$',
122 view=VersionThumbnailView.as_view(),
123 name='version-thumbs'),
124 url(regex='^(?P<project_slug>[\w-]+)/version/(?P<slug>[\w.-]+)/delete/$',
125 view=VersionDeleteView.as_view(),
126 name='version-delete'),
127 url(regex='^(?P<project_slug>[\w-]+)/create-version/$',
128 view=VersionCreateView.as_view(),
129 name='version-create'),
130 url(regex='^(?P<project_slug>[\w-]+)/version/(?P<slug>[\w.-]+)/update/$',
131 view=VersionUpdateView.as_view(),
132 name='version-update'),
133 url(regex='^(?P<project_slug>[\w-]+)/version/(?P<slug>[\w.-]+)/download/$',
134 view=VersionDownload.as_view(),
135 name='version-download'),
136 url(regex='^(?P<project_slug>[\w-]+)/version/(?P<slug>[\w.-]+)/gnu/$',
137 view=VersionDownloadGnu.as_view(),
138 name='version-download-gnu'),
139
140 # Changelog entry management
141 url(regex='^(?P<project_slug>[\w-]+)/(?P<version_slug>[\w.-]+)/'
142 'pending-entry/list/$',
143 view=PendingEntryListView.as_view(),
144 name='pending-entry-list'),
145 url(regex='^entry/approve/(?P<pk>\d+)$',
146 view=ApproveEntryView.as_view(),
147 name='entry-approve'),
148 url(regex='^(?P<project_slug>[\w-]+)/(?P<version_slug>[\w'
149 '.-]+)/entry/list/$',
150 view=EntryListView.as_view(),
151 name='entry-list'),
152 url(regex='^entry/(?P<pk>\d+)$',
153 view=EntryDetailView.as_view(),
154 name='entry-detail'),
155 url(regex='^entry/delete/(?P<pk>\d+)$',
156 view=EntryDeleteView.as_view(),
157 name='entry-delete'),
158 url(regex='^(?P<project_slug>[\w-]+)/(?P<version_slug>[\w.-]+)/'
159 'create-entry/$',
160 view=EntryCreateView.as_view(),
161 name='entry-create'),
162 url(regex='^entry/update/(?P<pk>\d+)$',
163 view=EntryUpdateView.as_view(),
164 name='entry-update'),
165
166 # Feeds
167 url(regex='^(?P<project_slug>[\w-]+)/rss/latest-version/$',
168 view=RssVersionFeed(),
169 name='latest-version-rss-feed'),
170 url(regex='^(?P<project_slug>[\w-]+)/atom/latest-version/$',
171 view=AtomVersionFeed(),
172 name='latest-version-atom-feed'),
173 url(regex='^(?P<project_slug>[\w-]+)/rss/latest-entry/$',
174 view=RssEntryFeed(),
175 name='latest-entry-rss-feed'),
176 url(regex='^(?P<project_slug>[\w-]+)/atom/latest-entry/$',
177 view=AtomEntryFeed(),
178 name='latest-entry-atom-feed'),
179
180 # Feeds specific version and projects
181 url(regex='^(?P<project_slug>[\w-]+)/version/(?P<version_slug>['
182 '\w.-]+)/rss$',
183 view=RssEntryFeed(),
184 name='entry-rss-feed'),
185 url(regex='^(?P<project_slug>[\w-]+)/version/(?P<version_slug>['
186 '\w.-]+)/atom$',
187 view=AtomEntryFeed(),
188 name='entry-atom-feed'),
189
190 # User map
191 # url(r'^user-map/', include('user_map.urls')),
192
193 # Sponsor management
194
195 # This view is only accessible via ajax
196 url(regex='^json-sponsor/list/(?P<version>\d+)/$',
197 view=JSONSponsorListView.as_view(),
198 name='json-sponsor-list'),
199 url(regex='^(?P<project_slug>[\w-]+)/pending-sponsor/list/$',
200 view=PendingSponsorListView.as_view(),
201 name='pending-sponsor-list'),
202 url(regex='^(?P<project_slug>[\w-]+)/approve-sponsor/(?P<slug>[\w-]+)/$',
203 view=ApproveSponsorView.as_view(),
204 name='sponsor-approve'),
205 url(regex='^(?P<project_slug>[\w-]+)/sponsor/list/$',
206 view=SponsorListView.as_view(),
207 name='sponsor-list'),
208 url(regex='^(?P<project_slug>[\w-]+)/sponsor/world-map/$',
209 view=SponsorWorldMapView.as_view(),
210 name='sponsor-world-map'),
211 url(regex='^(?P<project_slug>[\w-]+)/sponsor/(?P<slug>[\w-]+)/$',
212 view=SponsorDetailView.as_view(),
213 name='sponsor-detail'),
214 url(regex='^(?P<project_slug>[\w-]+)/sponsor/(?P<slug>[\w-]+)/delete/$',
215 view=SponsorDeleteView.as_view(),
216 name='sponsor-delete'),
217 url(regex='^(?P<project_slug>[\w-]+)/create-sponsor/$',
218 view=SponsorCreateView.as_view(),
219 name='sponsor-create'),
220 url(regex='^(?P<project_slug>[\w-]+)/sponsor/(?P<slug>[\w-]+)/update/$',
221 view=SponsorUpdateView.as_view(),
222 name='sponsor-update'),
223
224 # Sponsorship Level management
225
226 # This view is only accessible via ajax
227 url(regex='^json-sponsorshiplevel/list/(?P<version>\d+)/$',
228 view=JSONSponsorshipLevelListView.as_view(),
229 name='json-sponsorshiplevel-list'),
230 url(regex='^(?P<project_slug>[\w-]+)/pending-sponsorshiplevel/list/$',
231 view=PendingSponsorshipLevelListView.as_view(),
232 name='pending-sponsorshiplevel-list'),
233 url(regex='^(?P<project_slug>[\w-]+)/approve-sponsorshiplevel/(?P<slug>[\w-]+)/$',
234 view=ApproveSponsorshipLevelView.as_view(),
235 name='sponsorshiplevel-approve'),
236 url(regex='^(?P<project_slug>[\w-]+)/sponsorshiplevel/list/$',
237 view=SponsorshipLevelListView.as_view(),
238 name='sponsorshiplevel-list'),
239 url(regex='^(?P<project_slug>[\w-]+)/sponsorshiplevel/(?P<slug>[\w-]+)/$',
240 view=SponsorshipLevelDetailView.as_view(),
241 name='sponsorshiplevel-detail'),
242 url(regex='^(?P<project_slug>[\w-]+)/sponsorshiplevel/(?P<slug>[\w-]+)/delete/$',
243 view=SponsorshipLevelDeleteView.as_view(),
244 name='sponsorshiplevel-delete'),
245 url(regex='^(?P<project_slug>[\w-]+)/create-sponsorshiplevel/$',
246 view=SponsorshipLevelCreateView.as_view(),
247 name='sponsorshiplevel-create'),
248 url(regex='^(?P<project_slug>[\w-]+)/sponsorshiplevel/(?P<slug>[\w-]+)/update/$',
249 view=SponsorshipLevelUpdateView.as_view(),
250 name='sponsorshiplevel-update'),
251
252 # Sponsorship Period management
253
254 # This view is only accessible via ajax
255 url(regex='^json-sponsorshipperiod/list/(?P<version>\d+)/$',
256 view=JSONSponsorshipPeriodListView.as_view(),
257 name='json-sponsorshipperiod-list'),
258 url(regex='^(?P<project_slug>[\w-]+)/pending-sponsorshipperiod/list/$',
259 view=PendingSponsorshipPeriodListView.as_view(),
260 name='pending-sponsorshipperiod-list'),
261 url(regex='^(?P<project_slug>[\w-]+)/approve-sponsorshipperiod/(?P<slug>[\w-]+)/$',
262 view=ApproveSponsorshipPeriodView.as_view(),
263 name='sponsorshipperiod-approve'),
264 url(regex='^(?P<project_slug>[\w-]+)/sponsorshipperiod/list/$',
265 view=SponsorshipPeriodListView.as_view(),
266 name='sponsorshipperiod-list'),
267 url(regex='^(?P<project_slug>[\w-]+)/sponsorshipperiod/(?P<slug>[\w-]+)/$',
268 view=SponsorshipPeriodDetailView.as_view(),
269 name='sponsorshipperiod-detail'),
270 url(regex='^(?P<project_slug>[\w-]+)/sponsorshipperiod/(?P<slug>[\w-]+)/delete/$',
271 view=SponsorshipPeriodDeleteView.as_view(),
272 name='sponsorshipperiod-delete'),
273 url(regex='^(?P<project_slug>[\w-]+)/create-sponsorshipperiod/$',
274 view=SponsorshipPeriodCreateView.as_view(),
275 name='sponsorshipperiod-create'),
276 url(regex='^(?P<project_slug>[\w-]+)/sponsorshipperiod/(?P<slug>[\w-]+)/update/$',
277 view=SponsorshipPeriodUpdateView.as_view(),
278 name='sponsorshipperiod-update'),
279 )
280
281
282 if settings.DEBUG:
283 # static files (images, css, javascript, etc.)
284 urlpatterns += patterns(
285 '',
286 (r'^media/(?P<path>.*)$', 'django.views.static.serve', {
287 'document_root': settings.MEDIA_ROOT}))
288
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/django_project/changes/urls.py b/django_project/changes/urls.py
--- a/django_project/changes/urls.py
+++ b/django_project/changes/urls.py
@@ -196,16 +196,16 @@
url(regex='^json-sponsor/list/(?P<version>\d+)/$',
view=JSONSponsorListView.as_view(),
name='json-sponsor-list'),
- url(regex='^(?P<project_slug>[\w-]+)/pending-sponsor/list/$',
+ url(regex='^(?P<project_slug>[\w-]+)/pending-sponsors/list/$',
view=PendingSponsorListView.as_view(),
name='pending-sponsor-list'),
url(regex='^(?P<project_slug>[\w-]+)/approve-sponsor/(?P<slug>[\w-]+)/$',
view=ApproveSponsorView.as_view(),
name='sponsor-approve'),
- url(regex='^(?P<project_slug>[\w-]+)/sponsor/list/$',
+ url(regex='^(?P<project_slug>[\w-]+)/sponsors/list/$',
view=SponsorListView.as_view(),
name='sponsor-list'),
- url(regex='^(?P<project_slug>[\w-]+)/sponsor/world-map/$',
+ url(regex='^(?P<project_slug>[\w-]+)/sponsors/world-map/$',
view=SponsorWorldMapView.as_view(),
name='sponsor-world-map'),
url(regex='^(?P<project_slug>[\w-]+)/sponsor/(?P<slug>[\w-]+)/$',
|
{"golden_diff": "diff --git a/django_project/changes/urls.py b/django_project/changes/urls.py\n--- a/django_project/changes/urls.py\n+++ b/django_project/changes/urls.py\n@@ -196,16 +196,16 @@\n url(regex='^json-sponsor/list/(?P<version>\\d+)/$',\n view=JSONSponsorListView.as_view(),\n name='json-sponsor-list'),\n- url(regex='^(?P<project_slug>[\\w-]+)/pending-sponsor/list/$',\n+ url(regex='^(?P<project_slug>[\\w-]+)/pending-sponsors/list/$',\n view=PendingSponsorListView.as_view(),\n name='pending-sponsor-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/approve-sponsor/(?P<slug>[\\w-]+)/$',\n view=ApproveSponsorView.as_view(),\n name='sponsor-approve'),\n- url(regex='^(?P<project_slug>[\\w-]+)/sponsor/list/$',\n+ url(regex='^(?P<project_slug>[\\w-]+)/sponsors/list/$',\n view=SponsorListView.as_view(),\n name='sponsor-list'),\n- url(regex='^(?P<project_slug>[\\w-]+)/sponsor/world-map/$',\n+ url(regex='^(?P<project_slug>[\\w-]+)/sponsors/world-map/$',\n view=SponsorWorldMapView.as_view(),\n name='sponsor-world-map'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsor/(?P<slug>[\\w-]+)/$',\n", "issue": "Sponsor urls should be plural\n<project>/sponsor/??? should be <project>/sponsors/??? except in cases where the view references a single sponsor\n\n", "before_files": [{"content": "# coding=utf-8\n# flake8: noqa\n\"\"\"Urls for changelog application.\"\"\"\n\nfrom django.conf.urls import patterns, url, include # noqa\n\nfrom django.conf import settings\n\nfrom feeds.version import RssVersionFeed, AtomVersionFeed\nfrom feeds.entry import RssEntryFeed, AtomEntryFeed\nfrom views import (\n # Category\n CategoryDetailView,\n CategoryDeleteView,\n CategoryCreateView,\n CategoryListView,\n JSONCategoryListView,\n CategoryUpdateView,\n PendingCategoryListView,\n ApproveCategoryView,\n # Version\n VersionMarkdownView,\n VersionDetailView,\n VersionThumbnailView,\n VersionDeleteView,\n VersionCreateView,\n VersionListView,\n VersionUpdateView,\n PendingVersionListView,\n ApproveVersionView,\n VersionDownload,\n VersionDownloadGnu,\n # Entry\n EntryDetailView,\n EntryDeleteView,\n EntryCreateView,\n EntryListView,\n EntryUpdateView,\n PendingEntryListView,\n ApproveEntryView,\n # Sponsor\n SponsorDetailView,\n SponsorDeleteView,\n SponsorCreateView,\n SponsorListView,\n SponsorWorldMapView,\n JSONSponsorListView,\n SponsorUpdateView,\n PendingSponsorListView,\n ApproveSponsorView,\n\n # Sponsorship Level\n\n SponsorshipLevelDetailView,\n SponsorshipLevelDeleteView,\n SponsorshipLevelCreateView,\n SponsorshipLevelListView,\n JSONSponsorshipLevelListView,\n SponsorshipLevelUpdateView,\n PendingSponsorshipLevelListView,\n ApproveSponsorshipLevelView,\n\n # Sponsorship Period\n\n SponsorshipPeriodDetailView,\n SponsorshipPeriodDeleteView,\n SponsorshipPeriodCreateView,\n SponsorshipPeriodListView,\n JSONSponsorshipPeriodListView,\n SponsorshipPeriodUpdateView,\n PendingSponsorshipPeriodListView,\n ApproveSponsorshipPeriodView,\n)\n\nurlpatterns = patterns(\n '',\n # Category management\n\n # This view is only accessible via ajax\n url(regex='^json-category/list/(?P<version>\\d+)/$',\n view=JSONCategoryListView.as_view(),\n name='json-category-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/pending-category/list/$',\n view=PendingCategoryListView.as_view(),\n name='pending-category-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/approve-category/(?P<slug>[\\w-]+)/$',\n view=ApproveCategoryView.as_view(),\n name='category-approve'),\n url(regex='^(?P<project_slug>[\\w-]+)/category/list/$',\n 
view=CategoryListView.as_view(),\n name='category-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/category/(?P<slug>[\\w-]+)/$',\n view=CategoryDetailView.as_view(),\n name='category-detail'),\n url(regex='^(?P<project_slug>[\\w-]+)/category/(?P<slug>[\\w-]+)/delete/$',\n view=CategoryDeleteView.as_view(),\n name='category-delete'),\n url(regex='^(?P<project_slug>[\\w-]+)/create-category/$',\n view=CategoryCreateView.as_view(),\n name='category-create'),\n url(regex='^(?P<project_slug>[\\w-]+)/category/(?P<slug>[\\w-]+)/update/$',\n view=CategoryUpdateView.as_view(),\n name='category-update'),\n\n # Version management\n url(regex='^(?P<project_slug>[\\w-]+)/pending-versions/list/$',\n view=PendingVersionListView.as_view(),\n name='pending-version-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/version/(?P<slug>[\\w.-]+)/approve/$',\n view=ApproveVersionView.as_view(),\n name='version-approve'),\n url(regex='^(?P<project_slug>[\\w-]+)/version/list/$',\n view=VersionListView.as_view(),\n name='version-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/version/(?P<slug>[\\w.-]+)/markdown/$',\n view=VersionMarkdownView.as_view(),\n name='version-markdown'),\n url(regex='^(?P<project_slug>[\\w-]+)/version/(?P<slug>[\\w.-]+)/$',\n view=VersionDetailView.as_view(),\n name='version-detail'),\n url(regex='^(?P<project_slug>[\\w-]+)/version/(?P<slug>[\\w.-]+)/thumbs/$',\n view=VersionThumbnailView.as_view(),\n name='version-thumbs'),\n url(regex='^(?P<project_slug>[\\w-]+)/version/(?P<slug>[\\w.-]+)/delete/$',\n view=VersionDeleteView.as_view(),\n name='version-delete'),\n url(regex='^(?P<project_slug>[\\w-]+)/create-version/$',\n view=VersionCreateView.as_view(),\n name='version-create'),\n url(regex='^(?P<project_slug>[\\w-]+)/version/(?P<slug>[\\w.-]+)/update/$',\n view=VersionUpdateView.as_view(),\n name='version-update'),\n url(regex='^(?P<project_slug>[\\w-]+)/version/(?P<slug>[\\w.-]+)/download/$',\n view=VersionDownload.as_view(),\n name='version-download'),\n url(regex='^(?P<project_slug>[\\w-]+)/version/(?P<slug>[\\w.-]+)/gnu/$',\n view=VersionDownloadGnu.as_view(),\n name='version-download-gnu'),\n\n # Changelog entry management\n url(regex='^(?P<project_slug>[\\w-]+)/(?P<version_slug>[\\w.-]+)/'\n 'pending-entry/list/$',\n view=PendingEntryListView.as_view(),\n name='pending-entry-list'),\n url(regex='^entry/approve/(?P<pk>\\d+)$',\n view=ApproveEntryView.as_view(),\n name='entry-approve'),\n url(regex='^(?P<project_slug>[\\w-]+)/(?P<version_slug>[\\w'\n '.-]+)/entry/list/$',\n view=EntryListView.as_view(),\n name='entry-list'),\n url(regex='^entry/(?P<pk>\\d+)$',\n view=EntryDetailView.as_view(),\n name='entry-detail'),\n url(regex='^entry/delete/(?P<pk>\\d+)$',\n view=EntryDeleteView.as_view(),\n name='entry-delete'),\n url(regex='^(?P<project_slug>[\\w-]+)/(?P<version_slug>[\\w.-]+)/'\n 'create-entry/$',\n view=EntryCreateView.as_view(),\n name='entry-create'),\n url(regex='^entry/update/(?P<pk>\\d+)$',\n view=EntryUpdateView.as_view(),\n name='entry-update'),\n\n # Feeds\n url(regex='^(?P<project_slug>[\\w-]+)/rss/latest-version/$',\n view=RssVersionFeed(),\n name='latest-version-rss-feed'),\n url(regex='^(?P<project_slug>[\\w-]+)/atom/latest-version/$',\n view=AtomVersionFeed(),\n name='latest-version-atom-feed'),\n url(regex='^(?P<project_slug>[\\w-]+)/rss/latest-entry/$',\n view=RssEntryFeed(),\n name='latest-entry-rss-feed'),\n url(regex='^(?P<project_slug>[\\w-]+)/atom/latest-entry/$',\n view=AtomEntryFeed(),\n name='latest-entry-atom-feed'),\n\n # Feeds specific version and 
projects\n url(regex='^(?P<project_slug>[\\w-]+)/version/(?P<version_slug>['\n '\\w.-]+)/rss$',\n view=RssEntryFeed(),\n name='entry-rss-feed'),\n url(regex='^(?P<project_slug>[\\w-]+)/version/(?P<version_slug>['\n '\\w.-]+)/atom$',\n view=AtomEntryFeed(),\n name='entry-atom-feed'),\n\n # User map\n # url(r'^user-map/', include('user_map.urls')),\n\n # Sponsor management\n\n # This view is only accessible via ajax\n url(regex='^json-sponsor/list/(?P<version>\\d+)/$',\n view=JSONSponsorListView.as_view(),\n name='json-sponsor-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/pending-sponsor/list/$',\n view=PendingSponsorListView.as_view(),\n name='pending-sponsor-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/approve-sponsor/(?P<slug>[\\w-]+)/$',\n view=ApproveSponsorView.as_view(),\n name='sponsor-approve'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsor/list/$',\n view=SponsorListView.as_view(),\n name='sponsor-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsor/world-map/$',\n view=SponsorWorldMapView.as_view(),\n name='sponsor-world-map'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsor/(?P<slug>[\\w-]+)/$',\n view=SponsorDetailView.as_view(),\n name='sponsor-detail'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsor/(?P<slug>[\\w-]+)/delete/$',\n view=SponsorDeleteView.as_view(),\n name='sponsor-delete'),\n url(regex='^(?P<project_slug>[\\w-]+)/create-sponsor/$',\n view=SponsorCreateView.as_view(),\n name='sponsor-create'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsor/(?P<slug>[\\w-]+)/update/$',\n view=SponsorUpdateView.as_view(),\n name='sponsor-update'),\n\n # Sponsorship Level management\n\n # This view is only accessible via ajax\n url(regex='^json-sponsorshiplevel/list/(?P<version>\\d+)/$',\n view=JSONSponsorshipLevelListView.as_view(),\n name='json-sponsorshiplevel-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/pending-sponsorshiplevel/list/$',\n view=PendingSponsorshipLevelListView.as_view(),\n name='pending-sponsorshiplevel-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/approve-sponsorshiplevel/(?P<slug>[\\w-]+)/$',\n view=ApproveSponsorshipLevelView.as_view(),\n name='sponsorshiplevel-approve'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsorshiplevel/list/$',\n view=SponsorshipLevelListView.as_view(),\n name='sponsorshiplevel-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsorshiplevel/(?P<slug>[\\w-]+)/$',\n view=SponsorshipLevelDetailView.as_view(),\n name='sponsorshiplevel-detail'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsorshiplevel/(?P<slug>[\\w-]+)/delete/$',\n view=SponsorshipLevelDeleteView.as_view(),\n name='sponsorshiplevel-delete'),\n url(regex='^(?P<project_slug>[\\w-]+)/create-sponsorshiplevel/$',\n view=SponsorshipLevelCreateView.as_view(),\n name='sponsorshiplevel-create'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsorshiplevel/(?P<slug>[\\w-]+)/update/$',\n view=SponsorshipLevelUpdateView.as_view(),\n name='sponsorshiplevel-update'),\n\n # Sponsorship Period management\n\n # This view is only accessible via ajax\n url(regex='^json-sponsorshipperiod/list/(?P<version>\\d+)/$',\n view=JSONSponsorshipPeriodListView.as_view(),\n name='json-sponsorshipperiod-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/pending-sponsorshipperiod/list/$',\n view=PendingSponsorshipPeriodListView.as_view(),\n name='pending-sponsorshipperiod-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/approve-sponsorshipperiod/(?P<slug>[\\w-]+)/$',\n view=ApproveSponsorshipPeriodView.as_view(),\n name='sponsorshipperiod-approve'),\n 
url(regex='^(?P<project_slug>[\\w-]+)/sponsorshipperiod/list/$',\n view=SponsorshipPeriodListView.as_view(),\n name='sponsorshipperiod-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsorshipperiod/(?P<slug>[\\w-]+)/$',\n view=SponsorshipPeriodDetailView.as_view(),\n name='sponsorshipperiod-detail'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsorshipperiod/(?P<slug>[\\w-]+)/delete/$',\n view=SponsorshipPeriodDeleteView.as_view(),\n name='sponsorshipperiod-delete'),\n url(regex='^(?P<project_slug>[\\w-]+)/create-sponsorshipperiod/$',\n view=SponsorshipPeriodCreateView.as_view(),\n name='sponsorshipperiod-create'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsorshipperiod/(?P<slug>[\\w-]+)/update/$',\n view=SponsorshipPeriodUpdateView.as_view(),\n name='sponsorshipperiod-update'),\n)\n\n\nif settings.DEBUG:\n # static files (images, css, javascript, etc.)\n urlpatterns += patterns(\n '',\n (r'^media/(?P<path>.*)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT}))\n", "path": "django_project/changes/urls.py"}], "after_files": [{"content": "# coding=utf-8\n# flake8: noqa\n\"\"\"Urls for changelog application.\"\"\"\n\nfrom django.conf.urls import patterns, url, include # noqa\n\nfrom django.conf import settings\n\nfrom feeds.version import RssVersionFeed, AtomVersionFeed\nfrom feeds.entry import RssEntryFeed, AtomEntryFeed\nfrom views import (\n # Category\n CategoryDetailView,\n CategoryDeleteView,\n CategoryCreateView,\n CategoryListView,\n JSONCategoryListView,\n CategoryUpdateView,\n PendingCategoryListView,\n ApproveCategoryView,\n # Version\n VersionMarkdownView,\n VersionDetailView,\n VersionThumbnailView,\n VersionDeleteView,\n VersionCreateView,\n VersionListView,\n VersionUpdateView,\n PendingVersionListView,\n ApproveVersionView,\n VersionDownload,\n VersionDownloadGnu,\n # Entry\n EntryDetailView,\n EntryDeleteView,\n EntryCreateView,\n EntryListView,\n EntryUpdateView,\n PendingEntryListView,\n ApproveEntryView,\n # Sponsor\n SponsorDetailView,\n SponsorDeleteView,\n SponsorCreateView,\n SponsorListView,\n SponsorWorldMapView,\n JSONSponsorListView,\n SponsorUpdateView,\n PendingSponsorListView,\n ApproveSponsorView,\n\n # Sponsorship Level\n\n SponsorshipLevelDetailView,\n SponsorshipLevelDeleteView,\n SponsorshipLevelCreateView,\n SponsorshipLevelListView,\n JSONSponsorshipLevelListView,\n SponsorshipLevelUpdateView,\n PendingSponsorshipLevelListView,\n ApproveSponsorshipLevelView,\n\n # Sponsorship Period\n\n SponsorshipPeriodDetailView,\n SponsorshipPeriodDeleteView,\n SponsorshipPeriodCreateView,\n SponsorshipPeriodListView,\n JSONSponsorshipPeriodListView,\n SponsorshipPeriodUpdateView,\n PendingSponsorshipPeriodListView,\n ApproveSponsorshipPeriodView,\n)\n\nurlpatterns = patterns(\n '',\n # Category management\n\n # This view is only accessible via ajax\n url(regex='^json-category/list/(?P<version>\\d+)/$',\n view=JSONCategoryListView.as_view(),\n name='json-category-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/pending-category/list/$',\n view=PendingCategoryListView.as_view(),\n name='pending-category-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/approve-category/(?P<slug>[\\w-]+)/$',\n view=ApproveCategoryView.as_view(),\n name='category-approve'),\n url(regex='^(?P<project_slug>[\\w-]+)/category/list/$',\n view=CategoryListView.as_view(),\n name='category-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/category/(?P<slug>[\\w-]+)/$',\n view=CategoryDetailView.as_view(),\n name='category-detail'),\n 
url(regex='^(?P<project_slug>[\\w-]+)/category/(?P<slug>[\\w-]+)/delete/$',\n view=CategoryDeleteView.as_view(),\n name='category-delete'),\n url(regex='^(?P<project_slug>[\\w-]+)/create-category/$',\n view=CategoryCreateView.as_view(),\n name='category-create'),\n url(regex='^(?P<project_slug>[\\w-]+)/category/(?P<slug>[\\w-]+)/update/$',\n view=CategoryUpdateView.as_view(),\n name='category-update'),\n\n # Version management\n url(regex='^(?P<project_slug>[\\w-]+)/pending-versions/list/$',\n view=PendingVersionListView.as_view(),\n name='pending-version-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/version/(?P<slug>[\\w.-]+)/approve/$',\n view=ApproveVersionView.as_view(),\n name='version-approve'),\n url(regex='^(?P<project_slug>[\\w-]+)/version/list/$',\n view=VersionListView.as_view(),\n name='version-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/version/(?P<slug>[\\w.-]+)/markdown/$',\n view=VersionMarkdownView.as_view(),\n name='version-markdown'),\n url(regex='^(?P<project_slug>[\\w-]+)/version/(?P<slug>[\\w.-]+)/$',\n view=VersionDetailView.as_view(),\n name='version-detail'),\n url(regex='^(?P<project_slug>[\\w-]+)/version/(?P<slug>[\\w.-]+)/thumbs/$',\n view=VersionThumbnailView.as_view(),\n name='version-thumbs'),\n url(regex='^(?P<project_slug>[\\w-]+)/version/(?P<slug>[\\w.-]+)/delete/$',\n view=VersionDeleteView.as_view(),\n name='version-delete'),\n url(regex='^(?P<project_slug>[\\w-]+)/create-version/$',\n view=VersionCreateView.as_view(),\n name='version-create'),\n url(regex='^(?P<project_slug>[\\w-]+)/version/(?P<slug>[\\w.-]+)/update/$',\n view=VersionUpdateView.as_view(),\n name='version-update'),\n url(regex='^(?P<project_slug>[\\w-]+)/version/(?P<slug>[\\w.-]+)/download/$',\n view=VersionDownload.as_view(),\n name='version-download'),\n url(regex='^(?P<project_slug>[\\w-]+)/version/(?P<slug>[\\w.-]+)/gnu/$',\n view=VersionDownloadGnu.as_view(),\n name='version-download-gnu'),\n\n # Changelog entry management\n url(regex='^(?P<project_slug>[\\w-]+)/(?P<version_slug>[\\w.-]+)/'\n 'pending-entry/list/$',\n view=PendingEntryListView.as_view(),\n name='pending-entry-list'),\n url(regex='^entry/approve/(?P<pk>\\d+)$',\n view=ApproveEntryView.as_view(),\n name='entry-approve'),\n url(regex='^(?P<project_slug>[\\w-]+)/(?P<version_slug>[\\w'\n '.-]+)/entry/list/$',\n view=EntryListView.as_view(),\n name='entry-list'),\n url(regex='^entry/(?P<pk>\\d+)$',\n view=EntryDetailView.as_view(),\n name='entry-detail'),\n url(regex='^entry/delete/(?P<pk>\\d+)$',\n view=EntryDeleteView.as_view(),\n name='entry-delete'),\n url(regex='^(?P<project_slug>[\\w-]+)/(?P<version_slug>[\\w.-]+)/'\n 'create-entry/$',\n view=EntryCreateView.as_view(),\n name='entry-create'),\n url(regex='^entry/update/(?P<pk>\\d+)$',\n view=EntryUpdateView.as_view(),\n name='entry-update'),\n\n # Feeds\n url(regex='^(?P<project_slug>[\\w-]+)/rss/latest-version/$',\n view=RssVersionFeed(),\n name='latest-version-rss-feed'),\n url(regex='^(?P<project_slug>[\\w-]+)/atom/latest-version/$',\n view=AtomVersionFeed(),\n name='latest-version-atom-feed'),\n url(regex='^(?P<project_slug>[\\w-]+)/rss/latest-entry/$',\n view=RssEntryFeed(),\n name='latest-entry-rss-feed'),\n url(regex='^(?P<project_slug>[\\w-]+)/atom/latest-entry/$',\n view=AtomEntryFeed(),\n name='latest-entry-atom-feed'),\n\n # Feeds specific version and projects\n url(regex='^(?P<project_slug>[\\w-]+)/version/(?P<version_slug>['\n '\\w.-]+)/rss$',\n view=RssEntryFeed(),\n name='entry-rss-feed'),\n 
url(regex='^(?P<project_slug>[\\w-]+)/version/(?P<version_slug>['\n '\\w.-]+)/atom$',\n view=AtomEntryFeed(),\n name='entry-atom-feed'),\n\n # User map\n # url(r'^user-map/', include('user_map.urls')),\n\n # Sponsor management\n\n # This view is only accessible via ajax\n url(regex='^json-sponsor/list/(?P<version>\\d+)/$',\n view=JSONSponsorListView.as_view(),\n name='json-sponsor-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/pending-sponsors/list/$',\n view=PendingSponsorListView.as_view(),\n name='pending-sponsor-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/approve-sponsor/(?P<slug>[\\w-]+)/$',\n view=ApproveSponsorView.as_view(),\n name='sponsor-approve'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsors/list/$',\n view=SponsorListView.as_view(),\n name='sponsor-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsors/world-map/$',\n view=SponsorWorldMapView.as_view(),\n name='sponsor-world-map'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsor/(?P<slug>[\\w-]+)/$',\n view=SponsorDetailView.as_view(),\n name='sponsor-detail'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsor/(?P<slug>[\\w-]+)/delete/$',\n view=SponsorDeleteView.as_view(),\n name='sponsor-delete'),\n url(regex='^(?P<project_slug>[\\w-]+)/create-sponsor/$',\n view=SponsorCreateView.as_view(),\n name='sponsor-create'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsor/(?P<slug>[\\w-]+)/update/$',\n view=SponsorUpdateView.as_view(),\n name='sponsor-update'),\n\n # Sponsorship Level management\n\n # This view is only accessible via ajax\n url(regex='^json-sponsorshiplevel/list/(?P<version>\\d+)/$',\n view=JSONSponsorshipLevelListView.as_view(),\n name='json-sponsorshiplevel-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/pending-sponsorshiplevel/list/$',\n view=PendingSponsorshipLevelListView.as_view(),\n name='pending-sponsorshiplevel-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/approve-sponsorshiplevel/(?P<slug>[\\w-]+)/$',\n view=ApproveSponsorshipLevelView.as_view(),\n name='sponsorshiplevel-approve'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsorshiplevel/list/$',\n view=SponsorshipLevelListView.as_view(),\n name='sponsorshiplevel-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsorshiplevel/(?P<slug>[\\w-]+)/$',\n view=SponsorshipLevelDetailView.as_view(),\n name='sponsorshiplevel-detail'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsorshiplevel/(?P<slug>[\\w-]+)/delete/$',\n view=SponsorshipLevelDeleteView.as_view(),\n name='sponsorshiplevel-delete'),\n url(regex='^(?P<project_slug>[\\w-]+)/create-sponsorshiplevel/$',\n view=SponsorshipLevelCreateView.as_view(),\n name='sponsorshiplevel-create'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsorshiplevel/(?P<slug>[\\w-]+)/update/$',\n view=SponsorshipLevelUpdateView.as_view(),\n name='sponsorshiplevel-update'),\n\n # Sponsorship Period management\n\n # This view is only accessible via ajax\n url(regex='^json-sponsorshipperiod/list/(?P<version>\\d+)/$',\n view=JSONSponsorshipPeriodListView.as_view(),\n name='json-sponsorshipperiod-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/pending-sponsorshipperiod/list/$',\n view=PendingSponsorshipPeriodListView.as_view(),\n name='pending-sponsorshipperiod-list'),\n url(regex='^(?P<project_slug>[\\w-]+)/approve-sponsorshipperiod/(?P<slug>[\\w-]+)/$',\n view=ApproveSponsorshipPeriodView.as_view(),\n name='sponsorshipperiod-approve'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsorshipperiod/list/$',\n view=SponsorshipPeriodListView.as_view(),\n name='sponsorshipperiod-list'),\n 
url(regex='^(?P<project_slug>[\\w-]+)/sponsorshipperiod/(?P<slug>[\\w-]+)/$',\n view=SponsorshipPeriodDetailView.as_view(),\n name='sponsorshipperiod-detail'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsorshipperiod/(?P<slug>[\\w-]+)/delete/$',\n view=SponsorshipPeriodDeleteView.as_view(),\n name='sponsorshipperiod-delete'),\n url(regex='^(?P<project_slug>[\\w-]+)/create-sponsorshipperiod/$',\n view=SponsorshipPeriodCreateView.as_view(),\n name='sponsorshipperiod-create'),\n url(regex='^(?P<project_slug>[\\w-]+)/sponsorshipperiod/(?P<slug>[\\w-]+)/update/$',\n view=SponsorshipPeriodUpdateView.as_view(),\n name='sponsorshipperiod-update'),\n)\n\n\nif settings.DEBUG:\n # static files (images, css, javascript, etc.)\n urlpatterns += patterns(\n '',\n (r'^media/(?P<path>.*)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT}))\n", "path": "django_project/changes/urls.py"}]}
| 3,979 | 346 |
gh_patches_debug_23820
|
rasdani/github-patches
|
git_diff
|
mesonbuild__meson-5602
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
windres module doesn't flatten arguments
```meson
import('windows').compile_resources('file.rc', args : [['-DFOO'], '-DBAR'])
```
results in
```
ERROR: List item must be one of <class 'str'>
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mesonbuild/modules/windows.py`
Content:
```
1 # Copyright 2015 The Meson development team
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import enum
16 import os
17 import re
18
19 from .. import mlog
20 from .. import mesonlib, build
21 from ..mesonlib import MachineChoice, MesonException, extract_as_list
22 from . import get_include_args
23 from . import ModuleReturnValue
24 from . import ExtensionModule
25 from ..interpreter import CustomTargetHolder
26 from ..interpreterbase import permittedKwargs, FeatureNewKwargs
27 from ..dependencies import ExternalProgram
28
29 class ResourceCompilerType(enum.Enum):
30 windres = 1
31 rc = 2
32
33 class WindowsModule(ExtensionModule):
34
35 def detect_compiler(self, compilers):
36 for l in ('c', 'cpp'):
37 if l in compilers:
38 return compilers[l]
39 raise MesonException('Resource compilation requires a C or C++ compiler.')
40
41 def _find_resource_compiler(self, state):
42 # FIXME: Does not handle `native: true` executables, see
43 # See https://github.com/mesonbuild/meson/issues/1531
44 # Take a parameter instead of the hardcoded definition below
45 for_machine = MachineChoice.HOST
46
47 if hasattr(self, '_rescomp'):
48 return self._rescomp
49
50 # Will try cross / native file and then env var
51 rescomp = ExternalProgram.from_bin_list(state.environment.binaries[for_machine], 'windres')
52
53 if not rescomp or not rescomp.found():
54 comp = self.detect_compiler(state.environment.coredata.compilers[for_machine])
55 if comp.id in {'msvc', 'clang-cl', 'intel-cl'}:
56 rescomp = ExternalProgram('rc', silent=True)
57 else:
58 rescomp = ExternalProgram('windres', silent=True)
59
60 if not rescomp.found():
61 raise MesonException('Could not find Windows resource compiler')
62
63 for (arg, match, rc_type) in [
64 ('/?', '^.*Microsoft.*Resource Compiler.*$', ResourceCompilerType.rc),
65 ('--version', '^.*GNU windres.*$', ResourceCompilerType.windres),
66 ]:
67 p, o, e = mesonlib.Popen_safe(rescomp.get_command() + [arg])
68 m = re.search(match, o, re.MULTILINE)
69 if m:
70 mlog.log('Windows resource compiler: %s' % m.group())
71 self._rescomp = (rescomp, rc_type)
72 break
73 else:
74 raise MesonException('Could not determine type of Windows resource compiler')
75
76 return self._rescomp
77
78 @FeatureNewKwargs('windows.compile_resources', '0.47.0', ['depend_files', 'depends'])
79 @permittedKwargs({'args', 'include_directories', 'depend_files', 'depends'})
80 def compile_resources(self, state, args, kwargs):
81 extra_args = mesonlib.stringlistify(kwargs.get('args', []))
82 wrc_depend_files = extract_as_list(kwargs, 'depend_files', pop = True)
83 wrc_depends = extract_as_list(kwargs, 'depends', pop = True)
84 for d in wrc_depends:
85 if isinstance(d, CustomTargetHolder):
86 extra_args += get_include_args([d.outdir_include()])
87 inc_dirs = extract_as_list(kwargs, 'include_directories', pop = True)
88 for incd in inc_dirs:
89 if not isinstance(incd.held_object, (str, build.IncludeDirs)):
90 raise MesonException('Resource include dirs should be include_directories().')
91 extra_args += get_include_args(inc_dirs)
92
93 rescomp, rescomp_type = self._find_resource_compiler(state)
94 if rescomp_type == ResourceCompilerType.rc:
95 # RC is used to generate .res files, a special binary resource
96 # format, which can be passed directly to LINK (apparently LINK uses
97 # CVTRES internally to convert this to a COFF object)
98 suffix = 'res'
99 res_args = extra_args + ['/nologo', '/fo@OUTPUT@', '@INPUT@']
100 else:
101 # ld only supports object files, so windres is used to generate a
102 # COFF object
103 suffix = 'o'
104 res_args = extra_args + ['@INPUT@', '@OUTPUT@']
105
106 m = 'Argument {!r} has a space which may not work with windres due to ' \
107 'a MinGW bug: https://sourceware.org/bugzilla/show_bug.cgi?id=4933'
108 for arg in extra_args:
109 if ' ' in arg:
110 mlog.warning(m.format(arg))
111
112 res_targets = []
113
114 def add_target(src):
115 if isinstance(src, list):
116 for subsrc in src:
117 add_target(subsrc)
118 return
119
120 if hasattr(src, 'held_object'):
121 src = src.held_object
122
123 if isinstance(src, str):
124 name_format = 'file {!r}'
125 name = os.path.join(state.subdir, src)
126 elif isinstance(src, mesonlib.File):
127 name_format = 'file {!r}'
128 name = src.relative_name()
129 elif isinstance(src, build.CustomTarget):
130 if len(src.get_outputs()) > 1:
131 raise MesonException('windows.compile_resources does not accept custom targets with more than 1 output.')
132
133 name_format = 'target {!r}'
134 name = src.get_id()
135 else:
136 raise MesonException('Unexpected source type {!r}. windows.compile_resources accepts only strings, files, custom targets, and lists thereof.'.format(src))
137
138 # Path separators are not allowed in target names
139 name = name.replace('/', '_').replace('\\', '_')
140
141 res_kwargs = {
142 'output': name + '_@BASENAME@.' + suffix,
143 'input': [src],
144 'command': [rescomp] + res_args,
145 'depend_files': wrc_depend_files,
146 'depends': wrc_depends,
147 }
148
149 # instruct binutils windres to generate a preprocessor depfile
150 if rescomp_type == ResourceCompilerType.windres:
151 res_kwargs['depfile'] = res_kwargs['output'] + '.d'
152 res_kwargs['command'] += ['--preprocessor-arg=-MD', '--preprocessor-arg=-MQ@OUTPUT@', '--preprocessor-arg=-MF@DEPFILE@']
153
154 res_targets.append(build.CustomTarget('Windows resource for ' + name_format.format(name), state.subdir, state.subproject, res_kwargs))
155
156 add_target(args)
157
158 return ModuleReturnValue(res_targets, [res_targets])
159
160 def initialize(*args, **kwargs):
161 return WindowsModule(*args, **kwargs)
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mesonbuild/modules/windows.py b/mesonbuild/modules/windows.py
--- a/mesonbuild/modules/windows.py
+++ b/mesonbuild/modules/windows.py
@@ -23,7 +23,7 @@
from . import ModuleReturnValue
from . import ExtensionModule
from ..interpreter import CustomTargetHolder
-from ..interpreterbase import permittedKwargs, FeatureNewKwargs
+from ..interpreterbase import permittedKwargs, FeatureNewKwargs, flatten
from ..dependencies import ExternalProgram
class ResourceCompilerType(enum.Enum):
@@ -78,7 +78,7 @@
@FeatureNewKwargs('windows.compile_resources', '0.47.0', ['depend_files', 'depends'])
@permittedKwargs({'args', 'include_directories', 'depend_files', 'depends'})
def compile_resources(self, state, args, kwargs):
- extra_args = mesonlib.stringlistify(kwargs.get('args', []))
+ extra_args = mesonlib.stringlistify(flatten(kwargs.get('args', [])))
wrc_depend_files = extract_as_list(kwargs, 'depend_files', pop = True)
wrc_depends = extract_as_list(kwargs, 'depends', pop = True)
for d in wrc_depends:
|
{"golden_diff": "diff --git a/mesonbuild/modules/windows.py b/mesonbuild/modules/windows.py\n--- a/mesonbuild/modules/windows.py\n+++ b/mesonbuild/modules/windows.py\n@@ -23,7 +23,7 @@\n from . import ModuleReturnValue\n from . import ExtensionModule\n from ..interpreter import CustomTargetHolder\n-from ..interpreterbase import permittedKwargs, FeatureNewKwargs\n+from ..interpreterbase import permittedKwargs, FeatureNewKwargs, flatten\n from ..dependencies import ExternalProgram\n \n class ResourceCompilerType(enum.Enum):\n@@ -78,7 +78,7 @@\n @FeatureNewKwargs('windows.compile_resources', '0.47.0', ['depend_files', 'depends'])\n @permittedKwargs({'args', 'include_directories', 'depend_files', 'depends'})\n def compile_resources(self, state, args, kwargs):\n- extra_args = mesonlib.stringlistify(kwargs.get('args', []))\n+ extra_args = mesonlib.stringlistify(flatten(kwargs.get('args', [])))\n wrc_depend_files = extract_as_list(kwargs, 'depend_files', pop = True)\n wrc_depends = extract_as_list(kwargs, 'depends', pop = True)\n for d in wrc_depends:\n", "issue": "windres module doesn't flatten arguments\n```meson\r\nimport('windows').compile_resources('file.rc', args : [[-DFOO'], '-DBAR])\r\n```\r\nresults in\r\n```\r\nERROR: List item must be one of <class 'str'>\r\n```\n", "before_files": [{"content": "# Copyright 2015 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport enum\nimport os\nimport re\n\nfrom .. import mlog\nfrom .. import mesonlib, build\nfrom ..mesonlib import MachineChoice, MesonException, extract_as_list\nfrom . import get_include_args\nfrom . import ModuleReturnValue\nfrom . 
import ExtensionModule\nfrom ..interpreter import CustomTargetHolder\nfrom ..interpreterbase import permittedKwargs, FeatureNewKwargs\nfrom ..dependencies import ExternalProgram\n\nclass ResourceCompilerType(enum.Enum):\n windres = 1\n rc = 2\n\nclass WindowsModule(ExtensionModule):\n\n def detect_compiler(self, compilers):\n for l in ('c', 'cpp'):\n if l in compilers:\n return compilers[l]\n raise MesonException('Resource compilation requires a C or C++ compiler.')\n\n def _find_resource_compiler(self, state):\n # FIXME: Does not handle `native: true` executables, see\n # See https://github.com/mesonbuild/meson/issues/1531\n # Take a parameter instead of the hardcoded definition below\n for_machine = MachineChoice.HOST\n\n if hasattr(self, '_rescomp'):\n return self._rescomp\n\n # Will try cross / native file and then env var\n rescomp = ExternalProgram.from_bin_list(state.environment.binaries[for_machine], 'windres')\n\n if not rescomp or not rescomp.found():\n comp = self.detect_compiler(state.environment.coredata.compilers[for_machine])\n if comp.id in {'msvc', 'clang-cl', 'intel-cl'}:\n rescomp = ExternalProgram('rc', silent=True)\n else:\n rescomp = ExternalProgram('windres', silent=True)\n\n if not rescomp.found():\n raise MesonException('Could not find Windows resource compiler')\n\n for (arg, match, rc_type) in [\n ('/?', '^.*Microsoft.*Resource Compiler.*$', ResourceCompilerType.rc),\n ('--version', '^.*GNU windres.*$', ResourceCompilerType.windres),\n ]:\n p, o, e = mesonlib.Popen_safe(rescomp.get_command() + [arg])\n m = re.search(match, o, re.MULTILINE)\n if m:\n mlog.log('Windows resource compiler: %s' % m.group())\n self._rescomp = (rescomp, rc_type)\n break\n else:\n raise MesonException('Could not determine type of Windows resource compiler')\n\n return self._rescomp\n\n @FeatureNewKwargs('windows.compile_resources', '0.47.0', ['depend_files', 'depends'])\n @permittedKwargs({'args', 'include_directories', 'depend_files', 'depends'})\n def compile_resources(self, state, args, kwargs):\n extra_args = mesonlib.stringlistify(kwargs.get('args', []))\n wrc_depend_files = extract_as_list(kwargs, 'depend_files', pop = True)\n wrc_depends = extract_as_list(kwargs, 'depends', pop = True)\n for d in wrc_depends:\n if isinstance(d, CustomTargetHolder):\n extra_args += get_include_args([d.outdir_include()])\n inc_dirs = extract_as_list(kwargs, 'include_directories', pop = True)\n for incd in inc_dirs:\n if not isinstance(incd.held_object, (str, build.IncludeDirs)):\n raise MesonException('Resource include dirs should be include_directories().')\n extra_args += get_include_args(inc_dirs)\n\n rescomp, rescomp_type = self._find_resource_compiler(state)\n if rescomp_type == ResourceCompilerType.rc:\n # RC is used to generate .res files, a special binary resource\n # format, which can be passed directly to LINK (apparently LINK uses\n # CVTRES internally to convert this to a COFF object)\n suffix = 'res'\n res_args = extra_args + ['/nologo', '/fo@OUTPUT@', '@INPUT@']\n else:\n # ld only supports object files, so windres is used to generate a\n # COFF object\n suffix = 'o'\n res_args = extra_args + ['@INPUT@', '@OUTPUT@']\n\n m = 'Argument {!r} has a space which may not work with windres due to ' \\\n 'a MinGW bug: https://sourceware.org/bugzilla/show_bug.cgi?id=4933'\n for arg in extra_args:\n if ' ' in arg:\n mlog.warning(m.format(arg))\n\n res_targets = []\n\n def add_target(src):\n if isinstance(src, list):\n for subsrc in src:\n add_target(subsrc)\n return\n\n if hasattr(src, 
'held_object'):\n src = src.held_object\n\n if isinstance(src, str):\n name_format = 'file {!r}'\n name = os.path.join(state.subdir, src)\n elif isinstance(src, mesonlib.File):\n name_format = 'file {!r}'\n name = src.relative_name()\n elif isinstance(src, build.CustomTarget):\n if len(src.get_outputs()) > 1:\n raise MesonException('windows.compile_resources does not accept custom targets with more than 1 output.')\n\n name_format = 'target {!r}'\n name = src.get_id()\n else:\n raise MesonException('Unexpected source type {!r}. windows.compile_resources accepts only strings, files, custom targets, and lists thereof.'.format(src))\n\n # Path separators are not allowed in target names\n name = name.replace('/', '_').replace('\\\\', '_')\n\n res_kwargs = {\n 'output': name + '_@BASENAME@.' + suffix,\n 'input': [src],\n 'command': [rescomp] + res_args,\n 'depend_files': wrc_depend_files,\n 'depends': wrc_depends,\n }\n\n # instruct binutils windres to generate a preprocessor depfile\n if rescomp_type == ResourceCompilerType.windres:\n res_kwargs['depfile'] = res_kwargs['output'] + '.d'\n res_kwargs['command'] += ['--preprocessor-arg=-MD', '--preprocessor-arg=-MQ@OUTPUT@', '--preprocessor-arg=-MF@DEPFILE@']\n\n res_targets.append(build.CustomTarget('Windows resource for ' + name_format.format(name), state.subdir, state.subproject, res_kwargs))\n\n add_target(args)\n\n return ModuleReturnValue(res_targets, [res_targets])\n\ndef initialize(*args, **kwargs):\n return WindowsModule(*args, **kwargs)\n", "path": "mesonbuild/modules/windows.py"}], "after_files": [{"content": "# Copyright 2015 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport enum\nimport os\nimport re\n\nfrom .. import mlog\nfrom .. import mesonlib, build\nfrom ..mesonlib import MachineChoice, MesonException, extract_as_list\nfrom . import get_include_args\nfrom . import ModuleReturnValue\nfrom . 
import ExtensionModule\nfrom ..interpreter import CustomTargetHolder\nfrom ..interpreterbase import permittedKwargs, FeatureNewKwargs, flatten\nfrom ..dependencies import ExternalProgram\n\nclass ResourceCompilerType(enum.Enum):\n windres = 1\n rc = 2\n\nclass WindowsModule(ExtensionModule):\n\n def detect_compiler(self, compilers):\n for l in ('c', 'cpp'):\n if l in compilers:\n return compilers[l]\n raise MesonException('Resource compilation requires a C or C++ compiler.')\n\n def _find_resource_compiler(self, state):\n # FIXME: Does not handle `native: true` executables, see\n # See https://github.com/mesonbuild/meson/issues/1531\n # Take a parameter instead of the hardcoded definition below\n for_machine = MachineChoice.HOST\n\n if hasattr(self, '_rescomp'):\n return self._rescomp\n\n # Will try cross / native file and then env var\n rescomp = ExternalProgram.from_bin_list(state.environment.binaries[for_machine], 'windres')\n\n if not rescomp or not rescomp.found():\n comp = self.detect_compiler(state.environment.coredata.compilers[for_machine])\n if comp.id in {'msvc', 'clang-cl', 'intel-cl'}:\n rescomp = ExternalProgram('rc', silent=True)\n else:\n rescomp = ExternalProgram('windres', silent=True)\n\n if not rescomp.found():\n raise MesonException('Could not find Windows resource compiler')\n\n for (arg, match, rc_type) in [\n ('/?', '^.*Microsoft.*Resource Compiler.*$', ResourceCompilerType.rc),\n ('--version', '^.*GNU windres.*$', ResourceCompilerType.windres),\n ]:\n p, o, e = mesonlib.Popen_safe(rescomp.get_command() + [arg])\n m = re.search(match, o, re.MULTILINE)\n if m:\n mlog.log('Windows resource compiler: %s' % m.group())\n self._rescomp = (rescomp, rc_type)\n break\n else:\n raise MesonException('Could not determine type of Windows resource compiler')\n\n return self._rescomp\n\n @FeatureNewKwargs('windows.compile_resources', '0.47.0', ['depend_files', 'depends'])\n @permittedKwargs({'args', 'include_directories', 'depend_files', 'depends'})\n def compile_resources(self, state, args, kwargs):\n extra_args = mesonlib.stringlistify(flatten(kwargs.get('args', [])))\n wrc_depend_files = extract_as_list(kwargs, 'depend_files', pop = True)\n wrc_depends = extract_as_list(kwargs, 'depends', pop = True)\n for d in wrc_depends:\n if isinstance(d, CustomTargetHolder):\n extra_args += get_include_args([d.outdir_include()])\n inc_dirs = extract_as_list(kwargs, 'include_directories', pop = True)\n for incd in inc_dirs:\n if not isinstance(incd.held_object, (str, build.IncludeDirs)):\n raise MesonException('Resource include dirs should be include_directories().')\n extra_args += get_include_args(inc_dirs)\n\n rescomp, rescomp_type = self._find_resource_compiler(state)\n if rescomp_type == ResourceCompilerType.rc:\n # RC is used to generate .res files, a special binary resource\n # format, which can be passed directly to LINK (apparently LINK uses\n # CVTRES internally to convert this to a COFF object)\n suffix = 'res'\n res_args = extra_args + ['/nologo', '/fo@OUTPUT@', '@INPUT@']\n else:\n # ld only supports object files, so windres is used to generate a\n # COFF object\n suffix = 'o'\n res_args = extra_args + ['@INPUT@', '@OUTPUT@']\n\n m = 'Argument {!r} has a space which may not work with windres due to ' \\\n 'a MinGW bug: https://sourceware.org/bugzilla/show_bug.cgi?id=4933'\n for arg in extra_args:\n if ' ' in arg:\n mlog.warning(m.format(arg))\n\n res_targets = []\n\n def add_target(src):\n if isinstance(src, list):\n for subsrc in src:\n add_target(subsrc)\n return\n\n if 
hasattr(src, 'held_object'):\n src = src.held_object\n\n if isinstance(src, str):\n name_format = 'file {!r}'\n name = os.path.join(state.subdir, src)\n elif isinstance(src, mesonlib.File):\n name_format = 'file {!r}'\n name = src.relative_name()\n elif isinstance(src, build.CustomTarget):\n if len(src.get_outputs()) > 1:\n raise MesonException('windows.compile_resources does not accept custom targets with more than 1 output.')\n\n name_format = 'target {!r}'\n name = src.get_id()\n else:\n raise MesonException('Unexpected source type {!r}. windows.compile_resources accepts only strings, files, custom targets, and lists thereof.'.format(src))\n\n # Path separators are not allowed in target names\n name = name.replace('/', '_').replace('\\\\', '_')\n\n res_kwargs = {\n 'output': name + '_@BASENAME@.' + suffix,\n 'input': [src],\n 'command': [rescomp] + res_args,\n 'depend_files': wrc_depend_files,\n 'depends': wrc_depends,\n }\n\n # instruct binutils windres to generate a preprocessor depfile\n if rescomp_type == ResourceCompilerType.windres:\n res_kwargs['depfile'] = res_kwargs['output'] + '.d'\n res_kwargs['command'] += ['--preprocessor-arg=-MD', '--preprocessor-arg=-MQ@OUTPUT@', '--preprocessor-arg=-MF@DEPFILE@']\n\n res_targets.append(build.CustomTarget('Windows resource for ' + name_format.format(name), state.subdir, state.subproject, res_kwargs))\n\n add_target(args)\n\n return ModuleReturnValue(res_targets, [res_targets])\n\ndef initialize(*args, **kwargs):\n return WindowsModule(*args, **kwargs)\n", "path": "mesonbuild/modules/windows.py"}]}
| 2,233 | 268 |
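Note on the patch above: the fix routes the `args` keyword through Meson's `flatten` helper before `stringlistify` validates that every element is a string, so nested lists such as `[['-DFOO'], '-DBAR']` no longer trip the type check. Below is a minimal standalone sketch of that flattening behaviour — a simplified illustration only; the patch itself imports Meson's own implementation from `..interpreterbase`.

```python
# Hypothetical simplified flatten(); the real helper is Meson's own,
# imported from ..interpreterbase in the golden diff above.
def flatten(args):
    if not isinstance(args, list):
        return [args]
    result = []
    for item in args:
        if isinstance(item, list):
            result.extend(flatten(item))  # recurse into nested lists
        else:
            result.append(item)
    return result

print(flatten([['-DFOO'], '-DBAR']))  # ['-DFOO', '-DBAR']
```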
gh_patches_debug_51474
|
rasdani/github-patches
|
git_diff
|
kivy__kivy-1926
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SoundLoader can't determine file types for URLs with URL parameters in them.
Kivy currently can't load audio files from URLs that have URL parameters in them (for example `https://audio.example.com/get/test.wav?dl=true&token=9a8s76f9a876`).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/core/audio/__init__.py`
Content:
```
1 '''
2 Audio
3 =====
4
5 Load an audio sound and play it with::
6
7 from kivy.core.audio import SoundLoader
8
9 sound = SoundLoader.load('mytest.wav')
10 if sound:
11 print("Sound found at %s" % sound.source)
12 print("Sound is %.3f seconds" % sound.length)
13 sound.play()
14
15 You should not use the Sound class directly. The class returned by
16 **SoundLoader.load** will be the best sound provider for that particular file
17 type, so it might return different Sound classes depending the file type.
18
19 .. versionchanged:: 1.8.0
20 There is now 2 distinct Gstreamer implementation: one using Gi/Gst working
21 for both Python 2+3 with Gstreamer 1.0, and one using PyGST working
22 only for Python 2 + Gstreamer 0.10.
23 If you have issue with GStreamer, have a look at
24 :ref:`gstreamer-compatibility`
25
26 .. note::
27
28 Recording audio is not supported.
29
30 '''
31
32 __all__ = ('Sound', 'SoundLoader')
33
34 from kivy.logger import Logger
35 from kivy.event import EventDispatcher
36 from kivy.core import core_register_libs
37 from kivy.compat import PY2
38 from kivy.resources import resource_find
39 from kivy.properties import StringProperty, NumericProperty, OptionProperty, \
40 AliasProperty, BooleanProperty
41
42
43 class SoundLoader:
44 '''Load a sound, using the best loader for the given file type.
45 '''
46
47 _classes = []
48
49 @staticmethod
50 def register(classobj):
51 '''Register a new class to load the sound.'''
52 Logger.debug('Audio: register %s' % classobj.__name__)
53 SoundLoader._classes.append(classobj)
54
55 @staticmethod
56 def load(filename):
57 '''Load a sound, and return a Sound() instance.'''
58 rfn = resource_find(filename)
59 if rfn is not None:
60 filename = rfn
61 ext = filename.split('.')[-1].lower()
62 for classobj in SoundLoader._classes:
63 if ext in classobj.extensions():
64 return classobj(source=filename)
65 Logger.warning('Audio: Unable to find a loader for <%s>' %
66 filename)
67 return None
68
69
70 class Sound(EventDispatcher):
71 '''Represents a sound to play. This class is abstract, and cannot be used
72 directly.
73
74 Use SoundLoader to load a sound.
75
76 :Events:
77 `on_play` : None
78 Fired when the sound is played.
79 `on_stop` : None
80 Fired when the sound is stopped.
81 '''
82
83 source = StringProperty(None)
84 '''Filename / source of your audio file.
85
86 .. versionadded:: 1.3.0
87
88 :attr:`source` is a :class:`~kivy.properties.StringProperty` that defaults
89 to None and is read-only. Use the :meth:`SoundLoader.load` for loading
90 audio.
91 '''
92
93 volume = NumericProperty(1.)
94 '''Volume, in the range 0-1. 1 means full volume, 0 means mute.
95
96 .. versionadded:: 1.3.0
97
98 :attr:`volume` is a :class:`~kivy.properties.NumericProperty` and defaults
99 to 1.
100 '''
101
102 state = OptionProperty('stop', options=('stop', 'play'))
103 '''State of the sound, one of 'stop' or 'play'.
104
105 .. versionadded:: 1.3.0
106
107 :attr:`state` is a read-only :class:`~kivy.properties.OptionProperty`.'''
108
109 loop = BooleanProperty(False)
110 '''Set to True if the sound should automatically loop when it finishes.
111
112 .. versionadded:: 1.8.0
113
114 :attr:`loop` is a :class:`~kivy.properties.BooleanProperty` and defaults to
115 False.'''
116
117 #
118 # deprecated
119 #
120 def _get_status(self):
121 return self.state
122 status = AliasProperty(_get_status, None, bind=('state', ))
123 '''
124 .. deprecated:: 1.3.0
125 Use :attr:`state` instead.
126 '''
127
128 def _get_filename(self):
129 return self.source
130 filename = AliasProperty(_get_filename, None, bind=('source', ))
131 '''
132 .. deprecated:: 1.3.0
133 Use :attr:`source` instead.
134 '''
135
136 __events__ = ('on_play', 'on_stop')
137
138 def on_source(self, instance, filename):
139 self.unload()
140 if filename is None:
141 return
142 self.load()
143
144 def get_pos(self):
145 '''
146 Returns the current position of the audio file.
147 Returns 0 if not playing.
148
149 .. versionadded:: 1.4.1
150 '''
151 return 0
152
153 def _get_length(self):
154 return 0
155
156 length = property(lambda self: self._get_length(),
157 doc='Get length of the sound (in seconds).')
158
159 def load(self):
160 '''Load the file into memory.'''
161 pass
162
163 def unload(self):
164 '''Unload the file from memory.'''
165 pass
166
167 def play(self):
168 '''Play the file.'''
169 self.state = 'play'
170 self.dispatch('on_play')
171
172 def stop(self):
173 '''Stop playback.'''
174 self.state = 'stop'
175 self.dispatch('on_stop')
176
177 def seek(self, position):
178 '''Go to the <position> (in seconds).'''
179 pass
180
181 def on_play(self):
182 pass
183
184 def on_stop(self):
185 pass
186
187
188 # Little trick here, don't activate gstreamer on window
189 # seem to have lot of crackle or something...
190 audio_libs = []
191
192 # from now on, prefer our gstplayer instead of gi/pygst.
193 try:
194 from kivy.lib.gstplayer import GstPlayer # NOQA
195 audio_libs += [('gstplayer', 'audio_gstplayer')]
196 except ImportError:
197 #audio_libs += [('gi', 'audio_gi')]
198 if PY2:
199 audio_libs += [('pygst', 'audio_pygst')]
200 audio_libs += [('sdl', 'audio_sdl')]
201 audio_libs += [('pygame', 'audio_pygame')]
202
203 core_register_libs('audio', audio_libs)
204
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kivy/core/audio/__init__.py b/kivy/core/audio/__init__.py
--- a/kivy/core/audio/__init__.py
+++ b/kivy/core/audio/__init__.py
@@ -59,6 +59,8 @@
if rfn is not None:
filename = rfn
ext = filename.split('.')[-1].lower()
+ if '?' in ext:
+ ext = ext.split('?')[0]
for classobj in SoundLoader._classes:
if ext in classobj.extensions():
return classobj(source=filename)
|
{"golden_diff": "diff --git a/kivy/core/audio/__init__.py b/kivy/core/audio/__init__.py\n--- a/kivy/core/audio/__init__.py\n+++ b/kivy/core/audio/__init__.py\n@@ -59,6 +59,8 @@\n if rfn is not None:\n filename = rfn\n ext = filename.split('.')[-1].lower()\n+ if '?' in ext:\n+ ext = ext.split('?')[0]\n for classobj in SoundLoader._classes:\n if ext in classobj.extensions():\n return classobj(source=filename)\n", "issue": "SoundLoader can't determine file types for URL's with URL parameters in them.\nKivy currently can't load audio files from URL's that have URL parameters in them (For example `https://audio.example.com/get/test.wav?dl=true&token=9a8s76f9a876`).\n\n", "before_files": [{"content": "'''\nAudio\n=====\n\nLoad an audio sound and play it with::\n\n from kivy.core.audio import SoundLoader\n\n sound = SoundLoader.load('mytest.wav')\n if sound:\n print(\"Sound found at %s\" % sound.source)\n print(\"Sound is %.3f seconds\" % sound.length)\n sound.play()\n\nYou should not use the Sound class directly. The class returned by\n**SoundLoader.load** will be the best sound provider for that particular file\ntype, so it might return different Sound classes depending the file type.\n\n.. versionchanged:: 1.8.0\n There is now 2 distinct Gstreamer implementation: one using Gi/Gst working\n for both Python 2+3 with Gstreamer 1.0, and one using PyGST working\n only for Python 2 + Gstreamer 0.10.\n If you have issue with GStreamer, have a look at\n :ref:`gstreamer-compatibility`\n\n.. note::\n\n Recording audio is not supported.\n\n'''\n\n__all__ = ('Sound', 'SoundLoader')\n\nfrom kivy.logger import Logger\nfrom kivy.event import EventDispatcher\nfrom kivy.core import core_register_libs\nfrom kivy.compat import PY2\nfrom kivy.resources import resource_find\nfrom kivy.properties import StringProperty, NumericProperty, OptionProperty, \\\n AliasProperty, BooleanProperty\n\n\nclass SoundLoader:\n '''Load a sound, using the best loader for the given file type.\n '''\n\n _classes = []\n\n @staticmethod\n def register(classobj):\n '''Register a new class to load the sound.'''\n Logger.debug('Audio: register %s' % classobj.__name__)\n SoundLoader._classes.append(classobj)\n\n @staticmethod\n def load(filename):\n '''Load a sound, and return a Sound() instance.'''\n rfn = resource_find(filename)\n if rfn is not None:\n filename = rfn\n ext = filename.split('.')[-1].lower()\n for classobj in SoundLoader._classes:\n if ext in classobj.extensions():\n return classobj(source=filename)\n Logger.warning('Audio: Unable to find a loader for <%s>' %\n filename)\n return None\n\n\nclass Sound(EventDispatcher):\n '''Represents a sound to play. This class is abstract, and cannot be used\n directly.\n\n Use SoundLoader to load a sound.\n\n :Events:\n `on_play` : None\n Fired when the sound is played.\n `on_stop` : None\n Fired when the sound is stopped.\n '''\n\n source = StringProperty(None)\n '''Filename / source of your audio file.\n\n .. versionadded:: 1.3.0\n\n :attr:`source` is a :class:`~kivy.properties.StringProperty` that defaults\n to None and is read-only. Use the :meth:`SoundLoader.load` for loading\n audio.\n '''\n\n volume = NumericProperty(1.)\n '''Volume, in the range 0-1. 1 means full volume, 0 means mute.\n\n .. versionadded:: 1.3.0\n\n :attr:`volume` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 1.\n '''\n\n state = OptionProperty('stop', options=('stop', 'play'))\n '''State of the sound, one of 'stop' or 'play'.\n\n .. 
versionadded:: 1.3.0\n\n :attr:`state` is a read-only :class:`~kivy.properties.OptionProperty`.'''\n\n loop = BooleanProperty(False)\n '''Set to True if the sound should automatically loop when it finishes.\n\n .. versionadded:: 1.8.0\n\n :attr:`loop` is a :class:`~kivy.properties.BooleanProperty` and defaults to\n False.'''\n\n #\n # deprecated\n #\n def _get_status(self):\n return self.state\n status = AliasProperty(_get_status, None, bind=('state', ))\n '''\n .. deprecated:: 1.3.0\n Use :attr:`state` instead.\n '''\n\n def _get_filename(self):\n return self.source\n filename = AliasProperty(_get_filename, None, bind=('source', ))\n '''\n .. deprecated:: 1.3.0\n Use :attr:`source` instead.\n '''\n\n __events__ = ('on_play', 'on_stop')\n\n def on_source(self, instance, filename):\n self.unload()\n if filename is None:\n return\n self.load()\n\n def get_pos(self):\n '''\n Returns the current position of the audio file.\n Returns 0 if not playing.\n\n .. versionadded:: 1.4.1\n '''\n return 0\n\n def _get_length(self):\n return 0\n\n length = property(lambda self: self._get_length(),\n doc='Get length of the sound (in seconds).')\n\n def load(self):\n '''Load the file into memory.'''\n pass\n\n def unload(self):\n '''Unload the file from memory.'''\n pass\n\n def play(self):\n '''Play the file.'''\n self.state = 'play'\n self.dispatch('on_play')\n\n def stop(self):\n '''Stop playback.'''\n self.state = 'stop'\n self.dispatch('on_stop')\n\n def seek(self, position):\n '''Go to the <position> (in seconds).'''\n pass\n\n def on_play(self):\n pass\n\n def on_stop(self):\n pass\n\n\n# Little trick here, don't activate gstreamer on window\n# seem to have lot of crackle or something...\naudio_libs = []\n\n# from now on, prefer our gstplayer instead of gi/pygst.\ntry:\n from kivy.lib.gstplayer import GstPlayer # NOQA\n audio_libs += [('gstplayer', 'audio_gstplayer')]\nexcept ImportError:\n #audio_libs += [('gi', 'audio_gi')]\n if PY2:\n audio_libs += [('pygst', 'audio_pygst')]\naudio_libs += [('sdl', 'audio_sdl')]\naudio_libs += [('pygame', 'audio_pygame')]\n\ncore_register_libs('audio', audio_libs)\n", "path": "kivy/core/audio/__init__.py"}], "after_files": [{"content": "'''\nAudio\n=====\n\nLoad an audio sound and play it with::\n\n from kivy.core.audio import SoundLoader\n\n sound = SoundLoader.load('mytest.wav')\n if sound:\n print(\"Sound found at %s\" % sound.source)\n print(\"Sound is %.3f seconds\" % sound.length)\n sound.play()\n\nYou should not use the Sound class directly. The class returned by\n**SoundLoader.load** will be the best sound provider for that particular file\ntype, so it might return different Sound classes depending the file type.\n\n.. versionchanged:: 1.8.0\n There is now 2 distinct Gstreamer implementation: one using Gi/Gst working\n for both Python 2+3 with Gstreamer 1.0, and one using PyGST working\n only for Python 2 + Gstreamer 0.10.\n If you have issue with GStreamer, have a look at\n :ref:`gstreamer-compatibility`\n\n.. 
note::\n\n Recording audio is not supported.\n\n'''\n\n__all__ = ('Sound', 'SoundLoader')\n\nfrom kivy.logger import Logger\nfrom kivy.event import EventDispatcher\nfrom kivy.core import core_register_libs\nfrom kivy.compat import PY2\nfrom kivy.resources import resource_find\nfrom kivy.properties import StringProperty, NumericProperty, OptionProperty, \\\n AliasProperty, BooleanProperty\n\n\nclass SoundLoader:\n '''Load a sound, using the best loader for the given file type.\n '''\n\n _classes = []\n\n @staticmethod\n def register(classobj):\n '''Register a new class to load the sound.'''\n Logger.debug('Audio: register %s' % classobj.__name__)\n SoundLoader._classes.append(classobj)\n\n @staticmethod\n def load(filename):\n '''Load a sound, and return a Sound() instance.'''\n rfn = resource_find(filename)\n if rfn is not None:\n filename = rfn\n ext = filename.split('.')[-1].lower()\n if '?' in ext:\n ext = ext.split('?')[0]\n for classobj in SoundLoader._classes:\n if ext in classobj.extensions():\n return classobj(source=filename)\n Logger.warning('Audio: Unable to find a loader for <%s>' %\n filename)\n return None\n\n\nclass Sound(EventDispatcher):\n '''Represents a sound to play. This class is abstract, and cannot be used\n directly.\n\n Use SoundLoader to load a sound.\n\n :Events:\n `on_play` : None\n Fired when the sound is played.\n `on_stop` : None\n Fired when the sound is stopped.\n '''\n\n source = StringProperty(None)\n '''Filename / source of your audio file.\n\n .. versionadded:: 1.3.0\n\n :attr:`source` is a :class:`~kivy.properties.StringProperty` that defaults\n to None and is read-only. Use the :meth:`SoundLoader.load` for loading\n audio.\n '''\n\n volume = NumericProperty(1.)\n '''Volume, in the range 0-1. 1 means full volume, 0 means mute.\n\n .. versionadded:: 1.3.0\n\n :attr:`volume` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 1.\n '''\n\n state = OptionProperty('stop', options=('stop', 'play'))\n '''State of the sound, one of 'stop' or 'play'.\n\n .. versionadded:: 1.3.0\n\n :attr:`state` is a read-only :class:`~kivy.properties.OptionProperty`.'''\n\n loop = BooleanProperty(False)\n '''Set to True if the sound should automatically loop when it finishes.\n\n .. versionadded:: 1.8.0\n\n :attr:`loop` is a :class:`~kivy.properties.BooleanProperty` and defaults to\n False.'''\n\n #\n # deprecated\n #\n def _get_status(self):\n return self.state\n status = AliasProperty(_get_status, None, bind=('state', ))\n '''\n .. deprecated:: 1.3.0\n Use :attr:`state` instead.\n '''\n\n def _get_filename(self):\n return self.source\n filename = AliasProperty(_get_filename, None, bind=('source', ))\n '''\n .. deprecated:: 1.3.0\n Use :attr:`source` instead.\n '''\n\n __events__ = ('on_play', 'on_stop')\n\n def on_source(self, instance, filename):\n self.unload()\n if filename is None:\n return\n self.load()\n\n def get_pos(self):\n '''\n Returns the current position of the audio file.\n Returns 0 if not playing.\n\n .. 
versionadded:: 1.4.1\n '''\n return 0\n\n def _get_length(self):\n return 0\n\n length = property(lambda self: self._get_length(),\n doc='Get length of the sound (in seconds).')\n\n def load(self):\n '''Load the file into memory.'''\n pass\n\n def unload(self):\n '''Unload the file from memory.'''\n pass\n\n def play(self):\n '''Play the file.'''\n self.state = 'play'\n self.dispatch('on_play')\n\n def stop(self):\n '''Stop playback.'''\n self.state = 'stop'\n self.dispatch('on_stop')\n\n def seek(self, position):\n '''Go to the <position> (in seconds).'''\n pass\n\n def on_play(self):\n pass\n\n def on_stop(self):\n pass\n\n\n# Little trick here, don't activate gstreamer on window\n# seem to have lot of crackle or something...\naudio_libs = []\n\n# from now on, prefer our gstplayer instead of gi/pygst.\ntry:\n from kivy.lib.gstplayer import GstPlayer # NOQA\n audio_libs += [('gstplayer', 'audio_gstplayer')]\nexcept ImportError:\n #audio_libs += [('gi', 'audio_gi')]\n if PY2:\n audio_libs += [('pygst', 'audio_pygst')]\naudio_libs += [('sdl', 'audio_sdl')]\naudio_libs += [('pygame', 'audio_pygame')]\n\ncore_register_libs('audio', audio_libs)\n", "path": "kivy/core/audio/__init__.py"}]}
| 2,198 | 124 |
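Note on the patch above: the merged fix strips everything after `?` from the computed extension, so `test.wav?dl=true&token=...` resolves to `wav`. A hedged alternative sketch using only the standard library, which would also ignore URL fragments — illustrative only, not the code that was merged, and the helper name `guess_extension` is hypothetical.

```python
from urllib.parse import urlparse

def guess_extension(source):
    # Hypothetical helper (not Kivy API): urlparse(...).path drops
    # '?dl=true&token=...' query strings and '#fragment' parts, while
    # leaving plain local filenames untouched.
    path = urlparse(source).path
    return path.split('.')[-1].lower()

print(guess_extension('https://audio.example.com/get/test.wav?dl=true&token=9a8s76f9a876'))  # wav
print(guess_extension('mytest.wav'))  # wav
```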
gh_patches_debug_16408
|
rasdani/github-patches
|
git_diff
|
Mailu__Mailu-2569
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
not allowing POP3/IMAP leads to infinite loop in webmail
v1.9.32
I noticed a small bug. If both are disabled, webmail is stuck in an infinite loop. I guess nobody ever tried it before since both are checked by default.
Not very consequential, but I figured you might want to know. Not sure about the use case either. I unchecked them because there was no need for this particular account and found it that way.
Cheers
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/admin/mailu/internal/nginx.py`
Content:
```
1 from mailu import models, utils
2 from flask import current_app as app
3
4 import re
5 import urllib
6 import ipaddress
7 import socket
8 import sqlalchemy.exc
9 import tenacity
10
11 SUPPORTED_AUTH_METHODS = ["none", "plain"]
12
13
14 STATUSES = {
15 "authentication": ("Authentication credentials invalid", {
16 "imap": "AUTHENTICATIONFAILED",
17 "smtp": "535 5.7.8",
18 "pop3": "-ERR Authentication failed"
19 }),
20 "encryption": ("Must issue a STARTTLS command first", {
21 "smtp": "530 5.7.0"
22 }),
23 "ratelimit": ("Temporary authentication failure (rate-limit)", {
24 "imap": "LIMIT",
25 "smtp": "451 4.3.2",
26 "pop3": "-ERR [LOGIN-DELAY] Retry later"
27 }),
28 }
29
30 def check_credentials(user, password, ip, protocol=None, auth_port=None):
31 if not user or not user.enabled or (protocol == "imap" and not user.enable_imap) or (protocol == "pop3" and not user.enable_pop):
32 return False
33 is_ok = False
34 # webmails
35 if auth_port in ['10143', '10025'] and password.startswith('token-'):
36 if utils.verify_temp_token(user.get_id(), password):
37 is_ok = True
38 # All tokens are 32 characters hex lowercase
39 if not is_ok and len(password) == 32:
40 for token in user.tokens:
41 if (token.check_password(password) and
42 (not token.ip or token.ip == ip)):
43 is_ok = True
44 break
45 if not is_ok and user.check_password(password):
46 is_ok = True
47 return is_ok
48
49 def handle_authentication(headers):
50 """ Handle an HTTP nginx authentication request
51 See: http://nginx.org/en/docs/mail/ngx_mail_auth_http_module.html#protocol
52 """
53 method = headers["Auth-Method"]
54 protocol = headers["Auth-Protocol"]
55 # Incoming mail, no authentication
56 if method == "none" and protocol == "smtp":
57 server, port = get_server(protocol, False)
58 if app.config["INBOUND_TLS_ENFORCE"]:
59 if "Auth-SSL" in headers and headers["Auth-SSL"] == "on":
60 return {
61 "Auth-Status": "OK",
62 "Auth-Server": server,
63 "Auth-Port": port
64 }
65 else:
66 status, code = get_status(protocol, "encryption")
67 return {
68 "Auth-Status": status,
69 "Auth-Error-Code" : code,
70 "Auth-Wait": 0
71 }
72 else:
73 return {
74 "Auth-Status": "OK",
75 "Auth-Server": server,
76 "Auth-Port": port
77 }
78 # Authenticated user
79 elif method == "plain":
80 is_valid_user = False
81 # According to RFC2616 section 3.7.1 and PEP 3333, HTTP headers should
82 # be ASCII and are generally considered ISO8859-1. However when passing
83 # the password, nginx does not transcode the input UTF string, thus
84 # we need to manually decode.
85 raw_user_email = urllib.parse.unquote(headers["Auth-User"])
86 raw_password = urllib.parse.unquote(headers["Auth-Pass"])
87 user_email = 'invalid'
88 try:
89 user_email = raw_user_email.encode("iso8859-1").decode("utf8")
90 password = raw_password.encode("iso8859-1").decode("utf8")
91 ip = urllib.parse.unquote(headers["Client-Ip"])
92 except:
93 app.logger.warn(f'Received undecodable user/password from nginx: {raw_user_email!r}/{raw_password!r}')
94 else:
95 try:
96 user = models.User.query.get(user_email) if '@' in user_email else None
97 except sqlalchemy.exc.StatementError as exc:
98 exc = str(exc).split('\n', 1)[0]
99 app.logger.warn(f'Invalid user {user_email!r}: {exc}')
100 else:
101 is_valid_user = user is not None
102 ip = urllib.parse.unquote(headers["Client-Ip"])
103 if check_credentials(user, password, ip, protocol, headers["Auth-Port"]):
104 server, port = get_server(headers["Auth-Protocol"], True)
105 return {
106 "Auth-Status": "OK",
107 "Auth-Server": server,
108 "Auth-User": user_email,
109 "Auth-User-Exists": is_valid_user,
110 "Auth-Port": port
111 }
112 status, code = get_status(protocol, "authentication")
113 return {
114 "Auth-Status": status,
115 "Auth-Error-Code": code,
116 "Auth-User": user_email,
117 "Auth-User-Exists": is_valid_user,
118 "Auth-Wait": 0
119 }
120 # Unexpected
121 return {}
122
123
124 def get_status(protocol, status):
125 """ Return the proper error code depending on the protocol
126 """
127 status, codes = STATUSES[status]
128 return status, codes[protocol]
129
130 def extract_host_port(host_and_port, default_port):
131 host, _, port = re.match('^(.*?)(:([0-9]*))?$', host_and_port).groups()
132 return host, int(port) if port else default_port
133
134 def get_server(protocol, authenticated=False):
135 if protocol == "imap":
136 hostname, port = extract_host_port(app.config['IMAP_ADDRESS'], 143)
137 elif protocol == "pop3":
138 hostname, port = extract_host_port(app.config['POP3_ADDRESS'], 110)
139 elif protocol == "smtp":
140 if authenticated:
141 hostname, port = extract_host_port(app.config['AUTHSMTP_ADDRESS'], 10025)
142 else:
143 hostname, port = extract_host_port(app.config['SMTP_ADDRESS'], 25)
144 try:
145 # test if hostname is already resolved to an ip adddress
146 ipaddress.ip_address(hostname)
147 except:
148 # hostname is not an ip address - so we need to resolve it
149 hostname = resolve_hostname(hostname)
150 return hostname, port
151
152 @tenacity.retry(stop=tenacity.stop_after_attempt(100),
153 wait=tenacity.wait_random(min=2, max=5))
154 def resolve_hostname(hostname):
155 """ This function uses system DNS to resolve a hostname.
156 It is capable of retrying in case the host is not immediately available
157 """
158 return socket.gethostbyname(hostname)
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/core/admin/mailu/internal/nginx.py b/core/admin/mailu/internal/nginx.py
--- a/core/admin/mailu/internal/nginx.py
+++ b/core/admin/mailu/internal/nginx.py
@@ -27,12 +27,14 @@
}),
}
+WEBMAIL_PORTS = ['10143', '10025']
+
def check_credentials(user, password, ip, protocol=None, auth_port=None):
- if not user or not user.enabled or (protocol == "imap" and not user.enable_imap) or (protocol == "pop3" and not user.enable_pop):
+ if not user or not user.enabled or (protocol == "imap" and not user.enable_imap and not auth_port in WEBMAIL_PORTS) or (protocol == "pop3" and not user.enable_pop):
return False
is_ok = False
# webmails
- if auth_port in ['10143', '10025'] and password.startswith('token-'):
+ if auth_port in WEBMAIL_PORTS and password.startswith('token-'):
if utils.verify_temp_token(user.get_id(), password):
is_ok = True
# All tokens are 32 characters hex lowercase
|
{"golden_diff": "diff --git a/core/admin/mailu/internal/nginx.py b/core/admin/mailu/internal/nginx.py\n--- a/core/admin/mailu/internal/nginx.py\n+++ b/core/admin/mailu/internal/nginx.py\n@@ -27,12 +27,14 @@\n }),\n }\n \n+WEBMAIL_PORTS = ['10143', '10025']\n+\n def check_credentials(user, password, ip, protocol=None, auth_port=None):\n- if not user or not user.enabled or (protocol == \"imap\" and not user.enable_imap) or (protocol == \"pop3\" and not user.enable_pop):\n+ if not user or not user.enabled or (protocol == \"imap\" and not user.enable_imap and not auth_port in WEBMAIL_PORTS) or (protocol == \"pop3\" and not user.enable_pop):\n return False\n is_ok = False\n # webmails\n- if auth_port in ['10143', '10025'] and password.startswith('token-'):\n+ if auth_port in WEBMAIL_PORTS and password.startswith('token-'):\n if utils.verify_temp_token(user.get_id(), password):\n is_ok = True\n # All tokens are 32 characters hex lowercase\n", "issue": "not allowing POP3/IMAP leads to infinite loop in webmail\nv1.9.32\r\n\r\nI noticed a small bug. If both are disabled, webmail is stuck in an infinite loop. I guess nobody ever tried it before since both are checked by default.\r\n\r\nNot very consequential, but I figured you might want to know. Not sure about the use case either. I unchecked them because there was no need for this particular account and found it that way.\r\n\r\nCheers\n", "before_files": [{"content": "from mailu import models, utils\nfrom flask import current_app as app\n\nimport re\nimport urllib\nimport ipaddress\nimport socket\nimport sqlalchemy.exc\nimport tenacity\n\nSUPPORTED_AUTH_METHODS = [\"none\", \"plain\"]\n\n\nSTATUSES = {\n \"authentication\": (\"Authentication credentials invalid\", {\n \"imap\": \"AUTHENTICATIONFAILED\",\n \"smtp\": \"535 5.7.8\",\n \"pop3\": \"-ERR Authentication failed\"\n }),\n \"encryption\": (\"Must issue a STARTTLS command first\", {\n \"smtp\": \"530 5.7.0\"\n }),\n \"ratelimit\": (\"Temporary authentication failure (rate-limit)\", {\n \"imap\": \"LIMIT\",\n \"smtp\": \"451 4.3.2\",\n \"pop3\": \"-ERR [LOGIN-DELAY] Retry later\"\n }),\n}\n\ndef check_credentials(user, password, ip, protocol=None, auth_port=None):\n if not user or not user.enabled or (protocol == \"imap\" and not user.enable_imap) or (protocol == \"pop3\" and not user.enable_pop):\n return False\n is_ok = False\n # webmails\n if auth_port in ['10143', '10025'] and password.startswith('token-'):\n if utils.verify_temp_token(user.get_id(), password):\n is_ok = True\n # All tokens are 32 characters hex lowercase\n if not is_ok and len(password) == 32:\n for token in user.tokens:\n if (token.check_password(password) and\n (not token.ip or token.ip == ip)):\n is_ok = True\n break\n if not is_ok and user.check_password(password):\n is_ok = True\n return is_ok\n\ndef handle_authentication(headers):\n \"\"\" Handle an HTTP nginx authentication request\n See: http://nginx.org/en/docs/mail/ngx_mail_auth_http_module.html#protocol\n \"\"\"\n method = headers[\"Auth-Method\"]\n protocol = headers[\"Auth-Protocol\"]\n # Incoming mail, no authentication\n if method == \"none\" and protocol == \"smtp\":\n server, port = get_server(protocol, False)\n if app.config[\"INBOUND_TLS_ENFORCE\"]:\n if \"Auth-SSL\" in headers and headers[\"Auth-SSL\"] == \"on\":\n return {\n \"Auth-Status\": \"OK\",\n \"Auth-Server\": server,\n \"Auth-Port\": port\n }\n else:\n status, code = get_status(protocol, \"encryption\")\n return {\n \"Auth-Status\": status,\n \"Auth-Error-Code\" : code,\n \"Auth-Wait\": 0\n 
}\n else:\n return {\n \"Auth-Status\": \"OK\",\n \"Auth-Server\": server,\n \"Auth-Port\": port\n }\n # Authenticated user\n elif method == \"plain\":\n is_valid_user = False\n # According to RFC2616 section 3.7.1 and PEP 3333, HTTP headers should\n # be ASCII and are generally considered ISO8859-1. However when passing\n # the password, nginx does not transcode the input UTF string, thus\n # we need to manually decode.\n raw_user_email = urllib.parse.unquote(headers[\"Auth-User\"])\n raw_password = urllib.parse.unquote(headers[\"Auth-Pass\"])\n user_email = 'invalid'\n try:\n user_email = raw_user_email.encode(\"iso8859-1\").decode(\"utf8\")\n password = raw_password.encode(\"iso8859-1\").decode(\"utf8\")\n ip = urllib.parse.unquote(headers[\"Client-Ip\"])\n except:\n app.logger.warn(f'Received undecodable user/password from nginx: {raw_user_email!r}/{raw_password!r}')\n else:\n try:\n user = models.User.query.get(user_email) if '@' in user_email else None\n except sqlalchemy.exc.StatementError as exc:\n exc = str(exc).split('\\n', 1)[0]\n app.logger.warn(f'Invalid user {user_email!r}: {exc}')\n else:\n is_valid_user = user is not None\n ip = urllib.parse.unquote(headers[\"Client-Ip\"])\n if check_credentials(user, password, ip, protocol, headers[\"Auth-Port\"]):\n server, port = get_server(headers[\"Auth-Protocol\"], True)\n return {\n \"Auth-Status\": \"OK\",\n \"Auth-Server\": server,\n \"Auth-User\": user_email,\n \"Auth-User-Exists\": is_valid_user,\n \"Auth-Port\": port\n }\n status, code = get_status(protocol, \"authentication\")\n return {\n \"Auth-Status\": status,\n \"Auth-Error-Code\": code,\n \"Auth-User\": user_email,\n \"Auth-User-Exists\": is_valid_user,\n \"Auth-Wait\": 0\n }\n # Unexpected\n return {}\n\n\ndef get_status(protocol, status):\n \"\"\" Return the proper error code depending on the protocol\n \"\"\"\n status, codes = STATUSES[status]\n return status, codes[protocol]\n\ndef extract_host_port(host_and_port, default_port):\n host, _, port = re.match('^(.*?)(:([0-9]*))?$', host_and_port).groups()\n return host, int(port) if port else default_port\n\ndef get_server(protocol, authenticated=False):\n if protocol == \"imap\":\n hostname, port = extract_host_port(app.config['IMAP_ADDRESS'], 143)\n elif protocol == \"pop3\":\n hostname, port = extract_host_port(app.config['POP3_ADDRESS'], 110)\n elif protocol == \"smtp\":\n if authenticated:\n hostname, port = extract_host_port(app.config['AUTHSMTP_ADDRESS'], 10025)\n else:\n hostname, port = extract_host_port(app.config['SMTP_ADDRESS'], 25)\n try:\n # test if hostname is already resolved to an ip adddress\n ipaddress.ip_address(hostname)\n except:\n # hostname is not an ip address - so we need to resolve it\n hostname = resolve_hostname(hostname)\n return hostname, port\n\[email protected](stop=tenacity.stop_after_attempt(100),\n wait=tenacity.wait_random(min=2, max=5))\ndef resolve_hostname(hostname):\n \"\"\" This function uses system DNS to resolve a hostname.\n It is capable of retrying in case the host is not immediately available\n \"\"\"\n return socket.gethostbyname(hostname)\n", "path": "core/admin/mailu/internal/nginx.py"}], "after_files": [{"content": "from mailu import models, utils\nfrom flask import current_app as app\n\nimport re\nimport urllib\nimport ipaddress\nimport socket\nimport sqlalchemy.exc\nimport tenacity\n\nSUPPORTED_AUTH_METHODS = [\"none\", \"plain\"]\n\n\nSTATUSES = {\n \"authentication\": (\"Authentication credentials invalid\", {\n \"imap\": \"AUTHENTICATIONFAILED\",\n \"smtp\": \"535 
5.7.8\",\n \"pop3\": \"-ERR Authentication failed\"\n }),\n \"encryption\": (\"Must issue a STARTTLS command first\", {\n \"smtp\": \"530 5.7.0\"\n }),\n \"ratelimit\": (\"Temporary authentication failure (rate-limit)\", {\n \"imap\": \"LIMIT\",\n \"smtp\": \"451 4.3.2\",\n \"pop3\": \"-ERR [LOGIN-DELAY] Retry later\"\n }),\n}\n\nWEBMAIL_PORTS = ['10143', '10025']\n\ndef check_credentials(user, password, ip, protocol=None, auth_port=None):\n if not user or not user.enabled or (protocol == \"imap\" and not user.enable_imap and not auth_port in WEBMAIL_PORTS) or (protocol == \"pop3\" and not user.enable_pop):\n return False\n is_ok = False\n # webmails\n if auth_port in WEBMAIL_PORTS and password.startswith('token-'):\n if utils.verify_temp_token(user.get_id(), password):\n is_ok = True\n # All tokens are 32 characters hex lowercase\n if not is_ok and len(password) == 32:\n for token in user.tokens:\n if (token.check_password(password) and\n (not token.ip or token.ip == ip)):\n is_ok = True\n break\n if not is_ok and user.check_password(password):\n is_ok = True\n return is_ok\n\ndef handle_authentication(headers):\n \"\"\" Handle an HTTP nginx authentication request\n See: http://nginx.org/en/docs/mail/ngx_mail_auth_http_module.html#protocol\n \"\"\"\n method = headers[\"Auth-Method\"]\n protocol = headers[\"Auth-Protocol\"]\n # Incoming mail, no authentication\n if method == \"none\" and protocol == \"smtp\":\n server, port = get_server(protocol, False)\n if app.config[\"INBOUND_TLS_ENFORCE\"]:\n if \"Auth-SSL\" in headers and headers[\"Auth-SSL\"] == \"on\":\n return {\n \"Auth-Status\": \"OK\",\n \"Auth-Server\": server,\n \"Auth-Port\": port\n }\n else:\n status, code = get_status(protocol, \"encryption\")\n return {\n \"Auth-Status\": status,\n \"Auth-Error-Code\" : code,\n \"Auth-Wait\": 0\n }\n else:\n return {\n \"Auth-Status\": \"OK\",\n \"Auth-Server\": server,\n \"Auth-Port\": port\n }\n # Authenticated user\n elif method == \"plain\":\n is_valid_user = False\n # According to RFC2616 section 3.7.1 and PEP 3333, HTTP headers should\n # be ASCII and are generally considered ISO8859-1. 
However when passing\n # the password, nginx does not transcode the input UTF string, thus\n # we need to manually decode.\n raw_user_email = urllib.parse.unquote(headers[\"Auth-User\"])\n raw_password = urllib.parse.unquote(headers[\"Auth-Pass\"])\n user_email = 'invalid'\n try:\n user_email = raw_user_email.encode(\"iso8859-1\").decode(\"utf8\")\n password = raw_password.encode(\"iso8859-1\").decode(\"utf8\")\n ip = urllib.parse.unquote(headers[\"Client-Ip\"])\n except:\n app.logger.warn(f'Received undecodable user/password from nginx: {raw_user_email!r}/{raw_password!r}')\n else:\n try:\n user = models.User.query.get(user_email) if '@' in user_email else None\n except sqlalchemy.exc.StatementError as exc:\n exc = str(exc).split('\\n', 1)[0]\n app.logger.warn(f'Invalid user {user_email!r}: {exc}')\n else:\n is_valid_user = user is not None\n ip = urllib.parse.unquote(headers[\"Client-Ip\"])\n if check_credentials(user, password, ip, protocol, headers[\"Auth-Port\"]):\n server, port = get_server(headers[\"Auth-Protocol\"], True)\n return {\n \"Auth-Status\": \"OK\",\n \"Auth-Server\": server,\n \"Auth-User\": user_email,\n \"Auth-User-Exists\": is_valid_user,\n \"Auth-Port\": port\n }\n status, code = get_status(protocol, \"authentication\")\n return {\n \"Auth-Status\": status,\n \"Auth-Error-Code\": code,\n \"Auth-User\": user_email,\n \"Auth-User-Exists\": is_valid_user,\n \"Auth-Wait\": 0\n }\n # Unexpected\n return {}\n\n\ndef get_status(protocol, status):\n \"\"\" Return the proper error code depending on the protocol\n \"\"\"\n status, codes = STATUSES[status]\n return status, codes[protocol]\n\ndef extract_host_port(host_and_port, default_port):\n host, _, port = re.match('^(.*?)(:([0-9]*))?$', host_and_port).groups()\n return host, int(port) if port else default_port\n\ndef get_server(protocol, authenticated=False):\n if protocol == \"imap\":\n hostname, port = extract_host_port(app.config['IMAP_ADDRESS'], 143)\n elif protocol == \"pop3\":\n hostname, port = extract_host_port(app.config['POP3_ADDRESS'], 110)\n elif protocol == \"smtp\":\n if authenticated:\n hostname, port = extract_host_port(app.config['AUTHSMTP_ADDRESS'], 10025)\n else:\n hostname, port = extract_host_port(app.config['SMTP_ADDRESS'], 25)\n try:\n # test if hostname is already resolved to an ip adddress\n ipaddress.ip_address(hostname)\n except:\n # hostname is not an ip address - so we need to resolve it\n hostname = resolve_hostname(hostname)\n return hostname, port\n\[email protected](stop=tenacity.stop_after_attempt(100),\n wait=tenacity.wait_random(min=2, max=5))\ndef resolve_hostname(hostname):\n \"\"\" This function uses system DNS to resolve a hostname.\n It is capable of retrying in case the host is not immediately available\n \"\"\"\n return socket.gethostbyname(hostname)\n", "path": "core/admin/mailu/internal/nginx.py"}]}
| 2,153 | 265 |
gh_patches_debug_13686
|
rasdani/github-patches
|
git_diff
|
cobbler__cobbler-3649
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SELinux issue when service is restarted
This issue was initially reported at
https://bugzilla.redhat.com/show_bug.cgi?id=1754430
There is a SELinux denial when the cobblerd service is restarted because of the permissions of the web.ss file and the other webui_sessions files.
I'm not sure whether this affects only 3.0.x or also exists in 2.8.x, but I need to understand why cobblerd (running as uid root) tries to read these files...
It can probably be fixed by using 640 permissions on these files (to be tested), but that only works around the problem.
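As an illustrative aside (not part of the original report, and not Cobbler's actual patch), the tightened ownership/permission scheme could look roughly like the sketch below. The "apache" group name is an assumption (Debian uses "www-data", SUSE "wwwrun"), and `os.chmod` is used because the third positional argument of Python's built-in `open()` is a buffering size, not a file mode.

```python
import grp
import os

# Hedged sketch only: keep the session file owned by root but readable by the
# web-server group, and drop all access for other users.
ssfile = "/var/lib/cobbler/web.ss"
http_group = "apache"  # assumption; "www-data" on Debian, "wwwrun" on SUSE

os.chown(ssfile, 0, grp.getgrnam(http_group).gr_gid)  # owner root, group = web server
os.chmod(ssfile, 0o640)  # rw for root, read-only for the group, nothing for others
```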
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cobbler/cobblerd.py`
Content:
```
1 """
2 Cobbler daemon for logging remote syslog traffic during automatic installation
3
4 Copyright 2007-2009, Red Hat, Inc and Others
5 Michael DeHaan <michael.dehaan AT gmail>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301 USA
21 """
22
23 import binascii
24 import os
25 import pwd
26 import time
27
28 from cobbler import remote
29 from cobbler import utils
30 from cobbler.api import CobblerAPI
31
32
33 def core(cobbler_api: CobblerAPI):
34 """
35 Starts Cobbler.
36
37 :param cobbler_api: The cobbler_api instance which is used for this method.
38 """
39 settings = cobbler_api.settings()
40 xmlrpc_port = settings.xmlrpc_port
41
42 regen_ss_file()
43 do_xmlrpc_rw(cobbler_api, settings, xmlrpc_port)
44
45
46 def regen_ss_file():
47 """
48 This is only used for Kerberos auth at the moment. It identifies XMLRPC requests from Apache that have already been
49 cleared by Kerberos.
50 """
51 ssfile = "/var/lib/cobbler/web.ss"
52 with open("/dev/urandom", 'rb') as fd:
53 data = fd.read(512)
54
55 with open(ssfile, 'wb', 0o660) as fd:
56 fd.write(binascii.hexlify(data))
57
58 http_user = "apache"
59 family = utils.get_family()
60 if family == "debian":
61 http_user = "www-data"
62 elif family == "suse":
63 http_user = "wwwrun"
64 os.lchown("/var/lib/cobbler/web.ss", pwd.getpwnam(http_user)[2], -1)
65
66
67 def do_xmlrpc_rw(cobbler_api: CobblerAPI, settings, port):
68 """
69 This trys to bring up the Cobbler xmlrpc_api and restart it if it fails.
70
71 :param cobbler_api: The cobbler_api instance which is used for this method.
72 :param settings: The Cobbler settings instance which is used for this method.
73 :param port: The port where the xmlrpc api should run on.
74 """
75 xinterface = remote.ProxiedXMLRPCInterface(cobbler_api, remote.CobblerXMLRPCInterface)
76 server = remote.CobblerXMLRPCServer(('127.0.0.1', port))
77 server.logRequests = 0 # don't print stuff
78 xinterface.logger.debug("XMLRPC running on %s" % port)
79 server.register_instance(xinterface)
80
81 while True:
82 try:
83 print("SERVING!")
84 server.serve_forever()
85 except IOError:
86 # interrupted? try to serve again
87 time.sleep(0.5)
88
89
90 if __name__ == "__main__":
91 core(CobblerAPI())
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cobbler/cobblerd.py b/cobbler/cobblerd.py
--- a/cobbler/cobblerd.py
+++ b/cobbler/cobblerd.py
@@ -52,7 +52,7 @@
with open("/dev/urandom", 'rb') as fd:
data = fd.read(512)
- with open(ssfile, 'wb', 0o660) as fd:
+ with open(ssfile, 'wb', 0o640) as fd:
fd.write(binascii.hexlify(data))
http_user = "apache"
@@ -61,7 +61,7 @@
http_user = "www-data"
elif family == "suse":
http_user = "wwwrun"
- os.lchown("/var/lib/cobbler/web.ss", pwd.getpwnam(http_user)[2], -1)
+ os.lchown(ssfile, 0, pwd.getpwnam(http_user)[3])
def do_xmlrpc_rw(cobbler_api: CobblerAPI, settings, port):
|
{"golden_diff": "diff --git a/cobbler/cobblerd.py b/cobbler/cobblerd.py\n--- a/cobbler/cobblerd.py\n+++ b/cobbler/cobblerd.py\n@@ -52,7 +52,7 @@\n with open(\"/dev/urandom\", 'rb') as fd:\n data = fd.read(512)\n \n- with open(ssfile, 'wb', 0o660) as fd:\n+ with open(ssfile, 'wb', 0o640) as fd:\n fd.write(binascii.hexlify(data))\n \n http_user = \"apache\"\n@@ -61,7 +61,7 @@\n http_user = \"www-data\"\n elif family == \"suse\":\n http_user = \"wwwrun\"\n- os.lchown(\"/var/lib/cobbler/web.ss\", pwd.getpwnam(http_user)[2], -1)\n+ os.lchown(ssfile, 0, pwd.getpwnam(http_user)[3])\n \n \n def do_xmlrpc_rw(cobbler_api: CobblerAPI, settings, port):\n", "issue": "SELinux issue when service is restarted\nThis issue was initially reported at\r\nhttps://bugzilla.redhat.com/show_bug.cgi?id=1754430\r\n\r\nThere is a SELinux denial when the cobblerd service is restarted because of the permissions of the web.ss and others webui_sessions files.\r\n\r\nI'm not sure to understand if this is 3.0.x only or also only exists in 2.8.x, but for me there is a need to understand why cobblerd (uid root) tries to read theses files...\r\nOf course it can probably be fixed by using 640 perm on theses files. (to be tested) but it just workaround the problem.\n", "before_files": [{"content": "\"\"\"\nCobbler daemon for logging remote syslog traffic during automatic installation\n\nCopyright 2007-2009, Red Hat, Inc and Others\nMichael DeHaan <michael.dehaan AT gmail>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n02110-1301 USA\n\"\"\"\n\nimport binascii\nimport os\nimport pwd\nimport time\n\nfrom cobbler import remote\nfrom cobbler import utils\nfrom cobbler.api import CobblerAPI\n\n\ndef core(cobbler_api: CobblerAPI):\n \"\"\"\n Starts Cobbler.\n\n :param cobbler_api: The cobbler_api instance which is used for this method.\n \"\"\"\n settings = cobbler_api.settings()\n xmlrpc_port = settings.xmlrpc_port\n\n regen_ss_file()\n do_xmlrpc_rw(cobbler_api, settings, xmlrpc_port)\n\n\ndef regen_ss_file():\n \"\"\"\n This is only used for Kerberos auth at the moment. 
It identifies XMLRPC requests from Apache that have already been\n cleared by Kerberos.\n \"\"\"\n ssfile = \"/var/lib/cobbler/web.ss\"\n with open(\"/dev/urandom\", 'rb') as fd:\n data = fd.read(512)\n\n with open(ssfile, 'wb', 0o660) as fd:\n fd.write(binascii.hexlify(data))\n\n http_user = \"apache\"\n family = utils.get_family()\n if family == \"debian\":\n http_user = \"www-data\"\n elif family == \"suse\":\n http_user = \"wwwrun\"\n os.lchown(\"/var/lib/cobbler/web.ss\", pwd.getpwnam(http_user)[2], -1)\n\n\ndef do_xmlrpc_rw(cobbler_api: CobblerAPI, settings, port):\n \"\"\"\n This trys to bring up the Cobbler xmlrpc_api and restart it if it fails.\n\n :param cobbler_api: The cobbler_api instance which is used for this method.\n :param settings: The Cobbler settings instance which is used for this method.\n :param port: The port where the xmlrpc api should run on.\n \"\"\"\n xinterface = remote.ProxiedXMLRPCInterface(cobbler_api, remote.CobblerXMLRPCInterface)\n server = remote.CobblerXMLRPCServer(('127.0.0.1', port))\n server.logRequests = 0 # don't print stuff\n xinterface.logger.debug(\"XMLRPC running on %s\" % port)\n server.register_instance(xinterface)\n\n while True:\n try:\n print(\"SERVING!\")\n server.serve_forever()\n except IOError:\n # interrupted? try to serve again\n time.sleep(0.5)\n\n\nif __name__ == \"__main__\":\n core(CobblerAPI())\n", "path": "cobbler/cobblerd.py"}], "after_files": [{"content": "\"\"\"\nCobbler daemon for logging remote syslog traffic during automatic installation\n\nCopyright 2007-2009, Red Hat, Inc and Others\nMichael DeHaan <michael.dehaan AT gmail>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n02110-1301 USA\n\"\"\"\n\nimport binascii\nimport os\nimport pwd\nimport time\n\nfrom cobbler import remote\nfrom cobbler import utils\nfrom cobbler.api import CobblerAPI\n\n\ndef core(cobbler_api: CobblerAPI):\n \"\"\"\n Starts Cobbler.\n\n :param cobbler_api: The cobbler_api instance which is used for this method.\n \"\"\"\n settings = cobbler_api.settings()\n xmlrpc_port = settings.xmlrpc_port\n\n regen_ss_file()\n do_xmlrpc_rw(cobbler_api, settings, xmlrpc_port)\n\n\ndef regen_ss_file():\n \"\"\"\n This is only used for Kerberos auth at the moment. 
It identifies XMLRPC requests from Apache that have already been\n cleared by Kerberos.\n \"\"\"\n ssfile = \"/var/lib/cobbler/web.ss\"\n with open(\"/dev/urandom\", 'rb') as fd:\n data = fd.read(512)\n\n with open(ssfile, 'wb', 0o640) as fd:\n fd.write(binascii.hexlify(data))\n\n http_user = \"apache\"\n family = utils.get_family()\n if family == \"debian\":\n http_user = \"www-data\"\n elif family == \"suse\":\n http_user = \"wwwrun\"\n os.lchown(ssfile, 0, pwd.getpwnam(http_user)[3])\n\n\ndef do_xmlrpc_rw(cobbler_api: CobblerAPI, settings, port):\n \"\"\"\n This trys to bring up the Cobbler xmlrpc_api and restart it if it fails.\n\n :param cobbler_api: The cobbler_api instance which is used for this method.\n :param settings: The Cobbler settings instance which is used for this method.\n :param port: The port where the xmlrpc api should run on.\n \"\"\"\n xinterface = remote.ProxiedXMLRPCInterface(cobbler_api, remote.CobblerXMLRPCInterface)\n server = remote.CobblerXMLRPCServer(('127.0.0.1', port))\n server.logRequests = 0 # don't print stuff\n xinterface.logger.debug(\"XMLRPC running on %s\" % port)\n server.register_instance(xinterface)\n\n while True:\n try:\n print(\"SERVING!\")\n server.serve_forever()\n except IOError:\n # interrupted? try to serve again\n time.sleep(0.5)\n\n\nif __name__ == \"__main__\":\n core(CobblerAPI())\n", "path": "cobbler/cobblerd.py"}]}
| 1,340 | 244 |
gh_patches_debug_25206
|
rasdani/github-patches
|
git_diff
|
ansible__ansible-modules-extras-1049
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
About option of win_package
I found two issues.
I think the Product-ID parameter is not "product_id"; shouldn't it be "productid"?
Also, it seems the "required" column should be "yes".
```
fatal: [10.1.1.6]: FAILED! => {"changed": false, "failed": true, "msg": "Missing required argument: productid"
```
Therefore, the example below from the win_package documentation uses "ProductId" incorrectly:
```
# Playbook example
- name: Install the vc thingy
win_package:
name="Microsoft Visual C thingy"
path="http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe"
ProductId="{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}"
Arguments="/install /passive /norestart"
```
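For illustration only: win_package itself is implemented in PowerShell (a .ps1 file), so the sketch below is not its real code. It merely shows, with a hypothetical Python-based Ansible module, how an `aliases` entry lets both `product_id` and `productid` be accepted for the same required option.

```python
from ansible.module_utils.basic import AnsibleModule

# Hypothetical argument spec, shown only to illustrate the alias mechanism;
# the real win_package module does not use this Python code.
module = AnsibleModule(
    argument_spec=dict(
        path=dict(type='str', required=True),
        name=dict(type='str'),
        product_id=dict(type='str', required=True, aliases=['productid']),
        arguments=dict(type='str'),
        state=dict(type='str', choices=['present', 'absent'], default='present'),
    ),
    supports_check_mode=True,
)
```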
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `windows/win_package.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # (c) 2014, Trond Hindenes <[email protected]>, and others
5 #
6 # This file is part of Ansible
7 #
8 # Ansible is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # Ansible is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
20
21 # this is a windows documentation stub. actual code lives in the .ps1
22 # file of the same name
23
24 DOCUMENTATION = '''
25 ---
26 module: win_package
27 version_added: "1.7"
28 short_description: Installs/Uninstalls a installable package, either from local file system or url
29 description:
30 - Installs or uninstalls a package
31 options:
32 path:
33 description:
34 - Location of the package to be installed (either on file system, network share or url)
35 required: true
36 default: null
37 aliases: []
38 name:
39 description:
40 - name of the package. Just for logging reasons, will use the value of path if name isn't specified
41 required: false
42 default: null
43 aliases: []
44 product_id:
45 description:
46 - product id of the installed package (used for checking if already installed)
47 required: false
48 default: null
49 aliases: []
50 arguments:
51 description:
52 - Any arguments the installer needs
53 default: null
54 aliases: []
55 state:
56 description:
57 - Install or Uninstall
58 choices:
59 - present
60 - absent
61 default: present
62 aliases: [ensure]
63 user_name:
64 description:
65 - Username of an account with access to the package if its located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_password for this to function properly.
66 default: null
67 aliases: []
68 user_password:
69 description:
70 - Password of an account with access to the package if its located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_name for this to function properly.
71 default: null
72 aliases: []
73 author: Trond Hindenes
74 '''
75
76 EXAMPLES = '''
77 # Playbook example
78 - name: Install the vc thingy
79 win_package:
80 name="Microsoft Visual C thingy"
81 path="http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe"
82 ProductId="{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}"
83 Arguments="/install /passive /norestart"
84
85
86 '''
87
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/windows/win_package.py b/windows/win_package.py
--- a/windows/win_package.py
+++ b/windows/win_package.py
@@ -25,6 +25,7 @@
---
module: win_package
version_added: "1.7"
+author: Trond Hindenes
short_description: Installs/Uninstalls a installable package, either from local file system or url
description:
- Installs or uninstalls a package
@@ -44,9 +45,9 @@
product_id:
description:
- product id of the installed package (used for checking if already installed)
- required: false
+ required: true
default: null
- aliases: []
+ aliases: [productid]
arguments:
description:
- Any arguments the installer needs
@@ -79,7 +80,7 @@
win_package:
name="Microsoft Visual C thingy"
path="http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe"
- ProductId="{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}"
+ Product_Id="{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}"
Arguments="/install /passive /norestart"
|
{"golden_diff": "diff --git a/windows/win_package.py b/windows/win_package.py\n--- a/windows/win_package.py\n+++ b/windows/win_package.py\n@@ -25,6 +25,7 @@\n ---\n module: win_package\n version_added: \"1.7\"\n+author: Trond Hindenes\n short_description: Installs/Uninstalls a installable package, either from local file system or url\n description:\n - Installs or uninstalls a package\n@@ -44,9 +45,9 @@\n product_id:\n description:\n - product id of the installed package (used for checking if already installed)\n- required: false\n+ required: true\n default: null\n- aliases: []\n+ aliases: [productid]\n arguments:\n description:\n - Any arguments the installer needs\n@@ -79,7 +80,7 @@\n win_package:\n name=\"Microsoft Visual C thingy\"\n path=\"http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe\"\n- ProductId=\"{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}\"\n+ Product_Id=\"{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}\"\n Arguments=\"/install /passive /norestart\"\n", "issue": "About option of win_package\nI found two issue.\n\nI think Product-ID parameter is not \"product_id\" , is it \"productid\"?\nAlso , it seems the required columns is \"yes\".\n\n```\nfatal: [10.1.1.6]: FAILED! => {\"changed\": false, \"failed\": true, \"msg\": \"Missing required argument: productid\"\n```\n\nTherefore , it take a mistake about \"ProductId\" below an example on document of win_package:\n\n```\n# Playbook example\n - name: Install the vc thingy\n win_package:\n name=\"Microsoft Visual C thingy\"\n path=\"http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe\"\n ProductId=\"{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}\"\n Arguments=\"/install /passive /norestart\"\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2014, Trond Hindenes <[email protected]>, and others\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\n# this is a windows documentation stub. actual code lives in the .ps1\n# file of the same name\n\nDOCUMENTATION = '''\n---\nmodule: win_package\nversion_added: \"1.7\"\nshort_description: Installs/Uninstalls a installable package, either from local file system or url\ndescription:\n - Installs or uninstalls a package\noptions:\n path:\n description:\n - Location of the package to be installed (either on file system, network share or url)\n required: true\n default: null\n aliases: []\n name:\n description:\n - name of the package. 
Just for logging reasons, will use the value of path if name isn't specified\n required: false\n default: null\n aliases: []\n product_id:\n description:\n - product id of the installed package (used for checking if already installed)\n required: false\n default: null\n aliases: []\n arguments:\n description:\n - Any arguments the installer needs\n default: null\n aliases: []\n state:\n description:\n - Install or Uninstall\n choices:\n - present\n - absent\n default: present\n aliases: [ensure]\n user_name:\n description:\n - Username of an account with access to the package if its located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_password for this to function properly.\n default: null\n aliases: []\n user_password:\n description:\n - Password of an account with access to the package if its located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_name for this to function properly.\n default: null\n aliases: []\nauthor: Trond Hindenes\n'''\n\nEXAMPLES = '''\n# Playbook example\n - name: Install the vc thingy\n win_package:\n name=\"Microsoft Visual C thingy\"\n path=\"http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe\"\n ProductId=\"{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}\"\n Arguments=\"/install /passive /norestart\"\n\n\n'''\n\n", "path": "windows/win_package.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2014, Trond Hindenes <[email protected]>, and others\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\n# this is a windows documentation stub. actual code lives in the .ps1\n# file of the same name\n\nDOCUMENTATION = '''\n---\nmodule: win_package\nversion_added: \"1.7\"\nauthor: Trond Hindenes\nshort_description: Installs/Uninstalls a installable package, either from local file system or url\ndescription:\n - Installs or uninstalls a package\noptions:\n path:\n description:\n - Location of the package to be installed (either on file system, network share or url)\n required: true\n default: null\n aliases: []\n name:\n description:\n - name of the package. Just for logging reasons, will use the value of path if name isn't specified\n required: false\n default: null\n aliases: []\n product_id:\n description:\n - product id of the installed package (used for checking if already installed)\n required: true\n default: null\n aliases: [productid]\n arguments:\n description:\n - Any arguments the installer needs\n default: null\n aliases: []\n state:\n description:\n - Install or Uninstall\n choices:\n - present\n - absent\n default: present\n aliases: [ensure]\n user_name:\n description:\n - Username of an account with access to the package if its located on a file share. Only needed if the winrm user doesn't have access to the package. 
Also specify user_password for this to function properly.\n default: null\n aliases: []\n user_password:\n description:\n - Password of an account with access to the package if its located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_name for this to function properly.\n default: null\n aliases: []\nauthor: Trond Hindenes\n'''\n\nEXAMPLES = '''\n# Playbook example\n - name: Install the vc thingy\n win_package:\n name=\"Microsoft Visual C thingy\"\n path=\"http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe\"\n Product_Id=\"{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}\"\n Arguments=\"/install /passive /norestart\"\n\n\n'''\n\n", "path": "windows/win_package.py"}]}
| 1,388 | 345 |
gh_patches_debug_2029
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-15568
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo in Tag model
### Deployment Type
Self-hosted
### NetBox Version
v3.7.4
### Python Version
3.8
### Steps to Reproduce
Typo in help_text where "this" is mistakenly repeated.
https://github.com/netbox-community/netbox/blob/69c0aac1051015660133b2ae3c86607dabd8084b/netbox/extras/models/tags.py#L40
### Expected Behavior
The object type(s) to which this tag can be applied.
### Observed Behavior
The object type(s) to which this this tag can be applied.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/extras/models/tags.py`
Content:
```
1 from django.conf import settings
2 from django.db import models
3 from django.urls import reverse
4 from django.utils.text import slugify
5 from django.utils.translation import gettext_lazy as _
6 from taggit.models import TagBase, GenericTaggedItemBase
7
8 from netbox.models import ChangeLoggedModel
9 from netbox.models.features import CloningMixin, ExportTemplatesMixin
10 from utilities.choices import ColorChoices
11 from utilities.fields import ColorField
12
13 __all__ = (
14 'Tag',
15 'TaggedItem',
16 )
17
18
19 #
20 # Tags
21 #
22
23 class Tag(CloningMixin, ExportTemplatesMixin, ChangeLoggedModel, TagBase):
24 id = models.BigAutoField(
25 primary_key=True
26 )
27 color = ColorField(
28 verbose_name=_('color'),
29 default=ColorChoices.COLOR_GREY
30 )
31 description = models.CharField(
32 verbose_name=_('description'),
33 max_length=200,
34 blank=True,
35 )
36 object_types = models.ManyToManyField(
37 to='contenttypes.ContentType',
38 related_name='+',
39 blank=True,
40 help_text=_("The object type(s) to which this this tag can be applied.")
41 )
42
43 clone_fields = (
44 'color', 'description', 'object_types',
45 )
46
47 class Meta:
48 ordering = ['name']
49 verbose_name = _('tag')
50 verbose_name_plural = _('tags')
51
52 def get_absolute_url(self):
53 return reverse('extras:tag', args=[self.pk])
54
55 @property
56 def docs_url(self):
57 return f'{settings.STATIC_URL}docs/models/extras/tag/'
58
59 def slugify(self, tag, i=None):
60 # Allow Unicode in Tag slugs (avoids empty slugs for Tags with all-Unicode names)
61 slug = slugify(tag, allow_unicode=True)
62 if i is not None:
63 slug += "_%d" % i
64 return slug
65
66
67 class TaggedItem(GenericTaggedItemBase):
68 tag = models.ForeignKey(
69 to=Tag,
70 related_name="%(app_label)s_%(class)s_items",
71 on_delete=models.CASCADE
72 )
73
74 _netbox_private = True
75
76 class Meta:
77 indexes = [models.Index(fields=["content_type", "object_id"])]
78 verbose_name = _('tagged item')
79 verbose_name_plural = _('tagged items')
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netbox/extras/models/tags.py b/netbox/extras/models/tags.py
--- a/netbox/extras/models/tags.py
+++ b/netbox/extras/models/tags.py
@@ -37,7 +37,7 @@
to='contenttypes.ContentType',
related_name='+',
blank=True,
- help_text=_("The object type(s) to which this this tag can be applied.")
+ help_text=_("The object type(s) to which this tag can be applied.")
)
clone_fields = (
|
{"golden_diff": "diff --git a/netbox/extras/models/tags.py b/netbox/extras/models/tags.py\n--- a/netbox/extras/models/tags.py\n+++ b/netbox/extras/models/tags.py\n@@ -37,7 +37,7 @@\n to='contenttypes.ContentType',\n related_name='+',\n blank=True,\n- help_text=_(\"The object type(s) to which this this tag can be applied.\")\n+ help_text=_(\"The object type(s) to which this tag can be applied.\")\n )\n \n clone_fields = (\n", "issue": "Typo in Tag model\n### Deployment Type\n\nSelf-hosted\n\n### NetBox Version\n\nv3.7.4\n\n### Python Version\n\n3.8\n\n### Steps to Reproduce\n\nTypo in help_text where \"this\" is mistakenly repeated.\r\n\r\nhttps://github.com/netbox-community/netbox/blob/69c0aac1051015660133b2ae3c86607dabd8084b/netbox/extras/models/tags.py#L40\n\n### Expected Behavior\n\nThe object type(s) to which this tag can be applied.\n\n### Observed Behavior\n\nThe object type(s) to which this this tag can be applied.\n", "before_files": [{"content": "from django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.text import slugify\nfrom django.utils.translation import gettext_lazy as _\nfrom taggit.models import TagBase, GenericTaggedItemBase\n\nfrom netbox.models import ChangeLoggedModel\nfrom netbox.models.features import CloningMixin, ExportTemplatesMixin\nfrom utilities.choices import ColorChoices\nfrom utilities.fields import ColorField\n\n__all__ = (\n 'Tag',\n 'TaggedItem',\n)\n\n\n#\n# Tags\n#\n\nclass Tag(CloningMixin, ExportTemplatesMixin, ChangeLoggedModel, TagBase):\n id = models.BigAutoField(\n primary_key=True\n )\n color = ColorField(\n verbose_name=_('color'),\n default=ColorChoices.COLOR_GREY\n )\n description = models.CharField(\n verbose_name=_('description'),\n max_length=200,\n blank=True,\n )\n object_types = models.ManyToManyField(\n to='contenttypes.ContentType',\n related_name='+',\n blank=True,\n help_text=_(\"The object type(s) to which this this tag can be applied.\")\n )\n\n clone_fields = (\n 'color', 'description', 'object_types',\n )\n\n class Meta:\n ordering = ['name']\n verbose_name = _('tag')\n verbose_name_plural = _('tags')\n\n def get_absolute_url(self):\n return reverse('extras:tag', args=[self.pk])\n\n @property\n def docs_url(self):\n return f'{settings.STATIC_URL}docs/models/extras/tag/'\n\n def slugify(self, tag, i=None):\n # Allow Unicode in Tag slugs (avoids empty slugs for Tags with all-Unicode names)\n slug = slugify(tag, allow_unicode=True)\n if i is not None:\n slug += \"_%d\" % i\n return slug\n\n\nclass TaggedItem(GenericTaggedItemBase):\n tag = models.ForeignKey(\n to=Tag,\n related_name=\"%(app_label)s_%(class)s_items\",\n on_delete=models.CASCADE\n )\n\n _netbox_private = True\n\n class Meta:\n indexes = [models.Index(fields=[\"content_type\", \"object_id\"])]\n verbose_name = _('tagged item')\n verbose_name_plural = _('tagged items')\n", "path": "netbox/extras/models/tags.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.text import slugify\nfrom django.utils.translation import gettext_lazy as _\nfrom taggit.models import TagBase, GenericTaggedItemBase\n\nfrom netbox.models import ChangeLoggedModel\nfrom netbox.models.features import CloningMixin, ExportTemplatesMixin\nfrom utilities.choices import ColorChoices\nfrom utilities.fields import ColorField\n\n__all__ = (\n 'Tag',\n 'TaggedItem',\n)\n\n\n#\n# Tags\n#\n\nclass Tag(CloningMixin, ExportTemplatesMixin, ChangeLoggedModel, 
TagBase):\n id = models.BigAutoField(\n primary_key=True\n )\n color = ColorField(\n verbose_name=_('color'),\n default=ColorChoices.COLOR_GREY\n )\n description = models.CharField(\n verbose_name=_('description'),\n max_length=200,\n blank=True,\n )\n object_types = models.ManyToManyField(\n to='contenttypes.ContentType',\n related_name='+',\n blank=True,\n help_text=_(\"The object type(s) to which this tag can be applied.\")\n )\n\n clone_fields = (\n 'color', 'description', 'object_types',\n )\n\n class Meta:\n ordering = ['name']\n verbose_name = _('tag')\n verbose_name_plural = _('tags')\n\n def get_absolute_url(self):\n return reverse('extras:tag', args=[self.pk])\n\n @property\n def docs_url(self):\n return f'{settings.STATIC_URL}docs/models/extras/tag/'\n\n def slugify(self, tag, i=None):\n # Allow Unicode in Tag slugs (avoids empty slugs for Tags with all-Unicode names)\n slug = slugify(tag, allow_unicode=True)\n if i is not None:\n slug += \"_%d\" % i\n return slug\n\n\nclass TaggedItem(GenericTaggedItemBase):\n tag = models.ForeignKey(\n to=Tag,\n related_name=\"%(app_label)s_%(class)s_items\",\n on_delete=models.CASCADE\n )\n\n _netbox_private = True\n\n class Meta:\n indexes = [models.Index(fields=[\"content_type\", \"object_id\"])]\n verbose_name = _('tagged item')\n verbose_name_plural = _('tagged items')\n", "path": "netbox/extras/models/tags.py"}]}
| 1,049 | 113 |
gh_patches_debug_15836
|
rasdani/github-patches
|
git_diff
|
scverse__scanpy-1054
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
sc.queries.enrich throws AssertionError with floats
<!-- Please give a clear and concise description of what the bug is: -->
I'm trying to run an enrichment analysis after filtering out certain genes via `sc.tl.filter_rank_genes_groups`, so I use `key='rank_genes_groups_filtered'` as an argument for `sc.queries.enrich`. Since the filtered values are replaced with `nan` I hoped they'd be ignored in the enrichment analysis, but it actually leads to an uninformative `AssertionError`.
My suggestion here is simply to filter `nan` values from the gene list around here and 2 lines later: https://github.com/theislab/scanpy/blob/249fc572471683357b86b8bbf41d3284118bc8f8/scanpy/queries/_queries.py#L296
I can make a little PR if we agree with this simple fix
Note you can reproduce this very simply without an adata object (but of course the likely use case is with an adata object as outlined above):
<!-- Put a minimal reproducible example that reproduces the bug in the code block below: -->
```
sc.queries.enrich([float('nan')])
```
Output:
<!-- Put your Error output in this code block (if applicable, else delete the block): -->
```pytb
AssertionError: query failed with error 500
```
#### Versions:
```
scanpy==1.4.5.post2 anndata==0.6.22.post1 umap==0.3.10 numpy==1.18.1 scipy==1.2.1 pandas==1.0.1 scikit-learn==0.22.1 statsmodels==0.11.0 python-igraph==0.8.0
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scanpy/queries/_queries.py`
Content:
```
1 import collections.abc as cabc
2 from functools import singledispatch
3 from types import MappingProxyType
4 from typing import Any, Union, Optional, Iterable, Dict, Mapping
5
6 import pandas as pd
7 from anndata import AnnData
8
9 from ..get import rank_genes_groups_df
10 from .._utils import _doc_params
11
12
13 _doc_org = """\
14 org
15 Organism to query. Must be an organism in ensembl biomart. "hsapiens",
16 "mmusculus", "drerio", etc.\
17 """
18
19 _doc_host = """\
20 host
21 A valid BioMart host URL. Alternative values include archive urls (like
22 "grch37.ensembl.org") or regional mirrors (like "useast.ensembl.org").\
23 """
24
25 _doc_use_cache = """\
26 use_cache
27 Whether pybiomart should use a cache for requests. Will create a
28 `.pybiomart.sqlite` file in current directory if used.\
29 """
30
31
32 @_doc_params(doc_org=_doc_org, doc_host=_doc_host, doc_use_cache=_doc_use_cache)
33 def simple_query(
34 org: str,
35 attrs: Union[Iterable[str], str],
36 *,
37 filters: Optional[Dict[str, Any]] = None,
38 host: str = "www.ensembl.org",
39 use_cache: bool = False,
40 ) -> pd.DataFrame:
41 """\
42 A simple interface to biomart.
43
44 Params
45 ------
46 {doc_org}
47 attrs
48 What you want returned.
49 filters
50 What you want to pick out.
51 {doc_host}
52 {doc_use_cache}
53 """
54 if isinstance(attrs, str):
55 attrs = [attrs]
56 elif isinstance(attrs, cabc.Iterable):
57 attrs = list(attrs)
58 else:
59 raise TypeError(f"attrs must be of type list or str, was {type(attrs)}.")
60 try:
61 from pybiomart import Server
62 except ImportError:
63 raise ImportError(
64 "This method requires the `pybiomart` module to be installed."
65 )
66 server = Server(host, use_cache=use_cache)
67 dataset = server.marts["ENSEMBL_MART_ENSEMBL"].datasets[
68 "{}_gene_ensembl".format(org)
69 ]
70 res = dataset.query(attributes=attrs, filters=filters, use_attr_names=True)
71 return res
72
73
74 @_doc_params(doc_org=_doc_org, doc_host=_doc_host, doc_use_cache=_doc_use_cache)
75 def biomart_annotations(
76 org: str,
77 attrs: Iterable[str],
78 *,
79 host: str = "www.ensembl.org",
80 use_cache: bool = False,
81 ) -> pd.DataFrame:
82 """\
83 Retrieve gene annotations from ensembl biomart.
84
85 Parameters
86 ----------
87 {doc_org}
88 attrs
89 Attributes to query biomart for.
90 {doc_host}
91 {doc_use_cache}
92
93 Returns
94 -------
95 Dataframe containing annotations.
96
97 Examples
98 --------
99 Retrieve genes coordinates and chromosomes
100
101 >>> import scanpy as sc
102 >>> annot = sc.queries.biomart_annotations(
103 "hsapiens",
104 ["ensembl_gene_id", "start_position", "end_position", "chromosome_name"],
105 ).set_index("ensembl_gene_id")
106 >>> adata.var[annot.columns] = annot
107 """
108 return simple_query(org=org, attrs=attrs, host=host, use_cache=use_cache)
109
110
111 @_doc_params(doc_org=_doc_org, doc_host=_doc_host, doc_use_cache=_doc_use_cache)
112 def gene_coordinates(
113 org: str,
114 gene_name: str,
115 *,
116 gene_attr: str = "external_gene_name",
117 chr_exclude: Iterable[str] = (),
118 host: str = "www.ensembl.org",
119 use_cache: bool = False,
120 ) -> pd.DataFrame:
121 """\
122 Retrieve gene coordinates for specific organism through BioMart.
123
124 Parameters
125 ----------
126 {doc_org}
127 gene_name
128 The gene symbol (e.g. "hgnc_symbol" for human) for which to retrieve
129 coordinates.
130 gene_attr
131 The biomart attribute the gene symbol should show up for.
132 chr_exclude
133 A list of chromosomes to exclude from query.
134 {doc_host}
135 {doc_use_cache}
136
137 Returns
138 -------
139 Dataframe containing gene coordinates for the specified gene symbol.
140
141 Examples
142 --------
143 >>> import scanpy as sc
144 >>> sc.queries.gene_coordinates("hsapiens", "MT-TF")
145 """
146 res = simple_query(
147 org=org,
148 attrs=["chromosome_name", "start_position", "end_position"],
149 filters={gene_attr: gene_name},
150 host=host,
151 use_cache=use_cache,
152 )
153 return res[~res["chromosome_name"].isin(chr_exclude)]
154
155
156 @_doc_params(doc_org=_doc_org, doc_host=_doc_host, doc_use_cache=_doc_use_cache)
157 def mitochondrial_genes(
158 org: str,
159 *,
160 attrname: str = "external_gene_name",
161 host: str = "www.ensembl.org",
162 use_cache: bool = False,
163 chromosome: str = "MT",
164 ) -> pd.DataFrame:
165 """\
166 Mitochondrial gene symbols for specific organism through BioMart.
167
168 Parameters
169 ----------
170 {doc_org}
171 attrname
172 Biomart attribute field to return. Possible values include
173 "external_gene_name", "ensembl_gene_id", "hgnc_symbol", "mgi_symbol",
174 and "zfin_id_symbol".
175 {doc_host}
176 {doc_use_cache}
177 chromosome
178 Mitochrondrial chromosome name used in BioMart for organism.
179
180 Returns
181 -------
182 Dataframe containing identifiers for mitochondrial genes.
183
184 Examples
185 --------
186 >>> import scanpy as sc
187 >>> mito_gene_names = sc.queries.mitochondrial_genes("hsapiens")
188 >>> mito_ensembl_ids = sc.queries.mitochondrial_genes("hsapiens", attrname="ensembl_gene_id")
189 >>> mito_gene_names_fly = sc.queries.mitochondrial_genes("dmelanogaster", chromosome="mitochondrion_genome")
190 """
191 return simple_query(
192 org,
193 attrs=[attrname],
194 filters={"chromosome_name": [chromosome]},
195 host=host,
196 use_cache=use_cache,
197 )
198
199
200 @singledispatch
201 @_doc_params(doc_org=_doc_org)
202 def enrich(
203 container: Iterable[str],
204 *,
205 org: str = "hsapiens",
206 gprofiler_kwargs: Mapping[str, Any] = MappingProxyType({}),
207 ) -> pd.DataFrame:
208 """\
209 Get enrichment for DE results.
210
211 This is a thin convenience wrapper around the very useful gprofiler_.
212
213 This method dispatches on the first argument, leading to the following two
214 signatures::
215
216 enrich(container, ...)
217 enrich(adata: AnnData, group, key: str, ...)
218
219 Where::
220
221 enrich(adata, group, key, ...) = enrich(adata.uns[key]["names"][group], ...)
222
223 .. _gprofiler: https://pypi.org/project/gprofiler-official/#description
224
225 Parameters
226 ----------
227 container
228 Contains genes you'd like to search.
229 adata
230 AnnData object whose group will be looked for.
231 group
232 The group whose genes should be used for enrichment.
233 key
234 Key in `uns` to find group under.
235 {doc_org}
236 gprofiler_kwargs
237 Keyword arguments to pass to `GProfiler.profile`, see gprofiler_.
238
239 Returns
240 -------
241 Dataframe of enrichment results.
242
243 Examples
244 --------
245 Using `sc.queries.enrich` on a list of genes:
246
247 >>> import scanpy as sc
248 >>> sc.queries.enrich(['Klf4', 'Pax5', 'Sox2', 'Nanog'], org="hsapiens")
249
250 Using `sc.queries.enrich` on an :class:`anndata.AnnData` object:
251
252 >>> pbmcs = sc.datasets.pbmc68k_reduced()
253 >>> sc.tl.rank_genes_groups(pbmcs, "bulk_labels")
254 >>> sc.queries.enrich(pbmcs, "CD34+")
255 """
256 try:
257 from gprofiler import GProfiler
258 except ImportError:
259 raise ImportError(
260 "This method requires the `gprofiler-official` module to be installed."
261 )
262 gprofiler = GProfiler(user_agent="scanpy", return_dataframe=True)
263 gprofiler_kwargs = dict(gprofiler_kwargs)
264 for k in ["organism"]:
265 if gprofiler_kwargs.get(k) is not None:
266 raise ValueError(
267 f"Argument `{k}` should be passed directly through `enrich`, "
268 "not through `gprofiler_kwargs`"
269 )
270 return gprofiler.profile(list(container), organism=org, **gprofiler_kwargs)
271
272
273 @enrich.register(AnnData)
274 def _enrich_anndata(
275 adata: AnnData,
276 group: str,
277 *,
278 org: Optional[str] = "hsapiens",
279 key: str = "rank_genes_groups",
280 pval_cutoff: float = 0.05,
281 log2fc_min: Optional[float] = None,
282 log2fc_max: Optional[float] = None,
283 gene_symbols: Optional[str] = None,
284 gprofiler_kwargs: Mapping[str, Any] = MappingProxyType({}),
285 ) -> pd.DataFrame:
286 de = rank_genes_groups_df(
287 adata,
288 group=group,
289 key=key,
290 pval_cutoff=pval_cutoff,
291 log2fc_min=log2fc_min,
292 log2fc_max=log2fc_max,
293 gene_symbols=gene_symbols,
294 )
295 if gene_symbols is not None:
296 gene_list = list(de[gene_symbols])
297 else:
298 gene_list = list(de["names"])
299 return enrich(gene_list, org=org, gprofiler_kwargs=gprofiler_kwargs)
300
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scanpy/queries/_queries.py b/scanpy/queries/_queries.py
--- a/scanpy/queries/_queries.py
+++ b/scanpy/queries/_queries.py
@@ -235,6 +235,9 @@
{doc_org}
gprofiler_kwargs
Keyword arguments to pass to `GProfiler.profile`, see gprofiler_.
+ **kwargs
+ All other keyword arguments are passed to `sc.get.rank_genes_groups_df`. E.g.
+ pval_cutoff, log2fc_min.
Returns
-------
@@ -293,7 +296,7 @@
gene_symbols=gene_symbols,
)
if gene_symbols is not None:
- gene_list = list(de[gene_symbols])
+ gene_list = list(de[gene_symbols].dropna())
else:
- gene_list = list(de["names"])
+ gene_list = list(de["names"].dropna())
return enrich(gene_list, org=org, gprofiler_kwargs=gprofiler_kwargs)
|
{"golden_diff": "diff --git a/scanpy/queries/_queries.py b/scanpy/queries/_queries.py\n--- a/scanpy/queries/_queries.py\n+++ b/scanpy/queries/_queries.py\n@@ -235,6 +235,9 @@\n {doc_org}\n gprofiler_kwargs\n Keyword arguments to pass to `GProfiler.profile`, see gprofiler_.\n+ **kwargs\n+ All other keyword arguments are passed to `sc.get.rank_genes_groups_df`. E.g.\n+ pval_cutoff, log2fc_min.\n \n Returns\n -------\n@@ -293,7 +296,7 @@\n gene_symbols=gene_symbols,\n )\n if gene_symbols is not None:\n- gene_list = list(de[gene_symbols])\n+ gene_list = list(de[gene_symbols].dropna())\n else:\n- gene_list = list(de[\"names\"])\n+ gene_list = list(de[\"names\"].dropna())\n return enrich(gene_list, org=org, gprofiler_kwargs=gprofiler_kwargs)\n", "issue": "sc.queries.enrich throws AssertionError with floats\n<!-- Please give a clear and concise description of what the bug is: -->\r\nI'm trying to run an enrichment analysis after filtering out certain genes via `sc.tl.filter_rank_genes_groups`, so I use `key='rank_genes_groups_filtered'` as an argument for `sc.queries.enrich`. Since the filtered values are replaced with `nan` I hoped they'd by ignored in the enrichment analysis, but it actually leads to an uninformative `AssertionError`.\r\n\r\nMy suggestion here is simply to filter `nan` values from the gene list around here and 2 lines later: https://github.com/theislab/scanpy/blob/249fc572471683357b86b8bbf41d3284118bc8f8/scanpy/queries/_queries.py#L296\r\n\r\nI can make a little PR if we agree with this simple fix\r\n\r\nNote you can reproduce this very simply without an adata object (but of course the likely use case is with an adata object as outlined above):\r\n\r\n<!-- Put a minimal reproducible example that reproduces the bug in the code block below: -->\r\n```\r\nsc.queries.enrich([float('nan')])\r\n```\r\nOutput:\r\n<!-- Put your Error output in this code block (if applicable, else delete the block): -->\r\n```pytb\r\nAssertionError: query failed with error 500\r\n```\r\n\r\n#### Versions:\r\n```\r\nscanpy==1.4.5.post2 anndata==0.6.22.post1 umap==0.3.10 numpy==1.18.1 scipy==1.2.1 pandas==1.0.1 scikit-learn==0.22.1 statsmodels==0.11.0 python-igraph==0.8.0\r\n```\n", "before_files": [{"content": "import collections.abc as cabc\nfrom functools import singledispatch\nfrom types import MappingProxyType\nfrom typing import Any, Union, Optional, Iterable, Dict, Mapping\n\nimport pandas as pd\nfrom anndata import AnnData\n\nfrom ..get import rank_genes_groups_df\nfrom .._utils import _doc_params\n\n\n_doc_org = \"\"\"\\\norg\n Organism to query. Must be an organism in ensembl biomart. \"hsapiens\",\n \"mmusculus\", \"drerio\", etc.\\\n\"\"\"\n\n_doc_host = \"\"\"\\\nhost\n A valid BioMart host URL. Alternative values include archive urls (like\n \"grch37.ensembl.org\") or regional mirrors (like \"useast.ensembl.org\").\\\n\"\"\"\n\n_doc_use_cache = \"\"\"\\\nuse_cache\n Whether pybiomart should use a cache for requests. 
Will create a\n `.pybiomart.sqlite` file in current directory if used.\\\n\"\"\"\n\n\n@_doc_params(doc_org=_doc_org, doc_host=_doc_host, doc_use_cache=_doc_use_cache)\ndef simple_query(\n org: str,\n attrs: Union[Iterable[str], str],\n *,\n filters: Optional[Dict[str, Any]] = None,\n host: str = \"www.ensembl.org\",\n use_cache: bool = False,\n) -> pd.DataFrame:\n \"\"\"\\\n A simple interface to biomart.\n\n Params\n ------\n {doc_org}\n attrs\n What you want returned.\n filters\n What you want to pick out.\n {doc_host}\n {doc_use_cache}\n \"\"\"\n if isinstance(attrs, str):\n attrs = [attrs]\n elif isinstance(attrs, cabc.Iterable):\n attrs = list(attrs)\n else:\n raise TypeError(f\"attrs must be of type list or str, was {type(attrs)}.\")\n try:\n from pybiomart import Server\n except ImportError:\n raise ImportError(\n \"This method requires the `pybiomart` module to be installed.\"\n )\n server = Server(host, use_cache=use_cache)\n dataset = server.marts[\"ENSEMBL_MART_ENSEMBL\"].datasets[\n \"{}_gene_ensembl\".format(org)\n ]\n res = dataset.query(attributes=attrs, filters=filters, use_attr_names=True)\n return res\n\n\n@_doc_params(doc_org=_doc_org, doc_host=_doc_host, doc_use_cache=_doc_use_cache)\ndef biomart_annotations(\n org: str,\n attrs: Iterable[str],\n *,\n host: str = \"www.ensembl.org\",\n use_cache: bool = False,\n) -> pd.DataFrame:\n \"\"\"\\\n Retrieve gene annotations from ensembl biomart.\n\n Parameters\n ----------\n {doc_org}\n attrs\n Attributes to query biomart for.\n {doc_host}\n {doc_use_cache}\n\n Returns\n -------\n Dataframe containing annotations.\n\n Examples\n --------\n Retrieve genes coordinates and chromosomes\n\n >>> import scanpy as sc\n >>> annot = sc.queries.biomart_annotations(\n \"hsapiens\",\n [\"ensembl_gene_id\", \"start_position\", \"end_position\", \"chromosome_name\"],\n ).set_index(\"ensembl_gene_id\")\n >>> adata.var[annot.columns] = annot\n \"\"\"\n return simple_query(org=org, attrs=attrs, host=host, use_cache=use_cache)\n\n\n@_doc_params(doc_org=_doc_org, doc_host=_doc_host, doc_use_cache=_doc_use_cache)\ndef gene_coordinates(\n org: str,\n gene_name: str,\n *,\n gene_attr: str = \"external_gene_name\",\n chr_exclude: Iterable[str] = (),\n host: str = \"www.ensembl.org\",\n use_cache: bool = False,\n) -> pd.DataFrame:\n \"\"\"\\\n Retrieve gene coordinates for specific organism through BioMart.\n\n Parameters\n ----------\n {doc_org}\n gene_name\n The gene symbol (e.g. 
\"hgnc_symbol\" for human) for which to retrieve\n coordinates.\n gene_attr\n The biomart attribute the gene symbol should show up for.\n chr_exclude\n A list of chromosomes to exclude from query.\n {doc_host}\n {doc_use_cache}\n\n Returns\n -------\n Dataframe containing gene coordinates for the specified gene symbol.\n\n Examples\n --------\n >>> import scanpy as sc\n >>> sc.queries.gene_coordinates(\"hsapiens\", \"MT-TF\")\n \"\"\"\n res = simple_query(\n org=org,\n attrs=[\"chromosome_name\", \"start_position\", \"end_position\"],\n filters={gene_attr: gene_name},\n host=host,\n use_cache=use_cache,\n )\n return res[~res[\"chromosome_name\"].isin(chr_exclude)]\n\n\n@_doc_params(doc_org=_doc_org, doc_host=_doc_host, doc_use_cache=_doc_use_cache)\ndef mitochondrial_genes(\n org: str,\n *,\n attrname: str = \"external_gene_name\",\n host: str = \"www.ensembl.org\",\n use_cache: bool = False,\n chromosome: str = \"MT\",\n) -> pd.DataFrame:\n \"\"\"\\\n Mitochondrial gene symbols for specific organism through BioMart.\n\n Parameters\n ----------\n {doc_org}\n attrname\n Biomart attribute field to return. Possible values include\n \"external_gene_name\", \"ensembl_gene_id\", \"hgnc_symbol\", \"mgi_symbol\",\n and \"zfin_id_symbol\".\n {doc_host}\n {doc_use_cache}\n chromosome\n Mitochrondrial chromosome name used in BioMart for organism.\n\n Returns\n -------\n Dataframe containing identifiers for mitochondrial genes.\n\n Examples\n --------\n >>> import scanpy as sc\n >>> mito_gene_names = sc.queries.mitochondrial_genes(\"hsapiens\")\n >>> mito_ensembl_ids = sc.queries.mitochondrial_genes(\"hsapiens\", attrname=\"ensembl_gene_id\")\n >>> mito_gene_names_fly = sc.queries.mitochondrial_genes(\"dmelanogaster\", chromosome=\"mitochondrion_genome\")\n \"\"\"\n return simple_query(\n org,\n attrs=[attrname],\n filters={\"chromosome_name\": [chromosome]},\n host=host,\n use_cache=use_cache,\n )\n\n\n@singledispatch\n@_doc_params(doc_org=_doc_org)\ndef enrich(\n container: Iterable[str],\n *,\n org: str = \"hsapiens\",\n gprofiler_kwargs: Mapping[str, Any] = MappingProxyType({}),\n) -> pd.DataFrame:\n \"\"\"\\\n Get enrichment for DE results.\n\n This is a thin convenience wrapper around the very useful gprofiler_.\n\n This method dispatches on the first argument, leading to the following two\n signatures::\n\n enrich(container, ...)\n enrich(adata: AnnData, group, key: str, ...)\n\n Where::\n\n enrich(adata, group, key, ...) = enrich(adata.uns[key][\"names\"][group], ...)\n\n .. 
_gprofiler: https://pypi.org/project/gprofiler-official/#description\n\n Parameters\n ----------\n container\n Contains genes you'd like to search.\n adata\n AnnData object whose group will be looked for.\n group\n The group whose genes should be used for enrichment.\n key\n Key in `uns` to find group under.\n {doc_org}\n gprofiler_kwargs\n Keyword arguments to pass to `GProfiler.profile`, see gprofiler_.\n\n Returns\n -------\n Dataframe of enrichment results.\n\n Examples\n --------\n Using `sc.queries.enrich` on a list of genes:\n\n >>> import scanpy as sc\n >>> sc.queries.enrich(['Klf4', 'Pax5', 'Sox2', 'Nanog'], org=\"hsapiens\")\n\n Using `sc.queries.enrich` on an :class:`anndata.AnnData` object:\n\n >>> pbmcs = sc.datasets.pbmc68k_reduced()\n >>> sc.tl.rank_genes_groups(pbmcs, \"bulk_labels\")\n >>> sc.queries.enrich(pbmcs, \"CD34+\")\n \"\"\"\n try:\n from gprofiler import GProfiler\n except ImportError:\n raise ImportError(\n \"This method requires the `gprofiler-official` module to be installed.\"\n )\n gprofiler = GProfiler(user_agent=\"scanpy\", return_dataframe=True)\n gprofiler_kwargs = dict(gprofiler_kwargs)\n for k in [\"organism\"]:\n if gprofiler_kwargs.get(k) is not None:\n raise ValueError(\n f\"Argument `{k}` should be passed directly through `enrich`, \"\n \"not through `gprofiler_kwargs`\"\n )\n return gprofiler.profile(list(container), organism=org, **gprofiler_kwargs)\n\n\[email protected](AnnData)\ndef _enrich_anndata(\n adata: AnnData,\n group: str,\n *,\n org: Optional[str] = \"hsapiens\",\n key: str = \"rank_genes_groups\",\n pval_cutoff: float = 0.05,\n log2fc_min: Optional[float] = None,\n log2fc_max: Optional[float] = None,\n gene_symbols: Optional[str] = None,\n gprofiler_kwargs: Mapping[str, Any] = MappingProxyType({}),\n) -> pd.DataFrame:\n de = rank_genes_groups_df(\n adata,\n group=group,\n key=key,\n pval_cutoff=pval_cutoff,\n log2fc_min=log2fc_min,\n log2fc_max=log2fc_max,\n gene_symbols=gene_symbols,\n )\n if gene_symbols is not None:\n gene_list = list(de[gene_symbols])\n else:\n gene_list = list(de[\"names\"])\n return enrich(gene_list, org=org, gprofiler_kwargs=gprofiler_kwargs)\n", "path": "scanpy/queries/_queries.py"}], "after_files": [{"content": "import collections.abc as cabc\nfrom functools import singledispatch\nfrom types import MappingProxyType\nfrom typing import Any, Union, Optional, Iterable, Dict, Mapping\n\nimport pandas as pd\nfrom anndata import AnnData\n\nfrom ..get import rank_genes_groups_df\nfrom .._utils import _doc_params\n\n\n_doc_org = \"\"\"\\\norg\n Organism to query. Must be an organism in ensembl biomart. \"hsapiens\",\n \"mmusculus\", \"drerio\", etc.\\\n\"\"\"\n\n_doc_host = \"\"\"\\\nhost\n A valid BioMart host URL. Alternative values include archive urls (like\n \"grch37.ensembl.org\") or regional mirrors (like \"useast.ensembl.org\").\\\n\"\"\"\n\n_doc_use_cache = \"\"\"\\\nuse_cache\n Whether pybiomart should use a cache for requests. 
Will create a\n `.pybiomart.sqlite` file in current directory if used.\\\n\"\"\"\n\n\n@_doc_params(doc_org=_doc_org, doc_host=_doc_host, doc_use_cache=_doc_use_cache)\ndef simple_query(\n org: str,\n attrs: Union[Iterable[str], str],\n *,\n filters: Optional[Dict[str, Any]] = None,\n host: str = \"www.ensembl.org\",\n use_cache: bool = False,\n) -> pd.DataFrame:\n \"\"\"\\\n A simple interface to biomart.\n\n Params\n ------\n {doc_org}\n attrs\n What you want returned.\n filters\n What you want to pick out.\n {doc_host}\n {doc_use_cache}\n \"\"\"\n if isinstance(attrs, str):\n attrs = [attrs]\n elif isinstance(attrs, cabc.Iterable):\n attrs = list(attrs)\n else:\n raise TypeError(f\"attrs must be of type list or str, was {type(attrs)}.\")\n try:\n from pybiomart import Server\n except ImportError:\n raise ImportError(\n \"This method requires the `pybiomart` module to be installed.\"\n )\n server = Server(host, use_cache=use_cache)\n dataset = server.marts[\"ENSEMBL_MART_ENSEMBL\"].datasets[\n \"{}_gene_ensembl\".format(org)\n ]\n res = dataset.query(attributes=attrs, filters=filters, use_attr_names=True)\n return res\n\n\n@_doc_params(doc_org=_doc_org, doc_host=_doc_host, doc_use_cache=_doc_use_cache)\ndef biomart_annotations(\n org: str,\n attrs: Iterable[str],\n *,\n host: str = \"www.ensembl.org\",\n use_cache: bool = False,\n) -> pd.DataFrame:\n \"\"\"\\\n Retrieve gene annotations from ensembl biomart.\n\n Parameters\n ----------\n {doc_org}\n attrs\n Attributes to query biomart for.\n {doc_host}\n {doc_use_cache}\n\n Returns\n -------\n Dataframe containing annotations.\n\n Examples\n --------\n Retrieve genes coordinates and chromosomes\n\n >>> import scanpy as sc\n >>> annot = sc.queries.biomart_annotations(\n \"hsapiens\",\n [\"ensembl_gene_id\", \"start_position\", \"end_position\", \"chromosome_name\"],\n ).set_index(\"ensembl_gene_id\")\n >>> adata.var[annot.columns] = annot\n \"\"\"\n return simple_query(org=org, attrs=attrs, host=host, use_cache=use_cache)\n\n\n@_doc_params(doc_org=_doc_org, doc_host=_doc_host, doc_use_cache=_doc_use_cache)\ndef gene_coordinates(\n org: str,\n gene_name: str,\n *,\n gene_attr: str = \"external_gene_name\",\n chr_exclude: Iterable[str] = (),\n host: str = \"www.ensembl.org\",\n use_cache: bool = False,\n) -> pd.DataFrame:\n \"\"\"\\\n Retrieve gene coordinates for specific organism through BioMart.\n\n Parameters\n ----------\n {doc_org}\n gene_name\n The gene symbol (e.g. 
\"hgnc_symbol\" for human) for which to retrieve\n coordinates.\n gene_attr\n The biomart attribute the gene symbol should show up for.\n chr_exclude\n A list of chromosomes to exclude from query.\n {doc_host}\n {doc_use_cache}\n\n Returns\n -------\n Dataframe containing gene coordinates for the specified gene symbol.\n\n Examples\n --------\n >>> import scanpy as sc\n >>> sc.queries.gene_coordinates(\"hsapiens\", \"MT-TF\")\n \"\"\"\n res = simple_query(\n org=org,\n attrs=[\"chromosome_name\", \"start_position\", \"end_position\"],\n filters={gene_attr: gene_name},\n host=host,\n use_cache=use_cache,\n )\n return res[~res[\"chromosome_name\"].isin(chr_exclude)]\n\n\n@_doc_params(doc_org=_doc_org, doc_host=_doc_host, doc_use_cache=_doc_use_cache)\ndef mitochondrial_genes(\n org: str,\n *,\n attrname: str = \"external_gene_name\",\n host: str = \"www.ensembl.org\",\n use_cache: bool = False,\n chromosome: str = \"MT\",\n) -> pd.DataFrame:\n \"\"\"\\\n Mitochondrial gene symbols for specific organism through BioMart.\n\n Parameters\n ----------\n {doc_org}\n attrname\n Biomart attribute field to return. Possible values include\n \"external_gene_name\", \"ensembl_gene_id\", \"hgnc_symbol\", \"mgi_symbol\",\n and \"zfin_id_symbol\".\n {doc_host}\n {doc_use_cache}\n chromosome\n Mitochrondrial chromosome name used in BioMart for organism.\n\n Returns\n -------\n Dataframe containing identifiers for mitochondrial genes.\n\n Examples\n --------\n >>> import scanpy as sc\n >>> mito_gene_names = sc.queries.mitochondrial_genes(\"hsapiens\")\n >>> mito_ensembl_ids = sc.queries.mitochondrial_genes(\"hsapiens\", attrname=\"ensembl_gene_id\")\n >>> mito_gene_names_fly = sc.queries.mitochondrial_genes(\"dmelanogaster\", chromosome=\"mitochondrion_genome\")\n \"\"\"\n return simple_query(\n org,\n attrs=[attrname],\n filters={\"chromosome_name\": [chromosome]},\n host=host,\n use_cache=use_cache,\n )\n\n\n@singledispatch\n@_doc_params(doc_org=_doc_org)\ndef enrich(\n container: Iterable[str],\n *,\n org: str = \"hsapiens\",\n gprofiler_kwargs: Mapping[str, Any] = MappingProxyType({}),\n) -> pd.DataFrame:\n \"\"\"\\\n Get enrichment for DE results.\n\n This is a thin convenience wrapper around the very useful gprofiler_.\n\n This method dispatches on the first argument, leading to the following two\n signatures::\n\n enrich(container, ...)\n enrich(adata: AnnData, group, key: str, ...)\n\n Where::\n\n enrich(adata, group, key, ...) = enrich(adata.uns[key][\"names\"][group], ...)\n\n .. _gprofiler: https://pypi.org/project/gprofiler-official/#description\n\n Parameters\n ----------\n container\n Contains genes you'd like to search.\n adata\n AnnData object whose group will be looked for.\n group\n The group whose genes should be used for enrichment.\n key\n Key in `uns` to find group under.\n {doc_org}\n gprofiler_kwargs\n Keyword arguments to pass to `GProfiler.profile`, see gprofiler_.\n **kwargs\n All other keyword arguments are passed to `sc.get.rank_genes_groups_df`. 
E.g.\n pval_cutoff, log2fc_min.\n\n Returns\n -------\n Dataframe of enrichment results.\n\n Examples\n --------\n Using `sc.queries.enrich` on a list of genes:\n\n >>> import scanpy as sc\n >>> sc.queries.enrich(['Klf4', 'Pax5', 'Sox2', 'Nanog'], org=\"hsapiens\")\n\n Using `sc.queries.enrich` on an :class:`anndata.AnnData` object:\n\n >>> pbmcs = sc.datasets.pbmc68k_reduced()\n >>> sc.tl.rank_genes_groups(pbmcs, \"bulk_labels\")\n >>> sc.queries.enrich(pbmcs, \"CD34+\")\n \"\"\"\n try:\n from gprofiler import GProfiler\n except ImportError:\n raise ImportError(\n \"This method requires the `gprofiler-official` module to be installed.\"\n )\n gprofiler = GProfiler(user_agent=\"scanpy\", return_dataframe=True)\n gprofiler_kwargs = dict(gprofiler_kwargs)\n for k in [\"organism\"]:\n if gprofiler_kwargs.get(k) is not None:\n raise ValueError(\n f\"Argument `{k}` should be passed directly through `enrich`, \"\n \"not through `gprofiler_kwargs`\"\n )\n return gprofiler.profile(list(container), organism=org, **gprofiler_kwargs)\n\n\[email protected](AnnData)\ndef _enrich_anndata(\n adata: AnnData,\n group: str,\n *,\n org: Optional[str] = \"hsapiens\",\n key: str = \"rank_genes_groups\",\n pval_cutoff: float = 0.05,\n log2fc_min: Optional[float] = None,\n log2fc_max: Optional[float] = None,\n gene_symbols: Optional[str] = None,\n gprofiler_kwargs: Mapping[str, Any] = MappingProxyType({}),\n) -> pd.DataFrame:\n de = rank_genes_groups_df(\n adata,\n group=group,\n key=key,\n pval_cutoff=pval_cutoff,\n log2fc_min=log2fc_min,\n log2fc_max=log2fc_max,\n gene_symbols=gene_symbols,\n )\n if gene_symbols is not None:\n gene_list = list(de[gene_symbols].dropna())\n else:\n gene_list = list(de[\"names\"].dropna())\n return enrich(gene_list, org=org, gprofiler_kwargs=gprofiler_kwargs)\n", "path": "scanpy/queries/_queries.py"}]}
| 3,610 | 230 |
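The accepted fix in the row above comes down to calling `.dropna()` on the gene-name column before the list is passed to g:Profiler. A minimal pandas illustration of why that matters (the gene symbols here are made up, not taken from the dataset):

```python
import pandas as pd

# A column of gene symbols as rank_genes_groups_df might return it, including
# missing values for genes that have no mapped symbol.
names = pd.Series(["KLF4", None, "SOX2", float("nan"), "NANOG"])

# Without dropna(), the None/NaN entries would be forwarded to the enrichment API.
gene_list = list(names.dropna())
print(gene_list)  # ['KLF4', 'SOX2', 'NANOG']
```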
gh_patches_debug_61839
|
rasdani/github-patches
|
git_diff
|
pypa__setuptools-4065
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Docs] Preview pop-up on a link covers the link itself
### Summary
I've come across an issue that breaks the online documentation completely for me. Whenever I move my mouse pointer over a link to a different part of the documentation, a pop-up appears that covers the link, making it near-impossible to click the link. See this screen recording of the situation where this first manifested for me:
https://github.com/pypa/setuptools/assets/50332/af946044-7222-4e2d-b090-c771be758598
(On this page: https://setuptools.pypa.io/en/latest/pkg_resources.html)
### OS / Environment
Safari 16.6, macOS 12.6.8.
### Additional Information
IMHO, as it presents itself to me, this feature has downsides that are orders of magnitude bigger than its upsides. My browser already allows me to preview a page by triple-tapping on the trackpad (macOS) or long-pressing (iOS), so it doesn't add a benefit on these platforms.
---
As an additional note: Even if this feature was implemented in a way where it wouldn't make it impossible to click on some links, it would still be an accessibility issue for me:
I'm on the ADD spectrum and I use my mouse pointer for focussing while reading. It's very natural for me to move my pointer along the text while reading. Such an unavoidable popup will draw my focus away from what I am reading (because it appears when I'm not expecting it and haven't performed an explicit action to make it appear). I'm having this issue also e.g. on GitHub, where some links have pop-ups that appear on mouse hovering.
If you intend to keep these pop-ups, there is something you could do to make them a bit less intrusive for people like me (I can't speak for everyone on the ADD spectrum, of course): make the pop-up appear immediately when entering the link's region _and also_ disappear immediately when leaving the region, instead of after a short delay. For example, buttons and links that change appearance while hovering, or tool-tips in UIs that appear immediately, are much less distracting to me. I think my brain is more likely to associate my action with the appearance of the pop-up and thus be able to ignore the stimulus.
But hey, thanks for your work anyways!
### Code of Conduct
- [X] I agree to follow the PSF Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 extensions = [
2 'sphinx.ext.autodoc',
3 'jaraco.packaging.sphinx',
4 ]
5
6 master_doc = "index"
7 html_theme = "furo"
8
9 # Link dates and other references in the changelog
10 extensions += ['rst.linker']
11 link_files = {
12 '../NEWS.rst': dict(
13 using=dict(
14 BB='https://bitbucket.org',
15 GH='https://github.com',
16 ),
17 replace=[
18 dict(
19 pattern=r'(Issue #|\B#)(?P<issue>\d+)',
20 url='{package_url}/issues/{issue}',
21 ),
22 dict(
23 pattern=r'(?m:^((?P<scm_version>v?\d+(\.\d+){1,2}))\n[-=]+\n)',
24 with_scm='{text}\n{rev[timestamp]:%d %b %Y}\n',
25 ),
26 dict(
27 pattern=r'PEP[- ](?P<pep_number>\d+)',
28 url='https://peps.python.org/pep-{pep_number:0>4}/',
29 ),
30 dict(
31 pattern=r'(?<!\w)PR #(?P<pull>\d+)',
32 url='{package_url}/pull/{pull}',
33 ),
34 dict(
35 pattern=r'BB Pull Request ?#(?P<bb_pull_request>\d+)',
36 url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',
37 ),
38 dict(
39 pattern=r'Distribute #(?P<distribute>\d+)',
40 url='{BB}/tarek/distribute/issue/{distribute}',
41 ),
42 dict(
43 pattern=r'Buildout #(?P<buildout>\d+)',
44 url='{GH}/buildout/buildout/issues/{buildout}',
45 ),
46 dict(
47 pattern=r'Old Setuptools #(?P<old_setuptools>\d+)',
48 url='http://bugs.python.org/setuptools/issue{old_setuptools}',
49 ),
50 dict(
51 pattern=r'Jython #(?P<jython>\d+)',
52 url='http://bugs.jython.org/issue{jython}',
53 ),
54 dict(
55 pattern=r'(Python #|bpo-)(?P<python>\d+)',
56 url='http://bugs.python.org/issue{python}',
57 ),
58 dict(
59 pattern=r'Interop #(?P<interop>\d+)',
60 url='{GH}/pypa/interoperability-peps/issues/{interop}',
61 ),
62 dict(
63 pattern=r'Pip #(?P<pip>\d+)',
64 url='{GH}/pypa/pip/issues/{pip}',
65 ),
66 dict(
67 pattern=r'Packaging #(?P<packaging>\d+)',
68 url='{GH}/pypa/packaging/issues/{packaging}',
69 ),
70 dict(
71 pattern=r'[Pp]ackaging (?P<packaging_ver>\d+(\.\d+)+)',
72 url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',
73 ),
74 dict(
75 pattern=r'setuptools_svn #(?P<setuptools_svn>\d+)',
76 url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',
77 ),
78 dict(
79 pattern=r'pypa/(?P<issue_repo>[\-\.\w]+)#(?P<issue_number>\d+)',
80 url='{GH}/pypa/{issue_repo}/issues/{issue_number}',
81 ),
82 dict(
83 pattern=r'pypa/(?P<commit_repo>[\-\.\w]+)@(?P<commit_number>[\da-f]+)',
84 url='{GH}/pypa/{commit_repo}/commit/{commit_number}',
85 ),
86 ],
87 ),
88 }
89
90 # Be strict about any broken references
91 nitpicky = True
92
93 # Include Python intersphinx mapping to prevent failures
94 # jaraco/skeleton#51
95 extensions += ['sphinx.ext.intersphinx']
96 intersphinx_mapping = {
97 'python': ('https://docs.python.org/3', None),
98 }
99
100 # Preserve authored syntax for defaults
101 autodoc_preserve_defaults = True
102
103 intersphinx_mapping.update(
104 {
105 'pip': ('https://pip.pypa.io/en/latest', None),
106 'build': ('https://pypa-build.readthedocs.io/en/latest', None),
107 'PyPUG': ('https://packaging.python.org/en/latest/', None),
108 'packaging': ('https://packaging.pypa.io/en/latest/', None),
109 'twine': ('https://twine.readthedocs.io/en/stable/', None),
110 'importlib-resources': (
111 'https://importlib-resources.readthedocs.io/en/latest',
112 None,
113 ),
114 }
115 )
116
117 # Support tooltips on references
118 extensions += ['hoverxref.extension']
119 hoverxref_auto_ref = True
120 hoverxref_intersphinx = [
121 'python',
122 'pip',
123 'build',
124 'PyPUG',
125 'packaging',
126 'twine',
127 'importlib-resources',
128 ]
129
130 # Add support for linking usernames
131 github_url = 'https://github.com'
132 github_repo_org = 'pypa'
133 github_repo_name = 'setuptools'
134 github_repo_slug = f'{github_repo_org}/{github_repo_name}'
135 github_repo_url = f'{github_url}/{github_repo_slug}'
136 github_sponsors_url = f'{github_url}/sponsors'
137 extlinks = {
138 'user': (f'{github_sponsors_url}/%s', '@%s'), # noqa: WPS323
139 'pypi': ('https://pypi.org/project/%s', '%s'), # noqa: WPS323
140 'wiki': ('https://wikipedia.org/wiki/%s', '%s'), # noqa: WPS323
141 }
142 extensions += ['sphinx.ext.extlinks']
143
144 # Ref: https://github.com/python-attrs/attrs/pull/571/files\
145 # #diff-85987f48f1258d9ee486e3191495582dR82
146 default_role = 'any'
147
148 # HTML theme
149 html_theme = 'furo'
150 html_logo = "images/logo.svg"
151
152 html_theme_options = {
153 "sidebar_hide_name": True,
154 "light_css_variables": {
155 "color-brand-primary": "#336790", # "blue"
156 "color-brand-content": "#336790",
157 },
158 "dark_css_variables": {
159 "color-brand-primary": "#E5B62F", # "yellow"
160 "color-brand-content": "#E5B62F",
161 },
162 }
163
164 # Redirect old docs so links and references in the ecosystem don't break
165 extensions += ['sphinx_reredirects']
166 redirects = {
167 "userguide/keywords": "/deprecated/changed_keywords.html",
168 "userguide/commands": "/deprecated/commands.html",
169 }
170
171 # Add support for inline tabs
172 extensions += ['sphinx_inline_tabs']
173
174 # Support for distutils
175
176 # Ref: https://stackoverflow.com/a/30624034/595220
177 nitpick_ignore = [
178 ('c:func', 'SHGetSpecialFolderPath'), # ref to MS docs
179 ('envvar', 'DISTUTILS_DEBUG'), # undocumented
180 ('envvar', 'HOME'), # undocumented
181 ('envvar', 'PLAT'), # undocumented
182 ('envvar', 'DIST_EXTRA_CONFIG'), # undocumented
183 ('py:attr', 'CCompiler.language_map'), # undocumented
184 ('py:attr', 'CCompiler.language_order'), # undocumented
185 ('py:class', 'distutils.dist.Distribution'), # undocumented
186 ('py:class', 'distutils.extension.Extension'), # undocumented
187 ('py:class', 'BorlandCCompiler'), # undocumented
188 ('py:class', 'CCompiler'), # undocumented
189 ('py:class', 'CygwinCCompiler'), # undocumented
190 ('py:class', 'distutils.dist.DistributionMetadata'), # undocumented
191 ('py:class', 'FileList'), # undocumented
192 ('py:class', 'IShellLink'), # ref to MS docs
193 ('py:class', 'MSVCCompiler'), # undocumented
194 ('py:class', 'OptionDummy'), # undocumented
195 ('py:class', 'UnixCCompiler'), # undocumented
196 ('py:exc', 'CompileError'), # undocumented
197 ('py:exc', 'DistutilsExecError'), # undocumented
198 ('py:exc', 'DistutilsFileError'), # undocumented
199 ('py:exc', 'LibError'), # undocumented
200 ('py:exc', 'LinkError'), # undocumented
201 ('py:exc', 'PreprocessError'), # undocumented
202 ('py:exc', 'setuptools.errors.PlatformError'), # sphinx cannot find it
203 ('py:func', 'distutils.CCompiler.new_compiler'), # undocumented
204 # undocumented:
205 ('py:func', 'distutils.dist.DistributionMetadata.read_pkg_file'),
206 ('py:func', 'distutils.file_util._copy_file_contents'), # undocumented
207 ('py:func', 'distutils.log.debug'), # undocumented
208 ('py:func', 'distutils.spawn.find_executable'), # undocumented
209 ('py:func', 'distutils.spawn.spawn'), # undocumented
210 # TODO: check https://docutils.rtfd.io in the future
211 ('py:mod', 'docutils'), # there's no Sphinx site documenting this
212 ]
213
214 # Allow linking objects on other Sphinx sites seamlessly:
215 intersphinx_mapping.update(
216 python=('https://docs.python.org/3', None),
217 )
218
219 # Add support for the unreleased "next-version" change notes
220 extensions += ['sphinxcontrib.towncrier']
221 # Extension needs a path from here to the towncrier config.
222 towncrier_draft_working_directory = '..'
223 # Avoid an empty section for unpublished changes.
224 towncrier_draft_include_empty = False
225 # sphinx-contrib/sphinxcontrib-towncrier#81
226 towncrier_draft_config_path = 'towncrier.toml'
227
228 extensions += ['jaraco.tidelift']
229
230 # Add icons (aka "favicons") to documentation
231 extensions += ['sphinx_favicon']
232 html_static_path = ['images'] # should contain the folder with icons
233
234 # Add support for nice Not Found 404 pages
235 extensions += ['notfound.extension']
236
237 # List of dicts with <link> HTML attributes
238 # static-file points to files in the html_static_path (href is computed)
239 favicons = [
240 { # "Catch-all" goes first, otherwise some browsers will overwrite
241 "rel": "icon",
242 "type": "image/svg+xml",
243 "static-file": "logo-symbol-only.svg",
244 "sizes": "any",
245 },
246 { # Version with thicker strokes for better visibility at smaller sizes
247 "rel": "icon",
248 "type": "image/svg+xml",
249 "static-file": "favicon.svg",
250 "sizes": "16x16 24x24 32x32 48x48",
251 },
252 # rel="apple-touch-icon" does not support SVG yet
253 ]
254
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -114,19 +114,6 @@
}
)
-# Support tooltips on references
-extensions += ['hoverxref.extension']
-hoverxref_auto_ref = True
-hoverxref_intersphinx = [
- 'python',
- 'pip',
- 'build',
- 'PyPUG',
- 'packaging',
- 'twine',
- 'importlib-resources',
-]
-
# Add support for linking usernames
github_url = 'https://github.com'
github_repo_org = 'pypa'
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -114,19 +114,6 @@\n }\n )\n \n-# Support tooltips on references\n-extensions += ['hoverxref.extension']\n-hoverxref_auto_ref = True\n-hoverxref_intersphinx = [\n- 'python',\n- 'pip',\n- 'build',\n- 'PyPUG',\n- 'packaging',\n- 'twine',\n- 'importlib-resources',\n-]\n-\n # Add support for linking usernames\n github_url = 'https://github.com'\n github_repo_org = 'pypa'\n", "issue": "[Docs] Preview pop-up on a link covers the link itself\n### Summary\n\nI've come across an issue with that breaks the online documentation completely for me. Whenever I move my mouse pointer over a link to a different part of the documentation, a pop-up appears that covers the link, making it near-impossible to click the link. See this screen recording of the situation that this has manifested first for me:\r\n\r\nhttps://github.com/pypa/setuptools/assets/50332/af946044-7222-4e2d-b090-c771be758598\r\n\r\n(On this page: https://setuptools.pypa.io/en/latest/pkg_resources.html)\n\n### OS / Environment\n\nSafari 16.6, macOS 12.6.8.\n\n### Additional Information\n\nIMHO, as it presents itself to me, this feature has downsides that are orders of magnitude bigger that it's upsides. My browser already allows me to preview a page by triple-tapping on the trackpad (macOS) or long-pressing (iOS), so it doesn't add a benefit on these platforms.\r\n\r\n---\r\n\r\nAs an additional note: Even if this feature was implemented in a way where it wouldn't make it impossible to click on some links, it would still be an accessibility issue for me:\r\n\r\nI'm on the ADD spectrum and I use my mouse pointer for focussing while reading. It's very natural for me to move my pointer along the text while reading. Such an unavoidable popup will draw my focus away from what I am reading (because it appears when I'm not expecting it and haven't performed an explicit action to make it appear). I'm having this issue also e.g. on GitHub, where some links have pop-ups that appear on mouse hovering.\r\n\r\nIf you intend on keeping these pop-ups, there is something that you could do to make it a bit less intrusive for people like me (I can't speak for everyone on the ADD spectrum of course): Make the pop-up appear immediately when entering the link's region _and also_ disappear immediately when leaving the region, instead of after a short delay. For example, buttons and links that change appearance while hovering or tool-tips in UIs that appear immediately are much less distracting to me. 
I think my brain is more likely to associate my action with the appearance of the pop-up and thus able to ignore the stimulus.\r\n\r\nBut hey, thanks for your work anyways!\n\n### Code of Conduct\n\n- [X] I agree to follow the PSF Code of Conduct\n", "before_files": [{"content": "extensions = [\n 'sphinx.ext.autodoc',\n 'jaraco.packaging.sphinx',\n]\n\nmaster_doc = \"index\"\nhtml_theme = \"furo\"\n\n# Link dates and other references in the changelog\nextensions += ['rst.linker']\nlink_files = {\n '../NEWS.rst': dict(\n using=dict(\n BB='https://bitbucket.org',\n GH='https://github.com',\n ),\n replace=[\n dict(\n pattern=r'(Issue #|\\B#)(?P<issue>\\d+)',\n url='{package_url}/issues/{issue}',\n ),\n dict(\n pattern=r'(?m:^((?P<scm_version>v?\\d+(\\.\\d+){1,2}))\\n[-=]+\\n)',\n with_scm='{text}\\n{rev[timestamp]:%d %b %Y}\\n',\n ),\n dict(\n pattern=r'PEP[- ](?P<pep_number>\\d+)',\n url='https://peps.python.org/pep-{pep_number:0>4}/',\n ),\n dict(\n pattern=r'(?<!\\w)PR #(?P<pull>\\d+)',\n url='{package_url}/pull/{pull}',\n ),\n dict(\n pattern=r'BB Pull Request ?#(?P<bb_pull_request>\\d+)',\n url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',\n ),\n dict(\n pattern=r'Distribute #(?P<distribute>\\d+)',\n url='{BB}/tarek/distribute/issue/{distribute}',\n ),\n dict(\n pattern=r'Buildout #(?P<buildout>\\d+)',\n url='{GH}/buildout/buildout/issues/{buildout}',\n ),\n dict(\n pattern=r'Old Setuptools #(?P<old_setuptools>\\d+)',\n url='http://bugs.python.org/setuptools/issue{old_setuptools}',\n ),\n dict(\n pattern=r'Jython #(?P<jython>\\d+)',\n url='http://bugs.jython.org/issue{jython}',\n ),\n dict(\n pattern=r'(Python #|bpo-)(?P<python>\\d+)',\n url='http://bugs.python.org/issue{python}',\n ),\n dict(\n pattern=r'Interop #(?P<interop>\\d+)',\n url='{GH}/pypa/interoperability-peps/issues/{interop}',\n ),\n dict(\n pattern=r'Pip #(?P<pip>\\d+)',\n url='{GH}/pypa/pip/issues/{pip}',\n ),\n dict(\n pattern=r'Packaging #(?P<packaging>\\d+)',\n url='{GH}/pypa/packaging/issues/{packaging}',\n ),\n dict(\n pattern=r'[Pp]ackaging (?P<packaging_ver>\\d+(\\.\\d+)+)',\n url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',\n ),\n dict(\n pattern=r'setuptools_svn #(?P<setuptools_svn>\\d+)',\n url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',\n ),\n dict(\n pattern=r'pypa/(?P<issue_repo>[\\-\\.\\w]+)#(?P<issue_number>\\d+)',\n url='{GH}/pypa/{issue_repo}/issues/{issue_number}',\n ),\n dict(\n pattern=r'pypa/(?P<commit_repo>[\\-\\.\\w]+)@(?P<commit_number>[\\da-f]+)',\n url='{GH}/pypa/{commit_repo}/commit/{commit_number}',\n ),\n ],\n ),\n}\n\n# Be strict about any broken references\nnitpicky = True\n\n# Include Python intersphinx mapping to prevent failures\n# jaraco/skeleton#51\nextensions += ['sphinx.ext.intersphinx']\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n}\n\n# Preserve authored syntax for defaults\nautodoc_preserve_defaults = True\n\nintersphinx_mapping.update(\n {\n 'pip': ('https://pip.pypa.io/en/latest', None),\n 'build': ('https://pypa-build.readthedocs.io/en/latest', None),\n 'PyPUG': ('https://packaging.python.org/en/latest/', None),\n 'packaging': ('https://packaging.pypa.io/en/latest/', None),\n 'twine': ('https://twine.readthedocs.io/en/stable/', None),\n 'importlib-resources': (\n 'https://importlib-resources.readthedocs.io/en/latest',\n None,\n ),\n }\n)\n\n# Support tooltips on references\nextensions += ['hoverxref.extension']\nhoverxref_auto_ref = True\nhoverxref_intersphinx = [\n 'python',\n 'pip',\n 'build',\n 'PyPUG',\n 
'packaging',\n 'twine',\n 'importlib-resources',\n]\n\n# Add support for linking usernames\ngithub_url = 'https://github.com'\ngithub_repo_org = 'pypa'\ngithub_repo_name = 'setuptools'\ngithub_repo_slug = f'{github_repo_org}/{github_repo_name}'\ngithub_repo_url = f'{github_url}/{github_repo_slug}'\ngithub_sponsors_url = f'{github_url}/sponsors'\nextlinks = {\n 'user': (f'{github_sponsors_url}/%s', '@%s'), # noqa: WPS323\n 'pypi': ('https://pypi.org/project/%s', '%s'), # noqa: WPS323\n 'wiki': ('https://wikipedia.org/wiki/%s', '%s'), # noqa: WPS323\n}\nextensions += ['sphinx.ext.extlinks']\n\n# Ref: https://github.com/python-attrs/attrs/pull/571/files\\\n# #diff-85987f48f1258d9ee486e3191495582dR82\ndefault_role = 'any'\n\n# HTML theme\nhtml_theme = 'furo'\nhtml_logo = \"images/logo.svg\"\n\nhtml_theme_options = {\n \"sidebar_hide_name\": True,\n \"light_css_variables\": {\n \"color-brand-primary\": \"#336790\", # \"blue\"\n \"color-brand-content\": \"#336790\",\n },\n \"dark_css_variables\": {\n \"color-brand-primary\": \"#E5B62F\", # \"yellow\"\n \"color-brand-content\": \"#E5B62F\",\n },\n}\n\n# Redirect old docs so links and references in the ecosystem don't break\nextensions += ['sphinx_reredirects']\nredirects = {\n \"userguide/keywords\": \"/deprecated/changed_keywords.html\",\n \"userguide/commands\": \"/deprecated/commands.html\",\n}\n\n# Add support for inline tabs\nextensions += ['sphinx_inline_tabs']\n\n# Support for distutils\n\n# Ref: https://stackoverflow.com/a/30624034/595220\nnitpick_ignore = [\n ('c:func', 'SHGetSpecialFolderPath'), # ref to MS docs\n ('envvar', 'DISTUTILS_DEBUG'), # undocumented\n ('envvar', 'HOME'), # undocumented\n ('envvar', 'PLAT'), # undocumented\n ('envvar', 'DIST_EXTRA_CONFIG'), # undocumented\n ('py:attr', 'CCompiler.language_map'), # undocumented\n ('py:attr', 'CCompiler.language_order'), # undocumented\n ('py:class', 'distutils.dist.Distribution'), # undocumented\n ('py:class', 'distutils.extension.Extension'), # undocumented\n ('py:class', 'BorlandCCompiler'), # undocumented\n ('py:class', 'CCompiler'), # undocumented\n ('py:class', 'CygwinCCompiler'), # undocumented\n ('py:class', 'distutils.dist.DistributionMetadata'), # undocumented\n ('py:class', 'FileList'), # undocumented\n ('py:class', 'IShellLink'), # ref to MS docs\n ('py:class', 'MSVCCompiler'), # undocumented\n ('py:class', 'OptionDummy'), # undocumented\n ('py:class', 'UnixCCompiler'), # undocumented\n ('py:exc', 'CompileError'), # undocumented\n ('py:exc', 'DistutilsExecError'), # undocumented\n ('py:exc', 'DistutilsFileError'), # undocumented\n ('py:exc', 'LibError'), # undocumented\n ('py:exc', 'LinkError'), # undocumented\n ('py:exc', 'PreprocessError'), # undocumented\n ('py:exc', 'setuptools.errors.PlatformError'), # sphinx cannot find it\n ('py:func', 'distutils.CCompiler.new_compiler'), # undocumented\n # undocumented:\n ('py:func', 'distutils.dist.DistributionMetadata.read_pkg_file'),\n ('py:func', 'distutils.file_util._copy_file_contents'), # undocumented\n ('py:func', 'distutils.log.debug'), # undocumented\n ('py:func', 'distutils.spawn.find_executable'), # undocumented\n ('py:func', 'distutils.spawn.spawn'), # undocumented\n # TODO: check https://docutils.rtfd.io in the future\n ('py:mod', 'docutils'), # there's no Sphinx site documenting this\n]\n\n# Allow linking objects on other Sphinx sites seamlessly:\nintersphinx_mapping.update(\n python=('https://docs.python.org/3', None),\n)\n\n# Add support for the unreleased \"next-version\" change notes\nextensions += 
['sphinxcontrib.towncrier']\n# Extension needs a path from here to the towncrier config.\ntowncrier_draft_working_directory = '..'\n# Avoid an empty section for unpublished changes.\ntowncrier_draft_include_empty = False\n# sphinx-contrib/sphinxcontrib-towncrier#81\ntowncrier_draft_config_path = 'towncrier.toml'\n\nextensions += ['jaraco.tidelift']\n\n# Add icons (aka \"favicons\") to documentation\nextensions += ['sphinx_favicon']\nhtml_static_path = ['images'] # should contain the folder with icons\n\n# Add support for nice Not Found 404 pages\nextensions += ['notfound.extension']\n\n# List of dicts with <link> HTML attributes\n# static-file points to files in the html_static_path (href is computed)\nfavicons = [\n { # \"Catch-all\" goes first, otherwise some browsers will overwrite\n \"rel\": \"icon\",\n \"type\": \"image/svg+xml\",\n \"static-file\": \"logo-symbol-only.svg\",\n \"sizes\": \"any\",\n },\n { # Version with thicker strokes for better visibility at smaller sizes\n \"rel\": \"icon\",\n \"type\": \"image/svg+xml\",\n \"static-file\": \"favicon.svg\",\n \"sizes\": \"16x16 24x24 32x32 48x48\",\n },\n # rel=\"apple-touch-icon\" does not support SVG yet\n]\n", "path": "docs/conf.py"}], "after_files": [{"content": "extensions = [\n 'sphinx.ext.autodoc',\n 'jaraco.packaging.sphinx',\n]\n\nmaster_doc = \"index\"\nhtml_theme = \"furo\"\n\n# Link dates and other references in the changelog\nextensions += ['rst.linker']\nlink_files = {\n '../NEWS.rst': dict(\n using=dict(\n BB='https://bitbucket.org',\n GH='https://github.com',\n ),\n replace=[\n dict(\n pattern=r'(Issue #|\\B#)(?P<issue>\\d+)',\n url='{package_url}/issues/{issue}',\n ),\n dict(\n pattern=r'(?m:^((?P<scm_version>v?\\d+(\\.\\d+){1,2}))\\n[-=]+\\n)',\n with_scm='{text}\\n{rev[timestamp]:%d %b %Y}\\n',\n ),\n dict(\n pattern=r'PEP[- ](?P<pep_number>\\d+)',\n url='https://peps.python.org/pep-{pep_number:0>4}/',\n ),\n dict(\n pattern=r'(?<!\\w)PR #(?P<pull>\\d+)',\n url='{package_url}/pull/{pull}',\n ),\n dict(\n pattern=r'BB Pull Request ?#(?P<bb_pull_request>\\d+)',\n url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',\n ),\n dict(\n pattern=r'Distribute #(?P<distribute>\\d+)',\n url='{BB}/tarek/distribute/issue/{distribute}',\n ),\n dict(\n pattern=r'Buildout #(?P<buildout>\\d+)',\n url='{GH}/buildout/buildout/issues/{buildout}',\n ),\n dict(\n pattern=r'Old Setuptools #(?P<old_setuptools>\\d+)',\n url='http://bugs.python.org/setuptools/issue{old_setuptools}',\n ),\n dict(\n pattern=r'Jython #(?P<jython>\\d+)',\n url='http://bugs.jython.org/issue{jython}',\n ),\n dict(\n pattern=r'(Python #|bpo-)(?P<python>\\d+)',\n url='http://bugs.python.org/issue{python}',\n ),\n dict(\n pattern=r'Interop #(?P<interop>\\d+)',\n url='{GH}/pypa/interoperability-peps/issues/{interop}',\n ),\n dict(\n pattern=r'Pip #(?P<pip>\\d+)',\n url='{GH}/pypa/pip/issues/{pip}',\n ),\n dict(\n pattern=r'Packaging #(?P<packaging>\\d+)',\n url='{GH}/pypa/packaging/issues/{packaging}',\n ),\n dict(\n pattern=r'[Pp]ackaging (?P<packaging_ver>\\d+(\\.\\d+)+)',\n url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',\n ),\n dict(\n pattern=r'setuptools_svn #(?P<setuptools_svn>\\d+)',\n url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',\n ),\n dict(\n pattern=r'pypa/(?P<issue_repo>[\\-\\.\\w]+)#(?P<issue_number>\\d+)',\n url='{GH}/pypa/{issue_repo}/issues/{issue_number}',\n ),\n dict(\n pattern=r'pypa/(?P<commit_repo>[\\-\\.\\w]+)@(?P<commit_number>[\\da-f]+)',\n url='{GH}/pypa/{commit_repo}/commit/{commit_number}',\n ),\n ],\n 
),\n}\n\n# Be strict about any broken references\nnitpicky = True\n\n# Include Python intersphinx mapping to prevent failures\n# jaraco/skeleton#51\nextensions += ['sphinx.ext.intersphinx']\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n}\n\n# Preserve authored syntax for defaults\nautodoc_preserve_defaults = True\n\nintersphinx_mapping.update(\n {\n 'pip': ('https://pip.pypa.io/en/latest', None),\n 'build': ('https://pypa-build.readthedocs.io/en/latest', None),\n 'PyPUG': ('https://packaging.python.org/en/latest/', None),\n 'packaging': ('https://packaging.pypa.io/en/latest/', None),\n 'twine': ('https://twine.readthedocs.io/en/stable/', None),\n 'importlib-resources': (\n 'https://importlib-resources.readthedocs.io/en/latest',\n None,\n ),\n }\n)\n\n# Add support for linking usernames\ngithub_url = 'https://github.com'\ngithub_repo_org = 'pypa'\ngithub_repo_name = 'setuptools'\ngithub_repo_slug = f'{github_repo_org}/{github_repo_name}'\ngithub_repo_url = f'{github_url}/{github_repo_slug}'\ngithub_sponsors_url = f'{github_url}/sponsors'\nextlinks = {\n 'user': (f'{github_sponsors_url}/%s', '@%s'), # noqa: WPS323\n 'pypi': ('https://pypi.org/project/%s', '%s'), # noqa: WPS323\n 'wiki': ('https://wikipedia.org/wiki/%s', '%s'), # noqa: WPS323\n}\nextensions += ['sphinx.ext.extlinks']\n\n# Ref: https://github.com/python-attrs/attrs/pull/571/files\\\n# #diff-85987f48f1258d9ee486e3191495582dR82\ndefault_role = 'any'\n\n# HTML theme\nhtml_theme = 'furo'\nhtml_logo = \"images/logo.svg\"\n\nhtml_theme_options = {\n \"sidebar_hide_name\": True,\n \"light_css_variables\": {\n \"color-brand-primary\": \"#336790\", # \"blue\"\n \"color-brand-content\": \"#336790\",\n },\n \"dark_css_variables\": {\n \"color-brand-primary\": \"#E5B62F\", # \"yellow\"\n \"color-brand-content\": \"#E5B62F\",\n },\n}\n\n# Redirect old docs so links and references in the ecosystem don't break\nextensions += ['sphinx_reredirects']\nredirects = {\n \"userguide/keywords\": \"/deprecated/changed_keywords.html\",\n \"userguide/commands\": \"/deprecated/commands.html\",\n}\n\n# Add support for inline tabs\nextensions += ['sphinx_inline_tabs']\n\n# Support for distutils\n\n# Ref: https://stackoverflow.com/a/30624034/595220\nnitpick_ignore = [\n ('c:func', 'SHGetSpecialFolderPath'), # ref to MS docs\n ('envvar', 'DISTUTILS_DEBUG'), # undocumented\n ('envvar', 'HOME'), # undocumented\n ('envvar', 'PLAT'), # undocumented\n ('envvar', 'DIST_EXTRA_CONFIG'), # undocumented\n ('py:attr', 'CCompiler.language_map'), # undocumented\n ('py:attr', 'CCompiler.language_order'), # undocumented\n ('py:class', 'distutils.dist.Distribution'), # undocumented\n ('py:class', 'distutils.extension.Extension'), # undocumented\n ('py:class', 'BorlandCCompiler'), # undocumented\n ('py:class', 'CCompiler'), # undocumented\n ('py:class', 'CygwinCCompiler'), # undocumented\n ('py:class', 'distutils.dist.DistributionMetadata'), # undocumented\n ('py:class', 'FileList'), # undocumented\n ('py:class', 'IShellLink'), # ref to MS docs\n ('py:class', 'MSVCCompiler'), # undocumented\n ('py:class', 'OptionDummy'), # undocumented\n ('py:class', 'UnixCCompiler'), # undocumented\n ('py:exc', 'CompileError'), # undocumented\n ('py:exc', 'DistutilsExecError'), # undocumented\n ('py:exc', 'DistutilsFileError'), # undocumented\n ('py:exc', 'LibError'), # undocumented\n ('py:exc', 'LinkError'), # undocumented\n ('py:exc', 'PreprocessError'), # undocumented\n ('py:exc', 'setuptools.errors.PlatformError'), # sphinx cannot find it\n ('py:func', 
'distutils.CCompiler.new_compiler'), # undocumented\n # undocumented:\n ('py:func', 'distutils.dist.DistributionMetadata.read_pkg_file'),\n ('py:func', 'distutils.file_util._copy_file_contents'), # undocumented\n ('py:func', 'distutils.log.debug'), # undocumented\n ('py:func', 'distutils.spawn.find_executable'), # undocumented\n ('py:func', 'distutils.spawn.spawn'), # undocumented\n # TODO: check https://docutils.rtfd.io in the future\n ('py:mod', 'docutils'), # there's no Sphinx site documenting this\n]\n\n# Allow linking objects on other Sphinx sites seamlessly:\nintersphinx_mapping.update(\n python=('https://docs.python.org/3', None),\n)\n\n# Add support for the unreleased \"next-version\" change notes\nextensions += ['sphinxcontrib.towncrier']\n# Extension needs a path from here to the towncrier config.\ntowncrier_draft_working_directory = '..'\n# Avoid an empty section for unpublished changes.\ntowncrier_draft_include_empty = False\n# sphinx-contrib/sphinxcontrib-towncrier#81\ntowncrier_draft_config_path = 'towncrier.toml'\n\nextensions += ['jaraco.tidelift']\n\n# Add icons (aka \"favicons\") to documentation\nextensions += ['sphinx_favicon']\nhtml_static_path = ['images'] # should contain the folder with icons\n\n# Add support for nice Not Found 404 pages\nextensions += ['notfound.extension']\n\n# List of dicts with <link> HTML attributes\n# static-file points to files in the html_static_path (href is computed)\nfavicons = [\n { # \"Catch-all\" goes first, otherwise some browsers will overwrite\n \"rel\": \"icon\",\n \"type\": \"image/svg+xml\",\n \"static-file\": \"logo-symbol-only.svg\",\n \"sizes\": \"any\",\n },\n { # Version with thicker strokes for better visibility at smaller sizes\n \"rel\": \"icon\",\n \"type\": \"image/svg+xml\",\n \"static-file\": \"favicon.svg\",\n \"sizes\": \"16x16 24x24 32x32 48x48\",\n },\n # rel=\"apple-touch-icon\" does not support SVG yet\n]\n", "path": "docs/conf.py"}]}
| 3,880 | 144 |
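The golden diff above resolves the complaint by deleting the hoverxref block from `docs/conf.py` outright. As a purely hypothetical alternative (not what the patch does), the same block could be kept but gated behind an opt-in environment variable; the sketch below reuses only names that already appear in the conf.py shown above, and the `DOCS_ENABLE_HOVERXREF` flag is invented for illustration:

```python
# Sketch of a docs/conf.py excerpt; assumes `extensions` is already defined
# earlier in the file, exactly as in the conf.py listed above.
import os

if os.environ.get("DOCS_ENABLE_HOVERXREF") == "1":  # hypothetical opt-in flag
    extensions += ['hoverxref.extension']
    hoverxref_auto_ref = True
    hoverxref_intersphinx = [
        'python', 'pip', 'build', 'PyPUG',
        'packaging', 'twine', 'importlib-resources',
    ]
```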
gh_patches_debug_5047
|
rasdani/github-patches
|
git_diff
|
ray-project__ray-3578
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix formatting of PyPI package description.
See https://pypi.org/project/ray/.
Note that we can test this out first at https://test.pypi.org/project/ray/.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/setup.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 import os
6 import re
7 import shutil
8 import subprocess
9 import sys
10
11 from setuptools import setup, find_packages, Distribution
12 import setuptools.command.build_ext as _build_ext
13
14 # Ideally, we could include these files by putting them in a
15 # MANIFEST.in or using the package_data argument to setup, but the
16 # MANIFEST.in gets applied at the very beginning when setup.py runs
17 # before these files have been created, so we have to move the files
18 # manually.
19
20 # NOTE: The lists below must be kept in sync with ray/CMakeLists.txt.
21
22 ray_files = [
23 "ray/core/src/ray/thirdparty/redis/src/redis-server",
24 "ray/core/src/ray/gcs/redis_module/libray_redis_module.so",
25 "ray/core/src/plasma/plasma_store_server",
26 "ray/core/src/ray/raylet/libraylet_library_python.so",
27 "ray/core/src/ray/raylet/raylet_monitor", "ray/core/src/ray/raylet/raylet",
28 "ray/WebUI.ipynb"
29 ]
30
31 # These are the directories where automatically generated Python flatbuffer
32 # bindings are created.
33 generated_python_directories = [
34 "ray/core/generated", "ray/core/generated/ray",
35 "ray/core/generated/ray/protocol"
36 ]
37
38 optional_ray_files = []
39
40 ray_ui_files = [
41 "ray/core/src/catapult_files/index.html",
42 "ray/core/src/catapult_files/trace_viewer_full.html"
43 ]
44
45 ray_autoscaler_files = [
46 "ray/autoscaler/aws/example-full.yaml",
47 "ray/autoscaler/gcp/example-full.yaml",
48 "ray/autoscaler/local/example-full.yaml",
49 ]
50
51 if "RAY_USE_NEW_GCS" in os.environ and os.environ["RAY_USE_NEW_GCS"] == "on":
52 ray_files += [
53 "ray/core/src/credis/build/src/libmember.so",
54 "ray/core/src/credis/build/src/libmaster.so",
55 "ray/core/src/credis/redis/src/redis-server"
56 ]
57
58 # The UI files are mandatory if the INCLUDE_UI environment variable equals 1.
59 # Otherwise, they are optional.
60 if "INCLUDE_UI" in os.environ and os.environ["INCLUDE_UI"] == "1":
61 ray_files += ray_ui_files
62 else:
63 optional_ray_files += ray_ui_files
64
65 optional_ray_files += ray_autoscaler_files
66
67 extras = {
68 "rllib": ["pyyaml", "gym[atari]", "opencv-python", "lz4", "scipy"],
69 "debug": ["psutil", "setproctitle", "py-spy"],
70 }
71
72
73 class build_ext(_build_ext.build_ext):
74 def run(self):
75 # Note: We are passing in sys.executable so that we use the same
76 # version of Python to build pyarrow inside the build.sh script. Note
77 # that certain flags will not be passed along such as --user or sudo.
78 # TODO(rkn): Fix this.
79 subprocess.check_call(["../build.sh", "-p", sys.executable])
80
81 # We also need to install pyarrow along with Ray, so make sure that the
82 # relevant non-Python pyarrow files get copied.
83 pyarrow_files = []
84 for (root, dirs, filenames) in os.walk("./ray/pyarrow_files/pyarrow"):
85 for name in filenames:
86 pyarrow_files.append(os.path.join(root, name))
87
88 files_to_include = ray_files + pyarrow_files
89
90 # Copy over the autogenerated flatbuffer Python bindings.
91 for directory in generated_python_directories:
92 for filename in os.listdir(directory):
93 if filename[-3:] == ".py":
94 files_to_include.append(os.path.join(directory, filename))
95
96 for filename in files_to_include:
97 self.move_file(filename)
98
99 # Try to copy over the optional files.
100 for filename in optional_ray_files:
101 try:
102 self.move_file(filename)
103 except Exception:
104 print("Failed to copy optional file {}. This is ok."
105 .format(filename))
106
107 def move_file(self, filename):
108 # TODO(rkn): This feels very brittle. It may not handle all cases. See
109 # https://github.com/apache/arrow/blob/master/python/setup.py for an
110 # example.
111 source = filename
112 destination = os.path.join(self.build_lib, filename)
113 # Create the target directory if it doesn't already exist.
114 parent_directory = os.path.dirname(destination)
115 if not os.path.exists(parent_directory):
116 os.makedirs(parent_directory)
117 print("Copying {} to {}.".format(source, destination))
118 shutil.copy(source, destination)
119
120
121 class BinaryDistribution(Distribution):
122 def has_ext_modules(self):
123 return True
124
125
126 def find_version(*filepath):
127 # Extract version information from filepath
128 here = os.path.abspath(os.path.dirname(__file__))
129 with open(os.path.join(here, *filepath)) as fp:
130 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
131 fp.read(), re.M)
132 if version_match:
133 return version_match.group(1)
134 raise RuntimeError("Unable to find version string.")
135
136
137 requires = [
138 "numpy",
139 "filelock",
140 "funcsigs",
141 "click",
142 "colorama",
143 "pytest",
144 "pyyaml",
145 "redis",
146 # The six module is required by pyarrow.
147 "six >= 1.0.0",
148 "flatbuffers",
149 ]
150
151 if sys.version_info < (3, 0):
152 requires.append("faulthandler")
153
154 setup(
155 name="ray",
156 version=find_version("ray", "__init__.py"),
157 description=("A system for parallel and distributed Python that unifies "
158 "the ML ecosystem."),
159 long_description=open("../README.rst").read(),
160 url="https://github.com/ray-project/ray",
161 keywords=("ray distributed parallel machine-learning "
162 "reinforcement-learning deep-learning python"),
163 packages=find_packages(),
164 cmdclass={"build_ext": build_ext},
165 # The BinaryDistribution argument triggers build_ext.
166 distclass=BinaryDistribution,
167 install_requires=requires,
168 setup_requires=["cython >= 0.29"],
169 extras_require=extras,
170 entry_points={
171 "console_scripts": [
172 "ray=ray.scripts.scripts:main",
173 "rllib=ray.rllib.scripts:cli [rllib]"
174 ]
175 },
176 include_package_data=True,
177 zip_safe=False,
178 license="Apache 2.0")
179
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/setup.py b/python/setup.py
--- a/python/setup.py
+++ b/python/setup.py
@@ -154,6 +154,8 @@
setup(
name="ray",
version=find_version("ray", "__init__.py"),
+ author="Ray Team",
+ author_email="[email protected]",
description=("A system for parallel and distributed Python that unifies "
"the ML ecosystem."),
long_description=open("../README.rst").read(),
|
{"golden_diff": "diff --git a/python/setup.py b/python/setup.py\n--- a/python/setup.py\n+++ b/python/setup.py\n@@ -154,6 +154,8 @@\n setup(\n name=\"ray\",\n version=find_version(\"ray\", \"__init__.py\"),\n+ author=\"Ray Team\",\n+ author_email=\"[email protected]\",\n description=(\"A system for parallel and distributed Python that unifies \"\n \"the ML ecosystem.\"),\n long_description=open(\"../README.rst\").read(),\n", "issue": "Fix formatting of PyPI package description.\nSee https://pypi.org/project/ray/.\r\n\r\nNote that we can test this out first at https://test.pypi.org/project/ray/.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup, find_packages, Distribution\nimport setuptools.command.build_ext as _build_ext\n\n# Ideally, we could include these files by putting them in a\n# MANIFEST.in or using the package_data argument to setup, but the\n# MANIFEST.in gets applied at the very beginning when setup.py runs\n# before these files have been created, so we have to move the files\n# manually.\n\n# NOTE: The lists below must be kept in sync with ray/CMakeLists.txt.\n\nray_files = [\n \"ray/core/src/ray/thirdparty/redis/src/redis-server\",\n \"ray/core/src/ray/gcs/redis_module/libray_redis_module.so\",\n \"ray/core/src/plasma/plasma_store_server\",\n \"ray/core/src/ray/raylet/libraylet_library_python.so\",\n \"ray/core/src/ray/raylet/raylet_monitor\", \"ray/core/src/ray/raylet/raylet\",\n \"ray/WebUI.ipynb\"\n]\n\n# These are the directories where automatically generated Python flatbuffer\n# bindings are created.\ngenerated_python_directories = [\n \"ray/core/generated\", \"ray/core/generated/ray\",\n \"ray/core/generated/ray/protocol\"\n]\n\noptional_ray_files = []\n\nray_ui_files = [\n \"ray/core/src/catapult_files/index.html\",\n \"ray/core/src/catapult_files/trace_viewer_full.html\"\n]\n\nray_autoscaler_files = [\n \"ray/autoscaler/aws/example-full.yaml\",\n \"ray/autoscaler/gcp/example-full.yaml\",\n \"ray/autoscaler/local/example-full.yaml\",\n]\n\nif \"RAY_USE_NEW_GCS\" in os.environ and os.environ[\"RAY_USE_NEW_GCS\"] == \"on\":\n ray_files += [\n \"ray/core/src/credis/build/src/libmember.so\",\n \"ray/core/src/credis/build/src/libmaster.so\",\n \"ray/core/src/credis/redis/src/redis-server\"\n ]\n\n# The UI files are mandatory if the INCLUDE_UI environment variable equals 1.\n# Otherwise, they are optional.\nif \"INCLUDE_UI\" in os.environ and os.environ[\"INCLUDE_UI\"] == \"1\":\n ray_files += ray_ui_files\nelse:\n optional_ray_files += ray_ui_files\n\noptional_ray_files += ray_autoscaler_files\n\nextras = {\n \"rllib\": [\"pyyaml\", \"gym[atari]\", \"opencv-python\", \"lz4\", \"scipy\"],\n \"debug\": [\"psutil\", \"setproctitle\", \"py-spy\"],\n}\n\n\nclass build_ext(_build_ext.build_ext):\n def run(self):\n # Note: We are passing in sys.executable so that we use the same\n # version of Python to build pyarrow inside the build.sh script. 
Note\n # that certain flags will not be passed along such as --user or sudo.\n # TODO(rkn): Fix this.\n subprocess.check_call([\"../build.sh\", \"-p\", sys.executable])\n\n # We also need to install pyarrow along with Ray, so make sure that the\n # relevant non-Python pyarrow files get copied.\n pyarrow_files = []\n for (root, dirs, filenames) in os.walk(\"./ray/pyarrow_files/pyarrow\"):\n for name in filenames:\n pyarrow_files.append(os.path.join(root, name))\n\n files_to_include = ray_files + pyarrow_files\n\n # Copy over the autogenerated flatbuffer Python bindings.\n for directory in generated_python_directories:\n for filename in os.listdir(directory):\n if filename[-3:] == \".py\":\n files_to_include.append(os.path.join(directory, filename))\n\n for filename in files_to_include:\n self.move_file(filename)\n\n # Try to copy over the optional files.\n for filename in optional_ray_files:\n try:\n self.move_file(filename)\n except Exception:\n print(\"Failed to copy optional file {}. This is ok.\"\n .format(filename))\n\n def move_file(self, filename):\n # TODO(rkn): This feels very brittle. It may not handle all cases. See\n # https://github.com/apache/arrow/blob/master/python/setup.py for an\n # example.\n source = filename\n destination = os.path.join(self.build_lib, filename)\n # Create the target directory if it doesn't already exist.\n parent_directory = os.path.dirname(destination)\n if not os.path.exists(parent_directory):\n os.makedirs(parent_directory)\n print(\"Copying {} to {}.\".format(source, destination))\n shutil.copy(source, destination)\n\n\nclass BinaryDistribution(Distribution):\n def has_ext_modules(self):\n return True\n\n\ndef find_version(*filepath):\n # Extract version information from filepath\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *filepath)) as fp:\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n fp.read(), re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = [\n \"numpy\",\n \"filelock\",\n \"funcsigs\",\n \"click\",\n \"colorama\",\n \"pytest\",\n \"pyyaml\",\n \"redis\",\n # The six module is required by pyarrow.\n \"six >= 1.0.0\",\n \"flatbuffers\",\n]\n\nif sys.version_info < (3, 0):\n requires.append(\"faulthandler\")\n\nsetup(\n name=\"ray\",\n version=find_version(\"ray\", \"__init__.py\"),\n description=(\"A system for parallel and distributed Python that unifies \"\n \"the ML ecosystem.\"),\n long_description=open(\"../README.rst\").read(),\n url=\"https://github.com/ray-project/ray\",\n keywords=(\"ray distributed parallel machine-learning \"\n \"reinforcement-learning deep-learning python\"),\n packages=find_packages(),\n cmdclass={\"build_ext\": build_ext},\n # The BinaryDistribution argument triggers build_ext.\n distclass=BinaryDistribution,\n install_requires=requires,\n setup_requires=[\"cython >= 0.29\"],\n extras_require=extras,\n entry_points={\n \"console_scripts\": [\n \"ray=ray.scripts.scripts:main\",\n \"rllib=ray.rllib.scripts:cli [rllib]\"\n ]\n },\n include_package_data=True,\n zip_safe=False,\n license=\"Apache 2.0\")\n", "path": "python/setup.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup, find_packages, Distribution\nimport setuptools.command.build_ext as _build_ext\n\n# Ideally, we 
could include these files by putting them in a\n# MANIFEST.in or using the package_data argument to setup, but the\n# MANIFEST.in gets applied at the very beginning when setup.py runs\n# before these files have been created, so we have to move the files\n# manually.\n\n# NOTE: The lists below must be kept in sync with ray/CMakeLists.txt.\n\nray_files = [\n \"ray/core/src/ray/thirdparty/redis/src/redis-server\",\n \"ray/core/src/ray/gcs/redis_module/libray_redis_module.so\",\n \"ray/core/src/plasma/plasma_store_server\",\n \"ray/core/src/ray/raylet/libraylet_library_python.so\",\n \"ray/core/src/ray/raylet/raylet_monitor\", \"ray/core/src/ray/raylet/raylet\",\n \"ray/WebUI.ipynb\"\n]\n\n# These are the directories where automatically generated Python flatbuffer\n# bindings are created.\ngenerated_python_directories = [\n \"ray/core/generated\", \"ray/core/generated/ray\",\n \"ray/core/generated/ray/protocol\"\n]\n\noptional_ray_files = []\n\nray_ui_files = [\n \"ray/core/src/catapult_files/index.html\",\n \"ray/core/src/catapult_files/trace_viewer_full.html\"\n]\n\nray_autoscaler_files = [\n \"ray/autoscaler/aws/example-full.yaml\",\n \"ray/autoscaler/gcp/example-full.yaml\",\n \"ray/autoscaler/local/example-full.yaml\",\n]\n\nif \"RAY_USE_NEW_GCS\" in os.environ and os.environ[\"RAY_USE_NEW_GCS\"] == \"on\":\n ray_files += [\n \"ray/core/src/credis/build/src/libmember.so\",\n \"ray/core/src/credis/build/src/libmaster.so\",\n \"ray/core/src/credis/redis/src/redis-server\"\n ]\n\n# The UI files are mandatory if the INCLUDE_UI environment variable equals 1.\n# Otherwise, they are optional.\nif \"INCLUDE_UI\" in os.environ and os.environ[\"INCLUDE_UI\"] == \"1\":\n ray_files += ray_ui_files\nelse:\n optional_ray_files += ray_ui_files\n\noptional_ray_files += ray_autoscaler_files\n\nextras = {\n \"rllib\": [\"pyyaml\", \"gym[atari]\", \"opencv-python\", \"lz4\", \"scipy\"],\n \"debug\": [\"psutil\", \"setproctitle\", \"py-spy\"],\n}\n\n\nclass build_ext(_build_ext.build_ext):\n def run(self):\n # Note: We are passing in sys.executable so that we use the same\n # version of Python to build pyarrow inside the build.sh script. Note\n # that certain flags will not be passed along such as --user or sudo.\n # TODO(rkn): Fix this.\n subprocess.check_call([\"../build.sh\", \"-p\", sys.executable])\n\n # We also need to install pyarrow along with Ray, so make sure that the\n # relevant non-Python pyarrow files get copied.\n pyarrow_files = []\n for (root, dirs, filenames) in os.walk(\"./ray/pyarrow_files/pyarrow\"):\n for name in filenames:\n pyarrow_files.append(os.path.join(root, name))\n\n files_to_include = ray_files + pyarrow_files\n\n # Copy over the autogenerated flatbuffer Python bindings.\n for directory in generated_python_directories:\n for filename in os.listdir(directory):\n if filename[-3:] == \".py\":\n files_to_include.append(os.path.join(directory, filename))\n\n for filename in files_to_include:\n self.move_file(filename)\n\n # Try to copy over the optional files.\n for filename in optional_ray_files:\n try:\n self.move_file(filename)\n except Exception:\n print(\"Failed to copy optional file {}. This is ok.\"\n .format(filename))\n\n def move_file(self, filename):\n # TODO(rkn): This feels very brittle. It may not handle all cases. 
See\n # https://github.com/apache/arrow/blob/master/python/setup.py for an\n # example.\n source = filename\n destination = os.path.join(self.build_lib, filename)\n # Create the target directory if it doesn't already exist.\n parent_directory = os.path.dirname(destination)\n if not os.path.exists(parent_directory):\n os.makedirs(parent_directory)\n print(\"Copying {} to {}.\".format(source, destination))\n shutil.copy(source, destination)\n\n\nclass BinaryDistribution(Distribution):\n def has_ext_modules(self):\n return True\n\n\ndef find_version(*filepath):\n # Extract version information from filepath\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *filepath)) as fp:\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n fp.read(), re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = [\n \"numpy\",\n \"filelock\",\n \"funcsigs\",\n \"click\",\n \"colorama\",\n \"pytest\",\n \"pyyaml\",\n \"redis\",\n # The six module is required by pyarrow.\n \"six >= 1.0.0\",\n \"flatbuffers\",\n]\n\nif sys.version_info < (3, 0):\n requires.append(\"faulthandler\")\n\nsetup(\n name=\"ray\",\n version=find_version(\"ray\", \"__init__.py\"),\n author=\"Ray Team\",\n author_email=\"[email protected]\",\n description=(\"A system for parallel and distributed Python that unifies \"\n \"the ML ecosystem.\"),\n long_description=open(\"../README.rst\").read(),\n url=\"https://github.com/ray-project/ray\",\n keywords=(\"ray distributed parallel machine-learning \"\n \"reinforcement-learning deep-learning python\"),\n packages=find_packages(),\n cmdclass={\"build_ext\": build_ext},\n # The BinaryDistribution argument triggers build_ext.\n distclass=BinaryDistribution,\n install_requires=requires,\n setup_requires=[\"cython >= 0.29\"],\n extras_require=extras,\n entry_points={\n \"console_scripts\": [\n \"ray=ray.scripts.scripts:main\",\n \"rllib=ray.rllib.scripts:cli [rllib]\"\n ]\n },\n include_package_data=True,\n zip_safe=False,\n license=\"Apache 2.0\")\n", "path": "python/setup.py"}]}
| 2,131 | 108 |
gh_patches_debug_2293
|
rasdani/github-patches
|
git_diff
|
inventree__InvenTree-4285
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Increase worker-timeout to account for install times
I might have another instance of the same worker-timeout-loop during startup to add to the issue. My docker production installation at InvenTree:latest is running on a Raspberry Pi 3B+.
The loop occurred after I had added the `inventree-brother-plugin` to `plugins.txt` - the initial plugin installation took too long during startup so that the worker processes timed out and were constantly restarted.
My "solution" was to increase the gunicorn timeout variable in the `.env` file to
```
# Options for gunicorn server
INVENTREE_GUNICORN_TIMEOUT=60
```
but maybe actions like pip installs should somehow generally not count against the worker timeout? (I'm not sure about the technical internals on this one at the moment...)
_Originally posted by @simonkuehling in https://github.com/inventree/InvenTree/issues/4180#issuecomment-1410348943_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/gunicorn.conf.py`
Content:
```
1 """Gunicorn configuration for InvenTree."""
2
3 import logging
4 import multiprocessing
5 import os
6
7 # Logger configuration
8 logger = logging.getLogger('inventree')
9 accesslog = '-'
10 errorlog = '-'
11 loglevel = os.environ.get('INVENTREE_LOG_LEVEL', 'warning').lower()
12 capture_output = True
13
14 # Worker configuration
15 # TODO: Implement support for gevent
16 # worker_class = 'gevent' # Allow multi-threading support
17 worker_tmp_dir = '/dev/shm' # Write temp file to RAM (faster)
18 threads = 4
19
20
21 # Worker timeout (default = 30 seconds)
22 timeout = os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 30)
23
24 # Number of worker processes
25 workers = os.environ.get('INVENTREE_GUNICORN_WORKERS', None)
26
27 if workers is not None:
28 try:
29 workers = int(workers)
30 except ValueError:
31 workers = None
32
33 if workers is None:
34 workers = multiprocessing.cpu_count() * 2 + 1
35
36 logger.info(f"Starting gunicorn server with {workers} workers")
37
38 max_requests = 1000
39 max_requests_jitter = 50
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docker/gunicorn.conf.py b/docker/gunicorn.conf.py
--- a/docker/gunicorn.conf.py
+++ b/docker/gunicorn.conf.py
@@ -18,8 +18,8 @@
threads = 4
-# Worker timeout (default = 30 seconds)
-timeout = os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 30)
+# Worker timeout (default = 90 seconds)
+timeout = os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 90)
# Number of worker processes
workers = os.environ.get('INVENTREE_GUNICORN_WORKERS', None)
|
{"golden_diff": "diff --git a/docker/gunicorn.conf.py b/docker/gunicorn.conf.py\n--- a/docker/gunicorn.conf.py\n+++ b/docker/gunicorn.conf.py\n@@ -18,8 +18,8 @@\n threads = 4\n \n \n-# Worker timeout (default = 30 seconds)\n-timeout = os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 30)\n+# Worker timeout (default = 90 seconds)\n+timeout = os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 90)\n \n # Number of worker processes\n workers = os.environ.get('INVENTREE_GUNICORN_WORKERS', None)\n", "issue": "[BUG] Increase worker-timeout to account for install times\n I might have another instance of the same worker-timeout-loop during startup to add to the issue. My docker production installation at InvenTree:latest is running on a Raspberry Pi 3B+.\r\nThe loop occured after I had added the `inventree-brother-plugin` to `plugins.txt` - the initial plugin installation took too long during startup so that the worker processes timed out and were constantly restartet.\r\n\r\nMy \"solution\" was to increase the gunicorn timeout variable in the `.env` file to\r\n```\r\n# Options for gunicorn server\r\nINVENTREE_GUNICORN_TIMEOUT=60\r\n```\r\nbut maybe actions like pip installs should somehow generally not count against the worker timeout? (I'm not sure about the technical internals on this one at the moment...)\r\n\r\n_Originally posted by @simonkuehling in https://github.com/inventree/InvenTree/issues/4180#issuecomment-1410348943_\r\n \n", "before_files": [{"content": "\"\"\"Gunicorn configuration for InvenTree.\"\"\"\n\nimport logging\nimport multiprocessing\nimport os\n\n# Logger configuration\nlogger = logging.getLogger('inventree')\naccesslog = '-'\nerrorlog = '-'\nloglevel = os.environ.get('INVENTREE_LOG_LEVEL', 'warning').lower()\ncapture_output = True\n\n# Worker configuration\n# TODO: Implement support for gevent\n# worker_class = 'gevent' # Allow multi-threading support\nworker_tmp_dir = '/dev/shm' # Write temp file to RAM (faster)\nthreads = 4\n\n\n# Worker timeout (default = 30 seconds)\ntimeout = os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 30)\n\n# Number of worker processes\nworkers = os.environ.get('INVENTREE_GUNICORN_WORKERS', None)\n\nif workers is not None:\n try:\n workers = int(workers)\n except ValueError:\n workers = None\n\nif workers is None:\n workers = multiprocessing.cpu_count() * 2 + 1\n\nlogger.info(f\"Starting gunicorn server with {workers} workers\")\n\nmax_requests = 1000\nmax_requests_jitter = 50\n", "path": "docker/gunicorn.conf.py"}], "after_files": [{"content": "\"\"\"Gunicorn configuration for InvenTree.\"\"\"\n\nimport logging\nimport multiprocessing\nimport os\n\n# Logger configuration\nlogger = logging.getLogger('inventree')\naccesslog = '-'\nerrorlog = '-'\nloglevel = os.environ.get('INVENTREE_LOG_LEVEL', 'warning').lower()\ncapture_output = True\n\n# Worker configuration\n# TODO: Implement support for gevent\n# worker_class = 'gevent' # Allow multi-threading support\nworker_tmp_dir = '/dev/shm' # Write temp file to RAM (faster)\nthreads = 4\n\n\n# Worker timeout (default = 90 seconds)\ntimeout = os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 90)\n\n# Number of worker processes\nworkers = os.environ.get('INVENTREE_GUNICORN_WORKERS', None)\n\nif workers is not None:\n try:\n workers = int(workers)\n except ValueError:\n workers = None\n\nif workers is None:\n workers = multiprocessing.cpu_count() * 2 + 1\n\nlogger.info(f\"Starting gunicorn server with {workers} workers\")\n\nmax_requests = 1000\nmax_requests_jitter = 50\n", "path": "docker/gunicorn.conf.py"}]}
| 809 | 138 |
gh_patches_debug_30543
|
rasdani/github-patches
|
git_diff
|
holoviz__panel-1678
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Panel show not working in chrome

MRE:
```python
import panel as pn
pn.extension()
pn.widgets.FloatSlider().show()
```
tested on linux and windows 10
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/io/notebook.py`
Content:
```
1 """
2 Various utilities for loading JS dependencies and rendering plots
3 inside the Jupyter notebook.
4 """
5 from __future__ import absolute_import, division, unicode_literals
6
7 import json
8 import uuid
9 import sys
10
11 from contextlib import contextmanager
12 from collections import OrderedDict
13 from six import string_types
14
15 import bokeh
16 import bokeh.embed.notebook
17
18 from bokeh.core.json_encoder import serialize_json
19 from bokeh.core.templates import MACROS
20 from bokeh.document import Document
21 from bokeh.embed import server_document
22 from bokeh.embed.bundle import bundle_for_objs_and_resources
23 from bokeh.embed.elements import div_for_render_item, script_for_render_items
24 from bokeh.embed.util import standalone_docs_json_and_render_items
25 from bokeh.embed.wrappers import wrap_in_script_tag
26 from bokeh.models import LayoutDOM, Model
27 from bokeh.resources import CDN, INLINE
28 from bokeh.settings import settings
29 from bokeh.util.serialization import make_id
30 from pyviz_comms import (
31 PYVIZ_PROXY, Comm, JupyterCommManager as _JupyterCommManager, nb_mime_js
32 )
33
34 try:
35 from bokeh.util.string import escape
36 except Exception:
37 from html import escape
38
39 from ..compiler import require_components
40 from .embed import embed_state
41 from .model import add_to_doc, diff
42 from .resources import _env
43 from .server import _server_url, _origin_url, get_server
44 from .state import state
45
46
47 #---------------------------------------------------------------------
48 # Private API
49 #---------------------------------------------------------------------
50
51 LOAD_MIME = 'application/vnd.holoviews_load.v0+json'
52 EXEC_MIME = 'application/vnd.holoviews_exec.v0+json'
53 HTML_MIME = 'text/html'
54
55 def push(doc, comm, binary=True):
56 """
57 Pushes events stored on the document across the provided comm.
58 """
59 msg = diff(doc, binary=binary)
60 if msg is None:
61 return
62 comm.send(msg.header_json)
63 comm.send(msg.metadata_json)
64 comm.send(msg.content_json)
65 for header, payload in msg.buffers:
66 comm.send(json.dumps(header))
67 comm.send(buffers=[payload])
68
69 DOC_NB_JS = _env.get_template("doc_nb_js.js")
70 AUTOLOAD_NB_JS = _env.get_template("autoload_panel_js.js")
71 NB_TEMPLATE_BASE = _env.get_template('nb_template.html')
72
73 def _autoload_js(bundle, configs, requirements, exports, skip_imports, ipywidget, load_timeout=5000):
74 return AUTOLOAD_NB_JS.render(
75 bundle = bundle,
76 force = True,
77 timeout = load_timeout,
78 configs = configs,
79 requirements = requirements,
80 exports = exports,
81 skip_imports = skip_imports,
82 ipywidget = ipywidget
83 )
84
85
86 def html_for_render_items(docs_json, render_items, template=None, template_variables={}):
87 json_id = make_id()
88 json = escape(serialize_json(docs_json), quote=False)
89 json = wrap_in_script_tag(json, "application/json", json_id)
90
91 script = wrap_in_script_tag(script_for_render_items(json_id, render_items))
92
93 context = template_variables.copy()
94
95 context.update(dict(
96 title = '',
97 plot_script = json + script,
98 docs = render_items,
99 base = NB_TEMPLATE_BASE,
100 macros = MACROS,
101 ))
102
103 if len(render_items) == 1:
104 context["doc"] = context["docs"][0]
105 context["roots"] = context["doc"].roots
106
107 if template is None:
108 template = NB_TEMPLATE_BASE
109 elif isinstance(template, string_types):
110 template = _env.from_string("{% extends base %}\n" + template)
111
112 return template.render(context)
113
114
115 def render_template(document, comm=None, manager=None):
116 ref = document.roots[0].ref['id']
117 (docs_json, render_items) = standalone_docs_json_and_render_items(document, True)
118
119 # We do not want the CommManager to appear in the roots because
120 # the custom template may not reference it
121 if manager:
122 item = render_items[0]
123 item.roots._roots = OrderedDict(list(item.roots._roots.items())[:-1])
124
125 html = html_for_render_items(
126 docs_json, render_items, template=document.template,
127 template_variables=document.template_variables
128 )
129 return ({'text/html': html, EXEC_MIME: ''}, {EXEC_MIME: {'id': ref}})
130
131
132 def render_model(model, comm=None):
133 if not isinstance(model, Model):
134 raise ValueError("notebook_content expects a single Model instance")
135 from ..config import panel_extension as pnext
136
137 target = model.ref['id']
138
139 (docs_json, [render_item]) = standalone_docs_json_and_render_items([model], True)
140 div = div_for_render_item(render_item)
141 render_item = render_item.to_json()
142 requirements = [pnext._globals[ext] for ext in pnext._loaded_extensions
143 if ext in pnext._globals]
144 ipywidget = 'ipywidgets_bokeh' in sys.modules
145
146 script = DOC_NB_JS.render(
147 docs_json=serialize_json(docs_json),
148 render_items=serialize_json([render_item]),
149 requirements=requirements,
150 ipywidget=ipywidget
151 )
152 bokeh_script, bokeh_div = script, div
153 html = "<div id='{id}'>{html}</div>".format(id=target, html=bokeh_div)
154
155 data = {'text/html': html, 'application/javascript': bokeh_script}
156 return ({'text/html': mimebundle_to_html(data), EXEC_MIME: ''},
157 {EXEC_MIME: {'id': target}})
158
159
160 def render_mimebundle(model, doc, comm, manager=None, location=None):
161 """
162 Displays bokeh output inside a notebook using the PyViz display
163 and comms machinery.
164 """
165 if not isinstance(model, LayoutDOM):
166 raise ValueError('Can only render bokeh LayoutDOM models')
167 add_to_doc(model, doc, True)
168 if manager is not None:
169 doc.add_root(manager)
170 if location is not None:
171 loc = location._get_model(doc, model, model, comm)
172 doc.add_root(loc)
173 return render_model(model, comm)
174
175
176 def mimebundle_to_html(bundle):
177 """
178 Converts a MIME bundle into HTML.
179 """
180 if isinstance(bundle, tuple):
181 data, metadata = bundle
182 else:
183 data = bundle
184 html = data.get('text/html', '')
185 if 'application/javascript' in data:
186 js = data['application/javascript']
187 html += '\n<script type="application/javascript">{js}</script>'.format(js=js)
188 return html
189
190 #---------------------------------------------------------------------
191 # Public API
192 #---------------------------------------------------------------------
193
194
195 @contextmanager
196 def block_comm():
197 """
198 Context manager to temporarily block comm push
199 """
200 state._hold = True
201 try:
202 yield
203 finally:
204 state._hold = False
205
206
207 def load_notebook(inline=True, load_timeout=5000):
208 from IPython.display import publish_display_data
209
210 resources = INLINE if inline else CDN
211 prev_resources = settings.resources()
212 try:
213 settings.resources = 'cdn'
214 bundle = bundle_for_objs_and_resources(None, resources)
215 configs, requirements, exports, skip_imports = require_components()
216 ipywidget = 'ipywidgets_bokeh' in sys.modules
217 bokeh_js = _autoload_js(bundle, configs, requirements, exports,
218 skip_imports, ipywidget, load_timeout)
219 finally:
220 settings.resources = prev_resources
221
222 publish_display_data({
223 'application/javascript': bokeh_js,
224 LOAD_MIME: bokeh_js,
225 })
226 bokeh.io.notebook.curstate().output_notebook()
227
228 # Publish comm manager
229 JS = '\n'.join([PYVIZ_PROXY, _JupyterCommManager.js_manager, nb_mime_js])
230 publish_display_data(data={LOAD_MIME: JS, 'application/javascript': JS})
231
232
233 def show_server(panel, notebook_url, port):
234 """
235 Displays a bokeh server inline in the notebook.
236
237 Arguments
238 ---------
239 panel: Viewable
240 Panel Viewable object to launch a server for
241 notebook_url: str
242 The URL of the running Jupyter notebook server
243 port: int (optional, default=0)
244 Allows specifying a specific port
245 server_id: str
246 Unique ID to identify the server with
247
248 Returns
249 -------
250 server: bokeh.server.Server
251 """
252 from IPython.display import publish_display_data
253
254 if callable(notebook_url):
255 origin = notebook_url(None)
256 else:
257 origin = _origin_url(notebook_url)
258 server_id = uuid.uuid4().hex
259 server = get_server(panel, port=port, websocket_origin=origin,
260 start=True, show=False, server_id=server_id)
261
262 if callable(notebook_url):
263 url = notebook_url(server.port)
264 else:
265 url = _server_url(notebook_url, server.port)
266
267 script = server_document(url, resources=None)
268
269 publish_display_data({
270 HTML_MIME: script,
271 EXEC_MIME: ""
272 }, metadata={
273 EXEC_MIME: {"server_id": server_id}
274 })
275 return server
276
277
278 def show_embed(panel, max_states=1000, max_opts=3, json=False,
279 json_prefix='', save_path='./', load_path=None,
280 progress=True, states={}):
281 """
282 Renders a static version of a panel in a notebook by evaluating
283 the set of states defined by the widgets in the model. Note
284 this will only work well for simple apps with a relatively
285 small state space.
286
287 Arguments
288 ---------
289 max_states: int
290 The maximum number of states to embed
291 max_opts: int
292 The maximum number of states for a single widget
293 json: boolean (default=True)
294 Whether to export the data to json files
295 json_prefix: str (default='')
296 Prefix for JSON filename
297 save_path: str (default='./')
298 The path to save json files to
299 load_path: str (default=None)
300 The path or URL the json files will be loaded from.
301 progress: boolean (default=False)
302 Whether to report progress
303 states: dict (default={})
304 A dictionary specifying the widget values to embed for each widget
305 """
306 from IPython.display import publish_display_data
307 from ..config import config
308
309 doc = Document()
310 comm = Comm()
311 with config.set(embed=True):
312 model = panel.get_root(doc, comm)
313 embed_state(panel, model, doc, max_states, max_opts,
314 json, json_prefix, save_path, load_path, progress,
315 states)
316 publish_display_data(*render_model(model))
317
318
319 def ipywidget(obj, **kwargs):
320 """
321 Creates a root model from the Panel object and wraps it in
322 a jupyter_bokeh ipywidget BokehModel.
323
324 Arguments
325 ---------
326 obj: object
327 Any Panel object or object which can be rendered with Panel
328 **kwargs: dict
329 Keyword arguments passed to the pn.panel utility function
330
331 Returns
332 -------
333 Returns an ipywidget model which renders the Panel object.
334 """
335 from jupyter_bokeh import BokehModel
336 from ..pane import panel
337 model = panel(obj, **kwargs).get_root()
338 widget = BokehModel(model, combine_events=True)
339 if hasattr(widget, '_view_count'):
340 widget._view_count = 0
341 def view_count_changed(change, current=[model]):
342 new_model = None
343 if change['old'] > 0 and change['new'] == 0 and current:
344 obj._cleanup(current[0])
345 current[:] = []
346 elif (change['old'] == 0 and change['new'] > 0 and
347 (not current or current[0] is not model)):
348 if current:
349 try:
350 obj._cleanup(current[0])
351 except Exception:
352 pass
353 new_model = obj.get_root()
354 widget.update_from_model(new_model)
355 current[:] = [new_model]
356 widget.observe(view_count_changed, '_view_count')
357 return widget
358
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/panel/io/notebook.py b/panel/io/notebook.py
--- a/panel/io/notebook.py
+++ b/panel/io/notebook.py
@@ -25,7 +25,7 @@
from bokeh.embed.wrappers import wrap_in_script_tag
from bokeh.models import LayoutDOM, Model
from bokeh.resources import CDN, INLINE
-from bokeh.settings import settings
+from bokeh.settings import settings, _Unset
from bokeh.util.serialization import make_id
from pyviz_comms import (
PYVIZ_PROXY, Comm, JupyterCommManager as _JupyterCommManager, nb_mime_js
@@ -208,16 +208,20 @@
from IPython.display import publish_display_data
resources = INLINE if inline else CDN
- prev_resources = settings.resources()
+ prev_resources = settings.resources(default="server")
+ user_resources = settings.resources._user_value is not _Unset
try:
- settings.resources = 'cdn'
+ settings.resources = 'inline' if inline else 'cdn'
bundle = bundle_for_objs_and_resources(None, resources)
configs, requirements, exports, skip_imports = require_components()
ipywidget = 'ipywidgets_bokeh' in sys.modules
bokeh_js = _autoload_js(bundle, configs, requirements, exports,
skip_imports, ipywidget, load_timeout)
finally:
- settings.resources = prev_resources
+ if user_resources:
+ settings.resources = prev_resources
+ else:
+ settings.resources.unset_value()
publish_display_data({
'application/javascript': bokeh_js,
|
{"golden_diff": "diff --git a/panel/io/notebook.py b/panel/io/notebook.py\n--- a/panel/io/notebook.py\n+++ b/panel/io/notebook.py\n@@ -25,7 +25,7 @@\n from bokeh.embed.wrappers import wrap_in_script_tag\n from bokeh.models import LayoutDOM, Model\n from bokeh.resources import CDN, INLINE\n-from bokeh.settings import settings\n+from bokeh.settings import settings, _Unset\n from bokeh.util.serialization import make_id\n from pyviz_comms import (\n PYVIZ_PROXY, Comm, JupyterCommManager as _JupyterCommManager, nb_mime_js\n@@ -208,16 +208,20 @@\n from IPython.display import publish_display_data\n \n resources = INLINE if inline else CDN\n- prev_resources = settings.resources()\n+ prev_resources = settings.resources(default=\"server\")\n+ user_resources = settings.resources._user_value is not _Unset\n try:\n- settings.resources = 'cdn'\n+ settings.resources = 'inline' if inline else 'cdn'\n bundle = bundle_for_objs_and_resources(None, resources)\n configs, requirements, exports, skip_imports = require_components()\n ipywidget = 'ipywidgets_bokeh' in sys.modules\n bokeh_js = _autoload_js(bundle, configs, requirements, exports,\n skip_imports, ipywidget, load_timeout)\n finally:\n- settings.resources = prev_resources\n+ if user_resources:\n+ settings.resources = prev_resources\n+ else:\n+ settings.resources.unset_value()\n \n publish_display_data({\n 'application/javascript': bokeh_js,\n", "issue": "Panel show not working in chrome\n\r\n\r\nMRE:\r\n```python\r\nimport panel as pn\r\npn.extension()\r\n\r\npn.widgets.FloatSlider().show()\r\n```\r\n\r\ntested on linux and windows 10\n", "before_files": [{"content": "\"\"\"\nVarious utilities for loading JS dependencies and rendering plots\ninside the Jupyter notebook.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport json\nimport uuid\nimport sys\n\nfrom contextlib import contextmanager\nfrom collections import OrderedDict\nfrom six import string_types\n\nimport bokeh\nimport bokeh.embed.notebook\n\nfrom bokeh.core.json_encoder import serialize_json\nfrom bokeh.core.templates import MACROS\nfrom bokeh.document import Document\nfrom bokeh.embed import server_document\nfrom bokeh.embed.bundle import bundle_for_objs_and_resources\nfrom bokeh.embed.elements import div_for_render_item, script_for_render_items\nfrom bokeh.embed.util import standalone_docs_json_and_render_items\nfrom bokeh.embed.wrappers import wrap_in_script_tag\nfrom bokeh.models import LayoutDOM, Model\nfrom bokeh.resources import CDN, INLINE\nfrom bokeh.settings import settings\nfrom bokeh.util.serialization import make_id\nfrom pyviz_comms import (\n PYVIZ_PROXY, Comm, JupyterCommManager as _JupyterCommManager, nb_mime_js\n)\n\ntry:\n from bokeh.util.string import escape\nexcept Exception:\n from html import escape\n\nfrom ..compiler import require_components\nfrom .embed import embed_state\nfrom .model import add_to_doc, diff\nfrom .resources import _env\nfrom .server import _server_url, _origin_url, get_server\nfrom .state import state\n\n\n#---------------------------------------------------------------------\n# Private API\n#---------------------------------------------------------------------\n\nLOAD_MIME = 'application/vnd.holoviews_load.v0+json'\nEXEC_MIME = 'application/vnd.holoviews_exec.v0+json'\nHTML_MIME = 'text/html'\n\ndef push(doc, comm, binary=True):\n \"\"\"\n Pushes events stored on the document across the provided comm.\n \"\"\"\n msg = diff(doc, binary=binary)\n if msg is None:\n return\n comm.send(msg.header_json)\n 
comm.send(msg.metadata_json)\n comm.send(msg.content_json)\n for header, payload in msg.buffers:\n comm.send(json.dumps(header))\n comm.send(buffers=[payload])\n\nDOC_NB_JS = _env.get_template(\"doc_nb_js.js\")\nAUTOLOAD_NB_JS = _env.get_template(\"autoload_panel_js.js\")\nNB_TEMPLATE_BASE = _env.get_template('nb_template.html')\n\ndef _autoload_js(bundle, configs, requirements, exports, skip_imports, ipywidget, load_timeout=5000):\n return AUTOLOAD_NB_JS.render(\n bundle = bundle,\n force = True,\n timeout = load_timeout,\n configs = configs,\n requirements = requirements,\n exports = exports,\n skip_imports = skip_imports,\n ipywidget = ipywidget\n )\n\n\ndef html_for_render_items(docs_json, render_items, template=None, template_variables={}):\n json_id = make_id()\n json = escape(serialize_json(docs_json), quote=False)\n json = wrap_in_script_tag(json, \"application/json\", json_id)\n\n script = wrap_in_script_tag(script_for_render_items(json_id, render_items))\n\n context = template_variables.copy()\n\n context.update(dict(\n title = '',\n plot_script = json + script,\n docs = render_items,\n base = NB_TEMPLATE_BASE,\n macros = MACROS,\n ))\n\n if len(render_items) == 1:\n context[\"doc\"] = context[\"docs\"][0]\n context[\"roots\"] = context[\"doc\"].roots\n\n if template is None:\n template = NB_TEMPLATE_BASE\n elif isinstance(template, string_types):\n template = _env.from_string(\"{% extends base %}\\n\" + template)\n\n return template.render(context)\n\n\ndef render_template(document, comm=None, manager=None):\n ref = document.roots[0].ref['id']\n (docs_json, render_items) = standalone_docs_json_and_render_items(document, True)\n\n # We do not want the CommManager to appear in the roots because\n # the custom template may not reference it\n if manager:\n item = render_items[0]\n item.roots._roots = OrderedDict(list(item.roots._roots.items())[:-1])\n\n html = html_for_render_items(\n docs_json, render_items, template=document.template,\n template_variables=document.template_variables\n )\n return ({'text/html': html, EXEC_MIME: ''}, {EXEC_MIME: {'id': ref}})\n\n\ndef render_model(model, comm=None):\n if not isinstance(model, Model):\n raise ValueError(\"notebook_content expects a single Model instance\")\n from ..config import panel_extension as pnext\n\n target = model.ref['id']\n\n (docs_json, [render_item]) = standalone_docs_json_and_render_items([model], True)\n div = div_for_render_item(render_item)\n render_item = render_item.to_json()\n requirements = [pnext._globals[ext] for ext in pnext._loaded_extensions\n if ext in pnext._globals]\n ipywidget = 'ipywidgets_bokeh' in sys.modules\n\n script = DOC_NB_JS.render(\n docs_json=serialize_json(docs_json),\n render_items=serialize_json([render_item]),\n requirements=requirements,\n ipywidget=ipywidget\n )\n bokeh_script, bokeh_div = script, div\n html = \"<div id='{id}'>{html}</div>\".format(id=target, html=bokeh_div)\n\n data = {'text/html': html, 'application/javascript': bokeh_script}\n return ({'text/html': mimebundle_to_html(data), EXEC_MIME: ''},\n {EXEC_MIME: {'id': target}})\n\n\ndef render_mimebundle(model, doc, comm, manager=None, location=None):\n \"\"\"\n Displays bokeh output inside a notebook using the PyViz display\n and comms machinery.\n \"\"\"\n if not isinstance(model, LayoutDOM):\n raise ValueError('Can only render bokeh LayoutDOM models')\n add_to_doc(model, doc, True)\n if manager is not None:\n doc.add_root(manager)\n if location is not None:\n loc = location._get_model(doc, model, model, comm)\n 
doc.add_root(loc)\n return render_model(model, comm)\n\n\ndef mimebundle_to_html(bundle):\n \"\"\"\n Converts a MIME bundle into HTML.\n \"\"\"\n if isinstance(bundle, tuple):\n data, metadata = bundle\n else:\n data = bundle\n html = data.get('text/html', '')\n if 'application/javascript' in data:\n js = data['application/javascript']\n html += '\\n<script type=\"application/javascript\">{js}</script>'.format(js=js)\n return html\n\n#---------------------------------------------------------------------\n# Public API\n#---------------------------------------------------------------------\n\n\n@contextmanager\ndef block_comm():\n \"\"\"\n Context manager to temporarily block comm push\n \"\"\"\n state._hold = True\n try:\n yield\n finally:\n state._hold = False\n\n\ndef load_notebook(inline=True, load_timeout=5000):\n from IPython.display import publish_display_data\n\n resources = INLINE if inline else CDN\n prev_resources = settings.resources()\n try:\n settings.resources = 'cdn'\n bundle = bundle_for_objs_and_resources(None, resources)\n configs, requirements, exports, skip_imports = require_components()\n ipywidget = 'ipywidgets_bokeh' in sys.modules\n bokeh_js = _autoload_js(bundle, configs, requirements, exports,\n skip_imports, ipywidget, load_timeout)\n finally:\n settings.resources = prev_resources\n\n publish_display_data({\n 'application/javascript': bokeh_js,\n LOAD_MIME: bokeh_js,\n })\n bokeh.io.notebook.curstate().output_notebook()\n\n # Publish comm manager\n JS = '\\n'.join([PYVIZ_PROXY, _JupyterCommManager.js_manager, nb_mime_js])\n publish_display_data(data={LOAD_MIME: JS, 'application/javascript': JS})\n\n\ndef show_server(panel, notebook_url, port):\n \"\"\"\n Displays a bokeh server inline in the notebook.\n\n Arguments\n ---------\n panel: Viewable\n Panel Viewable object to launch a server for\n notebook_url: str\n The URL of the running Jupyter notebook server\n port: int (optional, default=0)\n Allows specifying a specific port\n server_id: str\n Unique ID to identify the server with\n\n Returns\n -------\n server: bokeh.server.Server\n \"\"\"\n from IPython.display import publish_display_data\n\n if callable(notebook_url):\n origin = notebook_url(None)\n else:\n origin = _origin_url(notebook_url)\n server_id = uuid.uuid4().hex\n server = get_server(panel, port=port, websocket_origin=origin,\n start=True, show=False, server_id=server_id)\n\n if callable(notebook_url):\n url = notebook_url(server.port)\n else:\n url = _server_url(notebook_url, server.port)\n\n script = server_document(url, resources=None)\n\n publish_display_data({\n HTML_MIME: script,\n EXEC_MIME: \"\"\n }, metadata={\n EXEC_MIME: {\"server_id\": server_id}\n })\n return server\n\n\ndef show_embed(panel, max_states=1000, max_opts=3, json=False,\n json_prefix='', save_path='./', load_path=None,\n progress=True, states={}):\n \"\"\"\n Renders a static version of a panel in a notebook by evaluating\n the set of states defined by the widgets in the model. 
Note\n this will only work well for simple apps with a relatively\n small state space.\n\n Arguments\n ---------\n max_states: int\n The maximum number of states to embed\n max_opts: int\n The maximum number of states for a single widget\n json: boolean (default=True)\n Whether to export the data to json files\n json_prefix: str (default='')\n Prefix for JSON filename\n save_path: str (default='./')\n The path to save json files to\n load_path: str (default=None)\n The path or URL the json files will be loaded from.\n progress: boolean (default=False)\n Whether to report progress\n states: dict (default={})\n A dictionary specifying the widget values to embed for each widget\n \"\"\"\n from IPython.display import publish_display_data\n from ..config import config\n\n doc = Document()\n comm = Comm()\n with config.set(embed=True):\n model = panel.get_root(doc, comm)\n embed_state(panel, model, doc, max_states, max_opts,\n json, json_prefix, save_path, load_path, progress,\n states)\n publish_display_data(*render_model(model))\n\n\ndef ipywidget(obj, **kwargs):\n \"\"\"\n Creates a root model from the Panel object and wraps it in\n a jupyter_bokeh ipywidget BokehModel.\n\n Arguments\n ---------\n obj: object\n Any Panel object or object which can be rendered with Panel\n **kwargs: dict\n Keyword arguments passed to the pn.panel utility function\n\n Returns\n -------\n Returns an ipywidget model which renders the Panel object.\n \"\"\"\n from jupyter_bokeh import BokehModel\n from ..pane import panel\n model = panel(obj, **kwargs).get_root()\n widget = BokehModel(model, combine_events=True)\n if hasattr(widget, '_view_count'):\n widget._view_count = 0\n def view_count_changed(change, current=[model]):\n new_model = None\n if change['old'] > 0 and change['new'] == 0 and current:\n obj._cleanup(current[0])\n current[:] = []\n elif (change['old'] == 0 and change['new'] > 0 and\n (not current or current[0] is not model)):\n if current:\n try:\n obj._cleanup(current[0])\n except Exception:\n pass\n new_model = obj.get_root()\n widget.update_from_model(new_model)\n current[:] = [new_model]\n widget.observe(view_count_changed, '_view_count')\n return widget\n", "path": "panel/io/notebook.py"}], "after_files": [{"content": "\"\"\"\nVarious utilities for loading JS dependencies and rendering plots\ninside the Jupyter notebook.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport json\nimport uuid\nimport sys\n\nfrom contextlib import contextmanager\nfrom collections import OrderedDict\nfrom six import string_types\n\nimport bokeh\nimport bokeh.embed.notebook\n\nfrom bokeh.core.json_encoder import serialize_json\nfrom bokeh.core.templates import MACROS\nfrom bokeh.document import Document\nfrom bokeh.embed import server_document\nfrom bokeh.embed.bundle import bundle_for_objs_and_resources\nfrom bokeh.embed.elements import div_for_render_item, script_for_render_items\nfrom bokeh.embed.util import standalone_docs_json_and_render_items\nfrom bokeh.embed.wrappers import wrap_in_script_tag\nfrom bokeh.models import LayoutDOM, Model\nfrom bokeh.resources import CDN, INLINE\nfrom bokeh.settings import settings, _Unset\nfrom bokeh.util.serialization import make_id\nfrom pyviz_comms import (\n PYVIZ_PROXY, Comm, JupyterCommManager as _JupyterCommManager, nb_mime_js\n)\n\ntry:\n from bokeh.util.string import escape\nexcept Exception:\n from html import escape\n\nfrom ..compiler import require_components\nfrom .embed import embed_state\nfrom .model import add_to_doc, diff\nfrom 
.resources import _env\nfrom .server import _server_url, _origin_url, get_server\nfrom .state import state\n\n\n#---------------------------------------------------------------------\n# Private API\n#---------------------------------------------------------------------\n\nLOAD_MIME = 'application/vnd.holoviews_load.v0+json'\nEXEC_MIME = 'application/vnd.holoviews_exec.v0+json'\nHTML_MIME = 'text/html'\n\ndef push(doc, comm, binary=True):\n \"\"\"\n Pushes events stored on the document across the provided comm.\n \"\"\"\n msg = diff(doc, binary=binary)\n if msg is None:\n return\n comm.send(msg.header_json)\n comm.send(msg.metadata_json)\n comm.send(msg.content_json)\n for header, payload in msg.buffers:\n comm.send(json.dumps(header))\n comm.send(buffers=[payload])\n\nDOC_NB_JS = _env.get_template(\"doc_nb_js.js\")\nAUTOLOAD_NB_JS = _env.get_template(\"autoload_panel_js.js\")\nNB_TEMPLATE_BASE = _env.get_template('nb_template.html')\n\ndef _autoload_js(bundle, configs, requirements, exports, skip_imports, ipywidget, load_timeout=5000):\n return AUTOLOAD_NB_JS.render(\n bundle = bundle,\n force = True,\n timeout = load_timeout,\n configs = configs,\n requirements = requirements,\n exports = exports,\n skip_imports = skip_imports,\n ipywidget = ipywidget\n )\n\n\ndef html_for_render_items(docs_json, render_items, template=None, template_variables={}):\n json_id = make_id()\n json = escape(serialize_json(docs_json), quote=False)\n json = wrap_in_script_tag(json, \"application/json\", json_id)\n\n script = wrap_in_script_tag(script_for_render_items(json_id, render_items))\n\n context = template_variables.copy()\n\n context.update(dict(\n title = '',\n plot_script = json + script,\n docs = render_items,\n base = NB_TEMPLATE_BASE,\n macros = MACROS,\n ))\n\n if len(render_items) == 1:\n context[\"doc\"] = context[\"docs\"][0]\n context[\"roots\"] = context[\"doc\"].roots\n\n if template is None:\n template = NB_TEMPLATE_BASE\n elif isinstance(template, string_types):\n template = _env.from_string(\"{% extends base %}\\n\" + template)\n\n return template.render(context)\n\n\ndef render_template(document, comm=None, manager=None):\n ref = document.roots[0].ref['id']\n (docs_json, render_items) = standalone_docs_json_and_render_items(document, True)\n\n # We do not want the CommManager to appear in the roots because\n # the custom template may not reference it\n if manager:\n item = render_items[0]\n item.roots._roots = OrderedDict(list(item.roots._roots.items())[:-1])\n\n html = html_for_render_items(\n docs_json, render_items, template=document.template,\n template_variables=document.template_variables\n )\n return ({'text/html': html, EXEC_MIME: ''}, {EXEC_MIME: {'id': ref}})\n\n\ndef render_model(model, comm=None):\n if not isinstance(model, Model):\n raise ValueError(\"notebook_content expects a single Model instance\")\n from ..config import panel_extension as pnext\n\n target = model.ref['id']\n\n (docs_json, [render_item]) = standalone_docs_json_and_render_items([model], True)\n div = div_for_render_item(render_item)\n render_item = render_item.to_json()\n requirements = [pnext._globals[ext] for ext in pnext._loaded_extensions\n if ext in pnext._globals]\n ipywidget = 'ipywidgets_bokeh' in sys.modules\n\n script = DOC_NB_JS.render(\n docs_json=serialize_json(docs_json),\n render_items=serialize_json([render_item]),\n requirements=requirements,\n ipywidget=ipywidget\n )\n bokeh_script, bokeh_div = script, div\n html = \"<div id='{id}'>{html}</div>\".format(id=target, html=bokeh_div)\n\n data 
= {'text/html': html, 'application/javascript': bokeh_script}\n return ({'text/html': mimebundle_to_html(data), EXEC_MIME: ''},\n {EXEC_MIME: {'id': target}})\n\n\ndef render_mimebundle(model, doc, comm, manager=None, location=None):\n \"\"\"\n Displays bokeh output inside a notebook using the PyViz display\n and comms machinery.\n \"\"\"\n if not isinstance(model, LayoutDOM):\n raise ValueError('Can only render bokeh LayoutDOM models')\n add_to_doc(model, doc, True)\n if manager is not None:\n doc.add_root(manager)\n if location is not None:\n loc = location._get_model(doc, model, model, comm)\n doc.add_root(loc)\n return render_model(model, comm)\n\n\ndef mimebundle_to_html(bundle):\n \"\"\"\n Converts a MIME bundle into HTML.\n \"\"\"\n if isinstance(bundle, tuple):\n data, metadata = bundle\n else:\n data = bundle\n html = data.get('text/html', '')\n if 'application/javascript' in data:\n js = data['application/javascript']\n html += '\\n<script type=\"application/javascript\">{js}</script>'.format(js=js)\n return html\n\n#---------------------------------------------------------------------\n# Public API\n#---------------------------------------------------------------------\n\n\n@contextmanager\ndef block_comm():\n \"\"\"\n Context manager to temporarily block comm push\n \"\"\"\n state._hold = True\n try:\n yield\n finally:\n state._hold = False\n\n\ndef load_notebook(inline=True, load_timeout=5000):\n from IPython.display import publish_display_data\n\n resources = INLINE if inline else CDN\n prev_resources = settings.resources(default=\"server\")\n user_resources = settings.resources._user_value is not _Unset\n try:\n settings.resources = 'inline' if inline else 'cdn'\n bundle = bundle_for_objs_and_resources(None, resources)\n configs, requirements, exports, skip_imports = require_components()\n ipywidget = 'ipywidgets_bokeh' in sys.modules\n bokeh_js = _autoload_js(bundle, configs, requirements, exports,\n skip_imports, ipywidget, load_timeout)\n finally:\n if user_resources:\n settings.resources = prev_resources\n else:\n settings.resources.unset_value()\n\n publish_display_data({\n 'application/javascript': bokeh_js,\n LOAD_MIME: bokeh_js,\n })\n bokeh.io.notebook.curstate().output_notebook()\n\n # Publish comm manager\n JS = '\\n'.join([PYVIZ_PROXY, _JupyterCommManager.js_manager, nb_mime_js])\n publish_display_data(data={LOAD_MIME: JS, 'application/javascript': JS})\n\n\ndef show_server(panel, notebook_url, port):\n \"\"\"\n Displays a bokeh server inline in the notebook.\n\n Arguments\n ---------\n panel: Viewable\n Panel Viewable object to launch a server for\n notebook_url: str\n The URL of the running Jupyter notebook server\n port: int (optional, default=0)\n Allows specifying a specific port\n server_id: str\n Unique ID to identify the server with\n\n Returns\n -------\n server: bokeh.server.Server\n \"\"\"\n from IPython.display import publish_display_data\n\n if callable(notebook_url):\n origin = notebook_url(None)\n else:\n origin = _origin_url(notebook_url)\n server_id = uuid.uuid4().hex\n server = get_server(panel, port=port, websocket_origin=origin,\n start=True, show=False, server_id=server_id)\n\n if callable(notebook_url):\n url = notebook_url(server.port)\n else:\n url = _server_url(notebook_url, server.port)\n\n script = server_document(url, resources=None)\n\n publish_display_data({\n HTML_MIME: script,\n EXEC_MIME: \"\"\n }, metadata={\n EXEC_MIME: {\"server_id\": server_id}\n })\n return server\n\n\ndef show_embed(panel, max_states=1000, max_opts=3, 
json=False,\n json_prefix='', save_path='./', load_path=None,\n progress=True, states={}):\n \"\"\"\n Renders a static version of a panel in a notebook by evaluating\n the set of states defined by the widgets in the model. Note\n this will only work well for simple apps with a relatively\n small state space.\n\n Arguments\n ---------\n max_states: int\n The maximum number of states to embed\n max_opts: int\n The maximum number of states for a single widget\n json: boolean (default=True)\n Whether to export the data to json files\n json_prefix: str (default='')\n Prefix for JSON filename\n save_path: str (default='./')\n The path to save json files to\n load_path: str (default=None)\n The path or URL the json files will be loaded from.\n progress: boolean (default=False)\n Whether to report progress\n states: dict (default={})\n A dictionary specifying the widget values to embed for each widget\n \"\"\"\n from IPython.display import publish_display_data\n from ..config import config\n\n doc = Document()\n comm = Comm()\n with config.set(embed=True):\n model = panel.get_root(doc, comm)\n embed_state(panel, model, doc, max_states, max_opts,\n json, json_prefix, save_path, load_path, progress,\n states)\n publish_display_data(*render_model(model))\n\n\ndef ipywidget(obj, **kwargs):\n \"\"\"\n Creates a root model from the Panel object and wraps it in\n a jupyter_bokeh ipywidget BokehModel.\n\n Arguments\n ---------\n obj: object\n Any Panel object or object which can be rendered with Panel\n **kwargs: dict\n Keyword arguments passed to the pn.panel utility function\n\n Returns\n -------\n Returns an ipywidget model which renders the Panel object.\n \"\"\"\n from jupyter_bokeh import BokehModel\n from ..pane import panel\n model = panel(obj, **kwargs).get_root()\n widget = BokehModel(model, combine_events=True)\n if hasattr(widget, '_view_count'):\n widget._view_count = 0\n def view_count_changed(change, current=[model]):\n new_model = None\n if change['old'] > 0 and change['new'] == 0 and current:\n obj._cleanup(current[0])\n current[:] = []\n elif (change['old'] == 0 and change['new'] > 0 and\n (not current or current[0] is not model)):\n if current:\n try:\n obj._cleanup(current[0])\n except Exception:\n pass\n new_model = obj.get_root()\n widget.update_from_model(new_model)\n current[:] = [new_model]\n widget.observe(view_count_changed, '_view_count')\n return widget\n", "path": "panel/io/notebook.py"}]}
| 3,944 | 357 |
gh_patches_debug_35648
|
rasdani/github-patches
|
git_diff
|
searxng__searxng-2747
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[FEATURE REQUEST] language filtering and safe search with odysee
**Is your feature request related to a problem? Please describe.**
No
**Describe the solution you'd like**
We can use, for example, language=de in the search URL. en, de-DE, and en-US also seem to work. There is no list of supported languages afaik, we just need to try things out one by one.
for safe search Moderate/Strict we should use nsfw=false in the URL
**Additional context**
The information that you need for this is here: https://github.com/searx/searx/issues/2504
----
Related
- https://github.com/searxng/searxng/pull/2656
- https://github.com/searxng/searxng/issues/590
- [lbr command line](https://gitlab.com/gardenappl/lbt/-/blob/main/lbt?ref_type=heads)
- [LBRY SDK ](https://github.com/lbryio/lbry-sdk/)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/odysee.py`
Content:
```
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 # lint: pylint
3 """Odysee_ is a decentralised video hosting platform.
4
5 .. _Odysee: https://github.com/OdyseeTeam/odysee-frontend
6 """
7
8 import time
9 from urllib.parse import urlencode
10 from datetime import datetime
11
12 # Engine metadata
13 about = {
14 "website": "https://odysee.com/",
15 "wikidata_id": "Q102046570",
16 "official_api_documentation": None,
17 "use_official_api": False,
18 "require_api_key": False,
19 "results": "JSON",
20 }
21
22 # Engine configuration
23 paging = True
24 results_per_page = 20
25 categories = ['videos']
26
27 # Search URL (Note: lighthouse.lbry.com/search works too, and may be faster at times)
28 base_url = "https://lighthouse.odysee.tv/search"
29
30
31 def request(query, params):
32 start_index = (params["pageno"] - 1) * results_per_page
33 query_params = {
34 "s": query,
35 "size": results_per_page,
36 "from": start_index,
37 "include": "channel,thumbnail_url,title,description,duration,release_time",
38 "mediaType": "video",
39 }
40
41 params["url"] = f"{base_url}?{urlencode(query_params)}"
42 return params
43
44
45 # Format the video duration
46 def format_duration(duration):
47 seconds = int(duration)
48 length = time.gmtime(seconds)
49 if length.tm_hour:
50 return time.strftime("%H:%M:%S", length)
51 return time.strftime("%M:%S", length)
52
53
54 def response(resp):
55 data = resp.json()
56 results = []
57
58 for item in data:
59 name = item["name"]
60 claim_id = item["claimId"]
61 title = item["title"]
62 thumbnail_url = item["thumbnail_url"]
63 description = item["description"] or ""
64 channel = item["channel"]
65 release_time = item["release_time"]
66 duration = item["duration"]
67
68 release_date = datetime.strptime(release_time.split("T")[0], "%Y-%m-%d")
69 formatted_date = datetime.utcfromtimestamp(release_date.timestamp())
70
71 url = f"https://odysee.com/{name}:{claim_id}"
72 iframe_url = f"https://odysee.com/$/embed/{name}:{claim_id}"
73 odysee_thumbnail = f"https://thumbnails.odycdn.com/optimize/s:390:0/quality:85/plain/{thumbnail_url}"
74 formatted_duration = format_duration(duration)
75
76 results.append(
77 {
78 "title": title,
79 "url": url,
80 "content": description,
81 "author": channel,
82 "publishedDate": formatted_date,
83 "length": formatted_duration,
84 "thumbnail": odysee_thumbnail,
85 "iframe_src": iframe_url,
86 "template": "videos.html",
87 }
88 )
89
90 return results
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/searx/engines/odysee.py b/searx/engines/odysee.py
--- a/searx/engines/odysee.py
+++ b/searx/engines/odysee.py
@@ -9,6 +9,14 @@
from urllib.parse import urlencode
from datetime import datetime
+import babel
+
+from searx.network import get
+from searx.locales import language_tag
+from searx.enginelib.traits import EngineTraits
+
+traits: EngineTraits
+
# Engine metadata
about = {
"website": "https://odysee.com/",
@@ -21,6 +29,7 @@
# Engine configuration
paging = True
+time_range_support = True
results_per_page = 20
categories = ['videos']
@@ -29,6 +38,13 @@
def request(query, params):
+ time_range_dict = {
+ "day": "today",
+ "week": "thisweek",
+ "month": "thismonth",
+ "year": "thisyear",
+ }
+
start_index = (params["pageno"] - 1) * results_per_page
query_params = {
"s": query,
@@ -38,6 +54,13 @@
"mediaType": "video",
}
+ lang = traits.get_language(params['searxng_locale'], None)
+ if lang is not None:
+ query_params['language'] = lang
+
+ if params['time_range'] in time_range_dict:
+ query_params['time_filter'] = time_range_dict[params['time_range']]
+
params["url"] = f"{base_url}?{urlencode(query_params)}"
return params
@@ -88,3 +111,35 @@
)
return results
+
+
+def fetch_traits(engine_traits: EngineTraits):
+ """
+ Fetch languages from Odysee's source code.
+ """
+
+ resp = get(
+ 'https://raw.githubusercontent.com/OdyseeTeam/odysee-frontend/master/ui/constants/supported_browser_languages.js', # pylint: disable=line-too-long
+ timeout=60,
+ )
+
+ if not resp.ok:
+ print("ERROR: can't determine languages from Odysee")
+ return
+
+ for line in resp.text.split("\n")[1:-4]:
+ lang_tag = line.strip().split(": ")[0].replace("'", "")
+
+ try:
+ sxng_tag = language_tag(babel.Locale.parse(lang_tag, sep="-"))
+ except babel.UnknownLocaleError:
+ print("ERROR: %s is unknown by babel" % lang_tag)
+ continue
+
+ conflict = engine_traits.languages.get(sxng_tag)
+ if conflict:
+ if conflict != lang_tag:
+ print("CONFLICT: babel %s --> %s, %s" % (sxng_tag, conflict, lang_tag))
+ continue
+
+ engine_traits.languages[sxng_tag] = lang_tag
|
{"golden_diff": "diff --git a/searx/engines/odysee.py b/searx/engines/odysee.py\n--- a/searx/engines/odysee.py\n+++ b/searx/engines/odysee.py\n@@ -9,6 +9,14 @@\n from urllib.parse import urlencode\n from datetime import datetime\n \n+import babel\n+\n+from searx.network import get\n+from searx.locales import language_tag\n+from searx.enginelib.traits import EngineTraits\n+\n+traits: EngineTraits\n+\n # Engine metadata\n about = {\n \"website\": \"https://odysee.com/\",\n@@ -21,6 +29,7 @@\n \n # Engine configuration\n paging = True\n+time_range_support = True\n results_per_page = 20\n categories = ['videos']\n \n@@ -29,6 +38,13 @@\n \n \n def request(query, params):\n+ time_range_dict = {\n+ \"day\": \"today\",\n+ \"week\": \"thisweek\",\n+ \"month\": \"thismonth\",\n+ \"year\": \"thisyear\",\n+ }\n+\n start_index = (params[\"pageno\"] - 1) * results_per_page\n query_params = {\n \"s\": query,\n@@ -38,6 +54,13 @@\n \"mediaType\": \"video\",\n }\n \n+ lang = traits.get_language(params['searxng_locale'], None)\n+ if lang is not None:\n+ query_params['language'] = lang\n+\n+ if params['time_range'] in time_range_dict:\n+ query_params['time_filter'] = time_range_dict[params['time_range']]\n+\n params[\"url\"] = f\"{base_url}?{urlencode(query_params)}\"\n return params\n \n@@ -88,3 +111,35 @@\n )\n \n return results\n+\n+\n+def fetch_traits(engine_traits: EngineTraits):\n+ \"\"\"\n+ Fetch languages from Odysee's source code.\n+ \"\"\"\n+\n+ resp = get(\n+ 'https://raw.githubusercontent.com/OdyseeTeam/odysee-frontend/master/ui/constants/supported_browser_languages.js', # pylint: disable=line-too-long\n+ timeout=60,\n+ )\n+\n+ if not resp.ok:\n+ print(\"ERROR: can't determine languages from Odysee\")\n+ return\n+\n+ for line in resp.text.split(\"\\n\")[1:-4]:\n+ lang_tag = line.strip().split(\": \")[0].replace(\"'\", \"\")\n+\n+ try:\n+ sxng_tag = language_tag(babel.Locale.parse(lang_tag, sep=\"-\"))\n+ except babel.UnknownLocaleError:\n+ print(\"ERROR: %s is unknown by babel\" % lang_tag)\n+ continue\n+\n+ conflict = engine_traits.languages.get(sxng_tag)\n+ if conflict:\n+ if conflict != lang_tag:\n+ print(\"CONFLICT: babel %s --> %s, %s\" % (sxng_tag, conflict, lang_tag))\n+ continue\n+\n+ engine_traits.languages[sxng_tag] = lang_tag\n", "issue": "[FEATURE REQUEST] language filtering and safe search with odysee\n**Is your feature request related to a problem? Please describe.**\r\nNo\r\n\r\n**Describe the solution you'd like**\r\nWe can use, for example, language=de in the search URL. en, de-DE, and en-US also seem to work. There is no list of supported languages afaik, we just need to try things out one by one.\r\n\r\nfor safe search Moderate/Strict we should use nsfw=false in the URL\r\n\r\n**Additional context**\r\nThe information that you need for this is here: https://github.com/searx/searx/issues/2504\r\n\r\n\r\n----\r\nRelated\r\n\r\n- https://github.com/searxng/searxng/pull/2656\r\n- https://github.com/searxng/searxng/issues/590\r\n- [lbr command line](https://gitlab.com/gardenappl/lbt/-/blob/main/lbt?ref_type=heads)\r\n- [LBRY SDK ](https://github.com/lbryio/lbry-sdk/)\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Odysee_ is a decentralised video hosting platform.\n\n.. 
_Odysee: https://github.com/OdyseeTeam/odysee-frontend\n\"\"\"\n\nimport time\nfrom urllib.parse import urlencode\nfrom datetime import datetime\n\n# Engine metadata\nabout = {\n \"website\": \"https://odysee.com/\",\n \"wikidata_id\": \"Q102046570\",\n \"official_api_documentation\": None,\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": \"JSON\",\n}\n\n# Engine configuration\npaging = True\nresults_per_page = 20\ncategories = ['videos']\n\n# Search URL (Note: lighthouse.lbry.com/search works too, and may be faster at times)\nbase_url = \"https://lighthouse.odysee.tv/search\"\n\n\ndef request(query, params):\n start_index = (params[\"pageno\"] - 1) * results_per_page\n query_params = {\n \"s\": query,\n \"size\": results_per_page,\n \"from\": start_index,\n \"include\": \"channel,thumbnail_url,title,description,duration,release_time\",\n \"mediaType\": \"video\",\n }\n\n params[\"url\"] = f\"{base_url}?{urlencode(query_params)}\"\n return params\n\n\n# Format the video duration\ndef format_duration(duration):\n seconds = int(duration)\n length = time.gmtime(seconds)\n if length.tm_hour:\n return time.strftime(\"%H:%M:%S\", length)\n return time.strftime(\"%M:%S\", length)\n\n\ndef response(resp):\n data = resp.json()\n results = []\n\n for item in data:\n name = item[\"name\"]\n claim_id = item[\"claimId\"]\n title = item[\"title\"]\n thumbnail_url = item[\"thumbnail_url\"]\n description = item[\"description\"] or \"\"\n channel = item[\"channel\"]\n release_time = item[\"release_time\"]\n duration = item[\"duration\"]\n\n release_date = datetime.strptime(release_time.split(\"T\")[0], \"%Y-%m-%d\")\n formatted_date = datetime.utcfromtimestamp(release_date.timestamp())\n\n url = f\"https://odysee.com/{name}:{claim_id}\"\n iframe_url = f\"https://odysee.com/$/embed/{name}:{claim_id}\"\n odysee_thumbnail = f\"https://thumbnails.odycdn.com/optimize/s:390:0/quality:85/plain/{thumbnail_url}\"\n formatted_duration = format_duration(duration)\n\n results.append(\n {\n \"title\": title,\n \"url\": url,\n \"content\": description,\n \"author\": channel,\n \"publishedDate\": formatted_date,\n \"length\": formatted_duration,\n \"thumbnail\": odysee_thumbnail,\n \"iframe_src\": iframe_url,\n \"template\": \"videos.html\",\n }\n )\n\n return results\n", "path": "searx/engines/odysee.py"}], "after_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Odysee_ is a decentralised video hosting platform.\n\n.. 
_Odysee: https://github.com/OdyseeTeam/odysee-frontend\n\"\"\"\n\nimport time\nfrom urllib.parse import urlencode\nfrom datetime import datetime\n\nimport babel\n\nfrom searx.network import get\nfrom searx.locales import language_tag\nfrom searx.enginelib.traits import EngineTraits\n\ntraits: EngineTraits\n\n# Engine metadata\nabout = {\n \"website\": \"https://odysee.com/\",\n \"wikidata_id\": \"Q102046570\",\n \"official_api_documentation\": None,\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": \"JSON\",\n}\n\n# Engine configuration\npaging = True\ntime_range_support = True\nresults_per_page = 20\ncategories = ['videos']\n\n# Search URL (Note: lighthouse.lbry.com/search works too, and may be faster at times)\nbase_url = \"https://lighthouse.odysee.tv/search\"\n\n\ndef request(query, params):\n time_range_dict = {\n \"day\": \"today\",\n \"week\": \"thisweek\",\n \"month\": \"thismonth\",\n \"year\": \"thisyear\",\n }\n\n start_index = (params[\"pageno\"] - 1) * results_per_page\n query_params = {\n \"s\": query,\n \"size\": results_per_page,\n \"from\": start_index,\n \"include\": \"channel,thumbnail_url,title,description,duration,release_time\",\n \"mediaType\": \"video\",\n }\n\n lang = traits.get_language(params['searxng_locale'], None)\n if lang is not None:\n query_params['language'] = lang\n\n if params['time_range'] in time_range_dict:\n query_params['time_filter'] = time_range_dict[params['time_range']]\n\n params[\"url\"] = f\"{base_url}?{urlencode(query_params)}\"\n return params\n\n\n# Format the video duration\ndef format_duration(duration):\n seconds = int(duration)\n length = time.gmtime(seconds)\n if length.tm_hour:\n return time.strftime(\"%H:%M:%S\", length)\n return time.strftime(\"%M:%S\", length)\n\n\ndef response(resp):\n data = resp.json()\n results = []\n\n for item in data:\n name = item[\"name\"]\n claim_id = item[\"claimId\"]\n title = item[\"title\"]\n thumbnail_url = item[\"thumbnail_url\"]\n description = item[\"description\"] or \"\"\n channel = item[\"channel\"]\n release_time = item[\"release_time\"]\n duration = item[\"duration\"]\n\n release_date = datetime.strptime(release_time.split(\"T\")[0], \"%Y-%m-%d\")\n formatted_date = datetime.utcfromtimestamp(release_date.timestamp())\n\n url = f\"https://odysee.com/{name}:{claim_id}\"\n iframe_url = f\"https://odysee.com/$/embed/{name}:{claim_id}\"\n odysee_thumbnail = f\"https://thumbnails.odycdn.com/optimize/s:390:0/quality:85/plain/{thumbnail_url}\"\n formatted_duration = format_duration(duration)\n\n results.append(\n {\n \"title\": title,\n \"url\": url,\n \"content\": description,\n \"author\": channel,\n \"publishedDate\": formatted_date,\n \"length\": formatted_duration,\n \"thumbnail\": odysee_thumbnail,\n \"iframe_src\": iframe_url,\n \"template\": \"videos.html\",\n }\n )\n\n return results\n\n\ndef fetch_traits(engine_traits: EngineTraits):\n \"\"\"\n Fetch languages from Odysee's source code.\n \"\"\"\n\n resp = get(\n 'https://raw.githubusercontent.com/OdyseeTeam/odysee-frontend/master/ui/constants/supported_browser_languages.js', # pylint: disable=line-too-long\n timeout=60,\n )\n\n if not resp.ok:\n print(\"ERROR: can't determine languages from Odysee\")\n return\n\n for line in resp.text.split(\"\\n\")[1:-4]:\n lang_tag = line.strip().split(\": \")[0].replace(\"'\", \"\")\n\n try:\n sxng_tag = language_tag(babel.Locale.parse(lang_tag, sep=\"-\"))\n except babel.UnknownLocaleError:\n print(\"ERROR: %s is unknown by babel\" % lang_tag)\n continue\n\n conflict = 
engine_traits.languages.get(sxng_tag)\n if conflict:\n if conflict != lang_tag:\n print(\"CONFLICT: babel %s --> %s, %s\" % (sxng_tag, conflict, lang_tag))\n continue\n\n engine_traits.languages[sxng_tag] = lang_tag\n", "path": "searx/engines/odysee.py"}]}
| 1,306 | 677 |
gh_patches_debug_17292
|
rasdani/github-patches
|
git_diff
|
beetbox__beets-2870
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use Artist Credits for tag data, but "actual" artist data for filenaming
Currently beets will always normalise [artist credit](https://musicbrainz.org/doc/Artist_Credit) data to the current artist name. However, I want to see when playing the music when, e.g., [Orgi-E](https://musicbrainz.org/artist/345fe3da-b2cb-4ad4-a1a5-43afc903663d) was credited as [Klamfyr](https://musicbrainz.org/release/d09b3568-e9cc-4458-bcf7-0c215cca75ce), but I still like the normalisation for file tree organisation purposes. This should probably be an option though, as other people will likely want to always normalise the name (and others might want to not normalise the name in the path as well).
(Somewhat related morituri issues: thomasvs/morituri#80, thomasvs/morituri#48)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `beets/autotag/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This file is part of beets.
3 # Copyright 2016, Adrian Sampson.
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining
6 # a copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish,
9 # distribute, sublicense, and/or sell copies of the Software, and to
10 # permit persons to whom the Software is furnished to do so, subject to
11 # the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be
14 # included in all copies or substantial portions of the Software.
15
16 """Facilities for automatically determining files' correct metadata.
17 """
18
19 from __future__ import division, absolute_import, print_function
20
21 from beets import logging
22 from beets import config
23
24 # Parts of external interface.
25 from .hooks import AlbumInfo, TrackInfo, AlbumMatch, TrackMatch # noqa
26 from .match import tag_item, tag_album, Proposal # noqa
27 from .match import Recommendation # noqa
28
29 # Global logger.
30 log = logging.getLogger('beets')
31
32
33 # Additional utilities for the main interface.
34
35 def apply_item_metadata(item, track_info):
36 """Set an item's metadata from its matched TrackInfo object.
37 """
38 item.artist = track_info.artist
39 item.artist_sort = track_info.artist_sort
40 item.artist_credit = track_info.artist_credit
41 item.title = track_info.title
42 item.mb_trackid = track_info.track_id
43 if track_info.artist_id:
44 item.mb_artistid = track_info.artist_id
45 if track_info.data_source:
46 item.data_source = track_info.data_source
47
48 if track_info.lyricist is not None:
49 item.lyricist = track_info.lyricist
50 if track_info.composer is not None:
51 item.composer = track_info.composer
52 if track_info.composer_sort is not None:
53 item.composer_sort = track_info.composer_sort
54 if track_info.arranger is not None:
55 item.arranger = track_info.arranger
56
57 # At the moment, the other metadata is left intact (including album
58 # and track number). Perhaps these should be emptied?
59
60
61 def apply_metadata(album_info, mapping):
62 """Set the items' metadata to match an AlbumInfo object using a
63 mapping from Items to TrackInfo objects.
64 """
65 for item, track_info in mapping.items():
66 # Album, artist, track count.
67 if track_info.artist:
68 item.artist = track_info.artist
69 else:
70 item.artist = album_info.artist
71 item.albumartist = album_info.artist
72 item.album = album_info.album
73
74 # Artist sort and credit names.
75 item.artist_sort = track_info.artist_sort or album_info.artist_sort
76 item.artist_credit = (track_info.artist_credit or
77 album_info.artist_credit)
78 item.albumartist_sort = album_info.artist_sort
79 item.albumartist_credit = album_info.artist_credit
80
81 # Release date.
82 for prefix in '', 'original_':
83 if config['original_date'] and not prefix:
84 # Ignore specific release date.
85 continue
86
87 for suffix in 'year', 'month', 'day':
88 key = prefix + suffix
89 value = getattr(album_info, key) or 0
90
91 # If we don't even have a year, apply nothing.
92 if suffix == 'year' and not value:
93 break
94
95 # Otherwise, set the fetched value (or 0 for the month
96 # and day if not available).
97 item[key] = value
98
99 # If we're using original release date for both fields,
100 # also set item.year = info.original_year, etc.
101 if config['original_date']:
102 item[suffix] = value
103
104 # Title.
105 item.title = track_info.title
106
107 if config['per_disc_numbering']:
108 # We want to let the track number be zero, but if the medium index
109 # is not provided we need to fall back to the overall index.
110 if track_info.medium_index is not None:
111 item.track = track_info.medium_index
112 else:
113 item.track = track_info.index
114 item.tracktotal = track_info.medium_total or len(album_info.tracks)
115 else:
116 item.track = track_info.index
117 item.tracktotal = len(album_info.tracks)
118
119 # Disc and disc count.
120 item.disc = track_info.medium
121 item.disctotal = album_info.mediums
122
123 # MusicBrainz IDs.
124 item.mb_trackid = track_info.track_id
125 item.mb_albumid = album_info.album_id
126 if track_info.artist_id:
127 item.mb_artistid = track_info.artist_id
128 else:
129 item.mb_artistid = album_info.artist_id
130 item.mb_albumartistid = album_info.artist_id
131 item.mb_releasegroupid = album_info.releasegroup_id
132
133 # Compilation flag.
134 item.comp = album_info.va
135
136 # Miscellaneous metadata.
137 for field in ('albumtype',
138 'label',
139 'asin',
140 'catalognum',
141 'script',
142 'language',
143 'country',
144 'albumstatus',
145 'albumdisambig',
146 'data_source',):
147 value = getattr(album_info, field)
148 if value is not None:
149 item[field] = value
150 if track_info.disctitle is not None:
151 item.disctitle = track_info.disctitle
152
153 if track_info.media is not None:
154 item.media = track_info.media
155
156 if track_info.lyricist is not None:
157 item.lyricist = track_info.lyricist
158 if track_info.composer is not None:
159 item.composer = track_info.composer
160 if track_info.composer_sort is not None:
161 item.composer_sort = track_info.composer_sort
162 if track_info.arranger is not None:
163 item.arranger = track_info.arranger
164
165 item.track_alt = track_info.track_alt
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/beets/autotag/__init__.py b/beets/autotag/__init__.py
--- a/beets/autotag/__init__.py
+++ b/beets/autotag/__init__.py
@@ -63,12 +63,19 @@
mapping from Items to TrackInfo objects.
"""
for item, track_info in mapping.items():
- # Album, artist, track count.
- if track_info.artist:
- item.artist = track_info.artist
+ # Artist or artist credit.
+ if config['artist_credit']:
+ item.artist = (track_info.artist_credit or
+ track_info.artist or
+ album_info.artist_credit or
+ album_info.artist)
+ item.albumartist = (album_info.artist_credit or
+ album_info.artist)
else:
- item.artist = album_info.artist
- item.albumartist = album_info.artist
+ item.artist = (track_info.artist or album_info.artist)
+ item.albumartist = album_info.artist
+
+ # Album.
item.album = album_info.album
# Artist sort and credit names.
|
{"golden_diff": "diff --git a/beets/autotag/__init__.py b/beets/autotag/__init__.py\n--- a/beets/autotag/__init__.py\n+++ b/beets/autotag/__init__.py\n@@ -63,12 +63,19 @@\n mapping from Items to TrackInfo objects.\n \"\"\"\n for item, track_info in mapping.items():\n- # Album, artist, track count.\n- if track_info.artist:\n- item.artist = track_info.artist\n+ # Artist or artist credit.\n+ if config['artist_credit']:\n+ item.artist = (track_info.artist_credit or\n+ track_info.artist or\n+ album_info.artist_credit or\n+ album_info.artist)\n+ item.albumartist = (album_info.artist_credit or\n+ album_info.artist)\n else:\n- item.artist = album_info.artist\n- item.albumartist = album_info.artist\n+ item.artist = (track_info.artist or album_info.artist)\n+ item.albumartist = album_info.artist\n+\n+ # Album.\n item.album = album_info.album\n \n # Artist sort and credit names.\n", "issue": "Use Artist Credits for tag data, but \"actual\" artist data for filenaming\nCurrently beets will always normalise [artist credit](https://musicbrainz.org/doc/Artist_Credit) data to the current artist name. However, I want to see when playing the music when, e.g., [Orgi-E](https://musicbrainz.org/artist/345fe3da-b2cb-4ad4-a1a5-43afc903663d) was credited as [Klamfyr](https://musicbrainz.org/release/d09b3568-e9cc-4458-bcf7-0c215cca75ce), but I still like the normalisation for file tree organisation purposes. This should probably be an option though, as other people will likely want to always normalise the name (and others might want to not normalise the name in the path as well).\n\n(Somewhat related morituri issues: thomasvs/morituri#80, thomasvs/morituri#48)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Facilities for automatically determining files' correct metadata.\n\"\"\"\n\nfrom __future__ import division, absolute_import, print_function\n\nfrom beets import logging\nfrom beets import config\n\n# Parts of external interface.\nfrom .hooks import AlbumInfo, TrackInfo, AlbumMatch, TrackMatch # noqa\nfrom .match import tag_item, tag_album, Proposal # noqa\nfrom .match import Recommendation # noqa\n\n# Global logger.\nlog = logging.getLogger('beets')\n\n\n# Additional utilities for the main interface.\n\ndef apply_item_metadata(item, track_info):\n \"\"\"Set an item's metadata from its matched TrackInfo object.\n \"\"\"\n item.artist = track_info.artist\n item.artist_sort = track_info.artist_sort\n item.artist_credit = track_info.artist_credit\n item.title = track_info.title\n item.mb_trackid = track_info.track_id\n if track_info.artist_id:\n item.mb_artistid = track_info.artist_id\n if track_info.data_source:\n item.data_source = track_info.data_source\n\n if track_info.lyricist is not None:\n item.lyricist = track_info.lyricist\n if track_info.composer is not None:\n item.composer = track_info.composer\n if track_info.composer_sort is not 
None:\n item.composer_sort = track_info.composer_sort\n if track_info.arranger is not None:\n item.arranger = track_info.arranger\n\n # At the moment, the other metadata is left intact (including album\n # and track number). Perhaps these should be emptied?\n\n\ndef apply_metadata(album_info, mapping):\n \"\"\"Set the items' metadata to match an AlbumInfo object using a\n mapping from Items to TrackInfo objects.\n \"\"\"\n for item, track_info in mapping.items():\n # Album, artist, track count.\n if track_info.artist:\n item.artist = track_info.artist\n else:\n item.artist = album_info.artist\n item.albumartist = album_info.artist\n item.album = album_info.album\n\n # Artist sort and credit names.\n item.artist_sort = track_info.artist_sort or album_info.artist_sort\n item.artist_credit = (track_info.artist_credit or\n album_info.artist_credit)\n item.albumartist_sort = album_info.artist_sort\n item.albumartist_credit = album_info.artist_credit\n\n # Release date.\n for prefix in '', 'original_':\n if config['original_date'] and not prefix:\n # Ignore specific release date.\n continue\n\n for suffix in 'year', 'month', 'day':\n key = prefix + suffix\n value = getattr(album_info, key) or 0\n\n # If we don't even have a year, apply nothing.\n if suffix == 'year' and not value:\n break\n\n # Otherwise, set the fetched value (or 0 for the month\n # and day if not available).\n item[key] = value\n\n # If we're using original release date for both fields,\n # also set item.year = info.original_year, etc.\n if config['original_date']:\n item[suffix] = value\n\n # Title.\n item.title = track_info.title\n\n if config['per_disc_numbering']:\n # We want to let the track number be zero, but if the medium index\n # is not provided we need to fall back to the overall index.\n if track_info.medium_index is not None:\n item.track = track_info.medium_index\n else:\n item.track = track_info.index\n item.tracktotal = track_info.medium_total or len(album_info.tracks)\n else:\n item.track = track_info.index\n item.tracktotal = len(album_info.tracks)\n\n # Disc and disc count.\n item.disc = track_info.medium\n item.disctotal = album_info.mediums\n\n # MusicBrainz IDs.\n item.mb_trackid = track_info.track_id\n item.mb_albumid = album_info.album_id\n if track_info.artist_id:\n item.mb_artistid = track_info.artist_id\n else:\n item.mb_artistid = album_info.artist_id\n item.mb_albumartistid = album_info.artist_id\n item.mb_releasegroupid = album_info.releasegroup_id\n\n # Compilation flag.\n item.comp = album_info.va\n\n # Miscellaneous metadata.\n for field in ('albumtype',\n 'label',\n 'asin',\n 'catalognum',\n 'script',\n 'language',\n 'country',\n 'albumstatus',\n 'albumdisambig',\n 'data_source',):\n value = getattr(album_info, field)\n if value is not None:\n item[field] = value\n if track_info.disctitle is not None:\n item.disctitle = track_info.disctitle\n\n if track_info.media is not None:\n item.media = track_info.media\n\n if track_info.lyricist is not None:\n item.lyricist = track_info.lyricist\n if track_info.composer is not None:\n item.composer = track_info.composer\n if track_info.composer_sort is not None:\n item.composer_sort = track_info.composer_sort\n if track_info.arranger is not None:\n item.arranger = track_info.arranger\n\n item.track_alt = track_info.track_alt\n", "path": "beets/autotag/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person 
obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Facilities for automatically determining files' correct metadata.\n\"\"\"\n\nfrom __future__ import division, absolute_import, print_function\n\nfrom beets import logging\nfrom beets import config\n\n# Parts of external interface.\nfrom .hooks import AlbumInfo, TrackInfo, AlbumMatch, TrackMatch # noqa\nfrom .match import tag_item, tag_album, Proposal # noqa\nfrom .match import Recommendation # noqa\n\n# Global logger.\nlog = logging.getLogger('beets')\n\n\n# Additional utilities for the main interface.\n\ndef apply_item_metadata(item, track_info):\n \"\"\"Set an item's metadata from its matched TrackInfo object.\n \"\"\"\n item.artist = track_info.artist\n item.artist_sort = track_info.artist_sort\n item.artist_credit = track_info.artist_credit\n item.title = track_info.title\n item.mb_trackid = track_info.track_id\n if track_info.artist_id:\n item.mb_artistid = track_info.artist_id\n if track_info.data_source:\n item.data_source = track_info.data_source\n\n if track_info.lyricist is not None:\n item.lyricist = track_info.lyricist\n if track_info.composer is not None:\n item.composer = track_info.composer\n if track_info.composer_sort is not None:\n item.composer_sort = track_info.composer_sort\n if track_info.arranger is not None:\n item.arranger = track_info.arranger\n\n # At the moment, the other metadata is left intact (including album\n # and track number). 
Perhaps these should be emptied?\n\n\ndef apply_metadata(album_info, mapping):\n \"\"\"Set the items' metadata to match an AlbumInfo object using a\n mapping from Items to TrackInfo objects.\n \"\"\"\n for item, track_info in mapping.items():\n # Artist or artist credit.\n if config['artist_credit']:\n item.artist = (track_info.artist_credit or\n track_info.artist or\n album_info.artist_credit or\n album_info.artist)\n item.albumartist = (album_info.artist_credit or\n album_info.artist)\n else:\n item.artist = (track_info.artist or album_info.artist)\n item.albumartist = album_info.artist\n\n # Album.\n item.album = album_info.album\n\n # Artist sort and credit names.\n item.artist_sort = track_info.artist_sort or album_info.artist_sort\n item.artist_credit = (track_info.artist_credit or\n album_info.artist_credit)\n item.albumartist_sort = album_info.artist_sort\n item.albumartist_credit = album_info.artist_credit\n\n # Release date.\n for prefix in '', 'original_':\n if config['original_date'] and not prefix:\n # Ignore specific release date.\n continue\n\n for suffix in 'year', 'month', 'day':\n key = prefix + suffix\n value = getattr(album_info, key) or 0\n\n # If we don't even have a year, apply nothing.\n if suffix == 'year' and not value:\n break\n\n # Otherwise, set the fetched value (or 0 for the month\n # and day if not available).\n item[key] = value\n\n # If we're using original release date for both fields,\n # also set item.year = info.original_year, etc.\n if config['original_date']:\n item[suffix] = value\n\n # Title.\n item.title = track_info.title\n\n if config['per_disc_numbering']:\n # We want to let the track number be zero, but if the medium index\n # is not provided we need to fall back to the overall index.\n if track_info.medium_index is not None:\n item.track = track_info.medium_index\n else:\n item.track = track_info.index\n item.tracktotal = track_info.medium_total or len(album_info.tracks)\n else:\n item.track = track_info.index\n item.tracktotal = len(album_info.tracks)\n\n # Disc and disc count.\n item.disc = track_info.medium\n item.disctotal = album_info.mediums\n\n # MusicBrainz IDs.\n item.mb_trackid = track_info.track_id\n item.mb_albumid = album_info.album_id\n if track_info.artist_id:\n item.mb_artistid = track_info.artist_id\n else:\n item.mb_artistid = album_info.artist_id\n item.mb_albumartistid = album_info.artist_id\n item.mb_releasegroupid = album_info.releasegroup_id\n\n # Compilation flag.\n item.comp = album_info.va\n\n # Miscellaneous metadata.\n for field in ('albumtype',\n 'label',\n 'asin',\n 'catalognum',\n 'script',\n 'language',\n 'country',\n 'albumstatus',\n 'albumdisambig',\n 'data_source',):\n value = getattr(album_info, field)\n if value is not None:\n item[field] = value\n if track_info.disctitle is not None:\n item.disctitle = track_info.disctitle\n\n if track_info.media is not None:\n item.media = track_info.media\n\n if track_info.lyricist is not None:\n item.lyricist = track_info.lyricist\n if track_info.composer is not None:\n item.composer = track_info.composer\n if track_info.composer_sort is not None:\n item.composer_sort = track_info.composer_sort\n if track_info.arranger is not None:\n item.arranger = track_info.arranger\n\n item.track_alt = track_info.track_alt\n", "path": "beets/autotag/__init__.py"}]}
| 2,177 | 249 |
gh_patches_debug_3528
|
rasdani/github-patches
|
git_diff
|
pytorch__examples-1109
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
word Language Model bug
self.decoder = nn.Linear(**ninp**, ntoken) in model.py line 124 should be "nhid"
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `word_language_model/model.py`
Content:
```
1 import math
2 import torch
3 import torch.nn as nn
4 import torch.nn.functional as F
5
6 class RNNModel(nn.Module):
7 """Container module with an encoder, a recurrent module, and a decoder."""
8
9 def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
10 super(RNNModel, self).__init__()
11 self.ntoken = ntoken
12 self.drop = nn.Dropout(dropout)
13 self.encoder = nn.Embedding(ntoken, ninp)
14 if rnn_type in ['LSTM', 'GRU']:
15 self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
16 else:
17 try:
18 nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
19 except KeyError as e:
20 raise ValueError( """An invalid option for `--model` was supplied,
21 options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""") from e
22 self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
23 self.decoder = nn.Linear(nhid, ntoken)
24
25 # Optionally tie weights as in:
26 # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
27 # https://arxiv.org/abs/1608.05859
28 # and
29 # "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
30 # https://arxiv.org/abs/1611.01462
31 if tie_weights:
32 if nhid != ninp:
33 raise ValueError('When using the tied flag, nhid must be equal to emsize')
34 self.decoder.weight = self.encoder.weight
35
36 self.init_weights()
37
38 self.rnn_type = rnn_type
39 self.nhid = nhid
40 self.nlayers = nlayers
41
42 def init_weights(self):
43 initrange = 0.1
44 nn.init.uniform_(self.encoder.weight, -initrange, initrange)
45 nn.init.zeros_(self.decoder.bias)
46 nn.init.uniform_(self.decoder.weight, -initrange, initrange)
47
48 def forward(self, input, hidden):
49 emb = self.drop(self.encoder(input))
50 output, hidden = self.rnn(emb, hidden)
51 output = self.drop(output)
52 decoded = self.decoder(output)
53 decoded = decoded.view(-1, self.ntoken)
54 return F.log_softmax(decoded, dim=1), hidden
55
56 def init_hidden(self, bsz):
57 weight = next(self.parameters())
58 if self.rnn_type == 'LSTM':
59 return (weight.new_zeros(self.nlayers, bsz, self.nhid),
60 weight.new_zeros(self.nlayers, bsz, self.nhid))
61 else:
62 return weight.new_zeros(self.nlayers, bsz, self.nhid)
63
64 # Temporarily leave PositionalEncoding module here. Will be moved somewhere else.
65 class PositionalEncoding(nn.Module):
66 r"""Inject some information about the relative or absolute position of the tokens in the sequence.
67 The positional encodings have the same dimension as the embeddings, so that the two can be summed.
68 Here, we use sine and cosine functions of different frequencies.
69 .. math:
70 \text{PosEncoder}(pos, 2i) = sin(pos/10000^(2i/d_model))
71 \text{PosEncoder}(pos, 2i+1) = cos(pos/10000^(2i/d_model))
72 \text{where pos is the word position and i is the embed idx)
73 Args:
74 d_model: the embed dim (required).
75 dropout: the dropout value (default=0.1).
76 max_len: the max. length of the incoming sequence (default=5000).
77 Examples:
78 >>> pos_encoder = PositionalEncoding(d_model)
79 """
80
81 def __init__(self, d_model, dropout=0.1, max_len=5000):
82 super(PositionalEncoding, self).__init__()
83 self.dropout = nn.Dropout(p=dropout)
84
85 pe = torch.zeros(max_len, d_model)
86 position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
87 div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
88 pe[:, 0::2] = torch.sin(position * div_term)
89 pe[:, 1::2] = torch.cos(position * div_term)
90 pe = pe.unsqueeze(0).transpose(0, 1)
91 self.register_buffer('pe', pe)
92
93 def forward(self, x):
94 r"""Inputs of forward function
95 Args:
96 x: the sequence fed to the positional encoder model (required).
97 Shape:
98 x: [sequence length, batch size, embed dim]
99 output: [sequence length, batch size, embed dim]
100 Examples:
101 >>> output = pos_encoder(x)
102 """
103
104 x = x + self.pe[:x.size(0), :]
105 return self.dropout(x)
106
107 class TransformerModel(nn.Module):
108 """Container module with an encoder, a recurrent or transformer module, and a decoder."""
109
110 def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
111 super(TransformerModel, self).__init__()
112 try:
113 from torch.nn import TransformerEncoder, TransformerEncoderLayer
114 except BaseException as e:
115 raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or '
116 'lower.') from e
117 self.model_type = 'Transformer'
118 self.src_mask = None
119 self.pos_encoder = PositionalEncoding(ninp, dropout)
120 encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
121 self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
122 self.encoder = nn.Embedding(ntoken, ninp)
123 self.ninp = ninp
124 self.decoder = nn.Linear(ninp, ntoken)
125
126 self.init_weights()
127
128 def _generate_square_subsequent_mask(self, sz):
129 mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
130 mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
131 return mask
132
133 def init_weights(self):
134 initrange = 0.1
135 nn.init.uniform_(self.encoder.weight, -initrange, initrange)
136 nn.init.zeros_(self.decoder.bias)
137 nn.init.uniform_(self.decoder.weight, -initrange, initrange)
138
139 def forward(self, src, has_mask=True):
140 if has_mask:
141 device = src.device
142 if self.src_mask is None or self.src_mask.size(0) != len(src):
143 mask = self._generate_square_subsequent_mask(len(src)).to(device)
144 self.src_mask = mask
145 else:
146 self.src_mask = None
147
148 src = self.encoder(src) * math.sqrt(self.ninp)
149 src = self.pos_encoder(src)
150 output = self.transformer_encoder(src, self.src_mask)
151 output = self.decoder(output)
152 return F.log_softmax(output, dim=-1)
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/word_language_model/model.py b/word_language_model/model.py
--- a/word_language_model/model.py
+++ b/word_language_model/model.py
@@ -121,7 +121,7 @@
self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
self.encoder = nn.Embedding(ntoken, ninp)
self.ninp = ninp
- self.decoder = nn.Linear(ninp, ntoken)
+ self.decoder = nn.Linear(nhid, ntoken)
self.init_weights()
|
{"golden_diff": "diff --git a/word_language_model/model.py b/word_language_model/model.py\n--- a/word_language_model/model.py\n+++ b/word_language_model/model.py\n@@ -121,7 +121,7 @@\n self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)\n self.encoder = nn.Embedding(ntoken, ninp)\n self.ninp = ninp\n- self.decoder = nn.Linear(ninp, ntoken)\n+ self.decoder = nn.Linear(nhid, ntoken)\n \n self.init_weights()\n", "issue": "word Language Model bug\nself.decoder = nn.Linear(**ninp**, ntoken) in model.py line 124 shoud be \"nhid\" \n", "before_files": [{"content": "import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass RNNModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):\n super(RNNModel, self).__init__()\n self.ntoken = ntoken\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(ntoken, ninp)\n if rnn_type in ['LSTM', 'GRU']:\n self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)\n else:\n try:\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]\n except KeyError as e:\n raise ValueError( \"\"\"An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']\"\"\") from e\n self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)\n self.decoder = nn.Linear(nhid, ntoken)\n\n # Optionally tie weights as in:\n # \"Using the Output Embedding to Improve Language Models\" (Press & Wolf 2016)\n # https://arxiv.org/abs/1608.05859\n # and\n # \"Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling\" (Inan et al. 2016)\n # https://arxiv.org/abs/1611.01462\n if tie_weights:\n if nhid != ninp:\n raise ValueError('When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.encoder.weight\n\n self.init_weights()\n\n self.rnn_type = rnn_type\n self.nhid = nhid\n self.nlayers = nlayers\n\n def init_weights(self):\n initrange = 0.1\n nn.init.uniform_(self.encoder.weight, -initrange, initrange)\n nn.init.zeros_(self.decoder.bias)\n nn.init.uniform_(self.decoder.weight, -initrange, initrange)\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output)\n decoded = decoded.view(-1, self.ntoken)\n return F.log_softmax(decoded, dim=1), hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters())\n if self.rnn_type == 'LSTM':\n return (weight.new_zeros(self.nlayers, bsz, self.nhid),\n weight.new_zeros(self.nlayers, bsz, self.nhid))\n else:\n return weight.new_zeros(self.nlayers, bsz, self.nhid)\n\n# Temporarily leave PositionalEncoding module here. Will be moved somewhere else.\nclass PositionalEncoding(nn.Module):\n r\"\"\"Inject some information about the relative or absolute position of the tokens in the sequence.\n The positional encodings have the same dimension as the embeddings, so that the two can be summed.\n Here, we use sine and cosine functions of different frequencies.\n .. math:\n \\text{PosEncoder}(pos, 2i) = sin(pos/10000^(2i/d_model))\n \\text{PosEncoder}(pos, 2i+1) = cos(pos/10000^(2i/d_model))\n \\text{where pos is the word position and i is the embed idx)\n Args:\n d_model: the embed dim (required).\n dropout: the dropout value (default=0.1).\n max_len: the max. 
length of the incoming sequence (default=5000).\n Examples:\n >>> pos_encoder = PositionalEncoding(d_model)\n \"\"\"\n\n def __init__(self, d_model, dropout=0.1, max_len=5000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n r\"\"\"Inputs of forward function\n Args:\n x: the sequence fed to the positional encoder model (required).\n Shape:\n x: [sequence length, batch size, embed dim]\n output: [sequence length, batch size, embed dim]\n Examples:\n >>> output = pos_encoder(x)\n \"\"\"\n\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)\n\nclass TransformerModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent or transformer module, and a decoder.\"\"\"\n\n def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):\n super(TransformerModel, self).__init__()\n try:\n from torch.nn import TransformerEncoder, TransformerEncoderLayer\n except BaseException as e:\n raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or '\n 'lower.') from e\n self.model_type = 'Transformer'\n self.src_mask = None\n self.pos_encoder = PositionalEncoding(ninp, dropout)\n encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)\n self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)\n self.encoder = nn.Embedding(ntoken, ninp)\n self.ninp = ninp\n self.decoder = nn.Linear(ninp, ntoken)\n\n self.init_weights()\n\n def _generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n\n def init_weights(self):\n initrange = 0.1\n nn.init.uniform_(self.encoder.weight, -initrange, initrange)\n nn.init.zeros_(self.decoder.bias)\n nn.init.uniform_(self.decoder.weight, -initrange, initrange)\n\n def forward(self, src, has_mask=True):\n if has_mask:\n device = src.device\n if self.src_mask is None or self.src_mask.size(0) != len(src):\n mask = self._generate_square_subsequent_mask(len(src)).to(device)\n self.src_mask = mask\n else:\n self.src_mask = None\n\n src = self.encoder(src) * math.sqrt(self.ninp)\n src = self.pos_encoder(src)\n output = self.transformer_encoder(src, self.src_mask)\n output = self.decoder(output)\n return F.log_softmax(output, dim=-1)\n", "path": "word_language_model/model.py"}], "after_files": [{"content": "import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass RNNModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):\n super(RNNModel, self).__init__()\n self.ntoken = ntoken\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(ntoken, ninp)\n if rnn_type in ['LSTM', 'GRU']:\n self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)\n else:\n try:\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]\n except KeyError as e:\n raise ValueError( \"\"\"An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 
'RNN_TANH' or 'RNN_RELU']\"\"\") from e\n self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)\n self.decoder = nn.Linear(nhid, ntoken)\n\n # Optionally tie weights as in:\n # \"Using the Output Embedding to Improve Language Models\" (Press & Wolf 2016)\n # https://arxiv.org/abs/1608.05859\n # and\n # \"Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling\" (Inan et al. 2016)\n # https://arxiv.org/abs/1611.01462\n if tie_weights:\n if nhid != ninp:\n raise ValueError('When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.encoder.weight\n\n self.init_weights()\n\n self.rnn_type = rnn_type\n self.nhid = nhid\n self.nlayers = nlayers\n\n def init_weights(self):\n initrange = 0.1\n nn.init.uniform_(self.encoder.weight, -initrange, initrange)\n nn.init.zeros_(self.decoder.bias)\n nn.init.uniform_(self.decoder.weight, -initrange, initrange)\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output)\n decoded = decoded.view(-1, self.ntoken)\n return F.log_softmax(decoded, dim=1), hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters())\n if self.rnn_type == 'LSTM':\n return (weight.new_zeros(self.nlayers, bsz, self.nhid),\n weight.new_zeros(self.nlayers, bsz, self.nhid))\n else:\n return weight.new_zeros(self.nlayers, bsz, self.nhid)\n\n# Temporarily leave PositionalEncoding module here. Will be moved somewhere else.\nclass PositionalEncoding(nn.Module):\n r\"\"\"Inject some information about the relative or absolute position of the tokens in the sequence.\n The positional encodings have the same dimension as the embeddings, so that the two can be summed.\n Here, we use sine and cosine functions of different frequencies.\n .. math:\n \\text{PosEncoder}(pos, 2i) = sin(pos/10000^(2i/d_model))\n \\text{PosEncoder}(pos, 2i+1) = cos(pos/10000^(2i/d_model))\n \\text{where pos is the word position and i is the embed idx)\n Args:\n d_model: the embed dim (required).\n dropout: the dropout value (default=0.1).\n max_len: the max. 
length of the incoming sequence (default=5000).\n Examples:\n >>> pos_encoder = PositionalEncoding(d_model)\n \"\"\"\n\n def __init__(self, d_model, dropout=0.1, max_len=5000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n r\"\"\"Inputs of forward function\n Args:\n x: the sequence fed to the positional encoder model (required).\n Shape:\n x: [sequence length, batch size, embed dim]\n output: [sequence length, batch size, embed dim]\n Examples:\n >>> output = pos_encoder(x)\n \"\"\"\n\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)\n\nclass TransformerModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent or transformer module, and a decoder.\"\"\"\n\n def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):\n super(TransformerModel, self).__init__()\n try:\n from torch.nn import TransformerEncoder, TransformerEncoderLayer\n except BaseException as e:\n raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or '\n 'lower.') from e\n self.model_type = 'Transformer'\n self.src_mask = None\n self.pos_encoder = PositionalEncoding(ninp, dropout)\n encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)\n self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)\n self.encoder = nn.Embedding(ntoken, ninp)\n self.ninp = ninp\n self.decoder = nn.Linear(nhid, ntoken)\n\n self.init_weights()\n\n def _generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n\n def init_weights(self):\n initrange = 0.1\n nn.init.uniform_(self.encoder.weight, -initrange, initrange)\n nn.init.zeros_(self.decoder.bias)\n nn.init.uniform_(self.decoder.weight, -initrange, initrange)\n\n def forward(self, src, has_mask=True):\n if has_mask:\n device = src.device\n if self.src_mask is None or self.src_mask.size(0) != len(src):\n mask = self._generate_square_subsequent_mask(len(src)).to(device)\n self.src_mask = mask\n else:\n self.src_mask = None\n\n src = self.encoder(src) * math.sqrt(self.ninp)\n src = self.pos_encoder(src)\n output = self.transformer_encoder(src, self.src_mask)\n output = self.decoder(output)\n return F.log_softmax(output, dim=-1)\n", "path": "word_language_model/model.py"}]}
| 2,261 | 117 |
gh_patches_debug_35169
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-1540
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Restore access to India-Gujarat (IN-GJ) wind and solar data
Hey everyone,
looks like SLDCGUJ has also removed the access to real-time data websites for wind and solar, just like they did for the conventional real-time data.
The solution should be really simple: We can use the same link which still gives access to that ["print" page](http://www.sldcguj.com/RealTimeData/PrintPage.php?page=realtimedemand.php). It has wind and solar generation on the right side, too ;)

Greetings, Alex
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/IN_GJ.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import re
4 import collections
5 from operator import itemgetter
6 import arrow
7 import requests
8 import itertools
9 from .lib import zonekey, web, IN
10 from .lib.validation import validate
11 from logging import getLogger
12
13
14 station_map = {
15 "coal": ["Ukai(1-5)+Ukai6",
16 "Wanakbori",
17 "Gandhinagar",
18 "Sikka(3-4)",
19 "KLTPS(1-3)+KLTPS4",
20 "SLPP(I+II)",
21 "Akrimota",
22 "TPAECo",
23 "EPGL(I+II)",
24 "Adani(I+II+III)",
25 "BECL(I+II)",
26 "CGPL"],
27 "hydro": ["Ukai(Hydro)",
28 "Kadana(Hydro)",
29 "SSP(RBPH)"],
30 "gas": ["Utran(Gas)(II)",
31 "Dhuvaran(Gas)(I)+(II)+(III)",
32 "GIPCL(I)+(II)",
33 "GSEG(I+II)",
34 "GPPC",
35 "CLPI",
36 "KAWAS",
37 "Sugen+Unosgn",
38 "JHANOR"],
39 "nuclear": ["KAPP"]
40 }
41
42
43 def split_and_sum(expression):
44 """
45 Avoid using literal_eval for simple addition expressions.
46 Returns sum of all positive numbers.
47 """
48
49 split_vals = expression.split('+')
50 float_vals = [float(v) for v in split_vals]
51 total = sum([v for v in float_vals if v > 0.0])
52
53 return total
54
55
56 def fetch_data(zone_key, session=None, logger=None):
57 session = session or requests.session()
58
59 values = collections.Counter()
60 zonekey.assert_zone_key(zone_key, 'IN-GJ')
61
62 solar_html = web.get_response_soup(
63 zone_key, 'https://www.sldcguj.com/RealTimeData/GujSolar.php', session)
64 wind_html = web.get_response_soup(
65 zone_key, 'https://www.sldcguj.com/RealTimeData/wind.php', session)
66
67 values['date'] = arrow.get(
68 solar_html.find_all('tr')[0].text.split('\t')[-1].strip()
69 + ' Asia/Kolkata', 'D-MM-YYYY H:mm:ss ZZZ').datetime
70 values['solar'] = split_and_sum(
71 solar_html.find_all('tr')[-1].find_all('td')[-1].text.strip())
72 values['wind'] = split_and_sum(
73 wind_html.find_all('tr')[-1].find_all('td')[-1].text.strip())
74
75 cookies_params = {
76 'ASPSESSIONIDSUQQQTRD': 'ODMNNHADJFGCMLFFGFEMOGBL',
77 'PHPSESSID': 'a301jk6p1p8d50dduflceeg6l1'
78 }
79
80 rows = web.get_response_soup(
81 zone_key,
82 'http://www.sldcguj.com/RealTimeData/PrintPage.php?page=realtimedemand.php',
83 session).find_all('tr')
84
85 for row in rows:
86 elements = row.find_all('td')
87 if len(elements) > 3: # will find production rows
88 v1, v2 = (re.sub(r'\s+', r'', x.text)
89 for x in itemgetter(*[0, 3])(elements))
90 energy_type = [k for k, v in station_map.items() if v1 in v]
91 if len(energy_type) > 0:
92 v2 = split_and_sum(v2)
93 values[energy_type[0]] += v2
94 else:
95 if 'StationName' in (v1, v2): # meta data row
96 continue
97 elif 'DSMRate' in v2: # demand side management
98 continue
99 else:
100 try:
101 logger.warning(
102 'Unknown fuel for station name: {}'.format(v1),
103 extra={'key': zone_key})
104 v2 = split_and_sum(v2)
105 values['unknown'] += v2
106 except ValueError as e:
107 # handle float failures
108 logger.warning(
109 "couldn't convert {} to float".format(v2),
110 extra={'key': zone_key})
111 continue
112 elif len(elements) == 3: # will find consumption row
113 v1, v2 = (re.sub(r'\s+', r'', x.text)
114 for x in itemgetter(*[0, 2])(elements))
115 if v1 == 'GujaratCatered':
116 values['total consumption'] = split_and_sum(v2.split('MW')[0])
117 elif len(elements) == 1:
118 # CGPL/KAPP/KAWAS/JHANOR plants have a different html structure.
119 plant_name = re.sub(r'\s+', r'', elements[0].text)
120 known_plants = itertools.chain.from_iterable(station_map.values())
121
122 if plant_name in known_plants:
123 energy_type = [k for k, v in station_map.items() if plant_name in v][0]
124 generation_tag = row.find_all_next("td")[3]
125 val = float(re.sub(r'\s+', r'', generation_tag.text))
126 if val > 0:
127 values[energy_type] += val
128 else:
129 if plant_name and plant_name != 'GMR':
130 # GMR is outside Gujarat, sometimes plant_name is ''
131 logger.warning(
132 'Unknown fuel for station name: {}'.format(plant_name),
133 extra={'key': zone_key})
134
135 return values
136
137
138 def fetch_production(zone_key='IN-GJ', session=None, target_datetime=None,
139 logger=getLogger('IN-GJ')):
140 """
141 Requests the last known production mix (in MW) of a given country
142 Arguments:
143 zone_key: specifies which zone to get
144 session: request session passed in order to re-use an existing session
145 target_datetime: the datetime for which we want production data. If not provided, we should
146 default it to now. The provided target_datetime is timezone-aware in UTC.
147 logger: an instance of a `logging.Logger`; all raised exceptions are also logged automatically
148 Return:
149 A list of dictionaries in the form:
150 {
151 'zoneKey': 'FR',
152 'datetime': '2017-01-01T00:00:00Z',
153 'production': {
154 'biomass': 0.0,
155 'coal': 0.0,
156 'gas': 0.0,
157 'hydro': 0.0,
158 'nuclear': null,
159 'oil': 0.0,
160 'solar': 0.0,
161 'wind': 0.0,
162 'geothermal': 0.0,
163 'unknown': 0.0
164 },
165 'storage': {
166 'hydro': -10.0,
167 },
168 'source': 'mysource.com'
169 }
170 """
171 session = session or requests.session()
172 if target_datetime:
173 raise NotImplementedError(
174 'This parser is not yet able to parse past dates')
175
176 value_map = fetch_data(zone_key, session, logger=logger)
177
178 data = {
179 'zoneKey': zone_key,
180 'datetime': value_map['date'],
181 'production': {
182 'biomass': None,
183 'coal': value_map.get('coal', 0),
184 'gas': value_map.get('gas', 0),
185 'hydro': value_map.get('hydro', 0),
186 'nuclear': value_map.get('nuclear', 0),
187 'oil': None,
188 'solar': value_map.get('solar', 0),
189 'wind': value_map.get('wind', 0),
190 'geothermal': None,
191 'unknown': value_map.get('unknown', 0)
192 },
193 'storage': {
194 'hydro': None
195 },
196 'source': 'sldcguj.com',
197 }
198
199 valid_data = validate(data, logger, remove_negative=True, floor=7000)
200
201 return valid_data
202
203
204 def fetch_consumption(zone_key='IN-GJ', session=None, target_datetime=None,
205 logger=getLogger('IN-GJ')):
206 """
207 Method to get consumption data of Gujarat
208 :param zone_key:
209 :param session:
210 :return:
211 """
212 session = session or requests.session()
213 if target_datetime:
214 raise NotImplementedError(
215 'This parser is not yet able to parse past dates')
216
217 value_map = fetch_data(zone_key, session, logger=logger)
218
219 data = {
220 'zoneKey': zone_key,
221 'datetime': value_map['date'],
222 'consumption': value_map['total consumption'],
223 'source': 'sldcguj.com'
224 }
225
226 return data
227
228
229 if __name__ == '__main__':
230 session = requests.Session()
231 print(fetch_production('IN-GJ', session))
232 print(fetch_consumption('IN-GJ', session))
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsers/IN_GJ.py b/parsers/IN_GJ.py
--- a/parsers/IN_GJ.py
+++ b/parsers/IN_GJ.py
@@ -10,6 +10,7 @@
from .lib.validation import validate
from logging import getLogger
+SLDCGUJ_URL = 'http://www.sldcguj.com/RealTimeData/PrintPage.php?page=realtimedemand.php'
station_map = {
"coal": ["Ukai(1-5)+Ukai6",
@@ -59,29 +60,23 @@
values = collections.Counter()
zonekey.assert_zone_key(zone_key, 'IN-GJ')
- solar_html = web.get_response_soup(
- zone_key, 'https://www.sldcguj.com/RealTimeData/GujSolar.php', session)
- wind_html = web.get_response_soup(
- zone_key, 'https://www.sldcguj.com/RealTimeData/wind.php', session)
-
- values['date'] = arrow.get(
- solar_html.find_all('tr')[0].text.split('\t')[-1].strip()
- + ' Asia/Kolkata', 'D-MM-YYYY H:mm:ss ZZZ').datetime
- values['solar'] = split_and_sum(
- solar_html.find_all('tr')[-1].find_all('td')[-1].text.strip())
- values['wind'] = split_and_sum(
- wind_html.find_all('tr')[-1].find_all('td')[-1].text.strip())
-
cookies_params = {
'ASPSESSIONIDSUQQQTRD': 'ODMNNHADJFGCMLFFGFEMOGBL',
'PHPSESSID': 'a301jk6p1p8d50dduflceeg6l1'
}
- rows = web.get_response_soup(
- zone_key,
- 'http://www.sldcguj.com/RealTimeData/PrintPage.php?page=realtimedemand.php',
- session).find_all('tr')
+ soup = web.get_response_soup(zone_key, SLDCGUJ_URL, session)
+ rows = soup.find_all('tr')
+ cells = [c.text.strip() for c in soup.find_all('td')]
+ # get wind and solar values
+ values['date'] = arrow.get(cells[1], 'DD-MM-YYYY HH:mm:ss').replace(
+ tzinfo='Asia/Kolkata')
+ [wind_solar_index] = [i for i, c in enumerate(cells) if c == '(Wind+Solar) Generation']
+ value = cells[wind_solar_index + 1]
+ values['wind'], values['solar'] = [int(v) for v in value.split(' + ')]
+
+ # get other production values
for row in rows:
elements = row.find_all('td')
if len(elements) > 3: # will find production rows
@@ -177,7 +172,7 @@
data = {
'zoneKey': zone_key,
- 'datetime': value_map['date'],
+ 'datetime': value_map['date'].datetime,
'production': {
'biomass': None,
'coal': value_map.get('coal', 0),
|
{"golden_diff": "diff --git a/parsers/IN_GJ.py b/parsers/IN_GJ.py\n--- a/parsers/IN_GJ.py\n+++ b/parsers/IN_GJ.py\n@@ -10,6 +10,7 @@\n from .lib.validation import validate\n from logging import getLogger\n \n+SLDCGUJ_URL = 'http://www.sldcguj.com/RealTimeData/PrintPage.php?page=realtimedemand.php'\n \n station_map = {\n \"coal\": [\"Ukai(1-5)+Ukai6\",\n@@ -59,29 +60,23 @@\n values = collections.Counter()\n zonekey.assert_zone_key(zone_key, 'IN-GJ')\n \n- solar_html = web.get_response_soup(\n- zone_key, 'https://www.sldcguj.com/RealTimeData/GujSolar.php', session)\n- wind_html = web.get_response_soup(\n- zone_key, 'https://www.sldcguj.com/RealTimeData/wind.php', session)\n-\n- values['date'] = arrow.get(\n- solar_html.find_all('tr')[0].text.split('\\t')[-1].strip()\n- + ' Asia/Kolkata', 'D-MM-YYYY H:mm:ss ZZZ').datetime\n- values['solar'] = split_and_sum(\n- solar_html.find_all('tr')[-1].find_all('td')[-1].text.strip())\n- values['wind'] = split_and_sum(\n- wind_html.find_all('tr')[-1].find_all('td')[-1].text.strip())\n-\n cookies_params = {\n 'ASPSESSIONIDSUQQQTRD': 'ODMNNHADJFGCMLFFGFEMOGBL',\n 'PHPSESSID': 'a301jk6p1p8d50dduflceeg6l1'\n }\n \n- rows = web.get_response_soup(\n- zone_key,\n- 'http://www.sldcguj.com/RealTimeData/PrintPage.php?page=realtimedemand.php',\n- session).find_all('tr')\n+ soup = web.get_response_soup(zone_key, SLDCGUJ_URL, session)\n+ rows = soup.find_all('tr')\n+ cells = [c.text.strip() for c in soup.find_all('td')]\n \n+ # get wind and solar values\n+ values['date'] = arrow.get(cells[1], 'DD-MM-YYYY HH:mm:ss').replace(\n+ tzinfo='Asia/Kolkata')\n+ [wind_solar_index] = [i for i, c in enumerate(cells) if c == '(Wind+Solar) Generation']\n+ value = cells[wind_solar_index + 1]\n+ values['wind'], values['solar'] = [int(v) for v in value.split(' + ')]\n+\n+ # get other production values\n for row in rows:\n elements = row.find_all('td')\n if len(elements) > 3: # will find production rows\n@@ -177,7 +172,7 @@\n \n data = {\n 'zoneKey': zone_key,\n- 'datetime': value_map['date'],\n+ 'datetime': value_map['date'].datetime,\n 'production': {\n 'biomass': None,\n 'coal': value_map.get('coal', 0),\n", "issue": "Restore access to India-Gujarat (IN-GJ) wind and solar data\nHey everyone,\r\n\r\nlooks like SLDCGUJ has also removed the access to real-time data websites for wind and solar, just like they did for the conventional real-time data.\r\nThe solution should be really simple: We can use the same link which still gives access to that [\"print\" page](http://www.sldcguj.com/RealTimeData/PrintPage.php?page=realtimedemand.php). 
It has wind and solar generation on the right side, too ;)\r\n\r\n\r\n\r\nGreetings, Alex\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport re\nimport collections\nfrom operator import itemgetter\nimport arrow\nimport requests\nimport itertools\nfrom .lib import zonekey, web, IN\nfrom .lib.validation import validate\nfrom logging import getLogger\n\n\nstation_map = {\n \"coal\": [\"Ukai(1-5)+Ukai6\",\n \"Wanakbori\",\n \"Gandhinagar\",\n \"Sikka(3-4)\",\n \"KLTPS(1-3)+KLTPS4\",\n \"SLPP(I+II)\",\n \"Akrimota\",\n \"TPAECo\",\n \"EPGL(I+II)\",\n \"Adani(I+II+III)\",\n \"BECL(I+II)\",\n \"CGPL\"],\n \"hydro\": [\"Ukai(Hydro)\",\n \"Kadana(Hydro)\",\n \"SSP(RBPH)\"],\n \"gas\": [\"Utran(Gas)(II)\",\n \"Dhuvaran(Gas)(I)+(II)+(III)\",\n \"GIPCL(I)+(II)\",\n \"GSEG(I+II)\",\n \"GPPC\",\n \"CLPI\",\n \"KAWAS\",\n \"Sugen+Unosgn\",\n \"JHANOR\"],\n \"nuclear\": [\"KAPP\"]\n}\n\n\ndef split_and_sum(expression):\n \"\"\"\n Avoid using literal_eval for simple addition expressions.\n Returns sum of all positive numbers.\n \"\"\"\n\n split_vals = expression.split('+')\n float_vals = [float(v) for v in split_vals]\n total = sum([v for v in float_vals if v > 0.0])\n\n return total\n\n\ndef fetch_data(zone_key, session=None, logger=None):\n session = session or requests.session()\n\n values = collections.Counter()\n zonekey.assert_zone_key(zone_key, 'IN-GJ')\n\n solar_html = web.get_response_soup(\n zone_key, 'https://www.sldcguj.com/RealTimeData/GujSolar.php', session)\n wind_html = web.get_response_soup(\n zone_key, 'https://www.sldcguj.com/RealTimeData/wind.php', session)\n\n values['date'] = arrow.get(\n solar_html.find_all('tr')[0].text.split('\\t')[-1].strip()\n + ' Asia/Kolkata', 'D-MM-YYYY H:mm:ss ZZZ').datetime\n values['solar'] = split_and_sum(\n solar_html.find_all('tr')[-1].find_all('td')[-1].text.strip())\n values['wind'] = split_and_sum(\n wind_html.find_all('tr')[-1].find_all('td')[-1].text.strip())\n\n cookies_params = {\n 'ASPSESSIONIDSUQQQTRD': 'ODMNNHADJFGCMLFFGFEMOGBL',\n 'PHPSESSID': 'a301jk6p1p8d50dduflceeg6l1'\n }\n\n rows = web.get_response_soup(\n zone_key,\n 'http://www.sldcguj.com/RealTimeData/PrintPage.php?page=realtimedemand.php',\n session).find_all('tr')\n\n for row in rows:\n elements = row.find_all('td')\n if len(elements) > 3: # will find production rows\n v1, v2 = (re.sub(r'\\s+', r'', x.text)\n for x in itemgetter(*[0, 3])(elements))\n energy_type = [k for k, v in station_map.items() if v1 in v]\n if len(energy_type) > 0:\n v2 = split_and_sum(v2)\n values[energy_type[0]] += v2\n else:\n if 'StationName' in (v1, v2): # meta data row\n continue\n elif 'DSMRate' in v2: # demand side management\n continue\n else:\n try:\n logger.warning(\n 'Unknown fuel for station name: {}'.format(v1),\n extra={'key': zone_key})\n v2 = split_and_sum(v2)\n values['unknown'] += v2\n except ValueError as e:\n # handle float failures\n logger.warning(\n \"couldn't convert {} to float\".format(v2),\n extra={'key': zone_key})\n continue\n elif len(elements) == 3: # will find consumption row\n v1, v2 = (re.sub(r'\\s+', r'', x.text)\n for x in itemgetter(*[0, 2])(elements))\n if v1 == 'GujaratCatered':\n values['total consumption'] = split_and_sum(v2.split('MW')[0])\n elif len(elements) == 1:\n # CGPL/KAPP/KAWAS/JHANOR plants have a different html structure.\n plant_name = re.sub(r'\\s+', r'', elements[0].text)\n known_plants = itertools.chain.from_iterable(station_map.values())\n\n if plant_name in known_plants:\n energy_type = [k for k, v in station_map.items() if plant_name in 
v][0]\n generation_tag = row.find_all_next(\"td\")[3]\n val = float(re.sub(r'\\s+', r'', generation_tag.text))\n if val > 0:\n values[energy_type] += val\n else:\n if plant_name and plant_name != 'GMR':\n # GMR is outside Gujarat, sometimes plant_name is ''\n logger.warning(\n 'Unknown fuel for station name: {}'.format(plant_name),\n extra={'key': zone_key})\n\n return values\n\n\ndef fetch_production(zone_key='IN-GJ', session=None, target_datetime=None,\n logger=getLogger('IN-GJ')):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key: specifies which zone to get\n session: request session passed in order to re-use an existing session\n target_datetime: the datetime for which we want production data. If not provided, we should\n default it to now. The provided target_datetime is timezone-aware in UTC.\n logger: an instance of a `logging.Logger`; all raised exceptions are also logged automatically\n Return:\n A list of dictionaries in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n session = session or requests.session()\n if target_datetime:\n raise NotImplementedError(\n 'This parser is not yet able to parse past dates')\n\n value_map = fetch_data(zone_key, session, logger=logger)\n\n data = {\n 'zoneKey': zone_key,\n 'datetime': value_map['date'],\n 'production': {\n 'biomass': None,\n 'coal': value_map.get('coal', 0),\n 'gas': value_map.get('gas', 0),\n 'hydro': value_map.get('hydro', 0),\n 'nuclear': value_map.get('nuclear', 0),\n 'oil': None,\n 'solar': value_map.get('solar', 0),\n 'wind': value_map.get('wind', 0),\n 'geothermal': None,\n 'unknown': value_map.get('unknown', 0)\n },\n 'storage': {\n 'hydro': None\n },\n 'source': 'sldcguj.com',\n }\n\n valid_data = validate(data, logger, remove_negative=True, floor=7000)\n\n return valid_data\n\n\ndef fetch_consumption(zone_key='IN-GJ', session=None, target_datetime=None,\n logger=getLogger('IN-GJ')):\n \"\"\"\n Method to get consumption data of Gujarat\n :param zone_key:\n :param session:\n :return:\n \"\"\"\n session = session or requests.session()\n if target_datetime:\n raise NotImplementedError(\n 'This parser is not yet able to parse past dates')\n\n value_map = fetch_data(zone_key, session, logger=logger)\n\n data = {\n 'zoneKey': zone_key,\n 'datetime': value_map['date'],\n 'consumption': value_map['total consumption'],\n 'source': 'sldcguj.com'\n }\n\n return data\n\n\nif __name__ == '__main__':\n session = requests.Session()\n print(fetch_production('IN-GJ', session))\n print(fetch_consumption('IN-GJ', session))\n", "path": "parsers/IN_GJ.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport re\nimport collections\nfrom operator import itemgetter\nimport arrow\nimport requests\nimport itertools\nfrom .lib import zonekey, web, IN\nfrom .lib.validation import validate\nfrom logging import getLogger\n\nSLDCGUJ_URL = 'http://www.sldcguj.com/RealTimeData/PrintPage.php?page=realtimedemand.php'\n\nstation_map = {\n \"coal\": [\"Ukai(1-5)+Ukai6\",\n \"Wanakbori\",\n \"Gandhinagar\",\n \"Sikka(3-4)\",\n \"KLTPS(1-3)+KLTPS4\",\n \"SLPP(I+II)\",\n \"Akrimota\",\n \"TPAECo\",\n \"EPGL(I+II)\",\n \"Adani(I+II+III)\",\n \"BECL(I+II)\",\n \"CGPL\"],\n \"hydro\": [\"Ukai(Hydro)\",\n 
\"Kadana(Hydro)\",\n \"SSP(RBPH)\"],\n \"gas\": [\"Utran(Gas)(II)\",\n \"Dhuvaran(Gas)(I)+(II)+(III)\",\n \"GIPCL(I)+(II)\",\n \"GSEG(I+II)\",\n \"GPPC\",\n \"CLPI\",\n \"KAWAS\",\n \"Sugen+Unosgn\",\n \"JHANOR\"],\n \"nuclear\": [\"KAPP\"]\n}\n\n\ndef split_and_sum(expression):\n \"\"\"\n Avoid using literal_eval for simple addition expressions.\n Returns sum of all positive numbers.\n \"\"\"\n\n split_vals = expression.split('+')\n float_vals = [float(v) for v in split_vals]\n total = sum([v for v in float_vals if v > 0.0])\n\n return total\n\n\ndef fetch_data(zone_key, session=None, logger=None):\n session = session or requests.session()\n\n values = collections.Counter()\n zonekey.assert_zone_key(zone_key, 'IN-GJ')\n\n cookies_params = {\n 'ASPSESSIONIDSUQQQTRD': 'ODMNNHADJFGCMLFFGFEMOGBL',\n 'PHPSESSID': 'a301jk6p1p8d50dduflceeg6l1'\n }\n\n soup = web.get_response_soup(zone_key, SLDCGUJ_URL, session)\n rows = soup.find_all('tr')\n cells = [c.text.strip() for c in soup.find_all('td')]\n\n # get wind and solar values\n values['date'] = arrow.get(cells[1], 'DD-MM-YYYY HH:mm:ss').replace(\n tzinfo='Asia/Kolkata')\n [wind_solar_index] = [i for i, c in enumerate(cells) if c == '(Wind+Solar) Generation']\n value = cells[wind_solar_index + 1]\n values['wind'], values['solar'] = [int(v) for v in value.split(' + ')]\n\n # get other production values\n for row in rows:\n elements = row.find_all('td')\n if len(elements) > 3: # will find production rows\n v1, v2 = (re.sub(r'\\s+', r'', x.text)\n for x in itemgetter(*[0, 3])(elements))\n energy_type = [k for k, v in station_map.items() if v1 in v]\n if len(energy_type) > 0:\n v2 = split_and_sum(v2)\n values[energy_type[0]] += v2\n else:\n if 'StationName' in (v1, v2): # meta data row\n continue\n elif 'DSMRate' in v2: # demand side management\n continue\n else:\n try:\n logger.warning(\n 'Unknown fuel for station name: {}'.format(v1),\n extra={'key': zone_key})\n v2 = split_and_sum(v2)\n values['unknown'] += v2\n except ValueError as e:\n # handle float failures\n logger.warning(\n \"couldn't convert {} to float\".format(v2),\n extra={'key': zone_key})\n continue\n elif len(elements) == 3: # will find consumption row\n v1, v2 = (re.sub(r'\\s+', r'', x.text)\n for x in itemgetter(*[0, 2])(elements))\n if v1 == 'GujaratCatered':\n values['total consumption'] = split_and_sum(v2.split('MW')[0])\n elif len(elements) == 1:\n # CGPL/KAPP/KAWAS/JHANOR plants have a different html structure.\n plant_name = re.sub(r'\\s+', r'', elements[0].text)\n known_plants = itertools.chain.from_iterable(station_map.values())\n\n if plant_name in known_plants:\n energy_type = [k for k, v in station_map.items() if plant_name in v][0]\n generation_tag = row.find_all_next(\"td\")[3]\n val = float(re.sub(r'\\s+', r'', generation_tag.text))\n if val > 0:\n values[energy_type] += val\n else:\n if plant_name and plant_name != 'GMR':\n # GMR is outside Gujarat, sometimes plant_name is ''\n logger.warning(\n 'Unknown fuel for station name: {}'.format(plant_name),\n extra={'key': zone_key})\n\n return values\n\n\ndef fetch_production(zone_key='IN-GJ', session=None, target_datetime=None,\n logger=getLogger('IN-GJ')):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key: specifies which zone to get\n session: request session passed in order to re-use an existing session\n target_datetime: the datetime for which we want production data. If not provided, we should\n default it to now. 
The provided target_datetime is timezone-aware in UTC.\n logger: an instance of a `logging.Logger`; all raised exceptions are also logged automatically\n Return:\n A list of dictionaries in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n session = session or requests.session()\n if target_datetime:\n raise NotImplementedError(\n 'This parser is not yet able to parse past dates')\n\n value_map = fetch_data(zone_key, session, logger=logger)\n\n data = {\n 'zoneKey': zone_key,\n 'datetime': value_map['date'].datetime,\n 'production': {\n 'biomass': None,\n 'coal': value_map.get('coal', 0),\n 'gas': value_map.get('gas', 0),\n 'hydro': value_map.get('hydro', 0),\n 'nuclear': value_map.get('nuclear', 0),\n 'oil': None,\n 'solar': value_map.get('solar', 0),\n 'wind': value_map.get('wind', 0),\n 'geothermal': None,\n 'unknown': value_map.get('unknown', 0)\n },\n 'storage': {\n 'hydro': None\n },\n 'source': 'sldcguj.com',\n }\n\n valid_data = validate(data, logger, remove_negative=True, floor=7000)\n\n return valid_data\n\n\ndef fetch_consumption(zone_key='IN-GJ', session=None, target_datetime=None,\n logger=getLogger('IN-GJ')):\n \"\"\"\n Method to get consumption data of Gujarat\n :param zone_key:\n :param session:\n :return:\n \"\"\"\n session = session or requests.session()\n if target_datetime:\n raise NotImplementedError(\n 'This parser is not yet able to parse past dates')\n\n value_map = fetch_data(zone_key, session, logger=logger)\n\n data = {\n 'zoneKey': zone_key,\n 'datetime': value_map['date'],\n 'consumption': value_map['total consumption'],\n 'source': 'sldcguj.com'\n }\n\n return data\n\n\nif __name__ == '__main__':\n session = requests.Session()\n print(fetch_production('IN-GJ', session))\n print(fetch_consumption('IN-GJ', session))\n", "path": "parsers/IN_GJ.py"}]}
| num_tokens: 3,013 | num_tokens_diff: 737 |
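A side note on the row that ends above: its IN-GJ parser aggregates readings such as "120+35+0" with a small `split_and_sum` helper rather than `ast.literal_eval`. A minimal standalone sketch of that helper (the sample value is made up, not taken from the SLDC Gujarat feed):

```python
def split_and_sum(expression: str) -> float:
    """Sum only the positive terms of a '+'-separated numeric string."""
    values = [float(token) for token in expression.split('+')]
    return sum(value for value in values if value > 0.0)


# Hypothetical station reading; zero and negative terms are dropped from the total.
print(split_and_sum("120+35+0"))  # 155.0
```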
gh_patches_debug_40191 | rasdani/github-patches | git_diff | pwndbg__pwndbg-1268 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`nextret` keeps going after the binary segfaults
If the binary segfaults when running `nextret` (and potentially the other `next*` commands), it keeps trying to continue until you kill it:
```
Program received signal SIGSEGV, Segmentation fault.
0x00007f7a8b4f9ae4 in __vfscanf_internal (s=0x7f7a8b6b0aa0 <_IO_2_1_stdin_>, format=0x4020ca "%s", argptr=argptr@entry=0x7ffeb13483a8, mode_flags=mode_flags@entry=2) at ./stdio-c
ommon/vfscanf-internal.c:339
339 in ./stdio-common/vfscanf-internal.c
Program received signal SIGSEGV, Segmentation fault.
0x00007f7a8b4f9ae4 in __vfscanf_internal (s=0x7f7a8b6b0aa0 <_IO_2_1_stdin_>, format=0x4020ca "%s", argptr=argptr@entry=0x7ffeb13483a8, mode_flags=mode_flags@entry=2) at ./stdio-c
ommon/vfscanf-internal.c:339
339 in ./stdio-common/vfscanf-internal.c
Program received signal SIGSEGV, Segmentation fault.
0x00007f7a8b4f9ae4 in __vfscanf_internal (s=0x7f7a8b6b0aa0 <_IO_2_1_stdin_>, format=0x4020ca "%s", argptr=argptr@entry=0x7ffeb13483a8, mode_flags=mode_flags@entry=2) at ./stdio-c
ommon/vfscanf-internal.c:339
339 in ./stdio-common/vfscanf-internal.c
```
We should stop after the first segfault and return to the prompt.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/gdblib/proc.py`
Content:
```
1 """
2 Provides values which would be available from /proc which
3 are not fulfilled by other modules and some process/gdb flow
4 related information.
5 """
6
7 import functools
8 import sys
9 from types import ModuleType
10 from typing import Any
11 from typing import Callable
12
13 import gdb
14
15 import pwndbg.gdblib.qemu
16 import pwndbg.lib.memoize
17
18
19 class module(ModuleType):
20 @property
21 def pid(self):
22 # QEMU usermode emulation always returns 42000 for some reason.
23 # In any case, we can't use the info.
24 if pwndbg.gdblib.qemu.is_qemu_usermode():
25 return pwndbg.gdblib.qemu.pid()
26
27 i = gdb.selected_inferior()
28 if i is not None:
29 return i.pid
30 return 0
31
32 @property
33 def tid(self):
34 if pwndbg.gdblib.qemu.is_qemu_usermode():
35 return pwndbg.gdblib.qemu.pid()
36
37 i = gdb.selected_thread()
38 if i is not None:
39 return i.ptid[1]
40
41 return self.pid
42
43 @property
44 def alive(self):
45 return gdb.selected_thread() is not None
46
47 @property
48 def thread_is_stopped(self):
49 """
50 This detects whether selected thread is stopped.
51 It is not stopped in situations when gdb is executing commands
52 that are attached to a breakpoint by `command` command.
53
54 For more info see issue #229 ( https://github.com/pwndbg/pwndbg/issues/299 )
55 :return: Whether gdb executes commands attached to bp with `command` command.
56 """
57 return gdb.selected_thread().is_stopped()
58
59 @property
60 def exe(self):
61 """
62 Returns the debugged file name.
63
64 On remote targets, this may be prefixed with "target:" string.
65 See this by executing those in two terminals:
66 1. gdbserver 127.0.0.1:1234 /bin/ls
67 2. gdb -ex "target remote :1234" -ex "pi pwndbg.gdblib.proc.exe"
68
69 If you need to process the debugged file use:
70 `pwndbg.gdblib.file.get_file(pwndbg.gdblib.proc.exe)`
71 """
72 return gdb.current_progspace().filename
73
74 def OnlyWhenRunning(self, func):
75 @functools.wraps(func)
76 def wrapper(*a, **kw):
77 if self.alive:
78 return func(*a, **kw)
79
80 return wrapper
81
82
83 OnlyWhenRunning: Callable[[Any], Any]
84 # To prevent garbage collection
85 tether = sys.modules[__name__]
86
87 sys.modules[__name__] = module(__name__, "")
88
```
Path: `pwndbg/gdblib/next.py`
Content:
```
1 """
2 Commands for setting temporary breakpoints on the next
3 instruction of some type (call, branch, etc.)
4 """
5
6 import re
7
8 import capstone
9 import gdb
10
11 import pwndbg.disasm
12 import pwndbg.gdblib.events
13 import pwndbg.gdblib.proc
14 import pwndbg.gdblib.regs
15 from pwndbg.color import message
16
17 jumps = set((capstone.CS_GRP_CALL, capstone.CS_GRP_JUMP, capstone.CS_GRP_RET, capstone.CS_GRP_IRET))
18
19 interrupts = set((capstone.CS_GRP_INT,))
20
21
22 @pwndbg.gdblib.events.exit
23 def clear_temp_breaks():
24 if not pwndbg.gdblib.proc.alive:
25 breakpoints = gdb.breakpoints()
26 if breakpoints:
27 for bp in breakpoints:
28 if (
29 bp.temporary and not bp.visible
30 ): # visible is used instead of internal because older gdb's don't support internal
31 bp.delete()
32
33
34 def next_int(address=None):
35 """
36 If there is a syscall in the current basic black,
37 return the instruction of the one closest to $PC.
38
39 Otherwise, return None.
40 """
41 if address is None:
42 ins = pwndbg.disasm.one(pwndbg.gdblib.regs.pc)
43 if not ins:
44 return None
45 address = ins.next
46
47 ins = pwndbg.disasm.one(address)
48 while ins:
49 if set(ins.groups) & jumps:
50 return None
51 if set(ins.groups) & interrupts:
52 return ins
53 ins = pwndbg.disasm.one(ins.next)
54
55 return None
56
57
58 def next_branch(address=None):
59 if address is None:
60 ins = pwndbg.disasm.one(pwndbg.gdblib.regs.pc)
61 if not ins:
62 return None
63 address = ins.next
64
65 ins = pwndbg.disasm.one(address)
66 while ins:
67 if set(ins.groups) & jumps:
68 return ins
69 ins = pwndbg.disasm.one(ins.next)
70
71 return None
72
73
74 def break_next_branch(address=None):
75 ins = next_branch(address)
76
77 if ins:
78 gdb.Breakpoint("*%#x" % ins.address, internal=True, temporary=True)
79 gdb.execute("continue", from_tty=False, to_string=True)
80 return ins
81
82
83 def break_next_interrupt(address=None):
84 ins = next_int(address)
85
86 if ins:
87 gdb.Breakpoint("*%#x" % ins.address, internal=True, temporary=True)
88 gdb.execute("continue", from_tty=False, to_string=True)
89 return ins
90
91
92 def break_next_call(symbol_regex=None):
93 while pwndbg.gdblib.proc.alive:
94 ins = break_next_branch()
95
96 if not ins:
97 break
98
99 # continue if not a call
100 if capstone.CS_GRP_CALL not in ins.groups:
101 continue
102
103 # return call if we don't search for a symbol
104 if not symbol_regex:
105 return ins
106
107 # return call if we match target address
108 if ins.target_const and re.match("%s$" % symbol_regex, hex(ins.target)):
109 return ins
110
111 # return call if we match symbol name
112 if ins.symbol and re.match("%s$" % symbol_regex, ins.symbol):
113 return ins
114
115
116 def break_next_ret(address=None):
117 while pwndbg.gdblib.proc.alive:
118 ins = break_next_branch(address)
119
120 if not ins:
121 break
122
123 if capstone.CS_GRP_RET in ins.groups:
124 return ins
125
126
127 def break_on_program_code():
128 """
129 Breaks on next instruction that belongs to process' objfile code.
130 :return: True for success, False when process ended or when pc is at the code.
131 """
132 exe = pwndbg.gdblib.proc.exe
133 binary_exec_page_ranges = [
134 (p.start, p.end) for p in pwndbg.vmmap.get() if p.objfile == exe and p.execute
135 ]
136
137 pc = pwndbg.gdblib.regs.pc
138 for start, end in binary_exec_page_ranges:
139 if start <= pc < end:
140 print(message.error("The pc is already at the binary objfile code. Not stepping."))
141 return False
142
143 while pwndbg.gdblib.proc.alive:
144 gdb.execute("si", from_tty=False, to_string=False)
145
146 pc = pwndbg.gdblib.regs.pc
147 for start, end in binary_exec_page_ranges:
148 if start <= pc < end:
149 return True
150
151 return False
152
153
154 def break_on_next(address=None):
155 address = address or pwndbg.gdblib.regs.pc
156 ins = pwndbg.disasm.one(address)
157
158 gdb.Breakpoint("*%#x" % (ins.address + ins.size), temporary=True)
159 gdb.execute("continue", from_tty=False, to_string=True)
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pwndbg/gdblib/next.py b/pwndbg/gdblib/next.py
--- a/pwndbg/gdblib/next.py
+++ b/pwndbg/gdblib/next.py
@@ -91,6 +91,10 @@
def break_next_call(symbol_regex=None):
while pwndbg.gdblib.proc.alive:
+ # Break on signal as it may be a segfault
+ if pwndbg.gdblib.proc.stopped_with_signal:
+ return
+
ins = break_next_branch()
if not ins:
@@ -115,6 +119,10 @@
def break_next_ret(address=None):
while pwndbg.gdblib.proc.alive:
+ # Break on signal as it may be a segfault
+ if pwndbg.gdblib.proc.stopped_with_signal:
+ return
+
ins = break_next_branch(address)
if not ins:
@@ -126,13 +134,14 @@
def break_on_program_code():
"""
- Breaks on next instruction that belongs to process' objfile code.
- :return: True for success, False when process ended or when pc is at the code.
+ Breaks on next instruction that belongs to process' objfile code
+
+ :return: True for success, False when process ended or when pc is not at the code or if a signal occurred
"""
exe = pwndbg.gdblib.proc.exe
- binary_exec_page_ranges = [
+ binary_exec_page_ranges = tuple(
(p.start, p.end) for p in pwndbg.vmmap.get() if p.objfile == exe and p.execute
- ]
+ )
pc = pwndbg.gdblib.regs.pc
for start, end in binary_exec_page_ranges:
@@ -140,12 +149,18 @@
print(message.error("The pc is already at the binary objfile code. Not stepping."))
return False
- while pwndbg.gdblib.proc.alive:
- gdb.execute("si", from_tty=False, to_string=False)
+ proc = pwndbg.gdblib.proc
+ regs = pwndbg.gdblib.regs
+
+ while proc.alive:
+ # Break on signal as it may be a segfault
+ if proc.stopped_with_signal:
+ return False
+
+ o = gdb.execute("si", from_tty=False, to_string=True)
- pc = pwndbg.gdblib.regs.pc
for start, end in binary_exec_page_ranges:
- if start <= pc < end:
+ if start <= regs.pc < end:
return True
return False
diff --git a/pwndbg/gdblib/proc.py b/pwndbg/gdblib/proc.py
--- a/pwndbg/gdblib/proc.py
+++ b/pwndbg/gdblib/proc.py
@@ -42,6 +42,11 @@
@property
def alive(self):
+ """
+ Informs whether the process has a thread. However, note that it will
+ still return True for a segfaulted thread. To detect that, consider
+ using the `stopped_with_signal` method.
+ """
return gdb.selected_thread() is not None
@property
@@ -56,6 +61,15 @@
"""
return gdb.selected_thread().is_stopped()
+ @property
+ def stopped_with_signal(self) -> bool:
+ """
+ Returns whether the program has stopped with a signal
+
+ Can be used to detect segfaults (but will also detect other signals)
+ """
+ return "It stopped with signal " in gdb.execute("info program", to_string=True)
+
@property
def exe(self):
"""
|
{"golden_diff": "diff --git a/pwndbg/gdblib/next.py b/pwndbg/gdblib/next.py\n--- a/pwndbg/gdblib/next.py\n+++ b/pwndbg/gdblib/next.py\n@@ -91,6 +91,10 @@\n \n def break_next_call(symbol_regex=None):\n while pwndbg.gdblib.proc.alive:\n+ # Break on signal as it may be a segfault\n+ if pwndbg.gdblib.proc.stopped_with_signal:\n+ return\n+\n ins = break_next_branch()\n \n if not ins:\n@@ -115,6 +119,10 @@\n \n def break_next_ret(address=None):\n while pwndbg.gdblib.proc.alive:\n+ # Break on signal as it may be a segfault\n+ if pwndbg.gdblib.proc.stopped_with_signal:\n+ return\n+\n ins = break_next_branch(address)\n \n if not ins:\n@@ -126,13 +134,14 @@\n \n def break_on_program_code():\n \"\"\"\n- Breaks on next instruction that belongs to process' objfile code.\n- :return: True for success, False when process ended or when pc is at the code.\n+ Breaks on next instruction that belongs to process' objfile code\n+\n+ :return: True for success, False when process ended or when pc is not at the code or if a signal occurred\n \"\"\"\n exe = pwndbg.gdblib.proc.exe\n- binary_exec_page_ranges = [\n+ binary_exec_page_ranges = tuple(\n (p.start, p.end) for p in pwndbg.vmmap.get() if p.objfile == exe and p.execute\n- ]\n+ )\n \n pc = pwndbg.gdblib.regs.pc\n for start, end in binary_exec_page_ranges:\n@@ -140,12 +149,18 @@\n print(message.error(\"The pc is already at the binary objfile code. Not stepping.\"))\n return False\n \n- while pwndbg.gdblib.proc.alive:\n- gdb.execute(\"si\", from_tty=False, to_string=False)\n+ proc = pwndbg.gdblib.proc\n+ regs = pwndbg.gdblib.regs\n+\n+ while proc.alive:\n+ # Break on signal as it may be a segfault\n+ if proc.stopped_with_signal:\n+ return False\n+\n+ o = gdb.execute(\"si\", from_tty=False, to_string=True)\n \n- pc = pwndbg.gdblib.regs.pc\n for start, end in binary_exec_page_ranges:\n- if start <= pc < end:\n+ if start <= regs.pc < end:\n return True\n \n return False\ndiff --git a/pwndbg/gdblib/proc.py b/pwndbg/gdblib/proc.py\n--- a/pwndbg/gdblib/proc.py\n+++ b/pwndbg/gdblib/proc.py\n@@ -42,6 +42,11 @@\n \n @property\n def alive(self):\n+ \"\"\"\n+ Informs whether the process has a thread. However, note that it will\n+ still return True for a segfaulted thread. 
To detect that, consider\n+ using the `stopped_with_signal` method.\n+ \"\"\"\n return gdb.selected_thread() is not None\n \n @property\n@@ -56,6 +61,15 @@\n \"\"\"\n return gdb.selected_thread().is_stopped()\n \n+ @property\n+ def stopped_with_signal(self) -> bool:\n+ \"\"\"\n+ Returns whether the program has stopped with a signal\n+\n+ Can be used to detect segfaults (but will also detect other signals)\n+ \"\"\"\n+ return \"It stopped with signal \" in gdb.execute(\"info program\", to_string=True)\n+\n @property\n def exe(self):\n \"\"\"\n", "issue": "`nextret` keeps going after the binary segfaults\nIf the binary segfaults when running `nextret` (and potentially the other `next*` commands), it keeps trying to continue until you kill it:\r\n```\r\nProgram received signal SIGSEGV, Segmentation fault.\r\n0x00007f7a8b4f9ae4 in __vfscanf_internal (s=0x7f7a8b6b0aa0 <_IO_2_1_stdin_>, format=0x4020ca \"%s\", argptr=argptr@entry=0x7ffeb13483a8, mode_flags=mode_flags@entry=2) at ./stdio-c\r\nommon/vfscanf-internal.c:339\r\n339 in ./stdio-common/vfscanf-internal.c\r\n\r\nProgram received signal SIGSEGV, Segmentation fault.\r\n0x00007f7a8b4f9ae4 in __vfscanf_internal (s=0x7f7a8b6b0aa0 <_IO_2_1_stdin_>, format=0x4020ca \"%s\", argptr=argptr@entry=0x7ffeb13483a8, mode_flags=mode_flags@entry=2) at ./stdio-c\r\nommon/vfscanf-internal.c:339\r\n339 in ./stdio-common/vfscanf-internal.c\r\n\r\nProgram received signal SIGSEGV, Segmentation fault.\r\n0x00007f7a8b4f9ae4 in __vfscanf_internal (s=0x7f7a8b6b0aa0 <_IO_2_1_stdin_>, format=0x4020ca \"%s\", argptr=argptr@entry=0x7ffeb13483a8, mode_flags=mode_flags@entry=2) at ./stdio-c\r\nommon/vfscanf-internal.c:339\r\n339 in ./stdio-common/vfscanf-internal.c\r\n```\r\n\r\nWe should stop after the first segfault and return to the prompt.\n", "before_files": [{"content": "\"\"\"\nProvides values which would be available from /proc which\nare not fulfilled by other modules and some process/gdb flow\nrelated information.\n\"\"\"\n\nimport functools\nimport sys\nfrom types import ModuleType\nfrom typing import Any\nfrom typing import Callable\n\nimport gdb\n\nimport pwndbg.gdblib.qemu\nimport pwndbg.lib.memoize\n\n\nclass module(ModuleType):\n @property\n def pid(self):\n # QEMU usermode emulation always returns 42000 for some reason.\n # In any case, we can't use the info.\n if pwndbg.gdblib.qemu.is_qemu_usermode():\n return pwndbg.gdblib.qemu.pid()\n\n i = gdb.selected_inferior()\n if i is not None:\n return i.pid\n return 0\n\n @property\n def tid(self):\n if pwndbg.gdblib.qemu.is_qemu_usermode():\n return pwndbg.gdblib.qemu.pid()\n\n i = gdb.selected_thread()\n if i is not None:\n return i.ptid[1]\n\n return self.pid\n\n @property\n def alive(self):\n return gdb.selected_thread() is not None\n\n @property\n def thread_is_stopped(self):\n \"\"\"\n This detects whether selected thread is stopped.\n It is not stopped in situations when gdb is executing commands\n that are attached to a breakpoint by `command` command.\n\n For more info see issue #229 ( https://github.com/pwndbg/pwndbg/issues/299 )\n :return: Whether gdb executes commands attached to bp with `command` command.\n \"\"\"\n return gdb.selected_thread().is_stopped()\n\n @property\n def exe(self):\n \"\"\"\n Returns the debugged file name.\n\n On remote targets, this may be prefixed with \"target:\" string.\n See this by executing those in two terminals:\n 1. gdbserver 127.0.0.1:1234 /bin/ls\n 2. 
gdb -ex \"target remote :1234\" -ex \"pi pwndbg.gdblib.proc.exe\"\n\n If you need to process the debugged file use:\n `pwndbg.gdblib.file.get_file(pwndbg.gdblib.proc.exe)`\n \"\"\"\n return gdb.current_progspace().filename\n\n def OnlyWhenRunning(self, func):\n @functools.wraps(func)\n def wrapper(*a, **kw):\n if self.alive:\n return func(*a, **kw)\n\n return wrapper\n\n\nOnlyWhenRunning: Callable[[Any], Any]\n# To prevent garbage collection\ntether = sys.modules[__name__]\n\nsys.modules[__name__] = module(__name__, \"\")\n", "path": "pwndbg/gdblib/proc.py"}, {"content": "\"\"\"\nCommands for setting temporary breakpoints on the next\ninstruction of some type (call, branch, etc.)\n\"\"\"\n\nimport re\n\nimport capstone\nimport gdb\n\nimport pwndbg.disasm\nimport pwndbg.gdblib.events\nimport pwndbg.gdblib.proc\nimport pwndbg.gdblib.regs\nfrom pwndbg.color import message\n\njumps = set((capstone.CS_GRP_CALL, capstone.CS_GRP_JUMP, capstone.CS_GRP_RET, capstone.CS_GRP_IRET))\n\ninterrupts = set((capstone.CS_GRP_INT,))\n\n\[email protected]\ndef clear_temp_breaks():\n if not pwndbg.gdblib.proc.alive:\n breakpoints = gdb.breakpoints()\n if breakpoints:\n for bp in breakpoints:\n if (\n bp.temporary and not bp.visible\n ): # visible is used instead of internal because older gdb's don't support internal\n bp.delete()\n\n\ndef next_int(address=None):\n \"\"\"\n If there is a syscall in the current basic black,\n return the instruction of the one closest to $PC.\n\n Otherwise, return None.\n \"\"\"\n if address is None:\n ins = pwndbg.disasm.one(pwndbg.gdblib.regs.pc)\n if not ins:\n return None\n address = ins.next\n\n ins = pwndbg.disasm.one(address)\n while ins:\n if set(ins.groups) & jumps:\n return None\n if set(ins.groups) & interrupts:\n return ins\n ins = pwndbg.disasm.one(ins.next)\n\n return None\n\n\ndef next_branch(address=None):\n if address is None:\n ins = pwndbg.disasm.one(pwndbg.gdblib.regs.pc)\n if not ins:\n return None\n address = ins.next\n\n ins = pwndbg.disasm.one(address)\n while ins:\n if set(ins.groups) & jumps:\n return ins\n ins = pwndbg.disasm.one(ins.next)\n\n return None\n\n\ndef break_next_branch(address=None):\n ins = next_branch(address)\n\n if ins:\n gdb.Breakpoint(\"*%#x\" % ins.address, internal=True, temporary=True)\n gdb.execute(\"continue\", from_tty=False, to_string=True)\n return ins\n\n\ndef break_next_interrupt(address=None):\n ins = next_int(address)\n\n if ins:\n gdb.Breakpoint(\"*%#x\" % ins.address, internal=True, temporary=True)\n gdb.execute(\"continue\", from_tty=False, to_string=True)\n return ins\n\n\ndef break_next_call(symbol_regex=None):\n while pwndbg.gdblib.proc.alive:\n ins = break_next_branch()\n\n if not ins:\n break\n\n # continue if not a call\n if capstone.CS_GRP_CALL not in ins.groups:\n continue\n\n # return call if we don't search for a symbol\n if not symbol_regex:\n return ins\n\n # return call if we match target address\n if ins.target_const and re.match(\"%s$\" % symbol_regex, hex(ins.target)):\n return ins\n\n # return call if we match symbol name\n if ins.symbol and re.match(\"%s$\" % symbol_regex, ins.symbol):\n return ins\n\n\ndef break_next_ret(address=None):\n while pwndbg.gdblib.proc.alive:\n ins = break_next_branch(address)\n\n if not ins:\n break\n\n if capstone.CS_GRP_RET in ins.groups:\n return ins\n\n\ndef break_on_program_code():\n \"\"\"\n Breaks on next instruction that belongs to process' objfile code.\n :return: True for success, False when process ended or when pc is at the code.\n \"\"\"\n exe = 
pwndbg.gdblib.proc.exe\n binary_exec_page_ranges = [\n (p.start, p.end) for p in pwndbg.vmmap.get() if p.objfile == exe and p.execute\n ]\n\n pc = pwndbg.gdblib.regs.pc\n for start, end in binary_exec_page_ranges:\n if start <= pc < end:\n print(message.error(\"The pc is already at the binary objfile code. Not stepping.\"))\n return False\n\n while pwndbg.gdblib.proc.alive:\n gdb.execute(\"si\", from_tty=False, to_string=False)\n\n pc = pwndbg.gdblib.regs.pc\n for start, end in binary_exec_page_ranges:\n if start <= pc < end:\n return True\n\n return False\n\n\ndef break_on_next(address=None):\n address = address or pwndbg.gdblib.regs.pc\n ins = pwndbg.disasm.one(address)\n\n gdb.Breakpoint(\"*%#x\" % (ins.address + ins.size), temporary=True)\n gdb.execute(\"continue\", from_tty=False, to_string=True)\n", "path": "pwndbg/gdblib/next.py"}], "after_files": [{"content": "\"\"\"\nProvides values which would be available from /proc which\nare not fulfilled by other modules and some process/gdb flow\nrelated information.\n\"\"\"\n\nimport functools\nimport sys\nfrom types import ModuleType\nfrom typing import Any\nfrom typing import Callable\n\nimport gdb\n\nimport pwndbg.gdblib.qemu\nimport pwndbg.lib.memoize\n\n\nclass module(ModuleType):\n @property\n def pid(self):\n # QEMU usermode emulation always returns 42000 for some reason.\n # In any case, we can't use the info.\n if pwndbg.gdblib.qemu.is_qemu_usermode():\n return pwndbg.gdblib.qemu.pid()\n\n i = gdb.selected_inferior()\n if i is not None:\n return i.pid\n return 0\n\n @property\n def tid(self):\n if pwndbg.gdblib.qemu.is_qemu_usermode():\n return pwndbg.gdblib.qemu.pid()\n\n i = gdb.selected_thread()\n if i is not None:\n return i.ptid[1]\n\n return self.pid\n\n @property\n def alive(self):\n \"\"\"\n Informs whether the process has a thread. However, note that it will\n still return True for a segfaulted thread. To detect that, consider\n using the `stopped_with_signal` method.\n \"\"\"\n return gdb.selected_thread() is not None\n\n @property\n def thread_is_stopped(self):\n \"\"\"\n This detects whether selected thread is stopped.\n It is not stopped in situations when gdb is executing commands\n that are attached to a breakpoint by `command` command.\n\n For more info see issue #229 ( https://github.com/pwndbg/pwndbg/issues/299 )\n :return: Whether gdb executes commands attached to bp with `command` command.\n \"\"\"\n return gdb.selected_thread().is_stopped()\n\n @property\n def stopped_with_signal(self) -> bool:\n \"\"\"\n Returns whether the program has stopped with a signal\n\n Can be used to detect segfaults (but will also detect other signals)\n \"\"\"\n return \"It stopped with signal \" in gdb.execute(\"info program\", to_string=True)\n\n @property\n def exe(self):\n \"\"\"\n Returns the debugged file name.\n\n On remote targets, this may be prefixed with \"target:\" string.\n See this by executing those in two terminals:\n 1. gdbserver 127.0.0.1:1234 /bin/ls\n 2. 
gdb -ex \"target remote :1234\" -ex \"pi pwndbg.gdblib.proc.exe\"\n\n If you need to process the debugged file use:\n `pwndbg.gdblib.file.get_file(pwndbg.gdblib.proc.exe)`\n \"\"\"\n return gdb.current_progspace().filename\n\n def OnlyWhenRunning(self, func):\n @functools.wraps(func)\n def wrapper(*a, **kw):\n if self.alive:\n return func(*a, **kw)\n\n return wrapper\n\n\nOnlyWhenRunning: Callable[[Any], Any]\n# To prevent garbage collection\ntether = sys.modules[__name__]\n\nsys.modules[__name__] = module(__name__, \"\")\n", "path": "pwndbg/gdblib/proc.py"}, {"content": "\"\"\"\nCommands for setting temporary breakpoints on the next\ninstruction of some type (call, branch, etc.)\n\"\"\"\n\nimport re\n\nimport capstone\nimport gdb\n\nimport pwndbg.disasm\nimport pwndbg.gdblib.events\nimport pwndbg.gdblib.proc\nimport pwndbg.gdblib.regs\nfrom pwndbg.color import message\n\njumps = set((capstone.CS_GRP_CALL, capstone.CS_GRP_JUMP, capstone.CS_GRP_RET, capstone.CS_GRP_IRET))\n\ninterrupts = set((capstone.CS_GRP_INT,))\n\n\[email protected]\ndef clear_temp_breaks():\n if not pwndbg.gdblib.proc.alive:\n breakpoints = gdb.breakpoints()\n if breakpoints:\n for bp in breakpoints:\n if (\n bp.temporary and not bp.visible\n ): # visible is used instead of internal because older gdb's don't support internal\n bp.delete()\n\n\ndef next_int(address=None):\n \"\"\"\n If there is a syscall in the current basic black,\n return the instruction of the one closest to $PC.\n\n Otherwise, return None.\n \"\"\"\n if address is None:\n ins = pwndbg.disasm.one(pwndbg.gdblib.regs.pc)\n if not ins:\n return None\n address = ins.next\n\n ins = pwndbg.disasm.one(address)\n while ins:\n if set(ins.groups) & jumps:\n return None\n if set(ins.groups) & interrupts:\n return ins\n ins = pwndbg.disasm.one(ins.next)\n\n return None\n\n\ndef next_branch(address=None):\n if address is None:\n ins = pwndbg.disasm.one(pwndbg.gdblib.regs.pc)\n if not ins:\n return None\n address = ins.next\n\n ins = pwndbg.disasm.one(address)\n while ins:\n if set(ins.groups) & jumps:\n return ins\n ins = pwndbg.disasm.one(ins.next)\n\n return None\n\n\ndef break_next_branch(address=None):\n ins = next_branch(address)\n\n if ins:\n gdb.Breakpoint(\"*%#x\" % ins.address, internal=True, temporary=True)\n gdb.execute(\"continue\", from_tty=False, to_string=True)\n return ins\n\n\ndef break_next_interrupt(address=None):\n ins = next_int(address)\n\n if ins:\n gdb.Breakpoint(\"*%#x\" % ins.address, internal=True, temporary=True)\n gdb.execute(\"continue\", from_tty=False, to_string=True)\n return ins\n\n\ndef break_next_call(symbol_regex=None):\n while pwndbg.gdblib.proc.alive:\n # Break on signal as it may be a segfault\n if pwndbg.gdblib.proc.stopped_with_signal:\n return\n\n ins = break_next_branch()\n\n if not ins:\n break\n\n # continue if not a call\n if capstone.CS_GRP_CALL not in ins.groups:\n continue\n\n # return call if we don't search for a symbol\n if not symbol_regex:\n return ins\n\n # return call if we match target address\n if ins.target_const and re.match(\"%s$\" % symbol_regex, hex(ins.target)):\n return ins\n\n # return call if we match symbol name\n if ins.symbol and re.match(\"%s$\" % symbol_regex, ins.symbol):\n return ins\n\n\ndef break_next_ret(address=None):\n while pwndbg.gdblib.proc.alive:\n # Break on signal as it may be a segfault\n if pwndbg.gdblib.proc.stopped_with_signal:\n return\n\n ins = break_next_branch(address)\n\n if not ins:\n break\n\n if capstone.CS_GRP_RET in ins.groups:\n return ins\n\n\ndef 
break_on_program_code():\n \"\"\"\n Breaks on next instruction that belongs to process' objfile code\n\n :return: True for success, False when process ended or when pc is not at the code or if a signal occurred\n \"\"\"\n exe = pwndbg.gdblib.proc.exe\n binary_exec_page_ranges = tuple(\n (p.start, p.end) for p in pwndbg.vmmap.get() if p.objfile == exe and p.execute\n )\n\n pc = pwndbg.gdblib.regs.pc\n for start, end in binary_exec_page_ranges:\n if start <= pc < end:\n print(message.error(\"The pc is already at the binary objfile code. Not stepping.\"))\n return False\n\n proc = pwndbg.gdblib.proc\n regs = pwndbg.gdblib.regs\n\n while proc.alive:\n # Break on signal as it may be a segfault\n if proc.stopped_with_signal:\n return False\n\n o = gdb.execute(\"si\", from_tty=False, to_string=True)\n\n for start, end in binary_exec_page_ranges:\n if start <= regs.pc < end:\n return True\n\n return False\n\n\ndef break_on_next(address=None):\n address = address or pwndbg.gdblib.regs.pc\n ins = pwndbg.disasm.one(address)\n\n gdb.Breakpoint(\"*%#x\" % (ins.address + ins.size), temporary=True)\n gdb.execute(\"continue\", from_tty=False, to_string=True)\n", "path": "pwndbg/gdblib/next.py"}]}
| num_tokens: 2,965 | num_tokens_diff: 859 |
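A note on the pwndbg row above: the accepted patch makes the `next*` loops ask GDB whether the inferior halted on a signal before issuing another `continue`/`si`. A rough sketch of that guard, assuming it runs inside GDB's embedded Python (where the `gdb` module is importable):

```python
import gdb  # only available inside a GDB process with Python support


def stopped_with_signal() -> bool:
    """True when 'info program' reports the inferior stopped on a signal such as SIGSEGV."""
    return "It stopped with signal " in gdb.execute("info program", to_string=True)


def step_until(predicate) -> bool:
    """Single-step until predicate() holds; bail out on process exit or on a signal."""
    while gdb.selected_thread() is not None:
        if stopped_with_signal():
            return False  # e.g. a segfault: hand control back to the prompt
        gdb.execute("si", from_tty=False, to_string=True)
        if predicate():
            return True
    return False
```

`step_until` is a hypothetical wrapper, not pwndbg API; it only illustrates where the signal check sits relative to the stepping loop.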
gh_patches_debug_8333 | rasdani/github-patches | git_diff | scverse__scanpy-771 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
sc.tl.umap error with init_pos="paga"
Hello,
Trying to use sc.tl.umap with initial positions from sc.tl.paga. Seems an error with UMAP from the error log. But it is only called when calling paga positions, UMAP works otherwise.
I do see https://github.com/theislab/scanpy/issues/666, and https://github.com/lmcinnes/umap/pull/262, but I am already running scanpy 1.4.4 and umap 0.3.9 so I don't understand how to solve the issue?
Attached error. Any suggestions?
Thanks!
<img width="575" alt="Screen Shot 2019-08-05 at 19 02 18" src="https://user-images.githubusercontent.com/20108378/62485139-c560df80-b7b3-11e9-8333-7e511c263a79.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scanpy/tools/_umap.py`
Content:
```
1 import numpy as np
2 from pandas.api.types import is_numeric_dtype
3 from sklearn.utils import check_random_state, check_array
4
5 from ._utils import get_init_pos_from_paga, choose_representation
6 from .._settings import settings
7 from .. import logging as logg
8
9
10 def umap(
11 adata,
12 min_dist=0.5,
13 spread=1.0,
14 n_components=2,
15 maxiter=None,
16 alpha=1.0,
17 gamma=1.0,
18 negative_sample_rate=5,
19 init_pos='spectral',
20 random_state=0,
21 a=None,
22 b=None,
23 copy=False,
24 ):
25 """Embed the neighborhood graph using UMAP [McInnes18]_.
26
27 UMAP (Uniform Manifold Approximation and Projection) is a manifold learning
28 technique suitable for visualizing high-dimensional data. Besides tending to
29 be faster than tSNE, it optimizes the embedding such that it best reflects
30 the topology of the data, which we represent throughout Scanpy using a
31 neighborhood graph. tSNE, by contrast, optimizes the distribution of
32 nearest-neighbor distances in the embedding such that these best match the
33 distribution of distances in the high-dimensional space. We use the
34 implementation of `umap-learn <https://github.com/lmcinnes/umap>`__
35 [McInnes18]_. For a few comparisons of UMAP with tSNE, see this `preprint
36 <https://doi.org/10.1101/298430>`__.
37
38 Parameters
39 ----------
40 adata : :class:`~anndata.AnnData`
41 Annotated data matrix.
42 min_dist : `float`, optional (default: 0.5)
43 The effective minimum distance between embedded points. Smaller values
44 will result in a more clustered/clumped embedding where nearby points on
45 the manifold are drawn closer together, while larger values will result
46 on a more even dispersal of points. The value should be set relative to
47 the ``spread`` value, which determines the scale at which embedded
48 points will be spread out. The default of in the `umap-learn` package is
49 0.1.
50 spread : `float` (optional, default 1.0)
51 The effective scale of embedded points. In combination with `min_dist`
52 this determines how clustered/clumped the embedded points are.
53 n_components : `int`, optional (default: 2)
54 The number of dimensions of the embedding.
55 maxiter : `int`, optional (default: `None`)
56 The number of iterations (epochs) of the optimization. Called `n_epochs`
57 in the original UMAP.
58 alpha : `float`, optional (default: 1.0)
59 The initial learning rate for the embedding optimization.
60 gamma : `float` (optional, default 1.0)
61 Weighting applied to negative samples in low dimensional embedding
62 optimization. Values higher than one will result in greater weight
63 being given to negative samples.
64 negative_sample_rate : `int` (optional, default 5)
65 The number of negative edge/1-simplex samples to use per positive
66 edge/1-simplex sample in optimizing the low dimensional embedding.
67 init_pos : `string` or `np.array`, optional (default: 'spectral')
68 How to initialize the low dimensional embedding. Called `init` in the
69 original UMAP.
70 Options are:
71
72 * Any key for `adata.obsm`.
73 * 'paga': positions from :func:`~scanpy.api.pl.paga`.
74 * 'spectral': use a spectral embedding of the graph.
75 * 'random': assign initial embedding positions at random.
76 * A numpy array of initial embedding positions.
77 random_state : `int`, `RandomState` or `None`, optional (default: 0)
78 If `int`, `random_state` is the seed used by the random number generator;
79 If `RandomState`, `random_state` is the random number generator;
80 If `None`, the random number generator is the `RandomState` instance used
81 by `np.random`.
82 a : `float` (optional, default `None`)
83 More specific parameters controlling the embedding. If `None` these
84 values are set automatically as determined by `min_dist` and
85 `spread`.
86 b : `float` (optional, default `None`)
87 More specific parameters controlling the embedding. If `None` these
88 values are set automatically as determined by `min_dist` and
89 `spread`.
90 copy : `bool` (default: `False`)
91 Return a copy instead of writing to adata.
92
93 Returns
94 -------
95 Depending on `copy`, returns or updates `adata` with the following fields.
96
97 **X_umap** : `adata.obsm` field
98 UMAP coordinates of data.
99 """
100 adata = adata.copy() if copy else adata
101 if 'neighbors' not in adata.uns:
102 raise ValueError(
103 'Did not find \'neighbors/connectivities\'. Run `sc.pp.neighbors` first.')
104 start = logg.info('computing UMAP')
105 if ('params' not in adata.uns['neighbors']
106 or adata.uns['neighbors']['params']['method'] != 'umap'):
107 logg.warning('neighbors/connectivities have not been computed using umap')
108 from umap.umap_ import find_ab_params, simplicial_set_embedding
109 if a is None or b is None:
110 a, b = find_ab_params(spread, min_dist)
111 else:
112 a = a
113 b = b
114
115 if isinstance(init_pos, str) and init_pos in adata.obsm.keys():
116 init_coords = adata.obsm[init_pos]
117 elif isinstance(init_pos, str) and init_pos == 'paga':
118 init_coords = get_init_pos_from_paga(adata, random_state=random_state)
119 else:
120 init_coords = init_pos # Let umap handle it
121 if hasattr(init_coords, "dtype") and is_numeric_dtype(init_pos):
122 init_coords = check_array(init_coords, dtype=np.float32, accept_sparse=False)
123
124 random_state = check_random_state(random_state)
125 n_epochs = 0 if maxiter is None else maxiter
126 neigh_params = adata.uns['neighbors']['params']
127 X = choose_representation(
128 adata, neigh_params.get('use_rep', None), neigh_params.get('n_pcs', None), silent=True)
129 # the data matrix X is really only used for determining the number of connected components
130 # for the init condition in the UMAP embedding
131 X_umap = simplicial_set_embedding(
132 X,
133 adata.uns['neighbors']['connectivities'].tocoo(),
134 n_components,
135 alpha,
136 a,
137 b,
138 gamma,
139 negative_sample_rate,
140 n_epochs,
141 init_coords,
142 random_state,
143 neigh_params.get('metric', 'euclidean'),
144 neigh_params.get('metric_kwds', {}),
145 verbose=settings.verbosity > 3,
146 )
147 adata.obsm['X_umap'] = X_umap # annotate samples with UMAP coordinates
148 logg.info(
149 ' finished',
150 time=start,
151 deep=(
152 'added\n'
153 " 'X_umap', UMAP coordinates (adata.obsm)"
154 ),
155 )
156 return adata if copy else None
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scanpy/tools/_umap.py b/scanpy/tools/_umap.py
--- a/scanpy/tools/_umap.py
+++ b/scanpy/tools/_umap.py
@@ -118,7 +118,7 @@
init_coords = get_init_pos_from_paga(adata, random_state=random_state)
else:
init_coords = init_pos # Let umap handle it
- if hasattr(init_coords, "dtype") and is_numeric_dtype(init_pos):
+ if hasattr(init_coords, "dtype"):
init_coords = check_array(init_coords, dtype=np.float32, accept_sparse=False)
random_state = check_random_state(random_state)
|
{"golden_diff": "diff --git a/scanpy/tools/_umap.py b/scanpy/tools/_umap.py\n--- a/scanpy/tools/_umap.py\n+++ b/scanpy/tools/_umap.py\n@@ -118,7 +118,7 @@\n init_coords = get_init_pos_from_paga(adata, random_state=random_state)\n else:\n init_coords = init_pos # Let umap handle it\n- if hasattr(init_coords, \"dtype\") and is_numeric_dtype(init_pos):\n+ if hasattr(init_coords, \"dtype\"):\n init_coords = check_array(init_coords, dtype=np.float32, accept_sparse=False)\n \n random_state = check_random_state(random_state)\n", "issue": "sc.tl.umap error with init_pos=\"paga\"\nHello,\r\n\r\nTrying to use sc.tl.umap with initial positions from sc.tl.paga. Seems an error with UMAP from the error log. But it is only called when calling paga positions, UMAP works otherwise.\r\n\r\nI do see https://github.com/theislab/scanpy/issues/666, and https://github.com/lmcinnes/umap/pull/262, but I am already running scanpy 1.4.4 and umap 0.3.9 so I don't understand how to solve the issue?\r\n\r\nAttached error. Any suggestions? \r\nThanks!\r\n\r\n<img width=\"575\" alt=\"Screen Shot 2019-08-05 at 19 02 18\" src=\"https://user-images.githubusercontent.com/20108378/62485139-c560df80-b7b3-11e9-8333-7e511c263a79.png\">\r\n\n", "before_files": [{"content": "import numpy as np\nfrom pandas.api.types import is_numeric_dtype\nfrom sklearn.utils import check_random_state, check_array\n\nfrom ._utils import get_init_pos_from_paga, choose_representation\nfrom .._settings import settings\nfrom .. import logging as logg\n\n\ndef umap(\n adata,\n min_dist=0.5,\n spread=1.0,\n n_components=2,\n maxiter=None,\n alpha=1.0,\n gamma=1.0,\n negative_sample_rate=5,\n init_pos='spectral',\n random_state=0,\n a=None,\n b=None,\n copy=False,\n):\n \"\"\"Embed the neighborhood graph using UMAP [McInnes18]_.\n\n UMAP (Uniform Manifold Approximation and Projection) is a manifold learning\n technique suitable for visualizing high-dimensional data. Besides tending to\n be faster than tSNE, it optimizes the embedding such that it best reflects\n the topology of the data, which we represent throughout Scanpy using a\n neighborhood graph. tSNE, by contrast, optimizes the distribution of\n nearest-neighbor distances in the embedding such that these best match the\n distribution of distances in the high-dimensional space. We use the\n implementation of `umap-learn <https://github.com/lmcinnes/umap>`__\n [McInnes18]_. For a few comparisons of UMAP with tSNE, see this `preprint\n <https://doi.org/10.1101/298430>`__.\n\n Parameters\n ----------\n adata : :class:`~anndata.AnnData`\n Annotated data matrix.\n min_dist : `float`, optional (default: 0.5)\n The effective minimum distance between embedded points. Smaller values\n will result in a more clustered/clumped embedding where nearby points on\n the manifold are drawn closer together, while larger values will result\n on a more even dispersal of points. The value should be set relative to\n the ``spread`` value, which determines the scale at which embedded\n points will be spread out. The default of in the `umap-learn` package is\n 0.1.\n spread : `float` (optional, default 1.0)\n The effective scale of embedded points. In combination with `min_dist`\n this determines how clustered/clumped the embedded points are.\n n_components : `int`, optional (default: 2)\n The number of dimensions of the embedding.\n maxiter : `int`, optional (default: `None`)\n The number of iterations (epochs) of the optimization. 
Called `n_epochs`\n in the original UMAP.\n alpha : `float`, optional (default: 1.0)\n The initial learning rate for the embedding optimization.\n gamma : `float` (optional, default 1.0)\n Weighting applied to negative samples in low dimensional embedding\n optimization. Values higher than one will result in greater weight\n being given to negative samples.\n negative_sample_rate : `int` (optional, default 5)\n The number of negative edge/1-simplex samples to use per positive\n edge/1-simplex sample in optimizing the low dimensional embedding.\n init_pos : `string` or `np.array`, optional (default: 'spectral')\n How to initialize the low dimensional embedding. Called `init` in the\n original UMAP.\n Options are:\n\n * Any key for `adata.obsm`.\n * 'paga': positions from :func:`~scanpy.api.pl.paga`.\n * 'spectral': use a spectral embedding of the graph.\n * 'random': assign initial embedding positions at random.\n * A numpy array of initial embedding positions.\n random_state : `int`, `RandomState` or `None`, optional (default: 0)\n If `int`, `random_state` is the seed used by the random number generator;\n If `RandomState`, `random_state` is the random number generator;\n If `None`, the random number generator is the `RandomState` instance used\n by `np.random`.\n a : `float` (optional, default `None`)\n More specific parameters controlling the embedding. If `None` these\n values are set automatically as determined by `min_dist` and\n `spread`.\n b : `float` (optional, default `None`)\n More specific parameters controlling the embedding. If `None` these\n values are set automatically as determined by `min_dist` and\n `spread`.\n copy : `bool` (default: `False`)\n Return a copy instead of writing to adata.\n\n Returns\n -------\n Depending on `copy`, returns or updates `adata` with the following fields.\n\n **X_umap** : `adata.obsm` field\n UMAP coordinates of data.\n \"\"\"\n adata = adata.copy() if copy else adata\n if 'neighbors' not in adata.uns:\n raise ValueError(\n 'Did not find \\'neighbors/connectivities\\'. 
Run `sc.pp.neighbors` first.')\n start = logg.info('computing UMAP')\n if ('params' not in adata.uns['neighbors']\n or adata.uns['neighbors']['params']['method'] != 'umap'):\n logg.warning('neighbors/connectivities have not been computed using umap')\n from umap.umap_ import find_ab_params, simplicial_set_embedding\n if a is None or b is None:\n a, b = find_ab_params(spread, min_dist)\n else:\n a = a\n b = b\n\n if isinstance(init_pos, str) and init_pos in adata.obsm.keys():\n init_coords = adata.obsm[init_pos]\n elif isinstance(init_pos, str) and init_pos == 'paga':\n init_coords = get_init_pos_from_paga(adata, random_state=random_state)\n else:\n init_coords = init_pos # Let umap handle it\n if hasattr(init_coords, \"dtype\") and is_numeric_dtype(init_pos):\n init_coords = check_array(init_coords, dtype=np.float32, accept_sparse=False)\n\n random_state = check_random_state(random_state)\n n_epochs = 0 if maxiter is None else maxiter\n neigh_params = adata.uns['neighbors']['params']\n X = choose_representation(\n adata, neigh_params.get('use_rep', None), neigh_params.get('n_pcs', None), silent=True)\n # the data matrix X is really only used for determining the number of connected components\n # for the init condition in the UMAP embedding\n X_umap = simplicial_set_embedding(\n X,\n adata.uns['neighbors']['connectivities'].tocoo(),\n n_components,\n alpha,\n a,\n b,\n gamma,\n negative_sample_rate,\n n_epochs,\n init_coords,\n random_state,\n neigh_params.get('metric', 'euclidean'),\n neigh_params.get('metric_kwds', {}),\n verbose=settings.verbosity > 3,\n )\n adata.obsm['X_umap'] = X_umap # annotate samples with UMAP coordinates\n logg.info(\n ' finished',\n time=start,\n deep=(\n 'added\\n'\n \" 'X_umap', UMAP coordinates (adata.obsm)\"\n ),\n )\n return adata if copy else None\n", "path": "scanpy/tools/_umap.py"}], "after_files": [{"content": "import numpy as np\nfrom pandas.api.types import is_numeric_dtype\nfrom sklearn.utils import check_random_state, check_array\n\nfrom ._utils import get_init_pos_from_paga, choose_representation\nfrom .._settings import settings\nfrom .. import logging as logg\n\n\ndef umap(\n adata,\n min_dist=0.5,\n spread=1.0,\n n_components=2,\n maxiter=None,\n alpha=1.0,\n gamma=1.0,\n negative_sample_rate=5,\n init_pos='spectral',\n random_state=0,\n a=None,\n b=None,\n copy=False,\n):\n \"\"\"Embed the neighborhood graph using UMAP [McInnes18]_.\n\n UMAP (Uniform Manifold Approximation and Projection) is a manifold learning\n technique suitable for visualizing high-dimensional data. Besides tending to\n be faster than tSNE, it optimizes the embedding such that it best reflects\n the topology of the data, which we represent throughout Scanpy using a\n neighborhood graph. tSNE, by contrast, optimizes the distribution of\n nearest-neighbor distances in the embedding such that these best match the\n distribution of distances in the high-dimensional space. We use the\n implementation of `umap-learn <https://github.com/lmcinnes/umap>`__\n [McInnes18]_. For a few comparisons of UMAP with tSNE, see this `preprint\n <https://doi.org/10.1101/298430>`__.\n\n Parameters\n ----------\n adata : :class:`~anndata.AnnData`\n Annotated data matrix.\n min_dist : `float`, optional (default: 0.5)\n The effective minimum distance between embedded points. Smaller values\n will result in a more clustered/clumped embedding where nearby points on\n the manifold are drawn closer together, while larger values will result\n on a more even dispersal of points. 
The value should be set relative to\n the ``spread`` value, which determines the scale at which embedded\n points will be spread out. The default of in the `umap-learn` package is\n 0.1.\n spread : `float` (optional, default 1.0)\n The effective scale of embedded points. In combination with `min_dist`\n this determines how clustered/clumped the embedded points are.\n n_components : `int`, optional (default: 2)\n The number of dimensions of the embedding.\n maxiter : `int`, optional (default: `None`)\n The number of iterations (epochs) of the optimization. Called `n_epochs`\n in the original UMAP.\n alpha : `float`, optional (default: 1.0)\n The initial learning rate for the embedding optimization.\n gamma : `float` (optional, default 1.0)\n Weighting applied to negative samples in low dimensional embedding\n optimization. Values higher than one will result in greater weight\n being given to negative samples.\n negative_sample_rate : `int` (optional, default 5)\n The number of negative edge/1-simplex samples to use per positive\n edge/1-simplex sample in optimizing the low dimensional embedding.\n init_pos : `string` or `np.array`, optional (default: 'spectral')\n How to initialize the low dimensional embedding. Called `init` in the\n original UMAP.\n Options are:\n\n * Any key for `adata.obsm`.\n * 'paga': positions from :func:`~scanpy.api.pl.paga`.\n * 'spectral': use a spectral embedding of the graph.\n * 'random': assign initial embedding positions at random.\n * A numpy array of initial embedding positions.\n random_state : `int`, `RandomState` or `None`, optional (default: 0)\n If `int`, `random_state` is the seed used by the random number generator;\n If `RandomState`, `random_state` is the random number generator;\n If `None`, the random number generator is the `RandomState` instance used\n by `np.random`.\n a : `float` (optional, default `None`)\n More specific parameters controlling the embedding. If `None` these\n values are set automatically as determined by `min_dist` and\n `spread`.\n b : `float` (optional, default `None`)\n More specific parameters controlling the embedding. If `None` these\n values are set automatically as determined by `min_dist` and\n `spread`.\n copy : `bool` (default: `False`)\n Return a copy instead of writing to adata.\n\n Returns\n -------\n Depending on `copy`, returns or updates `adata` with the following fields.\n\n **X_umap** : `adata.obsm` field\n UMAP coordinates of data.\n \"\"\"\n adata = adata.copy() if copy else adata\n if 'neighbors' not in adata.uns:\n raise ValueError(\n 'Did not find \\'neighbors/connectivities\\'. 
Run `sc.pp.neighbors` first.')\n start = logg.info('computing UMAP')\n if ('params' not in adata.uns['neighbors']\n or adata.uns['neighbors']['params']['method'] != 'umap'):\n logg.warning('neighbors/connectivities have not been computed using umap')\n from umap.umap_ import find_ab_params, simplicial_set_embedding\n if a is None or b is None:\n a, b = find_ab_params(spread, min_dist)\n else:\n a = a\n b = b\n\n if isinstance(init_pos, str) and init_pos in adata.obsm.keys():\n init_coords = adata.obsm[init_pos]\n elif isinstance(init_pos, str) and init_pos == 'paga':\n init_coords = get_init_pos_from_paga(adata, random_state=random_state)\n else:\n init_coords = init_pos # Let umap handle it\n if hasattr(init_coords, \"dtype\"):\n init_coords = check_array(init_coords, dtype=np.float32, accept_sparse=False)\n\n random_state = check_random_state(random_state)\n n_epochs = 0 if maxiter is None else maxiter\n neigh_params = adata.uns['neighbors']['params']\n X = choose_representation(\n adata, neigh_params.get('use_rep', None), neigh_params.get('n_pcs', None), silent=True)\n # the data matrix X is really only used for determining the number of connected components\n # for the init condition in the UMAP embedding\n X_umap = simplicial_set_embedding(\n X,\n adata.uns['neighbors']['connectivities'].tocoo(),\n n_components,\n alpha,\n a,\n b,\n gamma,\n negative_sample_rate,\n n_epochs,\n init_coords,\n random_state,\n neigh_params.get('metric', 'euclidean'),\n neigh_params.get('metric_kwds', {}),\n verbose=settings.verbosity > 3,\n )\n adata.obsm['X_umap'] = X_umap # annotate samples with UMAP coordinates\n logg.info(\n ' finished',\n time=start,\n deep=(\n 'added\\n'\n \" 'X_umap', UMAP coordinates (adata.obsm)\"\n ),\n )\n return adata if copy else None\n", "path": "scanpy/tools/_umap.py"}]}
| num_tokens: 2,470 | num_tokens_diff: 150 |
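A note on the scanpy row above: the regression comes down to the numeric-dtype test being applied to the `init_pos` option (the literal string 'paga') instead of to the coordinates computed from PAGA, so the float32 normalisation was skipped. A self-contained illustration, with a made-up 5×2 array standing in for the PAGA positions:

```python
import numpy as np
from pandas.api.types import is_numeric_dtype
from sklearn.utils import check_array

init_pos = "paga"                    # user-facing option name
init_coords = np.random.rand(5, 2)   # stand-in for positions derived from PAGA

# Buggy guard: it inspects the option string, not the array, so the cast is skipped.
if hasattr(init_coords, "dtype") and is_numeric_dtype(init_pos):
    init_coords = check_array(init_coords, dtype=np.float32, accept_sparse=False)

# Fixed guard (what the golden diff keeps): only ask the coordinates themselves.
if hasattr(init_coords, "dtype"):
    init_coords = check_array(init_coords, dtype=np.float32, accept_sparse=False)

print(init_coords.dtype)  # float32 once the fixed guard has run
```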
gh_patches_debug_7196 | rasdani/github-patches | git_diff | spyder-ide__spyder-16020 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Splash screen can take a lot of space in low resolution screens
When starting Spyder in my laptop screen (1366 x 768) the splash looks like this (kind of big):

However in my secondary screen (1920×1080) the splash looks better:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `spyder/app/utils.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © Spyder Project Contributors
4 # Licensed under the terms of the MIT License
5 # (see spyder/__init__.py for details)
6
7 """Utility functions for the Spyder application."""
8
9 # Standard library imports
10 import glob
11 import logging
12 import os
13 import os.path as osp
14 import re
15 import sys
16
17 # Third-party imports
18 import psutil
19 from qtpy.QtCore import QCoreApplication, Qt
20 from qtpy.QtGui import QColor, QPalette, QPixmap
21 from qtpy.QtWidgets import QSplashScreen
22
23 # Local imports
24 from spyder.config.base import (DEV, get_conf_path, get_debug_level,
25 running_under_pytest)
26 from spyder.utils.image_path_manager import get_image_path
27 from spyder.utils.qthelpers import file_uri
28 from spyder.utils.external.dafsa.dafsa import DAFSA
29 from spyder.utils.stylesheet import QStylePalette
30
31 # For spyder-ide/spyder#7447.
32 try:
33 from qtpy.QtQuick import QQuickWindow, QSGRendererInterface
34 except Exception:
35 QQuickWindow = QSGRendererInterface = None
36
37
38 root_logger = logging.getLogger()
39 FILTER_NAMES = os.environ.get('SPYDER_FILTER_LOG', "").split(',')
40 FILTER_NAMES = [f.strip() for f in FILTER_NAMES]
41
42
43 class Spy:
44 """
45 This is used to inject a 'spy' object in the internal console
46 namespace to inspect Spyder internals.
47
48 Attributes:
49 app Reference to main QApplication object
50 window Reference to spyder.MainWindow widget
51 """
52 def __init__(self, app, window):
53 self.app = app
54 self.window = window
55
56 def __dir__(self):
57 return (list(self.__dict__.keys()) +
58 [x for x in dir(self.__class__) if x[0] != '_'])
59
60
61 def get_python_doc_path():
62 """
63 Return Python documentation path
64 (Windows: return the PythonXX.chm path if available)
65 """
66 if os.name == 'nt':
67 doc_path = osp.join(sys.prefix, "Doc")
68 if not osp.isdir(doc_path):
69 return
70 python_chm = [path for path in os.listdir(doc_path)
71 if re.match(r"(?i)Python[0-9]{3,6}.chm", path)]
72 if python_chm:
73 return file_uri(osp.join(doc_path, python_chm[0]))
74 else:
75 vinf = sys.version_info
76 doc_path = '/usr/share/doc/python%d.%d/html' % (vinf[0], vinf[1])
77 python_doc = osp.join(doc_path, "index.html")
78 if osp.isfile(python_doc):
79 return file_uri(python_doc)
80
81
82 def set_opengl_implementation(option):
83 """
84 Set the OpenGL implementation used by Spyder.
85
86 See spyder-ide/spyder#7447 for the details.
87 """
88 if option == 'software':
89 QCoreApplication.setAttribute(Qt.AA_UseSoftwareOpenGL)
90 if QQuickWindow is not None:
91 QQuickWindow.setSceneGraphBackend(QSGRendererInterface.Software)
92 elif option == 'desktop':
93 QCoreApplication.setAttribute(Qt.AA_UseDesktopOpenGL)
94 if QQuickWindow is not None:
95 QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL)
96 elif option == 'gles':
97 QCoreApplication.setAttribute(Qt.AA_UseOpenGLES)
98 if QQuickWindow is not None:
99 QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL)
100
101
102 def setup_logging(cli_options):
103 """Setup logging with cli options defined by the user."""
104 if cli_options.debug_info or get_debug_level() > 0:
105 levels = {2: logging.INFO, 3: logging.DEBUG}
106 log_level = levels[get_debug_level()]
107 log_format = '%(asctime)s [%(levelname)s] [%(name)s] -> %(message)s'
108
109 console_filters = cli_options.filter_log.split(',')
110 console_filters = [x.strip() for x in console_filters]
111 console_filters = console_filters + FILTER_NAMES
112 console_filters = [x for x in console_filters if x != '']
113
114 handlers = [logging.StreamHandler()]
115 if cli_options.debug_output == 'file':
116 log_file = 'spyder-debug.log'
117 handlers.append(
118 logging.FileHandler(filename=log_file, mode='w+')
119 )
120 else:
121 log_file = None
122
123 match_func = lambda x: True
124 if console_filters != [''] and len(console_filters) > 0:
125 dafsa = DAFSA(console_filters)
126 match_func = lambda x: (dafsa.lookup(x, stop_on_prefix=True)
127 is not None)
128
129 formatter = logging.Formatter(log_format)
130
131 class ModuleFilter(logging.Filter):
132 """Filter messages based on module name prefix."""
133
134 def filter(self, record):
135 return match_func(record.name)
136
137 filter = ModuleFilter()
138 root_logger.setLevel(log_level)
139 for handler in handlers:
140 handler.addFilter(filter)
141 handler.setFormatter(formatter)
142 handler.setLevel(log_level)
143 root_logger.addHandler(handler)
144
145
146 def delete_lsp_log_files():
147 """Delete previous dead Spyder instances LSP log files."""
148 regex = re.compile(r'.*_.*_(\d+)[.]log')
149 files = glob.glob(osp.join(get_conf_path('lsp_logs'), '*.log'))
150 for f in files:
151 match = regex.match(f)
152 if match is not None:
153 pid = int(match.group(1))
154 if not psutil.pid_exists(pid):
155 os.remove(f)
156
157
158 def qt_message_handler(msg_type, msg_log_context, msg_string):
159 """
160 Qt warning messages are intercepted by this handler.
161
162 On some operating systems, warning messages might be displayed
163 even if the actual message does not apply. This filter adds a
164 blacklist for messages that are being printed for no apparent
165 reason. Anything else will get printed in the internal console.
166
167 In DEV mode, all messages are printed.
168 """
169 BLACKLIST = [
170 'QMainWidget::resizeDocks: all sizes need to be larger than 0',
171 ]
172 if DEV or msg_string not in BLACKLIST:
173 print(msg_string) # spyder: test-skip
174
175
176 def create_splash_screen():
177 """Create splash screen."""
178 if not running_under_pytest():
179 splash = QSplashScreen(QPixmap(get_image_path('splash')))
180 splash_font = splash.font()
181 splash_font.setPixelSize(14)
182 splash.setFont(splash_font)
183 else:
184 splash = None
185
186 return splash
187
188
189 def set_links_color(app):
190 """
191 Fix color for links.
192
193 This was taken from QDarkstyle, which is MIT licensed.
194 """
195 color = QStylePalette.COLOR_ACCENT_3
196 qcolor = QColor(color)
197
198 app_palette = app.palette()
199 app_palette.setColor(QPalette.Normal, QPalette.Link, qcolor)
200 app.setPalette(app_palette)
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/spyder/app/utils.py b/spyder/app/utils.py
--- a/spyder/app/utils.py
+++ b/spyder/app/utils.py
@@ -176,7 +176,9 @@
def create_splash_screen():
"""Create splash screen."""
if not running_under_pytest():
- splash = QSplashScreen(QPixmap(get_image_path('splash')))
+ pixmap = QPixmap(get_image_path('splash'))
+ splash = QSplashScreen(
+ pixmap.scaledToWidth(500, Qt.SmoothTransformation))
splash_font = splash.font()
splash_font.setPixelSize(14)
splash.setFont(splash_font)
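
As a side note on the technique: the patch relies on `QPixmap.scaledToWidth` with smooth transformation so the splash never exceeds a fixed width, whatever the source image size. A minimal standalone sketch (not Spyder code; the 500 px width and the `splash.png` path are assumptions for illustration):

```python
# Minimal sketch: cap a splash image at an assumed width of 500 px,
# independent of the source image resolution.
import sys

from qtpy.QtCore import Qt
from qtpy.QtGui import QPixmap
from qtpy.QtWidgets import QApplication, QSplashScreen

app = QApplication(sys.argv)

pixmap = QPixmap("splash.png")  # hypothetical image path
scaled = pixmap.scaledToWidth(500, Qt.SmoothTransformation)

splash = QSplashScreen(scaled)
splash.show()
# ... do application startup work here, then:
splash.close()
```

Scaling the pixmap before constructing `QSplashScreen` keeps the widget itself small, which is what matters on a 1366 x 768 display.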
|
{"golden_diff": "diff --git a/spyder/app/utils.py b/spyder/app/utils.py\n--- a/spyder/app/utils.py\n+++ b/spyder/app/utils.py\n@@ -176,7 +176,9 @@\n def create_splash_screen():\n \"\"\"Create splash screen.\"\"\"\n if not running_under_pytest():\n- splash = QSplashScreen(QPixmap(get_image_path('splash')))\n+ pixmap = QPixmap(get_image_path('splash'))\n+ splash = QSplashScreen(\n+ pixmap.scaledToWidth(500, Qt.SmoothTransformation))\n splash_font = splash.font()\n splash_font.setPixelSize(14)\n splash.setFont(splash_font)\n", "issue": "Splash screen can take a lot of space in low resolution screens\nWhen starting Spyder in my laptop screen (1366 x 768) the splash looks like this (kind of big):\r\n\r\n\r\n\r\n\r\nHowever in my secondary screen (1920\u00d71080) the splash looks better:\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"Utility functions for the Spyder application.\"\"\"\n\n# Standard library imports\nimport glob\nimport logging\nimport os\nimport os.path as osp\nimport re\nimport sys\n\n# Third-party imports\nimport psutil\nfrom qtpy.QtCore import QCoreApplication, Qt\nfrom qtpy.QtGui import QColor, QPalette, QPixmap\nfrom qtpy.QtWidgets import QSplashScreen\n\n# Local imports\nfrom spyder.config.base import (DEV, get_conf_path, get_debug_level,\n running_under_pytest)\nfrom spyder.utils.image_path_manager import get_image_path\nfrom spyder.utils.qthelpers import file_uri\nfrom spyder.utils.external.dafsa.dafsa import DAFSA\nfrom spyder.utils.stylesheet import QStylePalette\n\n# For spyder-ide/spyder#7447.\ntry:\n from qtpy.QtQuick import QQuickWindow, QSGRendererInterface\nexcept Exception:\n QQuickWindow = QSGRendererInterface = None\n\n\nroot_logger = logging.getLogger()\nFILTER_NAMES = os.environ.get('SPYDER_FILTER_LOG', \"\").split(',')\nFILTER_NAMES = [f.strip() for f in FILTER_NAMES]\n\n\nclass Spy:\n \"\"\"\n This is used to inject a 'spy' object in the internal console\n namespace to inspect Spyder internals.\n\n Attributes:\n app Reference to main QApplication object\n window Reference to spyder.MainWindow widget\n \"\"\"\n def __init__(self, app, window):\n self.app = app\n self.window = window\n\n def __dir__(self):\n return (list(self.__dict__.keys()) +\n [x for x in dir(self.__class__) if x[0] != '_'])\n\n\ndef get_python_doc_path():\n \"\"\"\n Return Python documentation path\n (Windows: return the PythonXX.chm path if available)\n \"\"\"\n if os.name == 'nt':\n doc_path = osp.join(sys.prefix, \"Doc\")\n if not osp.isdir(doc_path):\n return\n python_chm = [path for path in os.listdir(doc_path)\n if re.match(r\"(?i)Python[0-9]{3,6}.chm\", path)]\n if python_chm:\n return file_uri(osp.join(doc_path, python_chm[0]))\n else:\n vinf = sys.version_info\n doc_path = '/usr/share/doc/python%d.%d/html' % (vinf[0], vinf[1])\n python_doc = osp.join(doc_path, \"index.html\")\n if osp.isfile(python_doc):\n return file_uri(python_doc)\n\n\ndef set_opengl_implementation(option):\n \"\"\"\n Set the OpenGL implementation used by Spyder.\n\n See spyder-ide/spyder#7447 for the details.\n \"\"\"\n if option == 'software':\n QCoreApplication.setAttribute(Qt.AA_UseSoftwareOpenGL)\n if QQuickWindow is not None:\n QQuickWindow.setSceneGraphBackend(QSGRendererInterface.Software)\n elif option == 'desktop':\n QCoreApplication.setAttribute(Qt.AA_UseDesktopOpenGL)\n if QQuickWindow is not None:\n 
QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL)\n elif option == 'gles':\n QCoreApplication.setAttribute(Qt.AA_UseOpenGLES)\n if QQuickWindow is not None:\n QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL)\n\n\ndef setup_logging(cli_options):\n \"\"\"Setup logging with cli options defined by the user.\"\"\"\n if cli_options.debug_info or get_debug_level() > 0:\n levels = {2: logging.INFO, 3: logging.DEBUG}\n log_level = levels[get_debug_level()]\n log_format = '%(asctime)s [%(levelname)s] [%(name)s] -> %(message)s'\n\n console_filters = cli_options.filter_log.split(',')\n console_filters = [x.strip() for x in console_filters]\n console_filters = console_filters + FILTER_NAMES\n console_filters = [x for x in console_filters if x != '']\n\n handlers = [logging.StreamHandler()]\n if cli_options.debug_output == 'file':\n log_file = 'spyder-debug.log'\n handlers.append(\n logging.FileHandler(filename=log_file, mode='w+')\n )\n else:\n log_file = None\n\n match_func = lambda x: True\n if console_filters != [''] and len(console_filters) > 0:\n dafsa = DAFSA(console_filters)\n match_func = lambda x: (dafsa.lookup(x, stop_on_prefix=True)\n is not None)\n\n formatter = logging.Formatter(log_format)\n\n class ModuleFilter(logging.Filter):\n \"\"\"Filter messages based on module name prefix.\"\"\"\n\n def filter(self, record):\n return match_func(record.name)\n\n filter = ModuleFilter()\n root_logger.setLevel(log_level)\n for handler in handlers:\n handler.addFilter(filter)\n handler.setFormatter(formatter)\n handler.setLevel(log_level)\n root_logger.addHandler(handler)\n\n\ndef delete_lsp_log_files():\n \"\"\"Delete previous dead Spyder instances LSP log files.\"\"\"\n regex = re.compile(r'.*_.*_(\\d+)[.]log')\n files = glob.glob(osp.join(get_conf_path('lsp_logs'), '*.log'))\n for f in files:\n match = regex.match(f)\n if match is not None:\n pid = int(match.group(1))\n if not psutil.pid_exists(pid):\n os.remove(f)\n\n\ndef qt_message_handler(msg_type, msg_log_context, msg_string):\n \"\"\"\n Qt warning messages are intercepted by this handler.\n\n On some operating systems, warning messages might be displayed\n even if the actual message does not apply. This filter adds a\n blacklist for messages that are being printed for no apparent\n reason. 
Anything else will get printed in the internal console.\n\n In DEV mode, all messages are printed.\n \"\"\"\n BLACKLIST = [\n 'QMainWidget::resizeDocks: all sizes need to be larger than 0',\n ]\n if DEV or msg_string not in BLACKLIST:\n print(msg_string) # spyder: test-skip\n\n\ndef create_splash_screen():\n \"\"\"Create splash screen.\"\"\"\n if not running_under_pytest():\n splash = QSplashScreen(QPixmap(get_image_path('splash')))\n splash_font = splash.font()\n splash_font.setPixelSize(14)\n splash.setFont(splash_font)\n else:\n splash = None\n\n return splash\n\n\ndef set_links_color(app):\n \"\"\"\n Fix color for links.\n\n This was taken from QDarkstyle, which is MIT licensed.\n \"\"\"\n color = QStylePalette.COLOR_ACCENT_3\n qcolor = QColor(color)\n\n app_palette = app.palette()\n app_palette.setColor(QPalette.Normal, QPalette.Link, qcolor)\n app.setPalette(app_palette)\n", "path": "spyder/app/utils.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"Utility functions for the Spyder application.\"\"\"\n\n# Standard library imports\nimport glob\nimport logging\nimport os\nimport os.path as osp\nimport re\nimport sys\n\n# Third-party imports\nimport psutil\nfrom qtpy.QtCore import QCoreApplication, Qt\nfrom qtpy.QtGui import QColor, QPalette, QPixmap\nfrom qtpy.QtWidgets import QSplashScreen\n\n# Local imports\nfrom spyder.config.base import (DEV, get_conf_path, get_debug_level,\n running_under_pytest)\nfrom spyder.utils.image_path_manager import get_image_path\nfrom spyder.utils.qthelpers import file_uri\nfrom spyder.utils.external.dafsa.dafsa import DAFSA\nfrom spyder.utils.stylesheet import QStylePalette\n\n# For spyder-ide/spyder#7447.\ntry:\n from qtpy.QtQuick import QQuickWindow, QSGRendererInterface\nexcept Exception:\n QQuickWindow = QSGRendererInterface = None\n\n\nroot_logger = logging.getLogger()\nFILTER_NAMES = os.environ.get('SPYDER_FILTER_LOG', \"\").split(',')\nFILTER_NAMES = [f.strip() for f in FILTER_NAMES]\n\n\nclass Spy:\n \"\"\"\n This is used to inject a 'spy' object in the internal console\n namespace to inspect Spyder internals.\n\n Attributes:\n app Reference to main QApplication object\n window Reference to spyder.MainWindow widget\n \"\"\"\n def __init__(self, app, window):\n self.app = app\n self.window = window\n\n def __dir__(self):\n return (list(self.__dict__.keys()) +\n [x for x in dir(self.__class__) if x[0] != '_'])\n\n\ndef get_python_doc_path():\n \"\"\"\n Return Python documentation path\n (Windows: return the PythonXX.chm path if available)\n \"\"\"\n if os.name == 'nt':\n doc_path = osp.join(sys.prefix, \"Doc\")\n if not osp.isdir(doc_path):\n return\n python_chm = [path for path in os.listdir(doc_path)\n if re.match(r\"(?i)Python[0-9]{3,6}.chm\", path)]\n if python_chm:\n return file_uri(osp.join(doc_path, python_chm[0]))\n else:\n vinf = sys.version_info\n doc_path = '/usr/share/doc/python%d.%d/html' % (vinf[0], vinf[1])\n python_doc = osp.join(doc_path, \"index.html\")\n if osp.isfile(python_doc):\n return file_uri(python_doc)\n\n\ndef set_opengl_implementation(option):\n \"\"\"\n Set the OpenGL implementation used by Spyder.\n\n See spyder-ide/spyder#7447 for the details.\n \"\"\"\n if option == 'software':\n QCoreApplication.setAttribute(Qt.AA_UseSoftwareOpenGL)\n if QQuickWindow is not None:\n QQuickWindow.setSceneGraphBackend(QSGRendererInterface.Software)\n elif option == 'desktop':\n 
QCoreApplication.setAttribute(Qt.AA_UseDesktopOpenGL)\n if QQuickWindow is not None:\n QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL)\n elif option == 'gles':\n QCoreApplication.setAttribute(Qt.AA_UseOpenGLES)\n if QQuickWindow is not None:\n QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL)\n\n\ndef setup_logging(cli_options):\n \"\"\"Setup logging with cli options defined by the user.\"\"\"\n if cli_options.debug_info or get_debug_level() > 0:\n levels = {2: logging.INFO, 3: logging.DEBUG}\n log_level = levels[get_debug_level()]\n log_format = '%(asctime)s [%(levelname)s] [%(name)s] -> %(message)s'\n\n console_filters = cli_options.filter_log.split(',')\n console_filters = [x.strip() for x in console_filters]\n console_filters = console_filters + FILTER_NAMES\n console_filters = [x for x in console_filters if x != '']\n\n handlers = [logging.StreamHandler()]\n if cli_options.debug_output == 'file':\n log_file = 'spyder-debug.log'\n handlers.append(\n logging.FileHandler(filename=log_file, mode='w+')\n )\n else:\n log_file = None\n\n match_func = lambda x: True\n if console_filters != [''] and len(console_filters) > 0:\n dafsa = DAFSA(console_filters)\n match_func = lambda x: (dafsa.lookup(x, stop_on_prefix=True)\n is not None)\n\n formatter = logging.Formatter(log_format)\n\n class ModuleFilter(logging.Filter):\n \"\"\"Filter messages based on module name prefix.\"\"\"\n\n def filter(self, record):\n return match_func(record.name)\n\n filter = ModuleFilter()\n root_logger.setLevel(log_level)\n for handler in handlers:\n handler.addFilter(filter)\n handler.setFormatter(formatter)\n handler.setLevel(log_level)\n root_logger.addHandler(handler)\n\n\ndef delete_lsp_log_files():\n \"\"\"Delete previous dead Spyder instances LSP log files.\"\"\"\n regex = re.compile(r'.*_.*_(\\d+)[.]log')\n files = glob.glob(osp.join(get_conf_path('lsp_logs'), '*.log'))\n for f in files:\n match = regex.match(f)\n if match is not None:\n pid = int(match.group(1))\n if not psutil.pid_exists(pid):\n os.remove(f)\n\n\ndef qt_message_handler(msg_type, msg_log_context, msg_string):\n \"\"\"\n Qt warning messages are intercepted by this handler.\n\n On some operating systems, warning messages might be displayed\n even if the actual message does not apply. This filter adds a\n blacklist for messages that are being printed for no apparent\n reason. Anything else will get printed in the internal console.\n\n In DEV mode, all messages are printed.\n \"\"\"\n BLACKLIST = [\n 'QMainWidget::resizeDocks: all sizes need to be larger than 0',\n ]\n if DEV or msg_string not in BLACKLIST:\n print(msg_string) # spyder: test-skip\n\n\ndef create_splash_screen():\n \"\"\"Create splash screen.\"\"\"\n if not running_under_pytest():\n pixmap = QPixmap(get_image_path('splash'))\n splash = QSplashScreen(\n pixmap.scaledToWidth(500, Qt.SmoothTransformation))\n splash_font = splash.font()\n splash_font.setPixelSize(14)\n splash.setFont(splash_font)\n else:\n splash = None\n\n return splash\n\n\ndef set_links_color(app):\n \"\"\"\n Fix color for links.\n\n This was taken from QDarkstyle, which is MIT licensed.\n \"\"\"\n color = QStylePalette.COLOR_ACCENT_3\n qcolor = QColor(color)\n\n app_palette = app.palette()\n app_palette.setColor(QPalette.Normal, QPalette.Link, qcolor)\n app.setPalette(app_palette)\n", "path": "spyder/app/utils.py"}]}
| 2,448 | 145 |
gh_patches_debug_32530
|
rasdani/github-patches
|
git_diff
|
strawberry-graphql__strawberry-2744
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
new strawberry.lazy not compatible with from __future__ import annotations
# Description
The new way of lazy referencing via strawberry.lazy is not compatible with
`from __future__ import annotations`.
It would be nice to have compatibility, since typing.Self is not available for Python <= 3.10 (which is the main reason to use `from __future__ import annotations`).
--- END ISSUE ---
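To make the failure mode concrete, here is a minimal sketch of the pattern that breaks; the module and type names (`other_module`, `OtherType`) are invented for illustration. With the future import, the whole annotation is stored as a string, so the resolver receives one big `ForwardRef` for the entire `Annotated[...]` expression instead of a real `Annotated` object carrying the lazy reference:

```python
from __future__ import annotations

from typing import TYPE_CHECKING, Annotated

import strawberry

if TYPE_CHECKING:
    from other_module import OtherType  # hypothetical module


@strawberry.type
class Query:
    # Under `from __future__ import annotations` this annotation is kept as
    # the *string* 'Annotated["OtherType", strawberry.lazy("other_module")]',
    # which is what eval_type() later has to untangle.
    other: Annotated["OtherType", strawberry.lazy("other_module")]
```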
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/utils/typing.py`
Content:
```
1 import ast
2 import sys
3 import typing
4 from collections.abc import AsyncGenerator
5 from functools import lru_cache
6 from typing import ( # type: ignore
7 TYPE_CHECKING,
8 Any,
9 Callable,
10 ClassVar,
11 Dict,
12 ForwardRef,
13 Generic,
14 Optional,
15 Tuple,
16 Type,
17 TypeVar,
18 Union,
19 _eval_type,
20 _GenericAlias,
21 _SpecialForm,
22 cast,
23 overload,
24 )
25 from typing_extensions import Annotated, get_args, get_origin
26
27 ast_unparse = getattr(ast, "unparse", None)
28 # ast.unparse is only available on python 3.9+. For older versions we will
29 # use `astunparse.unparse`.
30 # We are also using "not TYPE_CHECKING" here because mypy gives an erorr
31 # on tests because "astunparse" is missing stubs, but the mypy action says
32 # that the comment is unused.
33 if not TYPE_CHECKING and ast_unparse is None:
34 import astunparse
35
36 ast_unparse = astunparse.unparse
37
38
39 @lru_cache()
40 def get_generic_alias(type_: Type) -> Type:
41 """Get the generic alias for a type.
42
43 Given a type, its generic alias from `typing` module will be returned
44 if it exists. For example:
45
46 >>> get_generic_alias(list)
47 typing.List
48 >>> get_generic_alias(dict)
49 typing.Dict
50
51 This is mostly useful for python versions prior to 3.9, to get a version
52 of a concrete type which supports `__class_getitem__`. In 3.9+ types like
53 `list`/`dict`/etc are subscriptable and can be used directly instead
54 of their generic alias version.
55 """
56 if isinstance(type_, _SpecialForm):
57 return type_
58
59 for attr_name in dir(typing):
60 # ignore private attributes, they are not Generic aliases
61 if attr_name.startswith("_"): # pragma: no cover
62 continue
63
64 attr = getattr(typing, attr_name)
65 # _GenericAlias overrides all the methods that we can use to know if
66 # this is a subclass of it. But if it has an "_inst" attribute
67 # then it for sure is a _GenericAlias
68 if hasattr(attr, "_inst") and attr.__origin__ is type_:
69 return attr
70
71 raise AssertionError(f"No GenericAlias available for {type_}") # pragma: no cover
72
73
74 def is_list(annotation: object) -> bool:
75 """Returns True if annotation is a List"""
76
77 annotation_origin = getattr(annotation, "__origin__", None)
78
79 return annotation_origin == list
80
81
82 def is_union(annotation: object) -> bool:
83 """Returns True if annotation is a Union"""
84
85 # this check is needed because unions declared with the new syntax `A | B`
86 # don't have a `__origin__` property on them, but they are instances of
87 # `UnionType`, which is only available in Python 3.10+
88 if sys.version_info >= (3, 10):
89 from types import UnionType
90
91 if isinstance(annotation, UnionType):
92 return True
93
94 # unions declared as Union[A, B] fall through to this check, even on python 3.10+
95
96 annotation_origin = getattr(annotation, "__origin__", None)
97
98 return annotation_origin == Union
99
100
101 def is_optional(annotation: Type) -> bool:
102 """Returns True if the annotation is Optional[SomeType]"""
103
104 # Optionals are represented as unions
105
106 if not is_union(annotation):
107 return False
108
109 types = annotation.__args__
110
111 # A Union to be optional needs to have at least one None type
112 return any(x == None.__class__ for x in types)
113
114
115 def get_optional_annotation(annotation: Type) -> Type:
116 types = annotation.__args__
117
118 non_none_types = tuple(x for x in types if x != None.__class__)
119
120 # if we have multiple non none types we want to return a copy of this
121 # type (normally a Union type).
122
123 if len(non_none_types) > 1:
124 return annotation.copy_with(non_none_types)
125
126 return non_none_types[0]
127
128
129 def get_list_annotation(annotation: Type) -> Type:
130 return annotation.__args__[0]
131
132
133 def is_concrete_generic(annotation: type) -> bool:
134 ignored_generics = (list, tuple, Union, ClassVar, AsyncGenerator)
135 return (
136 isinstance(annotation, _GenericAlias)
137 and annotation.__origin__ not in ignored_generics
138 )
139
140
141 def is_generic_subclass(annotation: type) -> bool:
142 return isinstance(annotation, type) and issubclass(
143 annotation, Generic # type:ignore
144 )
145
146
147 def is_generic(annotation: type) -> bool:
148 """Returns True if the annotation is or extends a generic."""
149
150 return (
151 # TODO: These two lines appear to have the same effect. When will an
152 # annotation have parameters but not satisfy the first condition?
153 (is_generic_subclass(annotation) or is_concrete_generic(annotation))
154 and bool(get_parameters(annotation))
155 )
156
157
158 def is_type_var(annotation: Type) -> bool:
159 """Returns True if the annotation is a TypeVar."""
160
161 return isinstance(annotation, TypeVar)
162
163
164 def get_parameters(annotation: Type) -> Union[Tuple[object], Tuple[()]]:
165 if (
166 isinstance(annotation, _GenericAlias)
167 or isinstance(annotation, type)
168 and issubclass(annotation, Generic) # type:ignore
169 and annotation is not Generic
170 ):
171 return annotation.__parameters__
172 else:
173 return () # pragma: no cover
174
175
176 @overload
177 def _ast_replace_union_operation(expr: ast.expr) -> ast.expr:
178 ...
179
180
181 @overload
182 def _ast_replace_union_operation(expr: ast.Expr) -> ast.Expr:
183 ...
184
185
186 def _ast_replace_union_operation(
187 expr: Union[ast.Expr, ast.expr]
188 ) -> Union[ast.Expr, ast.expr]:
189 if isinstance(expr, ast.Expr) and isinstance(
190 expr.value, (ast.BinOp, ast.Subscript)
191 ):
192 expr = ast.Expr(_ast_replace_union_operation(expr.value))
193 elif isinstance(expr, ast.BinOp):
194 left = _ast_replace_union_operation(expr.left)
195 right = _ast_replace_union_operation(expr.right)
196 expr = ast.Subscript(
197 ast.Name(id="Union"),
198 ast.Tuple([left, right], ast.Load()),
199 ast.Load(),
200 )
201 elif isinstance(expr, ast.Tuple):
202 expr = ast.Tuple(
203 [_ast_replace_union_operation(elt) for elt in expr.elts],
204 ast.Load(),
205 )
206 elif isinstance(expr, ast.Subscript):
207 if hasattr(ast, "Index") and isinstance(expr.slice, ast.Index):
208 expr = ast.Subscript(
209 expr.value,
210 # The cast is required for mypy on python 3.7 and 3.8
211 ast.Index(_ast_replace_union_operation(cast(Any, expr.slice).value)),
212 ast.Load(),
213 )
214 elif isinstance(expr.slice, (ast.BinOp, ast.Tuple)):
215 expr = ast.Subscript(
216 expr.value,
217 _ast_replace_union_operation(expr.slice),
218 ast.Load(),
219 )
220
221 return expr
222
223
224 def eval_type(
225 type_: Any,
226 globalns: Optional[Dict] = None,
227 localns: Optional[Dict] = None,
228 ) -> Type:
229 """Evaluates a type, resolving forward references."""
230 from strawberry.auto import StrawberryAuto
231 from strawberry.lazy_type import StrawberryLazyReference
232 from strawberry.private import StrawberryPrivate
233
234 globalns = globalns or {}
235 # If this is not a string, maybe its args are (e.g. List["Foo"])
236 if isinstance(type_, ForwardRef):
237 # For Python 3.10+, we can use the built-in _eval_type function directly.
238 # It will handle "|" notations properly
239 if sys.version_info < (3, 10):
240 parsed = _ast_replace_union_operation(
241 cast(ast.Expr, ast.parse(type_.__forward_arg__).body[0])
242 )
243
244 # We replaced "a | b" with "Union[a, b], so make sure Union can be resolved
245 # at globalns because it may not be there
246 if "Union" not in globalns:
247 globalns["Union"] = Union
248
249 assert ast_unparse
250 type_ = ForwardRef(ast_unparse(parsed))
251
252 return _eval_type(type_, globalns, localns)
253
254 origin = get_origin(type_)
255 if origin is not None:
256 args = get_args(type_)
257 if origin is Annotated:
258 for arg in args[1:]:
259 if isinstance(arg, StrawberryPrivate):
260 return type_
261
262 if isinstance(arg, StrawberryLazyReference):
263 remaining_args = [
264 a
265 for a in args[1:]
266 if not isinstance(arg, StrawberryLazyReference)
267 ]
268 args = (arg.resolve_forward_ref(args[0]), *remaining_args)
269 break
270 if isinstance(arg, StrawberryAuto):
271 remaining_args = [
272 a for a in args[1:] if not isinstance(arg, StrawberryAuto)
273 ]
274 args = (arg, *remaining_args)
275 break
276
277 # If we have only a StrawberryLazyReference and no more annotations,
278 # we need to return the argument directly because Annotated
279 # will raise an error if trying to instantiate it with only
280 # one argument.
281 if len(args) == 1:
282 return args[0]
283
284 # python 3.10 will return UnionType for origin, and it cannot be
285 # subscripted like Union[Foo, Bar]
286 if sys.version_info >= (3, 10):
287 from types import UnionType
288
289 if origin is UnionType:
290 origin = Union
291
292 # Future annotations in older versions will eval generic aliases to their
293 # real types (i.e. List[foo] will have its origin set to list instead
294 # of List). If that type is not subscriptable, retrieve its generic
295 # alias version instead.
296 if sys.version_info < (3, 9) and not hasattr(origin, "__class_getitem__"):
297 origin = get_generic_alias(origin)
298
299 type_ = (
300 origin[tuple(eval_type(a, globalns, localns) for a in args)]
301 if args
302 else origin
303 )
304
305 return type_
306
307
308 _T = TypeVar("_T")
309
310
311 def __dataclass_transform__(
312 *,
313 eq_default: bool = True,
314 order_default: bool = False,
315 kw_only_default: bool = False,
316 field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (()),
317 ) -> Callable[[_T], _T]:
318 return lambda a: a
319
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/strawberry/utils/typing.py b/strawberry/utils/typing.py
--- a/strawberry/utils/typing.py
+++ b/strawberry/utils/typing.py
@@ -1,4 +1,5 @@
import ast
+import re
import sys
import typing
from collections.abc import AsyncGenerator
@@ -221,6 +222,11 @@
return expr
+_annotated_re = re.compile(
+ r"(Annotated\[)(?P<type>\w*),(?P<args>.*)(\])",
+)
+
+
def eval_type(
type_: Any,
globalns: Optional[Dict] = None,
@@ -249,6 +255,37 @@
assert ast_unparse
type_ = ForwardRef(ast_unparse(parsed))
+ # When using forward refs, the whole
+ # Annotated[SomeType, strabwerry.lazy("type.module")] is a forward ref,
+ # and trying to _eval_type on it will fail. Take a different approach
+ # here to resolve lazy types by execing the annotated args and resolving
+ # the type directly.
+ annotated_match = _annotated_re.search(type_.__forward_arg__)
+ if annotated_match:
+ gdict = annotated_match.groupdict()
+ # FIXME: Eval the remaining annotated args to get their real values
+ # We might want to refactor how we import lazy modules to avoid having
+ # to eval the code in here
+ args = eval(f'({gdict["args"]}, )', globalns, localns) # noqa: PGH001
+ lazy_ref = next(
+ (arg for arg in args if isinstance(arg, StrawberryLazyReference)),
+ None,
+ )
+ if lazy_ref is not None:
+ remaining = [
+ a for a in args if not isinstance(a, StrawberryLazyReference)
+ ]
+ type_ = lazy_ref.resolve_forward_ref(ForwardRef(gdict["type"]))
+ # If we only had a StrawberryLazyReference, we can return the type
+ # directly. It already did its job!
+ if not remaining:
+ return type_
+
+ # Otherwise return the type annotated with the remaining annotations
+ return Annotated.__class_getitem__( # type: ignore
+ (type_, *remaining),
+ )
+
return _eval_type(type_, globalns, localns)
origin = get_origin(type_)
|
{"golden_diff": "diff --git a/strawberry/utils/typing.py b/strawberry/utils/typing.py\n--- a/strawberry/utils/typing.py\n+++ b/strawberry/utils/typing.py\n@@ -1,4 +1,5 @@\n import ast\n+import re\n import sys\n import typing\n from collections.abc import AsyncGenerator\n@@ -221,6 +222,11 @@\n return expr\n \n \n+_annotated_re = re.compile(\n+ r\"(Annotated\\[)(?P<type>\\w*),(?P<args>.*)(\\])\",\n+)\n+\n+\n def eval_type(\n type_: Any,\n globalns: Optional[Dict] = None,\n@@ -249,6 +255,37 @@\n assert ast_unparse\n type_ = ForwardRef(ast_unparse(parsed))\n \n+ # When using forward refs, the whole\n+ # Annotated[SomeType, strabwerry.lazy(\"type.module\")] is a forward ref,\n+ # and trying to _eval_type on it will fail. Take a different approach\n+ # here to resolve lazy types by execing the annotated args and resolving\n+ # the type directly.\n+ annotated_match = _annotated_re.search(type_.__forward_arg__)\n+ if annotated_match:\n+ gdict = annotated_match.groupdict()\n+ # FIXME: Eval the remaining annotated args to get their real values\n+ # We might want to refactor how we import lazy modules to avoid having\n+ # to eval the code in here\n+ args = eval(f'({gdict[\"args\"]}, )', globalns, localns) # noqa: PGH001\n+ lazy_ref = next(\n+ (arg for arg in args if isinstance(arg, StrawberryLazyReference)),\n+ None,\n+ )\n+ if lazy_ref is not None:\n+ remaining = [\n+ a for a in args if not isinstance(a, StrawberryLazyReference)\n+ ]\n+ type_ = lazy_ref.resolve_forward_ref(ForwardRef(gdict[\"type\"]))\n+ # If we only had a StrawberryLazyReference, we can return the type\n+ # directly. It already did its job!\n+ if not remaining:\n+ return type_\n+\n+ # Otherwise return the type annotated with the remaining annotations\n+ return Annotated.__class_getitem__( # type: ignore\n+ (type_, *remaining),\n+ )\n+\n return _eval_type(type_, globalns, localns)\n \n origin = get_origin(type_)\n", "issue": "new strawberry.lazy not compatible with from __future__ import annotations\n# Description\r\n\r\nThe new way of lazy referencing via strawberry.lazy is not compatible with \r\n`from __future__ import annotations`\r\n\r\nIt would be nice to have compatibility as typing.Self is not available for python <=3.10 (the main reason to have `from __future__ import annotations`)\n", "before_files": [{"content": "import ast\nimport sys\nimport typing\nfrom collections.abc import AsyncGenerator\nfrom functools import lru_cache\nfrom typing import ( # type: ignore\n TYPE_CHECKING,\n Any,\n Callable,\n ClassVar,\n Dict,\n ForwardRef,\n Generic,\n Optional,\n Tuple,\n Type,\n TypeVar,\n Union,\n _eval_type,\n _GenericAlias,\n _SpecialForm,\n cast,\n overload,\n)\nfrom typing_extensions import Annotated, get_args, get_origin\n\nast_unparse = getattr(ast, \"unparse\", None)\n# ast.unparse is only available on python 3.9+. For older versions we will\n# use `astunparse.unparse`.\n# We are also using \"not TYPE_CHECKING\" here because mypy gives an erorr\n# on tests because \"astunparse\" is missing stubs, but the mypy action says\n# that the comment is unused.\nif not TYPE_CHECKING and ast_unparse is None:\n import astunparse\n\n ast_unparse = astunparse.unparse\n\n\n@lru_cache()\ndef get_generic_alias(type_: Type) -> Type:\n \"\"\"Get the generic alias for a type.\n\n Given a type, its generic alias from `typing` module will be returned\n if it exists. 
For example:\n\n >>> get_generic_alias(list)\n typing.List\n >>> get_generic_alias(dict)\n typing.Dict\n\n This is mostly useful for python versions prior to 3.9, to get a version\n of a concrete type which supports `__class_getitem__`. In 3.9+ types like\n `list`/`dict`/etc are subscriptable and can be used directly instead\n of their generic alias version.\n \"\"\"\n if isinstance(type_, _SpecialForm):\n return type_\n\n for attr_name in dir(typing):\n # ignore private attributes, they are not Generic aliases\n if attr_name.startswith(\"_\"): # pragma: no cover\n continue\n\n attr = getattr(typing, attr_name)\n # _GenericAlias overrides all the methods that we can use to know if\n # this is a subclass of it. But if it has an \"_inst\" attribute\n # then it for sure is a _GenericAlias\n if hasattr(attr, \"_inst\") and attr.__origin__ is type_:\n return attr\n\n raise AssertionError(f\"No GenericAlias available for {type_}\") # pragma: no cover\n\n\ndef is_list(annotation: object) -> bool:\n \"\"\"Returns True if annotation is a List\"\"\"\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n return annotation_origin == list\n\n\ndef is_union(annotation: object) -> bool:\n \"\"\"Returns True if annotation is a Union\"\"\"\n\n # this check is needed because unions declared with the new syntax `A | B`\n # don't have a `__origin__` property on them, but they are instances of\n # `UnionType`, which is only available in Python 3.10+\n if sys.version_info >= (3, 10):\n from types import UnionType\n\n if isinstance(annotation, UnionType):\n return True\n\n # unions declared as Union[A, B] fall through to this check, even on python 3.10+\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n return annotation_origin == Union\n\n\ndef is_optional(annotation: Type) -> bool:\n \"\"\"Returns True if the annotation is Optional[SomeType]\"\"\"\n\n # Optionals are represented as unions\n\n if not is_union(annotation):\n return False\n\n types = annotation.__args__\n\n # A Union to be optional needs to have at least one None type\n return any(x == None.__class__ for x in types)\n\n\ndef get_optional_annotation(annotation: Type) -> Type:\n types = annotation.__args__\n\n non_none_types = tuple(x for x in types if x != None.__class__)\n\n # if we have multiple non none types we want to return a copy of this\n # type (normally a Union type).\n\n if len(non_none_types) > 1:\n return annotation.copy_with(non_none_types)\n\n return non_none_types[0]\n\n\ndef get_list_annotation(annotation: Type) -> Type:\n return annotation.__args__[0]\n\n\ndef is_concrete_generic(annotation: type) -> bool:\n ignored_generics = (list, tuple, Union, ClassVar, AsyncGenerator)\n return (\n isinstance(annotation, _GenericAlias)\n and annotation.__origin__ not in ignored_generics\n )\n\n\ndef is_generic_subclass(annotation: type) -> bool:\n return isinstance(annotation, type) and issubclass(\n annotation, Generic # type:ignore\n )\n\n\ndef is_generic(annotation: type) -> bool:\n \"\"\"Returns True if the annotation is or extends a generic.\"\"\"\n\n return (\n # TODO: These two lines appear to have the same effect. 
When will an\n # annotation have parameters but not satisfy the first condition?\n (is_generic_subclass(annotation) or is_concrete_generic(annotation))\n and bool(get_parameters(annotation))\n )\n\n\ndef is_type_var(annotation: Type) -> bool:\n \"\"\"Returns True if the annotation is a TypeVar.\"\"\"\n\n return isinstance(annotation, TypeVar)\n\n\ndef get_parameters(annotation: Type) -> Union[Tuple[object], Tuple[()]]:\n if (\n isinstance(annotation, _GenericAlias)\n or isinstance(annotation, type)\n and issubclass(annotation, Generic) # type:ignore\n and annotation is not Generic\n ):\n return annotation.__parameters__\n else:\n return () # pragma: no cover\n\n\n@overload\ndef _ast_replace_union_operation(expr: ast.expr) -> ast.expr:\n ...\n\n\n@overload\ndef _ast_replace_union_operation(expr: ast.Expr) -> ast.Expr:\n ...\n\n\ndef _ast_replace_union_operation(\n expr: Union[ast.Expr, ast.expr]\n) -> Union[ast.Expr, ast.expr]:\n if isinstance(expr, ast.Expr) and isinstance(\n expr.value, (ast.BinOp, ast.Subscript)\n ):\n expr = ast.Expr(_ast_replace_union_operation(expr.value))\n elif isinstance(expr, ast.BinOp):\n left = _ast_replace_union_operation(expr.left)\n right = _ast_replace_union_operation(expr.right)\n expr = ast.Subscript(\n ast.Name(id=\"Union\"),\n ast.Tuple([left, right], ast.Load()),\n ast.Load(),\n )\n elif isinstance(expr, ast.Tuple):\n expr = ast.Tuple(\n [_ast_replace_union_operation(elt) for elt in expr.elts],\n ast.Load(),\n )\n elif isinstance(expr, ast.Subscript):\n if hasattr(ast, \"Index\") and isinstance(expr.slice, ast.Index):\n expr = ast.Subscript(\n expr.value,\n # The cast is required for mypy on python 3.7 and 3.8\n ast.Index(_ast_replace_union_operation(cast(Any, expr.slice).value)),\n ast.Load(),\n )\n elif isinstance(expr.slice, (ast.BinOp, ast.Tuple)):\n expr = ast.Subscript(\n expr.value,\n _ast_replace_union_operation(expr.slice),\n ast.Load(),\n )\n\n return expr\n\n\ndef eval_type(\n type_: Any,\n globalns: Optional[Dict] = None,\n localns: Optional[Dict] = None,\n) -> Type:\n \"\"\"Evaluates a type, resolving forward references.\"\"\"\n from strawberry.auto import StrawberryAuto\n from strawberry.lazy_type import StrawberryLazyReference\n from strawberry.private import StrawberryPrivate\n\n globalns = globalns or {}\n # If this is not a string, maybe its args are (e.g. 
List[\"Foo\"])\n if isinstance(type_, ForwardRef):\n # For Python 3.10+, we can use the built-in _eval_type function directly.\n # It will handle \"|\" notations properly\n if sys.version_info < (3, 10):\n parsed = _ast_replace_union_operation(\n cast(ast.Expr, ast.parse(type_.__forward_arg__).body[0])\n )\n\n # We replaced \"a | b\" with \"Union[a, b], so make sure Union can be resolved\n # at globalns because it may not be there\n if \"Union\" not in globalns:\n globalns[\"Union\"] = Union\n\n assert ast_unparse\n type_ = ForwardRef(ast_unparse(parsed))\n\n return _eval_type(type_, globalns, localns)\n\n origin = get_origin(type_)\n if origin is not None:\n args = get_args(type_)\n if origin is Annotated:\n for arg in args[1:]:\n if isinstance(arg, StrawberryPrivate):\n return type_\n\n if isinstance(arg, StrawberryLazyReference):\n remaining_args = [\n a\n for a in args[1:]\n if not isinstance(arg, StrawberryLazyReference)\n ]\n args = (arg.resolve_forward_ref(args[0]), *remaining_args)\n break\n if isinstance(arg, StrawberryAuto):\n remaining_args = [\n a for a in args[1:] if not isinstance(arg, StrawberryAuto)\n ]\n args = (arg, *remaining_args)\n break\n\n # If we have only a StrawberryLazyReference and no more annotations,\n # we need to return the argument directly because Annotated\n # will raise an error if trying to instantiate it with only\n # one argument.\n if len(args) == 1:\n return args[0]\n\n # python 3.10 will return UnionType for origin, and it cannot be\n # subscripted like Union[Foo, Bar]\n if sys.version_info >= (3, 10):\n from types import UnionType\n\n if origin is UnionType:\n origin = Union\n\n # Future annotations in older versions will eval generic aliases to their\n # real types (i.e. List[foo] will have its origin set to list instead\n # of List). If that type is not subscriptable, retrieve its generic\n # alias version instead.\n if sys.version_info < (3, 9) and not hasattr(origin, \"__class_getitem__\"):\n origin = get_generic_alias(origin)\n\n type_ = (\n origin[tuple(eval_type(a, globalns, localns) for a in args)]\n if args\n else origin\n )\n\n return type_\n\n\n_T = TypeVar(\"_T\")\n\n\ndef __dataclass_transform__(\n *,\n eq_default: bool = True,\n order_default: bool = False,\n kw_only_default: bool = False,\n field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (()),\n) -> Callable[[_T], _T]:\n return lambda a: a\n", "path": "strawberry/utils/typing.py"}], "after_files": [{"content": "import ast\nimport re\nimport sys\nimport typing\nfrom collections.abc import AsyncGenerator\nfrom functools import lru_cache\nfrom typing import ( # type: ignore\n TYPE_CHECKING,\n Any,\n Callable,\n ClassVar,\n Dict,\n ForwardRef,\n Generic,\n Optional,\n Tuple,\n Type,\n TypeVar,\n Union,\n _eval_type,\n _GenericAlias,\n _SpecialForm,\n cast,\n overload,\n)\nfrom typing_extensions import Annotated, get_args, get_origin\n\nast_unparse = getattr(ast, \"unparse\", None)\n# ast.unparse is only available on python 3.9+. For older versions we will\n# use `astunparse.unparse`.\n# We are also using \"not TYPE_CHECKING\" here because mypy gives an erorr\n# on tests because \"astunparse\" is missing stubs, but the mypy action says\n# that the comment is unused.\nif not TYPE_CHECKING and ast_unparse is None:\n import astunparse\n\n ast_unparse = astunparse.unparse\n\n\n@lru_cache()\ndef get_generic_alias(type_: Type) -> Type:\n \"\"\"Get the generic alias for a type.\n\n Given a type, its generic alias from `typing` module will be returned\n if it exists. 
For example:\n\n >>> get_generic_alias(list)\n typing.List\n >>> get_generic_alias(dict)\n typing.Dict\n\n This is mostly useful for python versions prior to 3.9, to get a version\n of a concrete type which supports `__class_getitem__`. In 3.9+ types like\n `list`/`dict`/etc are subscriptable and can be used directly instead\n of their generic alias version.\n \"\"\"\n if isinstance(type_, _SpecialForm):\n return type_\n\n for attr_name in dir(typing):\n # ignore private attributes, they are not Generic aliases\n if attr_name.startswith(\"_\"): # pragma: no cover\n continue\n\n attr = getattr(typing, attr_name)\n # _GenericAlias overrides all the methods that we can use to know if\n # this is a subclass of it. But if it has an \"_inst\" attribute\n # then it for sure is a _GenericAlias\n if hasattr(attr, \"_inst\") and attr.__origin__ is type_:\n return attr\n\n raise AssertionError(f\"No GenericAlias available for {type_}\") # pragma: no cover\n\n\ndef is_list(annotation: object) -> bool:\n \"\"\"Returns True if annotation is a List\"\"\"\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n return annotation_origin == list\n\n\ndef is_union(annotation: object) -> bool:\n \"\"\"Returns True if annotation is a Union\"\"\"\n\n # this check is needed because unions declared with the new syntax `A | B`\n # don't have a `__origin__` property on them, but they are instances of\n # `UnionType`, which is only available in Python 3.10+\n if sys.version_info >= (3, 10):\n from types import UnionType\n\n if isinstance(annotation, UnionType):\n return True\n\n # unions declared as Union[A, B] fall through to this check, even on python 3.10+\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n return annotation_origin == Union\n\n\ndef is_optional(annotation: Type) -> bool:\n \"\"\"Returns True if the annotation is Optional[SomeType]\"\"\"\n\n # Optionals are represented as unions\n\n if not is_union(annotation):\n return False\n\n types = annotation.__args__\n\n # A Union to be optional needs to have at least one None type\n return any(x == None.__class__ for x in types)\n\n\ndef get_optional_annotation(annotation: Type) -> Type:\n types = annotation.__args__\n\n non_none_types = tuple(x for x in types if x != None.__class__)\n\n # if we have multiple non none types we want to return a copy of this\n # type (normally a Union type).\n\n if len(non_none_types) > 1:\n return annotation.copy_with(non_none_types)\n\n return non_none_types[0]\n\n\ndef get_list_annotation(annotation: Type) -> Type:\n return annotation.__args__[0]\n\n\ndef is_concrete_generic(annotation: type) -> bool:\n ignored_generics = (list, tuple, Union, ClassVar, AsyncGenerator)\n return (\n isinstance(annotation, _GenericAlias)\n and annotation.__origin__ not in ignored_generics\n )\n\n\ndef is_generic_subclass(annotation: type) -> bool:\n return isinstance(annotation, type) and issubclass(\n annotation, Generic # type:ignore\n )\n\n\ndef is_generic(annotation: type) -> bool:\n \"\"\"Returns True if the annotation is or extends a generic.\"\"\"\n\n return (\n # TODO: These two lines appear to have the same effect. 
When will an\n # annotation have parameters but not satisfy the first condition?\n (is_generic_subclass(annotation) or is_concrete_generic(annotation))\n and bool(get_parameters(annotation))\n )\n\n\ndef is_type_var(annotation: Type) -> bool:\n \"\"\"Returns True if the annotation is a TypeVar.\"\"\"\n\n return isinstance(annotation, TypeVar)\n\n\ndef get_parameters(annotation: Type) -> Union[Tuple[object], Tuple[()]]:\n if (\n isinstance(annotation, _GenericAlias)\n or isinstance(annotation, type)\n and issubclass(annotation, Generic) # type:ignore\n and annotation is not Generic\n ):\n return annotation.__parameters__\n else:\n return () # pragma: no cover\n\n\n@overload\ndef _ast_replace_union_operation(expr: ast.expr) -> ast.expr:\n ...\n\n\n@overload\ndef _ast_replace_union_operation(expr: ast.Expr) -> ast.Expr:\n ...\n\n\ndef _ast_replace_union_operation(\n expr: Union[ast.Expr, ast.expr]\n) -> Union[ast.Expr, ast.expr]:\n if isinstance(expr, ast.Expr) and isinstance(\n expr.value, (ast.BinOp, ast.Subscript)\n ):\n expr = ast.Expr(_ast_replace_union_operation(expr.value))\n elif isinstance(expr, ast.BinOp):\n left = _ast_replace_union_operation(expr.left)\n right = _ast_replace_union_operation(expr.right)\n expr = ast.Subscript(\n ast.Name(id=\"Union\"),\n ast.Tuple([left, right], ast.Load()),\n ast.Load(),\n )\n elif isinstance(expr, ast.Tuple):\n expr = ast.Tuple(\n [_ast_replace_union_operation(elt) for elt in expr.elts],\n ast.Load(),\n )\n elif isinstance(expr, ast.Subscript):\n if hasattr(ast, \"Index\") and isinstance(expr.slice, ast.Index):\n expr = ast.Subscript(\n expr.value,\n # The cast is required for mypy on python 3.7 and 3.8\n ast.Index(_ast_replace_union_operation(cast(Any, expr.slice).value)),\n ast.Load(),\n )\n elif isinstance(expr.slice, (ast.BinOp, ast.Tuple)):\n expr = ast.Subscript(\n expr.value,\n _ast_replace_union_operation(expr.slice),\n ast.Load(),\n )\n\n return expr\n\n\n_annotated_re = re.compile(\n r\"(Annotated\\[)(?P<type>\\w*),(?P<args>.*)(\\])\",\n)\n\n\ndef eval_type(\n type_: Any,\n globalns: Optional[Dict] = None,\n localns: Optional[Dict] = None,\n) -> Type:\n \"\"\"Evaluates a type, resolving forward references.\"\"\"\n from strawberry.auto import StrawberryAuto\n from strawberry.lazy_type import StrawberryLazyReference\n from strawberry.private import StrawberryPrivate\n\n globalns = globalns or {}\n # If this is not a string, maybe its args are (e.g. List[\"Foo\"])\n if isinstance(type_, ForwardRef):\n # For Python 3.10+, we can use the built-in _eval_type function directly.\n # It will handle \"|\" notations properly\n if sys.version_info < (3, 10):\n parsed = _ast_replace_union_operation(\n cast(ast.Expr, ast.parse(type_.__forward_arg__).body[0])\n )\n\n # We replaced \"a | b\" with \"Union[a, b], so make sure Union can be resolved\n # at globalns because it may not be there\n if \"Union\" not in globalns:\n globalns[\"Union\"] = Union\n\n assert ast_unparse\n type_ = ForwardRef(ast_unparse(parsed))\n\n # When using forward refs, the whole\n # Annotated[SomeType, strabwerry.lazy(\"type.module\")] is a forward ref,\n # and trying to _eval_type on it will fail. 
Take a different approach\n # here to resolve lazy types by execing the annotated args and resolving\n # the type directly.\n annotated_match = _annotated_re.search(type_.__forward_arg__)\n if annotated_match:\n gdict = annotated_match.groupdict()\n # FIXME: Eval the remaining annotated args to get their real values\n # We might want to refactor how we import lazy modules to avoid having\n # to eval the code in here\n args = eval(f'({gdict[\"args\"]}, )', globalns, localns) # noqa: PGH001\n lazy_ref = next(\n (arg for arg in args if isinstance(arg, StrawberryLazyReference)),\n None,\n )\n if lazy_ref is not None:\n remaining = [\n a for a in args if not isinstance(a, StrawberryLazyReference)\n ]\n type_ = lazy_ref.resolve_forward_ref(ForwardRef(gdict[\"type\"]))\n # If we only had a StrawberryLazyReference, we can return the type\n # directly. It already did its job!\n if not remaining:\n return type_\n\n # Otherwise return the type annotated with the remaining annotations\n return Annotated.__class_getitem__( # type: ignore\n (type_, *remaining),\n )\n\n return _eval_type(type_, globalns, localns)\n\n origin = get_origin(type_)\n if origin is not None:\n args = get_args(type_)\n if origin is Annotated:\n for arg in args[1:]:\n if isinstance(arg, StrawberryPrivate):\n return type_\n\n if isinstance(arg, StrawberryLazyReference):\n remaining_args = [\n a\n for a in args[1:]\n if not isinstance(arg, StrawberryLazyReference)\n ]\n args = (arg.resolve_forward_ref(args[0]), *remaining_args)\n break\n if isinstance(arg, StrawberryAuto):\n remaining_args = [\n a for a in args[1:] if not isinstance(arg, StrawberryAuto)\n ]\n args = (arg, *remaining_args)\n break\n\n # If we have only a StrawberryLazyReference and no more annotations,\n # we need to return the argument directly because Annotated\n # will raise an error if trying to instantiate it with only\n # one argument.\n if len(args) == 1:\n return args[0]\n\n # python 3.10 will return UnionType for origin, and it cannot be\n # subscripted like Union[Foo, Bar]\n if sys.version_info >= (3, 10):\n from types import UnionType\n\n if origin is UnionType:\n origin = Union\n\n # Future annotations in older versions will eval generic aliases to their\n # real types (i.e. List[foo] will have its origin set to list instead\n # of List). If that type is not subscriptable, retrieve its generic\n # alias version instead.\n if sys.version_info < (3, 9) and not hasattr(origin, \"__class_getitem__\"):\n origin = get_generic_alias(origin)\n\n type_ = (\n origin[tuple(eval_type(a, globalns, localns) for a in args)]\n if args\n else origin\n )\n\n return type_\n\n\n_T = TypeVar(\"_T\")\n\n\ndef __dataclass_transform__(\n *,\n eq_default: bool = True,\n order_default: bool = False,\n kw_only_default: bool = False,\n field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (()),\n) -> Callable[[_T], _T]:\n return lambda a: a\n", "path": "strawberry/utils/typing.py"}]}
| 3,531 | 554 |
gh_patches_debug_22417
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-537
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unicode error: python 2 + merge conflict + non-ascii commit message
The important part of the stack:
```
File "...python2.7/site-packages/pre_commit/commands/run.py", line 52, in get_filenames
return getter(include_expr, exclude_expr)
File "...python2.7/site-packages/pre_commit/util.py", line 46, in wrapper
ret = wrapper._cache[key] = func(*args)
File "...python2.7/site-packages/pre_commit/git.py", line 98, in wrapper
for filename in all_file_list_strategy()
File "...python2.7/site-packages/pre_commit/util.py", line 46, in wrapper
ret = wrapper._cache[key] = func(*args)
File "...python2.7/site-packages/pre_commit/git.py", line 64, in get_conflicted_files
merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)
File "...python2.7/site-packages/pre_commit/git.py", line 54, in parse_merge_msg_for_conflicts
if line.startswith(('\t', '#\t'))
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 37: ordinal not in range(128)
```
An easy fix: https://github.com/pre-commit/pre-commit/blob/e3b14c35f782ed464e3f96b44e8509048187689f/pre_commit/git.py#L63
--- END ISSUE ---
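For orientation only (this is an illustration, not the project's actual patch): under Python 2 the bytes read from MERGE_MSG get mixed with unicode literals, which forces an implicit ASCII decode and raises the error above as soon as the message contains a non-ASCII byte. One way to sidestep that is to read the file as bytes and decode it explicitly:

```python
# Illustrative sketch: read MERGE_MSG as bytes and decode as UTF-8
# explicitly, instead of relying on Python 2's implicit ASCII decoding.
import io
import os.path


def read_merge_msg(git_dir):
    path = os.path.join(git_dir, 'MERGE_MSG')
    with io.open(path, 'rb') as f:
        return f.read().decode('UTF-8')
```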
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/git.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import functools
4 import logging
5 import os.path
6 import re
7 import sys
8
9 from pre_commit.errors import FatalError
10 from pre_commit.util import CalledProcessError
11 from pre_commit.util import cmd_output
12 from pre_commit.util import memoize_by_cwd
13
14
15 logger = logging.getLogger('pre_commit')
16
17
18 def get_root():
19 try:
20 return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()
21 except CalledProcessError:
22 raise FatalError(
23 'git failed. Is it installed, and are you in a Git repository '
24 'directory?'
25 )
26
27
28 def get_git_dir(git_root):
29 return os.path.normpath(os.path.join(
30 git_root,
31 cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(),
32 ))
33
34
35 def get_remote_url(git_root):
36 ret = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)[1]
37 return ret.strip()
38
39
40 def is_in_merge_conflict():
41 git_dir = get_git_dir('.')
42 return (
43 os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and
44 os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))
45 )
46
47
48 def parse_merge_msg_for_conflicts(merge_msg):
49 # Conflicted files start with tabs
50 return [
51 line.lstrip('#').strip()
52 for line in merge_msg.splitlines()
53 # '#\t' for git 2.4.1
54 if line.startswith(('\t', '#\t'))
55 ]
56
57
58 @memoize_by_cwd
59 def get_conflicted_files():
60 logger.info('Checking merge-conflict files only.')
61 # Need to get the conflicted files from the MERGE_MSG because they could
62 # have resolved the conflict by choosing one side or the other
63 merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()
64 merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)
65
66 # This will get the rest of the changes made after the merge.
67 # If they resolved the merge conflict by choosing a mesh of both sides
68 # this will also include the conflicted files
69 tree_hash = cmd_output('git', 'write-tree')[1].strip()
70 merge_diff_filenames = cmd_output(
71 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only',
72 )[1].splitlines()
73 return set(merge_conflict_filenames) | set(merge_diff_filenames)
74
75
76 @memoize_by_cwd
77 def get_staged_files():
78 return cmd_output(
79 'git', 'diff', '--staged', '--name-only',
80 # Everything except for D
81 '--diff-filter=ACMRTUXB'
82 )[1].splitlines()
83
84
85 @memoize_by_cwd
86 def get_all_files():
87 return cmd_output('git', 'ls-files')[1].splitlines()
88
89
90 def get_files_matching(all_file_list_strategy):
91 @functools.wraps(all_file_list_strategy)
92 @memoize_by_cwd
93 def wrapper(include_expr, exclude_expr):
94 include_regex = re.compile(include_expr)
95 exclude_regex = re.compile(exclude_expr)
96 return {
97 filename
98 for filename in all_file_list_strategy()
99 if (
100 include_regex.search(filename) and
101 not exclude_regex.search(filename) and
102 os.path.lexists(filename)
103 )
104 }
105 return wrapper
106
107
108 get_staged_files_matching = get_files_matching(get_staged_files)
109 get_all_files_matching = get_files_matching(get_all_files)
110 get_conflicted_files_matching = get_files_matching(get_conflicted_files)
111
112
113 def check_for_cygwin_mismatch():
114 """See https://github.com/pre-commit/pre-commit/issues/354"""
115 if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)
116 is_cygwin_python = sys.platform == 'cygwin'
117 toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]
118 is_cygwin_git = toplevel.startswith('/')
119
120 if is_cygwin_python ^ is_cygwin_git:
121 exe_type = {True: '(cygwin)', False: '(windows)'}
122 logger.warn(
123 'pre-commit has detected a mix of cygwin python / git\n'
124 'This combination is not supported, it is likely you will '
125 'receive an error later in the program.\n'
126 'Make sure to use cygwin git+python while using cygwin\n'
127 'These can be installed through the cygwin installer.\n'
128 ' - python {}\n'
129 ' - git {}\n'.format(
130 exe_type[is_cygwin_python],
131 exe_type[is_cygwin_git],
132 )
133 )
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/git.py b/pre_commit/git.py
--- a/pre_commit/git.py
+++ b/pre_commit/git.py
@@ -48,10 +48,10 @@
def parse_merge_msg_for_conflicts(merge_msg):
# Conflicted files start with tabs
return [
- line.lstrip('#').strip()
+ line.lstrip(b'#').strip().decode('UTF-8')
for line in merge_msg.splitlines()
# '#\t' for git 2.4.1
- if line.startswith(('\t', '#\t'))
+ if line.startswith((b'\t', b'#\t'))
]
@@ -60,7 +60,7 @@
logger.info('Checking merge-conflict files only.')
# Need to get the conflicted files from the MERGE_MSG because they could
# have resolved the conflict by choosing one side or the other
- merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()
+ merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb').read()
merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)
# This will get the rest of the changes made after the merge.
|
{"golden_diff": "diff --git a/pre_commit/git.py b/pre_commit/git.py\n--- a/pre_commit/git.py\n+++ b/pre_commit/git.py\n@@ -48,10 +48,10 @@\n def parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n- line.lstrip('#').strip()\n+ line.lstrip(b'#').strip().decode('UTF-8')\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n- if line.startswith(('\\t', '#\\t'))\n+ if line.startswith((b'\\t', b'#\\t'))\n ]\n \n \n@@ -60,7 +60,7 @@\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n- merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()\n+ merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb').read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n \n # This will get the rest of the changes made after the merge.\n", "issue": "Unicode error: python 2 + merge conflict + non-ascii commit message\nThe important part of the stack:\r\n\r\n```\r\n File \"...python2.7/site-packages/pre_commit/commands/run.py\", line 52, in get_filenames\r\n return getter(include_expr, exclude_expr)\r\n File \"...python2.7/site-packages/pre_commit/util.py\", line 46, in wrapper\r\n ret = wrapper._cache[key] = func(*args)\r\n File \"...python2.7/site-packages/pre_commit/git.py\", line 98, in wrapper\r\n for filename in all_file_list_strategy()\r\n File \"...python2.7/site-packages/pre_commit/util.py\", line 46, in wrapper\r\n ret = wrapper._cache[key] = func(*args)\r\n File \"...python2.7/site-packages/pre_commit/git.py\", line 64, in get_conflicted_files\r\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\r\n File \"...python2.7/site-packages/pre_commit/git.py\", line 54, in parse_merge_msg_for_conflicts\r\n if line.startswith(('\\t', '#\\t'))\r\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 37: ordinal not in range(128)\r\n```\r\n\r\nAn easy fix: https://github.com/pre-commit/pre-commit/blob/e3b14c35f782ed464e3f96b44e8509048187689f/pre_commit/git.py#L63\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport functools\nimport logging\nimport os.path\nimport re\nimport sys\n\nfrom pre_commit.errors import FatalError\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import memoize_by_cwd\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef get_root():\n try:\n return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()\n except CalledProcessError:\n raise FatalError(\n 'git failed. 
Is it installed, and are you in a Git repository '\n 'directory?'\n )\n\n\ndef get_git_dir(git_root):\n return os.path.normpath(os.path.join(\n git_root,\n cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(),\n ))\n\n\ndef get_remote_url(git_root):\n ret = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)[1]\n return ret.strip()\n\n\ndef is_in_merge_conflict():\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n line.lstrip('#').strip()\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith(('\\t', '#\\t'))\n ]\n\n\n@memoize_by_cwd\ndef get_conflicted_files():\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = cmd_output(\n 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only',\n )[1].splitlines()\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\n@memoize_by_cwd\ndef get_staged_files():\n return cmd_output(\n 'git', 'diff', '--staged', '--name-only',\n # Everything except for D\n '--diff-filter=ACMRTUXB'\n )[1].splitlines()\n\n\n@memoize_by_cwd\ndef get_all_files():\n return cmd_output('git', 'ls-files')[1].splitlines()\n\n\ndef get_files_matching(all_file_list_strategy):\n @functools.wraps(all_file_list_strategy)\n @memoize_by_cwd\n def wrapper(include_expr, exclude_expr):\n include_regex = re.compile(include_expr)\n exclude_regex = re.compile(exclude_expr)\n return {\n filename\n for filename in all_file_list_strategy()\n if (\n include_regex.search(filename) and\n not exclude_regex.search(filename) and\n os.path.lexists(filename)\n )\n }\n return wrapper\n\n\nget_staged_files_matching = get_files_matching(get_staged_files)\nget_all_files_matching = get_files_matching(get_all_files)\nget_conflicted_files_matching = get_files_matching(get_conflicted_files)\n\n\ndef check_for_cygwin_mismatch():\n \"\"\"See https://github.com/pre-commit/pre-commit/issues/354\"\"\"\n if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)\n is_cygwin_python = sys.platform == 'cygwin'\n toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]\n is_cygwin_git = toplevel.startswith('/')\n\n if is_cygwin_python ^ is_cygwin_git:\n exe_type = {True: '(cygwin)', False: '(windows)'}\n logger.warn(\n 'pre-commit has detected a mix of cygwin python / git\\n'\n 'This combination is not supported, it is likely you will '\n 'receive an error later in the program.\\n'\n 'Make sure to use cygwin git+python while using cygwin\\n'\n 'These can be installed through the cygwin installer.\\n'\n ' - python {}\\n'\n ' - git {}\\n'.format(\n exe_type[is_cygwin_python],\n exe_type[is_cygwin_git],\n )\n )\n", "path": "pre_commit/git.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport functools\nimport logging\nimport os.path\nimport re\nimport 
sys\n\nfrom pre_commit.errors import FatalError\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import memoize_by_cwd\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef get_root():\n try:\n return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()\n except CalledProcessError:\n raise FatalError(\n 'git failed. Is it installed, and are you in a Git repository '\n 'directory?'\n )\n\n\ndef get_git_dir(git_root):\n return os.path.normpath(os.path.join(\n git_root,\n cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(),\n ))\n\n\ndef get_remote_url(git_root):\n ret = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)[1]\n return ret.strip()\n\n\ndef is_in_merge_conflict():\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n line.lstrip(b'#').strip().decode('UTF-8')\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith((b'\\t', b'#\\t'))\n ]\n\n\n@memoize_by_cwd\ndef get_conflicted_files():\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb').read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = cmd_output(\n 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only',\n )[1].splitlines()\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\n@memoize_by_cwd\ndef get_staged_files():\n return cmd_output(\n 'git', 'diff', '--staged', '--name-only',\n # Everything except for D\n '--diff-filter=ACMRTUXB'\n )[1].splitlines()\n\n\n@memoize_by_cwd\ndef get_all_files():\n return cmd_output('git', 'ls-files')[1].splitlines()\n\n\ndef get_files_matching(all_file_list_strategy):\n @functools.wraps(all_file_list_strategy)\n @memoize_by_cwd\n def wrapper(include_expr, exclude_expr):\n include_regex = re.compile(include_expr)\n exclude_regex = re.compile(exclude_expr)\n return {\n filename\n for filename in all_file_list_strategy()\n if (\n include_regex.search(filename) and\n not exclude_regex.search(filename) and\n os.path.lexists(filename)\n )\n }\n return wrapper\n\n\nget_staged_files_matching = get_files_matching(get_staged_files)\nget_all_files_matching = get_files_matching(get_all_files)\nget_conflicted_files_matching = get_files_matching(get_conflicted_files)\n\n\ndef check_for_cygwin_mismatch():\n \"\"\"See https://github.com/pre-commit/pre-commit/issues/354\"\"\"\n if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)\n is_cygwin_python = sys.platform == 'cygwin'\n toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]\n is_cygwin_git = toplevel.startswith('/')\n\n if is_cygwin_python ^ is_cygwin_git:\n exe_type = {True: '(cygwin)', False: '(windows)'}\n logger.warn(\n 'pre-commit has detected a mix of cygwin python / git\\n'\n 'This combination is not supported, it is likely you will '\n 'receive an error later in the 
program.\\n'\n 'Make sure to use cygwin git+python while using cygwin\\n'\n 'These can be installed through the cygwin installer.\\n'\n ' - python {}\\n'\n ' - git {}\\n'.format(\n exe_type[is_cygwin_python],\n exe_type[is_cygwin_git],\n )\n )\n", "path": "pre_commit/git.py"}]}
| 1,939 | 277 |
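The golden diff in the pre-commit record above fixes the `UnicodeDecodeError` by opening `MERGE_MSG` in binary mode and decoding only the extracted filenames. A small, self-contained sketch of that bytes-first pattern; the sample merge message below is invented:

```python
# -*- coding: utf-8 -*-
# Sketch of the fix in the record above: keep the merge message as bytes and
# decode only the conflicted filenames, so a non-ASCII commit subject never
# hits Python 2's implicit ascii codec.
from __future__ import unicode_literals


def parse_merge_msg_for_conflicts(merge_msg):
    # Conflicted files start with '\t' (or '#\t' for git 2.4.1)
    return [
        line.lstrip(b'#').strip().decode('UTF-8')
        for line in merge_msg.splitlines()
        if line.startswith((b'\t', b'#\t'))
    ]


if __name__ == '__main__':
    sample = (
        'Merge branch "topic" with résumé changes\n'
        '\n'
        'Conflicts:\n'
        '\tpath/to/conflicted_file.py\n'
        '#\tother/conflicted_file.py\n'
    ).encode('UTF-8')
    print(parse_merge_msg_for_conflicts(sample))
    # ['path/to/conflicted_file.py', 'other/conflicted_file.py']
```

Reading the file with `open(path, 'rb')`, as the patch does, keeps the whole pipeline in bytes on both Python 2 and 3.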
gh_patches_debug_25875
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-2292
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invalid query parameters cause internal server error
```
File "akvo/rest/filters.py", line 61, in filter_queryset
queryset = getattr(queryset, param)(**args_or_kwargs)
```
In the new API filtering, you get an internal server error when the query is not correct, e.g.: http://rsr.akvo.org/rest/v1/project/?filter={'incorrect':'code'}.
We should catch this and return the error message in a dict with detail as key. E.g. `{detail: 'Related Field got invalid lookup: incorrect'}`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/filters.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo Reporting is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 import ast
8
9 from django.db.models import Q
10
11 from rest_framework import filters
12
13
14 class RSRGenericFilterBackend(filters.BaseFilterBackend):
15
16 def filter_queryset(self, request, queryset, view):
17 """
18 Return a queryset possibly filtered by query param values.
19 The filter looks for the query param keys filter and exclude
20 For each of these query param the value is evaluated using ast.literal_eval() and used as
21 kwargs in queryset.filter and queryset.exclude respectively.
22
23 Example URLs:
24 http://rsr.akvo.org/rest/v1/project/?filter={'title__icontains':'water','currency':'EUR'}
25 http://rsr.akvo.org/rest/v1/project/?filter={'title__icontains':'water'}&exclude={'currency':'EUR'}
26
27 It's also possible to specify models to be included in select_related() and
28 prefetch_related() calls on the queryset, but specifying these in lists of strings as the
29 values for the query sting params select_relates and prefetch_related.
30
31 Example:
32 http://rsr.akvo.org/rest/v1/project/?filter={'partners__in':[42,43]}&prefetch_related=['partners']
33
34 Finally limited support for filtering on multiple arguments using logical OR between
35 those expressions is available. To use this supply two or more query string keywords on the
36 form q_filter1, q_filter2... where the value is a dict that can be used as a kwarg in a Q
37 object. All those Q objects created are used in a queryset.filter() call concatenated using
38 the | operator.
39 """
40 def eval_query_value(request, key):
41 """
42 Use ast.literal_eval() to evaluate a query string value as a python data type object
43 :param request: the django request object
44 :param param: the query string param key
45 :return: a python data type object, or None if literal_eval() fails
46 """
47 value = request.QUERY_PARAMS.get(key, None)
48 try:
49 return ast.literal_eval(value)
50 except ValueError:
51 return None
52
53 qs_params = ['filter', 'exclude', 'select_related', 'prefetch_related']
54
55 # evaluate each query string param, and apply the queryset method with the same name
56 for param in qs_params:
57 args_or_kwargs = eval_query_value(request, param)
58 if args_or_kwargs:
59 # filter and exclude are called with a dict kwarg, the _related methods with a list
60 if param in ['filter', 'exclude',]:
61 queryset = getattr(queryset, param)(**args_or_kwargs)
62 else:
63 queryset = getattr(queryset, param)(*args_or_kwargs)
64
65 # support for Q expressions, limited to OR-concatenated filtering
66 if request.QUERY_PARAMS.get('q_filter1', None):
67 i = 1
68 q_queries = []
69 while request.QUERY_PARAMS.get('q_filter{}'.format(i), None):
70 query_arg = eval_query_value(request, 'q_filter{}'.format(i))
71 if query_arg:
72 q_queries += [query_arg]
73 i += 1
74
75 q_expr = Q(**q_queries[0])
76 for query in q_queries[1:]:
77 q_expr = q_expr | Q(**query)
78
79 queryset = queryset.filter(q_expr)
80
81 return queryset
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/akvo/rest/filters.py b/akvo/rest/filters.py
--- a/akvo/rest/filters.py
+++ b/akvo/rest/filters.py
@@ -7,8 +7,10 @@
import ast
from django.db.models import Q
+from django.core.exceptions import FieldError
-from rest_framework import filters
+from rest_framework import filters, status
+from rest_framework.exceptions import APIException
class RSRGenericFilterBackend(filters.BaseFilterBackend):
@@ -57,10 +59,14 @@
args_or_kwargs = eval_query_value(request, param)
if args_or_kwargs:
# filter and exclude are called with a dict kwarg, the _related methods with a list
- if param in ['filter', 'exclude',]:
- queryset = getattr(queryset, param)(**args_or_kwargs)
- else:
- queryset = getattr(queryset, param)(*args_or_kwargs)
+ try:
+ if param in ['filter', 'exclude',]:
+ queryset = getattr(queryset, param)(**args_or_kwargs)
+ else:
+ queryset = getattr(queryset, param)(*args_or_kwargs)
+
+ except FieldError as e:
+ raise APIException("Error in request: {message}".format(message=e.message))
# support for Q expressions, limited to OR-concatenated filtering
if request.QUERY_PARAMS.get('q_filter1', None):
|
{"golden_diff": "diff --git a/akvo/rest/filters.py b/akvo/rest/filters.py\n--- a/akvo/rest/filters.py\n+++ b/akvo/rest/filters.py\n@@ -7,8 +7,10 @@\n import ast\n \n from django.db.models import Q\n+from django.core.exceptions import FieldError\n \n-from rest_framework import filters\n+from rest_framework import filters, status\n+from rest_framework.exceptions import APIException\n \n \n class RSRGenericFilterBackend(filters.BaseFilterBackend):\n@@ -57,10 +59,14 @@\n args_or_kwargs = eval_query_value(request, param)\n if args_or_kwargs:\n # filter and exclude are called with a dict kwarg, the _related methods with a list\n- if param in ['filter', 'exclude',]:\n- queryset = getattr(queryset, param)(**args_or_kwargs)\n- else:\n- queryset = getattr(queryset, param)(*args_or_kwargs)\n+ try:\n+ if param in ['filter', 'exclude',]:\n+ queryset = getattr(queryset, param)(**args_or_kwargs)\n+ else:\n+ queryset = getattr(queryset, param)(*args_or_kwargs)\n+\n+ except FieldError as e:\n+ raise APIException(\"Error in request: {message}\".format(message=e.message))\n \n # support for Q expressions, limited to OR-concatenated filtering\n if request.QUERY_PARAMS.get('q_filter1', None):\n", "issue": "Invalid query parameters cause internal server error\n```\n File \"akvo/rest/filters.py\", line 61, in filter_queryset\n queryset = getattr(queryset, param)(**args_or_kwargs)\n```\n\nIn the new API filtering, you get an internal server error when the query is not correct, e.g.: http://rsr.akvo.org/rest/v1/project/?filter={'incorrect':'code'}. \n\nWe should catch this and return the error message in a dict with detail as key. E.g. `{detail: 'Related Field got invalid lookup: incorrect'}`\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo Reporting is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nimport ast\n\nfrom django.db.models import Q\n\nfrom rest_framework import filters\n\n\nclass RSRGenericFilterBackend(filters.BaseFilterBackend):\n\n def filter_queryset(self, request, queryset, view):\n \"\"\"\n Return a queryset possibly filtered by query param values.\n The filter looks for the query param keys filter and exclude\n For each of these query param the value is evaluated using ast.literal_eval() and used as\n kwargs in queryset.filter and queryset.exclude respectively.\n\n Example URLs:\n http://rsr.akvo.org/rest/v1/project/?filter={'title__icontains':'water','currency':'EUR'}\n http://rsr.akvo.org/rest/v1/project/?filter={'title__icontains':'water'}&exclude={'currency':'EUR'}\n\n It's also possible to specify models to be included in select_related() and\n prefetch_related() calls on the queryset, but specifying these in lists of strings as the\n values for the query sting params select_relates and prefetch_related.\n\n Example:\n http://rsr.akvo.org/rest/v1/project/?filter={'partners__in':[42,43]}&prefetch_related=['partners']\n\n Finally limited support for filtering on multiple arguments using logical OR between\n those expressions is available. To use this supply two or more query string keywords on the\n form q_filter1, q_filter2... where the value is a dict that can be used as a kwarg in a Q\n object. 
All those Q objects created are used in a queryset.filter() call concatenated using\n the | operator.\n \"\"\"\n def eval_query_value(request, key):\n \"\"\"\n Use ast.literal_eval() to evaluate a query string value as a python data type object\n :param request: the django request object\n :param param: the query string param key\n :return: a python data type object, or None if literal_eval() fails\n \"\"\"\n value = request.QUERY_PARAMS.get(key, None)\n try:\n return ast.literal_eval(value)\n except ValueError:\n return None\n\n qs_params = ['filter', 'exclude', 'select_related', 'prefetch_related']\n\n # evaluate each query string param, and apply the queryset method with the same name\n for param in qs_params:\n args_or_kwargs = eval_query_value(request, param)\n if args_or_kwargs:\n # filter and exclude are called with a dict kwarg, the _related methods with a list\n if param in ['filter', 'exclude',]:\n queryset = getattr(queryset, param)(**args_or_kwargs)\n else:\n queryset = getattr(queryset, param)(*args_or_kwargs)\n\n # support for Q expressions, limited to OR-concatenated filtering\n if request.QUERY_PARAMS.get('q_filter1', None):\n i = 1\n q_queries = []\n while request.QUERY_PARAMS.get('q_filter{}'.format(i), None):\n query_arg = eval_query_value(request, 'q_filter{}'.format(i))\n if query_arg:\n q_queries += [query_arg]\n i += 1\n\n q_expr = Q(**q_queries[0])\n for query in q_queries[1:]:\n q_expr = q_expr | Q(**query)\n\n queryset = queryset.filter(q_expr)\n\n return queryset\n", "path": "akvo/rest/filters.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo Reporting is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nimport ast\n\nfrom django.db.models import Q\nfrom django.core.exceptions import FieldError\n\nfrom rest_framework import filters, status\nfrom rest_framework.exceptions import APIException\n\n\nclass RSRGenericFilterBackend(filters.BaseFilterBackend):\n\n def filter_queryset(self, request, queryset, view):\n \"\"\"\n Return a queryset possibly filtered by query param values.\n The filter looks for the query param keys filter and exclude\n For each of these query param the value is evaluated using ast.literal_eval() and used as\n kwargs in queryset.filter and queryset.exclude respectively.\n\n Example URLs:\n http://rsr.akvo.org/rest/v1/project/?filter={'title__icontains':'water','currency':'EUR'}\n http://rsr.akvo.org/rest/v1/project/?filter={'title__icontains':'water'}&exclude={'currency':'EUR'}\n\n It's also possible to specify models to be included in select_related() and\n prefetch_related() calls on the queryset, but specifying these in lists of strings as the\n values for the query sting params select_relates and prefetch_related.\n\n Example:\n http://rsr.akvo.org/rest/v1/project/?filter={'partners__in':[42,43]}&prefetch_related=['partners']\n\n Finally limited support for filtering on multiple arguments using logical OR between\n those expressions is available. To use this supply two or more query string keywords on the\n form q_filter1, q_filter2... where the value is a dict that can be used as a kwarg in a Q\n object. 
All those Q objects created are used in a queryset.filter() call concatenated using\n the | operator.\n \"\"\"\n def eval_query_value(request, key):\n \"\"\"\n Use ast.literal_eval() to evaluate a query string value as a python data type object\n :param request: the django request object\n :param param: the query string param key\n :return: a python data type object, or None if literal_eval() fails\n \"\"\"\n value = request.QUERY_PARAMS.get(key, None)\n try:\n return ast.literal_eval(value)\n except ValueError:\n return None\n\n qs_params = ['filter', 'exclude', 'select_related', 'prefetch_related']\n\n # evaluate each query string param, and apply the queryset method with the same name\n for param in qs_params:\n args_or_kwargs = eval_query_value(request, param)\n if args_or_kwargs:\n # filter and exclude are called with a dict kwarg, the _related methods with a list\n try:\n if param in ['filter', 'exclude',]:\n queryset = getattr(queryset, param)(**args_or_kwargs)\n else:\n queryset = getattr(queryset, param)(*args_or_kwargs)\n\n except FieldError as e:\n raise APIException(\"Error in request: {message}\".format(message=e.message))\n\n # support for Q expressions, limited to OR-concatenated filtering\n if request.QUERY_PARAMS.get('q_filter1', None):\n i = 1\n q_queries = []\n while request.QUERY_PARAMS.get('q_filter{}'.format(i), None):\n query_arg = eval_query_value(request, 'q_filter{}'.format(i))\n if query_arg:\n q_queries += [query_arg]\n i += 1\n\n q_expr = Q(**q_queries[0])\n for query in q_queries[1:]:\n q_expr = q_expr | Q(**query)\n\n queryset = queryset.filter(q_expr)\n\n return queryset\n", "path": "akvo/rest/filters.py"}]}
| 1,303 | 311 |
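The akvo record above converts the `FieldError` raised by an invalid lookup into a DRF `APIException`, so the client receives an error detail instead of an internal server error. A runnable sketch of that error-translation pattern; the `FieldError`, `APIException`, and `FakeQuerySet` classes here are stand-ins so the example does not need a Django project:

```python
# Sketch only: stand-in classes imitate django.core.exceptions.FieldError,
# rest_framework.exceptions.APIException, and a Django queryset.
class FieldError(Exception):
    """Stand-in for django.core.exceptions.FieldError."""


class APIException(Exception):
    """Stand-in for rest_framework.exceptions.APIException."""


class FakeQuerySet:
    def filter(self, **kwargs):
        for lookup in kwargs:
            if lookup not in ('title__icontains', 'currency'):
                raise FieldError('Related Field got invalid lookup: %s' % lookup)
        return self


def apply_query_param(queryset, param, kwargs):
    # Same shape as the patched filter_queryset(): dispatch by name, translate errors.
    try:
        return getattr(queryset, param)(**kwargs)
    except FieldError as e:
        raise APIException('Error in request: {message}'.format(message=e.args[0]))


if __name__ == '__main__':
    qs = FakeQuerySet()
    apply_query_param(qs, 'filter', {'currency': 'EUR'})         # valid lookup, no error
    try:
        apply_query_param(qs, 'filter', {'incorrect': 'code'})   # invalid lookup
    except APIException as e:
        print(e)  # Error in request: Related Field got invalid lookup: incorrect
```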
gh_patches_debug_14879
|
rasdani/github-patches
|
git_diff
|
pymeasure__pymeasure-547
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python 3.10: ManagedWindow TypeError
Hi all,
I am trying to use pymeasure for the first time. But when running the example "ManagedWindow" I get the following error message as soon as I press the "queue" button in the GUI and nothing will be plotted.
```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
C:\Program Files\Python 3.10\lib\site-packages\pymeasure\display\manager.py in _update_progress(self, progress)
158 def _update_progress(self, progress):
159 if self.is_running():
--> 160 self._running_experiment.browser_item.setProgress(progress)
161
162 def _update_status(self, status):
C:\Program Files\Python 3.10\lib\site-packages\pymeasure\display\browser.py in setProgress(self, progress)
73
74 def setProgress(self, progress):
---> 75 self.progressbar.setValue(progress)
76
77 class Browser(QtGui.QTreeWidget):
TypeError: setValue(self, int): argument 1 has unexpected type 'float'---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
C:\Program Files\Python 3.10\lib\site-packages\pymeasure\display\manager.py in _finish(self)
253 experiment = self._running_experiment
254 self._clean_up()
--> 255 experiment.browser_item.setProgress(100.)
256 experiment.curve.update()
257 self.finished.emit(experiment)
C:\Program Files\Python 3.10\lib\site-packages\pymeasure\display\browser.py in setProgress(self, progress)
73
74 def setProgress(self, progress):
---> 75 self.progressbar.setValue(progress)
76
77 class Browser(QtGui.QTreeWidget):
TypeError: setValue(self, int): argument 1 has unexpected type 'float'An exception has occurred, use %tb to see the full traceback.
```
I'm using Windows and Python 3.10.
Do you have an idea what is wrong? Probably something with the data type of progress. But what should I change?
Thanks for your help.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pymeasure/display/browser.py`
Content:
```
1 #
2 # This file is part of the PyMeasure package.
3 #
4 # Copyright (c) 2013-2021 PyMeasure Developers
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 # THE SOFTWARE.
23 #
24
25 import logging
26
27 from os.path import basename
28
29 from .Qt import QtCore, QtGui
30 from ..experiment import Procedure
31
32 log = logging.getLogger(__name__)
33 log.addHandler(logging.NullHandler())
34
35
36 class BrowserItem(QtGui.QTreeWidgetItem):
37 """ Represent a row in the :class:`~pymeasure.display.browser.Browser` tree widget """
38
39 def __init__(self, results, color, parent=None):
40 super().__init__(parent)
41
42 pixelmap = QtGui.QPixmap(24, 24)
43 pixelmap.fill(color)
44 self.setIcon(0, QtGui.QIcon(pixelmap))
45 self.setFlags(self.flags() | QtCore.Qt.ItemIsUserCheckable)
46 self.setCheckState(0, QtCore.Qt.Checked)
47 self.setText(1, basename(results.data_filename))
48
49 self.setStatus(results.procedure.status)
50
51 self.progressbar = QtGui.QProgressBar()
52 self.progressbar.setRange(0, 100)
53 self.progressbar.setValue(0)
54
55 def setStatus(self, status):
56 status_label = {
57 Procedure.QUEUED: 'Queued', Procedure.RUNNING: 'Running',
58 Procedure.FAILED: 'Failed', Procedure.ABORTED: 'Aborted',
59 Procedure.FINISHED: 'Finished'}
60 self.setText(3, status_label[status])
61
62 if status == Procedure.FAILED or status == Procedure.ABORTED:
63 # Set progress bar color to red
64 return # Commented this out
65 self.progressbar.setStyleSheet("""
66 QProgressBar {
67 border: 1px solid #AAAAAA;
68 border-radius: 5px;
69 text-align: center;
70 }
71 QProgressBar::chunk {
72 background-color: red;
73 }
74 """)
75
76 def setProgress(self, progress):
77 self.progressbar.setValue(progress)
78
79
80 class Browser(QtGui.QTreeWidget):
81 """Graphical list view of :class:`Experiment<pymeasure.display.manager.Experiment>`
82 objects allowing the user to view the status of queued Experiments as well as
83 loading and displaying data from previous runs.
84
85 In order that different Experiments be displayed within the same Browser,
86 they must have entries in `DATA_COLUMNS` corresponding to the
87 `measured_quantities` of the Browser.
88 """
89
90 def __init__(self, procedure_class, display_parameters,
91 measured_quantities, sort_by_filename=False, parent=None):
92 super().__init__(parent)
93 self.display_parameters = display_parameters
94 self.procedure_class = procedure_class
95 self.measured_quantities = measured_quantities
96
97 header_labels = ["Graph", "Filename", "Progress", "Status"]
98 for parameter in self.display_parameters:
99 header_labels.append(getattr(self.procedure_class, parameter).name)
100
101 self.setColumnCount(len(header_labels))
102 self.setHeaderLabels(header_labels)
103 self.setSortingEnabled(True)
104 if sort_by_filename:
105 self.sortItems(1, QtCore.Qt.AscendingOrder)
106
107 for i, width in enumerate([80, 140]):
108 self.header().resizeSection(i, width)
109
110 def add(self, experiment):
111 """Add a :class:`Experiment<pymeasure.display.manager.Experiment>` object
112 to the Browser. This function checks to make sure that the Experiment
113 measures the appropriate quantities to warrant its inclusion, and then
114 adds a BrowserItem to the Browser, filling all relevant columns with
115 Parameter data.
116 """
117 experiment_parameters = experiment.procedure.parameter_objects()
118 experiment_parameter_names = list(experiment_parameters.keys())
119
120 for measured_quantity in self.measured_quantities:
121 if measured_quantity not in experiment.procedure.DATA_COLUMNS:
122 raise Exception("Procedure does not measure the"
123 " %s quantity." % measured_quantity)
124
125 # Set the relevant fields within the BrowserItem if
126 # that Parameter is implemented
127 item = experiment.browser_item
128 for i, column in enumerate(self.display_parameters):
129 if column in experiment_parameter_names:
130 item.setText(i + 4, str(experiment_parameters[column]))
131
132 self.addTopLevelItem(item)
133 self.setItemWidget(item, 2, item.progressbar)
134 return item
135
```
Path: `setup.py`
Content:
```
1 #
2 # This file is part of the PyMeasure package.
3 #
4 # Copyright (c) 2013-2021 PyMeasure Developers
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 # THE SOFTWARE.
23 #
24
25
26 from setuptools import setup, find_packages
27
28 setup(
29 name='PyMeasure',
30 version='0.9.0',
31 author='PyMeasure Developers',
32 packages=find_packages(),
33 scripts=[],
34 url='https://github.com/pymeasure/pymeasure',
35 download_url='https://github.com/pymeasure/pymeasure/tarball/v0.9.0',
36 license='MIT License',
37 description='Scientific measurement library for instruments, experiments, and live-plotting',
38 long_description=open('README.rst').read() + "\n\n" + open('CHANGES.txt').read(),
39 install_requires=[
40 "numpy >= 1.6.1",
41 "pandas >= 0.14",
42 "pyvisa >= 1.8",
43 "pyserial >= 2.7",
44 "pyqtgraph >= 0.9.10"
45 ],
46 extras_require={
47 'matplotlib': ['matplotlib >= 2.0.2'],
48 'tcp': [
49 'pyzmq >= 16.0.2',
50 'cloudpickle >= 0.3.1'
51 ],
52 'python-vxi11': ['python-vxi11 >= 0.9']
53 },
54 setup_requires=[
55 'pytest-runner'
56 ],
57 tests_require=[
58 'pytest >= 2.9.1',
59 'pytest-qt >= 2.4.0',
60 'pyvisa-sim >= 0.4.0',
61 ],
62 classifiers=[
63 "Development Status :: 4 - Beta",
64 "Intended Audience :: Science/Research",
65 "License :: OSI Approved :: MIT License",
66 "Operating System :: MacOS",
67 "Operating System :: Microsoft :: Windows",
68 "Operating System :: POSIX",
69 "Operating System :: Unix",
70 "Programming Language :: Python :: 3 :: Only",
71 "Programming Language :: Python :: 3.7",
72 "Programming Language :: Python :: 3.8",
73 "Programming Language :: Python :: 3.9",
74 "Topic :: Scientific/Engineering",
75 ],
76 keywords="measure instrument experiment control automate graph plot"
77 )
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pymeasure/display/browser.py b/pymeasure/display/browser.py
--- a/pymeasure/display/browser.py
+++ b/pymeasure/display/browser.py
@@ -74,7 +74,7 @@
""")
def setProgress(self, progress):
- self.progressbar.setValue(progress)
+ self.progressbar.setValue(int(progress))
class Browser(QtGui.QTreeWidget):
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -71,6 +71,7 @@
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering",
],
keywords="measure instrument experiment control automate graph plot"
|
{"golden_diff": "diff --git a/pymeasure/display/browser.py b/pymeasure/display/browser.py\n--- a/pymeasure/display/browser.py\n+++ b/pymeasure/display/browser.py\n@@ -74,7 +74,7 @@\n \"\"\")\n \n def setProgress(self, progress):\n- self.progressbar.setValue(progress)\n+ self.progressbar.setValue(int(progress))\n \n \n class Browser(QtGui.QTreeWidget):\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -71,6 +71,7 @@\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n+ \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering\",\n ],\n keywords=\"measure instrument experiment control automate graph plot\"\n", "issue": "Python 3.10: ManagedWindow TypeError\nHi all,\r\nI am trying to use pymeasure for the first time. But when running the example \"ManagedWindow\" I get the following error message as soon as I press the \"queue\" button in the GUI and nothing will be plotted. \r\n\r\n```\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\nC:\\Program Files\\Python 3.10\\lib\\site-packages\\pymeasure\\display\\manager.py in _update_progress(self, progress)\r\n 158 def _update_progress(self, progress):\r\n 159 if self.is_running():\r\n--> 160 self._running_experiment.browser_item.setProgress(progress)\r\n 161 \r\n 162 def _update_status(self, status):\r\n\r\nC:\\Program Files\\Python 3.10\\lib\\site-packages\\pymeasure\\display\\browser.py in setProgress(self, progress)\r\n 73 \r\n 74 def setProgress(self, progress):\r\n---> 75 self.progressbar.setValue(progress)\r\n 76 \r\n 77 class Browser(QtGui.QTreeWidget):\r\n\r\nTypeError: setValue(self, int): argument 1 has unexpected type 'float'---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\nC:\\Program Files\\Python 3.10\\lib\\site-packages\\pymeasure\\display\\manager.py in _finish(self)\r\n 253 experiment = self._running_experiment\r\n 254 self._clean_up()\r\n--> 255 experiment.browser_item.setProgress(100.)\r\n 256 experiment.curve.update()\r\n 257 self.finished.emit(experiment)\r\n\r\nC:\\Program Files\\Python 3.10\\lib\\site-packages\\pymeasure\\display\\browser.py in setProgress(self, progress)\r\n 73 \r\n 74 def setProgress(self, progress):\r\n---> 75 self.progressbar.setValue(progress)\r\n 76 \r\n 77 class Browser(QtGui.QTreeWidget):\r\n\r\nTypeError: setValue(self, int): argument 1 has unexpected type 'float'An exception has occurred, use %tb to see the full traceback.\r\n```\r\n\r\nI'm using Windows and Python 3.10.\r\n\r\nDo you have an idea what is wrong? Probably something with the data type of progress. But what should I change? 
\r\nThanks for your help.\n", "before_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2021 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport logging\n\nfrom os.path import basename\n\nfrom .Qt import QtCore, QtGui\nfrom ..experiment import Procedure\n\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\n\nclass BrowserItem(QtGui.QTreeWidgetItem):\n \"\"\" Represent a row in the :class:`~pymeasure.display.browser.Browser` tree widget \"\"\"\n\n def __init__(self, results, color, parent=None):\n super().__init__(parent)\n\n pixelmap = QtGui.QPixmap(24, 24)\n pixelmap.fill(color)\n self.setIcon(0, QtGui.QIcon(pixelmap))\n self.setFlags(self.flags() | QtCore.Qt.ItemIsUserCheckable)\n self.setCheckState(0, QtCore.Qt.Checked)\n self.setText(1, basename(results.data_filename))\n\n self.setStatus(results.procedure.status)\n\n self.progressbar = QtGui.QProgressBar()\n self.progressbar.setRange(0, 100)\n self.progressbar.setValue(0)\n\n def setStatus(self, status):\n status_label = {\n Procedure.QUEUED: 'Queued', Procedure.RUNNING: 'Running',\n Procedure.FAILED: 'Failed', Procedure.ABORTED: 'Aborted',\n Procedure.FINISHED: 'Finished'}\n self.setText(3, status_label[status])\n\n if status == Procedure.FAILED or status == Procedure.ABORTED:\n # Set progress bar color to red\n return # Commented this out\n self.progressbar.setStyleSheet(\"\"\"\n QProgressBar {\n border: 1px solid #AAAAAA;\n border-radius: 5px;\n text-align: center;\n }\n QProgressBar::chunk {\n background-color: red;\n }\n \"\"\")\n\n def setProgress(self, progress):\n self.progressbar.setValue(progress)\n\n\nclass Browser(QtGui.QTreeWidget):\n \"\"\"Graphical list view of :class:`Experiment<pymeasure.display.manager.Experiment>`\n objects allowing the user to view the status of queued Experiments as well as\n loading and displaying data from previous runs.\n\n In order that different Experiments be displayed within the same Browser,\n they must have entries in `DATA_COLUMNS` corresponding to the\n `measured_quantities` of the Browser.\n \"\"\"\n\n def __init__(self, procedure_class, display_parameters,\n measured_quantities, sort_by_filename=False, parent=None):\n super().__init__(parent)\n self.display_parameters = display_parameters\n self.procedure_class = procedure_class\n self.measured_quantities = measured_quantities\n\n header_labels = [\"Graph\", \"Filename\", \"Progress\", \"Status\"]\n for parameter in 
self.display_parameters:\n header_labels.append(getattr(self.procedure_class, parameter).name)\n\n self.setColumnCount(len(header_labels))\n self.setHeaderLabels(header_labels)\n self.setSortingEnabled(True)\n if sort_by_filename:\n self.sortItems(1, QtCore.Qt.AscendingOrder)\n\n for i, width in enumerate([80, 140]):\n self.header().resizeSection(i, width)\n\n def add(self, experiment):\n \"\"\"Add a :class:`Experiment<pymeasure.display.manager.Experiment>` object\n to the Browser. This function checks to make sure that the Experiment\n measures the appropriate quantities to warrant its inclusion, and then\n adds a BrowserItem to the Browser, filling all relevant columns with\n Parameter data.\n \"\"\"\n experiment_parameters = experiment.procedure.parameter_objects()\n experiment_parameter_names = list(experiment_parameters.keys())\n\n for measured_quantity in self.measured_quantities:\n if measured_quantity not in experiment.procedure.DATA_COLUMNS:\n raise Exception(\"Procedure does not measure the\"\n \" %s quantity.\" % measured_quantity)\n\n # Set the relevant fields within the BrowserItem if\n # that Parameter is implemented\n item = experiment.browser_item\n for i, column in enumerate(self.display_parameters):\n if column in experiment_parameter_names:\n item.setText(i + 4, str(experiment_parameters[column]))\n\n self.addTopLevelItem(item)\n self.setItemWidget(item, 2, item.progressbar)\n return item\n", "path": "pymeasure/display/browser.py"}, {"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2021 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\n\nfrom setuptools import setup, find_packages\n\nsetup(\n name='PyMeasure',\n version='0.9.0',\n author='PyMeasure Developers',\n packages=find_packages(),\n scripts=[],\n url='https://github.com/pymeasure/pymeasure',\n download_url='https://github.com/pymeasure/pymeasure/tarball/v0.9.0',\n license='MIT License',\n description='Scientific measurement library for instruments, experiments, and live-plotting',\n long_description=open('README.rst').read() + \"\\n\\n\" + open('CHANGES.txt').read(),\n install_requires=[\n \"numpy >= 1.6.1\",\n \"pandas >= 0.14\",\n \"pyvisa >= 1.8\",\n \"pyserial >= 2.7\",\n \"pyqtgraph >= 0.9.10\"\n ],\n extras_require={\n 'matplotlib': ['matplotlib >= 2.0.2'],\n 'tcp': [\n 'pyzmq >= 16.0.2',\n 'cloudpickle >= 0.3.1'\n ],\n 'python-vxi11': ['python-vxi11 >= 0.9']\n },\n setup_requires=[\n 'pytest-runner'\n ],\n tests_require=[\n 'pytest >= 2.9.1',\n 'pytest-qt >= 2.4.0',\n 'pyvisa-sim >= 0.4.0',\n ],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Operating System :: Unix\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n ],\n keywords=\"measure instrument experiment control automate graph plot\"\n)\n", "path": "setup.py"}], "after_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2021 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport logging\n\nfrom os.path import basename\n\nfrom .Qt import QtCore, QtGui\nfrom ..experiment import Procedure\n\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\n\nclass BrowserItem(QtGui.QTreeWidgetItem):\n \"\"\" Represent a row in the :class:`~pymeasure.display.browser.Browser` tree widget \"\"\"\n\n def __init__(self, results, color, parent=None):\n super().__init__(parent)\n\n pixelmap = QtGui.QPixmap(24, 24)\n pixelmap.fill(color)\n self.setIcon(0, QtGui.QIcon(pixelmap))\n self.setFlags(self.flags() | QtCore.Qt.ItemIsUserCheckable)\n self.setCheckState(0, QtCore.Qt.Checked)\n self.setText(1, basename(results.data_filename))\n\n self.setStatus(results.procedure.status)\n\n self.progressbar = QtGui.QProgressBar()\n self.progressbar.setRange(0, 100)\n self.progressbar.setValue(0)\n\n def setStatus(self, status):\n status_label = {\n Procedure.QUEUED: 'Queued', Procedure.RUNNING: 'Running',\n Procedure.FAILED: 'Failed', Procedure.ABORTED: 'Aborted',\n Procedure.FINISHED: 'Finished'}\n self.setText(3, status_label[status])\n\n if status == Procedure.FAILED or status == Procedure.ABORTED:\n # Set progress bar color to red\n return # Commented this out\n self.progressbar.setStyleSheet(\"\"\"\n QProgressBar {\n border: 1px solid #AAAAAA;\n border-radius: 5px;\n text-align: center;\n }\n QProgressBar::chunk {\n background-color: red;\n }\n \"\"\")\n\n def setProgress(self, progress):\n self.progressbar.setValue(int(progress))\n\n\nclass Browser(QtGui.QTreeWidget):\n \"\"\"Graphical list view of :class:`Experiment<pymeasure.display.manager.Experiment>`\n objects allowing the user to view the status of queued Experiments as well as\n loading and displaying data from previous runs.\n\n In order that different Experiments be displayed within the same Browser,\n they must have entries in `DATA_COLUMNS` corresponding to the\n `measured_quantities` of the Browser.\n \"\"\"\n\n def __init__(self, procedure_class, display_parameters,\n measured_quantities, sort_by_filename=False, parent=None):\n super().__init__(parent)\n self.display_parameters = display_parameters\n self.procedure_class = procedure_class\n self.measured_quantities = measured_quantities\n\n header_labels = [\"Graph\", \"Filename\", \"Progress\", \"Status\"]\n for parameter in self.display_parameters:\n header_labels.append(getattr(self.procedure_class, parameter).name)\n\n self.setColumnCount(len(header_labels))\n self.setHeaderLabels(header_labels)\n self.setSortingEnabled(True)\n if sort_by_filename:\n self.sortItems(1, QtCore.Qt.AscendingOrder)\n\n for i, width in enumerate([80, 140]):\n self.header().resizeSection(i, width)\n\n def add(self, experiment):\n \"\"\"Add a :class:`Experiment<pymeasure.display.manager.Experiment>` object\n to the Browser. 
This function checks to make sure that the Experiment\n measures the appropriate quantities to warrant its inclusion, and then\n adds a BrowserItem to the Browser, filling all relevant columns with\n Parameter data.\n \"\"\"\n experiment_parameters = experiment.procedure.parameter_objects()\n experiment_parameter_names = list(experiment_parameters.keys())\n\n for measured_quantity in self.measured_quantities:\n if measured_quantity not in experiment.procedure.DATA_COLUMNS:\n raise Exception(\"Procedure does not measure the\"\n \" %s quantity.\" % measured_quantity)\n\n # Set the relevant fields within the BrowserItem if\n # that Parameter is implemented\n item = experiment.browser_item\n for i, column in enumerate(self.display_parameters):\n if column in experiment_parameter_names:\n item.setText(i + 4, str(experiment_parameters[column]))\n\n self.addTopLevelItem(item)\n self.setItemWidget(item, 2, item.progressbar)\n return item\n", "path": "pymeasure/display/browser.py"}, {"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2021 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\n\nfrom setuptools import setup, find_packages\n\nsetup(\n name='PyMeasure',\n version='0.9.0',\n author='PyMeasure Developers',\n packages=find_packages(),\n scripts=[],\n url='https://github.com/pymeasure/pymeasure',\n download_url='https://github.com/pymeasure/pymeasure/tarball/v0.9.0',\n license='MIT License',\n description='Scientific measurement library for instruments, experiments, and live-plotting',\n long_description=open('README.rst').read() + \"\\n\\n\" + open('CHANGES.txt').read(),\n install_requires=[\n \"numpy >= 1.6.1\",\n \"pandas >= 0.14\",\n \"pyvisa >= 1.8\",\n \"pyserial >= 2.7\",\n \"pyqtgraph >= 0.9.10\"\n ],\n extras_require={\n 'matplotlib': ['matplotlib >= 2.0.2'],\n 'tcp': [\n 'pyzmq >= 16.0.2',\n 'cloudpickle >= 0.3.1'\n ],\n 'python-vxi11': ['python-vxi11 >= 0.9']\n },\n setup_requires=[\n 'pytest-runner'\n ],\n tests_require=[\n 'pytest >= 2.9.1',\n 'pytest-qt >= 2.4.0',\n 'pyvisa-sim >= 0.4.0',\n ],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Operating System :: Unix\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering\",\n ],\n keywords=\"measure instrument experiment control automate graph plot\"\n)\n", "path": "setup.py"}]}
| 3,069 | 188 |
gh_patches_debug_37662
|
rasdani/github-patches
|
git_diff
|
GPflow__GPflow-2063
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing documentation for SquaredExponential kernel
# Documentation
*Is there anything missing in the docs?*
Documentation of gpflow.kernels.stationaries.SquaredExponential went missing sometime after 2.4.0.
It is there in [2.4.0](https://gpflow.github.io/GPflow/2.4.0/api/gpflow/kernels/index.html#gpflow-kernels-squaredexponential).
In [2.5.0](https://gpflow.github.io/GPflow/2.5.0/api/gpflow/kernels/index.html#gpflow-kernels-rbf) (and [currently](https://gpflow.github.io/GPflow/develop/api/gpflow/kernels/index.html#gpflow-kernels-rbf)), the entry was replaced by the RBF "as an alias of `gpflow.kernels.stationaries.SquaredExponential`", but there is no `SquaredExponential` entry.
--- END ISSUE ---
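A minimal sketch of the failure mode described above (illustrative only, not part of the original report): in GPflow, `RBF` is simply another name bound to the `SquaredExponential` class, so any de-duplication keyed on `id()` alone keeps whichever name it encounters first and silently drops the other entry.

```python
# Hypothetical stand-ins; the real classes live in gpflow.kernels.stationaries.
class SquaredExponential:
    """Stand-in for gpflow.kernels.SquaredExponential."""

RBF = SquaredExponential  # alias: same class object, different name

# De-duplication keyed on id() alone drops one of the two names.
seen_by_id, kept = set(), []
for name, obj in [("RBF", RBF), ("SquaredExponential", SquaredExponential)]:
    if id(obj) not in seen_by_id:
        seen_by_id.add(id(obj))
        kept.append(name)
print(kept)  # ['RBF'] -- the SquaredExponential entry is never documented

# Keying on (name, id(obj)) keeps both the alias and the original.
seen_by_key, kept = set(), []
for name, obj in [("RBF", RBF), ("SquaredExponential", SquaredExponential)]:
    key = (name, id(obj))
    if key not in seen_by_key:
        seen_by_key.add(key)
        kept.append(name)
print(kept)  # ['RBF', 'SquaredExponential']
```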
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/generate_module_rst.py`
Content:
```
1 # Copyright 2019 GPflow Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Script to autogenerate .rst files for autodocumentation of classes and modules in GPflow.
15 To be run by the CI system to update docs.
16 """
17 import inspect
18 from dataclasses import dataclass
19 from pathlib import Path
20 from types import ModuleType
21 from typing import Any, Callable, Deque, Dict, List, Mapping, Set, TextIO, Type, Union
22
23 from gpflow.utilities import Dispatcher
24
25 RST_LEVEL_SYMBOLS = ["=", "-", "~", '"', "'", "^"]
26
27 IGNORE_MODULES = {
28 "gpflow.covariances.dispatch",
29 "gpflow.conditionals.dispatch",
30 "gpflow.expectations.dispatch",
31 "gpflow.kullback_leiblers.dispatch",
32 "gpflow.versions",
33 }
34
35
36 def _header(header: str, level: int) -> str:
37 return f"{header}\n{RST_LEVEL_SYMBOLS[level] * len(header)}"
38
39
40 @dataclass
41 class DocumentableDispatcher:
42
43 name: str
44 obj: Dispatcher
45
46 def implementations(self) -> Mapping[Callable[..., Any], List[Type[Any]]]:
47 implementations: Dict[Callable[..., Any], List[Type[Any]]] = {}
48 for args, impl in self.obj.funcs.items():
49 implementations.setdefault(impl, []).append(args)
50 return implementations
51
52 def write(self, out: TextIO) -> None:
53 out.write(
54 f"""
55 {_header(self.name, 2)}
56
57 This function uses multiple dispatch, which will depend on the type of argument passed in:
58 """
59 )
60 for impl, argss in self.implementations().items():
61 impl_name = f"{impl.__module__}.{impl.__name__}"
62
63 out.write(
64 """
65 .. code-block:: python
66
67 """
68 )
69 for args in argss:
70 arg_names = ", ".join([a.__name__ for a in args])
71 out.write(f" {self.name}( {arg_names} )\n")
72 out.write(f" # dispatch to -> {impl_name}(...)\n")
73 out.write(
74 f"""
75 .. autofunction:: {impl_name}
76 """
77 )
78
79
80 @dataclass
81 class DocumentableClass:
82
83 name: str
84 obj: Type[Any]
85
86 def write(self, out: TextIO) -> None:
87 out.write(
88 f"""
89 {_header(self.name, 2)}
90
91 .. autoclass:: {self.name}
92 :show-inheritance:
93 :members:
94 """
95 )
96
97
98 @dataclass
99 class DocumentableFunction:
100
101 name: str
102 obj: Callable[..., Any]
103
104 def write(self, out: TextIO) -> None:
105 out.write(
106 f"""
107 {_header(self.name, 2)}
108
109 .. autofunction:: {self.name}
110 """
111 )
112
113
114 @dataclass
115 class DocumentableModule:
116
117 name: str
118 obj: ModuleType
119 modules: List["DocumentableModule"]
120 classes: List[DocumentableClass]
121 functions: List[Union[DocumentableDispatcher, DocumentableFunction]]
122
123 @staticmethod
124 def collect(
125 root: ModuleType,
126 ) -> "DocumentableModule":
127 root_name = root.__name__
128 exported_names = set(getattr(root, "__all__", []))
129
130 modules: List["DocumentableModule"] = []
131 classes: List[DocumentableClass] = []
132 functions: List[Union[DocumentableDispatcher, DocumentableFunction]] = []
133
134 for key in dir(root):
135 if key.startswith("_"):
136 continue
137
138 child = getattr(root, key)
139 child_name = root_name + "." + key
140 if child_name in IGNORE_MODULES:
141 continue
142
143 # pylint: disable=cell-var-from-loop
144 def _should_ignore(child: Union[Callable[..., Any], Type[Any]]) -> bool:
145 declared_in_root = child.__module__ == root_name
146 explicitly_exported = key in exported_names
147 return not (declared_in_root or explicitly_exported)
148
149 # pylint: enable=cell-var-from-loop
150
151 if isinstance(child, Dispatcher):
152 functions.append(DocumentableDispatcher(child_name, child))
153 elif inspect.ismodule(child):
154 if child.__name__ != child_name: # Ignore imports of modules.
155 continue
156 modules.append(DocumentableModule.collect(child))
157 elif inspect.isclass(child):
158 if _should_ignore(child):
159 continue
160 classes.append(DocumentableClass(child_name, child))
161 elif inspect.isfunction(child):
162 if _should_ignore(child):
163 continue
164 functions.append(DocumentableFunction(child_name, child))
165
166 return DocumentableModule(root_name, root, modules, classes, functions)
167
168 def seen_in_dispatchers(self, seen: Set[int]) -> None:
169 for module in self.modules:
170 module.seen_in_dispatchers(seen)
171 for function in self.functions:
172 if isinstance(function, DocumentableDispatcher):
173 impls = function.obj.funcs.values()
174 for impl in impls:
175 seen.add(id(impl))
176
177 def prune_duplicates(self) -> None:
178 seen: Set[int] = set()
179 self.seen_in_dispatchers(seen)
180
181 # Breadth-first search so that we prefer objects with shorter names.
182 todo = Deque([self])
183 while todo:
184 module = todo.popleft()
185
186 new_classes = []
187 for c in module.classes:
188 if id(c.obj) not in seen:
189 seen.add(id(c.obj))
190 new_classes.append(c)
191 module.classes = new_classes
192
193 new_functions = []
194 for f in module.functions:
195 if id(f.obj) not in seen:
196 seen.add(id(f.obj))
197 new_functions.append(f)
198 module.functions = new_functions
199
200 todo.extend(module.modules)
201
202 def prune_empty_modules(self) -> None:
203 new_modules = []
204 for m in self.modules:
205 m.prune_empty_modules()
206
207 if m.modules or m.classes or m.functions:
208 new_modules.append(m)
209 self.modules = new_modules
210
211 def prune(self) -> None:
212 self.prune_duplicates()
213 self.prune_empty_modules()
214
215 def write_modules(self, out: TextIO) -> None:
216 if not self.modules:
217 return
218
219 out.write(
220 f"""
221 {_header('Modules', 1)}
222
223 .. toctree::
224 :maxdepth: 1
225
226 """
227 )
228 for module in self.modules:
229 out.write(f" {module.name} <{module.name.split('.')[-1]}/index>\n")
230
231 def write_classes(self, out: TextIO) -> None:
232 if not self.classes:
233 return
234
235 out.write(
236 f"""
237 {_header('Classes', 1)}
238 """
239 )
240 for cls in self.classes:
241 cls.write(out)
242
243 def write_functions(self, out: TextIO) -> None:
244 if not self.functions:
245 return
246
247 out.write(
248 f"""
249 {_header('Functions', 1)}
250 """
251 )
252 for function in self.functions:
253 function.write(out)
254
255 def write(self, path: Path) -> None:
256 dir_path = path / f"{self.name.replace('.', '/')}"
257 dir_path.mkdir(parents=True, exist_ok=True)
258 index_path = dir_path / "index.rst"
259 with index_path.open("wt") as out:
260 print("Writing", index_path)
261 out.write(
262 f"""{_header(self.name, 0)}
263
264 .. THIS IS AN AUTOGENERATED RST FILE
265
266 .. automodule:: {self.name}
267 """
268 )
269 self.write_modules(out)
270 self.write_classes(out)
271 self.write_functions(out)
272
273 for module in self.modules:
274 module.write(path)
275
276 def str_into(self, indent: int, lines: List[str]) -> None:
277 lines.append(2 * indent * " " + "Module: " + self.name)
278 for module in self.modules:
279 module.str_into(indent + 1, lines)
280 for cls in self.classes:
281 lines.append(2 * (indent + 1) * " " + "Class: " + cls.name)
282 for function in self.functions:
283 lines.append(2 * (indent + 1) * " " + "Function: " + function.name)
284
285 def __str__(self) -> str:
286 lines: List[str] = []
287 self.str_into(0, lines)
288 return "\n".join(lines)
289
290
291 def generate_module_rst(module: ModuleType, dest: Path) -> None:
292 """
293 Traverses the given `module` and generates `.rst` files for Sphinx.
294 """
295 docs = DocumentableModule.collect(module)
296 docs.prune()
297 docs.write(dest)
298
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/doc/generate_module_rst.py b/doc/generate_module_rst.py
--- a/doc/generate_module_rst.py
+++ b/doc/generate_module_rst.py
@@ -18,7 +18,7 @@
from dataclasses import dataclass
from pathlib import Path
from types import ModuleType
-from typing import Any, Callable, Deque, Dict, List, Mapping, Set, TextIO, Type, Union
+from typing import Any, Callable, Deque, Dict, List, Mapping, Set, TextIO, Tuple, Type, Union
from gpflow.utilities import Dispatcher
@@ -165,17 +165,19 @@
return DocumentableModule(root_name, root, modules, classes, functions)
- def seen_in_dispatchers(self, seen: Set[int]) -> None:
+ def seen_in_dispatchers(self, seen: Set[Tuple[str, int]]) -> None:
for module in self.modules:
module.seen_in_dispatchers(seen)
for function in self.functions:
if isinstance(function, DocumentableDispatcher):
impls = function.obj.funcs.values()
for impl in impls:
- seen.add(id(impl))
+ # See comment below (for classes) about aliases.
+ key = (impl.__name__, id(impl))
+ seen.add(key)
def prune_duplicates(self) -> None:
- seen: Set[int] = set()
+ seen: Set[Tuple[str, int]] = set()
self.seen_in_dispatchers(seen)
# Breadth-first search so that we prefer objects with shorter names.
@@ -185,15 +187,20 @@
new_classes = []
for c in module.classes:
- if id(c.obj) not in seen:
- seen.add(id(c.obj))
+ # Account for objects to have aliases, hence include the object name in the key.
+ # We want to generate documentation for both the alias and the original object.
+ key = (c.name[c.name.rfind(".") + 1 :], id(c.obj))
+ if key not in seen:
+ seen.add(key)
new_classes.append(c)
module.classes = new_classes
new_functions = []
for f in module.functions:
- if id(f.obj) not in seen:
- seen.add(id(f.obj))
+ # See comment above (for classes) about aliases.
+ key = (f.name[f.name.rfind(".") + 1 :], id(f.obj))
+ if key not in seen:
+ seen.add(key)
new_functions.append(f)
module.functions = new_functions
|
{"golden_diff": "diff --git a/doc/generate_module_rst.py b/doc/generate_module_rst.py\n--- a/doc/generate_module_rst.py\n+++ b/doc/generate_module_rst.py\n@@ -18,7 +18,7 @@\n from dataclasses import dataclass\n from pathlib import Path\n from types import ModuleType\n-from typing import Any, Callable, Deque, Dict, List, Mapping, Set, TextIO, Type, Union\n+from typing import Any, Callable, Deque, Dict, List, Mapping, Set, TextIO, Tuple, Type, Union\n \n from gpflow.utilities import Dispatcher\n \n@@ -165,17 +165,19 @@\n \n return DocumentableModule(root_name, root, modules, classes, functions)\n \n- def seen_in_dispatchers(self, seen: Set[int]) -> None:\n+ def seen_in_dispatchers(self, seen: Set[Tuple[str, int]]) -> None:\n for module in self.modules:\n module.seen_in_dispatchers(seen)\n for function in self.functions:\n if isinstance(function, DocumentableDispatcher):\n impls = function.obj.funcs.values()\n for impl in impls:\n- seen.add(id(impl))\n+ # See comment below (for classes) about aliases.\n+ key = (impl.__name__, id(impl))\n+ seen.add(key)\n \n def prune_duplicates(self) -> None:\n- seen: Set[int] = set()\n+ seen: Set[Tuple[str, int]] = set()\n self.seen_in_dispatchers(seen)\n \n # Breadth-first search so that we prefer objects with shorter names.\n@@ -185,15 +187,20 @@\n \n new_classes = []\n for c in module.classes:\n- if id(c.obj) not in seen:\n- seen.add(id(c.obj))\n+ # Account for objects to have aliases, hence include the object name in the key.\n+ # We want to generate documentation for both the alias and the original object.\n+ key = (c.name[c.name.rfind(\".\") + 1 :], id(c.obj))\n+ if key not in seen:\n+ seen.add(key)\n new_classes.append(c)\n module.classes = new_classes\n \n new_functions = []\n for f in module.functions:\n- if id(f.obj) not in seen:\n- seen.add(id(f.obj))\n+ # See comment above (for classes) about aliases.\n+ key = (f.name[f.name.rfind(\".\") + 1 :], id(f.obj))\n+ if key not in seen:\n+ seen.add(key)\n new_functions.append(f)\n module.functions = new_functions\n", "issue": "Missing documentation for SquaredExponential kernel\n# Documentation\r\n\r\n*Is there anything missing in the docs?*\r\n\r\nDocumentation of gpflow.kernels.stationaries.SquaredExponential went missing sometime after 2.4.0\r\n\r\nIt is there in [2.4.0](https://gpflow.github.io/GPflow/2.4.0/api/gpflow/kernels/index.html#gpflow-kernels-squaredexponential).\r\n\r\nIn [2.5.0](https://gpflow.github.io/GPflow/2.5.0/api/gpflow/kernels/index.html#gpflow-kernels-rbf) (and [currently](https://gpflow.github.io/GPflow/develop/api/gpflow/kernels/index.html#gpflow-kernels-rbf)), the entry was replaced by the RBF \"as an alias of `gpflow.kernels.stationaries.SquaredExponential`\", but there is no `SquaredExponential` entry.\r\n\n", "before_files": [{"content": "# Copyright 2019 GPflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Script to autogenerate .rst files for autodocumentation of classes and modules in GPflow.\nTo be run by the CI system to update docs.\n\"\"\"\nimport 
inspect\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom types import ModuleType\nfrom typing import Any, Callable, Deque, Dict, List, Mapping, Set, TextIO, Type, Union\n\nfrom gpflow.utilities import Dispatcher\n\nRST_LEVEL_SYMBOLS = [\"=\", \"-\", \"~\", '\"', \"'\", \"^\"]\n\nIGNORE_MODULES = {\n \"gpflow.covariances.dispatch\",\n \"gpflow.conditionals.dispatch\",\n \"gpflow.expectations.dispatch\",\n \"gpflow.kullback_leiblers.dispatch\",\n \"gpflow.versions\",\n}\n\n\ndef _header(header: str, level: int) -> str:\n return f\"{header}\\n{RST_LEVEL_SYMBOLS[level] * len(header)}\"\n\n\n@dataclass\nclass DocumentableDispatcher:\n\n name: str\n obj: Dispatcher\n\n def implementations(self) -> Mapping[Callable[..., Any], List[Type[Any]]]:\n implementations: Dict[Callable[..., Any], List[Type[Any]]] = {}\n for args, impl in self.obj.funcs.items():\n implementations.setdefault(impl, []).append(args)\n return implementations\n\n def write(self, out: TextIO) -> None:\n out.write(\n f\"\"\"\n{_header(self.name, 2)}\n\nThis function uses multiple dispatch, which will depend on the type of argument passed in:\n\"\"\"\n )\n for impl, argss in self.implementations().items():\n impl_name = f\"{impl.__module__}.{impl.__name__}\"\n\n out.write(\n \"\"\"\n.. code-block:: python\n\n\"\"\"\n )\n for args in argss:\n arg_names = \", \".join([a.__name__ for a in args])\n out.write(f\" {self.name}( {arg_names} )\\n\")\n out.write(f\" # dispatch to -> {impl_name}(...)\\n\")\n out.write(\n f\"\"\"\n.. autofunction:: {impl_name}\n\"\"\"\n )\n\n\n@dataclass\nclass DocumentableClass:\n\n name: str\n obj: Type[Any]\n\n def write(self, out: TextIO) -> None:\n out.write(\n f\"\"\"\n{_header(self.name, 2)}\n\n.. autoclass:: {self.name}\n :show-inheritance:\n :members:\n\"\"\"\n )\n\n\n@dataclass\nclass DocumentableFunction:\n\n name: str\n obj: Callable[..., Any]\n\n def write(self, out: TextIO) -> None:\n out.write(\n f\"\"\"\n{_header(self.name, 2)}\n\n.. 
autofunction:: {self.name}\n\"\"\"\n )\n\n\n@dataclass\nclass DocumentableModule:\n\n name: str\n obj: ModuleType\n modules: List[\"DocumentableModule\"]\n classes: List[DocumentableClass]\n functions: List[Union[DocumentableDispatcher, DocumentableFunction]]\n\n @staticmethod\n def collect(\n root: ModuleType,\n ) -> \"DocumentableModule\":\n root_name = root.__name__\n exported_names = set(getattr(root, \"__all__\", []))\n\n modules: List[\"DocumentableModule\"] = []\n classes: List[DocumentableClass] = []\n functions: List[Union[DocumentableDispatcher, DocumentableFunction]] = []\n\n for key in dir(root):\n if key.startswith(\"_\"):\n continue\n\n child = getattr(root, key)\n child_name = root_name + \".\" + key\n if child_name in IGNORE_MODULES:\n continue\n\n # pylint: disable=cell-var-from-loop\n def _should_ignore(child: Union[Callable[..., Any], Type[Any]]) -> bool:\n declared_in_root = child.__module__ == root_name\n explicitly_exported = key in exported_names\n return not (declared_in_root or explicitly_exported)\n\n # pylint: enable=cell-var-from-loop\n\n if isinstance(child, Dispatcher):\n functions.append(DocumentableDispatcher(child_name, child))\n elif inspect.ismodule(child):\n if child.__name__ != child_name: # Ignore imports of modules.\n continue\n modules.append(DocumentableModule.collect(child))\n elif inspect.isclass(child):\n if _should_ignore(child):\n continue\n classes.append(DocumentableClass(child_name, child))\n elif inspect.isfunction(child):\n if _should_ignore(child):\n continue\n functions.append(DocumentableFunction(child_name, child))\n\n return DocumentableModule(root_name, root, modules, classes, functions)\n\n def seen_in_dispatchers(self, seen: Set[int]) -> None:\n for module in self.modules:\n module.seen_in_dispatchers(seen)\n for function in self.functions:\n if isinstance(function, DocumentableDispatcher):\n impls = function.obj.funcs.values()\n for impl in impls:\n seen.add(id(impl))\n\n def prune_duplicates(self) -> None:\n seen: Set[int] = set()\n self.seen_in_dispatchers(seen)\n\n # Breadth-first search so that we prefer objects with shorter names.\n todo = Deque([self])\n while todo:\n module = todo.popleft()\n\n new_classes = []\n for c in module.classes:\n if id(c.obj) not in seen:\n seen.add(id(c.obj))\n new_classes.append(c)\n module.classes = new_classes\n\n new_functions = []\n for f in module.functions:\n if id(f.obj) not in seen:\n seen.add(id(f.obj))\n new_functions.append(f)\n module.functions = new_functions\n\n todo.extend(module.modules)\n\n def prune_empty_modules(self) -> None:\n new_modules = []\n for m in self.modules:\n m.prune_empty_modules()\n\n if m.modules or m.classes or m.functions:\n new_modules.append(m)\n self.modules = new_modules\n\n def prune(self) -> None:\n self.prune_duplicates()\n self.prune_empty_modules()\n\n def write_modules(self, out: TextIO) -> None:\n if not self.modules:\n return\n\n out.write(\n f\"\"\"\n{_header('Modules', 1)}\n\n.. 
toctree::\n :maxdepth: 1\n\n\"\"\"\n )\n for module in self.modules:\n out.write(f\" {module.name} <{module.name.split('.')[-1]}/index>\\n\")\n\n def write_classes(self, out: TextIO) -> None:\n if not self.classes:\n return\n\n out.write(\n f\"\"\"\n{_header('Classes', 1)}\n\"\"\"\n )\n for cls in self.classes:\n cls.write(out)\n\n def write_functions(self, out: TextIO) -> None:\n if not self.functions:\n return\n\n out.write(\n f\"\"\"\n{_header('Functions', 1)}\n\"\"\"\n )\n for function in self.functions:\n function.write(out)\n\n def write(self, path: Path) -> None:\n dir_path = path / f\"{self.name.replace('.', '/')}\"\n dir_path.mkdir(parents=True, exist_ok=True)\n index_path = dir_path / \"index.rst\"\n with index_path.open(\"wt\") as out:\n print(\"Writing\", index_path)\n out.write(\n f\"\"\"{_header(self.name, 0)}\n\n.. THIS IS AN AUTOGENERATED RST FILE\n\n.. automodule:: {self.name}\n\"\"\"\n )\n self.write_modules(out)\n self.write_classes(out)\n self.write_functions(out)\n\n for module in self.modules:\n module.write(path)\n\n def str_into(self, indent: int, lines: List[str]) -> None:\n lines.append(2 * indent * \" \" + \"Module: \" + self.name)\n for module in self.modules:\n module.str_into(indent + 1, lines)\n for cls in self.classes:\n lines.append(2 * (indent + 1) * \" \" + \"Class: \" + cls.name)\n for function in self.functions:\n lines.append(2 * (indent + 1) * \" \" + \"Function: \" + function.name)\n\n def __str__(self) -> str:\n lines: List[str] = []\n self.str_into(0, lines)\n return \"\\n\".join(lines)\n\n\ndef generate_module_rst(module: ModuleType, dest: Path) -> None:\n \"\"\"\n Traverses the given `module` and generates `.rst` files for Sphinx.\n \"\"\"\n docs = DocumentableModule.collect(module)\n docs.prune()\n docs.write(dest)\n", "path": "doc/generate_module_rst.py"}], "after_files": [{"content": "# Copyright 2019 GPflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Script to autogenerate .rst files for autodocumentation of classes and modules in GPflow.\nTo be run by the CI system to update docs.\n\"\"\"\nimport inspect\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom types import ModuleType\nfrom typing import Any, Callable, Deque, Dict, List, Mapping, Set, TextIO, Tuple, Type, Union\n\nfrom gpflow.utilities import Dispatcher\n\nRST_LEVEL_SYMBOLS = [\"=\", \"-\", \"~\", '\"', \"'\", \"^\"]\n\nIGNORE_MODULES = {\n \"gpflow.covariances.dispatch\",\n \"gpflow.conditionals.dispatch\",\n \"gpflow.expectations.dispatch\",\n \"gpflow.kullback_leiblers.dispatch\",\n \"gpflow.versions\",\n}\n\n\ndef _header(header: str, level: int) -> str:\n return f\"{header}\\n{RST_LEVEL_SYMBOLS[level] * len(header)}\"\n\n\n@dataclass\nclass DocumentableDispatcher:\n\n name: str\n obj: Dispatcher\n\n def implementations(self) -> Mapping[Callable[..., Any], List[Type[Any]]]:\n implementations: Dict[Callable[..., Any], List[Type[Any]]] = {}\n for args, impl in self.obj.funcs.items():\n implementations.setdefault(impl, []).append(args)\n return 
implementations\n\n def write(self, out: TextIO) -> None:\n out.write(\n f\"\"\"\n{_header(self.name, 2)}\n\nThis function uses multiple dispatch, which will depend on the type of argument passed in:\n\"\"\"\n )\n for impl, argss in self.implementations().items():\n impl_name = f\"{impl.__module__}.{impl.__name__}\"\n\n out.write(\n \"\"\"\n.. code-block:: python\n\n\"\"\"\n )\n for args in argss:\n arg_names = \", \".join([a.__name__ for a in args])\n out.write(f\" {self.name}( {arg_names} )\\n\")\n out.write(f\" # dispatch to -> {impl_name}(...)\\n\")\n out.write(\n f\"\"\"\n.. autofunction:: {impl_name}\n\"\"\"\n )\n\n\n@dataclass\nclass DocumentableClass:\n\n name: str\n obj: Type[Any]\n\n def write(self, out: TextIO) -> None:\n out.write(\n f\"\"\"\n{_header(self.name, 2)}\n\n.. autoclass:: {self.name}\n :show-inheritance:\n :members:\n\"\"\"\n )\n\n\n@dataclass\nclass DocumentableFunction:\n\n name: str\n obj: Callable[..., Any]\n\n def write(self, out: TextIO) -> None:\n out.write(\n f\"\"\"\n{_header(self.name, 2)}\n\n.. autofunction:: {self.name}\n\"\"\"\n )\n\n\n@dataclass\nclass DocumentableModule:\n\n name: str\n obj: ModuleType\n modules: List[\"DocumentableModule\"]\n classes: List[DocumentableClass]\n functions: List[Union[DocumentableDispatcher, DocumentableFunction]]\n\n @staticmethod\n def collect(\n root: ModuleType,\n ) -> \"DocumentableModule\":\n root_name = root.__name__\n exported_names = set(getattr(root, \"__all__\", []))\n\n modules: List[\"DocumentableModule\"] = []\n classes: List[DocumentableClass] = []\n functions: List[Union[DocumentableDispatcher, DocumentableFunction]] = []\n\n for key in dir(root):\n if key.startswith(\"_\"):\n continue\n\n child = getattr(root, key)\n child_name = root_name + \".\" + key\n if child_name in IGNORE_MODULES:\n continue\n\n # pylint: disable=cell-var-from-loop\n def _should_ignore(child: Union[Callable[..., Any], Type[Any]]) -> bool:\n declared_in_root = child.__module__ == root_name\n explicitly_exported = key in exported_names\n return not (declared_in_root or explicitly_exported)\n\n # pylint: enable=cell-var-from-loop\n\n if isinstance(child, Dispatcher):\n functions.append(DocumentableDispatcher(child_name, child))\n elif inspect.ismodule(child):\n if child.__name__ != child_name: # Ignore imports of modules.\n continue\n modules.append(DocumentableModule.collect(child))\n elif inspect.isclass(child):\n if _should_ignore(child):\n continue\n classes.append(DocumentableClass(child_name, child))\n elif inspect.isfunction(child):\n if _should_ignore(child):\n continue\n functions.append(DocumentableFunction(child_name, child))\n\n return DocumentableModule(root_name, root, modules, classes, functions)\n\n def seen_in_dispatchers(self, seen: Set[Tuple[str, int]]) -> None:\n for module in self.modules:\n module.seen_in_dispatchers(seen)\n for function in self.functions:\n if isinstance(function, DocumentableDispatcher):\n impls = function.obj.funcs.values()\n for impl in impls:\n # See comment below (for classes) about aliases.\n key = (impl.__name__, id(impl))\n seen.add(key)\n\n def prune_duplicates(self) -> None:\n seen: Set[Tuple[str, int]] = set()\n self.seen_in_dispatchers(seen)\n\n # Breadth-first search so that we prefer objects with shorter names.\n todo = Deque([self])\n while todo:\n module = todo.popleft()\n\n new_classes = []\n for c in module.classes:\n # Account for objects to have aliases, hence include the object name in the key.\n # We want to generate documentation for both the alias and the original 
object.\n key = (c.name[c.name.rfind(\".\") + 1 :], id(c.obj))\n if key not in seen:\n seen.add(key)\n new_classes.append(c)\n module.classes = new_classes\n\n new_functions = []\n for f in module.functions:\n # See comment above (for classes) about aliases.\n key = (f.name[f.name.rfind(\".\") + 1 :], id(f.obj))\n if key not in seen:\n seen.add(key)\n new_functions.append(f)\n module.functions = new_functions\n\n todo.extend(module.modules)\n\n def prune_empty_modules(self) -> None:\n new_modules = []\n for m in self.modules:\n m.prune_empty_modules()\n\n if m.modules or m.classes or m.functions:\n new_modules.append(m)\n self.modules = new_modules\n\n def prune(self) -> None:\n self.prune_duplicates()\n self.prune_empty_modules()\n\n def write_modules(self, out: TextIO) -> None:\n if not self.modules:\n return\n\n out.write(\n f\"\"\"\n{_header('Modules', 1)}\n\n.. toctree::\n :maxdepth: 1\n\n\"\"\"\n )\n for module in self.modules:\n out.write(f\" {module.name} <{module.name.split('.')[-1]}/index>\\n\")\n\n def write_classes(self, out: TextIO) -> None:\n if not self.classes:\n return\n\n out.write(\n f\"\"\"\n{_header('Classes', 1)}\n\"\"\"\n )\n for cls in self.classes:\n cls.write(out)\n\n def write_functions(self, out: TextIO) -> None:\n if not self.functions:\n return\n\n out.write(\n f\"\"\"\n{_header('Functions', 1)}\n\"\"\"\n )\n for function in self.functions:\n function.write(out)\n\n def write(self, path: Path) -> None:\n dir_path = path / f\"{self.name.replace('.', '/')}\"\n dir_path.mkdir(parents=True, exist_ok=True)\n index_path = dir_path / \"index.rst\"\n with index_path.open(\"wt\") as out:\n print(\"Writing\", index_path)\n out.write(\n f\"\"\"{_header(self.name, 0)}\n\n.. THIS IS AN AUTOGENERATED RST FILE\n\n.. automodule:: {self.name}\n\"\"\"\n )\n self.write_modules(out)\n self.write_classes(out)\n self.write_functions(out)\n\n for module in self.modules:\n module.write(path)\n\n def str_into(self, indent: int, lines: List[str]) -> None:\n lines.append(2 * indent * \" \" + \"Module: \" + self.name)\n for module in self.modules:\n module.str_into(indent + 1, lines)\n for cls in self.classes:\n lines.append(2 * (indent + 1) * \" \" + \"Class: \" + cls.name)\n for function in self.functions:\n lines.append(2 * (indent + 1) * \" \" + \"Function: \" + function.name)\n\n def __str__(self) -> str:\n lines: List[str] = []\n self.str_into(0, lines)\n return \"\\n\".join(lines)\n\n\ndef generate_module_rst(module: ModuleType, dest: Path) -> None:\n \"\"\"\n Traverses the given `module` and generates `.rst` files for Sphinx.\n \"\"\"\n docs = DocumentableModule.collect(module)\n docs.prune()\n docs.write(dest)\n", "path": "doc/generate_module_rst.py"}]}
| 3,225 | 574 |
gh_patches_debug_38763
|
rasdani/github-patches
|
git_diff
|
Gallopsled__pwntools-227
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
atexit module can't handle multiple instances
The `atexit` module uses a dictionary to keep track of the routines to call. This means that no routines can be registered more than once, for example with different arguments.
Additionally, this means that `pwnlib.atexit` does not guarantee order.
Per the [Python documentation](https://docs.python.org/2/library/atexit.html#module-atexit) for `atexit`:
_atexit runs these functions in the reverse order in which they were registered; if you register A, B, and C, at interpreter termination time they will be run in the order C, B, A._
However, multiple registration of routines breaks `unregister` in its current incarnation. The suggested remedy for this is to return a unique ID for each `register` call that is passed to `unregister`, rather than passing in the function to `unregister`.
--- END ISSUE ---
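A minimal sketch of the remedy suggested above (illustrative only; names and details are assumptions, not pwnlib's actual API): `register` hands back a unique, monotonically increasing identifier, handlers are stored keyed on that identifier so the same function can be registered repeatedly with different arguments, and handlers run in reverse registration order as with the standard library's atexit.

```python
import itertools

_counter = itertools.count()   # source of unique identifiers
_handlers = {}                 # ident -> (func, args, kwargs)

def register(func, *args, **kwargs):
    ident = next(_counter)
    _handlers[ident] = (func, args, kwargs)
    return ident               # pass this to unregister(), not the function

def unregister(ident):
    _handlers.pop(ident, None) # no-op if the ident was never registered

def _run_handlers():
    # descending ident order == reverse order of registration (C, B, A)
    for ident in sorted(_handlers, reverse=True):
        func, args, kwargs = _handlers[ident]
        func(*args, **kwargs)
```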
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwnlib/atexit.py`
Content:
```
1 """
2 Replacement for the Python standard library's atexit.py.
3
4 Whereas the standard :mod:`atexit` module only defines :func:`atexit.register`,
5 this replacement module also defines :func:`unregister`.
6
7 This module also fixes a the issue that exceptions raised by an exit handler is
8 printed twice when the standard :mod:`atexit` is used.
9 """
10
11 import sys, traceback
12
13 __all__ = ['register', 'unregister']
14
15 _handlers = {}
16
17 def register(func, *args, **kwargs):
18 """register(func, *args, **kwargs)
19
20 Registers a function to be called when an unhandled exception occurs. The
21 function will be called with positional arguments `args` and keyword
22 arguments `kwargs`, i.e. ``func(*args, **kwargs)``.
23
24 If `func` is already registered then `args` and `kwargs` will be updated.
25
26 This function can be used as a decorator::
27
28 def f():
29 ...
30 atexit.register(f)
31
32 is equivalent to this::
33
34 @atexit.register
35 def f():
36 ...
37
38 """
39 _handlers[func] = (args, kwargs)
40 return func
41
42 def unregister(func):
43 """unregister(func)
44
45 Remove `func` from the collection of registered functions. If `func` isn't
46 registered this is a no-op.
47 """
48 if func in _handlers:
49 del _handlers[func]
50
51 def _run_handlers():
52 """_run_handlers()
53
54 Run registered exit handlers. The order is arbitrary.
55
56 If a handler raises an exception, it will be printed but nothing else
57 happens, i.e. other handlers will be run and `sys.excepthook` will not be
58 called for that reason.
59 """
60 for func, (args, kwargs) in _handlers.items():
61 try:
62 func(*args, **kwargs)
63 except SystemExit:
64 pass
65 except:
66 # extract the current exception and rewind the traceback to where it
67 # originated
68 typ, val, tb = sys.exc_info()
69 traceback.print_exception(typ, val, tb.tb_next)
70
71 # if there's already an exitfunc registered be sure to run that too
72 if hasattr(sys, "exitfunc"):
73 register(sys.exitfunc)
74
75 sys.exitfunc = _run_handlers
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pwnlib/atexit.py b/pwnlib/atexit.py
--- a/pwnlib/atexit.py
+++ b/pwnlib/atexit.py
@@ -8,58 +8,76 @@
printed twice when the standard :mod:`atexit` is used.
"""
-import sys, traceback
+import sys, traceback, threading
+from .context import context
__all__ = ['register', 'unregister']
+_lock = threading.Lock()
+_ident = 0
_handlers = {}
def register(func, *args, **kwargs):
"""register(func, *args, **kwargs)
- Registers a function to be called when an unhandled exception occurs. The
- function will be called with positional arguments `args` and keyword
- arguments `kwargs`, i.e. ``func(*args, **kwargs)``.
+ Registers a function to be called on program termination. The function will
+ be called with positional arguments `args` and keyword arguments `kwargs`,
+ i.e. ``func(*args, **kwargs)``. The current `context` is recorded and will
+ be the one used when the handler is run.
- If `func` is already registered then `args` and `kwargs` will be updated.
+ E.g. to suppress logging output from an exit-handler one could write::
- This function can be used as a decorator::
+ with context.local(log_level = 'error'):
+ atexit.register(handler)
- def f():
- ...
- atexit.register(f)
+ An identifier is returned which can be used to unregister the exit-handler.
- is equivalent to this::
+ This function can be used as a decorator::
@atexit.register
- def f():
+ def handler():
...
- """
- _handlers[func] = (args, kwargs)
- return func
+ Notice however that this will bind ``handler`` to the identifier and not the
+ actual exit-handler. The exit-handler can then be unregistered with::
+
+ atexit.unregister(handler)
-def unregister(func):
- """unregister(func)
+ This function is thread safe.
- Remove `func` from the collection of registered functions. If `func` isn't
- registered this is a no-op.
"""
- if func in _handlers:
- del _handlers[func]
+ global _ident
+ with _lock:
+ ident = _ident
+ _ident += 1
+ _handlers[ident] = (func, args, kwargs, vars(context))
+ return ident
+
+def unregister(ident):
+ """unregister(ident)
+
+ Remove the exit-handler identified by `ident` from the list of registered
+ handlers. If `ident` isn't registered this is a no-op.
+ """
+ if ident in _handlers:
+ del _handlers[ident]
def _run_handlers():
"""_run_handlers()
- Run registered exit handlers. The order is arbitrary.
+ Run registered exit-handlers. They run in the reverse order of which they
+ were registered.
If a handler raises an exception, it will be printed but nothing else
happens, i.e. other handlers will be run and `sys.excepthook` will not be
called for that reason.
"""
- for func, (args, kwargs) in _handlers.items():
+ context.clear()
+ for _ident, (func, args, kwargs, ctx) in \
+ sorted(_handlers.items(), reverse = True):
try:
- func(*args, **kwargs)
+ with context.local(**ctx):
+ func(*args, **kwargs)
except SystemExit:
pass
except:
|
{"golden_diff": "diff --git a/pwnlib/atexit.py b/pwnlib/atexit.py\n--- a/pwnlib/atexit.py\n+++ b/pwnlib/atexit.py\n@@ -8,58 +8,76 @@\n printed twice when the standard :mod:`atexit` is used.\n \"\"\"\n \n-import sys, traceback\n+import sys, traceback, threading\n+from .context import context\n \n __all__ = ['register', 'unregister']\n \n+_lock = threading.Lock()\n+_ident = 0\n _handlers = {}\n \n def register(func, *args, **kwargs):\n \"\"\"register(func, *args, **kwargs)\n \n- Registers a function to be called when an unhandled exception occurs. The\n- function will be called with positional arguments `args` and keyword\n- arguments `kwargs`, i.e. ``func(*args, **kwargs)``.\n+ Registers a function to be called on program termination. The function will\n+ be called with positional arguments `args` and keyword arguments `kwargs`,\n+ i.e. ``func(*args, **kwargs)``. The current `context` is recorded and will\n+ be the one used when the handler is run.\n \n- If `func` is already registered then `args` and `kwargs` will be updated.\n+ E.g. to suppress logging output from an exit-handler one could write::\n \n- This function can be used as a decorator::\n+ with context.local(log_level = 'error'):\n+ atexit.register(handler)\n \n- def f():\n- ...\n- atexit.register(f)\n+ An identifier is returned which can be used to unregister the exit-handler.\n \n- is equivalent to this::\n+ This function can be used as a decorator::\n \n @atexit.register\n- def f():\n+ def handler():\n ...\n \n- \"\"\"\n- _handlers[func] = (args, kwargs)\n- return func\n+ Notice however that this will bind ``handler`` to the identifier and not the\n+ actual exit-handler. The exit-handler can then be unregistered with::\n+\n+ atexit.unregister(handler)\n \n-def unregister(func):\n- \"\"\"unregister(func)\n+ This function is thread safe.\n \n- Remove `func` from the collection of registered functions. If `func` isn't\n- registered this is a no-op.\n \"\"\"\n- if func in _handlers:\n- del _handlers[func]\n+ global _ident\n+ with _lock:\n+ ident = _ident\n+ _ident += 1\n+ _handlers[ident] = (func, args, kwargs, vars(context))\n+ return ident\n+\n+def unregister(ident):\n+ \"\"\"unregister(ident)\n+\n+ Remove the exit-handler identified by `ident` from the list of registered\n+ handlers. If `ident` isn't registered this is a no-op.\n+ \"\"\"\n+ if ident in _handlers:\n+ del _handlers[ident]\n \n def _run_handlers():\n \"\"\"_run_handlers()\n \n- Run registered exit handlers. The order is arbitrary.\n+ Run registered exit-handlers. They run in the reverse order of which they\n+ were registered.\n \n If a handler raises an exception, it will be printed but nothing else\n happens, i.e. other handlers will be run and `sys.excepthook` will not be\n called for that reason.\n \"\"\"\n- for func, (args, kwargs) in _handlers.items():\n+ context.clear()\n+ for _ident, (func, args, kwargs, ctx) in \\\n+ sorted(_handlers.items(), reverse = True):\n try:\n- func(*args, **kwargs)\n+ with context.local(**ctx):\n+ func(*args, **kwargs)\n except SystemExit:\n pass\n except:\n", "issue": "atexit module can't handle multiple instances\nThe `atexit` module uses a dictionary to keep track of the routines to call. 
This means that no routines can be registered more than once, for example with different arguments.\n\nAdditionally, this means that `pwnlib.atexit` does not guarantee order.\n\nPer the [Python documentation](https://docs.python.org/2/library/atexit.html#module-atexit) for `atexit`:\n\n_atexit runs these functions in the reverse order in which they were registered; if you register A, B, and C, at interpreter termination time they will be run in the order C, B, A._\n\nHowever, multiple registration of routines breaks `unregister` in its current incarnation. The suggested remedy for this is to return a unique ID for each `register` call that is passed to `unregister`, rather than passing in the function to `unregister`.\n\n", "before_files": [{"content": "\"\"\"\nReplacement for the Python standard library's atexit.py.\n\nWhereas the standard :mod:`atexit` module only defines :func:`atexit.register`,\nthis replacement module also defines :func:`unregister`.\n\nThis module also fixes a the issue that exceptions raised by an exit handler is\nprinted twice when the standard :mod:`atexit` is used.\n\"\"\"\n\nimport sys, traceback\n\n__all__ = ['register', 'unregister']\n\n_handlers = {}\n\ndef register(func, *args, **kwargs):\n \"\"\"register(func, *args, **kwargs)\n\n Registers a function to be called when an unhandled exception occurs. The\n function will be called with positional arguments `args` and keyword\n arguments `kwargs`, i.e. ``func(*args, **kwargs)``.\n\n If `func` is already registered then `args` and `kwargs` will be updated.\n\n This function can be used as a decorator::\n\n def f():\n ...\n atexit.register(f)\n\n is equivalent to this::\n\n @atexit.register\n def f():\n ...\n\n \"\"\"\n _handlers[func] = (args, kwargs)\n return func\n\ndef unregister(func):\n \"\"\"unregister(func)\n\n Remove `func` from the collection of registered functions. If `func` isn't\n registered this is a no-op.\n \"\"\"\n if func in _handlers:\n del _handlers[func]\n\ndef _run_handlers():\n \"\"\"_run_handlers()\n\n Run registered exit handlers. The order is arbitrary.\n\n If a handler raises an exception, it will be printed but nothing else\n happens, i.e. other handlers will be run and `sys.excepthook` will not be\n called for that reason.\n \"\"\"\n for func, (args, kwargs) in _handlers.items():\n try:\n func(*args, **kwargs)\n except SystemExit:\n pass\n except:\n # extract the current exception and rewind the traceback to where it\n # originated\n typ, val, tb = sys.exc_info()\n traceback.print_exception(typ, val, tb.tb_next)\n\n# if there's already an exitfunc registered be sure to run that too\nif hasattr(sys, \"exitfunc\"):\n register(sys.exitfunc)\n\nsys.exitfunc = _run_handlers\n", "path": "pwnlib/atexit.py"}], "after_files": [{"content": "\"\"\"\nReplacement for the Python standard library's atexit.py.\n\nWhereas the standard :mod:`atexit` module only defines :func:`atexit.register`,\nthis replacement module also defines :func:`unregister`.\n\nThis module also fixes a the issue that exceptions raised by an exit handler is\nprinted twice when the standard :mod:`atexit` is used.\n\"\"\"\n\nimport sys, traceback, threading\nfrom .context import context\n\n__all__ = ['register', 'unregister']\n\n_lock = threading.Lock()\n_ident = 0\n_handlers = {}\n\ndef register(func, *args, **kwargs):\n \"\"\"register(func, *args, **kwargs)\n\n Registers a function to be called on program termination. The function will\n be called with positional arguments `args` and keyword arguments `kwargs`,\n i.e. 
``func(*args, **kwargs)``. The current `context` is recorded and will\n be the one used when the handler is run.\n\n E.g. to suppress logging output from an exit-handler one could write::\n\n with context.local(log_level = 'error'):\n atexit.register(handler)\n\n An identifier is returned which can be used to unregister the exit-handler.\n\n This function can be used as a decorator::\n\n @atexit.register\n def handler():\n ...\n\n Notice however that this will bind ``handler`` to the identifier and not the\n actual exit-handler. The exit-handler can then be unregistered with::\n\n atexit.unregister(handler)\n\n This function is thread safe.\n\n \"\"\"\n global _ident\n with _lock:\n ident = _ident\n _ident += 1\n _handlers[ident] = (func, args, kwargs, vars(context))\n return ident\n\ndef unregister(ident):\n \"\"\"unregister(ident)\n\n Remove the exit-handler identified by `ident` from the list of registered\n handlers. If `ident` isn't registered this is a no-op.\n \"\"\"\n if ident in _handlers:\n del _handlers[ident]\n\ndef _run_handlers():\n \"\"\"_run_handlers()\n\n Run registered exit-handlers. They run in the reverse order of which they\n were registered.\n\n If a handler raises an exception, it will be printed but nothing else\n happens, i.e. other handlers will be run and `sys.excepthook` will not be\n called for that reason.\n \"\"\"\n context.clear()\n for _ident, (func, args, kwargs, ctx) in \\\n sorted(_handlers.items(), reverse = True):\n try:\n with context.local(**ctx):\n func(*args, **kwargs)\n except SystemExit:\n pass\n except:\n # extract the current exception and rewind the traceback to where it\n # originated\n typ, val, tb = sys.exc_info()\n traceback.print_exception(typ, val, tb.tb_next)\n\n# if there's already an exitfunc registered be sure to run that too\nif hasattr(sys, \"exitfunc\"):\n register(sys.exitfunc)\n\nsys.exitfunc = _run_handlers\n", "path": "pwnlib/atexit.py"}]}
| 1,090 | 835 |
gh_patches_debug_27177
|
rasdani/github-patches
|
git_diff
|
pwndbg__pwndbg-2177
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`context` gives wrong jump target for `jalr ra, ra, -0x16` in RISC-V
The jump target shown as annotation in the disassembly window is wrong:
```c++
pwndbg> si
pwndbg> set emulate on
pwndbg> context
► 0x1008e <_start+4> jalr ra, ra, -0x16 <main+8>
pwndbg> si
pwndbg> context
► 0x10074 <main> addi a7, zero, 0x40
```
Interestingly, `nearpc` shows the correct jump target:
```c++
► 0x1008e <_start+4> jalr ra, ra, -0x16 <main>
```
A workaround is `set emulate off`.
### test binary
[Download test binary](https://github.com/pwndbg/pwndbg/files/15366965/riscv-emu-bug.bin.txt). I created it from:
```asm
# riscv32-unknown-linux-gnu-as -march=rv32imac2p0 -o riscv-emu-bug.o riscv-emu-bug.s
# riscv32-unknown-linux-gnu-ld --no-relax -o riscv-emu-bug riscv-emu-bug.o
.section .rodata
greeting: .asciz "Hello world\n"
.equ greetlen, . - greeting
.section .text
main:
li a7, 64 # write
li a0, 1
la a1, greeting
li a2, greetlen
ecall
ret
.global _start
_start:
call main
li a7, 93 # exit
li a0, 0
ecall
```
### version information
pwndbg version: a1ddb3c0cd5e480710aa48f04ac560d659731bd8 (my fork)
```
Platform: Linux-6.9.1-arch1-1-x86_64-with-glibc2.39
OS: Arch Linux
OS ABI: #1 SMP PREEMPT_DYNAMIC Fri, 17 May 2024 16:56:38 +0000
Architecture: x86_64
Endian: little
Charset: utf-8
Width: 119
Height: 58
Gdb: 14.2
Python: 3.12.3 (main, Apr 23 2024, 09:16:07) [GCC 13.2.1 20240417]
Pwndbg: 2024.02.14 build: a1ddb3c0
Capstone: 5.0.1280
Unicorn: 2.0.1
This GDB was configured as follows:
configure --host=x86_64-pc-linux-gnu --target=x86_64-pc-linux-gnu
--with-auto-load-dir=$debugdir:$datadir/auto-load
--with-auto-load-safe-path=$debugdir:$datadir/auto-load
--with-expat
--with-gdb-datadir=/build/share/gdb (relocatable)
--with-jit-reader-dir=/build/lib/gdb (relocatable)
--without-libunwind-ia64
--with-lzma
--without-babeltrace
--without-intel-pt
--with-xxhash
--with-python=/usr
--with-python-libdir=/usr/lib
--with-debuginfod
--with-curses
--with-guile
--without-amd-dbgapi
--enable-source-highlight
--enable-threading
--enable-tui
--with-system-readline
--with-separate-debug-dir=/build/lib/debug (relocatable)
--with-system-gdbinit=/etc/gdb/gdbinit
```
--- END ISSUE ---
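A worked example of the expected target (illustrative only; it assumes `call main` expanded to `auipc ra, 0` at `_start` = 0x1008a, so `ra` holds 0x1008a when the `jalr` at `_start+4` executes):

```python
ra = 0x1008A          # written by the preceding auipc (assumed expansion of `call main`)
imm = -0x16           # sign-extended immediate from `jalr ra, ra, -0x16`
ptrmask = 0xFFFFFFFF  # rv32 pointer mask

target = (ra + imm) & ptrmask
target ^= target & 1  # jalr clears the least-significant bit of the target
print(hex(target))    # 0x10074 -> <main>, matching nearpc rather than <main+8>
```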
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/disasm/riscv.py`
Content:
```
1 from __future__ import annotations
2
3 from capstone import * # noqa: F403
4 from capstone.riscv import * # noqa: F403
5
6 import pwndbg.disasm.arch
7 import pwndbg.gdblib.arch
8 import pwndbg.gdblib.regs
9 from pwndbg.disasm.instruction import InstructionCondition
10 from pwndbg.disasm.instruction import PwndbgInstruction
11 from pwndbg.emu.emulator import Emulator
12
13
14 class DisassemblyAssistant(pwndbg.disasm.arch.DisassemblyAssistant):
15 def __init__(self, architecture) -> None:
16 super().__init__(architecture)
17 self.architecture = architecture
18
19 def _is_condition_taken(
20 self, instruction: PwndbgInstruction, emu: Emulator | None
21 ) -> InstructionCondition:
22 # B-type instructions have two source registers that are compared
23 src1_unsigned = self.parse_register(instruction, instruction.op_find(CS_OP_REG, 1), emu)
24 # compressed instructions c.beqz and c.bnez only use one register operand.
25 if instruction.op_count(CS_OP_REG) > 1:
26 src2_unsigned = self.parse_register(instruction, instruction.op_find(CS_OP_REG, 2), emu)
27 else:
28 src2_unsigned = 0
29
30 if self.architecture == "rv32":
31 src1_signed = src1_unsigned - ((src1_unsigned & 0x80000000) << 1)
32 src2_signed = src2_unsigned - ((src2_unsigned & 0x80000000) << 1)
33 elif self.architecture == "rv64":
34 src1_signed = src1_unsigned - ((src1_unsigned & 0x80000000_00000000) << 1)
35 src2_signed = src2_unsigned - ((src2_unsigned & 0x80000000_00000000) << 1)
36 else:
37 raise NotImplementedError(f"architecture '{self.architecture}' not implemented")
38
39 condition = {
40 RISCV_INS_BEQ: src1_signed == src2_signed,
41 RISCV_INS_BNE: src1_signed != src2_signed,
42 RISCV_INS_BLT: src1_signed < src2_signed,
43 RISCV_INS_BGE: src1_signed >= src2_signed,
44 RISCV_INS_BLTU: src1_unsigned < src2_unsigned,
45 RISCV_INS_BGEU: src1_unsigned >= src2_unsigned,
46 RISCV_INS_C_BEQZ: src1_signed == 0,
47 RISCV_INS_C_BNEZ: src1_signed != 0,
48 }.get(instruction.id, None)
49
50 if condition is None:
51 return InstructionCondition.UNDETERMINED
52
53 return InstructionCondition.TRUE if bool(condition) else InstructionCondition.FALSE
54
55 def condition(self, instruction: PwndbgInstruction, emu: Emulator) -> InstructionCondition:
56 """Checks if the current instruction is a jump that is taken.
57 Returns None if the instruction is executed unconditionally,
58 True if the instruction is executed for sure, False otherwise.
59 """
60 # JAL / JALR is unconditional
61 if RISCV_GRP_CALL in instruction.groups:
62 return InstructionCondition.UNDETERMINED
63
64 # We can't reason about anything except the current instruction
65 # as the comparison result is dependent on the register state.
66 if instruction.address != pwndbg.gdblib.regs.pc:
67 return InstructionCondition.UNDETERMINED
68
69 # Determine if the conditional jump is taken
70 if RISCV_GRP_BRANCH_RELATIVE in instruction.groups:
71 return self._is_condition_taken(instruction, emu)
72
73 return InstructionCondition.UNDETERMINED
74
75 def resolve_target(self, instruction: PwndbgInstruction, emu: Emulator | None, call=False):
76 """Return the address of the jump / conditional jump,
77 None if the next address is not dependent on instruction.
78 """
79 ptrmask = pwndbg.gdblib.arch.ptrmask
80 # JAL is unconditional and independent of current register status
81 if instruction.id in [RISCV_INS_JAL, RISCV_INS_C_JAL]:
82 # But that doesn't apply to ARM anyways :)
83 return (instruction.address + instruction.op_find(CS_OP_IMM, 1).imm) & ptrmask
84
85 # We can't reason about anything except the current instruction
86 # as the comparison result is dependent on the register state.
87 if instruction.address != pwndbg.gdblib.regs.pc:
88 return None
89
90 # Determine if the conditional jump is taken
91 if RISCV_GRP_BRANCH_RELATIVE in instruction.groups and self._is_condition_taken(
92 instruction, emu
93 ):
94 return (instruction.address + instruction.op_find(CS_OP_IMM, 1).imm) & ptrmask
95
96 # Determine the target address of the indirect jump
97 if instruction.id in [RISCV_INS_JALR, RISCV_INS_C_JALR]:
98 target = (
99 self.parse_register(instruction, instruction.op_find(CS_OP_REG, 1), emu)
100 + instruction.op_find(CS_OP_IMM, 1).imm
101 ) & ptrmask
102 # Clear the lowest bit without knowing the register width
103 return target ^ (target & 1)
104
105 return super().resolve_target(instruction, emu, call)
106
107
108 assistant_rv32 = DisassemblyAssistant("rv32")
109 assistant_rv64 = DisassemblyAssistant("rv64")
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pwndbg/disasm/riscv.py b/pwndbg/disasm/riscv.py
--- a/pwndbg/disasm/riscv.py
+++ b/pwndbg/disasm/riscv.py
@@ -20,10 +20,10 @@
self, instruction: PwndbgInstruction, emu: Emulator | None
) -> InstructionCondition:
# B-type instructions have two source registers that are compared
- src1_unsigned = self.parse_register(instruction, instruction.op_find(CS_OP_REG, 1), emu)
+ src1_unsigned = instruction.op_find(CS_OP_REG, 1).before_value
# compressed instructions c.beqz and c.bnez only use one register operand.
if instruction.op_count(CS_OP_REG) > 1:
- src2_unsigned = self.parse_register(instruction, instruction.op_find(CS_OP_REG, 2), emu)
+ src2_unsigned = instruction.op_find(CS_OP_REG, 2).before_value
else:
src2_unsigned = 0
@@ -96,7 +96,7 @@
# Determine the target address of the indirect jump
if instruction.id in [RISCV_INS_JALR, RISCV_INS_C_JALR]:
target = (
- self.parse_register(instruction, instruction.op_find(CS_OP_REG, 1), emu)
+ instruction.op_find(CS_OP_REG, 1).before_value
+ instruction.op_find(CS_OP_IMM, 1).imm
) & ptrmask
# Clear the lowest bit without knowing the register width
|
{"golden_diff": "diff --git a/pwndbg/disasm/riscv.py b/pwndbg/disasm/riscv.py\n--- a/pwndbg/disasm/riscv.py\n+++ b/pwndbg/disasm/riscv.py\n@@ -20,10 +20,10 @@\n self, instruction: PwndbgInstruction, emu: Emulator | None\n ) -> InstructionCondition:\n # B-type instructions have two source registers that are compared\n- src1_unsigned = self.parse_register(instruction, instruction.op_find(CS_OP_REG, 1), emu)\n+ src1_unsigned = instruction.op_find(CS_OP_REG, 1).before_value\n # compressed instructions c.beqz and c.bnez only use one register operand.\n if instruction.op_count(CS_OP_REG) > 1:\n- src2_unsigned = self.parse_register(instruction, instruction.op_find(CS_OP_REG, 2), emu)\n+ src2_unsigned = instruction.op_find(CS_OP_REG, 2).before_value\n else:\n src2_unsigned = 0\n \n@@ -96,7 +96,7 @@\n # Determine the target address of the indirect jump\n if instruction.id in [RISCV_INS_JALR, RISCV_INS_C_JALR]:\n target = (\n- self.parse_register(instruction, instruction.op_find(CS_OP_REG, 1), emu)\n+ instruction.op_find(CS_OP_REG, 1).before_value\n + instruction.op_find(CS_OP_IMM, 1).imm\n ) & ptrmask\n # Clear the lowest bit without knowing the register width\n", "issue": "`context` gives wrong jump target for `jalr ra, ra, -0x16` in RISC-V\nThe jump target shown as annotation in the disassembly window is wrong:\r\n\r\n```c++\r\npwndbg> si\r\npwndbg> set emulate on\r\npwndbg> context\r\n \u25ba 0x1008e <_start+4> jalr ra, ra, -0x16 <main+8>\r\npwndbg> si\r\npwndbg> context\r\n \u25ba 0x10074 <main> addi a7, zero, 0x40\r\n```\r\n\r\nInterestingly, `nearpc` shows the correct jump target:\r\n```c++\r\n \u25ba 0x1008e <_start+4> jalr ra, ra, -0x16 <main>\r\n ```\r\nA workaround is `set emulate off`.\r\n\r\n\r\n### test binary\r\n[Download test binary](https://github.com/pwndbg/pwndbg/files/15366965/riscv-emu-bug.bin.txt). I created it from:\r\n\r\n```asm\r\n# riscv32-unknown-linux-gnu-as -march=rv32imac2p0 -o riscv-emu-bug.o riscv-emu-bug.s\r\n# riscv32-unknown-linux-gnu-ld --no-relax -o riscv-emu-bug riscv-emu-bug.o\r\n\r\n.section .rodata\r\n\r\ngreeting: .asciz \"Hello world\\n\"\r\n.equ greetlen, . 
- greeting\r\n\r\n.section .text\r\nmain:\r\n li a7, 64 # write\r\n li a0, 1\r\n la a1, greeting\r\n li a2, greetlen\r\n ecall\r\n ret\r\n\r\n.global _start\r\n_start:\r\n call main\r\n li a7, 93 # exit\r\n li a0, 0\r\n ecall\r\n```\r\n\r\n\r\n### version information\r\n\r\npwndbg version: a1ddb3c0cd5e480710aa48f04ac560d659731bd8 (my fork)\r\n```\r\nPlatform: Linux-6.9.1-arch1-1-x86_64-with-glibc2.39\r\nOS: Arch Linux\r\nOS ABI: #1 SMP PREEMPT_DYNAMIC Fri, 17 May 2024 16:56:38 +0000\r\nArchitecture: x86_64\r\nEndian: little\r\nCharset: utf-8\r\nWidth: 119\r\nHeight: 58\r\nGdb: 14.2\r\nPython: 3.12.3 (main, Apr 23 2024, 09:16:07) [GCC 13.2.1 20240417]\r\nPwndbg: 2024.02.14 build: a1ddb3c0\r\nCapstone: 5.0.1280\r\nUnicorn: 2.0.1\r\nThis GDB was configured as follows:\r\n configure --host=x86_64-pc-linux-gnu --target=x86_64-pc-linux-gnu\r\n\t --with-auto-load-dir=$debugdir:$datadir/auto-load\r\n\t --with-auto-load-safe-path=$debugdir:$datadir/auto-load\r\n\t --with-expat\r\n\t --with-gdb-datadir=/build/share/gdb (relocatable)\r\n\t --with-jit-reader-dir=/build/lib/gdb (relocatable)\r\n\t --without-libunwind-ia64\r\n\t --with-lzma\r\n\t --without-babeltrace\r\n\t --without-intel-pt\r\n\t --with-xxhash\r\n\t --with-python=/usr\r\n\t --with-python-libdir=/usr/lib\r\n\t --with-debuginfod\r\n\t --with-curses\r\n\t --with-guile\r\n\t --without-amd-dbgapi\r\n\t --enable-source-highlight\r\n\t --enable-threading\r\n\t --enable-tui\r\n\t --with-system-readline\r\n\t --with-separate-debug-dir=/build/lib/debug (relocatable)\r\n\t --with-system-gdbinit=/etc/gdb/gdbinit\r\n```\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom capstone import * # noqa: F403\nfrom capstone.riscv import * # noqa: F403\n\nimport pwndbg.disasm.arch\nimport pwndbg.gdblib.arch\nimport pwndbg.gdblib.regs\nfrom pwndbg.disasm.instruction import InstructionCondition\nfrom pwndbg.disasm.instruction import PwndbgInstruction\nfrom pwndbg.emu.emulator import Emulator\n\n\nclass DisassemblyAssistant(pwndbg.disasm.arch.DisassemblyAssistant):\n def __init__(self, architecture) -> None:\n super().__init__(architecture)\n self.architecture = architecture\n\n def _is_condition_taken(\n self, instruction: PwndbgInstruction, emu: Emulator | None\n ) -> InstructionCondition:\n # B-type instructions have two source registers that are compared\n src1_unsigned = self.parse_register(instruction, instruction.op_find(CS_OP_REG, 1), emu)\n # compressed instructions c.beqz and c.bnez only use one register operand.\n if instruction.op_count(CS_OP_REG) > 1:\n src2_unsigned = self.parse_register(instruction, instruction.op_find(CS_OP_REG, 2), emu)\n else:\n src2_unsigned = 0\n\n if self.architecture == \"rv32\":\n src1_signed = src1_unsigned - ((src1_unsigned & 0x80000000) << 1)\n src2_signed = src2_unsigned - ((src2_unsigned & 0x80000000) << 1)\n elif self.architecture == \"rv64\":\n src1_signed = src1_unsigned - ((src1_unsigned & 0x80000000_00000000) << 1)\n src2_signed = src2_unsigned - ((src2_unsigned & 0x80000000_00000000) << 1)\n else:\n raise NotImplementedError(f\"architecture '{self.architecture}' not implemented\")\n\n condition = {\n RISCV_INS_BEQ: src1_signed == src2_signed,\n RISCV_INS_BNE: src1_signed != src2_signed,\n RISCV_INS_BLT: src1_signed < src2_signed,\n RISCV_INS_BGE: src1_signed >= src2_signed,\n RISCV_INS_BLTU: src1_unsigned < src2_unsigned,\n RISCV_INS_BGEU: src1_unsigned >= src2_unsigned,\n RISCV_INS_C_BEQZ: src1_signed == 0,\n RISCV_INS_C_BNEZ: src1_signed != 0,\n }.get(instruction.id, None)\n\n if 
condition is None:\n return InstructionCondition.UNDETERMINED\n\n return InstructionCondition.TRUE if bool(condition) else InstructionCondition.FALSE\n\n def condition(self, instruction: PwndbgInstruction, emu: Emulator) -> InstructionCondition:\n \"\"\"Checks if the current instruction is a jump that is taken.\n Returns None if the instruction is executed unconditionally,\n True if the instruction is executed for sure, False otherwise.\n \"\"\"\n # JAL / JALR is unconditional\n if RISCV_GRP_CALL in instruction.groups:\n return InstructionCondition.UNDETERMINED\n\n # We can't reason about anything except the current instruction\n # as the comparison result is dependent on the register state.\n if instruction.address != pwndbg.gdblib.regs.pc:\n return InstructionCondition.UNDETERMINED\n\n # Determine if the conditional jump is taken\n if RISCV_GRP_BRANCH_RELATIVE in instruction.groups:\n return self._is_condition_taken(instruction, emu)\n\n return InstructionCondition.UNDETERMINED\n\n def resolve_target(self, instruction: PwndbgInstruction, emu: Emulator | None, call=False):\n \"\"\"Return the address of the jump / conditional jump,\n None if the next address is not dependent on instruction.\n \"\"\"\n ptrmask = pwndbg.gdblib.arch.ptrmask\n # JAL is unconditional and independent of current register status\n if instruction.id in [RISCV_INS_JAL, RISCV_INS_C_JAL]:\n # But that doesn't apply to ARM anyways :)\n return (instruction.address + instruction.op_find(CS_OP_IMM, 1).imm) & ptrmask\n\n # We can't reason about anything except the current instruction\n # as the comparison result is dependent on the register state.\n if instruction.address != pwndbg.gdblib.regs.pc:\n return None\n\n # Determine if the conditional jump is taken\n if RISCV_GRP_BRANCH_RELATIVE in instruction.groups and self._is_condition_taken(\n instruction, emu\n ):\n return (instruction.address + instruction.op_find(CS_OP_IMM, 1).imm) & ptrmask\n\n # Determine the target address of the indirect jump\n if instruction.id in [RISCV_INS_JALR, RISCV_INS_C_JALR]:\n target = (\n self.parse_register(instruction, instruction.op_find(CS_OP_REG, 1), emu)\n + instruction.op_find(CS_OP_IMM, 1).imm\n ) & ptrmask\n # Clear the lowest bit without knowing the register width\n return target ^ (target & 1)\n\n return super().resolve_target(instruction, emu, call)\n\n\nassistant_rv32 = DisassemblyAssistant(\"rv32\")\nassistant_rv64 = DisassemblyAssistant(\"rv64\")\n", "path": "pwndbg/disasm/riscv.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom capstone import * # noqa: F403\nfrom capstone.riscv import * # noqa: F403\n\nimport pwndbg.disasm.arch\nimport pwndbg.gdblib.arch\nimport pwndbg.gdblib.regs\nfrom pwndbg.disasm.instruction import InstructionCondition\nfrom pwndbg.disasm.instruction import PwndbgInstruction\nfrom pwndbg.emu.emulator import Emulator\n\n\nclass DisassemblyAssistant(pwndbg.disasm.arch.DisassemblyAssistant):\n def __init__(self, architecture) -> None:\n super().__init__(architecture)\n self.architecture = architecture\n\n def _is_condition_taken(\n self, instruction: PwndbgInstruction, emu: Emulator | None\n ) -> InstructionCondition:\n # B-type instructions have two source registers that are compared\n src1_unsigned = instruction.op_find(CS_OP_REG, 1).before_value\n # compressed instructions c.beqz and c.bnez only use one register operand.\n if instruction.op_count(CS_OP_REG) > 1:\n src2_unsigned = instruction.op_find(CS_OP_REG, 2).before_value\n else:\n src2_unsigned = 0\n\n if 
self.architecture == \"rv32\":\n src1_signed = src1_unsigned - ((src1_unsigned & 0x80000000) << 1)\n src2_signed = src2_unsigned - ((src2_unsigned & 0x80000000) << 1)\n elif self.architecture == \"rv64\":\n src1_signed = src1_unsigned - ((src1_unsigned & 0x80000000_00000000) << 1)\n src2_signed = src2_unsigned - ((src2_unsigned & 0x80000000_00000000) << 1)\n else:\n raise NotImplementedError(f\"architecture '{self.architecture}' not implemented\")\n\n condition = {\n RISCV_INS_BEQ: src1_signed == src2_signed,\n RISCV_INS_BNE: src1_signed != src2_signed,\n RISCV_INS_BLT: src1_signed < src2_signed,\n RISCV_INS_BGE: src1_signed >= src2_signed,\n RISCV_INS_BLTU: src1_unsigned < src2_unsigned,\n RISCV_INS_BGEU: src1_unsigned >= src2_unsigned,\n RISCV_INS_C_BEQZ: src1_signed == 0,\n RISCV_INS_C_BNEZ: src1_signed != 0,\n }.get(instruction.id, None)\n\n if condition is None:\n return InstructionCondition.UNDETERMINED\n\n return InstructionCondition.TRUE if bool(condition) else InstructionCondition.FALSE\n\n def condition(self, instruction: PwndbgInstruction, emu: Emulator) -> InstructionCondition:\n \"\"\"Checks if the current instruction is a jump that is taken.\n Returns None if the instruction is executed unconditionally,\n True if the instruction is executed for sure, False otherwise.\n \"\"\"\n # JAL / JALR is unconditional\n if RISCV_GRP_CALL in instruction.groups:\n return InstructionCondition.UNDETERMINED\n\n # We can't reason about anything except the current instruction\n # as the comparison result is dependent on the register state.\n if instruction.address != pwndbg.gdblib.regs.pc:\n return InstructionCondition.UNDETERMINED\n\n # Determine if the conditional jump is taken\n if RISCV_GRP_BRANCH_RELATIVE in instruction.groups:\n return self._is_condition_taken(instruction, emu)\n\n return InstructionCondition.UNDETERMINED\n\n def resolve_target(self, instruction: PwndbgInstruction, emu: Emulator | None, call=False):\n \"\"\"Return the address of the jump / conditional jump,\n None if the next address is not dependent on instruction.\n \"\"\"\n ptrmask = pwndbg.gdblib.arch.ptrmask\n # JAL is unconditional and independent of current register status\n if instruction.id in [RISCV_INS_JAL, RISCV_INS_C_JAL]:\n # But that doesn't apply to ARM anyways :)\n return (instruction.address + instruction.op_find(CS_OP_IMM, 1).imm) & ptrmask\n\n # We can't reason about anything except the current instruction\n # as the comparison result is dependent on the register state.\n if instruction.address != pwndbg.gdblib.regs.pc:\n return None\n\n # Determine if the conditional jump is taken\n if RISCV_GRP_BRANCH_RELATIVE in instruction.groups and self._is_condition_taken(\n instruction, emu\n ):\n return (instruction.address + instruction.op_find(CS_OP_IMM, 1).imm) & ptrmask\n\n # Determine the target address of the indirect jump\n if instruction.id in [RISCV_INS_JALR, RISCV_INS_C_JALR]:\n target = (\n instruction.op_find(CS_OP_REG, 1).before_value\n + instruction.op_find(CS_OP_IMM, 1).imm\n ) & ptrmask\n # Clear the lowest bit without knowing the register width\n return target ^ (target & 1)\n\n return super().resolve_target(instruction, emu, call)\n\n\nassistant_rv32 = DisassemblyAssistant(\"rv32\")\nassistant_rv64 = DisassemblyAssistant(\"rv64\")\n", "path": "pwndbg/disasm/riscv.py"}]}
| 2,648 | 352 |
gh_patches_debug_25215
|
rasdani/github-patches
|
git_diff
|
kornia__kornia-1971
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecate Python 3.6 support
## 🚀 Feature
Deprecate python 3.6 -- support Python >= 3.7
______________________________________________________________________
#### Consider also to contribute to Kornia universe projects :)
<sub>
- [**Tutorials**](https://github.com/kornia/tutorials): our repository containing the tutorials.
</sub>
--- END ISSUE ---
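For context, dropping 3.6 support usually comes down to an interpreter guard like the following minimal sketch, assuming the check lives in `setup.py` as it already does in this project:

```python
import sys

# Refuse to install on interpreters older than the new minimum version.
if sys.version_info < (3, 7, 0):
    raise RuntimeError("Python 3.7.0 or later is required.")
```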
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kornia/x/utils.py`
Content:
```
1 # TODO: remove the type: ignore in below after deprecating python 3.6
2 from dataclasses import dataclass, field # type: ignore
3 from enum import Enum
4 from typing import Dict
5
6 import torch.nn as nn
7
8 from kornia.metrics.average_meter import AverageMeter
9
10 # import yaml # type: ignore
11
12
13 class TrainerState(Enum):
14 STARTING = 0
15 TRAINING = 1
16 VALIDATE = 2
17 TERMINATE = 3
18
19
20 # NOTE: this class needs to be redefined according to the needed parameters.
21 @dataclass
22 class Configuration:
23 data_path: str = field(default="./", metadata={"help": "The input data directory."})
24 batch_size: int = field(default=1, metadata={"help": "The number of batches for the training dataloader."})
25 num_epochs: int = field(default=1, metadata={"help": "The number of epochs to run the training."})
26 lr: float = field(default=1e-3, metadata={"help": "The learning rate to be used for the optimize."})
27 output_path: str = field(default="./output", metadata={"help": "The output data directory."})
28 image_size: tuple = field(default=(224, 224), metadata={"help": "The input image size."})
29
30 # TODO: possibly remove because hydra already do this
31 # def __init__(self, **entries):
32 # for k, v in entries.items():
33 # self.__dict__[k] = Configuration(**v) if isinstance(v, dict) else v
34
35 # @classmethod
36 # def from_yaml(cls, config_file: str):
37 # """Create an instance of the configuration from a yaml file."""
38 # with open(config_file) as f:
39 # data = yaml.safe_load(f)
40 # return cls(**data)
41
42
43 class Lambda(nn.Module):
44 """Module to create a lambda function as nn.Module.
45
46 Args:
47 fcn: a pointer to any function.
48
49 Example:
50 >>> import torch
51 >>> import kornia as K
52 >>> fcn = Lambda(lambda x: K.geometry.resize(x, (32, 16)))
53 >>> fcn(torch.rand(1, 4, 64, 32)).shape
54 torch.Size([1, 4, 32, 16])
55 """
56
57 def __init__(self, fcn):
58 super().__init__()
59 self.fcn = fcn
60
61 def forward(self, x):
62 return self.fcn(x)
63
64
65 class StatsTracker:
66 """Stats tracker for computing metrics on the fly."""
67
68 def __init__(self) -> None:
69 self._stats: Dict[str, AverageMeter] = {}
70
71 @property
72 def stats(self):
73 return self._stats
74
75 def update(self, key: str, val: float, batch_size: int) -> None:
76 """Update the stats by the key value pair."""
77 if key not in self._stats:
78 self._stats[key] = AverageMeter()
79 self._stats[key].update(val, batch_size)
80
81 def update_from_dict(self, dic: Dict[str, float], batch_size: int) -> None:
82 """Update the stats by the dict."""
83 for k, v in dic.items():
84 self.update(k, v, batch_size)
85
86 def __repr__(self) -> str:
87 return " ".join([f"{k.upper()}: {v.val:.2f} {v.val:.2f} " for k, v in self._stats.items()])
88
89 def as_dict(self) -> Dict[str, AverageMeter]:
90 """Return the dict format."""
91 return self._stats
92
```
Path: `setup.py`
Content:
```
1 # Welcome to the Kornia setup.py.
2 import sys
3
4 # Make sure that kornia is running on Python 3.6.0 or later
5 # (to avoid running into this bug: https://bugs.python.org/issue29246)
6
7 if sys.version_info < (3, 6, 0):
8 raise RuntimeError("Kornia requires Python 3.6.0 or later.")
9
10
11 from setuptools import setup
12
13 setup()
14
```
Path: `kornia/utils/_compat.py`
Content:
```
1 from typing import TYPE_CHECKING, List, Optional, Tuple
2
3 import torch
4 from torch import Tensor
5
6 from packaging import version
7
8
9 def torch_version() -> str:
10 """Parse the `torch.__version__` variable and removes +cu*/cpu."""
11 return torch.__version__.split('+')[0]
12
13
14 # TODO: replace by torch_version_ge``
15 def torch_version_geq(major, minor) -> bool:
16 _version = version.parse(torch_version())
17 return _version >= version.parse(f"{major}.{minor}")
18
19
20 def torch_version_lt(major: int, minor: int, patch: int) -> bool:
21 _version = version.parse(torch_version())
22 return _version < version.parse(f"{major}.{minor}.{patch}")
23
24
25 def torch_version_le(major: int, minor: int, patch: int) -> bool:
26 _version = version.parse(torch_version())
27 return _version <= version.parse(f"{major}.{minor}.{patch}")
28
29
30 def torch_version_ge(major: int, minor: int, patch: int) -> bool:
31 _version = version.parse(torch_version())
32 return _version >= version.parse(f"{major}.{minor}.{patch}")
33
34
35 if version.parse(torch_version()) > version.parse("1.7.1"):
36 # TODO: remove the type: ignore once Python 3.6 is deprecated.
37 # It turns out that Pytorch has no attribute `torch.linalg` for
38 # Python 3.6 / PyTorch 1.7.0, 1.7.1
39 from torch.linalg import qr as linalg_qr # type: ignore
40 else:
41 from torch import qr as linalg_qr # type: ignore # noqa: F401
42
43
44 if torch_version_ge(1, 10, 0):
45
46 if not TYPE_CHECKING:
47
48 def torch_meshgrid(tensors: List[Tensor], indexing: str):
49 return torch.meshgrid(tensors, indexing=indexing)
50
51 else:
52
53 if TYPE_CHECKING:
54
55 def torch_meshgrid(tensors: List[Tensor], indexing: Optional[str] = None) -> Tuple[Tensor, ...]:
56 return torch.meshgrid(tensors)
57
58 else:
59
60 def torch_meshgrid(tensors: List[Tensor], indexing: str):
61 return torch.meshgrid(tensors)
62
63
64 if torch_version_ge(1, 10, 0):
65 torch_inference_mode = torch.inference_mode
66 else:
67 torch_inference_mode = torch.no_grad
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kornia/utils/_compat.py b/kornia/utils/_compat.py
--- a/kornia/utils/_compat.py
+++ b/kornia/utils/_compat.py
@@ -33,12 +33,9 @@
if version.parse(torch_version()) > version.parse("1.7.1"):
- # TODO: remove the type: ignore once Python 3.6 is deprecated.
- # It turns out that Pytorch has no attribute `torch.linalg` for
- # Python 3.6 / PyTorch 1.7.0, 1.7.1
- from torch.linalg import qr as linalg_qr # type: ignore
+ from torch.linalg import qr as linalg_qr
else:
- from torch import qr as linalg_qr # type: ignore # noqa: F401
+ from torch import qr as linalg_qr # noqa: F401
if torch_version_ge(1, 10, 0):
diff --git a/kornia/x/utils.py b/kornia/x/utils.py
--- a/kornia/x/utils.py
+++ b/kornia/x/utils.py
@@ -1,5 +1,4 @@
-# TODO: remove the type: ignore in below after deprecating python 3.6
-from dataclasses import dataclass, field # type: ignore
+from dataclasses import dataclass, field
from enum import Enum
from typing import Dict
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,11 +1,11 @@
# Welcome to the Kornia setup.py.
import sys
-# Make sure that kornia is running on Python 3.6.0 or later
+# Make sure that kornia is running on Python 3.7.0 or later
# (to avoid running into this bug: https://bugs.python.org/issue29246)
-if sys.version_info < (3, 6, 0):
- raise RuntimeError("Kornia requires Python 3.6.0 or later.")
+if sys.version_info < (3, 7, 0):
+ raise RuntimeError("Kornia requires Python 3.7.0 or later.")
from setuptools import setup
|
{"golden_diff": "diff --git a/kornia/utils/_compat.py b/kornia/utils/_compat.py\n--- a/kornia/utils/_compat.py\n+++ b/kornia/utils/_compat.py\n@@ -33,12 +33,9 @@\n \n \n if version.parse(torch_version()) > version.parse(\"1.7.1\"):\n- # TODO: remove the type: ignore once Python 3.6 is deprecated.\n- # It turns out that Pytorch has no attribute `torch.linalg` for\n- # Python 3.6 / PyTorch 1.7.0, 1.7.1\n- from torch.linalg import qr as linalg_qr # type: ignore\n+ from torch.linalg import qr as linalg_qr\n else:\n- from torch import qr as linalg_qr # type: ignore # noqa: F401\n+ from torch import qr as linalg_qr # noqa: F401\n \n \n if torch_version_ge(1, 10, 0):\ndiff --git a/kornia/x/utils.py b/kornia/x/utils.py\n--- a/kornia/x/utils.py\n+++ b/kornia/x/utils.py\n@@ -1,5 +1,4 @@\n-# TODO: remove the type: ignore in below after deprecating python 3.6\n-from dataclasses import dataclass, field # type: ignore\n+from dataclasses import dataclass, field\n from enum import Enum\n from typing import Dict\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,11 +1,11 @@\n # Welcome to the Kornia setup.py.\n import sys\n \n-# Make sure that kornia is running on Python 3.6.0 or later\n+# Make sure that kornia is running on Python 3.7.0 or later\n # (to avoid running into this bug: https://bugs.python.org/issue29246)\n \n-if sys.version_info < (3, 6, 0):\n- raise RuntimeError(\"Kornia requires Python 3.6.0 or later.\")\n+if sys.version_info < (3, 7, 0):\n+ raise RuntimeError(\"Kornia requires Python 3.7.0 or later.\")\n \n \n from setuptools import setup\n", "issue": "Deprecate Python 3.6 support\n## \ud83d\ude80 Feature\r\n\r\nDeprecate python 3.6 -- support Python >= 3.7\r\n\r\n______________________________________________________________________\r\n\r\n#### Consider also to contribute to Kornia universe projects :)\r\n\r\n<sub>\r\n\r\n- [**Tutorials**](https://github.com/kornia/tutorials): our repository containing the tutorials.\r\n\r\n</sub>\r\n\n", "before_files": [{"content": "# TODO: remove the type: ignore in below after deprecating python 3.6\nfrom dataclasses import dataclass, field # type: ignore\nfrom enum import Enum\nfrom typing import Dict\n\nimport torch.nn as nn\n\nfrom kornia.metrics.average_meter import AverageMeter\n\n# import yaml # type: ignore\n\n\nclass TrainerState(Enum):\n STARTING = 0\n TRAINING = 1\n VALIDATE = 2\n TERMINATE = 3\n\n\n# NOTE: this class needs to be redefined according to the needed parameters.\n@dataclass\nclass Configuration:\n data_path: str = field(default=\"./\", metadata={\"help\": \"The input data directory.\"})\n batch_size: int = field(default=1, metadata={\"help\": \"The number of batches for the training dataloader.\"})\n num_epochs: int = field(default=1, metadata={\"help\": \"The number of epochs to run the training.\"})\n lr: float = field(default=1e-3, metadata={\"help\": \"The learning rate to be used for the optimize.\"})\n output_path: str = field(default=\"./output\", metadata={\"help\": \"The output data directory.\"})\n image_size: tuple = field(default=(224, 224), metadata={\"help\": \"The input image size.\"})\n\n # TODO: possibly remove because hydra already do this\n # def __init__(self, **entries):\n # for k, v in entries.items():\n # self.__dict__[k] = Configuration(**v) if isinstance(v, dict) else v\n\n # @classmethod\n # def from_yaml(cls, config_file: str):\n # \"\"\"Create an instance of the configuration from a yaml file.\"\"\"\n # with open(config_file) as f:\n # data = yaml.safe_load(f)\n # return 
cls(**data)\n\n\nclass Lambda(nn.Module):\n \"\"\"Module to create a lambda function as nn.Module.\n\n Args:\n fcn: a pointer to any function.\n\n Example:\n >>> import torch\n >>> import kornia as K\n >>> fcn = Lambda(lambda x: K.geometry.resize(x, (32, 16)))\n >>> fcn(torch.rand(1, 4, 64, 32)).shape\n torch.Size([1, 4, 32, 16])\n \"\"\"\n\n def __init__(self, fcn):\n super().__init__()\n self.fcn = fcn\n\n def forward(self, x):\n return self.fcn(x)\n\n\nclass StatsTracker:\n \"\"\"Stats tracker for computing metrics on the fly.\"\"\"\n\n def __init__(self) -> None:\n self._stats: Dict[str, AverageMeter] = {}\n\n @property\n def stats(self):\n return self._stats\n\n def update(self, key: str, val: float, batch_size: int) -> None:\n \"\"\"Update the stats by the key value pair.\"\"\"\n if key not in self._stats:\n self._stats[key] = AverageMeter()\n self._stats[key].update(val, batch_size)\n\n def update_from_dict(self, dic: Dict[str, float], batch_size: int) -> None:\n \"\"\"Update the stats by the dict.\"\"\"\n for k, v in dic.items():\n self.update(k, v, batch_size)\n\n def __repr__(self) -> str:\n return \" \".join([f\"{k.upper()}: {v.val:.2f} {v.val:.2f} \" for k, v in self._stats.items()])\n\n def as_dict(self) -> Dict[str, AverageMeter]:\n \"\"\"Return the dict format.\"\"\"\n return self._stats\n", "path": "kornia/x/utils.py"}, {"content": "# Welcome to the Kornia setup.py.\nimport sys\n\n# Make sure that kornia is running on Python 3.6.0 or later\n# (to avoid running into this bug: https://bugs.python.org/issue29246)\n\nif sys.version_info < (3, 6, 0):\n raise RuntimeError(\"Kornia requires Python 3.6.0 or later.\")\n\n\nfrom setuptools import setup\n\nsetup()\n", "path": "setup.py"}, {"content": "from typing import TYPE_CHECKING, List, Optional, Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom packaging import version\n\n\ndef torch_version() -> str:\n \"\"\"Parse the `torch.__version__` variable and removes +cu*/cpu.\"\"\"\n return torch.__version__.split('+')[0]\n\n\n# TODO: replace by torch_version_ge``\ndef torch_version_geq(major, minor) -> bool:\n _version = version.parse(torch_version())\n return _version >= version.parse(f\"{major}.{minor}\")\n\n\ndef torch_version_lt(major: int, minor: int, patch: int) -> bool:\n _version = version.parse(torch_version())\n return _version < version.parse(f\"{major}.{minor}.{patch}\")\n\n\ndef torch_version_le(major: int, minor: int, patch: int) -> bool:\n _version = version.parse(torch_version())\n return _version <= version.parse(f\"{major}.{minor}.{patch}\")\n\n\ndef torch_version_ge(major: int, minor: int, patch: int) -> bool:\n _version = version.parse(torch_version())\n return _version >= version.parse(f\"{major}.{minor}.{patch}\")\n\n\nif version.parse(torch_version()) > version.parse(\"1.7.1\"):\n # TODO: remove the type: ignore once Python 3.6 is deprecated.\n # It turns out that Pytorch has no attribute `torch.linalg` for\n # Python 3.6 / PyTorch 1.7.0, 1.7.1\n from torch.linalg import qr as linalg_qr # type: ignore\nelse:\n from torch import qr as linalg_qr # type: ignore # noqa: F401\n\n\nif torch_version_ge(1, 10, 0):\n\n if not TYPE_CHECKING:\n\n def torch_meshgrid(tensors: List[Tensor], indexing: str):\n return torch.meshgrid(tensors, indexing=indexing)\n\nelse:\n\n if TYPE_CHECKING:\n\n def torch_meshgrid(tensors: List[Tensor], indexing: Optional[str] = None) -> Tuple[Tensor, ...]:\n return torch.meshgrid(tensors)\n\n else:\n\n def torch_meshgrid(tensors: List[Tensor], indexing: str):\n return 
torch.meshgrid(tensors)\n\n\nif torch_version_ge(1, 10, 0):\n torch_inference_mode = torch.inference_mode\nelse:\n torch_inference_mode = torch.no_grad\n", "path": "kornia/utils/_compat.py"}], "after_files": [{"content": "from dataclasses import dataclass, field\nfrom enum import Enum\nfrom typing import Dict\n\nimport torch.nn as nn\n\nfrom kornia.metrics.average_meter import AverageMeter\n\n# import yaml # type: ignore\n\n\nclass TrainerState(Enum):\n STARTING = 0\n TRAINING = 1\n VALIDATE = 2\n TERMINATE = 3\n\n\n# NOTE: this class needs to be redefined according to the needed parameters.\n@dataclass\nclass Configuration:\n data_path: str = field(default=\"./\", metadata={\"help\": \"The input data directory.\"})\n batch_size: int = field(default=1, metadata={\"help\": \"The number of batches for the training dataloader.\"})\n num_epochs: int = field(default=1, metadata={\"help\": \"The number of epochs to run the training.\"})\n lr: float = field(default=1e-3, metadata={\"help\": \"The learning rate to be used for the optimize.\"})\n output_path: str = field(default=\"./output\", metadata={\"help\": \"The output data directory.\"})\n image_size: tuple = field(default=(224, 224), metadata={\"help\": \"The input image size.\"})\n\n # TODO: possibly remove because hydra already do this\n # def __init__(self, **entries):\n # for k, v in entries.items():\n # self.__dict__[k] = Configuration(**v) if isinstance(v, dict) else v\n\n # @classmethod\n # def from_yaml(cls, config_file: str):\n # \"\"\"Create an instance of the configuration from a yaml file.\"\"\"\n # with open(config_file) as f:\n # data = yaml.safe_load(f)\n # return cls(**data)\n\n\nclass Lambda(nn.Module):\n \"\"\"Module to create a lambda function as nn.Module.\n\n Args:\n fcn: a pointer to any function.\n\n Example:\n >>> import torch\n >>> import kornia as K\n >>> fcn = Lambda(lambda x: K.geometry.resize(x, (32, 16)))\n >>> fcn(torch.rand(1, 4, 64, 32)).shape\n torch.Size([1, 4, 32, 16])\n \"\"\"\n\n def __init__(self, fcn):\n super().__init__()\n self.fcn = fcn\n\n def forward(self, x):\n return self.fcn(x)\n\n\nclass StatsTracker:\n \"\"\"Stats tracker for computing metrics on the fly.\"\"\"\n\n def __init__(self) -> None:\n self._stats: Dict[str, AverageMeter] = {}\n\n @property\n def stats(self):\n return self._stats\n\n def update(self, key: str, val: float, batch_size: int) -> None:\n \"\"\"Update the stats by the key value pair.\"\"\"\n if key not in self._stats:\n self._stats[key] = AverageMeter()\n self._stats[key].update(val, batch_size)\n\n def update_from_dict(self, dic: Dict[str, float], batch_size: int) -> None:\n \"\"\"Update the stats by the dict.\"\"\"\n for k, v in dic.items():\n self.update(k, v, batch_size)\n\n def __repr__(self) -> str:\n return \" \".join([f\"{k.upper()}: {v.val:.2f} {v.val:.2f} \" for k, v in self._stats.items()])\n\n def as_dict(self) -> Dict[str, AverageMeter]:\n \"\"\"Return the dict format.\"\"\"\n return self._stats\n", "path": "kornia/x/utils.py"}, {"content": "# Welcome to the Kornia setup.py.\nimport sys\n\n# Make sure that kornia is running on Python 3.7.0 or later\n# (to avoid running into this bug: https://bugs.python.org/issue29246)\n\nif sys.version_info < (3, 7, 0):\n raise RuntimeError(\"Kornia requires Python 3.7.0 or later.\")\n\n\nfrom setuptools import setup\n\nsetup()\n", "path": "setup.py"}, {"content": "from typing import TYPE_CHECKING, List, Optional, Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom packaging import version\n\n\ndef torch_version() -> 
str:\n \"\"\"Parse the `torch.__version__` variable and removes +cu*/cpu.\"\"\"\n return torch.__version__.split('+')[0]\n\n\n# TODO: replace by torch_version_ge``\ndef torch_version_geq(major, minor) -> bool:\n _version = version.parse(torch_version())\n return _version >= version.parse(f\"{major}.{minor}\")\n\n\ndef torch_version_lt(major: int, minor: int, patch: int) -> bool:\n _version = version.parse(torch_version())\n return _version < version.parse(f\"{major}.{minor}.{patch}\")\n\n\ndef torch_version_le(major: int, minor: int, patch: int) -> bool:\n _version = version.parse(torch_version())\n return _version <= version.parse(f\"{major}.{minor}.{patch}\")\n\n\ndef torch_version_ge(major: int, minor: int, patch: int) -> bool:\n _version = version.parse(torch_version())\n return _version >= version.parse(f\"{major}.{minor}.{patch}\")\n\n\nif version.parse(torch_version()) > version.parse(\"1.7.1\"):\n from torch.linalg import qr as linalg_qr\nelse:\n from torch import qr as linalg_qr # noqa: F401\n\n\nif torch_version_ge(1, 10, 0):\n\n if not TYPE_CHECKING:\n\n def torch_meshgrid(tensors: List[Tensor], indexing: str):\n return torch.meshgrid(tensors, indexing=indexing)\n\nelse:\n\n if TYPE_CHECKING:\n\n def torch_meshgrid(tensors: List[Tensor], indexing: Optional[str] = None) -> Tuple[Tensor, ...]:\n return torch.meshgrid(tensors)\n\n else:\n\n def torch_meshgrid(tensors: List[Tensor], indexing: str):\n return torch.meshgrid(tensors)\n\n\nif torch_version_ge(1, 10, 0):\n torch_inference_mode = torch.inference_mode\nelse:\n torch_inference_mode = torch.no_grad\n", "path": "kornia/utils/_compat.py"}]}
| 2,138 | 508 |
gh_patches_debug_20120
|
rasdani/github-patches
|
git_diff
|
searxng__searxng-2874
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: Yahoo results for simplified Chinese search sometimes have the first character cut off
<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->
**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**
The latest commit as of this report, 75587d03a6766c87f2f1964c82080393559ed623
**How did you install SearXNG?**
```
make run
```
**What happened?**
As the title says. Current code redirects simplified search to the Hong Kong site, which actually uses traditional Chinese, but marks simplified pages with a `<span class="fz-s fc-obsidian ml-4">簡</span>` after the title text node, at the end:
```html
<h3 style="display:block;margin-top:24px;margin-bottom:2px;" class="title tc"><a style="line-height:26px" class=" d-ib fz-20 lh-26 td-hu tc va-bot mxw-100p" href="https://zh.wikipedia.org/wiki/%E7%B9%81%E4%BD%93%E5%AD%97" referrerpolicy="origin" target="_blank" data-439="6515fd318fbac" aria-label="繁体字 - 维基百科,自由的百科全书"><span class=" s-url d-ib p-abs t-0 l-0 fz-14 lh-20 fc-obsidian wr-bw ls-n pb-4">zh.wikipedia.org<span class=" fc-pewter"> › wiki › <b>繁体字</b></span></span>繁体字 - 维基百科,自由的百科全书<span class="fz-s fc-obsidian ml-4">簡</span></a></h3>
```
This character is extracted by `.xpath('span')` in the first line, which, without knowledge of this situation, actually extracted text from two `span`s, making the second line substring off by one:
https://github.com/searxng/searxng/blob/75587d03a6766c87f2f1964c82080393559ed623/searx/engines/yahoo.py#L139-L140
**How To Reproduce**
Search for some simplified Chinese keyword and look for a Yahoo result with 簡 being the last character of its title.
**Expected behavior**
That mark should be ignored, eliminating the off by one.
**Screenshots & Logs**
<img width="664" alt="problematic search result" src="https://github.com/searxng/searxng/assets/4474501/f8ea1959-0487-4fed-b93f-c3e9a061eaea">
You can see that the last breadcrumb is 繁体字, but the title says 体字, cutting off 繁; while there is an extraneous 簡 at the end.
**Additional context**
```diff
diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py
index 0fdeacec2..4858e7e3f 100644
--- a/searx/engines/yahoo.py
+++ b/searx/engines/yahoo.py
@@ -136,8 +136,9 @@ def response(resp):
title = eval_xpath_getindex(result, './/h3/a', 0, default=None)
if title is None:
continue
- offset = len(extract_text(title.xpath('span')))
- title = extract_text(title)[offset:]
+ offset = len(extract_text(title.xpath('span[1]')))
+ offset_right = -len(extract_text(title.xpath('span[2]'))) or None
+ title = extract_text(title)[offset:offset_right]
content = eval_xpath_getindex(result, './/div[contains(@class, "compText")]', 0, default='')
content = extract_text(content, allow_none=True)
```
This is an apparent fix, but I'm not sure it's worth it.
--- END ISSUE ---
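To see the off-by-one concretely, here is a minimal sketch using simplified, hypothetical markup that mirrors the structure quoted above (plain `lxml` in place of searx's `extract_text`, which per the description concatenates the text of both spans):

```python
from lxml import html

link = html.fromstring(
    '<a><span>zh.wikipedia.org › wiki › 繁体字</span>'
    '繁体字 - 维基百科,自由的百科全书<span>簡</span></a>'
)
# xpath('span') matches BOTH spans, so the computed offset is one character
# too long (the trailing 簡 mark), shifting the slice past the first character.
offset = len(''.join(s.text_content() for s in link.xpath('span')))
print(link.text_content()[offset:])  # -> '体字 - 维基百科,自由的百科全书簡'
```

This reproduces exactly the symptom in the screenshot: the leading 繁 is cut off and the 簡 marker is left dangling at the end.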
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/yahoo.py`
Content:
```
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 # lint: pylint
3 """Yahoo Search (Web)
4
5 Languages are supported by mapping the language to a domain. If domain is not
6 found in :py:obj:`lang2domain` URL ``<lang>.search.yahoo.com`` is used.
7
8 """
9
10 from urllib.parse import (
11 unquote,
12 urlencode,
13 )
14 from lxml import html
15
16 from searx.utils import (
17 eval_xpath_getindex,
18 eval_xpath_list,
19 extract_text,
20 )
21 from searx.enginelib.traits import EngineTraits
22
23 traits: EngineTraits
24
25 # about
26 about = {
27 "website": 'https://search.yahoo.com/',
28 "wikidata_id": None,
29 "official_api_documentation": 'https://developer.yahoo.com/api/',
30 "use_official_api": False,
31 "require_api_key": False,
32 "results": 'HTML',
33 }
34
35 # engine dependent config
36 categories = ['general', 'web']
37 paging = True
38 time_range_support = True
39 # send_accept_language_header = True
40
41 time_range_dict = {
42 'day': ('1d', 'd'),
43 'week': ('1w', 'w'),
44 'month': ('1m', 'm'),
45 }
46
47 lang2domain = {
48 'zh_chs': 'hk.search.yahoo.com',
49 'zh_cht': 'tw.search.yahoo.com',
50 'any': 'search.yahoo.com',
51 'en': 'search.yahoo.com',
52 'bg': 'search.yahoo.com',
53 'cs': 'search.yahoo.com',
54 'da': 'search.yahoo.com',
55 'el': 'search.yahoo.com',
56 'et': 'search.yahoo.com',
57 'he': 'search.yahoo.com',
58 'hr': 'search.yahoo.com',
59 'ja': 'search.yahoo.com',
60 'ko': 'search.yahoo.com',
61 'sk': 'search.yahoo.com',
62 'sl': 'search.yahoo.com',
63 }
64 """Map language to domain"""
65
66 locale_aliases = {
67 'zh': 'zh_Hans',
68 'zh-HK': 'zh_Hans',
69 'zh-CN': 'zh_Hans', # dead since 2015 / routed to hk.search.yahoo.com
70 'zh-TW': 'zh_Hant',
71 }
72
73
74 def request(query, params):
75 """build request"""
76
77 lang = locale_aliases.get(params['language'], None)
78 if not lang:
79 lang = params['language'].split('-')[0]
80 lang = traits.get_language(lang, traits.all_locale)
81
82 offset = (params['pageno'] - 1) * 7 + 1
83 age, btf = time_range_dict.get(params['time_range'], ('', ''))
84
85 args = urlencode(
86 {
87 'p': query,
88 'ei': 'UTF-8',
89 'fl': 1,
90 'vl': 'lang_' + lang,
91 'btf': btf,
92 'fr2': 'time',
93 'age': age,
94 'b': offset,
95 'xargs': 0,
96 }
97 )
98
99 domain = lang2domain.get(lang, '%s.search.yahoo.com' % lang)
100 params['url'] = 'https://%s/search?%s' % (domain, args)
101 return params
102
103
104 def parse_url(url_string):
105 """remove yahoo-specific tracking-url"""
106
107 endings = ['/RS', '/RK']
108 endpositions = []
109 start = url_string.find('http', url_string.find('/RU=') + 1)
110
111 for ending in endings:
112 endpos = url_string.rfind(ending)
113 if endpos > -1:
114 endpositions.append(endpos)
115
116 if start == 0 or len(endpositions) == 0:
117 return url_string
118
119 end = min(endpositions)
120 return unquote(url_string[start:end])
121
122
123 def response(resp):
124 """parse response"""
125
126 results = []
127 dom = html.fromstring(resp.text)
128
129 # parse results
130 for result in eval_xpath_list(dom, '//div[contains(@class,"algo-sr")]'):
131 url = eval_xpath_getindex(result, './/h3/a/@href', 0, default=None)
132 if url is None:
133 continue
134 url = parse_url(url)
135
136 title = eval_xpath_getindex(result, './/h3/a', 0, default=None)
137 if title is None:
138 continue
139 offset = len(extract_text(title.xpath('span')))
140 title = extract_text(title)[offset:]
141
142 content = eval_xpath_getindex(result, './/div[contains(@class, "compText")]', 0, default='')
143 content = extract_text(content, allow_none=True)
144
145 # append result
146 results.append({'url': url, 'title': title, 'content': content})
147
148 for suggestion in eval_xpath_list(dom, '//div[contains(@class, "AlsoTry")]//table//a'):
149 # append suggestion
150 results.append({'suggestion': extract_text(suggestion)})
151
152 return results
153
154
155 def fetch_traits(engine_traits: EngineTraits):
156 """Fetch languages from yahoo"""
157
158 # pylint: disable=import-outside-toplevel
159 import babel
160 from searx import network
161 from searx.locales import language_tag
162
163 engine_traits.all_locale = 'any'
164
165 resp = network.get('https://search.yahoo.com/preferences/languages')
166 if not resp.ok:
167 print("ERROR: response from peertube is not OK.")
168
169 dom = html.fromstring(resp.text)
170 offset = len('lang_')
171
172 eng2sxng = {'zh_chs': 'zh_Hans', 'zh_cht': 'zh_Hant'}
173
174 for val in eval_xpath_list(dom, '//div[contains(@class, "lang-item")]/input/@value'):
175 eng_tag = val[offset:]
176
177 try:
178 sxng_tag = language_tag(babel.Locale.parse(eng2sxng.get(eng_tag, eng_tag)))
179 except babel.UnknownLocaleError:
180 print('ERROR: unknown language --> %s' % eng_tag)
181 continue
182
183 conflict = engine_traits.languages.get(sxng_tag)
184 if conflict:
185 if conflict != eng_tag:
186 print("CONFLICT: babel %s --> %s, %s" % (sxng_tag, conflict, eng_tag))
187 continue
188 engine_traits.languages[sxng_tag] = eng_tag
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py
--- a/searx/engines/yahoo.py
+++ b/searx/engines/yahoo.py
@@ -133,12 +133,7 @@
continue
url = parse_url(url)
- title = eval_xpath_getindex(result, './/h3/a', 0, default=None)
- if title is None:
- continue
- offset = len(extract_text(title.xpath('span')))
- title = extract_text(title)[offset:]
-
+ title = extract_text(result.xpath('.//h3//a/@aria-label'))
content = eval_xpath_getindex(result, './/div[contains(@class, "compText")]', 0, default='')
content = extract_text(content, allow_none=True)
@@ -164,7 +159,7 @@
resp = network.get('https://search.yahoo.com/preferences/languages')
if not resp.ok:
- print("ERROR: response from peertube is not OK.")
+ print("ERROR: response from yahoo is not OK.")
dom = html.fromstring(resp.text)
offset = len('lang_')
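A quick illustration of why the patch above sidesteps the offset arithmetic: the title is read from the link's `aria-label` attribute, which already holds the clean text. A minimal sketch with hypothetical markup:

```python
from lxml import html

result = html.fromstring('<h3><a aria-label="繁体字 - 维基百科,自由的百科全书">…</a></h3>')
# The attribute value needs no slicing, so the span marker cannot skew it.
print(result.xpath('.//a/@aria-label')[0])  # -> '繁体字 - 维基百科,自由的百科全书'
```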
|
{"golden_diff": "diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py\n--- a/searx/engines/yahoo.py\n+++ b/searx/engines/yahoo.py\n@@ -133,12 +133,7 @@\n continue\n url = parse_url(url)\n \n- title = eval_xpath_getindex(result, './/h3/a', 0, default=None)\n- if title is None:\n- continue\n- offset = len(extract_text(title.xpath('span')))\n- title = extract_text(title)[offset:]\n-\n+ title = extract_text(result.xpath('.//h3//a/@aria-label'))\n content = eval_xpath_getindex(result, './/div[contains(@class, \"compText\")]', 0, default='')\n content = extract_text(content, allow_none=True)\n \n@@ -164,7 +159,7 @@\n \n resp = network.get('https://search.yahoo.com/preferences/languages')\n if not resp.ok:\n- print(\"ERROR: response from peertube is not OK.\")\n+ print(\"ERROR: response from yahoo is not OK.\")\n \n dom = html.fromstring(resp.text)\n offset = len('lang_')\n", "issue": "Bug: Yahoo results for simplified Chinese search sometimes have the first character cut off\n<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->\r\n\r\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\n\r\nThe latest commit as of this report, 75587d03a6766c87f2f1964c82080393559ed623\r\n\r\n**How did you install SearXNG?**\r\n\r\n```\r\nmake run\r\n```\r\n\r\n**What happened?**\r\n\r\nAs the title says. Current code redirects simplified search to the Hong Kong site, which actually uses traditional Chinese, but marks simplified pages with a `<span class=\"fz-s fc-obsidian ml-4\">\u7c21</span>` after the title text node, at the end:\r\n\r\n```html\r\n<h3 style=\"display:block;margin-top:24px;margin-bottom:2px;\" class=\"title tc\"><a style=\"line-height:26px\" class=\" d-ib fz-20 lh-26 td-hu tc va-bot mxw-100p\" href=\"https://zh.wikipedia.org/wiki/%E7%B9%81%E4%BD%93%E5%AD%97\" referrerpolicy=\"origin\" target=\"_blank\" data-439=\"6515fd318fbac\" aria-label=\"\u7e41\u4f53\u5b57 - \u7ef4\u57fa\u767e\u79d1\uff0c\u81ea\u7531\u7684\u767e\u79d1\u5168\u4e66\"><span class=\" s-url d-ib p-abs t-0 l-0 fz-14 lh-20 fc-obsidian wr-bw ls-n pb-4\">zh.wikipedia.org<span class=\" fc-pewter\"> \u203a wiki \u203a <b>\u7e41\u4f53\u5b57</b></span></span>\u7e41\u4f53\u5b57 - \u7ef4\u57fa\u767e\u79d1\uff0c\u81ea\u7531\u7684\u767e\u79d1\u5168\u4e66<span class=\"fz-s fc-obsidian ml-4\">\u7c21</span></a></h3>\r\n```\r\n\r\nThis character is extracted by `.xpath('span')` in the first line, which, without knowledge of this situation, actually extracted text from two `span`s, making the second line substring off by one:\r\n\r\nhttps://github.com/searxng/searxng/blob/75587d03a6766c87f2f1964c82080393559ed623/searx/engines/yahoo.py#L139-L140\r\n\r\n**How To Reproduce**\r\n\r\nSearch for some simplified Chinese keyword and look for a Yahoo result with \u7c21 being the last character of its title.\r\n\r\n**Expected behavior**\r\n\r\nThat mark should be ignored, eliminating the off by one.\r\n\r\n**Screenshots & Logs**\r\n\r\n<img width=\"664\" alt=\"problematic search result\" src=\"https://github.com/searxng/searxng/assets/4474501/f8ea1959-0487-4fed-b93f-c3e9a061eaea\">\r\n\r\nYou can see that the last breadcrumb is \u7e41\u4f53\u5b57, but the title says \u4f53\u5b57, cutting off \u7e41; while there is an extraneous \u7c21 at the end.\r\n\r\n**Additional context**\r\n\r\n```diff\r\ndiff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py\r\nindex 0fdeacec2..4858e7e3f 100644\r\n--- a/searx/engines/yahoo.py\r\n+++ b/searx/engines/yahoo.py\r\n@@ -136,8 
+136,9 @@ def response(resp):\r\n title = eval_xpath_getindex(result, './/h3/a', 0, default=None)\r\n if title is None:\r\n continue\r\n- offset = len(extract_text(title.xpath('span')))\r\n- title = extract_text(title)[offset:]\r\n+ offset = len(extract_text(title.xpath('span[1]')))\r\n+ offset_right = -len(extract_text(title.xpath('span[2]'))) or None\r\n+ title = extract_text(title)[offset:offset_right]\r\n\r\n content = eval_xpath_getindex(result, './/div[contains(@class, \"compText\")]', 0, default='')\r\n content = extract_text(content, allow_none=True)\r\n```\r\n\r\nThis is an apparent fix, but I'm not sure it's worth it.\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Yahoo Search (Web)\n\nLanguages are supported by mapping the language to a domain. If domain is not\nfound in :py:obj:`lang2domain` URL ``<lang>.search.yahoo.com`` is used.\n\n\"\"\"\n\nfrom urllib.parse import (\n unquote,\n urlencode,\n)\nfrom lxml import html\n\nfrom searx.utils import (\n eval_xpath_getindex,\n eval_xpath_list,\n extract_text,\n)\nfrom searx.enginelib.traits import EngineTraits\n\ntraits: EngineTraits\n\n# about\nabout = {\n \"website\": 'https://search.yahoo.com/',\n \"wikidata_id\": None,\n \"official_api_documentation\": 'https://developer.yahoo.com/api/',\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": 'HTML',\n}\n\n# engine dependent config\ncategories = ['general', 'web']\npaging = True\ntime_range_support = True\n# send_accept_language_header = True\n\ntime_range_dict = {\n 'day': ('1d', 'd'),\n 'week': ('1w', 'w'),\n 'month': ('1m', 'm'),\n}\n\nlang2domain = {\n 'zh_chs': 'hk.search.yahoo.com',\n 'zh_cht': 'tw.search.yahoo.com',\n 'any': 'search.yahoo.com',\n 'en': 'search.yahoo.com',\n 'bg': 'search.yahoo.com',\n 'cs': 'search.yahoo.com',\n 'da': 'search.yahoo.com',\n 'el': 'search.yahoo.com',\n 'et': 'search.yahoo.com',\n 'he': 'search.yahoo.com',\n 'hr': 'search.yahoo.com',\n 'ja': 'search.yahoo.com',\n 'ko': 'search.yahoo.com',\n 'sk': 'search.yahoo.com',\n 'sl': 'search.yahoo.com',\n}\n\"\"\"Map language to domain\"\"\"\n\nlocale_aliases = {\n 'zh': 'zh_Hans',\n 'zh-HK': 'zh_Hans',\n 'zh-CN': 'zh_Hans', # dead since 2015 / routed to hk.search.yahoo.com\n 'zh-TW': 'zh_Hant',\n}\n\n\ndef request(query, params):\n \"\"\"build request\"\"\"\n\n lang = locale_aliases.get(params['language'], None)\n if not lang:\n lang = params['language'].split('-')[0]\n lang = traits.get_language(lang, traits.all_locale)\n\n offset = (params['pageno'] - 1) * 7 + 1\n age, btf = time_range_dict.get(params['time_range'], ('', ''))\n\n args = urlencode(\n {\n 'p': query,\n 'ei': 'UTF-8',\n 'fl': 1,\n 'vl': 'lang_' + lang,\n 'btf': btf,\n 'fr2': 'time',\n 'age': age,\n 'b': offset,\n 'xargs': 0,\n }\n )\n\n domain = lang2domain.get(lang, '%s.search.yahoo.com' % lang)\n params['url'] = 'https://%s/search?%s' % (domain, args)\n return params\n\n\ndef parse_url(url_string):\n \"\"\"remove yahoo-specific tracking-url\"\"\"\n\n endings = ['/RS', '/RK']\n endpositions = []\n start = url_string.find('http', url_string.find('/RU=') + 1)\n\n for ending in endings:\n endpos = url_string.rfind(ending)\n if endpos > -1:\n endpositions.append(endpos)\n\n if start == 0 or len(endpositions) == 0:\n return url_string\n\n end = min(endpositions)\n return unquote(url_string[start:end])\n\n\ndef response(resp):\n \"\"\"parse response\"\"\"\n\n results = []\n dom = html.fromstring(resp.text)\n\n # parse results\n for result in eval_xpath_list(dom, 
'//div[contains(@class,\"algo-sr\")]'):\n url = eval_xpath_getindex(result, './/h3/a/@href', 0, default=None)\n if url is None:\n continue\n url = parse_url(url)\n\n title = eval_xpath_getindex(result, './/h3/a', 0, default=None)\n if title is None:\n continue\n offset = len(extract_text(title.xpath('span')))\n title = extract_text(title)[offset:]\n\n content = eval_xpath_getindex(result, './/div[contains(@class, \"compText\")]', 0, default='')\n content = extract_text(content, allow_none=True)\n\n # append result\n results.append({'url': url, 'title': title, 'content': content})\n\n for suggestion in eval_xpath_list(dom, '//div[contains(@class, \"AlsoTry\")]//table//a'):\n # append suggestion\n results.append({'suggestion': extract_text(suggestion)})\n\n return results\n\n\ndef fetch_traits(engine_traits: EngineTraits):\n \"\"\"Fetch languages from yahoo\"\"\"\n\n # pylint: disable=import-outside-toplevel\n import babel\n from searx import network\n from searx.locales import language_tag\n\n engine_traits.all_locale = 'any'\n\n resp = network.get('https://search.yahoo.com/preferences/languages')\n if not resp.ok:\n print(\"ERROR: response from peertube is not OK.\")\n\n dom = html.fromstring(resp.text)\n offset = len('lang_')\n\n eng2sxng = {'zh_chs': 'zh_Hans', 'zh_cht': 'zh_Hant'}\n\n for val in eval_xpath_list(dom, '//div[contains(@class, \"lang-item\")]/input/@value'):\n eng_tag = val[offset:]\n\n try:\n sxng_tag = language_tag(babel.Locale.parse(eng2sxng.get(eng_tag, eng_tag)))\n except babel.UnknownLocaleError:\n print('ERROR: unknown language --> %s' % eng_tag)\n continue\n\n conflict = engine_traits.languages.get(sxng_tag)\n if conflict:\n if conflict != eng_tag:\n print(\"CONFLICT: babel %s --> %s, %s\" % (sxng_tag, conflict, eng_tag))\n continue\n engine_traits.languages[sxng_tag] = eng_tag\n", "path": "searx/engines/yahoo.py"}], "after_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Yahoo Search (Web)\n\nLanguages are supported by mapping the language to a domain. 
If domain is not\nfound in :py:obj:`lang2domain` URL ``<lang>.search.yahoo.com`` is used.\n\n\"\"\"\n\nfrom urllib.parse import (\n unquote,\n urlencode,\n)\nfrom lxml import html\n\nfrom searx.utils import (\n eval_xpath_getindex,\n eval_xpath_list,\n extract_text,\n)\nfrom searx.enginelib.traits import EngineTraits\n\ntraits: EngineTraits\n\n# about\nabout = {\n \"website\": 'https://search.yahoo.com/',\n \"wikidata_id\": None,\n \"official_api_documentation\": 'https://developer.yahoo.com/api/',\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": 'HTML',\n}\n\n# engine dependent config\ncategories = ['general', 'web']\npaging = True\ntime_range_support = True\n# send_accept_language_header = True\n\ntime_range_dict = {\n 'day': ('1d', 'd'),\n 'week': ('1w', 'w'),\n 'month': ('1m', 'm'),\n}\n\nlang2domain = {\n 'zh_chs': 'hk.search.yahoo.com',\n 'zh_cht': 'tw.search.yahoo.com',\n 'any': 'search.yahoo.com',\n 'en': 'search.yahoo.com',\n 'bg': 'search.yahoo.com',\n 'cs': 'search.yahoo.com',\n 'da': 'search.yahoo.com',\n 'el': 'search.yahoo.com',\n 'et': 'search.yahoo.com',\n 'he': 'search.yahoo.com',\n 'hr': 'search.yahoo.com',\n 'ja': 'search.yahoo.com',\n 'ko': 'search.yahoo.com',\n 'sk': 'search.yahoo.com',\n 'sl': 'search.yahoo.com',\n}\n\"\"\"Map language to domain\"\"\"\n\nlocale_aliases = {\n 'zh': 'zh_Hans',\n 'zh-HK': 'zh_Hans',\n 'zh-CN': 'zh_Hans', # dead since 2015 / routed to hk.search.yahoo.com\n 'zh-TW': 'zh_Hant',\n}\n\n\ndef request(query, params):\n \"\"\"build request\"\"\"\n\n lang = locale_aliases.get(params['language'], None)\n if not lang:\n lang = params['language'].split('-')[0]\n lang = traits.get_language(lang, traits.all_locale)\n\n offset = (params['pageno'] - 1) * 7 + 1\n age, btf = time_range_dict.get(params['time_range'], ('', ''))\n\n args = urlencode(\n {\n 'p': query,\n 'ei': 'UTF-8',\n 'fl': 1,\n 'vl': 'lang_' + lang,\n 'btf': btf,\n 'fr2': 'time',\n 'age': age,\n 'b': offset,\n 'xargs': 0,\n }\n )\n\n domain = lang2domain.get(lang, '%s.search.yahoo.com' % lang)\n params['url'] = 'https://%s/search?%s' % (domain, args)\n return params\n\n\ndef parse_url(url_string):\n \"\"\"remove yahoo-specific tracking-url\"\"\"\n\n endings = ['/RS', '/RK']\n endpositions = []\n start = url_string.find('http', url_string.find('/RU=') + 1)\n\n for ending in endings:\n endpos = url_string.rfind(ending)\n if endpos > -1:\n endpositions.append(endpos)\n\n if start == 0 or len(endpositions) == 0:\n return url_string\n\n end = min(endpositions)\n return unquote(url_string[start:end])\n\n\ndef response(resp):\n \"\"\"parse response\"\"\"\n\n results = []\n dom = html.fromstring(resp.text)\n\n # parse results\n for result in eval_xpath_list(dom, '//div[contains(@class,\"algo-sr\")]'):\n url = eval_xpath_getindex(result, './/h3/a/@href', 0, default=None)\n if url is None:\n continue\n url = parse_url(url)\n\n title = extract_text(result.xpath('.//h3//a/@aria-label'))\n content = eval_xpath_getindex(result, './/div[contains(@class, \"compText\")]', 0, default='')\n content = extract_text(content, allow_none=True)\n\n # append result\n results.append({'url': url, 'title': title, 'content': content})\n\n for suggestion in eval_xpath_list(dom, '//div[contains(@class, \"AlsoTry\")]//table//a'):\n # append suggestion\n results.append({'suggestion': extract_text(suggestion)})\n\n return results\n\n\ndef fetch_traits(engine_traits: EngineTraits):\n \"\"\"Fetch languages from yahoo\"\"\"\n\n # pylint: disable=import-outside-toplevel\n import babel\n from searx 
import network\n from searx.locales import language_tag\n\n engine_traits.all_locale = 'any'\n\n resp = network.get('https://search.yahoo.com/preferences/languages')\n if not resp.ok:\n print(\"ERROR: response from yahoo is not OK.\")\n\n dom = html.fromstring(resp.text)\n offset = len('lang_')\n\n eng2sxng = {'zh_chs': 'zh_Hans', 'zh_cht': 'zh_Hant'}\n\n for val in eval_xpath_list(dom, '//div[contains(@class, \"lang-item\")]/input/@value'):\n eng_tag = val[offset:]\n\n try:\n sxng_tag = language_tag(babel.Locale.parse(eng2sxng.get(eng_tag, eng_tag)))\n except babel.UnknownLocaleError:\n print('ERROR: unknown language --> %s' % eng_tag)\n continue\n\n conflict = engine_traits.languages.get(sxng_tag)\n if conflict:\n if conflict != eng_tag:\n print(\"CONFLICT: babel %s --> %s, %s\" % (sxng_tag, conflict, eng_tag))\n continue\n engine_traits.languages[sxng_tag] = eng_tag\n", "path": "searx/engines/yahoo.py"}]}
| 3,080 | 269 |
gh_patches_debug_8954
|
rasdani/github-patches
|
git_diff
|
mne-tools__mne-bids-1028
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
complete `.zenodo.json` metadata
Our `.zenodo.json` file controls which contributors get listed in the [Zenodo code archive](https://zenodo.org/record/6359371):
https://github.com/mne-tools/mne-bids/blob/13d2b975393f5ba55f4b3a7a8bee4a164c08fa1a/.zenodo.json#L1-L4
I feel like adding the contributors that are currently not listed to that file in a PR, and making that a "custom" (to add new contributors also to the zenodo metadata). I think that's a nice "reward" for contributions. Any objections?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """Setup MNE-BIDS."""
2 import sys
3
4 from setuptools import setup
5
6 # Give setuptools a hint to complain if it's too old a version
7 SETUP_REQUIRES = ["setuptools >= 46.4.0"]
8 # This enables setuptools to install wheel on-the-fly
9 SETUP_REQUIRES += ["wheel"] if "bdist_wheel" in sys.argv else []
10
11 version = None
12 with open('mne_bids/__init__.py', 'r') as fid:
13 for line in fid:
14 line = line.strip()
15 if line.startswith('__version__ = '):
16 version = line.split(' = ')[1].split('#')[0].strip('\'')
17 break
18 if version is None:
19 raise RuntimeError('Could not determine version')
20
21
22 if __name__ == "__main__":
23 setup(
24 version=version,
25 setup_requires=SETUP_REQUIRES,
26 )
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,5 @@
"""Setup MNE-BIDS."""
+import os
import sys
from setuptools import setup
@@ -9,7 +10,7 @@
SETUP_REQUIRES += ["wheel"] if "bdist_wheel" in sys.argv else []
version = None
-with open('mne_bids/__init__.py', 'r') as fid:
+with open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:
for line in fid:
line = line.strip()
if line.startswith('__version__ = '):
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,4 +1,5 @@\n \"\"\"Setup MNE-BIDS.\"\"\"\n+import os\n import sys\n \n from setuptools import setup\n@@ -9,7 +10,7 @@\n SETUP_REQUIRES += [\"wheel\"] if \"bdist_wheel\" in sys.argv else []\n \n version = None\n-with open('mne_bids/__init__.py', 'r') as fid:\n+with open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:\n for line in fid:\n line = line.strip()\n if line.startswith('__version__ = '):\n", "issue": "complete `.zenodo.json` metadata\nOur `.zenodo.json` file controls which contributors get listed in the [Zenodo code archive](https://zenodo.org/record/6359371):\r\n\r\nhttps://github.com/mne-tools/mne-bids/blob/13d2b975393f5ba55f4b3a7a8bee4a164c08fa1a/.zenodo.json#L1-L4\r\n\r\nI feel like adding the contributors that are currently not listed to that file in a PR, and making that a \"custom\" (to add new contributors also to the zenodo metadata). I think that's a nice \"reward\" for contributions. Any objections?\n", "before_files": [{"content": "\"\"\"Setup MNE-BIDS.\"\"\"\nimport sys\n\nfrom setuptools import setup\n\n# Give setuptools a hint to complain if it's too old a version\nSETUP_REQUIRES = [\"setuptools >= 46.4.0\"]\n# This enables setuptools to install wheel on-the-fly\nSETUP_REQUIRES += [\"wheel\"] if \"bdist_wheel\" in sys.argv else []\n\nversion = None\nwith open('mne_bids/__init__.py', 'r') as fid:\n for line in fid:\n line = line.strip()\n if line.startswith('__version__ = '):\n version = line.split(' = ')[1].split('#')[0].strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\nif __name__ == \"__main__\":\n setup(\n version=version,\n setup_requires=SETUP_REQUIRES,\n )\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"Setup MNE-BIDS.\"\"\"\nimport os\nimport sys\n\nfrom setuptools import setup\n\n# Give setuptools a hint to complain if it's too old a version\nSETUP_REQUIRES = [\"setuptools >= 46.4.0\"]\n# This enables setuptools to install wheel on-the-fly\nSETUP_REQUIRES += [\"wheel\"] if \"bdist_wheel\" in sys.argv else []\n\nversion = None\nwith open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:\n for line in fid:\n line = line.strip()\n if line.startswith('__version__ = '):\n version = line.split(' = ')[1].split('#')[0].strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\nif __name__ == \"__main__\":\n setup(\n version=version,\n setup_requires=SETUP_REQUIRES,\n )\n", "path": "setup.py"}]}
| 643 | 146 |
gh_patches_debug_37651
|
rasdani/github-patches
|
git_diff
|
getpelican__pelican-2952
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Explicit `fd.close` in `pelican_quickstart.py` needed?
Is there any reason we have so many explicit `fd.close()` calls in `pelican/tools/pelican_quickstart.py`?
Shouldn't the close be handled by the context manager we use to open the fd in the first place? Can they be removed?
--- END ISSUE ---
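For reference, a minimal sketch of the behaviour the question hinges on, using a throwaway file name rather than anything from the script itself:

```python
with open('example.conf', 'w') as fd:
    fd.write('# generated\n')
# The with-statement's context manager closes the file on exit (even on
# exceptions), so an explicit fd.close() inside the block is redundant.
print(fd.closed)  # -> True
```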
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pelican/tools/pelican_quickstart.py`
Content:
```
1 #!/usr/bin/env python
2
3 import argparse
4 import locale
5 import os
6
7 from jinja2 import Environment, FileSystemLoader
8
9 import pytz
10
11 try:
12 import readline # NOQA
13 except ImportError:
14 pass
15
16 try:
17 import tzlocal
18 _DEFAULT_TIMEZONE = tzlocal.get_localzone().zone
19 except ImportError:
20 _DEFAULT_TIMEZONE = 'Europe/Rome'
21
22 from pelican import __version__
23
24 locale.setlocale(locale.LC_ALL, '')
25 try:
26 _DEFAULT_LANGUAGE = locale.getlocale()[0]
27 except ValueError:
28 # Don't fail on macosx: "unknown locale: UTF-8"
29 _DEFAULT_LANGUAGE = None
30 if _DEFAULT_LANGUAGE is None:
31 _DEFAULT_LANGUAGE = 'en'
32 else:
33 _DEFAULT_LANGUAGE = _DEFAULT_LANGUAGE.split('_')[0]
34
35 _TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
36 "templates")
37 _jinja_env = Environment(
38 loader=FileSystemLoader(_TEMPLATES_DIR),
39 trim_blocks=True,
40 )
41
42
43 _GITHUB_PAGES_BRANCHES = {
44 'personal': 'main',
45 'project': 'gh-pages'
46 }
47
48 CONF = {
49 'pelican': 'pelican',
50 'pelicanopts': '',
51 'basedir': os.curdir,
52 'ftp_host': 'localhost',
53 'ftp_user': 'anonymous',
54 'ftp_target_dir': '/',
55 'ssh_host': 'localhost',
56 'ssh_port': 22,
57 'ssh_user': 'root',
58 'ssh_target_dir': '/var/www',
59 's3_bucket': 'my_s3_bucket',
60 'cloudfiles_username': 'my_rackspace_username',
61 'cloudfiles_api_key': 'my_rackspace_api_key',
62 'cloudfiles_container': 'my_cloudfiles_container',
63 'dropbox_dir': '~/Dropbox/Public/',
64 'github_pages_branch': _GITHUB_PAGES_BRANCHES['project'],
65 'default_pagination': 10,
66 'siteurl': '',
67 'lang': _DEFAULT_LANGUAGE,
68 'timezone': _DEFAULT_TIMEZONE
69 }
70
71 # url for list of valid timezones
72 _TZ_URL = 'https://en.wikipedia.org/wiki/List_of_tz_database_time_zones'
73
74
75 # Create a 'marked' default path, to determine if someone has supplied
76 # a path on the command-line.
77 class _DEFAULT_PATH_TYPE(str):
78 is_default_path = True
79
80
81 _DEFAULT_PATH = _DEFAULT_PATH_TYPE(os.curdir)
82
83
84 def ask(question, answer=str, default=None, length=None):
85 if answer == str:
86 r = ''
87 while True:
88 if default:
89 r = input('> {} [{}] '.format(question, default))
90 else:
91 r = input('> {} '.format(question))
92
93 r = r.strip()
94
95 if len(r) <= 0:
96 if default:
97 r = default
98 break
99 else:
100 print('You must enter something')
101 else:
102 if length and len(r) != length:
103 print('Entry must be {} characters long'.format(length))
104 else:
105 break
106
107 return r
108
109 elif answer == bool:
110 r = None
111 while True:
112 if default is True:
113 r = input('> {} (Y/n) '.format(question))
114 elif default is False:
115 r = input('> {} (y/N) '.format(question))
116 else:
117 r = input('> {} (y/n) '.format(question))
118
119 r = r.strip().lower()
120
121 if r in ('y', 'yes'):
122 r = True
123 break
124 elif r in ('n', 'no'):
125 r = False
126 break
127 elif not r:
128 r = default
129 break
130 else:
131 print("You must answer 'yes' or 'no'")
132 return r
133 elif answer == int:
134 r = None
135 while True:
136 if default:
137 r = input('> {} [{}] '.format(question, default))
138 else:
139 r = input('> {} '.format(question))
140
141 r = r.strip()
142
143 if not r:
144 r = default
145 break
146
147 try:
148 r = int(r)
149 break
150 except ValueError:
151 print('You must enter an integer')
152 return r
153 else:
154 raise NotImplementedError(
155 'Argument `answer` must be str, bool, or integer')
156
157
158 def ask_timezone(question, default, tzurl):
159 """Prompt for time zone and validate input"""
160 lower_tz = [tz.lower() for tz in pytz.all_timezones]
161 while True:
162 r = ask(question, str, default)
163 r = r.strip().replace(' ', '_').lower()
164 if r in lower_tz:
165 r = pytz.all_timezones[lower_tz.index(r)]
166 break
167 else:
168 print('Please enter a valid time zone:\n'
169 ' (check [{}])'.format(tzurl))
170 return r
171
172
173 def main():
174 parser = argparse.ArgumentParser(
175 description="A kickstarter for Pelican",
176 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
177 parser.add_argument('-p', '--path', default=_DEFAULT_PATH,
178 help="The path to generate the blog into")
179 parser.add_argument('-t', '--title', metavar="title",
180 help='Set the title of the website')
181 parser.add_argument('-a', '--author', metavar="author",
182 help='Set the author name of the website')
183 parser.add_argument('-l', '--lang', metavar="lang",
184 help='Set the default web site language')
185
186 args = parser.parse_args()
187
188 print('''Welcome to pelican-quickstart v{v}.
189
190 This script will help you create a new Pelican-based website.
191
192 Please answer the following questions so this script can generate the files
193 needed by Pelican.
194
195 '''.format(v=__version__))
196
197 project = os.path.join(
198 os.environ.get('VIRTUAL_ENV', os.curdir), '.project')
199 no_path_was_specified = hasattr(args.path, 'is_default_path')
200 if os.path.isfile(project) and no_path_was_specified:
201 CONF['basedir'] = open(project).read().rstrip("\n")
202 print('Using project associated with current virtual environment. '
203 'Will save to:\n%s\n' % CONF['basedir'])
204 else:
205 CONF['basedir'] = os.path.abspath(os.path.expanduser(
206 ask('Where do you want to create your new web site?',
207 answer=str, default=args.path)))
208
209 CONF['sitename'] = ask('What will be the title of this web site?',
210 answer=str, default=args.title)
211 CONF['author'] = ask('Who will be the author of this web site?',
212 answer=str, default=args.author)
213 CONF['lang'] = ask('What will be the default language of this web site?',
214 str, args.lang or CONF['lang'], 2)
215
216 if ask('Do you want to specify a URL prefix? e.g., https://example.com ',
217 answer=bool, default=True):
218 CONF['siteurl'] = ask('What is your URL prefix? (see '
219 'above example; no trailing slash)',
220 str, CONF['siteurl'])
221
222 CONF['with_pagination'] = ask('Do you want to enable article pagination?',
223 bool, bool(CONF['default_pagination']))
224
225 if CONF['with_pagination']:
226 CONF['default_pagination'] = ask('How many articles per page '
227 'do you want?',
228 int, CONF['default_pagination'])
229 else:
230 CONF['default_pagination'] = False
231
232 CONF['timezone'] = ask_timezone('What is your time zone?',
233 CONF['timezone'], _TZ_URL)
234
235 automation = ask('Do you want to generate a tasks.py/Makefile '
236 'to automate generation and publishing?', bool, True)
237
238 if automation:
239 if ask('Do you want to upload your website using FTP?',
240 answer=bool, default=False):
241 CONF['ftp'] = True,
242 CONF['ftp_host'] = ask('What is the hostname of your FTP server?',
243 str, CONF['ftp_host'])
244 CONF['ftp_user'] = ask('What is your username on that server?',
245 str, CONF['ftp_user'])
246 CONF['ftp_target_dir'] = ask('Where do you want to put your '
247 'web site on that server?',
248 str, CONF['ftp_target_dir'])
249 if ask('Do you want to upload your website using SSH?',
250 answer=bool, default=False):
251 CONF['ssh'] = True,
252 CONF['ssh_host'] = ask('What is the hostname of your SSH server?',
253 str, CONF['ssh_host'])
254 CONF['ssh_port'] = ask('What is the port of your SSH server?',
255 int, CONF['ssh_port'])
256 CONF['ssh_user'] = ask('What is your username on that server?',
257 str, CONF['ssh_user'])
258 CONF['ssh_target_dir'] = ask('Where do you want to put your '
259 'web site on that server?',
260 str, CONF['ssh_target_dir'])
261
262 if ask('Do you want to upload your website using Dropbox?',
263 answer=bool, default=False):
264 CONF['dropbox'] = True,
265 CONF['dropbox_dir'] = ask('Where is your Dropbox directory?',
266 str, CONF['dropbox_dir'])
267
268 if ask('Do you want to upload your website using S3?',
269 answer=bool, default=False):
270 CONF['s3'] = True,
271 CONF['s3_bucket'] = ask('What is the name of your S3 bucket?',
272 str, CONF['s3_bucket'])
273
274 if ask('Do you want to upload your website using '
275 'Rackspace Cloud Files?', answer=bool, default=False):
276 CONF['cloudfiles'] = True,
277 CONF['cloudfiles_username'] = ask('What is your Rackspace '
278 'Cloud username?', str,
279 CONF['cloudfiles_username'])
280 CONF['cloudfiles_api_key'] = ask('What is your Rackspace '
281 'Cloud API key?', str,
282 CONF['cloudfiles_api_key'])
283 CONF['cloudfiles_container'] = ask('What is the name of your '
284 'Cloud Files container?',
285 str,
286 CONF['cloudfiles_container'])
287
288 if ask('Do you want to upload your website using GitHub Pages?',
289 answer=bool, default=False):
290 CONF['github'] = True,
291 if ask('Is this your personal page (username.github.io)?',
292 answer=bool, default=False):
293 CONF['github_pages_branch'] = \
294 _GITHUB_PAGES_BRANCHES['personal']
295 else:
296 CONF['github_pages_branch'] = \
297 _GITHUB_PAGES_BRANCHES['project']
298
299 try:
300 os.makedirs(os.path.join(CONF['basedir'], 'content'))
301 except OSError as e:
302 print('Error: {}'.format(e))
303
304 try:
305 os.makedirs(os.path.join(CONF['basedir'], 'output'))
306 except OSError as e:
307 print('Error: {}'.format(e))
308
309 try:
310 with open(os.path.join(CONF['basedir'], 'pelicanconf.py'),
311 'w', encoding='utf-8') as fd:
312 conf_python = dict()
313 for key, value in CONF.items():
314 conf_python[key] = repr(value)
315
316 _template = _jinja_env.get_template('pelicanconf.py.jinja2')
317 fd.write(_template.render(**conf_python))
318 fd.close()
319 except OSError as e:
320 print('Error: {}'.format(e))
321
322 try:
323 with open(os.path.join(CONF['basedir'], 'publishconf.py'),
324 'w', encoding='utf-8') as fd:
325 _template = _jinja_env.get_template('publishconf.py.jinja2')
326 fd.write(_template.render(**CONF))
327 fd.close()
328 except OSError as e:
329 print('Error: {}'.format(e))
330
331 if automation:
332 try:
333 with open(os.path.join(CONF['basedir'], 'tasks.py'),
334 'w', encoding='utf-8') as fd:
335 _template = _jinja_env.get_template('tasks.py.jinja2')
336 fd.write(_template.render(**CONF))
337 fd.close()
338 except OSError as e:
339 print('Error: {}'.format(e))
340 try:
341 with open(os.path.join(CONF['basedir'], 'Makefile'),
342 'w', encoding='utf-8') as fd:
343 py_v = 'python3'
344 _template = _jinja_env.get_template('Makefile.jinja2')
345 fd.write(_template.render(py_v=py_v, **CONF))
346 fd.close()
347 except OSError as e:
348 print('Error: {}'.format(e))
349
350 print('Done. Your new project is available at %s' % CONF['basedir'])
351
352
353 if __name__ == "__main__":
354 main()
355
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pelican/tools/pelican_quickstart.py b/pelican/tools/pelican_quickstart.py
--- a/pelican/tools/pelican_quickstart.py
+++ b/pelican/tools/pelican_quickstart.py
@@ -3,6 +3,7 @@
import argparse
import locale
import os
+from typing import Mapping
from jinja2 import Environment, FileSystemLoader
@@ -170,6 +171,16 @@
return r
+def render_jinja_template(tmpl_name: str, tmpl_vars: Mapping, target_path: str):
+ try:
+ with open(os.path.join(CONF['basedir'], target_path),
+ 'w', encoding='utf-8') as fd:
+ _template = _jinja_env.get_template(tmpl_name)
+ fd.write(_template.render(**tmpl_vars))
+ except OSError as e:
+ print('Error: {}'.format(e))
+
+
def main():
parser = argparse.ArgumentParser(
description="A kickstarter for Pelican",
@@ -306,46 +317,16 @@
except OSError as e:
print('Error: {}'.format(e))
- try:
- with open(os.path.join(CONF['basedir'], 'pelicanconf.py'),
- 'w', encoding='utf-8') as fd:
- conf_python = dict()
- for key, value in CONF.items():
- conf_python[key] = repr(value)
+ conf_python = dict()
+ for key, value in CONF.items():
+ conf_python[key] = repr(value)
+ render_jinja_template('pelicanconf.py.jinja2', conf_python, 'pelicanconf.py')
- _template = _jinja_env.get_template('pelicanconf.py.jinja2')
- fd.write(_template.render(**conf_python))
- fd.close()
- except OSError as e:
- print('Error: {}'.format(e))
-
- try:
- with open(os.path.join(CONF['basedir'], 'publishconf.py'),
- 'w', encoding='utf-8') as fd:
- _template = _jinja_env.get_template('publishconf.py.jinja2')
- fd.write(_template.render(**CONF))
- fd.close()
- except OSError as e:
- print('Error: {}'.format(e))
+ render_jinja_template('publishconf.py.jinja2', CONF, 'publishconf.py')
if automation:
- try:
- with open(os.path.join(CONF['basedir'], 'tasks.py'),
- 'w', encoding='utf-8') as fd:
- _template = _jinja_env.get_template('tasks.py.jinja2')
- fd.write(_template.render(**CONF))
- fd.close()
- except OSError as e:
- print('Error: {}'.format(e))
- try:
- with open(os.path.join(CONF['basedir'], 'Makefile'),
- 'w', encoding='utf-8') as fd:
- py_v = 'python3'
- _template = _jinja_env.get_template('Makefile.jinja2')
- fd.write(_template.render(py_v=py_v, **CONF))
- fd.close()
- except OSError as e:
- print('Error: {}'.format(e))
+ render_jinja_template('tasks.py.jinja2', CONF, 'tasks.py')
+ render_jinja_template('Makefile.jinja2', CONF, 'Makefile')
print('Done. Your new project is available at %s' % CONF['basedir'])
|
{"golden_diff": "diff --git a/pelican/tools/pelican_quickstart.py b/pelican/tools/pelican_quickstart.py\n--- a/pelican/tools/pelican_quickstart.py\n+++ b/pelican/tools/pelican_quickstart.py\n@@ -3,6 +3,7 @@\n import argparse\n import locale\n import os\n+from typing import Mapping\n \n from jinja2 import Environment, FileSystemLoader\n \n@@ -170,6 +171,16 @@\n return r\n \n \n+def render_jinja_template(tmpl_name: str, tmpl_vars: Mapping, target_path: str):\n+ try:\n+ with open(os.path.join(CONF['basedir'], target_path),\n+ 'w', encoding='utf-8') as fd:\n+ _template = _jinja_env.get_template(tmpl_name)\n+ fd.write(_template.render(**tmpl_vars))\n+ except OSError as e:\n+ print('Error: {}'.format(e))\n+\n+\n def main():\n parser = argparse.ArgumentParser(\n description=\"A kickstarter for Pelican\",\n@@ -306,46 +317,16 @@\n except OSError as e:\n print('Error: {}'.format(e))\n \n- try:\n- with open(os.path.join(CONF['basedir'], 'pelicanconf.py'),\n- 'w', encoding='utf-8') as fd:\n- conf_python = dict()\n- for key, value in CONF.items():\n- conf_python[key] = repr(value)\n+ conf_python = dict()\n+ for key, value in CONF.items():\n+ conf_python[key] = repr(value)\n+ render_jinja_template('pelicanconf.py.jinja2', conf_python, 'pelicanconf.py')\n \n- _template = _jinja_env.get_template('pelicanconf.py.jinja2')\n- fd.write(_template.render(**conf_python))\n- fd.close()\n- except OSError as e:\n- print('Error: {}'.format(e))\n-\n- try:\n- with open(os.path.join(CONF['basedir'], 'publishconf.py'),\n- 'w', encoding='utf-8') as fd:\n- _template = _jinja_env.get_template('publishconf.py.jinja2')\n- fd.write(_template.render(**CONF))\n- fd.close()\n- except OSError as e:\n- print('Error: {}'.format(e))\n+ render_jinja_template('publishconf.py.jinja2', CONF, 'publishconf.py')\n \n if automation:\n- try:\n- with open(os.path.join(CONF['basedir'], 'tasks.py'),\n- 'w', encoding='utf-8') as fd:\n- _template = _jinja_env.get_template('tasks.py.jinja2')\n- fd.write(_template.render(**CONF))\n- fd.close()\n- except OSError as e:\n- print('Error: {}'.format(e))\n- try:\n- with open(os.path.join(CONF['basedir'], 'Makefile'),\n- 'w', encoding='utf-8') as fd:\n- py_v = 'python3'\n- _template = _jinja_env.get_template('Makefile.jinja2')\n- fd.write(_template.render(py_v=py_v, **CONF))\n- fd.close()\n- except OSError as e:\n- print('Error: {}'.format(e))\n+ render_jinja_template('tasks.py.jinja2', CONF, 'tasks.py')\n+ render_jinja_template('Makefile.jinja2', CONF, 'Makefile')\n \n print('Done. Your new project is available at %s' % CONF['basedir'])\n", "issue": "Explicit `fd.close` inpelican_quickstart.py needed?\nIs there any reason we have that many explicit fd.close in `pelican/tools/pelican_quickstart.py`?\r\n\r\nShouldn't the close be handled by the context manager we use to open the fd in the first place? 
Can they be removed?\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport argparse\nimport locale\nimport os\n\nfrom jinja2 import Environment, FileSystemLoader\n\nimport pytz\n\ntry:\n import readline # NOQA\nexcept ImportError:\n pass\n\ntry:\n import tzlocal\n _DEFAULT_TIMEZONE = tzlocal.get_localzone().zone\nexcept ImportError:\n _DEFAULT_TIMEZONE = 'Europe/Rome'\n\nfrom pelican import __version__\n\nlocale.setlocale(locale.LC_ALL, '')\ntry:\n _DEFAULT_LANGUAGE = locale.getlocale()[0]\nexcept ValueError:\n # Don't fail on macosx: \"unknown locale: UTF-8\"\n _DEFAULT_LANGUAGE = None\nif _DEFAULT_LANGUAGE is None:\n _DEFAULT_LANGUAGE = 'en'\nelse:\n _DEFAULT_LANGUAGE = _DEFAULT_LANGUAGE.split('_')[0]\n\n_TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n \"templates\")\n_jinja_env = Environment(\n loader=FileSystemLoader(_TEMPLATES_DIR),\n trim_blocks=True,\n)\n\n\n_GITHUB_PAGES_BRANCHES = {\n 'personal': 'main',\n 'project': 'gh-pages'\n}\n\nCONF = {\n 'pelican': 'pelican',\n 'pelicanopts': '',\n 'basedir': os.curdir,\n 'ftp_host': 'localhost',\n 'ftp_user': 'anonymous',\n 'ftp_target_dir': '/',\n 'ssh_host': 'localhost',\n 'ssh_port': 22,\n 'ssh_user': 'root',\n 'ssh_target_dir': '/var/www',\n 's3_bucket': 'my_s3_bucket',\n 'cloudfiles_username': 'my_rackspace_username',\n 'cloudfiles_api_key': 'my_rackspace_api_key',\n 'cloudfiles_container': 'my_cloudfiles_container',\n 'dropbox_dir': '~/Dropbox/Public/',\n 'github_pages_branch': _GITHUB_PAGES_BRANCHES['project'],\n 'default_pagination': 10,\n 'siteurl': '',\n 'lang': _DEFAULT_LANGUAGE,\n 'timezone': _DEFAULT_TIMEZONE\n}\n\n# url for list of valid timezones\n_TZ_URL = 'https://en.wikipedia.org/wiki/List_of_tz_database_time_zones'\n\n\n# Create a 'marked' default path, to determine if someone has supplied\n# a path on the command-line.\nclass _DEFAULT_PATH_TYPE(str):\n is_default_path = True\n\n\n_DEFAULT_PATH = _DEFAULT_PATH_TYPE(os.curdir)\n\n\ndef ask(question, answer=str, default=None, length=None):\n if answer == str:\n r = ''\n while True:\n if default:\n r = input('> {} [{}] '.format(question, default))\n else:\n r = input('> {} '.format(question))\n\n r = r.strip()\n\n if len(r) <= 0:\n if default:\n r = default\n break\n else:\n print('You must enter something')\n else:\n if length and len(r) != length:\n print('Entry must be {} characters long'.format(length))\n else:\n break\n\n return r\n\n elif answer == bool:\n r = None\n while True:\n if default is True:\n r = input('> {} (Y/n) '.format(question))\n elif default is False:\n r = input('> {} (y/N) '.format(question))\n else:\n r = input('> {} (y/n) '.format(question))\n\n r = r.strip().lower()\n\n if r in ('y', 'yes'):\n r = True\n break\n elif r in ('n', 'no'):\n r = False\n break\n elif not r:\n r = default\n break\n else:\n print(\"You must answer 'yes' or 'no'\")\n return r\n elif answer == int:\n r = None\n while True:\n if default:\n r = input('> {} [{}] '.format(question, default))\n else:\n r = input('> {} '.format(question))\n\n r = r.strip()\n\n if not r:\n r = default\n break\n\n try:\n r = int(r)\n break\n except ValueError:\n print('You must enter an integer')\n return r\n else:\n raise NotImplementedError(\n 'Argument `answer` must be str, bool, or integer')\n\n\ndef ask_timezone(question, default, tzurl):\n \"\"\"Prompt for time zone and validate input\"\"\"\n lower_tz = [tz.lower() for tz in pytz.all_timezones]\n while True:\n r = ask(question, str, default)\n r = r.strip().replace(' ', '_').lower()\n if r in 
lower_tz:\n r = pytz.all_timezones[lower_tz.index(r)]\n break\n else:\n print('Please enter a valid time zone:\\n'\n ' (check [{}])'.format(tzurl))\n return r\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"A kickstarter for Pelican\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-p', '--path', default=_DEFAULT_PATH,\n help=\"The path to generate the blog into\")\n parser.add_argument('-t', '--title', metavar=\"title\",\n help='Set the title of the website')\n parser.add_argument('-a', '--author', metavar=\"author\",\n help='Set the author name of the website')\n parser.add_argument('-l', '--lang', metavar=\"lang\",\n help='Set the default web site language')\n\n args = parser.parse_args()\n\n print('''Welcome to pelican-quickstart v{v}.\n\nThis script will help you create a new Pelican-based website.\n\nPlease answer the following questions so this script can generate the files\nneeded by Pelican.\n\n '''.format(v=__version__))\n\n project = os.path.join(\n os.environ.get('VIRTUAL_ENV', os.curdir), '.project')\n no_path_was_specified = hasattr(args.path, 'is_default_path')\n if os.path.isfile(project) and no_path_was_specified:\n CONF['basedir'] = open(project).read().rstrip(\"\\n\")\n print('Using project associated with current virtual environment. '\n 'Will save to:\\n%s\\n' % CONF['basedir'])\n else:\n CONF['basedir'] = os.path.abspath(os.path.expanduser(\n ask('Where do you want to create your new web site?',\n answer=str, default=args.path)))\n\n CONF['sitename'] = ask('What will be the title of this web site?',\n answer=str, default=args.title)\n CONF['author'] = ask('Who will be the author of this web site?',\n answer=str, default=args.author)\n CONF['lang'] = ask('What will be the default language of this web site?',\n str, args.lang or CONF['lang'], 2)\n\n if ask('Do you want to specify a URL prefix? e.g., https://example.com ',\n answer=bool, default=True):\n CONF['siteurl'] = ask('What is your URL prefix? 
(see '\n 'above example; no trailing slash)',\n str, CONF['siteurl'])\n\n CONF['with_pagination'] = ask('Do you want to enable article pagination?',\n bool, bool(CONF['default_pagination']))\n\n if CONF['with_pagination']:\n CONF['default_pagination'] = ask('How many articles per page '\n 'do you want?',\n int, CONF['default_pagination'])\n else:\n CONF['default_pagination'] = False\n\n CONF['timezone'] = ask_timezone('What is your time zone?',\n CONF['timezone'], _TZ_URL)\n\n automation = ask('Do you want to generate a tasks.py/Makefile '\n 'to automate generation and publishing?', bool, True)\n\n if automation:\n if ask('Do you want to upload your website using FTP?',\n answer=bool, default=False):\n CONF['ftp'] = True,\n CONF['ftp_host'] = ask('What is the hostname of your FTP server?',\n str, CONF['ftp_host'])\n CONF['ftp_user'] = ask('What is your username on that server?',\n str, CONF['ftp_user'])\n CONF['ftp_target_dir'] = ask('Where do you want to put your '\n 'web site on that server?',\n str, CONF['ftp_target_dir'])\n if ask('Do you want to upload your website using SSH?',\n answer=bool, default=False):\n CONF['ssh'] = True,\n CONF['ssh_host'] = ask('What is the hostname of your SSH server?',\n str, CONF['ssh_host'])\n CONF['ssh_port'] = ask('What is the port of your SSH server?',\n int, CONF['ssh_port'])\n CONF['ssh_user'] = ask('What is your username on that server?',\n str, CONF['ssh_user'])\n CONF['ssh_target_dir'] = ask('Where do you want to put your '\n 'web site on that server?',\n str, CONF['ssh_target_dir'])\n\n if ask('Do you want to upload your website using Dropbox?',\n answer=bool, default=False):\n CONF['dropbox'] = True,\n CONF['dropbox_dir'] = ask('Where is your Dropbox directory?',\n str, CONF['dropbox_dir'])\n\n if ask('Do you want to upload your website using S3?',\n answer=bool, default=False):\n CONF['s3'] = True,\n CONF['s3_bucket'] = ask('What is the name of your S3 bucket?',\n str, CONF['s3_bucket'])\n\n if ask('Do you want to upload your website using '\n 'Rackspace Cloud Files?', answer=bool, default=False):\n CONF['cloudfiles'] = True,\n CONF['cloudfiles_username'] = ask('What is your Rackspace '\n 'Cloud username?', str,\n CONF['cloudfiles_username'])\n CONF['cloudfiles_api_key'] = ask('What is your Rackspace '\n 'Cloud API key?', str,\n CONF['cloudfiles_api_key'])\n CONF['cloudfiles_container'] = ask('What is the name of your '\n 'Cloud Files container?',\n str,\n CONF['cloudfiles_container'])\n\n if ask('Do you want to upload your website using GitHub Pages?',\n answer=bool, default=False):\n CONF['github'] = True,\n if ask('Is this your personal page (username.github.io)?',\n answer=bool, default=False):\n CONF['github_pages_branch'] = \\\n _GITHUB_PAGES_BRANCHES['personal']\n else:\n CONF['github_pages_branch'] = \\\n _GITHUB_PAGES_BRANCHES['project']\n\n try:\n os.makedirs(os.path.join(CONF['basedir'], 'content'))\n except OSError as e:\n print('Error: {}'.format(e))\n\n try:\n os.makedirs(os.path.join(CONF['basedir'], 'output'))\n except OSError as e:\n print('Error: {}'.format(e))\n\n try:\n with open(os.path.join(CONF['basedir'], 'pelicanconf.py'),\n 'w', encoding='utf-8') as fd:\n conf_python = dict()\n for key, value in CONF.items():\n conf_python[key] = repr(value)\n\n _template = _jinja_env.get_template('pelicanconf.py.jinja2')\n fd.write(_template.render(**conf_python))\n fd.close()\n except OSError as e:\n print('Error: {}'.format(e))\n\n try:\n with open(os.path.join(CONF['basedir'], 'publishconf.py'),\n 'w', encoding='utf-8') as fd:\n 
_template = _jinja_env.get_template('publishconf.py.jinja2')\n fd.write(_template.render(**CONF))\n fd.close()\n except OSError as e:\n print('Error: {}'.format(e))\n\n if automation:\n try:\n with open(os.path.join(CONF['basedir'], 'tasks.py'),\n 'w', encoding='utf-8') as fd:\n _template = _jinja_env.get_template('tasks.py.jinja2')\n fd.write(_template.render(**CONF))\n fd.close()\n except OSError as e:\n print('Error: {}'.format(e))\n try:\n with open(os.path.join(CONF['basedir'], 'Makefile'),\n 'w', encoding='utf-8') as fd:\n py_v = 'python3'\n _template = _jinja_env.get_template('Makefile.jinja2')\n fd.write(_template.render(py_v=py_v, **CONF))\n fd.close()\n except OSError as e:\n print('Error: {}'.format(e))\n\n print('Done. Your new project is available at %s' % CONF['basedir'])\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "pelican/tools/pelican_quickstart.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport argparse\nimport locale\nimport os\nfrom typing import Mapping\n\nfrom jinja2 import Environment, FileSystemLoader\n\nimport pytz\n\ntry:\n import readline # NOQA\nexcept ImportError:\n pass\n\ntry:\n import tzlocal\n _DEFAULT_TIMEZONE = tzlocal.get_localzone().zone\nexcept ImportError:\n _DEFAULT_TIMEZONE = 'Europe/Rome'\n\nfrom pelican import __version__\n\nlocale.setlocale(locale.LC_ALL, '')\ntry:\n _DEFAULT_LANGUAGE = locale.getlocale()[0]\nexcept ValueError:\n # Don't fail on macosx: \"unknown locale: UTF-8\"\n _DEFAULT_LANGUAGE = None\nif _DEFAULT_LANGUAGE is None:\n _DEFAULT_LANGUAGE = 'en'\nelse:\n _DEFAULT_LANGUAGE = _DEFAULT_LANGUAGE.split('_')[0]\n\n_TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n \"templates\")\n_jinja_env = Environment(\n loader=FileSystemLoader(_TEMPLATES_DIR),\n trim_blocks=True,\n)\n\n\n_GITHUB_PAGES_BRANCHES = {\n 'personal': 'main',\n 'project': 'gh-pages'\n}\n\nCONF = {\n 'pelican': 'pelican',\n 'pelicanopts': '',\n 'basedir': os.curdir,\n 'ftp_host': 'localhost',\n 'ftp_user': 'anonymous',\n 'ftp_target_dir': '/',\n 'ssh_host': 'localhost',\n 'ssh_port': 22,\n 'ssh_user': 'root',\n 'ssh_target_dir': '/var/www',\n 's3_bucket': 'my_s3_bucket',\n 'cloudfiles_username': 'my_rackspace_username',\n 'cloudfiles_api_key': 'my_rackspace_api_key',\n 'cloudfiles_container': 'my_cloudfiles_container',\n 'dropbox_dir': '~/Dropbox/Public/',\n 'github_pages_branch': _GITHUB_PAGES_BRANCHES['project'],\n 'default_pagination': 10,\n 'siteurl': '',\n 'lang': _DEFAULT_LANGUAGE,\n 'timezone': _DEFAULT_TIMEZONE\n}\n\n# url for list of valid timezones\n_TZ_URL = 'https://en.wikipedia.org/wiki/List_of_tz_database_time_zones'\n\n\n# Create a 'marked' default path, to determine if someone has supplied\n# a path on the command-line.\nclass _DEFAULT_PATH_TYPE(str):\n is_default_path = True\n\n\n_DEFAULT_PATH = _DEFAULT_PATH_TYPE(os.curdir)\n\n\ndef ask(question, answer=str, default=None, length=None):\n if answer == str:\n r = ''\n while True:\n if default:\n r = input('> {} [{}] '.format(question, default))\n else:\n r = input('> {} '.format(question))\n\n r = r.strip()\n\n if len(r) <= 0:\n if default:\n r = default\n break\n else:\n print('You must enter something')\n else:\n if length and len(r) != length:\n print('Entry must be {} characters long'.format(length))\n else:\n break\n\n return r\n\n elif answer == bool:\n r = None\n while True:\n if default is True:\n r = input('> {} (Y/n) '.format(question))\n elif default is False:\n r = input('> {} (y/N) '.format(question))\n else:\n r = input('> {} (y/n) 
'.format(question))\n\n r = r.strip().lower()\n\n if r in ('y', 'yes'):\n r = True\n break\n elif r in ('n', 'no'):\n r = False\n break\n elif not r:\n r = default\n break\n else:\n print(\"You must answer 'yes' or 'no'\")\n return r\n elif answer == int:\n r = None\n while True:\n if default:\n r = input('> {} [{}] '.format(question, default))\n else:\n r = input('> {} '.format(question))\n\n r = r.strip()\n\n if not r:\n r = default\n break\n\n try:\n r = int(r)\n break\n except ValueError:\n print('You must enter an integer')\n return r\n else:\n raise NotImplementedError(\n 'Argument `answer` must be str, bool, or integer')\n\n\ndef ask_timezone(question, default, tzurl):\n \"\"\"Prompt for time zone and validate input\"\"\"\n lower_tz = [tz.lower() for tz in pytz.all_timezones]\n while True:\n r = ask(question, str, default)\n r = r.strip().replace(' ', '_').lower()\n if r in lower_tz:\n r = pytz.all_timezones[lower_tz.index(r)]\n break\n else:\n print('Please enter a valid time zone:\\n'\n ' (check [{}])'.format(tzurl))\n return r\n\n\ndef render_jinja_template(tmpl_name: str, tmpl_vars: Mapping, target_path: str):\n try:\n with open(os.path.join(CONF['basedir'], target_path),\n 'w', encoding='utf-8') as fd:\n _template = _jinja_env.get_template(tmpl_name)\n fd.write(_template.render(**tmpl_vars))\n except OSError as e:\n print('Error: {}'.format(e))\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"A kickstarter for Pelican\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-p', '--path', default=_DEFAULT_PATH,\n help=\"The path to generate the blog into\")\n parser.add_argument('-t', '--title', metavar=\"title\",\n help='Set the title of the website')\n parser.add_argument('-a', '--author', metavar=\"author\",\n help='Set the author name of the website')\n parser.add_argument('-l', '--lang', metavar=\"lang\",\n help='Set the default web site language')\n\n args = parser.parse_args()\n\n print('''Welcome to pelican-quickstart v{v}.\n\nThis script will help you create a new Pelican-based website.\n\nPlease answer the following questions so this script can generate the files\nneeded by Pelican.\n\n '''.format(v=__version__))\n\n project = os.path.join(\n os.environ.get('VIRTUAL_ENV', os.curdir), '.project')\n no_path_was_specified = hasattr(args.path, 'is_default_path')\n if os.path.isfile(project) and no_path_was_specified:\n CONF['basedir'] = open(project).read().rstrip(\"\\n\")\n print('Using project associated with current virtual environment. '\n 'Will save to:\\n%s\\n' % CONF['basedir'])\n else:\n CONF['basedir'] = os.path.abspath(os.path.expanduser(\n ask('Where do you want to create your new web site?',\n answer=str, default=args.path)))\n\n CONF['sitename'] = ask('What will be the title of this web site?',\n answer=str, default=args.title)\n CONF['author'] = ask('Who will be the author of this web site?',\n answer=str, default=args.author)\n CONF['lang'] = ask('What will be the default language of this web site?',\n str, args.lang or CONF['lang'], 2)\n\n if ask('Do you want to specify a URL prefix? e.g., https://example.com ',\n answer=bool, default=True):\n CONF['siteurl'] = ask('What is your URL prefix? 
(see '\n 'above example; no trailing slash)',\n str, CONF['siteurl'])\n\n CONF['with_pagination'] = ask('Do you want to enable article pagination?',\n bool, bool(CONF['default_pagination']))\n\n if CONF['with_pagination']:\n CONF['default_pagination'] = ask('How many articles per page '\n 'do you want?',\n int, CONF['default_pagination'])\n else:\n CONF['default_pagination'] = False\n\n CONF['timezone'] = ask_timezone('What is your time zone?',\n CONF['timezone'], _TZ_URL)\n\n automation = ask('Do you want to generate a tasks.py/Makefile '\n 'to automate generation and publishing?', bool, True)\n\n if automation:\n if ask('Do you want to upload your website using FTP?',\n answer=bool, default=False):\n CONF['ftp'] = True,\n CONF['ftp_host'] = ask('What is the hostname of your FTP server?',\n str, CONF['ftp_host'])\n CONF['ftp_user'] = ask('What is your username on that server?',\n str, CONF['ftp_user'])\n CONF['ftp_target_dir'] = ask('Where do you want to put your '\n 'web site on that server?',\n str, CONF['ftp_target_dir'])\n if ask('Do you want to upload your website using SSH?',\n answer=bool, default=False):\n CONF['ssh'] = True,\n CONF['ssh_host'] = ask('What is the hostname of your SSH server?',\n str, CONF['ssh_host'])\n CONF['ssh_port'] = ask('What is the port of your SSH server?',\n int, CONF['ssh_port'])\n CONF['ssh_user'] = ask('What is your username on that server?',\n str, CONF['ssh_user'])\n CONF['ssh_target_dir'] = ask('Where do you want to put your '\n 'web site on that server?',\n str, CONF['ssh_target_dir'])\n\n if ask('Do you want to upload your website using Dropbox?',\n answer=bool, default=False):\n CONF['dropbox'] = True,\n CONF['dropbox_dir'] = ask('Where is your Dropbox directory?',\n str, CONF['dropbox_dir'])\n\n if ask('Do you want to upload your website using S3?',\n answer=bool, default=False):\n CONF['s3'] = True,\n CONF['s3_bucket'] = ask('What is the name of your S3 bucket?',\n str, CONF['s3_bucket'])\n\n if ask('Do you want to upload your website using '\n 'Rackspace Cloud Files?', answer=bool, default=False):\n CONF['cloudfiles'] = True,\n CONF['cloudfiles_username'] = ask('What is your Rackspace '\n 'Cloud username?', str,\n CONF['cloudfiles_username'])\n CONF['cloudfiles_api_key'] = ask('What is your Rackspace '\n 'Cloud API key?', str,\n CONF['cloudfiles_api_key'])\n CONF['cloudfiles_container'] = ask('What is the name of your '\n 'Cloud Files container?',\n str,\n CONF['cloudfiles_container'])\n\n if ask('Do you want to upload your website using GitHub Pages?',\n answer=bool, default=False):\n CONF['github'] = True,\n if ask('Is this your personal page (username.github.io)?',\n answer=bool, default=False):\n CONF['github_pages_branch'] = \\\n _GITHUB_PAGES_BRANCHES['personal']\n else:\n CONF['github_pages_branch'] = \\\n _GITHUB_PAGES_BRANCHES['project']\n\n try:\n os.makedirs(os.path.join(CONF['basedir'], 'content'))\n except OSError as e:\n print('Error: {}'.format(e))\n\n try:\n os.makedirs(os.path.join(CONF['basedir'], 'output'))\n except OSError as e:\n print('Error: {}'.format(e))\n\n conf_python = dict()\n for key, value in CONF.items():\n conf_python[key] = repr(value)\n render_jinja_template('pelicanconf.py.jinja2', conf_python, 'pelicanconf.py')\n\n render_jinja_template('publishconf.py.jinja2', CONF, 'publishconf.py')\n\n if automation:\n render_jinja_template('tasks.py.jinja2', CONF, 'tasks.py')\n render_jinja_template('Makefile.jinja2', CONF, 'Makefile')\n\n print('Done. 
Your new project is available at %s' % CONF['basedir'])\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "pelican/tools/pelican_quickstart.py"}]}
| 4,032 | 786 |
gh_patches_debug_60760
|
rasdani/github-patches
|
git_diff
|
coala__coala-bears-310
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GitCommitBear: body_line_length shall be 72 and not 73
The standard git commit guidelines suggest wrapping the body at 72 chars. So just replace the default of 73 with 72.
--- END ISSUE ---
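As a sketch of the one-line change the issue asks for (the same change appears in the patch further below), only the default value of the `body_line_length` keyword argument needs to move from 73 to 72:

```python
# Sketch only, mirroring the signature in bears/vcs/git/GitCommitBear.py
# with the corrected default value.
def run(self,
        shortlog_length: int=50,
        body_line_length: int=72,  # git convention wraps commit message bodies at 72 chars
        force_body: bool=False,
        allow_empty_commit_message: bool=False,
        shortlog_regex: str="",
        shortlog_trailing_period: bool=None):
    ...
```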
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bears/vcs/git/GitCommitBear.py`
Content:
```
1 import re
2 import shutil
3 import os
4
5 from coalib.bears.GlobalBear import GlobalBear
6 from coalib.misc.ContextManagers import change_directory
7 from coalib.misc.Shell import run_shell_command
8 from coalib.results.Result import Result
9
10
11 class GitCommitBear(GlobalBear):
12
13 @classmethod
14 def check_prerequisites(cls):
15 if shutil.which("git") is None:
16 return "git is not installed."
17 else:
18 return True
19
20 def run(self,
21 shortlog_length: int=50,
22 body_line_length: int=73,
23 force_body: bool=False,
24 allow_empty_commit_message: bool=False,
25 shortlog_regex: str="",
26 shortlog_trailing_period: bool=None):
27 """
28 Checks the current git commit message at HEAD.
29
30 This bear ensures that the shortlog and body do not exceed a given
31 line-length and that a newline lies between them.
32
33 :param shortlog_length: The maximum length of the shortlog.
34 The shortlog is the first line of
35 the commit message. The newline
36 character at end does not count to
37 the length.
38 :param body_line_length: The maximum line-length of the body.
39 The newline character at each line
40 end does not count to the length.
41 :param force_body: Whether a body shall exist or not.
42 :param allow_empty_commit_message: Whether empty commit messages are
43 allowed or not.
44 :param shortlog_regex: A regex to check the shortlog with.
45 A full match of this regex is then
46 required. Passing an empty string
47 disable the regex-check.
48 :param shortlog_trailing_period: Whether a dot shall be enforced at
49 the end of the shortlog line.
50 Providing ``None`` means
51 "doesn't care".
52 """
53 with change_directory(self.get_config_dir() or os.getcwd()):
54 stdout, stderr = run_shell_command("git log -1 --pretty=%B")
55
56 if stderr:
57 self.err("git:", repr(stderr))
58 return
59
60 stdout = stdout.rstrip("\n").splitlines()
61
62 if len(stdout) == 0:
63 if not allow_empty_commit_message:
64 yield Result(self, "HEAD commit has no message.")
65 return
66
67 yield from self.check_shortlog(shortlog_length,
68 shortlog_regex,
69 shortlog_trailing_period,
70 stdout[0])
71 yield from self.check_body(body_line_length, force_body, stdout[1:])
72
73 def check_shortlog(self,
74 shortlog_length,
75 regex,
76 shortlog_trailing_period,
77 shortlog):
78 """
79 Checks the given shortlog.
80
81 :param shortlog_length: The maximum length of the shortlog.
82 The newline character at end does not
83 count to the length.
84 :param regex: A regex to check the shortlog with.
85 :param shortlog_trailing_period: Whether a dot shall be enforced at end
86 end or not (or ``None`` for "don't
87 care").
88 :param shortlog: The shortlog message string.
89 """
90 diff = len(shortlog) - shortlog_length
91 if diff > 0:
92 yield Result(self,
93 "Shortlog of HEAD commit is {} character(s) longer "
94 "than the limit ({} > {}).".format(
95 diff, len(shortlog), shortlog_length))
96
97 if (shortlog[-1] != ".") == shortlog_trailing_period:
98 yield Result(self,
99 "Shortlog of HEAD commit contains no period at end."
100 if shortlog_trailing_period else
101 "Shortlog of HEAD commit contains a period at end.")
102
103 if regex != "":
104 match = re.match(regex, shortlog)
105 # fullmatch() inside re-module exists sadly since 3.4, but we
106 # support 3.3 so we need to check that the regex matched completely
107 # ourselves.
108 if not match or match.end() != len(shortlog):
109 yield Result(
110 self,
111 "Shortlog of HEAD commit does not match given regex.")
112
113 def check_body(self, body_line_length, force_body, body):
114 """
115 Checks the given commit body.
116
117 :param body_line_length: The maximum line-length of the body. The
118 newline character at each line end does not
119 count to the length.
120 :param force_body: Whether a body shall exist or not.
121 :param body: The commit body splitted by lines.
122 """
123 if len(body) == 0:
124 if force_body:
125 yield Result(self, "No commit message body at HEAD.")
126 return
127
128 if body[0] != "":
129 yield Result(self, "No newline between shortlog and body at HEAD.")
130 return
131
132 if any(len(line) > body_line_length for line in body[1:]):
133 yield Result(self, "Body of HEAD commit contains too long lines.")
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bears/vcs/git/GitCommitBear.py b/bears/vcs/git/GitCommitBear.py
--- a/bears/vcs/git/GitCommitBear.py
+++ b/bears/vcs/git/GitCommitBear.py
@@ -19,7 +19,7 @@
def run(self,
shortlog_length: int=50,
- body_line_length: int=73,
+ body_line_length: int=72,
force_body: bool=False,
allow_empty_commit_message: bool=False,
shortlog_regex: str="",
|
{"golden_diff": "diff --git a/bears/vcs/git/GitCommitBear.py b/bears/vcs/git/GitCommitBear.py\n--- a/bears/vcs/git/GitCommitBear.py\n+++ b/bears/vcs/git/GitCommitBear.py\n@@ -19,7 +19,7 @@\n \n def run(self,\n shortlog_length: int=50,\n- body_line_length: int=73,\n+ body_line_length: int=72,\n force_body: bool=False,\n allow_empty_commit_message: bool=False,\n shortlog_regex: str=\"\",\n", "issue": "GitCommitBear: body_line_length shall be 72 and not 73\nThe standard git commit guidelines suggest wrapping the body at 72 chars. So just replace the default of 73 with 72.\n\n", "before_files": [{"content": "import re\nimport shutil\nimport os\n\nfrom coalib.bears.GlobalBear import GlobalBear\nfrom coalib.misc.ContextManagers import change_directory\nfrom coalib.misc.Shell import run_shell_command\nfrom coalib.results.Result import Result\n\n\nclass GitCommitBear(GlobalBear):\n\n @classmethod\n def check_prerequisites(cls):\n if shutil.which(\"git\") is None:\n return \"git is not installed.\"\n else:\n return True\n\n def run(self,\n shortlog_length: int=50,\n body_line_length: int=73,\n force_body: bool=False,\n allow_empty_commit_message: bool=False,\n shortlog_regex: str=\"\",\n shortlog_trailing_period: bool=None):\n \"\"\"\n Checks the current git commit message at HEAD.\n\n This bear ensures that the shortlog and body do not exceed a given\n line-length and that a newline lies between them.\n\n :param shortlog_length: The maximum length of the shortlog.\n The shortlog is the first line of\n the commit message. The newline\n character at end does not count to\n the length.\n :param body_line_length: The maximum line-length of the body.\n The newline character at each line\n end does not count to the length.\n :param force_body: Whether a body shall exist or not.\n :param allow_empty_commit_message: Whether empty commit messages are\n allowed or not.\n :param shortlog_regex: A regex to check the shortlog with.\n A full match of this regex is then\n required. 
Passing an empty string\n disable the regex-check.\n :param shortlog_trailing_period: Whether a dot shall be enforced at\n the end of the shortlog line.\n Providing ``None`` means\n \"doesn't care\".\n \"\"\"\n with change_directory(self.get_config_dir() or os.getcwd()):\n stdout, stderr = run_shell_command(\"git log -1 --pretty=%B\")\n\n if stderr:\n self.err(\"git:\", repr(stderr))\n return\n\n stdout = stdout.rstrip(\"\\n\").splitlines()\n\n if len(stdout) == 0:\n if not allow_empty_commit_message:\n yield Result(self, \"HEAD commit has no message.\")\n return\n\n yield from self.check_shortlog(shortlog_length,\n shortlog_regex,\n shortlog_trailing_period,\n stdout[0])\n yield from self.check_body(body_line_length, force_body, stdout[1:])\n\n def check_shortlog(self,\n shortlog_length,\n regex,\n shortlog_trailing_period,\n shortlog):\n \"\"\"\n Checks the given shortlog.\n\n :param shortlog_length: The maximum length of the shortlog.\n The newline character at end does not\n count to the length.\n :param regex: A regex to check the shortlog with.\n :param shortlog_trailing_period: Whether a dot shall be enforced at end\n end or not (or ``None`` for \"don't\n care\").\n :param shortlog: The shortlog message string.\n \"\"\"\n diff = len(shortlog) - shortlog_length\n if diff > 0:\n yield Result(self,\n \"Shortlog of HEAD commit is {} character(s) longer \"\n \"than the limit ({} > {}).\".format(\n diff, len(shortlog), shortlog_length))\n\n if (shortlog[-1] != \".\") == shortlog_trailing_period:\n yield Result(self,\n \"Shortlog of HEAD commit contains no period at end.\"\n if shortlog_trailing_period else\n \"Shortlog of HEAD commit contains a period at end.\")\n\n if regex != \"\":\n match = re.match(regex, shortlog)\n # fullmatch() inside re-module exists sadly since 3.4, but we\n # support 3.3 so we need to check that the regex matched completely\n # ourselves.\n if not match or match.end() != len(shortlog):\n yield Result(\n self,\n \"Shortlog of HEAD commit does not match given regex.\")\n\n def check_body(self, body_line_length, force_body, body):\n \"\"\"\n Checks the given commit body.\n\n :param body_line_length: The maximum line-length of the body. 
The\n newline character at each line end does not\n count to the length.\n :param force_body: Whether a body shall exist or not.\n :param body: The commit body splitted by lines.\n \"\"\"\n if len(body) == 0:\n if force_body:\n yield Result(self, \"No commit message body at HEAD.\")\n return\n\n if body[0] != \"\":\n yield Result(self, \"No newline between shortlog and body at HEAD.\")\n return\n\n if any(len(line) > body_line_length for line in body[1:]):\n yield Result(self, \"Body of HEAD commit contains too long lines.\")\n", "path": "bears/vcs/git/GitCommitBear.py"}], "after_files": [{"content": "import re\nimport shutil\nimport os\n\nfrom coalib.bears.GlobalBear import GlobalBear\nfrom coalib.misc.ContextManagers import change_directory\nfrom coalib.misc.Shell import run_shell_command\nfrom coalib.results.Result import Result\n\n\nclass GitCommitBear(GlobalBear):\n\n @classmethod\n def check_prerequisites(cls):\n if shutil.which(\"git\") is None:\n return \"git is not installed.\"\n else:\n return True\n\n def run(self,\n shortlog_length: int=50,\n body_line_length: int=72,\n force_body: bool=False,\n allow_empty_commit_message: bool=False,\n shortlog_regex: str=\"\",\n shortlog_trailing_period: bool=None):\n \"\"\"\n Checks the current git commit message at HEAD.\n\n This bear ensures that the shortlog and body do not exceed a given\n line-length and that a newline lies between them.\n\n :param shortlog_length: The maximum length of the shortlog.\n The shortlog is the first line of\n the commit message. The newline\n character at end does not count to\n the length.\n :param body_line_length: The maximum line-length of the body.\n The newline character at each line\n end does not count to the length.\n :param force_body: Whether a body shall exist or not.\n :param allow_empty_commit_message: Whether empty commit messages are\n allowed or not.\n :param shortlog_regex: A regex to check the shortlog with.\n A full match of this regex is then\n required. 
Passing an empty string\n disable the regex-check.\n :param shortlog_trailing_period: Whether a dot shall be enforced at\n the end of the shortlog line.\n Providing ``None`` means\n \"doesn't care\".\n \"\"\"\n with change_directory(self.get_config_dir() or os.getcwd()):\n stdout, stderr = run_shell_command(\"git log -1 --pretty=%B\")\n\n if stderr:\n self.err(\"git:\", repr(stderr))\n return\n\n stdout = stdout.rstrip(\"\\n\").splitlines()\n\n if len(stdout) == 0:\n if not allow_empty_commit_message:\n yield Result(self, \"HEAD commit has no message.\")\n return\n\n yield from self.check_shortlog(shortlog_length,\n shortlog_regex,\n shortlog_trailing_period,\n stdout[0])\n yield from self.check_body(body_line_length, force_body, stdout[1:])\n\n def check_shortlog(self,\n shortlog_length,\n regex,\n shortlog_trailing_period,\n shortlog):\n \"\"\"\n Checks the given shortlog.\n\n :param shortlog_length: The maximum length of the shortlog.\n The newline character at end does not\n count to the length.\n :param regex: A regex to check the shortlog with.\n :param shortlog_trailing_period: Whether a dot shall be enforced at end\n end or not (or ``None`` for \"don't\n care\").\n :param shortlog: The shortlog message string.\n \"\"\"\n diff = len(shortlog) - shortlog_length\n if diff > 0:\n yield Result(self,\n \"Shortlog of HEAD commit is {} character(s) longer \"\n \"than the limit ({} > {}).\".format(\n diff, len(shortlog), shortlog_length))\n\n if (shortlog[-1] != \".\") == shortlog_trailing_period:\n yield Result(self,\n \"Shortlog of HEAD commit contains no period at end.\"\n if shortlog_trailing_period else\n \"Shortlog of HEAD commit contains a period at end.\")\n\n if regex != \"\":\n match = re.match(regex, shortlog)\n # fullmatch() inside re-module exists sadly since 3.4, but we\n # support 3.3 so we need to check that the regex matched completely\n # ourselves.\n if not match or match.end() != len(shortlog):\n yield Result(\n self,\n \"Shortlog of HEAD commit does not match given regex.\")\n\n def check_body(self, body_line_length, force_body, body):\n \"\"\"\n Checks the given commit body.\n\n :param body_line_length: The maximum line-length of the body. The\n newline character at each line end does not\n count to the length.\n :param force_body: Whether a body shall exist or not.\n :param body: The commit body splitted by lines.\n \"\"\"\n if len(body) == 0:\n if force_body:\n yield Result(self, \"No commit message body at HEAD.\")\n return\n\n if body[0] != \"\":\n yield Result(self, \"No newline between shortlog and body at HEAD.\")\n return\n\n if any(len(line) > body_line_length for line in body[1:]):\n yield Result(self, \"Body of HEAD commit contains too long lines.\")\n", "path": "bears/vcs/git/GitCommitBear.py"}]}
| 1,663 | 126 |
gh_patches_debug_8972
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__pyro-2367
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] pyro.distributions.InverseGamma.sample does not work on GPU
### Issue Description
`pyro.distributions.InverseGamma.sample` fails with the following error when its arguments are on the GPU:
```
RuntimeError: iter.device(arg).is_cuda() INTERNAL ASSERT FAILED at /pytorch/aten/src/ATen/native/cuda/Loops.cuh:197, please report a bug to PyTorch.
```
I think the problem is in `__init__`:
```python
def __init__(self, concentration, rate, validate_args=None):
base_dist = Gamma(concentration, rate)
super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)
```
The argument to `PowerTransform` should probably be something like `-torch.ones_like(rate)`.
### Environment
Ubuntu 16.04, Python 3.7, PyTorch 1.4.0, Pyro 1.3.0
### Code Snippet
```python
>>> import torch
>>> from pyro.distributions import Gamma, InverseGamma, TransformedDistribution
>>> from pyro.distributions.transforms import PowerTransform
>>>
>>> concentration = torch.tensor(1.0).to("cuda")
>>> rate = torch.tensor(1.0).to("cuda")
>>>
>>> # InverseGamma.sample fails with an error
>>> InverseGamma(concentration, rate).sample()
RuntimeError: iter.device(arg).is_cuda() INTERNAL ASSERT FAILED at /pytorch/aten/src/ATen/native/cuda/Loops.cuh:197, please report a bug to PyTorch.
>>>
>>> # The equivalent TransformedDistribution is fine
>>> TransformedDistribution(
... Gamma(concentration, rate),
... PowerTransform(torch.tensor(-1.0).to("cuda")),
... ).sample()
tensor(0.5707, device='cuda:0')
```
--- END ISSUE ---
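A hedged sketch of the fix direction: build the exponent handed to `PowerTransform` from one of the distribution's own parameters so it is created on the same device. The patch further below does this with `new_ones` on the base distribution's rate, which is in the same spirit as the `torch.ones_like(rate)` idea from the issue. The sketch assumes the imports already present in the module shown below.

```python
# Sketch of InverseGamma.__init__ with a device-aware exponent; not the final patch text.
def __init__(self, concentration, rate, validate_args=None):
    base_dist = Gamma(concentration, rate)
    super().__init__(base_dist,
                     PowerTransform(-base_dist.rate.new_ones(())),
                     validate_args=validate_args)
```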
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyro/distributions/inverse_gamma.py`
Content:
```
1 # Copyright (c) 2017-2019 Uber Technologies, Inc.
2 # SPDX-License-Identifier: Apache-2.0
3
4 from torch.distributions import constraints
5 from torch.distributions.transforms import PowerTransform
6 from pyro.distributions.torch import Gamma, TransformedDistribution
7
8
9 class InverseGamma(TransformedDistribution):
10 r"""
11 Creates an inverse-gamma distribution parameterized by
12 `concentration` and `rate`.
13
14 X ~ Gamma(concentration, rate)
15 Y = 1/X ~ InverseGamma(concentration, rate)
16
17 :param torch.Tensor concentration: the concentration parameter (i.e. alpha).
18 :param torch.Tensor rate: the rate parameter (i.e. beta).
19 """
20 arg_constraints = {'concentration': constraints.positive, 'rate': constraints.positive}
21 support = constraints.positive
22 has_rsample = True
23
24 def __init__(self, concentration, rate, validate_args=None):
25 base_dist = Gamma(concentration, rate)
26 super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)
27
28 def expand(self, batch_shape, _instance=None):
29 new = self._get_checked_instance(InverseGamma, _instance)
30 return super().expand(batch_shape, _instance=new)
31
32 @property
33 def concentration(self):
34 return self.base_dist.concentration
35
36 @property
37 def rate(self):
38 return self.base_dist.rate
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyro/distributions/inverse_gamma.py b/pyro/distributions/inverse_gamma.py
--- a/pyro/distributions/inverse_gamma.py
+++ b/pyro/distributions/inverse_gamma.py
@@ -23,7 +23,8 @@
def __init__(self, concentration, rate, validate_args=None):
base_dist = Gamma(concentration, rate)
- super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)
+ super().__init__(base_dist, PowerTransform(-base_dist.rate.new_ones(())),
+ validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(InverseGamma, _instance)
|
{"golden_diff": "diff --git a/pyro/distributions/inverse_gamma.py b/pyro/distributions/inverse_gamma.py\n--- a/pyro/distributions/inverse_gamma.py\n+++ b/pyro/distributions/inverse_gamma.py\n@@ -23,7 +23,8 @@\n \n def __init__(self, concentration, rate, validate_args=None):\n base_dist = Gamma(concentration, rate)\n- super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)\n+ super().__init__(base_dist, PowerTransform(-base_dist.rate.new_ones(())),\n+ validate_args=validate_args)\n \n def expand(self, batch_shape, _instance=None):\n new = self._get_checked_instance(InverseGamma, _instance)\n", "issue": "[bug] pyro.distributions.InverseGamma.sample does not work on GPU\n### Issue Description\r\n`pyro.distributions.InverseGamma.sample` fails with the following error when its arguments are on the GPU:\r\n```\r\nRuntimeError: iter.device(arg).is_cuda() INTERNAL ASSERT FAILED at /pytorch/aten/src/ATen/native/cuda/Loops.cuh:197, please report a bug to PyTorch.\r\n```\r\n\r\nI think the problem is in `__init__`:\r\n```python\r\n def __init__(self, concentration, rate, validate_args=None):\r\n base_dist = Gamma(concentration, rate)\r\n super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)\r\n```\r\nThe argument to `PowerTransform` should probably be something like `-torch.ones_like(rate)`.\r\n\r\n### Environment\r\nUbuntu 16.04, Python 3.7, PyTorch 1.4.0, Pyro 1.3.0\r\n\r\n### Code Snippet\r\n```python\r\n>>> import torch\r\n>>> from pyro.distributions import Gamma, InverseGamma, TransformedDistribution\r\n>>> from pyro.distributions.transforms import PowerTransform\r\n>>>\r\n>>> concentration = torch.tensor(1.0).to(\"cuda\")\r\n>>> rate = torch.tensor(1.0).to(\"cuda\")\r\n>>> \r\n>>> # InverseGamma.sample fails with an error\r\n>>> InverseGamma(concentration, rate).sample()\r\nRuntimeError: iter.device(arg).is_cuda() INTERNAL ASSERT FAILED at /pytorch/aten/src/ATen/native/cuda/Loops.cuh:197, please report a bug to PyTorch.\r\n>>> \r\n>>> # The equivalent TransformedDistribution is fine\r\n>>> TransformedDistribution(\r\n... Gamma(concentration, rate),\r\n... PowerTransform(torch.tensor(-1.0).to(\"cuda\")),\r\n... ).sample()\r\ntensor(0.5707, device='cuda:0')\r\n```\n", "before_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom torch.distributions import constraints\nfrom torch.distributions.transforms import PowerTransform\nfrom pyro.distributions.torch import Gamma, TransformedDistribution\n\n\nclass InverseGamma(TransformedDistribution):\n r\"\"\"\n Creates an inverse-gamma distribution parameterized by\n `concentration` and `rate`.\n\n X ~ Gamma(concentration, rate)\n Y = 1/X ~ InverseGamma(concentration, rate)\n\n :param torch.Tensor concentration: the concentration parameter (i.e. alpha).\n :param torch.Tensor rate: the rate parameter (i.e. 
beta).\n \"\"\"\n arg_constraints = {'concentration': constraints.positive, 'rate': constraints.positive}\n support = constraints.positive\n has_rsample = True\n\n def __init__(self, concentration, rate, validate_args=None):\n base_dist = Gamma(concentration, rate)\n super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)\n\n def expand(self, batch_shape, _instance=None):\n new = self._get_checked_instance(InverseGamma, _instance)\n return super().expand(batch_shape, _instance=new)\n\n @property\n def concentration(self):\n return self.base_dist.concentration\n\n @property\n def rate(self):\n return self.base_dist.rate\n", "path": "pyro/distributions/inverse_gamma.py"}], "after_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom torch.distributions import constraints\nfrom torch.distributions.transforms import PowerTransform\nfrom pyro.distributions.torch import Gamma, TransformedDistribution\n\n\nclass InverseGamma(TransformedDistribution):\n r\"\"\"\n Creates an inverse-gamma distribution parameterized by\n `concentration` and `rate`.\n\n X ~ Gamma(concentration, rate)\n Y = 1/X ~ InverseGamma(concentration, rate)\n\n :param torch.Tensor concentration: the concentration parameter (i.e. alpha).\n :param torch.Tensor rate: the rate parameter (i.e. beta).\n \"\"\"\n arg_constraints = {'concentration': constraints.positive, 'rate': constraints.positive}\n support = constraints.positive\n has_rsample = True\n\n def __init__(self, concentration, rate, validate_args=None):\n base_dist = Gamma(concentration, rate)\n super().__init__(base_dist, PowerTransform(-base_dist.rate.new_ones(())),\n validate_args=validate_args)\n\n def expand(self, batch_shape, _instance=None):\n new = self._get_checked_instance(InverseGamma, _instance)\n return super().expand(batch_shape, _instance=new)\n\n @property\n def concentration(self):\n return self.base_dist.concentration\n\n @property\n def rate(self):\n return self.base_dist.rate\n", "path": "pyro/distributions/inverse_gamma.py"}]}
| 1,053 | 160 |
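A minimal sketch of the device-placement point behind the InverseGamma patch above, rebuilt outside the class. It mirrors the golden diff's `-base_dist.rate.new_ones(())` so the transform's exponent inherits the device and dtype of the Gamma parameters; a plain Python `-1.0` exponent implicitly lives on the CPU, which is what triggers the CUDA assertion quoted in the issue. Parameter values are arbitrary and the snippet falls back to CPU when CUDA is unavailable.

```python
# Sketch only: the fixed construction, assembled by hand for illustration.
import torch
from pyro.distributions import Gamma, TransformedDistribution
from pyro.distributions.transforms import PowerTransform

device = "cuda" if torch.cuda.is_available() else "cpu"
concentration = torch.tensor(2.0, device=device)
rate = torch.tensor(3.0, device=device)

base_dist = Gamma(concentration, rate)
# Deriving the exponent from an existing parameter keeps it co-located with the
# Gamma's tensors, unlike PowerTransform(-1.0), whose float exponent stays on CPU.
transform = PowerTransform(-base_dist.rate.new_ones(()))
inv_gamma_like = TransformedDistribution(base_dist, transform)
print(inv_gamma_like.sample())
```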
gh_patches_debug_20498
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-1228
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GET on default bucket increases quota usage
While investigating #1225, I discovered that *every* GET request to e.g. `/buckets/default/collections/some-collection` causes quota usage to increase. For example,
```
select * from records where collection_id='quota';
id | parent_id
| collection_id | last_modified |
data
-----------------+--------------------------------------------------------------
-------------------+---------------+----------------------------+---------------
----------------------------------------------------
bucket_info | /buckets/74f86a68-a03c-a5f5-fb29-e421b362f5b5
| quota | 2017-05-22 18:07:14.715782 | {"record_count
": 0, "storage_size": 1745, "collection_count": 13}
collection_info | /buckets/74f86a68-a03c-a5f5-fb29-e421b362f5b5/collections/a-n
ewest-collection-3 | quota | 2017-05-22 18:07:14.716923 | {"record_count
": 0, "storage_size": 540}
```
```
> http --auth 'user:pass6' 'localhost:8888/v1/buckets/default/collections/a-newest-collection-3'
HTTP/1.1 200 OK
..... more junk .....
```
```
select * from records where collection_id='quota';
id | parent_id
| collection_id | last_modified |
data
-----------------+--------------------------------------------------------------
-------------------+---------------+----------------------------+---------------
----------------------------------------------------
bucket_info | /buckets/74f86a68-a03c-a5f5-fb29-e421b362f5b5
| quota | 2017-05-22 18:56:52.448115 | {"record_count
": 0, "storage_size": 1880, "collection_count": 14}
collection_info | /buckets/74f86a68-a03c-a5f5-fb29-e421b362f5b5/collections/a-n
ewest-collection-3 | quota | 2017-05-22 18:56:52.4493 | {"record_count
": 0, "storage_size": 600}
```
This appears to be partly because every request that goes to the default bucket "creates" a bucket and collection.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/plugins/default_bucket/__init__.py`
Content:
```
1 import uuid
2
3 from pyramid import httpexceptions
4 from pyramid.settings import asbool
5 from pyramid.security import NO_PERMISSION_REQUIRED, Authenticated
6
7 from kinto.core.errors import raise_invalid
8 from kinto.core.events import ACTIONS
9 from kinto.core.utils import (
10 build_request, reapply_cors, hmac_digest, instance_uri, view_lookup)
11
12 from kinto.authorization import RouteFactory
13 from kinto.views.buckets import Bucket
14 from kinto.views.collections import Collection
15
16
17 def create_bucket(request, bucket_id):
18 """Create a bucket if it doesn't exists."""
19 bucket_put = (request.method.lower() == 'put' and
20 request.path.endswith('buckets/default'))
21 # Do nothing if current request will already create the bucket.
22 if bucket_put:
23 return
24
25 # Do not intent to create multiple times per request (e.g. in batch).
26 already_created = request.bound_data.setdefault('buckets', {})
27 if bucket_id in already_created:
28 return
29
30 bucket_uri = instance_uri(request, 'bucket', id=bucket_id)
31 bucket = resource_create_object(request=request,
32 resource_cls=Bucket,
33 uri=bucket_uri)
34 already_created[bucket_id] = bucket
35
36
37 def create_collection(request, bucket_id):
38 # Do nothing if current request does not involve a collection.
39 subpath = request.matchdict.get('subpath')
40 if not (subpath and subpath.rstrip('/').startswith('collections/')):
41 return
42
43 collection_id = subpath.split('/')[1]
44 collection_uri = instance_uri(request, 'collection',
45 bucket_id=bucket_id,
46 id=collection_id)
47
48 # Do not intent to create multiple times per request (e.g. in batch).
49 already_created = request.bound_data.setdefault('collections', {})
50 if collection_uri in already_created:
51 return
52
53 # Do nothing if current request will already create the collection.
54 collection_put = (request.method.lower() == 'put' and
55 request.path.endswith(collection_id))
56 if collection_put:
57 return
58
59 collection = resource_create_object(request=request,
60 resource_cls=Collection,
61 uri=collection_uri)
62 already_created[collection_uri] = collection
63
64
65 def resource_create_object(request, resource_cls, uri):
66 """In the default bucket, the bucket and collection are implicitly
67 created. This helper instantiate the resource and simulate a request
68 with its RootFactory on the instantiated resource.
69 :returns: the created object
70 :rtype: dict
71 """
72 resource_name, matchdict = view_lookup(request, uri)
73
74 # Build a fake request, mainly used to populate the create events that
75 # will be triggered by the resource.
76 fakerequest = build_request(request, {
77 'method': 'PUT',
78 'path': uri,
79 })
80 fakerequest.matchdict = matchdict
81 fakerequest.bound_data = request.bound_data
82 fakerequest.authn_type = request.authn_type
83 fakerequest.selected_userid = request.selected_userid
84 fakerequest.errors = request.errors
85 fakerequest.current_resource_name = resource_name
86
87 obj_id = matchdict['id']
88
89 # Fake context, required to instantiate a resource.
90 context = RouteFactory(fakerequest)
91 context.resource_name = resource_name
92 resource = resource_cls(fakerequest, context)
93
94 # Check that provided id is valid for this resource.
95 if not resource.model.id_generator.match(obj_id):
96 error_details = {
97 'location': 'path',
98 'description': "Invalid {} id".format(resource_name)
99 }
100 raise_invalid(resource.request, **error_details)
101
102 data = {'id': obj_id}
103 obj = resource.model.create_record(data, ignore_conflict=True)
104 # Since the current request is not a resource (but a straight Service),
105 # we simulate a request on a resource.
106 # This will be used in the resource event payload.
107 resource.postprocess(obj, action=ACTIONS.CREATE)
108 return obj
109
110
111 def default_bucket(request):
112 if request.method.lower() == 'options':
113 path = request.path.replace('default', 'unknown')
114 subrequest = build_request(request, {
115 'method': 'OPTIONS',
116 'path': path
117 })
118 return request.invoke_subrequest(subrequest)
119
120 if Authenticated not in request.effective_principals:
121 # Pass through the forbidden_view_config
122 raise httpexceptions.HTTPForbidden()
123
124 settings = request.registry.settings
125
126 if asbool(settings['readonly']):
127 raise httpexceptions.HTTPMethodNotAllowed()
128
129 bucket_id = request.default_bucket_id
130
131 # Implicit object creations.
132 # Make sure bucket exists
133 create_bucket(request, bucket_id)
134 # Make sure the collection exists
135 create_collection(request, bucket_id)
136
137 path = request.path.replace('/buckets/default', '/buckets/{}'.format(bucket_id))
138 querystring = request.url[(request.url.index(request.path) +
139 len(request.path)):]
140 try:
141 # If 'id' is provided as 'default', replace with actual bucket id.
142 body = request.json
143 body['data']['id'] = body['data']['id'].replace('default', bucket_id)
144 except:
145 body = request.body or {"data": {}}
146 subrequest = build_request(request, {
147 'method': request.method,
148 'path': path + querystring,
149 'body': body,
150 })
151 subrequest.bound_data = request.bound_data
152
153 try:
154 response = request.invoke_subrequest(subrequest)
155 except httpexceptions.HTTPException as error:
156 is_redirect = error.status_code < 400
157 if error.content_type == 'application/json' or is_redirect:
158 response = reapply_cors(subrequest, error)
159 else:
160 # Ask the upper level to format the error.
161 raise error
162 return response
163
164
165 def default_bucket_id(request):
166 settings = request.registry.settings
167 secret = settings['userid_hmac_secret']
168 # Build the user unguessable bucket_id UUID from its user_id
169 digest = hmac_digest(secret, request.prefixed_userid)
170 return str(uuid.UUID(digest[:32]))
171
172
173 def get_user_info(request):
174 user_info = {
175 'id': request.prefixed_userid,
176 'bucket': request.default_bucket_id
177 }
178 return user_info
179
180
181 def includeme(config):
182 # Redirect default to the right endpoint
183 config.add_view(default_bucket,
184 route_name='default_bucket',
185 permission=NO_PERMISSION_REQUIRED)
186 config.add_view(default_bucket,
187 route_name='default_bucket_collection',
188 permission=NO_PERMISSION_REQUIRED)
189
190 config.add_route('default_bucket_collection',
191 '/buckets/default/{subpath:.*}')
192 config.add_route('default_bucket', '/buckets/default')
193
194 # Provide helpers
195 config.add_request_method(default_bucket_id, reify=True)
196 # Override kinto.core default user info
197 config.add_request_method(get_user_info)
198
199 config.add_api_capability(
200 "default_bucket",
201 description="The default bucket is an alias for a personal"
202 " bucket where collections are created implicitly.",
203 url="https://kinto.readthedocs.io/en/latest/api/1.x/"
204 "buckets.html#personal-bucket-default")
205
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kinto/plugins/default_bucket/__init__.py b/kinto/plugins/default_bucket/__init__.py
--- a/kinto/plugins/default_bucket/__init__.py
+++ b/kinto/plugins/default_bucket/__init__.py
@@ -6,6 +6,7 @@
from kinto.core.errors import raise_invalid
from kinto.core.events import ACTIONS
+from kinto.core.storage.exceptions import UnicityError
from kinto.core.utils import (
build_request, reapply_cors, hmac_digest, instance_uri, view_lookup)
@@ -100,7 +101,12 @@
raise_invalid(resource.request, **error_details)
data = {'id': obj_id}
- obj = resource.model.create_record(data, ignore_conflict=True)
+ try:
+ obj = resource.model.create_record(data)
+ except UnicityError as e:
+ # The record already exists; skip running events
+ return e.record
+
# Since the current request is not a resource (but a straight Service),
# we simulate a request on a resource.
# This will be used in the resource event payload.
|
{"golden_diff": "diff --git a/kinto/plugins/default_bucket/__init__.py b/kinto/plugins/default_bucket/__init__.py\n--- a/kinto/plugins/default_bucket/__init__.py\n+++ b/kinto/plugins/default_bucket/__init__.py\n@@ -6,6 +6,7 @@\n \n from kinto.core.errors import raise_invalid\n from kinto.core.events import ACTIONS\n+from kinto.core.storage.exceptions import UnicityError\n from kinto.core.utils import (\n build_request, reapply_cors, hmac_digest, instance_uri, view_lookup)\n \n@@ -100,7 +101,12 @@\n raise_invalid(resource.request, **error_details)\n \n data = {'id': obj_id}\n- obj = resource.model.create_record(data, ignore_conflict=True)\n+ try:\n+ obj = resource.model.create_record(data)\n+ except UnicityError as e:\n+ # The record already exists; skip running events\n+ return e.record\n+\n # Since the current request is not a resource (but a straight Service),\n # we simulate a request on a resource.\n # This will be used in the resource event payload.\n", "issue": "GET on default bucket increases quota usage\nWhile investigating #1225, I discovered that *every* GET request to e.g. `/buckets/default/collections/some-collection` causes quota usage to increase. For example,\r\n\r\n```\r\nselect * from records where collection_id='quota';\r\n id | parent_id \r\n | collection_id | last_modified | \r\n data \r\n-----------------+--------------------------------------------------------------\r\n-------------------+---------------+----------------------------+---------------\r\n----------------------------------------------------\r\n bucket_info | /buckets/74f86a68-a03c-a5f5-fb29-e421b362f5b5 \r\n | quota | 2017-05-22 18:07:14.715782 | {\"record_count\r\n\": 0, \"storage_size\": 1745, \"collection_count\": 13}\r\n collection_info | /buckets/74f86a68-a03c-a5f5-fb29-e421b362f5b5/collections/a-n\r\newest-collection-3 | quota | 2017-05-22 18:07:14.716923 | {\"record_count\r\n\": 0, \"storage_size\": 540}\r\n```\r\n\r\n```\r\n> http --auth 'user:pass6' 'localhost:8888/v1/buckets/default/collections/a-newest-collection-3'\r\nHTTP/1.1 200 OK\r\n..... 
more junk .....\r\n```\r\n\r\n```\r\nselect * from records where collection_id='quota';\r\n id | parent_id \r\n | collection_id | last_modified | \r\n data \r\n-----------------+--------------------------------------------------------------\r\n-------------------+---------------+----------------------------+---------------\r\n----------------------------------------------------\r\n bucket_info | /buckets/74f86a68-a03c-a5f5-fb29-e421b362f5b5 \r\n | quota | 2017-05-22 18:56:52.448115 | {\"record_count\r\n\": 0, \"storage_size\": 1880, \"collection_count\": 14}\r\n collection_info | /buckets/74f86a68-a03c-a5f5-fb29-e421b362f5b5/collections/a-n\r\newest-collection-3 | quota | 2017-05-22 18:56:52.4493 | {\"record_count\r\n\": 0, \"storage_size\": 600}\r\n```\r\n\r\nThis appears to be partly because every request that goes to the default bucket \"creates\" a bucket and collection.\n", "before_files": [{"content": "import uuid\n\nfrom pyramid import httpexceptions\nfrom pyramid.settings import asbool\nfrom pyramid.security import NO_PERMISSION_REQUIRED, Authenticated\n\nfrom kinto.core.errors import raise_invalid\nfrom kinto.core.events import ACTIONS\nfrom kinto.core.utils import (\n build_request, reapply_cors, hmac_digest, instance_uri, view_lookup)\n\nfrom kinto.authorization import RouteFactory\nfrom kinto.views.buckets import Bucket\nfrom kinto.views.collections import Collection\n\n\ndef create_bucket(request, bucket_id):\n \"\"\"Create a bucket if it doesn't exists.\"\"\"\n bucket_put = (request.method.lower() == 'put' and\n request.path.endswith('buckets/default'))\n # Do nothing if current request will already create the bucket.\n if bucket_put:\n return\n\n # Do not intent to create multiple times per request (e.g. in batch).\n already_created = request.bound_data.setdefault('buckets', {})\n if bucket_id in already_created:\n return\n\n bucket_uri = instance_uri(request, 'bucket', id=bucket_id)\n bucket = resource_create_object(request=request,\n resource_cls=Bucket,\n uri=bucket_uri)\n already_created[bucket_id] = bucket\n\n\ndef create_collection(request, bucket_id):\n # Do nothing if current request does not involve a collection.\n subpath = request.matchdict.get('subpath')\n if not (subpath and subpath.rstrip('/').startswith('collections/')):\n return\n\n collection_id = subpath.split('/')[1]\n collection_uri = instance_uri(request, 'collection',\n bucket_id=bucket_id,\n id=collection_id)\n\n # Do not intent to create multiple times per request (e.g. in batch).\n already_created = request.bound_data.setdefault('collections', {})\n if collection_uri in already_created:\n return\n\n # Do nothing if current request will already create the collection.\n collection_put = (request.method.lower() == 'put' and\n request.path.endswith(collection_id))\n if collection_put:\n return\n\n collection = resource_create_object(request=request,\n resource_cls=Collection,\n uri=collection_uri)\n already_created[collection_uri] = collection\n\n\ndef resource_create_object(request, resource_cls, uri):\n \"\"\"In the default bucket, the bucket and collection are implicitly\n created. 
This helper instantiate the resource and simulate a request\n with its RootFactory on the instantiated resource.\n :returns: the created object\n :rtype: dict\n \"\"\"\n resource_name, matchdict = view_lookup(request, uri)\n\n # Build a fake request, mainly used to populate the create events that\n # will be triggered by the resource.\n fakerequest = build_request(request, {\n 'method': 'PUT',\n 'path': uri,\n })\n fakerequest.matchdict = matchdict\n fakerequest.bound_data = request.bound_data\n fakerequest.authn_type = request.authn_type\n fakerequest.selected_userid = request.selected_userid\n fakerequest.errors = request.errors\n fakerequest.current_resource_name = resource_name\n\n obj_id = matchdict['id']\n\n # Fake context, required to instantiate a resource.\n context = RouteFactory(fakerequest)\n context.resource_name = resource_name\n resource = resource_cls(fakerequest, context)\n\n # Check that provided id is valid for this resource.\n if not resource.model.id_generator.match(obj_id):\n error_details = {\n 'location': 'path',\n 'description': \"Invalid {} id\".format(resource_name)\n }\n raise_invalid(resource.request, **error_details)\n\n data = {'id': obj_id}\n obj = resource.model.create_record(data, ignore_conflict=True)\n # Since the current request is not a resource (but a straight Service),\n # we simulate a request on a resource.\n # This will be used in the resource event payload.\n resource.postprocess(obj, action=ACTIONS.CREATE)\n return obj\n\n\ndef default_bucket(request):\n if request.method.lower() == 'options':\n path = request.path.replace('default', 'unknown')\n subrequest = build_request(request, {\n 'method': 'OPTIONS',\n 'path': path\n })\n return request.invoke_subrequest(subrequest)\n\n if Authenticated not in request.effective_principals:\n # Pass through the forbidden_view_config\n raise httpexceptions.HTTPForbidden()\n\n settings = request.registry.settings\n\n if asbool(settings['readonly']):\n raise httpexceptions.HTTPMethodNotAllowed()\n\n bucket_id = request.default_bucket_id\n\n # Implicit object creations.\n # Make sure bucket exists\n create_bucket(request, bucket_id)\n # Make sure the collection exists\n create_collection(request, bucket_id)\n\n path = request.path.replace('/buckets/default', '/buckets/{}'.format(bucket_id))\n querystring = request.url[(request.url.index(request.path) +\n len(request.path)):]\n try:\n # If 'id' is provided as 'default', replace with actual bucket id.\n body = request.json\n body['data']['id'] = body['data']['id'].replace('default', bucket_id)\n except:\n body = request.body or {\"data\": {}}\n subrequest = build_request(request, {\n 'method': request.method,\n 'path': path + querystring,\n 'body': body,\n })\n subrequest.bound_data = request.bound_data\n\n try:\n response = request.invoke_subrequest(subrequest)\n except httpexceptions.HTTPException as error:\n is_redirect = error.status_code < 400\n if error.content_type == 'application/json' or is_redirect:\n response = reapply_cors(subrequest, error)\n else:\n # Ask the upper level to format the error.\n raise error\n return response\n\n\ndef default_bucket_id(request):\n settings = request.registry.settings\n secret = settings['userid_hmac_secret']\n # Build the user unguessable bucket_id UUID from its user_id\n digest = hmac_digest(secret, request.prefixed_userid)\n return str(uuid.UUID(digest[:32]))\n\n\ndef get_user_info(request):\n user_info = {\n 'id': request.prefixed_userid,\n 'bucket': request.default_bucket_id\n }\n return user_info\n\n\ndef 
includeme(config):\n # Redirect default to the right endpoint\n config.add_view(default_bucket,\n route_name='default_bucket',\n permission=NO_PERMISSION_REQUIRED)\n config.add_view(default_bucket,\n route_name='default_bucket_collection',\n permission=NO_PERMISSION_REQUIRED)\n\n config.add_route('default_bucket_collection',\n '/buckets/default/{subpath:.*}')\n config.add_route('default_bucket', '/buckets/default')\n\n # Provide helpers\n config.add_request_method(default_bucket_id, reify=True)\n # Override kinto.core default user info\n config.add_request_method(get_user_info)\n\n config.add_api_capability(\n \"default_bucket\",\n description=\"The default bucket is an alias for a personal\"\n \" bucket where collections are created implicitly.\",\n url=\"https://kinto.readthedocs.io/en/latest/api/1.x/\"\n \"buckets.html#personal-bucket-default\")\n", "path": "kinto/plugins/default_bucket/__init__.py"}], "after_files": [{"content": "import uuid\n\nfrom pyramid import httpexceptions\nfrom pyramid.settings import asbool\nfrom pyramid.security import NO_PERMISSION_REQUIRED, Authenticated\n\nfrom kinto.core.errors import raise_invalid\nfrom kinto.core.events import ACTIONS\nfrom kinto.core.storage.exceptions import UnicityError\nfrom kinto.core.utils import (\n build_request, reapply_cors, hmac_digest, instance_uri, view_lookup)\n\nfrom kinto.authorization import RouteFactory\nfrom kinto.views.buckets import Bucket\nfrom kinto.views.collections import Collection\n\n\ndef create_bucket(request, bucket_id):\n \"\"\"Create a bucket if it doesn't exists.\"\"\"\n bucket_put = (request.method.lower() == 'put' and\n request.path.endswith('buckets/default'))\n # Do nothing if current request will already create the bucket.\n if bucket_put:\n return\n\n # Do not intent to create multiple times per request (e.g. in batch).\n already_created = request.bound_data.setdefault('buckets', {})\n if bucket_id in already_created:\n return\n\n bucket_uri = instance_uri(request, 'bucket', id=bucket_id)\n bucket = resource_create_object(request=request,\n resource_cls=Bucket,\n uri=bucket_uri)\n already_created[bucket_id] = bucket\n\n\ndef create_collection(request, bucket_id):\n # Do nothing if current request does not involve a collection.\n subpath = request.matchdict.get('subpath')\n if not (subpath and subpath.rstrip('/').startswith('collections/')):\n return\n\n collection_id = subpath.split('/')[1]\n collection_uri = instance_uri(request, 'collection',\n bucket_id=bucket_id,\n id=collection_id)\n\n # Do not intent to create multiple times per request (e.g. in batch).\n already_created = request.bound_data.setdefault('collections', {})\n if collection_uri in already_created:\n return\n\n # Do nothing if current request will already create the collection.\n collection_put = (request.method.lower() == 'put' and\n request.path.endswith(collection_id))\n if collection_put:\n return\n\n collection = resource_create_object(request=request,\n resource_cls=Collection,\n uri=collection_uri)\n already_created[collection_uri] = collection\n\n\ndef resource_create_object(request, resource_cls, uri):\n \"\"\"In the default bucket, the bucket and collection are implicitly\n created. 
This helper instantiate the resource and simulate a request\n with its RootFactory on the instantiated resource.\n :returns: the created object\n :rtype: dict\n \"\"\"\n resource_name, matchdict = view_lookup(request, uri)\n\n # Build a fake request, mainly used to populate the create events that\n # will be triggered by the resource.\n fakerequest = build_request(request, {\n 'method': 'PUT',\n 'path': uri,\n })\n fakerequest.matchdict = matchdict\n fakerequest.bound_data = request.bound_data\n fakerequest.authn_type = request.authn_type\n fakerequest.selected_userid = request.selected_userid\n fakerequest.errors = request.errors\n fakerequest.current_resource_name = resource_name\n\n obj_id = matchdict['id']\n\n # Fake context, required to instantiate a resource.\n context = RouteFactory(fakerequest)\n context.resource_name = resource_name\n resource = resource_cls(fakerequest, context)\n\n # Check that provided id is valid for this resource.\n if not resource.model.id_generator.match(obj_id):\n error_details = {\n 'location': 'path',\n 'description': \"Invalid {} id\".format(resource_name)\n }\n raise_invalid(resource.request, **error_details)\n\n data = {'id': obj_id}\n try:\n obj = resource.model.create_record(data)\n except UnicityError as e:\n # The record already exists; skip running events\n return e.record\n\n # Since the current request is not a resource (but a straight Service),\n # we simulate a request on a resource.\n # This will be used in the resource event payload.\n resource.postprocess(obj, action=ACTIONS.CREATE)\n return obj\n\n\ndef default_bucket(request):\n if request.method.lower() == 'options':\n path = request.path.replace('default', 'unknown')\n subrequest = build_request(request, {\n 'method': 'OPTIONS',\n 'path': path\n })\n return request.invoke_subrequest(subrequest)\n\n if Authenticated not in request.effective_principals:\n # Pass through the forbidden_view_config\n raise httpexceptions.HTTPForbidden()\n\n settings = request.registry.settings\n\n if asbool(settings['readonly']):\n raise httpexceptions.HTTPMethodNotAllowed()\n\n bucket_id = request.default_bucket_id\n\n # Implicit object creations.\n # Make sure bucket exists\n create_bucket(request, bucket_id)\n # Make sure the collection exists\n create_collection(request, bucket_id)\n\n path = request.path.replace('/buckets/default', '/buckets/{}'.format(bucket_id))\n querystring = request.url[(request.url.index(request.path) +\n len(request.path)):]\n try:\n # If 'id' is provided as 'default', replace with actual bucket id.\n body = request.json\n body['data']['id'] = body['data']['id'].replace('default', bucket_id)\n except:\n body = request.body or {\"data\": {}}\n subrequest = build_request(request, {\n 'method': request.method,\n 'path': path + querystring,\n 'body': body,\n })\n subrequest.bound_data = request.bound_data\n\n try:\n response = request.invoke_subrequest(subrequest)\n except httpexceptions.HTTPException as error:\n is_redirect = error.status_code < 400\n if error.content_type == 'application/json' or is_redirect:\n response = reapply_cors(subrequest, error)\n else:\n # Ask the upper level to format the error.\n raise error\n return response\n\n\ndef default_bucket_id(request):\n settings = request.registry.settings\n secret = settings['userid_hmac_secret']\n # Build the user unguessable bucket_id UUID from its user_id\n digest = hmac_digest(secret, request.prefixed_userid)\n return str(uuid.UUID(digest[:32]))\n\n\ndef get_user_info(request):\n user_info = {\n 'id': 
request.prefixed_userid,\n 'bucket': request.default_bucket_id\n }\n return user_info\n\n\ndef includeme(config):\n # Redirect default to the right endpoint\n config.add_view(default_bucket,\n route_name='default_bucket',\n permission=NO_PERMISSION_REQUIRED)\n config.add_view(default_bucket,\n route_name='default_bucket_collection',\n permission=NO_PERMISSION_REQUIRED)\n\n config.add_route('default_bucket_collection',\n '/buckets/default/{subpath:.*}')\n config.add_route('default_bucket', '/buckets/default')\n\n # Provide helpers\n config.add_request_method(default_bucket_id, reify=True)\n # Override kinto.core default user info\n config.add_request_method(get_user_info)\n\n config.add_api_capability(\n \"default_bucket\",\n description=\"The default bucket is an alias for a personal\"\n \" bucket where collections are created implicitly.\",\n url=\"https://kinto.readthedocs.io/en/latest/api/1.x/\"\n \"buckets.html#personal-bucket-default\")\n", "path": "kinto/plugins/default_bucket/__init__.py"}]}
| 2,936 | 244 |
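A minimal sketch of the create-or-reuse pattern the Kinto patch above adopts, pulled out of `resource_create_object` for illustration. It assumes the `kinto.core` APIs exactly as shown in the record, in particular that `UnicityError` carries the conflicting record as `e.record`; the helper name is invented here.

```python
# Sketch only: create the implicit object once, reuse it on every later request.
from kinto.core.storage.exceptions import UnicityError

def create_or_reuse(model, obj_id):
    """Return a freshly created record, or the stored one if it already exists."""
    try:
        # Only a genuinely new bucket/collection reaches the CREATE event path,
        # so plain GETs on /buckets/default stop inflating the quota records.
        return model.create_record({'id': obj_id})
    except UnicityError as e:
        # Already present: hand back the stored record and skip event processing.
        return e.record
```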
gh_patches_debug_14439
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__pytorch-lightning-715
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fitting with log_gpu_memory=True fails in python3.6.
## Bug
Fitting with `log_gpu_memory=True` in the Trainer fails in python3.6 version.
### To Reproduce
1. Use python3.6 version
2. Create any trainer with `log_gpu_memory=True` option.
3. Then fit it.
3. See error:
```
/a/pytorch-lightning/pytorch_lightning/core/memory.py in get_gpu_memory_map()
237 encoding='utf-8',
238 capture_output=True,
--> 239 check=True)
240 # Convert lines into a dictionary
241 gpu_memory = [int(x) for x in result.stdout.strip().split(os.linesep)]
/usr/lib/python3.6/subprocess.py in run(input, timeout, check, *popenargs, **kwargs)
421 kwargs['stdin'] = PIPE
422
--> 423 with Popen(*popenargs, **kwargs) as process:
424 try:
425 stdout, stderr = process.communicate(input, timeout=timeout)
TypeError: __init__() got an unexpected keyword argument 'capture_output'
```
#### Code sample
```
trainer = Trainer(
log_gpu_memory=True,
# ....
)
trainer.fit()
```
### Expected behavior
For the same code there is no errors for python3.7
### Environment
pytorch: 1.2.0
Ubuntu 18.04
pytorch-lightning:
- installed to pip environment
- commit 7a1df80f4e98fca
- python setup.py develop
- version 0.6.0
python: 3.6.8
cuda: 10.0, V10.0.130
cudnn: 7.6.2
GPU: RTX 2080 TI
### Additional context
In the `setup.py`
python_requires='>=3.6',
But `capture_output` is used in `subprocess.run` calling, which is valid only for python3.7
See also workaround to maintain python3.6:
https://stackoverflow.com/questions/53209127/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_lightning/core/memory.py`
Content:
```
1 '''
2 Generates a summary of a model's layers and dimensionality
3 '''
4
5 import gc
6 import os
7 import subprocess
8
9 import numpy as np
10 import pandas as pd
11 import torch
12 import logging
13
14
15 class ModelSummary(object):
16
17 def __init__(self, model, mode='full'):
18 '''
19 Generates summaries of model layers and dimensions.
20 '''
21 self.model = model
22 self.mode = mode
23 self.in_sizes = []
24 self.out_sizes = []
25
26 self.summarize()
27
28 def __str__(self):
29 return self.summary.__str__()
30
31 def __repr__(self):
32 return self.summary.__str__()
33
34 def named_modules(self):
35 if self.mode == 'full':
36 mods = self.model.named_modules()
37 mods = list(mods)[1:] # do not include root module (LightningModule)
38 elif self.mode == 'top':
39 # the children are the top-level modules
40 mods = self.model.named_children()
41 else:
42 mods = []
43 return list(mods)
44
45 def get_variable_sizes(self):
46 '''Run sample input through each layer to get output sizes'''
47 mods = self.named_modules()
48 in_sizes = []
49 out_sizes = []
50 input_ = self.model.example_input_array
51
52 if self.model.on_gpu:
53 device = next(self.model.parameters()).get_device()
54 # test if input is a list or a tuple
55 if isinstance(input_, (list, tuple)):
56 input_ = [input_i.cuda(device) if torch.is_tensor(input_i) else input_i
57 for input_i in input_]
58 else:
59 input_ = input_.cuda(device)
60
61 if self.model.trainer.use_amp:
62 # test if it is not a list or a tuple
63 if isinstance(input_, (list, tuple)):
64 input_ = [input_i.half() if torch.is_tensor(input_i) else input_i
65 for input_i in input_]
66 else:
67 input_ = input_.half()
68
69 with torch.no_grad():
70
71 for _, m in mods:
72 if isinstance(input_, (list, tuple)): # pragma: no cover
73 out = m(*input_)
74 else:
75 out = m(input_)
76
77 if isinstance(input_, (list, tuple)): # pragma: no cover
78 in_size = []
79 for x in input_:
80 if type(x) is list:
81 in_size.append(len(x))
82 else:
83 in_size.append(x.size())
84 else:
85 in_size = np.array(input_.size())
86
87 in_sizes.append(in_size)
88
89 if isinstance(out, (list, tuple)): # pragma: no cover
90 out_size = np.asarray([x.size() for x in out])
91 else:
92 out_size = np.array(out.size())
93
94 out_sizes.append(out_size)
95 input_ = out
96
97 self.in_sizes = in_sizes
98 self.out_sizes = out_sizes
99 assert len(in_sizes) == len(out_sizes)
100 return
101
102 def get_layer_names(self):
103 '''Collect Layer Names'''
104 mods = self.named_modules()
105 names = []
106 layers = []
107 for name, m in mods:
108 names += [name]
109 layers += [str(m.__class__)]
110
111 layer_types = [x.split('.')[-1][:-2] for x in layers]
112
113 self.layer_names = names
114 self.layer_types = layer_types
115 return
116
117 def get_parameter_sizes(self):
118 '''Get sizes of all parameters in `model`'''
119 mods = self.named_modules()
120 sizes = []
121 for _, m in mods:
122 p = list(m.parameters())
123 modsz = []
124 for j in range(len(p)):
125 modsz.append(np.array(p[j].size()))
126 sizes.append(modsz)
127
128 self.param_sizes = sizes
129 return
130
131 def get_parameter_nums(self):
132 '''Get number of parameters in each layer'''
133 param_nums = []
134 for mod in self.param_sizes:
135 all_params = 0
136 for p in mod:
137 all_params += np.prod(p)
138 param_nums.append(all_params)
139 self.param_nums = param_nums
140 return
141
142 def make_summary(self):
143 '''
144 Makes a summary listing with:
145
146 Layer Name, Layer Type, Input Size, Output Size, Number of Parameters
147 '''
148
149 cols = ['Name', 'Type', 'Params']
150 if self.model.example_input_array is not None:
151 cols.extend(['In_sizes', 'Out_sizes'])
152
153 df = pd.DataFrame(np.zeros((len(self.layer_names), len(cols))))
154 df.columns = cols
155
156 df['Name'] = self.layer_names
157 df['Type'] = self.layer_types
158 df['Params'] = self.param_nums
159 df['Params'] = df['Params'].map(get_human_readable_count)
160
161 if self.model.example_input_array is not None:
162 df['In_sizes'] = self.in_sizes
163 df['Out_sizes'] = self.out_sizes
164
165 self.summary = df
166 return
167
168 def summarize(self):
169 self.get_layer_names()
170 self.get_parameter_sizes()
171 self.get_parameter_nums()
172
173 if self.model.example_input_array is not None:
174 self.get_variable_sizes()
175 self.make_summary()
176
177
178 def print_mem_stack(): # pragma: no cover
179 for obj in gc.get_objects():
180 try:
181 if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
182 logging.info(type(obj), obj.size())
183 except Exception:
184 pass
185
186
187 def count_mem_items(): # pragma: no cover
188 num_params = 0
189 num_tensors = 0
190 for obj in gc.get_objects():
191 try:
192 if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
193 obj_type = str(type(obj))
194 if 'parameter' in obj_type:
195 num_params += 1
196 else:
197 num_tensors += 1
198 except Exception:
199 pass
200
201 return num_params, num_tensors
202
203
204 def get_memory_profile(mode):
205 """
206 'all' means return memory for all gpus
207 'min_max' means return memory for max and min
208 :param mode:
209 :return:
210 """
211 memory_map = get_gpu_memory_map()
212
213 if mode == 'min_max':
214 min_index, min_memory = min(memory_map.items(), key=lambda item: item[1])
215 max_index, max_memory = max(memory_map.items(), key=lambda item: item[1])
216
217 memory_map = {min_index: min_memory, max_index: max_memory}
218
219 return memory_map
220
221
222 def get_gpu_memory_map():
223 """Get the current gpu usage.
224
225 Returns
226 -------
227 usage: dict
228 Keys are device ids as integers.
229 Values are memory usage as integers in MB.
230 """
231 result = subprocess.run(
232 [
233 'nvidia-smi',
234 '--query-gpu=memory.used',
235 '--format=csv,nounits,noheader',
236 ],
237 encoding='utf-8',
238 capture_output=True,
239 check=True)
240 # Convert lines into a dictionary
241 gpu_memory = [int(x) for x in result.stdout.strip().split(os.linesep)]
242 gpu_memory_map = {f'gpu_{index}': memory for index, memory in enumerate(gpu_memory)}
243 return gpu_memory_map
244
245
246 def get_human_readable_count(number):
247 """
248 Abbreviates an integer number with K, M, B, T for thousands, millions,
249 billions and trillions, respectively.
250 Examples:
251 123 -> 123
252 1234 -> 1 K (one thousand)
253 2e6 -> 2 M (two million)
254 3e9 -> 3 B (three billion)
255 4e12 -> 4 T (four trillion)
256 5e15 -> 5,000 T
257 :param number: a positive integer number
258 :returns a string formatted according to the pattern described above.
259 """
260 assert number >= 0
261 labels = [' ', 'K', 'M', 'B', 'T']
262 num_digits = int(np.floor(np.log10(number)) + 1 if number > 0 else 1)
263 num_groups = int(np.ceil(num_digits / 3))
264 num_groups = min(num_groups, len(labels)) # don't abbreviate beyond trillions
265 shift = -3 * (num_groups - 1)
266 number = number * (10 ** shift)
267 index = num_groups - 1
268 return f'{int(number):,d} {labels[index]}'
269
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pytorch_lightning/core/memory.py b/pytorch_lightning/core/memory.py
--- a/pytorch_lightning/core/memory.py
+++ b/pytorch_lightning/core/memory.py
@@ -5,6 +5,7 @@
import gc
import os
import subprocess
+from subprocess import PIPE
import numpy as np
import pandas as pd
@@ -235,7 +236,8 @@
'--format=csv,nounits,noheader',
],
encoding='utf-8',
- capture_output=True,
+ # capture_output=True, # valid for python version >=3.7
+ stdout=PIPE, stderr=PIPE, # for backward compatibility with python version 3.6
check=True)
# Convert lines into a dictionary
gpu_memory = [int(x) for x in result.stdout.strip().split(os.linesep)]
|
{"golden_diff": "diff --git a/pytorch_lightning/core/memory.py b/pytorch_lightning/core/memory.py\n--- a/pytorch_lightning/core/memory.py\n+++ b/pytorch_lightning/core/memory.py\n@@ -5,6 +5,7 @@\n import gc\n import os\n import subprocess\n+from subprocess import PIPE\n \n import numpy as np\n import pandas as pd\n@@ -235,7 +236,8 @@\n '--format=csv,nounits,noheader',\n ],\n encoding='utf-8',\n- capture_output=True,\n+ # capture_output=True, # valid for python version >=3.7\n+ stdout=PIPE, stderr=PIPE, # for backward compatibility with python version 3.6\n check=True)\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.stdout.strip().split(os.linesep)]\n", "issue": "Fitting with log_gpu_memory=True fails in python3.6.\n## Bug\r\n\r\nFitting with `log_gpu_memory=True` in the Trainer fails in python3.6 version.\r\n\r\n### To Reproduce\r\n\r\n1. Use python3.6 version\r\n2. Create any trainer with `log_gpu_memory=True` option.\r\n3. Then fit it.\r\n3. See error:\r\n\r\n```\r\n/a/pytorch-lightning/pytorch_lightning/core/memory.py in get_gpu_memory_map()\r\n 237 encoding='utf-8',\r\n 238 capture_output=True,\r\n--> 239 check=True)\r\n 240 # Convert lines into a dictionary\r\n 241 gpu_memory = [int(x) for x in result.stdout.strip().split(os.linesep)]\r\n\r\n/usr/lib/python3.6/subprocess.py in run(input, timeout, check, *popenargs, **kwargs)\r\n 421 kwargs['stdin'] = PIPE\r\n 422 \r\n--> 423 with Popen(*popenargs, **kwargs) as process:\r\n 424 try:\r\n 425 stdout, stderr = process.communicate(input, timeout=timeout)\r\n\r\nTypeError: __init__() got an unexpected keyword argument 'capture_output'\r\n\r\n```\r\n\r\n\r\n#### Code sample\r\n\r\n```\r\ntrainer = Trainer(\r\n log_gpu_memory=True,\r\n # ....\r\n)\r\ntrainer.fit()\r\n```\r\n\r\n### Expected behavior\r\n\r\nFor the same code there is no errors for python3.7\r\n\r\n### Environment\r\n\r\npytorch: 1.2.0\r\nUbuntu 18.04\r\npytorch-lightning: \r\n - installed to pip environment\r\n - commit 7a1df80f4e98fca\r\n - python setup.py develop\r\n - version 0.6.0 \r\npython: 3.6.8\r\ncuda: 10.0, V10.0.130 \r\ncudnn: 7.6.2\r\nGPU: RTX 2080 TI\r\n\r\n### Additional context\r\n\r\nIn the `setup.py`\r\n python_requires='>=3.6',\r\n\r\nBut `capture_output` is used in `subprocess.run` calling, which is valid only for python3.7\r\nSee also workaround to maintain python3.6:\r\nhttps://stackoverflow.com/questions/53209127/\r\n\r\n\n", "before_files": [{"content": "'''\nGenerates a summary of a model's layers and dimensionality\n'''\n\nimport gc\nimport os\nimport subprocess\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport logging\n\n\nclass ModelSummary(object):\n\n def __init__(self, model, mode='full'):\n '''\n Generates summaries of model layers and dimensions.\n '''\n self.model = model\n self.mode = mode\n self.in_sizes = []\n self.out_sizes = []\n\n self.summarize()\n\n def __str__(self):\n return self.summary.__str__()\n\n def __repr__(self):\n return self.summary.__str__()\n\n def named_modules(self):\n if self.mode == 'full':\n mods = self.model.named_modules()\n mods = list(mods)[1:] # do not include root module (LightningModule)\n elif self.mode == 'top':\n # the children are the top-level modules\n mods = self.model.named_children()\n else:\n mods = []\n return list(mods)\n\n def get_variable_sizes(self):\n '''Run sample input through each layer to get output sizes'''\n mods = self.named_modules()\n in_sizes = []\n out_sizes = []\n input_ = self.model.example_input_array\n\n if self.model.on_gpu:\n device = 
next(self.model.parameters()).get_device()\n # test if input is a list or a tuple\n if isinstance(input_, (list, tuple)):\n input_ = [input_i.cuda(device) if torch.is_tensor(input_i) else input_i\n for input_i in input_]\n else:\n input_ = input_.cuda(device)\n\n if self.model.trainer.use_amp:\n # test if it is not a list or a tuple\n if isinstance(input_, (list, tuple)):\n input_ = [input_i.half() if torch.is_tensor(input_i) else input_i\n for input_i in input_]\n else:\n input_ = input_.half()\n\n with torch.no_grad():\n\n for _, m in mods:\n if isinstance(input_, (list, tuple)): # pragma: no cover\n out = m(*input_)\n else:\n out = m(input_)\n\n if isinstance(input_, (list, tuple)): # pragma: no cover\n in_size = []\n for x in input_:\n if type(x) is list:\n in_size.append(len(x))\n else:\n in_size.append(x.size())\n else:\n in_size = np.array(input_.size())\n\n in_sizes.append(in_size)\n\n if isinstance(out, (list, tuple)): # pragma: no cover\n out_size = np.asarray([x.size() for x in out])\n else:\n out_size = np.array(out.size())\n\n out_sizes.append(out_size)\n input_ = out\n\n self.in_sizes = in_sizes\n self.out_sizes = out_sizes\n assert len(in_sizes) == len(out_sizes)\n return\n\n def get_layer_names(self):\n '''Collect Layer Names'''\n mods = self.named_modules()\n names = []\n layers = []\n for name, m in mods:\n names += [name]\n layers += [str(m.__class__)]\n\n layer_types = [x.split('.')[-1][:-2] for x in layers]\n\n self.layer_names = names\n self.layer_types = layer_types\n return\n\n def get_parameter_sizes(self):\n '''Get sizes of all parameters in `model`'''\n mods = self.named_modules()\n sizes = []\n for _, m in mods:\n p = list(m.parameters())\n modsz = []\n for j in range(len(p)):\n modsz.append(np.array(p[j].size()))\n sizes.append(modsz)\n\n self.param_sizes = sizes\n return\n\n def get_parameter_nums(self):\n '''Get number of parameters in each layer'''\n param_nums = []\n for mod in self.param_sizes:\n all_params = 0\n for p in mod:\n all_params += np.prod(p)\n param_nums.append(all_params)\n self.param_nums = param_nums\n return\n\n def make_summary(self):\n '''\n Makes a summary listing with:\n\n Layer Name, Layer Type, Input Size, Output Size, Number of Parameters\n '''\n\n cols = ['Name', 'Type', 'Params']\n if self.model.example_input_array is not None:\n cols.extend(['In_sizes', 'Out_sizes'])\n\n df = pd.DataFrame(np.zeros((len(self.layer_names), len(cols))))\n df.columns = cols\n\n df['Name'] = self.layer_names\n df['Type'] = self.layer_types\n df['Params'] = self.param_nums\n df['Params'] = df['Params'].map(get_human_readable_count)\n\n if self.model.example_input_array is not None:\n df['In_sizes'] = self.in_sizes\n df['Out_sizes'] = self.out_sizes\n\n self.summary = df\n return\n\n def summarize(self):\n self.get_layer_names()\n self.get_parameter_sizes()\n self.get_parameter_nums()\n\n if self.model.example_input_array is not None:\n self.get_variable_sizes()\n self.make_summary()\n\n\ndef print_mem_stack(): # pragma: no cover\n for obj in gc.get_objects():\n try:\n if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):\n logging.info(type(obj), obj.size())\n except Exception:\n pass\n\n\ndef count_mem_items(): # pragma: no cover\n num_params = 0\n num_tensors = 0\n for obj in gc.get_objects():\n try:\n if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):\n obj_type = str(type(obj))\n if 'parameter' in obj_type:\n num_params += 1\n else:\n num_tensors += 1\n except Exception:\n pass\n\n return 
num_params, num_tensors\n\n\ndef get_memory_profile(mode):\n \"\"\"\n 'all' means return memory for all gpus\n 'min_max' means return memory for max and min\n :param mode:\n :return:\n \"\"\"\n memory_map = get_gpu_memory_map()\n\n if mode == 'min_max':\n min_index, min_memory = min(memory_map.items(), key=lambda item: item[1])\n max_index, max_memory = max(memory_map.items(), key=lambda item: item[1])\n\n memory_map = {min_index: min_memory, max_index: max_memory}\n\n return memory_map\n\n\ndef get_gpu_memory_map():\n \"\"\"Get the current gpu usage.\n\n Returns\n -------\n usage: dict\n Keys are device ids as integers.\n Values are memory usage as integers in MB.\n \"\"\"\n result = subprocess.run(\n [\n 'nvidia-smi',\n '--query-gpu=memory.used',\n '--format=csv,nounits,noheader',\n ],\n encoding='utf-8',\n capture_output=True,\n check=True)\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.stdout.strip().split(os.linesep)]\n gpu_memory_map = {f'gpu_{index}': memory for index, memory in enumerate(gpu_memory)}\n return gpu_memory_map\n\n\ndef get_human_readable_count(number):\n \"\"\"\n Abbreviates an integer number with K, M, B, T for thousands, millions,\n billions and trillions, respectively.\n Examples:\n 123 -> 123\n 1234 -> 1 K (one thousand)\n 2e6 -> 2 M (two million)\n 3e9 -> 3 B (three billion)\n 4e12 -> 4 T (four trillion)\n 5e15 -> 5,000 T\n :param number: a positive integer number\n :returns a string formatted according to the pattern described above.\n \"\"\"\n assert number >= 0\n labels = [' ', 'K', 'M', 'B', 'T']\n num_digits = int(np.floor(np.log10(number)) + 1 if number > 0 else 1)\n num_groups = int(np.ceil(num_digits / 3))\n num_groups = min(num_groups, len(labels)) # don't abbreviate beyond trillions\n shift = -3 * (num_groups - 1)\n number = number * (10 ** shift)\n index = num_groups - 1\n return f'{int(number):,d} {labels[index]}'\n", "path": "pytorch_lightning/core/memory.py"}], "after_files": [{"content": "'''\nGenerates a summary of a model's layers and dimensionality\n'''\n\nimport gc\nimport os\nimport subprocess\nfrom subprocess import PIPE\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport logging\n\n\nclass ModelSummary(object):\n\n def __init__(self, model, mode='full'):\n '''\n Generates summaries of model layers and dimensions.\n '''\n self.model = model\n self.mode = mode\n self.in_sizes = []\n self.out_sizes = []\n\n self.summarize()\n\n def __str__(self):\n return self.summary.__str__()\n\n def __repr__(self):\n return self.summary.__str__()\n\n def named_modules(self):\n if self.mode == 'full':\n mods = self.model.named_modules()\n mods = list(mods)[1:] # do not include root module (LightningModule)\n elif self.mode == 'top':\n # the children are the top-level modules\n mods = self.model.named_children()\n else:\n mods = []\n return list(mods)\n\n def get_variable_sizes(self):\n '''Run sample input through each layer to get output sizes'''\n mods = self.named_modules()\n in_sizes = []\n out_sizes = []\n input_ = self.model.example_input_array\n\n if self.model.on_gpu:\n device = next(self.model.parameters()).get_device()\n # test if input is a list or a tuple\n if isinstance(input_, (list, tuple)):\n input_ = [input_i.cuda(device) if torch.is_tensor(input_i) else input_i\n for input_i in input_]\n else:\n input_ = input_.cuda(device)\n\n if self.model.trainer.use_amp:\n # test if it is not a list or a tuple\n if isinstance(input_, (list, tuple)):\n input_ = [input_i.half() if torch.is_tensor(input_i) else 
input_i\n for input_i in input_]\n else:\n input_ = input_.half()\n\n with torch.no_grad():\n\n for _, m in mods:\n if isinstance(input_, (list, tuple)): # pragma: no cover\n out = m(*input_)\n else:\n out = m(input_)\n\n if isinstance(input_, (list, tuple)): # pragma: no cover\n in_size = []\n for x in input_:\n if type(x) is list:\n in_size.append(len(x))\n else:\n in_size.append(x.size())\n else:\n in_size = np.array(input_.size())\n\n in_sizes.append(in_size)\n\n if isinstance(out, (list, tuple)): # pragma: no cover\n out_size = np.asarray([x.size() for x in out])\n else:\n out_size = np.array(out.size())\n\n out_sizes.append(out_size)\n input_ = out\n\n self.in_sizes = in_sizes\n self.out_sizes = out_sizes\n assert len(in_sizes) == len(out_sizes)\n return\n\n def get_layer_names(self):\n '''Collect Layer Names'''\n mods = self.named_modules()\n names = []\n layers = []\n for name, m in mods:\n names += [name]\n layers += [str(m.__class__)]\n\n layer_types = [x.split('.')[-1][:-2] for x in layers]\n\n self.layer_names = names\n self.layer_types = layer_types\n return\n\n def get_parameter_sizes(self):\n '''Get sizes of all parameters in `model`'''\n mods = self.named_modules()\n sizes = []\n for _, m in mods:\n p = list(m.parameters())\n modsz = []\n for j in range(len(p)):\n modsz.append(np.array(p[j].size()))\n sizes.append(modsz)\n\n self.param_sizes = sizes\n return\n\n def get_parameter_nums(self):\n '''Get number of parameters in each layer'''\n param_nums = []\n for mod in self.param_sizes:\n all_params = 0\n for p in mod:\n all_params += np.prod(p)\n param_nums.append(all_params)\n self.param_nums = param_nums\n return\n\n def make_summary(self):\n '''\n Makes a summary listing with:\n\n Layer Name, Layer Type, Input Size, Output Size, Number of Parameters\n '''\n\n cols = ['Name', 'Type', 'Params']\n if self.model.example_input_array is not None:\n cols.extend(['In_sizes', 'Out_sizes'])\n\n df = pd.DataFrame(np.zeros((len(self.layer_names), len(cols))))\n df.columns = cols\n\n df['Name'] = self.layer_names\n df['Type'] = self.layer_types\n df['Params'] = self.param_nums\n df['Params'] = df['Params'].map(get_human_readable_count)\n\n if self.model.example_input_array is not None:\n df['In_sizes'] = self.in_sizes\n df['Out_sizes'] = self.out_sizes\n\n self.summary = df\n return\n\n def summarize(self):\n self.get_layer_names()\n self.get_parameter_sizes()\n self.get_parameter_nums()\n\n if self.model.example_input_array is not None:\n self.get_variable_sizes()\n self.make_summary()\n\n\ndef print_mem_stack(): # pragma: no cover\n for obj in gc.get_objects():\n try:\n if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):\n logging.info(type(obj), obj.size())\n except Exception:\n pass\n\n\ndef count_mem_items(): # pragma: no cover\n num_params = 0\n num_tensors = 0\n for obj in gc.get_objects():\n try:\n if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):\n obj_type = str(type(obj))\n if 'parameter' in obj_type:\n num_params += 1\n else:\n num_tensors += 1\n except Exception:\n pass\n\n return num_params, num_tensors\n\n\ndef get_memory_profile(mode):\n \"\"\"\n 'all' means return memory for all gpus\n 'min_max' means return memory for max and min\n :param mode:\n :return:\n \"\"\"\n memory_map = get_gpu_memory_map()\n\n if mode == 'min_max':\n min_index, min_memory = min(memory_map.items(), key=lambda item: item[1])\n max_index, max_memory = max(memory_map.items(), key=lambda item: item[1])\n\n memory_map = {min_index: 
min_memory, max_index: max_memory}\n\n return memory_map\n\n\ndef get_gpu_memory_map():\n \"\"\"Get the current gpu usage.\n\n Returns\n -------\n usage: dict\n Keys are device ids as integers.\n Values are memory usage as integers in MB.\n \"\"\"\n result = subprocess.run(\n [\n 'nvidia-smi',\n '--query-gpu=memory.used',\n '--format=csv,nounits,noheader',\n ],\n encoding='utf-8',\n # capture_output=True, # valid for python version >=3.7\n stdout=PIPE, stderr=PIPE, # for backward compatibility with python version 3.6\n check=True)\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.stdout.strip().split(os.linesep)]\n gpu_memory_map = {f'gpu_{index}': memory for index, memory in enumerate(gpu_memory)}\n return gpu_memory_map\n\n\ndef get_human_readable_count(number):\n \"\"\"\n Abbreviates an integer number with K, M, B, T for thousands, millions,\n billions and trillions, respectively.\n Examples:\n 123 -> 123\n 1234 -> 1 K (one thousand)\n 2e6 -> 2 M (two million)\n 3e9 -> 3 B (three billion)\n 4e12 -> 4 T (four trillion)\n 5e15 -> 5,000 T\n :param number: a positive integer number\n :returns a string formatted according to the pattern described above.\n \"\"\"\n assert number >= 0\n labels = [' ', 'K', 'M', 'B', 'T']\n num_digits = int(np.floor(np.log10(number)) + 1 if number > 0 else 1)\n num_groups = int(np.ceil(num_digits / 3))\n num_groups = min(num_groups, len(labels)) # don't abbreviate beyond trillions\n shift = -3 * (num_groups - 1)\n number = number * (10 ** shift)\n index = num_groups - 1\n return f'{int(number):,d} {labels[index]}'\n", "path": "pytorch_lightning/core/memory.py"}]}
| 3,337 | 188 |
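A minimal sketch of the Python 3.6 compatibility point behind the pytorch-lightning patch above: `subprocess.run` only gained `capture_output` in Python 3.7, while passing `stdout=PIPE, stderr=PIPE` captures output on 3.6 as well. The command below is a harmless stand-in for the `nvidia-smi` query in the record.

```python
# Sketch only: output capture that works on Python 3.6 and later.
import subprocess
import sys
from subprocess import PIPE

result = subprocess.run(
    [sys.executable, '--version'],  # stand-in command; the record runs nvidia-smi
    encoding='utf-8',
    stdout=PIPE, stderr=PIPE,       # equivalent to capture_output=True (3.7+ only)
    check=True,
)
print(result.stdout.strip() or result.stderr.strip())
```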
gh_patches_debug_7918
|
rasdani/github-patches
|
git_diff
|
NVIDIA__NVFlare-75
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CIFAR10 run_fl.py misses license header
https://github.com/NVIDIA/NVFlare/blob/d784e7be9742b4d1bcfa0f389ea063fec984fe50/examples/cifar10/run_fl.py#L1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/cifar10/run_fl.py`
Content:
```
1 import argparse
2 import os
3 import time
4
5 from nvflare.fuel.hci.client.fl_admin_api_runner import FLAdminAPIRunner
6
7
8 def main():
9 parser = argparse.ArgumentParser()
10 parser.add_argument("--run_number", type=int, default=100, help="FL run number to start at.")
11 parser.add_argument("--admin_dir", type=str, default="./admin/", help="Path to admin directory.")
12 parser.add_argument("--username", type=str, default="[email protected]", help="Admin username")
13 parser.add_argument("--app", type=str, default="cifar10_fedavg", help="App to be deployed")
14 parser.add_argument("--port", type=int, default=8003, help="The admin server port")
15 parser.add_argument("--poc", action='store_true', help="Whether admin uses POC mode.")
16 parser.add_argument("--min_clients", type=int, default=8, help="Minimum number of clients.")
17 args = parser.parse_args()
18
19 host = ""
20 port = args.port
21
22 assert os.path.isdir(args.admin_dir), f"admin directory does not exist at {args.admin_dir}"
23
24 # Set up certificate names and admin folders
25 upload_dir = os.path.join(args.admin_dir, "transfer")
26 if not os.path.isdir(upload_dir):
27 os.makedirs(upload_dir)
28 download_dir = os.path.join(args.admin_dir, "download")
29 if not os.path.isdir(download_dir):
30 os.makedirs(download_dir)
31
32 run_number = args.run_number
33
34 # Initialize the runner
35 runner = FLAdminAPIRunner(
36 host=host,
37 port=port,
38 username=args.username,
39 admin_dir=args.admin_dir,
40 poc=args.poc,
41 debug=False,
42 )
43
44 # Run
45 start = time.time()
46 runner.run(run_number, args.app, restart_all_first=False, shutdown_on_error=True, shutdown_at_end=True,
47 timeout=7200, min_clients=args.min_clients) # will time out if not completed in 2 hours
48 print("Total training time", time.time() - start)
49
50
51 if __name__ == "__main__":
52 main()
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/cifar10/run_fl.py b/examples/cifar10/run_fl.py
--- a/examples/cifar10/run_fl.py
+++ b/examples/cifar10/run_fl.py
@@ -1,3 +1,17 @@
+# Copyright (c) 2021, NVIDIA CORPORATION.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import argparse
import os
import time
|
{"golden_diff": "diff --git a/examples/cifar10/run_fl.py b/examples/cifar10/run_fl.py\n--- a/examples/cifar10/run_fl.py\n+++ b/examples/cifar10/run_fl.py\n@@ -1,3 +1,17 @@\n+# Copyright (c) 2021, NVIDIA CORPORATION.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n import argparse\n import os\n import time\n", "issue": "CIFAR10 run_fl.py misses license header\nhttps://github.com/NVIDIA/NVFlare/blob/d784e7be9742b4d1bcfa0f389ea063fec984fe50/examples/cifar10/run_fl.py#L1\n", "before_files": [{"content": "import argparse\nimport os\nimport time\n\nfrom nvflare.fuel.hci.client.fl_admin_api_runner import FLAdminAPIRunner\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--run_number\", type=int, default=100, help=\"FL run number to start at.\")\n parser.add_argument(\"--admin_dir\", type=str, default=\"./admin/\", help=\"Path to admin directory.\")\n parser.add_argument(\"--username\", type=str, default=\"[email protected]\", help=\"Admin username\")\n parser.add_argument(\"--app\", type=str, default=\"cifar10_fedavg\", help=\"App to be deployed\")\n parser.add_argument(\"--port\", type=int, default=8003, help=\"The admin server port\")\n parser.add_argument(\"--poc\", action='store_true', help=\"Whether admin uses POC mode.\")\n parser.add_argument(\"--min_clients\", type=int, default=8, help=\"Minimum number of clients.\")\n args = parser.parse_args()\n\n host = \"\"\n port = args.port\n\n assert os.path.isdir(args.admin_dir), f\"admin directory does not exist at {args.admin_dir}\"\n\n # Set up certificate names and admin folders\n upload_dir = os.path.join(args.admin_dir, \"transfer\")\n if not os.path.isdir(upload_dir):\n os.makedirs(upload_dir)\n download_dir = os.path.join(args.admin_dir, \"download\")\n if not os.path.isdir(download_dir):\n os.makedirs(download_dir)\n\n run_number = args.run_number\n\n # Initialize the runner\n runner = FLAdminAPIRunner(\n host=host,\n port=port,\n username=args.username,\n admin_dir=args.admin_dir,\n poc=args.poc,\n debug=False,\n )\n\n # Run\n start = time.time()\n runner.run(run_number, args.app, restart_all_first=False, shutdown_on_error=True, shutdown_at_end=True,\n timeout=7200, min_clients=args.min_clients) # will time out if not completed in 2 hours\n print(\"Total training time\", time.time() - start)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/cifar10/run_fl.py"}], "after_files": [{"content": "# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\nimport argparse\nimport os\nimport time\n\nfrom nvflare.fuel.hci.client.fl_admin_api_runner import FLAdminAPIRunner\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--run_number\", type=int, default=100, help=\"FL run number to start at.\")\n parser.add_argument(\"--admin_dir\", type=str, default=\"./admin/\", help=\"Path to admin directory.\")\n parser.add_argument(\"--username\", type=str, default=\"[email protected]\", help=\"Admin username\")\n parser.add_argument(\"--app\", type=str, default=\"cifar10_fedavg\", help=\"App to be deployed\")\n parser.add_argument(\"--port\", type=int, default=8003, help=\"The admin server port\")\n parser.add_argument(\"--poc\", action='store_true', help=\"Whether admin uses POC mode.\")\n parser.add_argument(\"--min_clients\", type=int, default=8, help=\"Minimum number of clients.\")\n args = parser.parse_args()\n\n host = \"\"\n port = args.port\n\n assert os.path.isdir(args.admin_dir), f\"admin directory does not exist at {args.admin_dir}\"\n\n # Set up certificate names and admin folders\n upload_dir = os.path.join(args.admin_dir, \"transfer\")\n if not os.path.isdir(upload_dir):\n os.makedirs(upload_dir)\n download_dir = os.path.join(args.admin_dir, \"download\")\n if not os.path.isdir(download_dir):\n os.makedirs(download_dir)\n\n run_number = args.run_number\n\n # Initialize the runner\n runner = FLAdminAPIRunner(\n host=host,\n port=port,\n username=args.username,\n admin_dir=args.admin_dir,\n poc=args.poc,\n debug=False,\n )\n\n # Run\n start = time.time()\n runner.run(run_number, args.app, restart_all_first=False, shutdown_on_error=True, shutdown_at_end=True,\n timeout=7200, min_clients=args.min_clients) # will time out if not completed in 2 hours\n print(\"Total training time\", time.time() - start)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/cifar10/run_fl.py"}]}
| 888 | 202 |
gh_patches_debug_29982
|
rasdani/github-patches
|
git_diff
|
astronomer__astro-sdk-453
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change `export_file` to return `File` object
**Context**
In order to allow users to perform subsequent actions on an exported file (while maintaining a functional structure), we should allow the `export_file` function to return a file object.
* Astro-SDK version: 0.9..1
* Request by: @jlaneve
* Analysed by @dimberman
**Problem**
At the moment a user who wants to use the `output_file` object would need to explicitly set dependencies like this:
```
output_file = File(path="/tmp/saved_df.csv")
with sample_dag:
table = aql.load_file(input_file=File(path=data_path), output_table=test_table)
export = aql.export_file(
input_data=table,
output_file=output_file,
if_exists="replace",
)
res_df = aql.load_file(input_file=output_file)
export >> res_df
```
**Desired behaviour**
```
with sample_dag:
table = aql.load_file(input_file=File(path=data_path), output_table=test_table)
exported_file = aql.export_file(
input_data=table,
output_file=File(path="/tmp/saved_df.csv"),
if_exists="replace",
)
res_df = aql.load_file(input_file=exported_file)
```
**Acceptance criteria**
* Change `export_file` so it returns the `File` instance, as opposed to `None`
Since there is no documentation about this task, we don't need to update the documentation for it. To create documentation for this feature should be part of another issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/astro/sql/operators/export_file.py`
Content:
```
1 from typing import Optional, Union
2
3 import pandas as pd
4 from airflow.models import BaseOperator
5 from airflow.models.xcom_arg import XComArg
6
7 from astro.constants import ExportExistsStrategy
8 from astro.databases import create_database
9 from astro.files import File
10 from astro.sql.table import Table
11 from astro.utils.task_id_helper import get_task_id
12
13
14 class ExportFile(BaseOperator):
15 """Write SQL table to csv/parquet on local/S3/GCS.
16
17 :param input_data: Table to convert to file
18 :param output_file: File object containing the path to the file and connection id.
19 :param if_exists: Overwrite file if exists. Default False.
20 """
21
22 template_fields = ("input_data", "output_file")
23
24 def __init__(
25 self,
26 input_data: Union[Table, pd.DataFrame],
27 output_file: File,
28 if_exists: ExportExistsStrategy = "exception",
29 **kwargs,
30 ) -> None:
31 super().__init__(**kwargs)
32 self.output_file = output_file
33 self.input_data = input_data
34 self.if_exists = if_exists
35 self.kwargs = kwargs
36
37 def execute(self, context: dict) -> None:
38 """Write SQL table to csv/parquet on local/S3/GCS.
39
40 Infers SQL database type based on connection.
41 """
42 # Infer db type from `input_conn_id`.
43 if isinstance(self.input_data, Table):
44 database = create_database(self.input_data.conn_id)
45 self.input_data = database.populate_table_metadata(self.input_data)
46 df = database.export_table_to_pandas_dataframe(self.input_data)
47 elif isinstance(self.input_data, pd.DataFrame):
48 df = self.input_data
49 else:
50 raise ValueError(
51 f"Expected input_table to be Table or dataframe. Got {type(self.input_data)}"
52 )
53 # Write file if overwrite == True or if file doesn't exist.
54 if self.if_exists == "replace" or not self.output_file.exists():
55 self.output_file.create_from_dataframe(df)
56 else:
57 raise FileExistsError(f"{self.output_file.path} file already exists.")
58
59
60 def export_file(
61 input_data: Union[Table, pd.DataFrame],
62 output_file: File,
63 if_exists: ExportExistsStrategy = "exception",
64 task_id: Optional[str] = None,
65 **kwargs,
66 ) -> XComArg:
67 """Convert SaveFile into a function. Returns XComArg.
68
69 Returns an XComArg object.
70
71 :param output_file: Path and conn_id
72 :param input_data: Input table / dataframe
73 :param if_exists: Overwrite file if exists. Default "exception"
74 :param task_id: task id, optional
75 """
76
77 task_id = (
78 task_id if task_id is not None else get_task_id("export_file", output_file.path)
79 )
80
81 return ExportFile(
82 task_id=task_id,
83 output_file=output_file,
84 input_data=input_data,
85 if_exists=if_exists,
86 ).output
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/astro/sql/operators/export_file.py b/src/astro/sql/operators/export_file.py
--- a/src/astro/sql/operators/export_file.py
+++ b/src/astro/sql/operators/export_file.py
@@ -34,7 +34,7 @@
self.if_exists = if_exists
self.kwargs = kwargs
- def execute(self, context: dict) -> None:
+ def execute(self, context: dict) -> File:
"""Write SQL table to csv/parquet on local/S3/GCS.
Infers SQL database type based on connection.
@@ -53,6 +53,7 @@
# Write file if overwrite == True or if file doesn't exist.
if self.if_exists == "replace" or not self.output_file.exists():
self.output_file.create_from_dataframe(df)
+ return self.output_file
else:
raise FileExistsError(f"{self.output_file.path} file already exists.")
@@ -66,7 +67,20 @@
) -> XComArg:
"""Convert SaveFile into a function. Returns XComArg.
- Returns an XComArg object.
+ Returns an XComArg object of type File which matches the output_file parameter.
+
+ This will allow users to perform further actions with the exported file.
+
+ e.g.
+
+ with sample_dag:
+ table = aql.load_file(input_file=File(path=data_path), output_table=test_table)
+ exported_file = aql.export_file(
+ input_data=table,
+ output_file=File(path="/tmp/saved_df.csv"),
+ if_exists="replace",
+ )
+ res_df = aql.load_file(input_file=exported_file)
:param output_file: Path and conn_id
:param input_data: Input table / dataframe
|
{"golden_diff": "diff --git a/src/astro/sql/operators/export_file.py b/src/astro/sql/operators/export_file.py\n--- a/src/astro/sql/operators/export_file.py\n+++ b/src/astro/sql/operators/export_file.py\n@@ -34,7 +34,7 @@\n self.if_exists = if_exists\n self.kwargs = kwargs\n \n- def execute(self, context: dict) -> None:\n+ def execute(self, context: dict) -> File:\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n \n Infers SQL database type based on connection.\n@@ -53,6 +53,7 @@\n # Write file if overwrite == True or if file doesn't exist.\n if self.if_exists == \"replace\" or not self.output_file.exists():\n self.output_file.create_from_dataframe(df)\n+ return self.output_file\n else:\n raise FileExistsError(f\"{self.output_file.path} file already exists.\")\n \n@@ -66,7 +67,20 @@\n ) -> XComArg:\n \"\"\"Convert SaveFile into a function. Returns XComArg.\n \n- Returns an XComArg object.\n+ Returns an XComArg object of type File which matches the output_file parameter.\n+\n+ This will allow users to perform further actions with the exported file.\n+\n+ e.g.\n+\n+ with sample_dag:\n+ table = aql.load_file(input_file=File(path=data_path), output_table=test_table)\n+ exported_file = aql.export_file(\n+ input_data=table,\n+ output_file=File(path=\"/tmp/saved_df.csv\"),\n+ if_exists=\"replace\",\n+ )\n+ res_df = aql.load_file(input_file=exported_file)\n \n :param output_file: Path and conn_id\n :param input_data: Input table / dataframe\n", "issue": "Change `export_file` to return `File` object\n**Context**\r\n\r\nIn order to allow users to perform subsequent actions on an exported file (while maintaining a functional structure), we should allow the `export_file` function to return a file object.\r\n\r\n* Astro-SDK version: 0.9..1\r\n* Request by: @jlaneve\r\n* Analysed by @dimberman \r\n\r\n**Problem**\r\n\r\nAt the moment a user who wants to use the `output_file` object would need to explicitly set dependencies like this:\r\n\r\n```\r\n output_file = File(path=\"/tmp/saved_df.csv\")\r\n with sample_dag:\r\n table = aql.load_file(input_file=File(path=data_path), output_table=test_table)\r\n export = aql.export_file(\r\n input_data=table,\r\n output_file=output_file,\r\n if_exists=\"replace\",\r\n )\r\n res_df = aql.load_file(input_file=output_file)\r\n export >> res_df\r\n```\r\n\r\n**Desired behaviour**\r\n\r\n```\r\n with sample_dag:\r\n table = aql.load_file(input_file=File(path=data_path), output_table=test_table)\r\n exported_file = aql.export_file(\r\n input_data=table,\r\n output_file=File(path=\"/tmp/saved_df.csv\"),\r\n if_exists=\"replace\",\r\n )\r\n res_df = aql.load_file(input_file=exported_file)\r\n```\r\n\r\n**Acceptance criteria**\r\n* Change `export_file` so it returns the `File` instance, as opposed to `None`\r\n\r\nSince there is no documentation about this task, we don't need to update the documentation for it. 
To create documentation for this feature should be part of another issue.\n", "before_files": [{"content": "from typing import Optional, Union\n\nimport pandas as pd\nfrom airflow.models import BaseOperator\nfrom airflow.models.xcom_arg import XComArg\n\nfrom astro.constants import ExportExistsStrategy\nfrom astro.databases import create_database\nfrom astro.files import File\nfrom astro.sql.table import Table\nfrom astro.utils.task_id_helper import get_task_id\n\n\nclass ExportFile(BaseOperator):\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n\n :param input_data: Table to convert to file\n :param output_file: File object containing the path to the file and connection id.\n :param if_exists: Overwrite file if exists. Default False.\n \"\"\"\n\n template_fields = (\"input_data\", \"output_file\")\n\n def __init__(\n self,\n input_data: Union[Table, pd.DataFrame],\n output_file: File,\n if_exists: ExportExistsStrategy = \"exception\",\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.output_file = output_file\n self.input_data = input_data\n self.if_exists = if_exists\n self.kwargs = kwargs\n\n def execute(self, context: dict) -> None:\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n\n Infers SQL database type based on connection.\n \"\"\"\n # Infer db type from `input_conn_id`.\n if isinstance(self.input_data, Table):\n database = create_database(self.input_data.conn_id)\n self.input_data = database.populate_table_metadata(self.input_data)\n df = database.export_table_to_pandas_dataframe(self.input_data)\n elif isinstance(self.input_data, pd.DataFrame):\n df = self.input_data\n else:\n raise ValueError(\n f\"Expected input_table to be Table or dataframe. Got {type(self.input_data)}\"\n )\n # Write file if overwrite == True or if file doesn't exist.\n if self.if_exists == \"replace\" or not self.output_file.exists():\n self.output_file.create_from_dataframe(df)\n else:\n raise FileExistsError(f\"{self.output_file.path} file already exists.\")\n\n\ndef export_file(\n input_data: Union[Table, pd.DataFrame],\n output_file: File,\n if_exists: ExportExistsStrategy = \"exception\",\n task_id: Optional[str] = None,\n **kwargs,\n) -> XComArg:\n \"\"\"Convert SaveFile into a function. Returns XComArg.\n\n Returns an XComArg object.\n\n :param output_file: Path and conn_id\n :param input_data: Input table / dataframe\n :param if_exists: Overwrite file if exists. Default \"exception\"\n :param task_id: task id, optional\n \"\"\"\n\n task_id = (\n task_id if task_id is not None else get_task_id(\"export_file\", output_file.path)\n )\n\n return ExportFile(\n task_id=task_id,\n output_file=output_file,\n input_data=input_data,\n if_exists=if_exists,\n ).output\n", "path": "src/astro/sql/operators/export_file.py"}], "after_files": [{"content": "from typing import Optional, Union\n\nimport pandas as pd\nfrom airflow.models import BaseOperator\nfrom airflow.models.xcom_arg import XComArg\n\nfrom astro.constants import ExportExistsStrategy\nfrom astro.databases import create_database\nfrom astro.files import File\nfrom astro.sql.table import Table\nfrom astro.utils.task_id_helper import get_task_id\n\n\nclass ExportFile(BaseOperator):\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n\n :param input_data: Table to convert to file\n :param output_file: File object containing the path to the file and connection id.\n :param if_exists: Overwrite file if exists. 
Default False.\n \"\"\"\n\n template_fields = (\"input_data\", \"output_file\")\n\n def __init__(\n self,\n input_data: Union[Table, pd.DataFrame],\n output_file: File,\n if_exists: ExportExistsStrategy = \"exception\",\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.output_file = output_file\n self.input_data = input_data\n self.if_exists = if_exists\n self.kwargs = kwargs\n\n def execute(self, context: dict) -> File:\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n\n Infers SQL database type based on connection.\n \"\"\"\n # Infer db type from `input_conn_id`.\n if isinstance(self.input_data, Table):\n database = create_database(self.input_data.conn_id)\n self.input_data = database.populate_table_metadata(self.input_data)\n df = database.export_table_to_pandas_dataframe(self.input_data)\n elif isinstance(self.input_data, pd.DataFrame):\n df = self.input_data\n else:\n raise ValueError(\n f\"Expected input_table to be Table or dataframe. Got {type(self.input_data)}\"\n )\n # Write file if overwrite == True or if file doesn't exist.\n if self.if_exists == \"replace\" or not self.output_file.exists():\n self.output_file.create_from_dataframe(df)\n return self.output_file\n else:\n raise FileExistsError(f\"{self.output_file.path} file already exists.\")\n\n\ndef export_file(\n input_data: Union[Table, pd.DataFrame],\n output_file: File,\n if_exists: ExportExistsStrategy = \"exception\",\n task_id: Optional[str] = None,\n **kwargs,\n) -> XComArg:\n \"\"\"Convert SaveFile into a function. Returns XComArg.\n\n Returns an XComArg object of type File which matches the output_file parameter.\n\n This will allow users to perform further actions with the exported file.\n\n e.g.\n\n with sample_dag:\n table = aql.load_file(input_file=File(path=data_path), output_table=test_table)\n exported_file = aql.export_file(\n input_data=table,\n output_file=File(path=\"/tmp/saved_df.csv\"),\n if_exists=\"replace\",\n )\n res_df = aql.load_file(input_file=exported_file)\n\n :param output_file: Path and conn_id\n :param input_data: Input table / dataframe\n :param if_exists: Overwrite file if exists. Default \"exception\"\n :param task_id: task id, optional\n \"\"\"\n\n task_id = (\n task_id if task_id is not None else get_task_id(\"export_file\", output_file.path)\n )\n\n return ExportFile(\n task_id=task_id,\n output_file=output_file,\n input_data=input_data,\n if_exists=if_exists,\n ).output\n", "path": "src/astro/sql/operators/export_file.py"}]}
| 1,415 | 395 |
gh_patches_debug_25852
|
rasdani/github-patches
|
git_diff
|
Zeroto521__my-data-toolkit-540
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DEP: Drop `inplace` option of `filter_in`
<!--
Thanks for contributing a pull request!
Please follow these standard acronyms to start the commit message:
- ENH: enhancement
- BUG: bug fix
- DOC: documentation
- TYP: type annotations
- TST: addition or modification of tests
- MAINT: maintenance commit (refactoring, typos, etc.)
- BLD: change related to building
- REL: related to releasing
- API: an (incompatible) API change
- DEP: deprecate something, or remove a deprecated object
- DEV: development tool or utility
- REV: revert an earlier commit
- PERF: performance improvement
- BOT: always commit via a bot
- CI: related to CI or CD
- CLN: Code cleanup
-->
- [x] closes #522
- [ ] whatsnew entry
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dtoolkit/accessor/dataframe/drop_inf.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING
4
5 import numpy as np
6 import pandas as pd
7 from pandas.util._validators import validate_bool_kwarg
8
9 from dtoolkit.accessor._util import get_inf_range
10 from dtoolkit.accessor.dataframe import boolean # noqa
11 from dtoolkit.accessor.register import register_dataframe_method
12
13
14 if TYPE_CHECKING:
15 from dtoolkit._typing import IntOrStr
16
17
18 @register_dataframe_method
19 def drop_inf(
20 df: pd.DataFrame,
21 axis: IntOrStr = 0,
22 how: str = "any",
23 inf: str = "all",
24 subset: list[str] = None,
25 inplace: bool = False,
26 ) -> pd.DataFrame | None:
27 """
28 Remove ``inf`` values.
29
30 Parameters
31 ----------
32 axis : {0 or 'index', 1 or 'columns'}, default 0
33 Determine if rows or columns which contain ``inf`` values are
34 removed.
35
36 * 0, or 'index' : Drop rows which contain ``inf`` values.
37 * 1, or 'columns' : Drop columns which contain ``inf`` value.
38
39 how : {'any', 'all'}, default 'any'
40 Determine if row or column is removed from :obj:`~pandas.DataFrame`,
41 when we have at least one ``inf`` or all ``inf``.
42
43 * 'any' : If any ``inf`` values are present, drop that row or column.
44 * 'all' : If all values are ``inf``, drop that row or column.
45
46 inf : {'all', 'pos', 'neg'}, default 'all'
47 * 'all' : Remove ``inf`` and ``-inf``.
48 * 'pos' : Only remove ``inf``.
49 * 'neg' : Only remove ``-inf``.
50
51 subset : array-like, optional
52 Labels along other axis to consider, e.g. if you are dropping rows
53 these would be a list of columns to include.
54
55 inplace : bool, default False
56 If True, do operation inplace and return None.
57
58 Returns
59 -------
60 DataFrame or None
61 DataFrame with ``inf`` entries dropped from it or None if
62 ``inplace=True``.
63
64 See Also
65 --------
66 dtoolkit.accessor.series.drop_inf
67 :obj:`~pandas.Series` drops ``inf`` values.
68
69 Examples
70 --------
71 >>> import dtoolkit.accessor
72 >>> import pandas as pd
73 >>> import numpy as np
74 >>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
75 ... "toy": [np.inf, 'Batmobile', 'Bullwhip'],
76 ... "born": [np.inf, pd.Timestamp("1940-04-25"),
77 ... -np.inf]})
78 >>> df
79 name toy born
80 0 Alfred inf inf
81 1 Batman Batmobile 1940-04-25 00:00:00
82 2 Catwoman Bullwhip -inf
83
84 Drop the rows where at least one element is inf and -inf.
85
86 >>> df.drop_inf()
87 name toy born
88 1 Batman Batmobile 1940-04-25 00:00:00
89
90 Drop the columns where at least one element is inf and -inf.
91
92 >>> df.drop_inf(axis='columns')
93 name
94 0 Alfred
95 1 Batman
96 2 Catwoman
97
98 Drop the rows where all elements are inf and -inf.
99
100 >>> df.drop_inf(how='all')
101 name toy born
102 0 Alfred inf inf
103 1 Batman Batmobile 1940-04-25 00:00:00
104 2 Catwoman Bullwhip -inf
105
106 Drop the rows where at least one element is -inf.
107
108 >>> df.drop_inf(inf='neg')
109 name toy born
110 0 Alfred inf inf
111 1 Batman Batmobile 1940-04-25 00:00:00
112
113 Define in which columns to look for inf and -inf values.
114
115 >>> df.drop_inf(subset=['name', 'toy'])
116 name toy born
117 1 Batman Batmobile 1940-04-25 00:00:00
118 2 Catwoman Bullwhip -inf
119
120 Keep the DataFrame with valid entries in the same variable.
121
122 >>> df.drop_inf(inplace=True)
123 >>> df
124 name toy born
125 1 Batman Batmobile 1940-04-25 00:00:00
126 """
127
128 inplace = validate_bool_kwarg(inplace, "inplace")
129
130 axis = df._get_axis_number(axis)
131 agg_axis = 1 - axis
132
133 agg_obj = df
134 if subset is not None:
135 ax = df._get_axis(agg_axis)
136 indices = ax.get_indexer_for(subset)
137 check = indices == -1
138 if check.any():
139 raise KeyError(list(np.compress(check, subset)))
140
141 agg_obj = df.take(indices, axis=agg_axis)
142
143 inf_range = get_inf_range(inf)
144 mask = agg_obj.isin(inf_range).boolean(how=how, axis=agg_axis)
145 result = df.loc(axis=axis)[~mask]
146
147 if not inplace:
148 return result
149
150 df._update_inplace(result)
151
```
Path: `dtoolkit/accessor/series/drop_inf.py`
Content:
```
1 from __future__ import annotations
2
3 import pandas as pd
4 from pandas.util._validators import validate_bool_kwarg
5
6 from dtoolkit.accessor._util import get_inf_range
7 from dtoolkit.accessor.register import register_series_method
8
9
10 @register_series_method
11 def drop_inf(
12 s: pd.Series,
13 inf: str = "all",
14 inplace: bool = False,
15 ) -> pd.Series | None:
16 """
17 Remove ``inf`` values.
18
19 Parameters
20 ----------
21 inf : {'all', 'pos', 'neg'}, default 'all'
22
23 * 'all' : Remove ``inf`` and ``-inf``.
24 * 'pos' : Only remove ``inf``.
25 * 'neg' : Only remove ``-inf``.
26
27 inplace : bool, default False
28 If True, do operation inplace and return None.
29
30 Returns
31 -------
32 Series or None
33 Series with ``inf`` entries dropped from it or None if
34 ``inplace=True``.
35
36 See Also
37 --------
38 dtoolkit.accessor.dataframe.drop_inf
39 :obj:`~pandas.DataFrame` drops rows or columns which contain ``inf``
40 values.
41
42 Examples
43 --------
44 >>> import dtoolkit.accessor
45 >>> import pandas as pd
46 >>> import numpy as np
47 >>> s = pd.Series([1., 2., np.inf])
48 >>> s
49 0 1.0
50 1 2.0
51 2 inf
52 dtype: float64
53
54 Drop inf values from a Series.
55
56 >>> s.drop_inf()
57 0 1.0
58 1 2.0
59 dtype: float64
60
61 Keep the Series with valid entries in the same variable.
62
63 >>> s.drop_inf(inplace=True)
64 >>> s
65 0 1.0
66 1 2.0
67 dtype: float64
68 """
69
70 inplace = validate_bool_kwarg(inplace, "inplace")
71 inf_range = get_inf_range(inf)
72 mask = s.isin(inf_range)
73 result = s[~mask]
74
75 if not inplace:
76 return result
77
78 s._update_inplace(result)
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dtoolkit/accessor/dataframe/drop_inf.py b/dtoolkit/accessor/dataframe/drop_inf.py
--- a/dtoolkit/accessor/dataframe/drop_inf.py
+++ b/dtoolkit/accessor/dataframe/drop_inf.py
@@ -9,6 +9,7 @@
from dtoolkit.accessor._util import get_inf_range
from dtoolkit.accessor.dataframe import boolean # noqa
from dtoolkit.accessor.register import register_dataframe_method
+from dtoolkit.util._decorator import deprecated_kwargs
if TYPE_CHECKING:
@@ -16,6 +17,13 @@
@register_dataframe_method
+@deprecated_kwargs(
+ "inplace",
+ message=(
+ "The keyword argument '{argument}' of '{func_name}' is deprecated and will "
+ "be removed in 0.0.17. (Warning added DToolKit 0.0.16)"
+ ),
+)
def drop_inf(
df: pd.DataFrame,
axis: IntOrStr = 0,
diff --git a/dtoolkit/accessor/series/drop_inf.py b/dtoolkit/accessor/series/drop_inf.py
--- a/dtoolkit/accessor/series/drop_inf.py
+++ b/dtoolkit/accessor/series/drop_inf.py
@@ -5,9 +5,17 @@
from dtoolkit.accessor._util import get_inf_range
from dtoolkit.accessor.register import register_series_method
+from dtoolkit.util._decorator import deprecated_kwargs
@register_series_method
+@deprecated_kwargs(
+ "inplace",
+ message=(
+ "The keyword argument '{argument}' of '{func_name}' is deprecated and will "
+ "be removed in 0.0.17. (Warning added DToolKit 0.0.16)"
+ ),
+)
def drop_inf(
s: pd.Series,
inf: str = "all",
|
{"golden_diff": "diff --git a/dtoolkit/accessor/dataframe/drop_inf.py b/dtoolkit/accessor/dataframe/drop_inf.py\n--- a/dtoolkit/accessor/dataframe/drop_inf.py\n+++ b/dtoolkit/accessor/dataframe/drop_inf.py\n@@ -9,6 +9,7 @@\n from dtoolkit.accessor._util import get_inf_range\n from dtoolkit.accessor.dataframe import boolean # noqa\n from dtoolkit.accessor.register import register_dataframe_method\n+from dtoolkit.util._decorator import deprecated_kwargs\n \n \n if TYPE_CHECKING:\n@@ -16,6 +17,13 @@\n \n \n @register_dataframe_method\n+@deprecated_kwargs(\n+ \"inplace\",\n+ message=(\n+ \"The keyword argument '{argument}' of '{func_name}' is deprecated and will \"\n+ \"be removed in 0.0.17. (Warning added DToolKit 0.0.16)\"\n+ ),\n+)\n def drop_inf(\n df: pd.DataFrame,\n axis: IntOrStr = 0,\ndiff --git a/dtoolkit/accessor/series/drop_inf.py b/dtoolkit/accessor/series/drop_inf.py\n--- a/dtoolkit/accessor/series/drop_inf.py\n+++ b/dtoolkit/accessor/series/drop_inf.py\n@@ -5,9 +5,17 @@\n \n from dtoolkit.accessor._util import get_inf_range\n from dtoolkit.accessor.register import register_series_method\n+from dtoolkit.util._decorator import deprecated_kwargs\n \n \n @register_series_method\n+@deprecated_kwargs(\n+ \"inplace\",\n+ message=(\n+ \"The keyword argument '{argument}' of '{func_name}' is deprecated and will \"\n+ \"be removed in 0.0.17. (Warning added DToolKit 0.0.16)\"\n+ ),\n+)\n def drop_inf(\n s: pd.Series,\n inf: str = \"all\",\n", "issue": "DEP: Drop `inplace` option of `filter_in`\n<!--\r\nThanks for contributing a pull request!\r\n\r\nPlease follow these standard acronyms to start the commit message:\r\n\r\n- ENH: enhancement\r\n- BUG: bug fix\r\n- DOC: documentation\r\n- TYP: type annotations\r\n- TST: addition or modification of tests\r\n- MAINT: maintenance commit (refactoring, typos, etc.)\r\n- BLD: change related to building\r\n- REL: related to releasing\r\n- API: an (incompatible) API change\r\n- DEP: deprecate something, or remove a deprecated object\r\n- DEV: development tool or utility\r\n- REV: revert an earlier commit\r\n- PERF: performance improvement\r\n- BOT: always commit via a bot\r\n- CI: related to CI or CD\r\n- CLN: Code cleanup\r\n-->\r\n\r\n- [x] closes #522\r\n- [ ] whatsnew entry\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom dtoolkit.accessor._util import get_inf_range\nfrom dtoolkit.accessor.dataframe import boolean # noqa\nfrom dtoolkit.accessor.register import register_dataframe_method\n\n\nif TYPE_CHECKING:\n from dtoolkit._typing import IntOrStr\n\n\n@register_dataframe_method\ndef drop_inf(\n df: pd.DataFrame,\n axis: IntOrStr = 0,\n how: str = \"any\",\n inf: str = \"all\",\n subset: list[str] = None,\n inplace: bool = False,\n) -> pd.DataFrame | None:\n \"\"\"\n Remove ``inf`` values.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Determine if rows or columns which contain ``inf`` values are\n removed.\n\n * 0, or 'index' : Drop rows which contain ``inf`` values.\n * 1, or 'columns' : Drop columns which contain ``inf`` value.\n\n how : {'any', 'all'}, default 'any'\n Determine if row or column is removed from :obj:`~pandas.DataFrame`,\n when we have at least one ``inf`` or all ``inf``.\n\n * 'any' : If any ``inf`` values are present, drop that row or column.\n * 'all' : If all values are ``inf``, drop that row or column.\n\n inf : {'all', 'pos', 
'neg'}, default 'all'\n * 'all' : Remove ``inf`` and ``-inf``.\n * 'pos' : Only remove ``inf``.\n * 'neg' : Only remove ``-inf``.\n\n subset : array-like, optional\n Labels along other axis to consider, e.g. if you are dropping rows\n these would be a list of columns to include.\n\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n DataFrame or None\n DataFrame with ``inf`` entries dropped from it or None if\n ``inplace=True``.\n\n See Also\n --------\n dtoolkit.accessor.series.drop_inf\n :obj:`~pandas.Series` drops ``inf`` values.\n\n Examples\n --------\n >>> import dtoolkit.accessor\n >>> import pandas as pd\n >>> import numpy as np\n >>> df = pd.DataFrame({\"name\": ['Alfred', 'Batman', 'Catwoman'],\n ... \"toy\": [np.inf, 'Batmobile', 'Bullwhip'],\n ... \"born\": [np.inf, pd.Timestamp(\"1940-04-25\"),\n ... -np.inf]})\n >>> df\n name toy born\n 0 Alfred inf inf\n 1 Batman Batmobile 1940-04-25 00:00:00\n 2 Catwoman Bullwhip -inf\n\n Drop the rows where at least one element is inf and -inf.\n\n >>> df.drop_inf()\n name toy born\n 1 Batman Batmobile 1940-04-25 00:00:00\n\n Drop the columns where at least one element is inf and -inf.\n\n >>> df.drop_inf(axis='columns')\n name\n 0 Alfred\n 1 Batman\n 2 Catwoman\n\n Drop the rows where all elements are inf and -inf.\n\n >>> df.drop_inf(how='all')\n name toy born\n 0 Alfred inf inf\n 1 Batman Batmobile 1940-04-25 00:00:00\n 2 Catwoman Bullwhip -inf\n\n Drop the rows where at least one element is -inf.\n\n >>> df.drop_inf(inf='neg')\n name toy born\n 0 Alfred inf inf\n 1 Batman Batmobile 1940-04-25 00:00:00\n\n Define in which columns to look for inf and -inf values.\n\n >>> df.drop_inf(subset=['name', 'toy'])\n name toy born\n 1 Batman Batmobile 1940-04-25 00:00:00\n 2 Catwoman Bullwhip -inf\n\n Keep the DataFrame with valid entries in the same variable.\n\n >>> df.drop_inf(inplace=True)\n >>> df\n name toy born\n 1 Batman Batmobile 1940-04-25 00:00:00\n \"\"\"\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n axis = df._get_axis_number(axis)\n agg_axis = 1 - axis\n\n agg_obj = df\n if subset is not None:\n ax = df._get_axis(agg_axis)\n indices = ax.get_indexer_for(subset)\n check = indices == -1\n if check.any():\n raise KeyError(list(np.compress(check, subset)))\n\n agg_obj = df.take(indices, axis=agg_axis)\n\n inf_range = get_inf_range(inf)\n mask = agg_obj.isin(inf_range).boolean(how=how, axis=agg_axis)\n result = df.loc(axis=axis)[~mask]\n\n if not inplace:\n return result\n\n df._update_inplace(result)\n", "path": "dtoolkit/accessor/dataframe/drop_inf.py"}, {"content": "from __future__ import annotations\n\nimport pandas as pd\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom dtoolkit.accessor._util import get_inf_range\nfrom dtoolkit.accessor.register import register_series_method\n\n\n@register_series_method\ndef drop_inf(\n s: pd.Series,\n inf: str = \"all\",\n inplace: bool = False,\n) -> pd.Series | None:\n \"\"\"\n Remove ``inf`` values.\n\n Parameters\n ----------\n inf : {'all', 'pos', 'neg'}, default 'all'\n\n * 'all' : Remove ``inf`` and ``-inf``.\n * 'pos' : Only remove ``inf``.\n * 'neg' : Only remove ``-inf``.\n\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n Series or None\n Series with ``inf`` entries dropped from it or None if\n ``inplace=True``.\n\n See Also\n --------\n dtoolkit.accessor.dataframe.drop_inf\n :obj:`~pandas.DataFrame` drops rows or columns which contain ``inf``\n 
values.\n\n Examples\n --------\n >>> import dtoolkit.accessor\n >>> import pandas as pd\n >>> import numpy as np\n >>> s = pd.Series([1., 2., np.inf])\n >>> s\n 0 1.0\n 1 2.0\n 2 inf\n dtype: float64\n\n Drop inf values from a Series.\n\n >>> s.drop_inf()\n 0 1.0\n 1 2.0\n dtype: float64\n\n Keep the Series with valid entries in the same variable.\n\n >>> s.drop_inf(inplace=True)\n >>> s\n 0 1.0\n 1 2.0\n dtype: float64\n \"\"\"\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n inf_range = get_inf_range(inf)\n mask = s.isin(inf_range)\n result = s[~mask]\n\n if not inplace:\n return result\n\n s._update_inplace(result)\n", "path": "dtoolkit/accessor/series/drop_inf.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom dtoolkit.accessor._util import get_inf_range\nfrom dtoolkit.accessor.dataframe import boolean # noqa\nfrom dtoolkit.accessor.register import register_dataframe_method\nfrom dtoolkit.util._decorator import deprecated_kwargs\n\n\nif TYPE_CHECKING:\n from dtoolkit._typing import IntOrStr\n\n\n@register_dataframe_method\n@deprecated_kwargs(\n \"inplace\",\n message=(\n \"The keyword argument '{argument}' of '{func_name}' is deprecated and will \"\n \"be removed in 0.0.17. (Warning added DToolKit 0.0.16)\"\n ),\n)\ndef drop_inf(\n df: pd.DataFrame,\n axis: IntOrStr = 0,\n how: str = \"any\",\n inf: str = \"all\",\n subset: list[str] = None,\n inplace: bool = False,\n) -> pd.DataFrame | None:\n \"\"\"\n Remove ``inf`` values.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Determine if rows or columns which contain ``inf`` values are\n removed.\n\n * 0, or 'index' : Drop rows which contain ``inf`` values.\n * 1, or 'columns' : Drop columns which contain ``inf`` value.\n\n how : {'any', 'all'}, default 'any'\n Determine if row or column is removed from :obj:`~pandas.DataFrame`,\n when we have at least one ``inf`` or all ``inf``.\n\n * 'any' : If any ``inf`` values are present, drop that row or column.\n * 'all' : If all values are ``inf``, drop that row or column.\n\n inf : {'all', 'pos', 'neg'}, default 'all'\n * 'all' : Remove ``inf`` and ``-inf``.\n * 'pos' : Only remove ``inf``.\n * 'neg' : Only remove ``-inf``.\n\n subset : array-like, optional\n Labels along other axis to consider, e.g. if you are dropping rows\n these would be a list of columns to include.\n\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n DataFrame or None\n DataFrame with ``inf`` entries dropped from it or None if\n ``inplace=True``.\n\n See Also\n --------\n dtoolkit.accessor.series.drop_inf\n :obj:`~pandas.Series` drops ``inf`` values.\n\n Examples\n --------\n >>> import dtoolkit.accessor\n >>> import pandas as pd\n >>> import numpy as np\n >>> df = pd.DataFrame({\"name\": ['Alfred', 'Batman', 'Catwoman'],\n ... \"toy\": [np.inf, 'Batmobile', 'Bullwhip'],\n ... \"born\": [np.inf, pd.Timestamp(\"1940-04-25\"),\n ... 
-np.inf]})\n >>> df\n name toy born\n 0 Alfred inf inf\n 1 Batman Batmobile 1940-04-25 00:00:00\n 2 Catwoman Bullwhip -inf\n\n Drop the rows where at least one element is inf and -inf.\n\n >>> df.drop_inf()\n name toy born\n 1 Batman Batmobile 1940-04-25 00:00:00\n\n Drop the columns where at least one element is inf and -inf.\n\n >>> df.drop_inf(axis='columns')\n name\n 0 Alfred\n 1 Batman\n 2 Catwoman\n\n Drop the rows where all elements are inf and -inf.\n\n >>> df.drop_inf(how='all')\n name toy born\n 0 Alfred inf inf\n 1 Batman Batmobile 1940-04-25 00:00:00\n 2 Catwoman Bullwhip -inf\n\n Drop the rows where at least one element is -inf.\n\n >>> df.drop_inf(inf='neg')\n name toy born\n 0 Alfred inf inf\n 1 Batman Batmobile 1940-04-25 00:00:00\n\n Define in which columns to look for inf and -inf values.\n\n >>> df.drop_inf(subset=['name', 'toy'])\n name toy born\n 1 Batman Batmobile 1940-04-25 00:00:00\n 2 Catwoman Bullwhip -inf\n\n Keep the DataFrame with valid entries in the same variable.\n\n >>> df.drop_inf(inplace=True)\n >>> df\n name toy born\n 1 Batman Batmobile 1940-04-25 00:00:00\n \"\"\"\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n axis = df._get_axis_number(axis)\n agg_axis = 1 - axis\n\n agg_obj = df\n if subset is not None:\n ax = df._get_axis(agg_axis)\n indices = ax.get_indexer_for(subset)\n check = indices == -1\n if check.any():\n raise KeyError(list(np.compress(check, subset)))\n\n agg_obj = df.take(indices, axis=agg_axis)\n\n inf_range = get_inf_range(inf)\n mask = agg_obj.isin(inf_range).boolean(how=how, axis=agg_axis)\n result = df.loc(axis=axis)[~mask]\n\n if not inplace:\n return result\n\n df._update_inplace(result)\n", "path": "dtoolkit/accessor/dataframe/drop_inf.py"}, {"content": "from __future__ import annotations\n\nimport pandas as pd\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom dtoolkit.accessor._util import get_inf_range\nfrom dtoolkit.accessor.register import register_series_method\nfrom dtoolkit.util._decorator import deprecated_kwargs\n\n\n@register_series_method\n@deprecated_kwargs(\n \"inplace\",\n message=(\n \"The keyword argument '{argument}' of '{func_name}' is deprecated and will \"\n \"be removed in 0.0.17. 
(Warning added DToolKit 0.0.16)\"\n ),\n)\ndef drop_inf(\n s: pd.Series,\n inf: str = \"all\",\n inplace: bool = False,\n) -> pd.Series | None:\n \"\"\"\n Remove ``inf`` values.\n\n Parameters\n ----------\n inf : {'all', 'pos', 'neg'}, default 'all'\n\n * 'all' : Remove ``inf`` and ``-inf``.\n * 'pos' : Only remove ``inf``.\n * 'neg' : Only remove ``-inf``.\n\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n Series or None\n Series with ``inf`` entries dropped from it or None if\n ``inplace=True``.\n\n See Also\n --------\n dtoolkit.accessor.dataframe.drop_inf\n :obj:`~pandas.DataFrame` drops rows or columns which contain ``inf``\n values.\n\n Examples\n --------\n >>> import dtoolkit.accessor\n >>> import pandas as pd\n >>> import numpy as np\n >>> s = pd.Series([1., 2., np.inf])\n >>> s\n 0 1.0\n 1 2.0\n 2 inf\n dtype: float64\n\n Drop inf values from a Series.\n\n >>> s.drop_inf()\n 0 1.0\n 1 2.0\n dtype: float64\n\n Keep the Series with valid entries in the same variable.\n\n >>> s.drop_inf(inplace=True)\n >>> s\n 0 1.0\n 1 2.0\n dtype: float64\n \"\"\"\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n inf_range = get_inf_range(inf)\n mask = s.isin(inf_range)\n result = s[~mask]\n\n if not inplace:\n return result\n\n s._update_inplace(result)\n", "path": "dtoolkit/accessor/series/drop_inf.py"}]}
| 2,727 | 427 |
gh_patches_debug_39269
|
rasdani/github-patches
|
git_diff
|
python-telegram-bot__python-telegram-bot-1019
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
get_file_stream support
node-telegram-bot-api has added `getFileStream` method , which allows accessing the file stream without downloading it to a folder. It'll be useful.For example, changing a group photo from a photo uploaded by users.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `telegram/files/file.py`
Content:
```
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2018
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains an object that represents a Telegram File."""
20 from os.path import basename
21
22 from future.backports.urllib import parse as urllib_parse
23
24 from telegram import TelegramObject
25
26
27 class File(TelegramObject):
28 """
29 This object represents a file ready to be downloaded. The file can be downloaded with
30 :attr:`download`. It is guaranteed that the link will be valid for at least 1 hour. When the
31 link expires, a new one can be requested by calling getFile.
32
33 Note:
34 Maximum file size to download is 20 MB
35
36 Attributes:
37 file_id (:obj:`str`): Unique identifier for this file.
38 file_size (:obj:`str`): Optional. File size.
39 file_path (:obj:`str`): Optional. File path. Use :attr:`download` to get the file.
40
41 Args:
42 file_id (:obj:`str`): Unique identifier for this file.
43 file_size (:obj:`int`, optional): Optional. File size, if known.
44 file_path (:obj:`str`, optional): File path. Use :attr:`download` to get the file.
45 bot (:obj:`telegram.Bot`, optional): Bot to use with shortcut method.
46 **kwargs (:obj:`dict`): Arbitrary keyword arguments.
47
48 """
49
50 def __init__(self, file_id, bot=None, file_size=None, file_path=None, **kwargs):
51 # Required
52 self.file_id = str(file_id)
53
54 # Optionals
55 self.file_size = file_size
56 self.file_path = file_path
57
58 self.bot = bot
59
60 self._id_attrs = (self.file_id,)
61
62 @classmethod
63 def de_json(cls, data, bot):
64 if not data:
65 return None
66
67 return cls(bot=bot, **data)
68
69 def download(self, custom_path=None, out=None, timeout=None):
70 """
71 Download this file. By default, the file is saved in the current working directory with its
72 original filename as reported by Telegram. If a :attr:`custom_path` is supplied, it will be
73 saved to that path instead. If :attr:`out` is defined, the file contents will be saved to
74 that object using the ``out.write`` method.
75
76 Note:
77 `custom_path` and `out` are mutually exclusive.
78
79 Args:
80 custom_path (:obj:`str`, optional): Custom path.
81 out (:obj:`object`, optional): A file-like object. Must be opened in binary mode, if
82 applicable.
83 timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
84 the read timeout from the server (instead of the one specified during creation of
85 the connection pool).
86
87 Raises:
88 ValueError: If both ``custom_path`` and ``out`` are passed.
89
90 """
91 if custom_path is not None and out is not None:
92 raise ValueError('custom_path and out are mutually exclusive')
93
94 # Convert any UTF-8 char into a url encoded ASCII string.
95 sres = urllib_parse.urlsplit(self.file_path)
96 url = urllib_parse.urlunsplit(urllib_parse.SplitResult(
97 sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))
98
99 if out:
100 buf = self.bot.request.retrieve(url)
101 out.write(buf)
102
103 else:
104 if custom_path:
105 filename = custom_path
106 else:
107 filename = basename(self.file_path)
108
109 self.bot.request.download(url, filename, timeout=timeout)
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/telegram/files/file.py b/telegram/files/file.py
--- a/telegram/files/file.py
+++ b/telegram/files/file.py
@@ -74,32 +74,34 @@
that object using the ``out.write`` method.
Note:
- `custom_path` and `out` are mutually exclusive.
+ :attr:`custom_path` and :attr:`out` are mutually exclusive.
Args:
custom_path (:obj:`str`, optional): Custom path.
- out (:obj:`object`, optional): A file-like object. Must be opened in binary mode, if
- applicable.
+ out (:obj:`io.BufferedWriter`, optional): A file-like object. Must be opened for
+ writing in binary mode, if applicable.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
+ Returns:
+ :obj:`str` | :obj:`io.BufferedWriter`: The same object as :attr:`out` if specified.
+ Otherwise, returns the filename downloaded to.
+
Raises:
- ValueError: If both ``custom_path`` and ``out`` are passed.
+ ValueError: If both :attr:`custom_path` and :attr:`out` are passed.
"""
if custom_path is not None and out is not None:
raise ValueError('custom_path and out are mutually exclusive')
# Convert any UTF-8 char into a url encoded ASCII string.
- sres = urllib_parse.urlsplit(self.file_path)
- url = urllib_parse.urlunsplit(urllib_parse.SplitResult(
- sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))
+ url = self._get_encoded_url()
if out:
buf = self.bot.request.retrieve(url)
out.write(buf)
-
+ return out
else:
if custom_path:
filename = custom_path
@@ -107,3 +109,27 @@
filename = basename(self.file_path)
self.bot.request.download(url, filename, timeout=timeout)
+ return filename
+
+ def _get_encoded_url(self):
+ """Convert any UTF-8 char in :obj:`File.file_path` into a url encoded ASCII string."""
+ sres = urllib_parse.urlsplit(self.file_path)
+ return urllib_parse.urlunsplit(urllib_parse.SplitResult(
+ sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))
+
+ def download_as_bytearray(self, buf=None):
+ """Download this file and return it as a bytearray.
+
+ Args:
+ buf (:obj:`bytearray`, optional): Extend the given bytearray with the downloaded data.
+
+ Returns:
+ :obj:`bytearray`: The same object as :attr:`buf` if it was specified. Otherwise a newly
+ allocated :obj:`bytearray`.
+
+ """
+ if buf is None:
+ buf = bytearray()
+
+ buf.extend(self.bot.request.retrieve(self._get_encoded_url()))
+ return buf
|
{"golden_diff": "diff --git a/telegram/files/file.py b/telegram/files/file.py\n--- a/telegram/files/file.py\n+++ b/telegram/files/file.py\n@@ -74,32 +74,34 @@\n that object using the ``out.write`` method.\n \n Note:\n- `custom_path` and `out` are mutually exclusive.\n+ :attr:`custom_path` and :attr:`out` are mutually exclusive.\n \n Args:\n custom_path (:obj:`str`, optional): Custom path.\n- out (:obj:`object`, optional): A file-like object. Must be opened in binary mode, if\n- applicable.\n+ out (:obj:`io.BufferedWriter`, optional): A file-like object. Must be opened for\n+ writing in binary mode, if applicable.\n timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as\n the read timeout from the server (instead of the one specified during creation of\n the connection pool).\n \n+ Returns:\n+ :obj:`str` | :obj:`io.BufferedWriter`: The same object as :attr:`out` if specified.\n+ Otherwise, returns the filename downloaded to.\n+\n Raises:\n- ValueError: If both ``custom_path`` and ``out`` are passed.\n+ ValueError: If both :attr:`custom_path` and :attr:`out` are passed.\n \n \"\"\"\n if custom_path is not None and out is not None:\n raise ValueError('custom_path and out are mutually exclusive')\n \n # Convert any UTF-8 char into a url encoded ASCII string.\n- sres = urllib_parse.urlsplit(self.file_path)\n- url = urllib_parse.urlunsplit(urllib_parse.SplitResult(\n- sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))\n+ url = self._get_encoded_url()\n \n if out:\n buf = self.bot.request.retrieve(url)\n out.write(buf)\n-\n+ return out\n else:\n if custom_path:\n filename = custom_path\n@@ -107,3 +109,27 @@\n filename = basename(self.file_path)\n \n self.bot.request.download(url, filename, timeout=timeout)\n+ return filename\n+\n+ def _get_encoded_url(self):\n+ \"\"\"Convert any UTF-8 char in :obj:`File.file_path` into a url encoded ASCII string.\"\"\"\n+ sres = urllib_parse.urlsplit(self.file_path)\n+ return urllib_parse.urlunsplit(urllib_parse.SplitResult(\n+ sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))\n+\n+ def download_as_bytearray(self, buf=None):\n+ \"\"\"Download this file and return it as a bytearray.\n+\n+ Args:\n+ buf (:obj:`bytearray`, optional): Extend the given bytearray with the downloaded data.\n+\n+ Returns:\n+ :obj:`bytearray`: The same object as :attr:`buf` if it was specified. Otherwise a newly\n+ allocated :obj:`bytearray`.\n+\n+ \"\"\"\n+ if buf is None:\n+ buf = bytearray()\n+\n+ buf.extend(self.bot.request.retrieve(self._get_encoded_url()))\n+ return buf\n", "issue": "get_file_stream support\nnode-telegram-bot-api has added `getFileStream` method , which allows accessing the file stream without downloading it to a folder. It'll be useful.For example, changing a group photo from a photo uploaded by users.\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram File.\"\"\"\nfrom os.path import basename\n\nfrom future.backports.urllib import parse as urllib_parse\n\nfrom telegram import TelegramObject\n\n\nclass File(TelegramObject):\n \"\"\"\n This object represents a file ready to be downloaded. The file can be downloaded with\n :attr:`download`. It is guaranteed that the link will be valid for at least 1 hour. When the\n link expires, a new one can be requested by calling getFile.\n\n Note:\n Maximum file size to download is 20 MB\n\n Attributes:\n file_id (:obj:`str`): Unique identifier for this file.\n file_size (:obj:`str`): Optional. File size.\n file_path (:obj:`str`): Optional. File path. Use :attr:`download` to get the file.\n\n Args:\n file_id (:obj:`str`): Unique identifier for this file.\n file_size (:obj:`int`, optional): Optional. File size, if known.\n file_path (:obj:`str`, optional): File path. Use :attr:`download` to get the file.\n bot (:obj:`telegram.Bot`, optional): Bot to use with shortcut method.\n **kwargs (:obj:`dict`): Arbitrary keyword arguments.\n\n \"\"\"\n\n def __init__(self, file_id, bot=None, file_size=None, file_path=None, **kwargs):\n # Required\n self.file_id = str(file_id)\n\n # Optionals\n self.file_size = file_size\n self.file_path = file_path\n\n self.bot = bot\n\n self._id_attrs = (self.file_id,)\n\n @classmethod\n def de_json(cls, data, bot):\n if not data:\n return None\n\n return cls(bot=bot, **data)\n\n def download(self, custom_path=None, out=None, timeout=None):\n \"\"\"\n Download this file. By default, the file is saved in the current working directory with its\n original filename as reported by Telegram. If a :attr:`custom_path` is supplied, it will be\n saved to that path instead. If :attr:`out` is defined, the file contents will be saved to\n that object using the ``out.write`` method.\n\n Note:\n `custom_path` and `out` are mutually exclusive.\n\n Args:\n custom_path (:obj:`str`, optional): Custom path.\n out (:obj:`object`, optional): A file-like object. 
Must be opened in binary mode, if\n applicable.\n timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as\n the read timeout from the server (instead of the one specified during creation of\n the connection pool).\n\n Raises:\n ValueError: If both ``custom_path`` and ``out`` are passed.\n\n \"\"\"\n if custom_path is not None and out is not None:\n raise ValueError('custom_path and out are mutually exclusive')\n\n # Convert any UTF-8 char into a url encoded ASCII string.\n sres = urllib_parse.urlsplit(self.file_path)\n url = urllib_parse.urlunsplit(urllib_parse.SplitResult(\n sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))\n\n if out:\n buf = self.bot.request.retrieve(url)\n out.write(buf)\n\n else:\n if custom_path:\n filename = custom_path\n else:\n filename = basename(self.file_path)\n\n self.bot.request.download(url, filename, timeout=timeout)\n", "path": "telegram/files/file.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram File.\"\"\"\nfrom os.path import basename\n\nfrom future.backports.urllib import parse as urllib_parse\n\nfrom telegram import TelegramObject\n\n\nclass File(TelegramObject):\n \"\"\"\n This object represents a file ready to be downloaded. The file can be downloaded with\n :attr:`download`. It is guaranteed that the link will be valid for at least 1 hour. When the\n link expires, a new one can be requested by calling getFile.\n\n Note:\n Maximum file size to download is 20 MB\n\n Attributes:\n file_id (:obj:`str`): Unique identifier for this file.\n file_size (:obj:`str`): Optional. File size.\n file_path (:obj:`str`): Optional. File path. Use :attr:`download` to get the file.\n\n Args:\n file_id (:obj:`str`): Unique identifier for this file.\n file_size (:obj:`int`, optional): Optional. File size, if known.\n file_path (:obj:`str`, optional): File path. Use :attr:`download` to get the file.\n bot (:obj:`telegram.Bot`, optional): Bot to use with shortcut method.\n **kwargs (:obj:`dict`): Arbitrary keyword arguments.\n\n \"\"\"\n\n def __init__(self, file_id, bot=None, file_size=None, file_path=None, **kwargs):\n # Required\n self.file_id = str(file_id)\n\n # Optionals\n self.file_size = file_size\n self.file_path = file_path\n\n self.bot = bot\n\n self._id_attrs = (self.file_id,)\n\n @classmethod\n def de_json(cls, data, bot):\n if not data:\n return None\n\n return cls(bot=bot, **data)\n\n def download(self, custom_path=None, out=None, timeout=None):\n \"\"\"\n Download this file. By default, the file is saved in the current working directory with its\n original filename as reported by Telegram. 
If a :attr:`custom_path` is supplied, it will be\n saved to that path instead. If :attr:`out` is defined, the file contents will be saved to\n that object using the ``out.write`` method.\n\n Note:\n :attr:`custom_path` and :attr:`out` are mutually exclusive.\n\n Args:\n custom_path (:obj:`str`, optional): Custom path.\n out (:obj:`io.BufferedWriter`, optional): A file-like object. Must be opened for\n writing in binary mode, if applicable.\n timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as\n the read timeout from the server (instead of the one specified during creation of\n the connection pool).\n\n Returns:\n :obj:`str` | :obj:`io.BufferedWriter`: The same object as :attr:`out` if specified.\n Otherwise, returns the filename downloaded to.\n\n Raises:\n ValueError: If both :attr:`custom_path` and :attr:`out` are passed.\n\n \"\"\"\n if custom_path is not None and out is not None:\n raise ValueError('custom_path and out are mutually exclusive')\n\n # Convert any UTF-8 char into a url encoded ASCII string.\n url = self._get_encoded_url()\n\n if out:\n buf = self.bot.request.retrieve(url)\n out.write(buf)\n return out\n else:\n if custom_path:\n filename = custom_path\n else:\n filename = basename(self.file_path)\n\n self.bot.request.download(url, filename, timeout=timeout)\n return filename\n\n def _get_encoded_url(self):\n \"\"\"Convert any UTF-8 char in :obj:`File.file_path` into a url encoded ASCII string.\"\"\"\n sres = urllib_parse.urlsplit(self.file_path)\n return urllib_parse.urlunsplit(urllib_parse.SplitResult(\n sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))\n\n def download_as_bytearray(self, buf=None):\n \"\"\"Download this file and return it as a bytearray.\n\n Args:\n buf (:obj:`bytearray`, optional): Extend the given bytearray with the downloaded data.\n\n Returns:\n :obj:`bytearray`: The same object as :attr:`buf` if it was specified. Otherwise a newly\n allocated :obj:`bytearray`.\n\n \"\"\"\n if buf is None:\n buf = bytearray()\n\n buf.extend(self.bot.request.retrieve(self._get_encoded_url()))\n return buf\n", "path": "telegram/files/file.py"}]}
| 1,491 | 705 |
gh_patches_debug_32392
|
rasdani/github-patches
|
git_diff
|
Qiskit__qiskit-2387
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`set_qiskit_logger()` is hard to use
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. -->
### What is the expected enhancement?
A utility method `[set_qiskit_logger()]`(https://github.com/Qiskit/qiskit-terra/blob/cac7c6a2631d76ef3e811a5c943a2b9101fed240/qiskit/_logging.py#L48) exists to log qiskit-terra's execution flow.
It has room for improvement, namely:
- It turns on logging everywhere, thus generating a lot of noise. One should be able to specify a certain file to or module to log.
- One cannot customize the LOG_LEVEL.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/tools/logging.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # This code is part of Qiskit.
4 #
5 # (C) Copyright IBM 2017.
6 #
7 # This code is licensed under the Apache License, Version 2.0. You may
8 # obtain a copy of this license in the LICENSE.txt file in the root directory
9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
10 #
11 # Any modifications or derivative works of this code must retain this
12 # copyright notice, and modified files need to carry a notice indicating
13 # that they have been altered from the originals.
14
15 """Utilities for logging."""
16
17 import logging
18 from logging.config import dictConfig
19
20
21 class SimpleInfoFormatter(logging.Formatter):
22 """Custom Formatter that uses a simple format for INFO."""
23 _style_info = logging._STYLES['%'][0]('%(message)s')
24
25 def formatMessage(self, record):
26 if record.levelno == logging.INFO:
27 return self._style_info.format(record)
28 return logging.Formatter.formatMessage(self, record)
29
30
31 QISKIT_LOGGING_CONFIG = {
32 'version': 1,
33 'disable_existing_loggers': False,
34 'formatters': {
35 'f': {
36 '()': SimpleInfoFormatter,
37 'format': '%(asctime)s:%(name)s:%(levelname)s: %(message)s'
38 },
39 },
40 'handlers': {
41 'h': {
42 'class': 'logging.StreamHandler',
43 'formatter': 'f'
44 }
45 },
46 'loggers': {
47 'qiskit': {
48 'handlers': ['h'],
49 'level': logging.INFO,
50 },
51 }
52 }
53
54
55 def set_qiskit_logger():
56 """Update 'qiskit' logger configuration using a SDK default one.
57
58 Update the configuration of the 'qiskit' logger using the default SDK
59 configuration provided by `QISKIT_LOGGING_CONFIG`:
60
61 * console logging using a custom format for levels != INFO.
62 * console logging with simple format for level INFO.
63 * set logger level to INFO.
64
65 Warning:
66 This function modifies the configuration of the standard logging system
67 for the 'qiskit.*' loggers, and might interfere with custom logger
68 configurations.
69 """
70 dictConfig(QISKIT_LOGGING_CONFIG)
71
72
73 def unset_qiskit_logger():
74 """Remove the handlers for the 'qiskit' logger."""
75 qiskit_logger = logging.getLogger('qiskit')
76 for handler in qiskit_logger.handlers:
77 qiskit_logger.removeHandler(handler)
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qiskit/tools/logging.py b/qiskit/tools/logging.py
deleted file mode 100644
--- a/qiskit/tools/logging.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# This code is part of Qiskit.
-#
-# (C) Copyright IBM 2017.
-#
-# This code is licensed under the Apache License, Version 2.0. You may
-# obtain a copy of this license in the LICENSE.txt file in the root directory
-# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
-#
-# Any modifications or derivative works of this code must retain this
-# copyright notice, and modified files need to carry a notice indicating
-# that they have been altered from the originals.
-
-"""Utilities for logging."""
-
-import logging
-from logging.config import dictConfig
-
-
-class SimpleInfoFormatter(logging.Formatter):
- """Custom Formatter that uses a simple format for INFO."""
- _style_info = logging._STYLES['%'][0]('%(message)s')
-
- def formatMessage(self, record):
- if record.levelno == logging.INFO:
- return self._style_info.format(record)
- return logging.Formatter.formatMessage(self, record)
-
-
-QISKIT_LOGGING_CONFIG = {
- 'version': 1,
- 'disable_existing_loggers': False,
- 'formatters': {
- 'f': {
- '()': SimpleInfoFormatter,
- 'format': '%(asctime)s:%(name)s:%(levelname)s: %(message)s'
- },
- },
- 'handlers': {
- 'h': {
- 'class': 'logging.StreamHandler',
- 'formatter': 'f'
- }
- },
- 'loggers': {
- 'qiskit': {
- 'handlers': ['h'],
- 'level': logging.INFO,
- },
- }
-}
-
-
-def set_qiskit_logger():
- """Update 'qiskit' logger configuration using a SDK default one.
-
- Update the configuration of the 'qiskit' logger using the default SDK
- configuration provided by `QISKIT_LOGGING_CONFIG`:
-
- * console logging using a custom format for levels != INFO.
- * console logging with simple format for level INFO.
- * set logger level to INFO.
-
- Warning:
- This function modifies the configuration of the standard logging system
- for the 'qiskit.*' loggers, and might interfere with custom logger
- configurations.
- """
- dictConfig(QISKIT_LOGGING_CONFIG)
-
-
-def unset_qiskit_logger():
- """Remove the handlers for the 'qiskit' logger."""
- qiskit_logger = logging.getLogger('qiskit')
- for handler in qiskit_logger.handlers:
- qiskit_logger.removeHandler(handler)
|
{"golden_diff": "diff --git a/qiskit/tools/logging.py b/qiskit/tools/logging.py\ndeleted file mode 100644\n--- a/qiskit/tools/logging.py\n+++ /dev/null\n@@ -1,77 +0,0 @@\n-# -*- coding: utf-8 -*-\n-\n-# This code is part of Qiskit.\n-#\n-# (C) Copyright IBM 2017.\n-#\n-# This code is licensed under the Apache License, Version 2.0. You may\n-# obtain a copy of this license in the LICENSE.txt file in the root directory\n-# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n-#\n-# Any modifications or derivative works of this code must retain this\n-# copyright notice, and modified files need to carry a notice indicating\n-# that they have been altered from the originals.\n-\n-\"\"\"Utilities for logging.\"\"\"\n-\n-import logging\n-from logging.config import dictConfig\n-\n-\n-class SimpleInfoFormatter(logging.Formatter):\n- \"\"\"Custom Formatter that uses a simple format for INFO.\"\"\"\n- _style_info = logging._STYLES['%'][0]('%(message)s')\n-\n- def formatMessage(self, record):\n- if record.levelno == logging.INFO:\n- return self._style_info.format(record)\n- return logging.Formatter.formatMessage(self, record)\n-\n-\n-QISKIT_LOGGING_CONFIG = {\n- 'version': 1,\n- 'disable_existing_loggers': False,\n- 'formatters': {\n- 'f': {\n- '()': SimpleInfoFormatter,\n- 'format': '%(asctime)s:%(name)s:%(levelname)s: %(message)s'\n- },\n- },\n- 'handlers': {\n- 'h': {\n- 'class': 'logging.StreamHandler',\n- 'formatter': 'f'\n- }\n- },\n- 'loggers': {\n- 'qiskit': {\n- 'handlers': ['h'],\n- 'level': logging.INFO,\n- },\n- }\n-}\n-\n-\n-def set_qiskit_logger():\n- \"\"\"Update 'qiskit' logger configuration using a SDK default one.\n-\n- Update the configuration of the 'qiskit' logger using the default SDK\n- configuration provided by `QISKIT_LOGGING_CONFIG`:\n-\n- * console logging using a custom format for levels != INFO.\n- * console logging with simple format for level INFO.\n- * set logger level to INFO.\n-\n- Warning:\n- This function modifies the configuration of the standard logging system\n- for the 'qiskit.*' loggers, and might interfere with custom logger\n- configurations.\n- \"\"\"\n- dictConfig(QISKIT_LOGGING_CONFIG)\n-\n-\n-def unset_qiskit_logger():\n- \"\"\"Remove the handlers for the 'qiskit' logger.\"\"\"\n- qiskit_logger = logging.getLogger('qiskit')\n- for handler in qiskit_logger.handlers:\n- qiskit_logger.removeHandler(handler)\n", "issue": "`set_qiskit_logger()` is hard to use\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected enhancement?\r\nA utility method `[set_qiskit_logger()]`(https://github.com/Qiskit/qiskit-terra/blob/cac7c6a2631d76ef3e811a5c943a2b9101fed240/qiskit/_logging.py#L48) exists to log qiskit-terra's execution flow.\r\n\r\nIt has room for improvement, namely:\r\n- It turns on logging everywhere, thus generating a lot of noise. One should be able to specify a certain file to or module to log.\r\n- One cannot customize the LOG_LEVEL.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Utilities for logging.\"\"\"\n\nimport logging\nfrom logging.config import dictConfig\n\n\nclass SimpleInfoFormatter(logging.Formatter):\n \"\"\"Custom Formatter that uses a simple format for INFO.\"\"\"\n _style_info = logging._STYLES['%'][0]('%(message)s')\n\n def formatMessage(self, record):\n if record.levelno == logging.INFO:\n return self._style_info.format(record)\n return logging.Formatter.formatMessage(self, record)\n\n\nQISKIT_LOGGING_CONFIG = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'f': {\n '()': SimpleInfoFormatter,\n 'format': '%(asctime)s:%(name)s:%(levelname)s: %(message)s'\n },\n },\n 'handlers': {\n 'h': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'f'\n }\n },\n 'loggers': {\n 'qiskit': {\n 'handlers': ['h'],\n 'level': logging.INFO,\n },\n }\n}\n\n\ndef set_qiskit_logger():\n \"\"\"Update 'qiskit' logger configuration using a SDK default one.\n\n Update the configuration of the 'qiskit' logger using the default SDK\n configuration provided by `QISKIT_LOGGING_CONFIG`:\n\n * console logging using a custom format for levels != INFO.\n * console logging with simple format for level INFO.\n * set logger level to INFO.\n\n Warning:\n This function modifies the configuration of the standard logging system\n for the 'qiskit.*' loggers, and might interfere with custom logger\n configurations.\n \"\"\"\n dictConfig(QISKIT_LOGGING_CONFIG)\n\n\ndef unset_qiskit_logger():\n \"\"\"Remove the handlers for the 'qiskit' logger.\"\"\"\n qiskit_logger = logging.getLogger('qiskit')\n for handler in qiskit_logger.handlers:\n qiskit_logger.removeHandler(handler)\n", "path": "qiskit/tools/logging.py"}], "after_files": [{"content": null, "path": "qiskit/tools/logging.py"}]}
| 1,142 | 656 |
gh_patches_debug_12334
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-877
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
E3002, E3003, E3012 - AWS::SSM::MaintenanceWindowTarget errors
cfn-lint version: 0.19.1
cfn-lint -u has been run
I am unable to to get a clean lint on a SSM template file that seems alright and that has loaded properly in AWS. Would really appreciate any assistance.
The documentation is here on MaintenanceWindowTarget:
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ssm-maintenancewindowtarget.html
And the associated targets:
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ssm-maintenancewindowtarget-targets.html
The below cf should be syntactically correct.....
```
BaselinePatchDailySSMMaintenanceWindowTarget:
Type: AWS::SSM::MaintenanceWindowTarget
Properties:
Name: BaselinePatchDailyTarget
Description: Systems with Tag Key=Patch,Value=Daily.
WindowId: !Ref BaselinePatchDailySSMMaintenanceWindow
ResourceType: INSTANCE
Targets:
- Key: tag:Patch
Values:
- Daily
- daily
```
Results in the following cfn-lint errors:
```
E0002 Unknown exception while processing rule E3002: 'AWS::SSM::MaintenanceWindowTarget.Target'
BaselineSSMConfig.yaml:1:1
E0002 Unknown exception while processing rule E3003: 'AWS::SSM::MaintenanceWindowTarget.Target'
BaselineSSMConfig.yaml:1:1
E3012 Property Resources/BaselinePatchDailySSMMaintenanceWindowTarget/Properties/Targets/0 should be of type String
BaselineSSMConfig.yaml:292:9
```
Edited file as follows to attempt to make target a string:
```
BaselinePatchDailySSMMaintenanceWindowTarget:
Type: AWS::SSM::MaintenanceWindowTarget
Properties:
Name: BaselinePatchDailyTarget
Description: Systems with Tag Key=Patch,Value=Daily.
WindowId: !Ref BaselinePatchDailySSMMaintenanceWindow
ResourceType: INSTANCE
Targets: Key=tag:Patch,Values=Daily,daily
```
Results in the following cfn-lint error:
```
E3002 Property Targets should be of type List for resource BaselinePatchDailySSMMaintenanceWindowTarget
BaselineSSMConfig.yaml:291:7
```
Attempting to make the string a list:
```
BaselinePatchDailySSMMaintenanceWindowTarget:
Type: AWS::SSM::MaintenanceWindowTarget
Properties:
Name: BaselinePatchDailyTarget
Description: Systems with Tag Key=Patch,Value=Daily.
WindowId: !Ref BaselinePatchDailySSMMaintenanceWindow
ResourceType: INSTANCE
Targets:
- Key=tag:Patch,Values=Daily,daily
```
Results in the following errors:
```
E0002 Unknown exception while processing rule E3002: 'AWS::SSM::MaintenanceWindowTarget.Target'
BaselineSSMConfig.yaml:1:1
E0002 Unknown exception while processing rule E3003: 'AWS::SSM::MaintenanceWindowTarget.Target'
BaselineSSMConfig.yaml:1:1
```
Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/properties/ValuePrimitiveType.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import sys
18 import six
19 from cfnlint import CloudFormationLintRule
20 from cfnlint import RuleMatch
21 import cfnlint.helpers
22
23
24 class ValuePrimitiveType(CloudFormationLintRule):
25 """Check if Resource PrimitiveTypes are correct"""
26 id = 'E3012'
27 shortdesc = 'Check resource properties values'
28 description = 'Checks resource property values with Primitive Types for ' \
29 'values that match those types.'
30 source_url = 'https://github.com/aws-cloudformation/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#valueprimitivetype'
31 tags = ['resources']
32
33 def __init__(self):
34 """Init"""
35 super(ValuePrimitiveType, self).__init__()
36 self.resource_specs = []
37 self.property_specs = []
38 self.config_definition = {
39 'strict': {
40 'default': True,
41 'type': 'boolean'
42 }
43 }
44 self.configure()
45
46 def initialize(self, cfn):
47 """Initialize the rule"""
48 specs = cfnlint.helpers.RESOURCE_SPECS.get(cfn.regions[0])
49 self.property_specs = specs.get('PropertyTypes')
50 self.resource_specs = specs.get('ResourceTypes')
51 for resource_spec in self.resource_specs:
52 self.resource_property_types.append(resource_spec)
53 for property_spec in self.property_specs:
54 self.resource_sub_property_types.append(property_spec)
55
56 def _value_check(self, value, path, item_type, extra_args):
57 """ Checks non strict """
58 matches = []
59 if not self.config['strict']:
60 try:
61 if item_type in ['String']:
62 str(value)
63 elif item_type in ['Boolean']:
64 if value not in ['True', 'true', 'False', 'false']:
65 message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)
66 matches.append(RuleMatch(path, message, **extra_args))
67 elif item_type in ['Integer', 'Long', 'Double']:
68 if isinstance(value, bool):
69 message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)
70 matches.append(RuleMatch(path, message, **extra_args))
71 elif item_type in ['Integer']:
72 int(value)
73 elif item_type in ['Long']:
74 # Some times python will strip the decimals when doing a conversion
75 if isinstance(value, float):
76 message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)
77 matches.append(RuleMatch(path, message, **extra_args))
78 if sys.version_info < (3,):
79 long(value) # pylint: disable=undefined-variable
80 else:
81 int(value)
82 else: # has to be a Double
83 float(value)
84 except Exception: # pylint: disable=W0703
85 message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)
86 matches.append(RuleMatch(path, message, **extra_args))
87 else:
88 message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)
89 matches.append(RuleMatch(path, message, **extra_args))
90
91 return matches
92
93 def check_primitive_type(self, value, item_type, path):
94 """Chec item type"""
95 matches = []
96
97 if isinstance(value, dict) and item_type == 'Json':
98 return matches
99 if item_type in ['String']:
100 if not isinstance(value, (six.string_types)):
101 extra_args = {'actual_type': type(value).__name__, 'expected_type': str.__name__}
102 matches.extend(self._value_check(value, path, item_type, extra_args))
103 elif item_type in ['Boolean']:
104 if not isinstance(value, (bool)):
105 extra_args = {'actual_type': type(value).__name__, 'expected_type': bool.__name__}
106 matches.extend(self._value_check(value, path, item_type, extra_args))
107 elif item_type in ['Double']:
108 if not isinstance(value, (float, int)):
109 extra_args = {'actual_type': type(value).__name__, 'expected_type': [float.__name__, int.__name__]}
110 matches.extend(self._value_check(value, path, item_type, extra_args))
111 elif item_type in ['Integer']:
112 if not isinstance(value, (int)):
113 extra_args = {'actual_type': type(value).__name__, 'expected_type': int.__name__}
114 matches.extend(self._value_check(value, path, item_type, extra_args))
115 elif item_type in ['Long']:
116 if sys.version_info < (3,):
117 integer_types = (int, long,) # pylint: disable=undefined-variable
118 else:
119 integer_types = (int,)
120 if not isinstance(value, integer_types):
121 extra_args = {'actual_type': type(value).__name__, 'expected_type': ' or '.join([x.__name__ for x in integer_types])}
122 matches.extend(self._value_check(value, path, item_type, extra_args))
123 elif isinstance(value, list):
124 message = 'Property should be of type %s at %s' % (item_type, '/'.join(map(str, path)))
125 extra_args = {'actual_type': type(value).__name__, 'expected_type': list.__name__}
126 matches.append(RuleMatch(path, message, **extra_args))
127
128 return matches
129
130 def check_value(self, value, path, **kwargs):
131 """Check Value"""
132 matches = []
133 primitive_type = kwargs.get('primitive_type', {})
134 item_type = kwargs.get('item_type', {})
135 if item_type in ['Map']:
136 if isinstance(value, dict):
137 for map_key, map_value in value.items():
138 if not isinstance(map_value, dict):
139 matches.extend(self.check_primitive_type(map_value, primitive_type, path + [map_key]))
140 else:
141 matches.extend(self.check_primitive_type(value, primitive_type, path))
142
143 return matches
144
145 def check(self, cfn, properties, specs, path):
146 """Check itself"""
147 matches = []
148
149 for prop in properties:
150 if prop in specs:
151 primitive_type = specs.get(prop).get('PrimitiveType')
152 if not primitive_type:
153 primitive_type = specs.get(prop).get('PrimitiveItemType')
154 if specs.get(prop).get('Type') in ['List', 'Map']:
155 item_type = specs.get(prop).get('Type')
156 else:
157 item_type = None
158 if primitive_type:
159 matches.extend(
160 cfn.check_value(
161 properties, prop, path,
162 check_value=self.check_value,
163 primitive_type=primitive_type,
164 item_type=item_type
165 )
166 )
167
168 return matches
169
170 def match_resource_sub_properties(self, properties, property_type, path, cfn):
171 """Match for sub properties"""
172 matches = []
173
174 if self.property_specs.get(property_type, {}).get('Properties'):
175 property_specs = self.property_specs.get(property_type, {}).get('Properties', {})
176 matches.extend(self.check(cfn, properties, property_specs, path))
177
178 return matches
179
180 def match_resource_properties(self, properties, resource_type, path, cfn):
181 """Check CloudFormation Properties"""
182 matches = []
183 resource_specs = self.resource_specs.get(resource_type, {}).get('Properties', {})
184 matches.extend(self.check(cfn, properties, resource_specs, path))
185
186 return matches
187
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py b/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py
--- a/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py
+++ b/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py
@@ -138,6 +138,10 @@
if not isinstance(map_value, dict):
matches.extend(self.check_primitive_type(map_value, primitive_type, path + [map_key]))
else:
+ # some properties support primitive types and objects
+ # skip in the case it could be an object and the value is a object
+ if item_type and isinstance(value, dict):
+ return matches
matches.extend(self.check_primitive_type(value, primitive_type, path))
return matches
|
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py b/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py\n--- a/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py\n+++ b/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py\n@@ -138,6 +138,10 @@\n if not isinstance(map_value, dict):\n matches.extend(self.check_primitive_type(map_value, primitive_type, path + [map_key]))\n else:\n+ # some properties support primitive types and objects\n+ # skip in the case it could be an object and the value is a object\n+ if item_type and isinstance(value, dict):\n+ return matches\n matches.extend(self.check_primitive_type(value, primitive_type, path))\n \n return matches\n", "issue": "E3002, E3003, E3012 - AWS::SSM::MaintenanceWindowTarget errors\ncfn-lint version: 0.19.1\r\n\r\ncfn-lint -u has been run\r\n\r\nI am unable to to get a clean lint on a SSM template file that seems alright and that has loaded properly in AWS. Would really appreciate any assistance. \r\n\r\nThe documentation is here on MaintenanceWindowTarget:\r\nhttps://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ssm-maintenancewindowtarget.html\r\n\r\nAnd the associated targets:\r\nhttps://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ssm-maintenancewindowtarget-targets.html\r\n\r\nThe below cf should be syntactically correct..... \r\n\r\n```\r\n BaselinePatchDailySSMMaintenanceWindowTarget:\r\n Type: AWS::SSM::MaintenanceWindowTarget\r\n Properties:\r\n Name: BaselinePatchDailyTarget\r\n Description: Systems with Tag Key=Patch,Value=Daily.\r\n WindowId: !Ref BaselinePatchDailySSMMaintenanceWindow\r\n ResourceType: INSTANCE\r\n Targets:\r\n - Key: tag:Patch\r\n Values:\r\n - Daily\r\n - daily\r\n```\r\nResults in the following cfn-lint errors:\r\n\r\n```\r\nE0002 Unknown exception while processing rule E3002: 'AWS::SSM::MaintenanceWindowTarget.Target'\r\nBaselineSSMConfig.yaml:1:1\r\n\r\nE0002 Unknown exception while processing rule E3003: 'AWS::SSM::MaintenanceWindowTarget.Target'\r\nBaselineSSMConfig.yaml:1:1\r\n\r\nE3012 Property Resources/BaselinePatchDailySSMMaintenanceWindowTarget/Properties/Targets/0 should be of type String\r\nBaselineSSMConfig.yaml:292:9\r\n```\r\nEdited file as follows to attempt to make target a string:\r\n\r\n```\r\n BaselinePatchDailySSMMaintenanceWindowTarget:\r\n Type: AWS::SSM::MaintenanceWindowTarget\r\n Properties:\r\n Name: BaselinePatchDailyTarget\r\n Description: Systems with Tag Key=Patch,Value=Daily.\r\n WindowId: !Ref BaselinePatchDailySSMMaintenanceWindow\r\n ResourceType: INSTANCE\r\n Targets: Key=tag:Patch,Values=Daily,daily\r\n```\r\nResults in the following cfn-lint error:\r\n\r\n```\r\nE3002 Property Targets should be of type List for resource BaselinePatchDailySSMMaintenanceWindowTarget\r\nBaselineSSMConfig.yaml:291:7\r\n```\r\nAttempting to make the string a list:\r\n```\r\n BaselinePatchDailySSMMaintenanceWindowTarget:\r\n Type: AWS::SSM::MaintenanceWindowTarget\r\n Properties:\r\n Name: BaselinePatchDailyTarget\r\n Description: Systems with Tag Key=Patch,Value=Daily.\r\n WindowId: !Ref BaselinePatchDailySSMMaintenanceWindow\r\n ResourceType: INSTANCE\r\n Targets:\r\n - Key=tag:Patch,Values=Daily,daily\r\n```\r\nResults in the following errors:\r\n```\r\nE0002 Unknown exception while processing rule E3002: 'AWS::SSM::MaintenanceWindowTarget.Target'\r\nBaselineSSMConfig.yaml:1:1\r\n\r\nE0002 Unknown exception while processing rule E3003: 
'AWS::SSM::MaintenanceWindowTarget.Target'\r\nBaselineSSMConfig.yaml:1:1\r\n```\r\n\r\nThanks!\r\n\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport sys\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\nimport cfnlint.helpers\n\n\nclass ValuePrimitiveType(CloudFormationLintRule):\n \"\"\"Check if Resource PrimitiveTypes are correct\"\"\"\n id = 'E3012'\n shortdesc = 'Check resource properties values'\n description = 'Checks resource property values with Primitive Types for ' \\\n 'values that match those types.'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#valueprimitivetype'\n tags = ['resources']\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(ValuePrimitiveType, self).__init__()\n self.resource_specs = []\n self.property_specs = []\n self.config_definition = {\n 'strict': {\n 'default': True,\n 'type': 'boolean'\n }\n }\n self.configure()\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n specs = cfnlint.helpers.RESOURCE_SPECS.get(cfn.regions[0])\n self.property_specs = specs.get('PropertyTypes')\n self.resource_specs = specs.get('ResourceTypes')\n for resource_spec in self.resource_specs:\n self.resource_property_types.append(resource_spec)\n for property_spec in self.property_specs:\n self.resource_sub_property_types.append(property_spec)\n\n def _value_check(self, value, path, item_type, extra_args):\n \"\"\" Checks non strict \"\"\"\n matches = []\n if not self.config['strict']:\n try:\n if item_type in ['String']:\n str(value)\n elif item_type in ['Boolean']:\n if value not in ['True', 'true', 'False', 'false']:\n message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)\n matches.append(RuleMatch(path, message, **extra_args))\n elif item_type in ['Integer', 'Long', 'Double']:\n if isinstance(value, bool):\n message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)\n matches.append(RuleMatch(path, message, **extra_args))\n elif item_type in ['Integer']:\n int(value)\n elif item_type in ['Long']:\n # Some times python will strip the decimals when doing a conversion\n if isinstance(value, float):\n message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)\n matches.append(RuleMatch(path, message, **extra_args))\n if sys.version_info < (3,):\n long(value) # pylint: disable=undefined-variable\n else:\n int(value)\n else: # has to be a Double\n float(value)\n except Exception: # pylint: disable=W0703\n message = 'Property %s 
should be of type %s' % ('/'.join(map(str, path)), item_type)\n matches.append(RuleMatch(path, message, **extra_args))\n else:\n message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)\n matches.append(RuleMatch(path, message, **extra_args))\n\n return matches\n\n def check_primitive_type(self, value, item_type, path):\n \"\"\"Chec item type\"\"\"\n matches = []\n\n if isinstance(value, dict) and item_type == 'Json':\n return matches\n if item_type in ['String']:\n if not isinstance(value, (six.string_types)):\n extra_args = {'actual_type': type(value).__name__, 'expected_type': str.__name__}\n matches.extend(self._value_check(value, path, item_type, extra_args))\n elif item_type in ['Boolean']:\n if not isinstance(value, (bool)):\n extra_args = {'actual_type': type(value).__name__, 'expected_type': bool.__name__}\n matches.extend(self._value_check(value, path, item_type, extra_args))\n elif item_type in ['Double']:\n if not isinstance(value, (float, int)):\n extra_args = {'actual_type': type(value).__name__, 'expected_type': [float.__name__, int.__name__]}\n matches.extend(self._value_check(value, path, item_type, extra_args))\n elif item_type in ['Integer']:\n if not isinstance(value, (int)):\n extra_args = {'actual_type': type(value).__name__, 'expected_type': int.__name__}\n matches.extend(self._value_check(value, path, item_type, extra_args))\n elif item_type in ['Long']:\n if sys.version_info < (3,):\n integer_types = (int, long,) # pylint: disable=undefined-variable\n else:\n integer_types = (int,)\n if not isinstance(value, integer_types):\n extra_args = {'actual_type': type(value).__name__, 'expected_type': ' or '.join([x.__name__ for x in integer_types])}\n matches.extend(self._value_check(value, path, item_type, extra_args))\n elif isinstance(value, list):\n message = 'Property should be of type %s at %s' % (item_type, '/'.join(map(str, path)))\n extra_args = {'actual_type': type(value).__name__, 'expected_type': list.__name__}\n matches.append(RuleMatch(path, message, **extra_args))\n\n return matches\n\n def check_value(self, value, path, **kwargs):\n \"\"\"Check Value\"\"\"\n matches = []\n primitive_type = kwargs.get('primitive_type', {})\n item_type = kwargs.get('item_type', {})\n if item_type in ['Map']:\n if isinstance(value, dict):\n for map_key, map_value in value.items():\n if not isinstance(map_value, dict):\n matches.extend(self.check_primitive_type(map_value, primitive_type, path + [map_key]))\n else:\n matches.extend(self.check_primitive_type(value, primitive_type, path))\n\n return matches\n\n def check(self, cfn, properties, specs, path):\n \"\"\"Check itself\"\"\"\n matches = []\n\n for prop in properties:\n if prop in specs:\n primitive_type = specs.get(prop).get('PrimitiveType')\n if not primitive_type:\n primitive_type = specs.get(prop).get('PrimitiveItemType')\n if specs.get(prop).get('Type') in ['List', 'Map']:\n item_type = specs.get(prop).get('Type')\n else:\n item_type = None\n if primitive_type:\n matches.extend(\n cfn.check_value(\n properties, prop, path,\n check_value=self.check_value,\n primitive_type=primitive_type,\n item_type=item_type\n )\n )\n\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = []\n\n if self.property_specs.get(property_type, {}).get('Properties'):\n property_specs = self.property_specs.get(property_type, {}).get('Properties', {})\n matches.extend(self.check(cfn, properties, property_specs, path))\n\n 
return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n resource_specs = self.resource_specs.get(resource_type, {}).get('Properties', {})\n matches.extend(self.check(cfn, properties, resource_specs, path))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/ValuePrimitiveType.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport sys\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\nimport cfnlint.helpers\n\n\nclass ValuePrimitiveType(CloudFormationLintRule):\n \"\"\"Check if Resource PrimitiveTypes are correct\"\"\"\n id = 'E3012'\n shortdesc = 'Check resource properties values'\n description = 'Checks resource property values with Primitive Types for ' \\\n 'values that match those types.'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#valueprimitivetype'\n tags = ['resources']\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(ValuePrimitiveType, self).__init__()\n self.resource_specs = []\n self.property_specs = []\n self.config_definition = {\n 'strict': {\n 'default': True,\n 'type': 'boolean'\n }\n }\n self.configure()\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n specs = cfnlint.helpers.RESOURCE_SPECS.get(cfn.regions[0])\n self.property_specs = specs.get('PropertyTypes')\n self.resource_specs = specs.get('ResourceTypes')\n for resource_spec in self.resource_specs:\n self.resource_property_types.append(resource_spec)\n for property_spec in self.property_specs:\n self.resource_sub_property_types.append(property_spec)\n\n def _value_check(self, value, path, item_type, extra_args):\n \"\"\" Checks non strict \"\"\"\n matches = []\n if not self.config['strict']:\n try:\n if item_type in ['String']:\n str(value)\n elif item_type in ['Boolean']:\n if value not in ['True', 'true', 'False', 'false']:\n message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)\n matches.append(RuleMatch(path, message, **extra_args))\n elif item_type in ['Integer', 'Long', 'Double']:\n if isinstance(value, bool):\n message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)\n matches.append(RuleMatch(path, message, **extra_args))\n elif item_type in ['Integer']:\n int(value)\n elif item_type in ['Long']:\n # Some times python will strip the decimals when doing a conversion\n if isinstance(value, float):\n message = 'Property %s should be of type %s' % 
('/'.join(map(str, path)), item_type)\n matches.append(RuleMatch(path, message, **extra_args))\n if sys.version_info < (3,):\n long(value) # pylint: disable=undefined-variable\n else:\n int(value)\n else: # has to be a Double\n float(value)\n except Exception: # pylint: disable=W0703\n message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)\n matches.append(RuleMatch(path, message, **extra_args))\n else:\n message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)\n matches.append(RuleMatch(path, message, **extra_args))\n\n return matches\n\n def check_primitive_type(self, value, item_type, path):\n \"\"\"Chec item type\"\"\"\n matches = []\n\n if isinstance(value, dict) and item_type == 'Json':\n return matches\n if item_type in ['String']:\n if not isinstance(value, (six.string_types)):\n extra_args = {'actual_type': type(value).__name__, 'expected_type': str.__name__}\n matches.extend(self._value_check(value, path, item_type, extra_args))\n elif item_type in ['Boolean']:\n if not isinstance(value, (bool)):\n extra_args = {'actual_type': type(value).__name__, 'expected_type': bool.__name__}\n matches.extend(self._value_check(value, path, item_type, extra_args))\n elif item_type in ['Double']:\n if not isinstance(value, (float, int)):\n extra_args = {'actual_type': type(value).__name__, 'expected_type': [float.__name__, int.__name__]}\n matches.extend(self._value_check(value, path, item_type, extra_args))\n elif item_type in ['Integer']:\n if not isinstance(value, (int)):\n extra_args = {'actual_type': type(value).__name__, 'expected_type': int.__name__}\n matches.extend(self._value_check(value, path, item_type, extra_args))\n elif item_type in ['Long']:\n if sys.version_info < (3,):\n integer_types = (int, long,) # pylint: disable=undefined-variable\n else:\n integer_types = (int,)\n if not isinstance(value, integer_types):\n extra_args = {'actual_type': type(value).__name__, 'expected_type': ' or '.join([x.__name__ for x in integer_types])}\n matches.extend(self._value_check(value, path, item_type, extra_args))\n elif isinstance(value, list):\n message = 'Property should be of type %s at %s' % (item_type, '/'.join(map(str, path)))\n extra_args = {'actual_type': type(value).__name__, 'expected_type': list.__name__}\n matches.append(RuleMatch(path, message, **extra_args))\n\n return matches\n\n def check_value(self, value, path, **kwargs):\n \"\"\"Check Value\"\"\"\n matches = []\n primitive_type = kwargs.get('primitive_type', {})\n item_type = kwargs.get('item_type', {})\n if item_type in ['Map']:\n if isinstance(value, dict):\n for map_key, map_value in value.items():\n if not isinstance(map_value, dict):\n matches.extend(self.check_primitive_type(map_value, primitive_type, path + [map_key]))\n else:\n # some properties support primitive types and objects\n # skip in the case it could be an object and the value is a object\n if item_type and isinstance(value, dict):\n return matches\n matches.extend(self.check_primitive_type(value, primitive_type, path))\n\n return matches\n\n def check(self, cfn, properties, specs, path):\n \"\"\"Check itself\"\"\"\n matches = []\n\n for prop in properties:\n if prop in specs:\n primitive_type = specs.get(prop).get('PrimitiveType')\n if not primitive_type:\n primitive_type = specs.get(prop).get('PrimitiveItemType')\n if specs.get(prop).get('Type') in ['List', 'Map']:\n item_type = specs.get(prop).get('Type')\n else:\n item_type = None\n if primitive_type:\n matches.extend(\n cfn.check_value(\n 
properties, prop, path,\n check_value=self.check_value,\n primitive_type=primitive_type,\n item_type=item_type\n )\n )\n\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = []\n\n if self.property_specs.get(property_type, {}).get('Properties'):\n property_specs = self.property_specs.get(property_type, {}).get('Properties', {})\n matches.extend(self.check(cfn, properties, property_specs, path))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n resource_specs = self.resource_specs.get(resource_type, {}).get('Properties', {})\n matches.extend(self.check(cfn, properties, resource_specs, path))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/ValuePrimitiveType.py"}]}
| 3,231 | 175 |
gh_patches_debug_6305
|
rasdani/github-patches
|
git_diff
|
scipy__scipy-5920
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"master" fails to build with MKL
Recently, when I try to build the "master" branch, I keep getting this error from `linalg/solve/dsolve/setup.py`:
``` .Python
ValueError: list.remove(x): x not in list
```
The error traces to this block of code <a href="https://github.com/scipy/scipy/blob/master/scipy/sparse/linalg/dsolve/setup.py#L30-L32">here</a>, for it seems that the `lsame.c` file does not exist in the list of sources. Sure enough, when I remove this check, `scipy` builds successfully and tests pass.
I don't know what happened recently that caused this setup to break (it has never happened before until now), but is anyone else getting this issue?
Setup: Python 3.5.1, Windows 7, `numpy` 1.10.4, MKL
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scipy/sparse/linalg/dsolve/setup.py`
Content:
```
1 #!/usr/bin/env python
2 from __future__ import division, print_function, absolute_import
3
4 from os.path import join, dirname
5 import sys
6 import os
7 import glob
8
9
10 def configuration(parent_package='',top_path=None):
11 from numpy.distutils.misc_util import Configuration
12 from numpy.distutils.system_info import get_info
13 from scipy._build_utils import get_sgemv_fix
14 from scipy._build_utils import numpy_nodepr_api
15
16 config = Configuration('dsolve',parent_package,top_path)
17 config.add_data_dir('tests')
18
19 lapack_opt = get_info('lapack_opt',notfound_action=2)
20 if sys.platform == 'win32':
21 superlu_defs = [('NO_TIMER',1)]
22 else:
23 superlu_defs = []
24 superlu_defs.append(('USE_VENDOR_BLAS',1))
25
26 superlu_src = join(dirname(__file__), 'SuperLU', 'SRC')
27
28 sources = list(glob.glob(join(superlu_src, '*.c')))
29 headers = list(glob.glob(join(superlu_src, '*.h')))
30 if os.name == 'nt' and ('FPATH' in os.environ or 'MKLROOT' in os.environ):
31 # when using MSVC + MKL, lsame is already in MKL
32 sources.remove(join(superlu_src, 'lsame.c'))
33
34 config.add_library('superlu_src',
35 sources=sources,
36 macros=superlu_defs,
37 include_dirs=[superlu_src],
38 )
39
40 # Extension
41 ext_sources = ['_superlumodule.c',
42 '_superlu_utils.c',
43 '_superluobject.c']
44 ext_sources += get_sgemv_fix(lapack_opt)
45
46 config.add_extension('_superlu',
47 sources=ext_sources,
48 libraries=['superlu_src'],
49 depends=(sources + headers),
50 extra_info=lapack_opt,
51 **numpy_nodepr_api
52 )
53
54 return config
55
56 if __name__ == '__main__':
57 from numpy.distutils.core import setup
58 setup(**configuration(top_path='').todict())
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scipy/sparse/linalg/dsolve/setup.py b/scipy/sparse/linalg/dsolve/setup.py
--- a/scipy/sparse/linalg/dsolve/setup.py
+++ b/scipy/sparse/linalg/dsolve/setup.py
@@ -27,9 +27,6 @@
sources = list(glob.glob(join(superlu_src, '*.c')))
headers = list(glob.glob(join(superlu_src, '*.h')))
- if os.name == 'nt' and ('FPATH' in os.environ or 'MKLROOT' in os.environ):
- # when using MSVC + MKL, lsame is already in MKL
- sources.remove(join(superlu_src, 'lsame.c'))
config.add_library('superlu_src',
sources=sources,
|
{"golden_diff": "diff --git a/scipy/sparse/linalg/dsolve/setup.py b/scipy/sparse/linalg/dsolve/setup.py\n--- a/scipy/sparse/linalg/dsolve/setup.py\n+++ b/scipy/sparse/linalg/dsolve/setup.py\n@@ -27,9 +27,6 @@\n \n sources = list(glob.glob(join(superlu_src, '*.c')))\n headers = list(glob.glob(join(superlu_src, '*.h')))\n- if os.name == 'nt' and ('FPATH' in os.environ or 'MKLROOT' in os.environ):\n- # when using MSVC + MKL, lsame is already in MKL\n- sources.remove(join(superlu_src, 'lsame.c'))\n \n config.add_library('superlu_src',\n sources=sources,\n", "issue": "\"master\" fails to build with MKL\nRecently, when I try to build the \"master\" branch, I keep getting this error from `linalg/solve/dsolve/setup.py`:\n\n``` .Python\nValueError: list.remove(x): x not in list\n```\n\nThe error traces to this block of code <a href=\"https://github.com/scipy/scipy/blob/master/scipy/sparse/linalg/dsolve/setup.py#L30-L32\">here</a>, for it seems that the `lsame.c` file does not exist in the list of sources. Sure enough, when I remove this check, `scipy` builds successfully and tests pass.\n\nI don't know what happened recently that caused this setup to break (it has never happened before until now), but is anyone else getting this issue?\n\nSetup: Python 3.5.1, Windows 7, `numpy` 1.10.4, MKL\n\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import division, print_function, absolute_import\n\nfrom os.path import join, dirname\nimport sys\nimport os\nimport glob\n\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration\n from numpy.distutils.system_info import get_info\n from scipy._build_utils import get_sgemv_fix\n from scipy._build_utils import numpy_nodepr_api\n\n config = Configuration('dsolve',parent_package,top_path)\n config.add_data_dir('tests')\n\n lapack_opt = get_info('lapack_opt',notfound_action=2)\n if sys.platform == 'win32':\n superlu_defs = [('NO_TIMER',1)]\n else:\n superlu_defs = []\n superlu_defs.append(('USE_VENDOR_BLAS',1))\n\n superlu_src = join(dirname(__file__), 'SuperLU', 'SRC')\n\n sources = list(glob.glob(join(superlu_src, '*.c')))\n headers = list(glob.glob(join(superlu_src, '*.h')))\n if os.name == 'nt' and ('FPATH' in os.environ or 'MKLROOT' in os.environ):\n # when using MSVC + MKL, lsame is already in MKL\n sources.remove(join(superlu_src, 'lsame.c'))\n\n config.add_library('superlu_src',\n sources=sources,\n macros=superlu_defs,\n include_dirs=[superlu_src],\n )\n\n # Extension\n ext_sources = ['_superlumodule.c',\n '_superlu_utils.c',\n '_superluobject.c']\n ext_sources += get_sgemv_fix(lapack_opt)\n\n config.add_extension('_superlu',\n sources=ext_sources,\n libraries=['superlu_src'],\n depends=(sources + headers),\n extra_info=lapack_opt,\n **numpy_nodepr_api\n )\n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n", "path": "scipy/sparse/linalg/dsolve/setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import division, print_function, absolute_import\n\nfrom os.path import join, dirname\nimport sys\nimport os\nimport glob\n\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration\n from numpy.distutils.system_info import get_info\n from scipy._build_utils import get_sgemv_fix\n from scipy._build_utils import numpy_nodepr_api\n\n config = Configuration('dsolve',parent_package,top_path)\n config.add_data_dir('tests')\n\n 
lapack_opt = get_info('lapack_opt',notfound_action=2)\n if sys.platform == 'win32':\n superlu_defs = [('NO_TIMER',1)]\n else:\n superlu_defs = []\n superlu_defs.append(('USE_VENDOR_BLAS',1))\n\n superlu_src = join(dirname(__file__), 'SuperLU', 'SRC')\n\n sources = list(glob.glob(join(superlu_src, '*.c')))\n headers = list(glob.glob(join(superlu_src, '*.h')))\n\n config.add_library('superlu_src',\n sources=sources,\n macros=superlu_defs,\n include_dirs=[superlu_src],\n )\n\n # Extension\n ext_sources = ['_superlumodule.c',\n '_superlu_utils.c',\n '_superluobject.c']\n ext_sources += get_sgemv_fix(lapack_opt)\n\n config.add_extension('_superlu',\n sources=ext_sources,\n libraries=['superlu_src'],\n depends=(sources + headers),\n extra_info=lapack_opt,\n **numpy_nodepr_api\n )\n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n", "path": "scipy/sparse/linalg/dsolve/setup.py"}]}
| 1,015 | 170 |
gh_patches_debug_34544
|
rasdani/github-patches
|
git_diff
|
Project-MONAI__MONAI-6067
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecate `create_multi_gpu_supervised_trainer` APIs
**Is your feature request related to a problem? Please describe.**
As we have already set up the MONAI workflow based on our own SupervisedTrainer, there seems to be no need to keep the ignite trainer APIs, which were developed at the beginning of the project.
It would be nice to deprecate them:
https://github.com/Project-MONAI/MONAI/blob/dev/monai/engines/multi_gpu_supervised_trainer.py
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `monai/engines/multi_gpu_supervised_trainer.py`
Content:
```
1 # Copyright (c) MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12 from __future__ import annotations
13
14 from collections.abc import Callable, Sequence
15 from typing import TYPE_CHECKING
16
17 import torch.nn
18 from torch.nn.parallel import DataParallel, DistributedDataParallel
19 from torch.optim.optimizer import Optimizer
20
21 from monai.config import IgniteInfo
22 from monai.engines.utils import get_devices_spec
23 from monai.utils import min_version, optional_import
24
25 create_supervised_trainer, _ = optional_import(
26 "ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "create_supervised_trainer"
27 )
28 create_supervised_evaluator, _ = optional_import(
29 "ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "create_supervised_evaluator"
30 )
31 _prepare_batch, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "_prepare_batch")
32 if TYPE_CHECKING:
33 from ignite.engine import Engine
34 from ignite.metrics import Metric
35 else:
36 Engine, _ = optional_import(
37 "ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine", as_type="decorator"
38 )
39 Metric, _ = optional_import(
40 "ignite.metrics", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Metric", as_type="decorator"
41 )
42
43 __all__ = ["create_multigpu_supervised_trainer", "create_multigpu_supervised_evaluator"]
44
45
46 def _default_transform(_x: torch.Tensor, _y: torch.Tensor, _y_pred: torch.Tensor, loss: torch.Tensor) -> float:
47 return loss.item()
48
49
50 def _default_eval_transform(
51 x: torch.Tensor, y: torch.Tensor, y_pred: torch.Tensor
52 ) -> tuple[torch.Tensor, torch.Tensor]:
53 return y_pred, y
54
55
56 def create_multigpu_supervised_trainer(
57 net: torch.nn.Module,
58 optimizer: Optimizer,
59 loss_fn: Callable,
60 devices: Sequence[str | torch.device] | None = None,
61 non_blocking: bool = False,
62 prepare_batch: Callable = _prepare_batch,
63 output_transform: Callable = _default_transform,
64 distributed: bool = False,
65 ) -> Engine:
66 """
67 Derived from `create_supervised_trainer` in Ignite.
68
69 Factory function for creating a trainer for supervised models.
70
71 Args:
72 net: the network to train.
73 optimizer: the optimizer to use.
74 loss_fn: the loss function to use.
75 devices: device(s) type specification (default: None).
76 Applies to both model and batches. None is all devices used, empty list is CPU only.
77 non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
78 with respect to the host. For other cases, this argument has no effect.
79 prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
80 tuple of tensors `(batch_x, batch_y)`.
81 output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value
82 to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
83 distributed: whether convert model to `DistributedDataParallel`, if `True`, `devices` must contain
84 only 1 GPU or CPU for current distributed rank.
85
86 Returns:
87 Engine: a trainer engine with supervised update function.
88
89 Note:
90 `engine.state.output` for this engine is defined by `output_transform` parameter and is the loss
91 of the processed batch by default.
92 """
93
94 devices_ = get_devices_spec(devices)
95 if distributed:
96 if len(devices_) > 1:
97 raise ValueError(f"for distributed training, `devices` must contain only 1 GPU or CPU, but got {devices_}.")
98 net = DistributedDataParallel(net, device_ids=devices_)
99 elif len(devices_) > 1:
100 net = DataParallel(net)
101
102 return create_supervised_trainer( # type: ignore[no-any-return]
103 net, optimizer, loss_fn, devices_[0], non_blocking, prepare_batch, output_transform
104 )
105
106
107 def create_multigpu_supervised_evaluator(
108 net: torch.nn.Module,
109 metrics: dict[str, Metric] | None = None,
110 devices: Sequence[str | torch.device] | None = None,
111 non_blocking: bool = False,
112 prepare_batch: Callable = _prepare_batch,
113 output_transform: Callable = _default_eval_transform,
114 distributed: bool = False,
115 ) -> Engine:
116 """
117 Derived from `create_supervised_evaluator` in Ignite.
118
119 Factory function for creating an evaluator for supervised models.
120
121 Args:
122 net: the model to train.
123 metrics: a map of metric names to Metrics.
124 devices: device(s) type specification (default: None).
125 Applies to both model and batches. None is all devices used, empty list is CPU only.
126 non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
127 with respect to the host. For other cases, this argument has no effect.
128 prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
129 tuple of tensors `(batch_x, batch_y)`.
130 output_transform: function that receives 'x', 'y', 'y_pred' and returns value
131 to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)`
132 which fits output expected by metrics. If you change it you should use `output_transform` in metrics.
133 distributed: whether convert model to `DistributedDataParallel`, if `True`, `devices` must contain
134 only 1 GPU or CPU for current distributed rank.
135
136 Note:
137 `engine.state.output` for this engine is defined by `output_transform` parameter and is
138 a tuple of `(batch_pred, batch_y)` by default.
139
140 Returns:
141 Engine: an evaluator engine with supervised inference function.
142 """
143
144 devices_ = get_devices_spec(devices)
145
146 if distributed:
147 net = DistributedDataParallel(net, device_ids=devices_)
148 if len(devices_) > 1:
149 raise ValueError(
150 f"for distributed evaluation, `devices` must contain only 1 GPU or CPU, but got {devices_}."
151 )
152 elif len(devices_) > 1:
153 net = DataParallel(net)
154
155 return create_supervised_evaluator( # type: ignore[no-any-return]
156 net, metrics, devices_[0], non_blocking, prepare_batch, output_transform
157 )
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/monai/engines/multi_gpu_supervised_trainer.py b/monai/engines/multi_gpu_supervised_trainer.py
--- a/monai/engines/multi_gpu_supervised_trainer.py
+++ b/monai/engines/multi_gpu_supervised_trainer.py
@@ -20,7 +20,7 @@
from monai.config import IgniteInfo
from monai.engines.utils import get_devices_spec
-from monai.utils import min_version, optional_import
+from monai.utils import deprecated, min_version, optional_import
create_supervised_trainer, _ = optional_import(
"ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "create_supervised_trainer"
@@ -53,6 +53,11 @@
return y_pred, y
+@deprecated(
+ since="1.1",
+ removed="1.3",
+ msg_suffix=("Native ignite engine lacks support of many MONAI features, please use `SupervisedTrainer` instead."),
+)
def create_multigpu_supervised_trainer(
net: torch.nn.Module,
optimizer: Optimizer,
@@ -100,10 +105,23 @@
net = DataParallel(net)
return create_supervised_trainer( # type: ignore[no-any-return]
- net, optimizer, loss_fn, devices_[0], non_blocking, prepare_batch, output_transform
+ model=net,
+ optimizer=optimizer,
+ loss_fn=loss_fn,
+ device=devices_[0],
+ non_blocking=non_blocking,
+ prepare_batch=prepare_batch,
+ output_transform=output_transform,
)
+@deprecated(
+ since="1.1",
+ removed="1.3",
+ msg_suffix=(
+ "Native ignite evaluator lacks support of many MONAI features, please use `SupervisedEvaluator` instead."
+ ),
+)
def create_multigpu_supervised_evaluator(
net: torch.nn.Module,
metrics: dict[str, Metric] | None = None,
@@ -153,5 +171,10 @@
net = DataParallel(net)
return create_supervised_evaluator( # type: ignore[no-any-return]
- net, metrics, devices_[0], non_blocking, prepare_batch, output_transform
+ model=net,
+ metrics=metrics,
+ device=devices_[0],
+ non_blocking=non_blocking,
+ prepare_batch=prepare_batch,
+ output_transform=output_transform,
)
|
{"golden_diff": "diff --git a/monai/engines/multi_gpu_supervised_trainer.py b/monai/engines/multi_gpu_supervised_trainer.py\n--- a/monai/engines/multi_gpu_supervised_trainer.py\n+++ b/monai/engines/multi_gpu_supervised_trainer.py\n@@ -20,7 +20,7 @@\n \n from monai.config import IgniteInfo\n from monai.engines.utils import get_devices_spec\n-from monai.utils import min_version, optional_import\n+from monai.utils import deprecated, min_version, optional_import\n \n create_supervised_trainer, _ = optional_import(\n \"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"create_supervised_trainer\"\n@@ -53,6 +53,11 @@\n return y_pred, y\n \n \n+@deprecated(\n+ since=\"1.1\",\n+ removed=\"1.3\",\n+ msg_suffix=(\"Native ignite engine lacks support of many MONAI features, please use `SupervisedTrainer` instead.\"),\n+)\n def create_multigpu_supervised_trainer(\n net: torch.nn.Module,\n optimizer: Optimizer,\n@@ -100,10 +105,23 @@\n net = DataParallel(net)\n \n return create_supervised_trainer( # type: ignore[no-any-return]\n- net, optimizer, loss_fn, devices_[0], non_blocking, prepare_batch, output_transform\n+ model=net,\n+ optimizer=optimizer,\n+ loss_fn=loss_fn,\n+ device=devices_[0],\n+ non_blocking=non_blocking,\n+ prepare_batch=prepare_batch,\n+ output_transform=output_transform,\n )\n \n \n+@deprecated(\n+ since=\"1.1\",\n+ removed=\"1.3\",\n+ msg_suffix=(\n+ \"Native ignite evaluator lacks support of many MONAI features, please use `SupervisedEvaluator` instead.\"\n+ ),\n+)\n def create_multigpu_supervised_evaluator(\n net: torch.nn.Module,\n metrics: dict[str, Metric] | None = None,\n@@ -153,5 +171,10 @@\n net = DataParallel(net)\n \n return create_supervised_evaluator( # type: ignore[no-any-return]\n- net, metrics, devices_[0], non_blocking, prepare_batch, output_transform\n+ model=net,\n+ metrics=metrics,\n+ device=devices_[0],\n+ non_blocking=non_blocking,\n+ prepare_batch=prepare_batch,\n+ output_transform=output_transform,\n )\n", "issue": "Deprecate `create_multi_gpu_supervised_trainer` APIs\n**Is your feature request related to a problem? 
Please describe.**\r\nAs we already set up MONAI workflow based on our own SupervisedTrainer, seems no need to keep the ignite trainer APIs which were developed at the beginning of the project.\r\nWould be nice to deprecate them:\r\nhttps://github.com/Project-MONAI/MONAI/blob/dev/monai/engines/multi_gpu_supervised_trainer.py\n", "before_files": [{"content": "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom collections.abc import Callable, Sequence\nfrom typing import TYPE_CHECKING\n\nimport torch.nn\nfrom torch.nn.parallel import DataParallel, DistributedDataParallel\nfrom torch.optim.optimizer import Optimizer\n\nfrom monai.config import IgniteInfo\nfrom monai.engines.utils import get_devices_spec\nfrom monai.utils import min_version, optional_import\n\ncreate_supervised_trainer, _ = optional_import(\n \"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"create_supervised_trainer\"\n)\ncreate_supervised_evaluator, _ = optional_import(\n \"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"create_supervised_evaluator\"\n)\n_prepare_batch, _ = optional_import(\"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"_prepare_batch\")\nif TYPE_CHECKING:\n from ignite.engine import Engine\n from ignite.metrics import Metric\nelse:\n Engine, _ = optional_import(\n \"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"Engine\", as_type=\"decorator\"\n )\n Metric, _ = optional_import(\n \"ignite.metrics\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"Metric\", as_type=\"decorator\"\n )\n\n__all__ = [\"create_multigpu_supervised_trainer\", \"create_multigpu_supervised_evaluator\"]\n\n\ndef _default_transform(_x: torch.Tensor, _y: torch.Tensor, _y_pred: torch.Tensor, loss: torch.Tensor) -> float:\n return loss.item()\n\n\ndef _default_eval_transform(\n x: torch.Tensor, y: torch.Tensor, y_pred: torch.Tensor\n) -> tuple[torch.Tensor, torch.Tensor]:\n return y_pred, y\n\n\ndef create_multigpu_supervised_trainer(\n net: torch.nn.Module,\n optimizer: Optimizer,\n loss_fn: Callable,\n devices: Sequence[str | torch.device] | None = None,\n non_blocking: bool = False,\n prepare_batch: Callable = _prepare_batch,\n output_transform: Callable = _default_transform,\n distributed: bool = False,\n) -> Engine:\n \"\"\"\n Derived from `create_supervised_trainer` in Ignite.\n\n Factory function for creating a trainer for supervised models.\n\n Args:\n net: the network to train.\n optimizer: the optimizer to use.\n loss_fn: the loss function to use.\n devices: device(s) type specification (default: None).\n Applies to both model and batches. None is all devices used, empty list is CPU only.\n non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. 
For other cases, this argument has no effect.\n prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value\n to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.\n distributed: whether convert model to `DistributedDataParallel`, if `True`, `devices` must contain\n only 1 GPU or CPU for current distributed rank.\n\n Returns:\n Engine: a trainer engine with supervised update function.\n\n Note:\n `engine.state.output` for this engine is defined by `output_transform` parameter and is the loss\n of the processed batch by default.\n \"\"\"\n\n devices_ = get_devices_spec(devices)\n if distributed:\n if len(devices_) > 1:\n raise ValueError(f\"for distributed training, `devices` must contain only 1 GPU or CPU, but got {devices_}.\")\n net = DistributedDataParallel(net, device_ids=devices_)\n elif len(devices_) > 1:\n net = DataParallel(net)\n\n return create_supervised_trainer( # type: ignore[no-any-return]\n net, optimizer, loss_fn, devices_[0], non_blocking, prepare_batch, output_transform\n )\n\n\ndef create_multigpu_supervised_evaluator(\n net: torch.nn.Module,\n metrics: dict[str, Metric] | None = None,\n devices: Sequence[str | torch.device] | None = None,\n non_blocking: bool = False,\n prepare_batch: Callable = _prepare_batch,\n output_transform: Callable = _default_eval_transform,\n distributed: bool = False,\n) -> Engine:\n \"\"\"\n Derived from `create_supervised_evaluator` in Ignite.\n\n Factory function for creating an evaluator for supervised models.\n\n Args:\n net: the model to train.\n metrics: a map of metric names to Metrics.\n devices: device(s) type specification (default: None).\n Applies to both model and batches. None is all devices used, empty list is CPU only.\n non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n output_transform: function that receives 'x', 'y', 'y_pred' and returns value\n to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)`\n which fits output expected by metrics. 
If you change it you should use `output_transform` in metrics.\n distributed: whether convert model to `DistributedDataParallel`, if `True`, `devices` must contain\n only 1 GPU or CPU for current distributed rank.\n\n Note:\n `engine.state.output` for this engine is defined by `output_transform` parameter and is\n a tuple of `(batch_pred, batch_y)` by default.\n\n Returns:\n Engine: an evaluator engine with supervised inference function.\n \"\"\"\n\n devices_ = get_devices_spec(devices)\n\n if distributed:\n net = DistributedDataParallel(net, device_ids=devices_)\n if len(devices_) > 1:\n raise ValueError(\n f\"for distributed evaluation, `devices` must contain only 1 GPU or CPU, but got {devices_}.\"\n )\n elif len(devices_) > 1:\n net = DataParallel(net)\n\n return create_supervised_evaluator( # type: ignore[no-any-return]\n net, metrics, devices_[0], non_blocking, prepare_batch, output_transform\n )\n", "path": "monai/engines/multi_gpu_supervised_trainer.py"}], "after_files": [{"content": "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom collections.abc import Callable, Sequence\nfrom typing import TYPE_CHECKING\n\nimport torch.nn\nfrom torch.nn.parallel import DataParallel, DistributedDataParallel\nfrom torch.optim.optimizer import Optimizer\n\nfrom monai.config import IgniteInfo\nfrom monai.engines.utils import get_devices_spec\nfrom monai.utils import deprecated, min_version, optional_import\n\ncreate_supervised_trainer, _ = optional_import(\n \"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"create_supervised_trainer\"\n)\ncreate_supervised_evaluator, _ = optional_import(\n \"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"create_supervised_evaluator\"\n)\n_prepare_batch, _ = optional_import(\"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"_prepare_batch\")\nif TYPE_CHECKING:\n from ignite.engine import Engine\n from ignite.metrics import Metric\nelse:\n Engine, _ = optional_import(\n \"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"Engine\", as_type=\"decorator\"\n )\n Metric, _ = optional_import(\n \"ignite.metrics\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"Metric\", as_type=\"decorator\"\n )\n\n__all__ = [\"create_multigpu_supervised_trainer\", \"create_multigpu_supervised_evaluator\"]\n\n\ndef _default_transform(_x: torch.Tensor, _y: torch.Tensor, _y_pred: torch.Tensor, loss: torch.Tensor) -> float:\n return loss.item()\n\n\ndef _default_eval_transform(\n x: torch.Tensor, y: torch.Tensor, y_pred: torch.Tensor\n) -> tuple[torch.Tensor, torch.Tensor]:\n return y_pred, y\n\n\n@deprecated(\n since=\"1.1\",\n removed=\"1.3\",\n msg_suffix=(\"Native ignite engine lacks support of many MONAI features, please use `SupervisedTrainer` instead.\"),\n)\ndef create_multigpu_supervised_trainer(\n net: torch.nn.Module,\n optimizer: Optimizer,\n loss_fn: Callable,\n devices: Sequence[str | torch.device] | None = None,\n non_blocking: bool = False,\n 
prepare_batch: Callable = _prepare_batch,\n output_transform: Callable = _default_transform,\n distributed: bool = False,\n) -> Engine:\n \"\"\"\n Derived from `create_supervised_trainer` in Ignite.\n\n Factory function for creating a trainer for supervised models.\n\n Args:\n net: the network to train.\n optimizer: the optimizer to use.\n loss_fn: the loss function to use.\n devices: device(s) type specification (default: None).\n Applies to both model and batches. None is all devices used, empty list is CPU only.\n non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value\n to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.\n distributed: whether convert model to `DistributedDataParallel`, if `True`, `devices` must contain\n only 1 GPU or CPU for current distributed rank.\n\n Returns:\n Engine: a trainer engine with supervised update function.\n\n Note:\n `engine.state.output` for this engine is defined by `output_transform` parameter and is the loss\n of the processed batch by default.\n \"\"\"\n\n devices_ = get_devices_spec(devices)\n if distributed:\n if len(devices_) > 1:\n raise ValueError(f\"for distributed training, `devices` must contain only 1 GPU or CPU, but got {devices_}.\")\n net = DistributedDataParallel(net, device_ids=devices_)\n elif len(devices_) > 1:\n net = DataParallel(net)\n\n return create_supervised_trainer( # type: ignore[no-any-return]\n model=net,\n optimizer=optimizer,\n loss_fn=loss_fn,\n device=devices_[0],\n non_blocking=non_blocking,\n prepare_batch=prepare_batch,\n output_transform=output_transform,\n )\n\n\n@deprecated(\n since=\"1.1\",\n removed=\"1.3\",\n msg_suffix=(\n \"Native ignite evaluator lacks support of many MONAI features, please use `SupervisedEvaluator` instead.\"\n ),\n)\ndef create_multigpu_supervised_evaluator(\n net: torch.nn.Module,\n metrics: dict[str, Metric] | None = None,\n devices: Sequence[str | torch.device] | None = None,\n non_blocking: bool = False,\n prepare_batch: Callable = _prepare_batch,\n output_transform: Callable = _default_eval_transform,\n distributed: bool = False,\n) -> Engine:\n \"\"\"\n Derived from `create_supervised_evaluator` in Ignite.\n\n Factory function for creating an evaluator for supervised models.\n\n Args:\n net: the model to train.\n metrics: a map of metric names to Metrics.\n devices: device(s) type specification (default: None).\n Applies to both model and batches. None is all devices used, empty list is CPU only.\n non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n output_transform: function that receives 'x', 'y', 'y_pred' and returns value\n to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)`\n which fits output expected by metrics. 
If you change it you should use `output_transform` in metrics.\n distributed: whether convert model to `DistributedDataParallel`, if `True`, `devices` must contain\n only 1 GPU or CPU for current distributed rank.\n\n Note:\n `engine.state.output` for this engine is defined by `output_transform` parameter and is\n a tuple of `(batch_pred, batch_y)` by default.\n\n Returns:\n Engine: an evaluator engine with supervised inference function.\n \"\"\"\n\n devices_ = get_devices_spec(devices)\n\n if distributed:\n net = DistributedDataParallel(net, device_ids=devices_)\n if len(devices_) > 1:\n raise ValueError(\n f\"for distributed evaluation, `devices` must contain only 1 GPU or CPU, but got {devices_}.\"\n )\n elif len(devices_) > 1:\n net = DataParallel(net)\n\n return create_supervised_evaluator( # type: ignore[no-any-return]\n model=net,\n metrics=metrics,\n device=devices_[0],\n non_blocking=non_blocking,\n prepare_batch=prepare_batch,\n output_transform=output_transform,\n )\n", "path": "monai/engines/multi_gpu_supervised_trainer.py"}]}
| 2,261 | 558 |
gh_patches_debug_16851
|
rasdani/github-patches
|
git_diff
|
opendatacube__datacube-core-999
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Driver resolution code cannot handle dataset format being None
When attempting to load a dataset without a specified format, `None` is used as a string
Example reported on Slack:
```
2020-07-23 17:05:21,576 48721 datacube-ingest ERROR Failed to create storage unit file (Exception: 'NoneType' object has no attribute 'lower')
Traceback (most recent call last):
File "/home/sopon/projects/cornucopia/odc-core/datacube/scripts/ingest.py", line 340, in process_tasks
executor.result(future)
File "/home/sopon/projects/cornucopia/odc-core/datacube/executor.py", line 63, in result
return func(*args, **kwargs)
File "/home/sopon/projects/cornucopia/odc-core/datacube/executor.py", line 33, in reraise
raise t.with_traceback(e, traceback)
File "/home/sopon/projects/cornucopia/odc-core/datacube/executor.py", line 36, in get_ready
result = SerialExecutor.result(futures[0])
File "/home/sopon/projects/cornucopia/odc-core/datacube/executor.py", line 63, in result
return func(*args, **kwargs)
File "/home/sopon/projects/cornucopia/odc-core/datacube/scripts/ingest.py", line 257, in ingest_work
fuse_func=fuse_func)
File "/home/sopon/projects/cornucopia/odc-core/datacube/api/core.py", line 589, in load_data
progress_cbk=progress_cbk)
File "/home/sopon/projects/cornucopia/odc-core/datacube/api/core.py", line 526, in _xr_load
progress_cbk=_cbk)
File "/home/sopon/projects/cornucopia/odc-core/datacube/api/core.py", line 703, in _fuse_measurement
src = new_datasource(BandInfo(ds, measurement.name))
File "/home/sopon/projects/cornucopia/odc-core/datacube/drivers/readers.py", line 101, in new_datasource
source_type = choose_datasource(band)
File "/home/sopon/projects/cornucopia/odc-core/datacube/drivers/readers.py", line 80, in choose_datasource
return rdr_cache()(band.uri_scheme, band.format, fallback=RasterDatasetDataSource)
File "/home/sopon/projects/cornucopia/odc-core/datacube/drivers/readers.py", line 37, in __call__
driver = self._find_driver(uri_scheme, fmt)
File "/home/sopon/projects/cornucopia/odc-core/datacube/drivers/readers.py", line 25, in _find_driver
key = (uri_scheme.lower(), fmt.lower())
AttributeError: 'NoneType' object has no attribute 'lower'
2020-07-23 17:05:21,576 48721 datacube-ingest INFO Storage unit file creation status (Created_Count: 14, Failed_Count: 15)
```
I think the proper solution to that is to fall back to the default driver implementation. Also, at index time a warning should be printed if the dataset format is not specified, see #955.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `datacube/drivers/readers.py`
Content:
```
1 from typing import List, Optional, Callable
2 from .driver_cache import load_drivers
3 from .datasource import DataSource
4 from ._tools import singleton_setup
5 from datacube.storage._base import BandInfo
6
7 DatasourceFactory = Callable[[BandInfo], DataSource] # pylint: disable=invalid-name
8
9
10 class ReaderDriverCache(object):
11 def __init__(self, group: str):
12 self._drivers = load_drivers(group)
13
14 lookup = {}
15 for driver in self._drivers.values():
16 for uri_scheme in driver.protocols:
17 for fmt in driver.formats:
18 if driver.supports(uri_scheme, fmt):
19 key = (uri_scheme.lower(), fmt.lower())
20 lookup[key] = driver
21
22 self._lookup = lookup
23
24 def _find_driver(self, uri_scheme: str, fmt: str):
25 key = (uri_scheme.lower(), fmt.lower())
26 return self._lookup.get(key)
27
28 def __call__(self, uri_scheme: str, fmt: str,
29 fallback: Optional[DatasourceFactory] = None) -> DatasourceFactory:
30 """Lookup `new_datasource` constructor method from the driver. Returns
31 `fallback` method if no driver is found.
32
33 :param uri_scheme: Protocol part of the Dataset uri
34 :param fmt: Dataset format
35 :return: Returns function `(DataSet, band_name:str) => DataSource`
36 """
37 driver = self._find_driver(uri_scheme, fmt)
38 if driver is not None:
39 return driver.new_datasource
40 if fallback is not None:
41 return fallback
42 else:
43 raise KeyError("No driver found and no fallback provided")
44
45 def drivers(self) -> List[str]:
46 """ Returns list of driver names
47 """
48 return list(self._drivers.keys())
49
50
51 def rdr_cache() -> ReaderDriverCache:
52 """ Singleton for ReaderDriverCache
53 """
54 return singleton_setup(rdr_cache, '_instance',
55 ReaderDriverCache,
56 'datacube.plugins.io.read')
57
58
59 def reader_drivers() -> List[str]:
60 """ Returns list driver names
61 """
62 return rdr_cache().drivers()
63
64
65 def choose_datasource(band: 'BandInfo') -> DatasourceFactory:
66 """Returns appropriate `DataSource` class (or a constructor method) for loading
67 given `dataset`.
68
69 An appropriate `DataSource` implementation is chosen based on:
70
71 - Dataset URI (protocol part)
72 - Dataset format
73 - Current system settings
74 - Available IO plugins
75
76 NOTE: we assume that all bands can be loaded with the same implementation.
77
78 """
79 from datacube.storage._rio import RasterDatasetDataSource
80 return rdr_cache()(band.uri_scheme, band.format, fallback=RasterDatasetDataSource)
81
82
83 def new_datasource(band: BandInfo) -> Optional[DataSource]:
84 """Returns a newly constructed data source to read dataset band data.
85
86 An appropriate `DataSource` implementation is chosen based on:
87
88 - Dataset URI (protocol part)
89 - Dataset format
90 - Current system settings
91 - Available IO plugins
92
93 This function will return the default :class:`RasterDatasetDataSource` if no more specific
94 ``DataSource`` can be found.
95
96 :param dataset: The dataset to read.
97 :param str band_name: the name of the band to read.
98
99 """
100
101 source_type = choose_datasource(band)
102
103 if source_type is None:
104 return None
105
106 return source_type(band)
107
```
Path: `datacube/storage/_base.py`
Content:
```
1 from typing import Optional, Dict, Any, Tuple
2 from urllib.parse import urlparse
3
4 from datacube.model import Dataset
5 from datacube.utils.uris import uri_resolve, pick_uri
6
7
8 def _get_band_and_layer(b: Dict[str, Any]) -> Tuple[Optional[int], Optional[str]]:
9 """ Encode legacy logic for extracting band/layer:
10
11 on input:
12 band -- Int | Nothing
13 layer -- Str | Int | Nothing
14
15 Valid combinations are:
16 band layer Output
17 ---------------------------
18 - - ( - , - )
19 - int (int, - )
20 int - (int, - )
21 int str (int, str)
22 - str ( - , str)
23
24 """
25 band = b.get('band')
26 layer = b.get('layer')
27
28 if band is None:
29 if isinstance(layer, int):
30 return (layer, None)
31 if layer is None or isinstance(layer, str):
32 return (None, layer)
33
34 raise ValueError('Expect `layer` to be one of None,int,str but it is {}'.format(type(layer)))
35 else:
36 if not isinstance(band, int):
37 raise ValueError('Expect `band` to be an integer (it is {})'.format(type(band)))
38 if layer is not None and not isinstance(layer, str):
39 raise ValueError('Expect `layer` to be one of None,str but it is {}'.format(type(layer)))
40
41 return (band, layer)
42
43
44 def _extract_driver_data(ds: Dataset) -> Optional[Any]:
45 return ds.metadata_doc.get('driver_data', None)
46
47
48 def measurement_paths(ds: Dataset) -> Dict[str, str]:
49 """
50 Returns a dictionary mapping from band name to url pointing to band storage
51 resource.
52
53 :return: Band Name => URL
54 """
55 if ds.uris is None:
56 raise ValueError('No locations on this dataset')
57
58 base = pick_uri(ds.uris)
59 return dict((k, uri_resolve(base, m.get('path')))
60 for k, m in ds.measurements.items())
61
62
63 class BandInfo:
64 __slots__ = ('name',
65 'uri',
66 'band',
67 'layer',
68 'dtype',
69 'nodata',
70 'units',
71 'crs',
72 'transform',
73 'center_time',
74 'format',
75 'driver_data')
76
77 def __init__(self,
78 ds: Dataset,
79 band: str,
80 uri_scheme: Optional[str] = None):
81 try:
82 mp, = ds.type.lookup_measurements([band]).values()
83 except KeyError:
84 raise ValueError('No such band: {}'.format(band))
85
86 mm = ds.measurements.get(mp.canonical_name)
87
88 if mm is None:
89 raise ValueError('No such band: {}'.format(band))
90
91 if ds.uris is None:
92 raise ValueError('No uris defined on a dataset')
93
94 base_uri = pick_uri(ds.uris, uri_scheme)
95
96 bint, layer = _get_band_and_layer(mm)
97
98 self.name = band
99 self.uri = uri_resolve(base_uri, mm.get('path'))
100 self.band = bint
101 self.layer = layer
102 self.dtype = mp.dtype
103 self.nodata = mp.nodata
104 self.units = mp.units
105 self.crs = ds.crs
106 self.transform = ds.transform
107 self.format = ds.format
108 self.driver_data = _extract_driver_data(ds)
109
110 @property
111 def uri_scheme(self) -> str:
112 return urlparse(self.uri).scheme
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/datacube/drivers/readers.py b/datacube/drivers/readers.py
--- a/datacube/drivers/readers.py
+++ b/datacube/drivers/readers.py
@@ -62,7 +62,7 @@
return rdr_cache().drivers()
-def choose_datasource(band: 'BandInfo') -> DatasourceFactory:
+def choose_datasource(band: BandInfo) -> DatasourceFactory:
"""Returns appropriate `DataSource` class (or a constructor method) for loading
given `dataset`.
diff --git a/datacube/storage/_base.py b/datacube/storage/_base.py
--- a/datacube/storage/_base.py
+++ b/datacube/storage/_base.py
@@ -104,9 +104,9 @@
self.units = mp.units
self.crs = ds.crs
self.transform = ds.transform
- self.format = ds.format
+ self.format = ds.format or ''
self.driver_data = _extract_driver_data(ds)
@property
def uri_scheme(self) -> str:
- return urlparse(self.uri).scheme
+ return urlparse(self.uri).scheme or ''
|
{"golden_diff": "diff --git a/datacube/drivers/readers.py b/datacube/drivers/readers.py\n--- a/datacube/drivers/readers.py\n+++ b/datacube/drivers/readers.py\n@@ -62,7 +62,7 @@\n return rdr_cache().drivers()\n \n \n-def choose_datasource(band: 'BandInfo') -> DatasourceFactory:\n+def choose_datasource(band: BandInfo) -> DatasourceFactory:\n \"\"\"Returns appropriate `DataSource` class (or a constructor method) for loading\n given `dataset`.\n \ndiff --git a/datacube/storage/_base.py b/datacube/storage/_base.py\n--- a/datacube/storage/_base.py\n+++ b/datacube/storage/_base.py\n@@ -104,9 +104,9 @@\n self.units = mp.units\n self.crs = ds.crs\n self.transform = ds.transform\n- self.format = ds.format\n+ self.format = ds.format or ''\n self.driver_data = _extract_driver_data(ds)\n \n @property\n def uri_scheme(self) -> str:\n- return urlparse(self.uri).scheme\n+ return urlparse(self.uri).scheme or ''\n", "issue": "Driver resolution code can not handle dataset format being None\nWhen attempting to load dataset without format specified `None` is used as a string\r\n\r\nExample reported on Slack:\r\n\r\n```\r\n2020-07-23 17:05:21,576 48721 datacube-ingest ERROR Failed to create storage unit file (Exception: 'NoneType' object has no attribute 'lower') \r\nTraceback (most recent call last):\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/scripts/ingest.py\", line 340, in process_tasks\r\n executor.result(future)\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/executor.py\", line 63, in result\r\n return func(*args, **kwargs)\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/executor.py\", line 33, in reraise\r\n raise t.with_traceback(e, traceback)\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/executor.py\", line 36, in get_ready\r\n result = SerialExecutor.result(futures[0])\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/executor.py\", line 63, in result\r\n return func(*args, **kwargs)\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/scripts/ingest.py\", line 257, in ingest_work\r\n fuse_func=fuse_func)\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/api/core.py\", line 589, in load_data\r\n progress_cbk=progress_cbk)\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/api/core.py\", line 526, in _xr_load\r\n progress_cbk=_cbk)\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/api/core.py\", line 703, in _fuse_measurement\r\n src = new_datasource(BandInfo(ds, measurement.name))\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/drivers/readers.py\", line 101, in new_datasource\r\n source_type = choose_datasource(band)\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/drivers/readers.py\", line 80, in choose_datasource\r\n return rdr_cache()(band.uri_scheme, band.format, fallback=RasterDatasetDataSource)\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/drivers/readers.py\", line 37, in __call__\r\n driver = self._find_driver(uri_scheme, fmt)\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/drivers/readers.py\", line 25, in _find_driver\r\n key = (uri_scheme.lower(), fmt.lower())\r\nAttributeError: 'NoneType' object has no attribute 'lower'\r\n2020-07-23 17:05:21,576 48721 datacube-ingest INFO Storage unit file creation status (Created_Count: 14, Failed_Count: 15)\r\n```\r\n\r\nI think proper solution to that is to fallback to default driver implementation. 
Also at index time warning should be printed if dataset format is not specified, see #955.\n", "before_files": [{"content": "from typing import List, Optional, Callable\nfrom .driver_cache import load_drivers\nfrom .datasource import DataSource\nfrom ._tools import singleton_setup\nfrom datacube.storage._base import BandInfo\n\nDatasourceFactory = Callable[[BandInfo], DataSource] # pylint: disable=invalid-name\n\n\nclass ReaderDriverCache(object):\n def __init__(self, group: str):\n self._drivers = load_drivers(group)\n\n lookup = {}\n for driver in self._drivers.values():\n for uri_scheme in driver.protocols:\n for fmt in driver.formats:\n if driver.supports(uri_scheme, fmt):\n key = (uri_scheme.lower(), fmt.lower())\n lookup[key] = driver\n\n self._lookup = lookup\n\n def _find_driver(self, uri_scheme: str, fmt: str):\n key = (uri_scheme.lower(), fmt.lower())\n return self._lookup.get(key)\n\n def __call__(self, uri_scheme: str, fmt: str,\n fallback: Optional[DatasourceFactory] = None) -> DatasourceFactory:\n \"\"\"Lookup `new_datasource` constructor method from the driver. Returns\n `fallback` method if no driver is found.\n\n :param uri_scheme: Protocol part of the Dataset uri\n :param fmt: Dataset format\n :return: Returns function `(DataSet, band_name:str) => DataSource`\n \"\"\"\n driver = self._find_driver(uri_scheme, fmt)\n if driver is not None:\n return driver.new_datasource\n if fallback is not None:\n return fallback\n else:\n raise KeyError(\"No driver found and no fallback provided\")\n\n def drivers(self) -> List[str]:\n \"\"\" Returns list of driver names\n \"\"\"\n return list(self._drivers.keys())\n\n\ndef rdr_cache() -> ReaderDriverCache:\n \"\"\" Singleton for ReaderDriverCache\n \"\"\"\n return singleton_setup(rdr_cache, '_instance',\n ReaderDriverCache,\n 'datacube.plugins.io.read')\n\n\ndef reader_drivers() -> List[str]:\n \"\"\" Returns list driver names\n \"\"\"\n return rdr_cache().drivers()\n\n\ndef choose_datasource(band: 'BandInfo') -> DatasourceFactory:\n \"\"\"Returns appropriate `DataSource` class (or a constructor method) for loading\n given `dataset`.\n\n An appropriate `DataSource` implementation is chosen based on:\n\n - Dataset URI (protocol part)\n - Dataset format\n - Current system settings\n - Available IO plugins\n\n NOTE: we assume that all bands can be loaded with the same implementation.\n\n \"\"\"\n from datacube.storage._rio import RasterDatasetDataSource\n return rdr_cache()(band.uri_scheme, band.format, fallback=RasterDatasetDataSource)\n\n\ndef new_datasource(band: BandInfo) -> Optional[DataSource]:\n \"\"\"Returns a newly constructed data source to read dataset band data.\n\n An appropriate `DataSource` implementation is chosen based on:\n\n - Dataset URI (protocol part)\n - Dataset format\n - Current system settings\n - Available IO plugins\n\n This function will return the default :class:`RasterDatasetDataSource` if no more specific\n ``DataSource`` can be found.\n\n :param dataset: The dataset to read.\n :param str band_name: the name of the band to read.\n\n \"\"\"\n\n source_type = choose_datasource(band)\n\n if source_type is None:\n return None\n\n return source_type(band)\n", "path": "datacube/drivers/readers.py"}, {"content": "from typing import Optional, Dict, Any, Tuple\nfrom urllib.parse import urlparse\n\nfrom datacube.model import Dataset\nfrom datacube.utils.uris import uri_resolve, pick_uri\n\n\ndef _get_band_and_layer(b: Dict[str, Any]) -> Tuple[Optional[int], Optional[str]]:\n \"\"\" Encode legacy logic for extracting 
band/layer:\n\n on input:\n band -- Int | Nothing\n layer -- Str | Int | Nothing\n\n Valid combinations are:\n band layer Output\n ---------------------------\n - - ( - , - )\n - int (int, - )\n int - (int, - )\n int str (int, str)\n - str ( - , str)\n\n \"\"\"\n band = b.get('band')\n layer = b.get('layer')\n\n if band is None:\n if isinstance(layer, int):\n return (layer, None)\n if layer is None or isinstance(layer, str):\n return (None, layer)\n\n raise ValueError('Expect `layer` to be one of None,int,str but it is {}'.format(type(layer)))\n else:\n if not isinstance(band, int):\n raise ValueError('Expect `band` to be an integer (it is {})'.format(type(band)))\n if layer is not None and not isinstance(layer, str):\n raise ValueError('Expect `layer` to be one of None,str but it is {}'.format(type(layer)))\n\n return (band, layer)\n\n\ndef _extract_driver_data(ds: Dataset) -> Optional[Any]:\n return ds.metadata_doc.get('driver_data', None)\n\n\ndef measurement_paths(ds: Dataset) -> Dict[str, str]:\n \"\"\"\n Returns a dictionary mapping from band name to url pointing to band storage\n resource.\n\n :return: Band Name => URL\n \"\"\"\n if ds.uris is None:\n raise ValueError('No locations on this dataset')\n\n base = pick_uri(ds.uris)\n return dict((k, uri_resolve(base, m.get('path')))\n for k, m in ds.measurements.items())\n\n\nclass BandInfo:\n __slots__ = ('name',\n 'uri',\n 'band',\n 'layer',\n 'dtype',\n 'nodata',\n 'units',\n 'crs',\n 'transform',\n 'center_time',\n 'format',\n 'driver_data')\n\n def __init__(self,\n ds: Dataset,\n band: str,\n uri_scheme: Optional[str] = None):\n try:\n mp, = ds.type.lookup_measurements([band]).values()\n except KeyError:\n raise ValueError('No such band: {}'.format(band))\n\n mm = ds.measurements.get(mp.canonical_name)\n\n if mm is None:\n raise ValueError('No such band: {}'.format(band))\n\n if ds.uris is None:\n raise ValueError('No uris defined on a dataset')\n\n base_uri = pick_uri(ds.uris, uri_scheme)\n\n bint, layer = _get_band_and_layer(mm)\n\n self.name = band\n self.uri = uri_resolve(base_uri, mm.get('path'))\n self.band = bint\n self.layer = layer\n self.dtype = mp.dtype\n self.nodata = mp.nodata\n self.units = mp.units\n self.crs = ds.crs\n self.transform = ds.transform\n self.format = ds.format\n self.driver_data = _extract_driver_data(ds)\n\n @property\n def uri_scheme(self) -> str:\n return urlparse(self.uri).scheme\n", "path": "datacube/storage/_base.py"}], "after_files": [{"content": "from typing import List, Optional, Callable\nfrom .driver_cache import load_drivers\nfrom .datasource import DataSource\nfrom ._tools import singleton_setup\nfrom datacube.storage._base import BandInfo\n\nDatasourceFactory = Callable[[BandInfo], DataSource] # pylint: disable=invalid-name\n\n\nclass ReaderDriverCache(object):\n def __init__(self, group: str):\n self._drivers = load_drivers(group)\n\n lookup = {}\n for driver in self._drivers.values():\n for uri_scheme in driver.protocols:\n for fmt in driver.formats:\n if driver.supports(uri_scheme, fmt):\n key = (uri_scheme.lower(), fmt.lower())\n lookup[key] = driver\n\n self._lookup = lookup\n\n def _find_driver(self, uri_scheme: str, fmt: str):\n key = (uri_scheme.lower(), fmt.lower())\n return self._lookup.get(key)\n\n def __call__(self, uri_scheme: str, fmt: str,\n fallback: Optional[DatasourceFactory] = None) -> DatasourceFactory:\n \"\"\"Lookup `new_datasource` constructor method from the driver. 
Returns\n `fallback` method if no driver is found.\n\n :param uri_scheme: Protocol part of the Dataset uri\n :param fmt: Dataset format\n :return: Returns function `(DataSet, band_name:str) => DataSource`\n \"\"\"\n driver = self._find_driver(uri_scheme, fmt)\n if driver is not None:\n return driver.new_datasource\n if fallback is not None:\n return fallback\n else:\n raise KeyError(\"No driver found and no fallback provided\")\n\n def drivers(self) -> List[str]:\n \"\"\" Returns list of driver names\n \"\"\"\n return list(self._drivers.keys())\n\n\ndef rdr_cache() -> ReaderDriverCache:\n \"\"\" Singleton for ReaderDriverCache\n \"\"\"\n return singleton_setup(rdr_cache, '_instance',\n ReaderDriverCache,\n 'datacube.plugins.io.read')\n\n\ndef reader_drivers() -> List[str]:\n \"\"\" Returns list driver names\n \"\"\"\n return rdr_cache().drivers()\n\n\ndef choose_datasource(band: BandInfo) -> DatasourceFactory:\n \"\"\"Returns appropriate `DataSource` class (or a constructor method) for loading\n given `dataset`.\n\n An appropriate `DataSource` implementation is chosen based on:\n\n - Dataset URI (protocol part)\n - Dataset format\n - Current system settings\n - Available IO plugins\n\n NOTE: we assume that all bands can be loaded with the same implementation.\n\n \"\"\"\n from datacube.storage._rio import RasterDatasetDataSource\n return rdr_cache()(band.uri_scheme, band.format, fallback=RasterDatasetDataSource)\n\n\ndef new_datasource(band: BandInfo) -> Optional[DataSource]:\n \"\"\"Returns a newly constructed data source to read dataset band data.\n\n An appropriate `DataSource` implementation is chosen based on:\n\n - Dataset URI (protocol part)\n - Dataset format\n - Current system settings\n - Available IO plugins\n\n This function will return the default :class:`RasterDatasetDataSource` if no more specific\n ``DataSource`` can be found.\n\n :param dataset: The dataset to read.\n :param str band_name: the name of the band to read.\n\n \"\"\"\n\n source_type = choose_datasource(band)\n\n if source_type is None:\n return None\n\n return source_type(band)\n", "path": "datacube/drivers/readers.py"}, {"content": "from typing import Optional, Dict, Any, Tuple\nfrom urllib.parse import urlparse\n\nfrom datacube.model import Dataset\nfrom datacube.utils.uris import uri_resolve, pick_uri\n\n\ndef _get_band_and_layer(b: Dict[str, Any]) -> Tuple[Optional[int], Optional[str]]:\n \"\"\" Encode legacy logic for extracting band/layer:\n\n on input:\n band -- Int | Nothing\n layer -- Str | Int | Nothing\n\n Valid combinations are:\n band layer Output\n ---------------------------\n - - ( - , - )\n - int (int, - )\n int - (int, - )\n int str (int, str)\n - str ( - , str)\n\n \"\"\"\n band = b.get('band')\n layer = b.get('layer')\n\n if band is None:\n if isinstance(layer, int):\n return (layer, None)\n if layer is None or isinstance(layer, str):\n return (None, layer)\n\n raise ValueError('Expect `layer` to be one of None,int,str but it is {}'.format(type(layer)))\n else:\n if not isinstance(band, int):\n raise ValueError('Expect `band` to be an integer (it is {})'.format(type(band)))\n if layer is not None and not isinstance(layer, str):\n raise ValueError('Expect `layer` to be one of None,str but it is {}'.format(type(layer)))\n\n return (band, layer)\n\n\ndef _extract_driver_data(ds: Dataset) -> Optional[Any]:\n return ds.metadata_doc.get('driver_data', None)\n\n\ndef measurement_paths(ds: Dataset) -> Dict[str, str]:\n \"\"\"\n Returns a dictionary mapping from band name to url pointing to band 
storage\n resource.\n\n :return: Band Name => URL\n \"\"\"\n if ds.uris is None:\n raise ValueError('No locations on this dataset')\n\n base = pick_uri(ds.uris)\n return dict((k, uri_resolve(base, m.get('path')))\n for k, m in ds.measurements.items())\n\n\nclass BandInfo:\n __slots__ = ('name',\n 'uri',\n 'band',\n 'layer',\n 'dtype',\n 'nodata',\n 'units',\n 'crs',\n 'transform',\n 'center_time',\n 'format',\n 'driver_data')\n\n def __init__(self,\n ds: Dataset,\n band: str,\n uri_scheme: Optional[str] = None):\n try:\n mp, = ds.type.lookup_measurements([band]).values()\n except KeyError:\n raise ValueError('No such band: {}'.format(band))\n\n mm = ds.measurements.get(mp.canonical_name)\n\n if mm is None:\n raise ValueError('No such band: {}'.format(band))\n\n if ds.uris is None:\n raise ValueError('No uris defined on a dataset')\n\n base_uri = pick_uri(ds.uris, uri_scheme)\n\n bint, layer = _get_band_and_layer(mm)\n\n self.name = band\n self.uri = uri_resolve(base_uri, mm.get('path'))\n self.band = bint\n self.layer = layer\n self.dtype = mp.dtype\n self.nodata = mp.nodata\n self.units = mp.units\n self.crs = ds.crs\n self.transform = ds.transform\n self.format = ds.format or ''\n self.driver_data = _extract_driver_data(ds)\n\n @property\n def uri_scheme(self) -> str:\n return urlparse(self.uri).scheme or ''\n", "path": "datacube/storage/_base.py"}]}
| 2,987 | 247 |
gh_patches_debug_29852
|
rasdani/github-patches
|
git_diff
|
e-valuation__EvaP-1506
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Proxied evaluations shouldn't show as delegated
Evaluations where a proxy user is responsible shouldn't be shown as "delegated courses" for the delegates of this proxy user. The delegates should see these courses in the list of their own courses, like those they are directly responsible for, and they shouldn't be hidden when the option to hide delegated courses is active.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/contributor/views.py`
Content:
```
1 from django.contrib import messages
2 from django.core.exceptions import PermissionDenied, SuspiciousOperation
3 from django.db import IntegrityError, transaction
4 from django.db.models import Max, Q
5 from django.forms.models import inlineformset_factory
6 from django.shortcuts import get_object_or_404, redirect, render
7 from django.utils.translation import gettext as _
8 from django.views.decorators.http import require_POST
9
10 from evap.contributor.forms import EvaluationForm, DelegatesForm, EditorContributionForm, DelegateSelectionForm
11 from evap.evaluation.auth import responsible_or_contributor_or_delegate_required, editor_or_delegate_required, editor_required
12 from evap.evaluation.models import Contribution, Course, CourseType, Degree, Evaluation, Semester, UserProfile, EmailTemplate
13 from evap.evaluation.tools import get_parameter_from_url_or_session, sort_formset, FileResponse
14 from evap.results.exporters import ResultsExporter
15 from evap.results.tools import (calculate_average_distribution, distribution_to_grade,
16 get_evaluations_with_course_result_attributes, get_single_result_rating_result,
17 normalized_distribution)
18 from evap.staff.forms import ContributionFormSet
19 from evap.student.views import get_valid_form_groups_or_render_vote_page
20
21
22 @responsible_or_contributor_or_delegate_required
23 def index(request):
24 user = request.user
25 show_delegated = get_parameter_from_url_or_session(request, "show_delegated", True)
26
27 contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']
28 own_courses = Course.objects.filter(
29 Q(evaluations__state__in=contributor_visible_states) & (
30 Q(responsibles=user) |
31 Q(evaluations__contributions__contributor=user)
32 )
33 )
34 own_evaluations = [evaluation for course in own_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user)]
35 for evaluation in own_evaluations:
36 evaluation.contributes_to = evaluation.contributions.filter(contributor=user).exists()
37
38 displayed_evaluations = set(own_evaluations)
39 if show_delegated:
40 represented_users = user.represented_users.all()
41 delegated_courses = Course.objects.filter(
42 Q(evaluations__state__in=contributor_visible_states) & (
43 Q(responsibles__in=represented_users) |
44 Q(
45 evaluations__contributions__role=Contribution.Role.EDITOR,
46 evaluations__contributions__contributor__in=represented_users,
47 )
48 )
49 )
50 delegated_evaluations = set(evaluation for course in delegated_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user))
51 for evaluation in delegated_evaluations:
52 evaluation.delegated_evaluation = True
53 displayed_evaluations |= delegated_evaluations - displayed_evaluations
54 displayed_evaluations = list(displayed_evaluations)
55 displayed_evaluations.sort(key=lambda evaluation: (evaluation.course.name, evaluation.name)) # evaluations must be sorted for regrouping them in the template
56
57 for evaluation in displayed_evaluations:
58 if evaluation.state == "published":
59 if not evaluation.is_single_result:
60 evaluation.distribution = calculate_average_distribution(evaluation)
61 else:
62 evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
63 evaluation.distribution = normalized_distribution(evaluation.single_result_rating_result.counts)
64 evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
65 displayed_evaluations = get_evaluations_with_course_result_attributes(displayed_evaluations)
66
67 semesters = Semester.objects.all()
68 semester_list = [dict(
69 semester_name=semester.name,
70 id=semester.id,
71 is_active=semester.is_active,
72 evaluations=[evaluation for evaluation in displayed_evaluations if evaluation.course.semester_id == semester.id]
73 ) for semester in semesters]
74
75 template_data = dict(
76 semester_list=semester_list,
77 show_delegated=show_delegated,
78 delegate_selection_form=DelegateSelectionForm(),
79 )
80 return render(request, "contributor_index.html", template_data)
81
82
83 @editor_required
84 def settings_edit(request):
85 user = request.user
86 form = DelegatesForm(request.POST or None, request.FILES or None, instance=user)
87
88 if form.is_valid():
89 form.save()
90
91 messages.success(request, _("Successfully updated your settings."))
92 return redirect('contributor:settings_edit')
93
94 return render(request, "contributor_settings.html", dict(
95 form=form,
96 delegate_of=user.represented_users.all(),
97 cc_users=user.cc_users.all(),
98 ccing_users=user.ccing_users.all(),
99 ))
100
101
102 @editor_or_delegate_required
103 def evaluation_view(request, evaluation_id):
104 user = request.user
105 evaluation = get_object_or_404(Evaluation, id=evaluation_id)
106
107 # check rights
108 if not evaluation.is_user_editor_or_delegate(user) or evaluation.state not in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']:
109 raise PermissionDenied
110
111 InlineContributionFormset = inlineformset_factory(Evaluation, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=0)
112
113 form = EvaluationForm(request.POST or None, instance=evaluation)
114 formset = InlineContributionFormset(request.POST or None, instance=evaluation)
115
116 # make everything read-only
117 for cform in formset.forms + [form]:
118 for field in cform.fields.values():
119 field.disabled = True
120
121 template_data = dict(form=form, formset=formset, evaluation=evaluation, editable=False)
122 return render(request, "contributor_evaluation_form.html", template_data)
123
124
125 def render_preview(request, formset, evaluation_form, evaluation):
126 # open transaction to not let any other requests see anything of what we're doing here
127 try:
128 with transaction.atomic():
129 evaluation = evaluation_form.save()
130 evaluation.set_last_modified(request.user)
131 evaluation.save()
132 formset.save()
133 request.POST = None # this prevents errors rendered in the vote form
134
135 preview_response = get_valid_form_groups_or_render_vote_page(request, evaluation, preview=True, for_rendering_in_modal=True)[1].content.decode()
136 raise IntegrityError # rollback transaction to discard the database writes
137 except IntegrityError:
138 pass
139
140 return preview_response
141
142
143 @editor_or_delegate_required
144 def evaluation_edit(request, evaluation_id):
145 evaluation = get_object_or_404(Evaluation, id=evaluation_id)
146
147 # check rights
148 if not (evaluation.is_user_editor_or_delegate(request.user) and evaluation.state == 'prepared'):
149 raise PermissionDenied
150
151 post_operation = request.POST.get('operation') if request.POST else None
152 preview = post_operation == 'preview'
153
154 InlineContributionFormset = inlineformset_factory(Evaluation, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=1)
155 evaluation_form = EvaluationForm(request.POST or None, instance=evaluation)
156 formset = InlineContributionFormset(request.POST or None, instance=evaluation, form_kwargs={'evaluation': evaluation})
157
158 forms_are_valid = evaluation_form.is_valid() and formset.is_valid()
159 if forms_are_valid and not preview:
160 if post_operation not in ('save', 'approve'):
161 raise SuspiciousOperation("Invalid POST operation")
162
163 form_has_changed = evaluation_form.has_changed() or formset.has_changed()
164
165 if form_has_changed:
166 evaluation.set_last_modified(request.user)
167 evaluation_form.save()
168 formset.save()
169
170 if post_operation == 'approve':
171 evaluation.editor_approve()
172 evaluation.save()
173 if form_has_changed:
174 messages.success(request, _("Successfully updated and approved evaluation."))
175 else:
176 messages.success(request, _("Successfully approved evaluation."))
177 else:
178 messages.success(request, _("Successfully updated evaluation."))
179
180 return redirect('contributor:index')
181
182 preview_html = None
183 if preview and forms_are_valid:
184 preview_html = render_preview(request, formset, evaluation_form, evaluation)
185
186 if not forms_are_valid and (evaluation_form.errors or formset.errors):
187 if preview:
188 messages.error(request, _("The preview could not be rendered. Please resolve the errors shown below."))
189 else:
190 messages.error(request, _("The form was not saved. Please resolve the errors shown below."))
191
192 sort_formset(request, formset)
193 template_data = dict(form=evaluation_form, formset=formset, evaluation=evaluation, editable=True, preview_html=preview_html)
194 return render(request, "contributor_evaluation_form.html", template_data)
195
196
197 @responsible_or_contributor_or_delegate_required
198 def evaluation_preview(request, evaluation_id):
199 user = request.user
200 evaluation = get_object_or_404(Evaluation, id=evaluation_id)
201
202 # check rights
203 if not (evaluation.is_user_responsible_or_contributor_or_delegate(user) and evaluation.state in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']):
204 raise PermissionDenied
205
206 return get_valid_form_groups_or_render_vote_page(request, evaluation, preview=True)[1]
207
208
209 @require_POST
210 @editor_or_delegate_required
211 def evaluation_direct_delegation(request, evaluation_id):
212 delegate_user_id = request.POST.get("delegate_to")
213
214 evaluation = get_object_or_404(Evaluation, id=evaluation_id)
215 delegate_user = get_object_or_404(UserProfile, id=delegate_user_id)
216
217 contribution, created = Contribution.objects.update_or_create(
218 evaluation=evaluation,
219 contributor=delegate_user,
220 defaults={'role': Contribution.Role.EDITOR},
221 )
222 if created:
223 contribution.order = evaluation.contributions.all().aggregate(Max('order'))['order__max'] + 1
224 contribution.save()
225
226 template = EmailTemplate.objects.get(name=EmailTemplate.DIRECT_DELEGATION)
227 subject_params = {"evaluation": evaluation, "user": request.user, "delegate_user": delegate_user}
228 body_params = subject_params
229
230 # we don't provide the request here since send_to_user only uses it to display a warning message in case the user does not have
231 # an email address. In this special case, we don't want that warning. Instead, we want a mail to the admins.
232 template.send_to_user(delegate_user, subject_params, body_params, use_cc=True, additional_cc_users=[request.user])
233
234 messages.add_message(
235 request,
236 messages.SUCCESS,
237 _('{} was added as a contributor for evaluation "{}" and was sent an email with further information.').format(str(delegate_user), str(evaluation))
238 )
239
240 return redirect('contributor:index')
241
242
243 def export_contributor_results(contributor):
244 filename = "Evaluation_{}.xls".format(contributor.full_name)
245 response = FileResponse(filename, content_type="application/vnd.ms-excel")
246 ResultsExporter().export(
247 response,
248 Semester.objects.all(),
249 [(Degree.objects.all(), CourseType.objects.all())],
250 include_not_enough_voters=True,
251 include_unpublished=False,
252 contributor=contributor
253 )
254 return response
255
256
257 @responsible_or_contributor_or_delegate_required
258 def export(request):
259 return export_contributor_results(request.user)
260
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/evap/contributor/views.py b/evap/contributor/views.py
--- a/evap/contributor/views.py
+++ b/evap/contributor/views.py
@@ -24,11 +24,14 @@
user = request.user
show_delegated = get_parameter_from_url_or_session(request, "show_delegated", True)
+ represented_proxy_users = user.represented_users.filter(is_proxy_user=True)
contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']
own_courses = Course.objects.filter(
Q(evaluations__state__in=contributor_visible_states) & (
Q(responsibles=user) |
- Q(evaluations__contributions__contributor=user)
+ Q(evaluations__contributions__contributor=user) |
+ Q(evaluations__contributions__contributor__in=represented_proxy_users) |
+ Q(responsibles__in=represented_proxy_users)
)
)
own_evaluations = [evaluation for course in own_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user)]
@@ -37,7 +40,7 @@
displayed_evaluations = set(own_evaluations)
if show_delegated:
- represented_users = user.represented_users.all()
+ represented_users = user.represented_users.exclude(is_proxy_user=True)
delegated_courses = Course.objects.filter(
Q(evaluations__state__in=contributor_visible_states) & (
Q(responsibles__in=represented_users) |
|
{"golden_diff": "diff --git a/evap/contributor/views.py b/evap/contributor/views.py\n--- a/evap/contributor/views.py\n+++ b/evap/contributor/views.py\n@@ -24,11 +24,14 @@\n user = request.user\n show_delegated = get_parameter_from_url_or_session(request, \"show_delegated\", True)\n \n+ represented_proxy_users = user.represented_users.filter(is_proxy_user=True)\n contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']\n own_courses = Course.objects.filter(\n Q(evaluations__state__in=contributor_visible_states) & (\n Q(responsibles=user) |\n- Q(evaluations__contributions__contributor=user)\n+ Q(evaluations__contributions__contributor=user) |\n+ Q(evaluations__contributions__contributor__in=represented_proxy_users) |\n+ Q(responsibles__in=represented_proxy_users)\n )\n )\n own_evaluations = [evaluation for course in own_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user)]\n@@ -37,7 +40,7 @@\n \n displayed_evaluations = set(own_evaluations)\n if show_delegated:\n- represented_users = user.represented_users.all()\n+ represented_users = user.represented_users.exclude(is_proxy_user=True)\n delegated_courses = Course.objects.filter(\n Q(evaluations__state__in=contributor_visible_states) & (\n Q(responsibles__in=represented_users) |\n", "issue": "Proxied evaluations shouldn't show as delegated\nEvaluations where a proxy user is responsible shouldn't be shown as \"delegated courses\" for the delegates of this proxy user. The delegates should see this course in the list of their own courses like those they are directly responsible for and they shouldn't be hidden when the option to hide delegated courses is active.\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied, SuspiciousOperation\nfrom django.db import IntegrityError, transaction\nfrom django.db.models import Max, Q\nfrom django.forms.models import inlineformset_factory\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import gettext as _\nfrom django.views.decorators.http import require_POST\n\nfrom evap.contributor.forms import EvaluationForm, DelegatesForm, EditorContributionForm, DelegateSelectionForm\nfrom evap.evaluation.auth import responsible_or_contributor_or_delegate_required, editor_or_delegate_required, editor_required\nfrom evap.evaluation.models import Contribution, Course, CourseType, Degree, Evaluation, Semester, UserProfile, EmailTemplate\nfrom evap.evaluation.tools import get_parameter_from_url_or_session, sort_formset, FileResponse\nfrom evap.results.exporters import ResultsExporter\nfrom evap.results.tools import (calculate_average_distribution, distribution_to_grade,\n get_evaluations_with_course_result_attributes, get_single_result_rating_result,\n normalized_distribution)\nfrom evap.staff.forms import ContributionFormSet\nfrom evap.student.views import get_valid_form_groups_or_render_vote_page\n\n\n@responsible_or_contributor_or_delegate_required\ndef index(request):\n user = request.user\n show_delegated = get_parameter_from_url_or_session(request, \"show_delegated\", True)\n\n contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']\n own_courses = Course.objects.filter(\n Q(evaluations__state__in=contributor_visible_states) & (\n Q(responsibles=user) |\n Q(evaluations__contributions__contributor=user)\n )\n )\n own_evaluations = 
[evaluation for course in own_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user)]\n for evaluation in own_evaluations:\n evaluation.contributes_to = evaluation.contributions.filter(contributor=user).exists()\n\n displayed_evaluations = set(own_evaluations)\n if show_delegated:\n represented_users = user.represented_users.all()\n delegated_courses = Course.objects.filter(\n Q(evaluations__state__in=contributor_visible_states) & (\n Q(responsibles__in=represented_users) |\n Q(\n evaluations__contributions__role=Contribution.Role.EDITOR,\n evaluations__contributions__contributor__in=represented_users,\n )\n )\n )\n delegated_evaluations = set(evaluation for course in delegated_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user))\n for evaluation in delegated_evaluations:\n evaluation.delegated_evaluation = True\n displayed_evaluations |= delegated_evaluations - displayed_evaluations\n displayed_evaluations = list(displayed_evaluations)\n displayed_evaluations.sort(key=lambda evaluation: (evaluation.course.name, evaluation.name)) # evaluations must be sorted for regrouping them in the template\n\n for evaluation in displayed_evaluations:\n if evaluation.state == \"published\":\n if not evaluation.is_single_result:\n evaluation.distribution = calculate_average_distribution(evaluation)\n else:\n evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)\n evaluation.distribution = normalized_distribution(evaluation.single_result_rating_result.counts)\n evaluation.avg_grade = distribution_to_grade(evaluation.distribution)\n displayed_evaluations = get_evaluations_with_course_result_attributes(displayed_evaluations)\n\n semesters = Semester.objects.all()\n semester_list = [dict(\n semester_name=semester.name,\n id=semester.id,\n is_active=semester.is_active,\n evaluations=[evaluation for evaluation in displayed_evaluations if evaluation.course.semester_id == semester.id]\n ) for semester in semesters]\n\n template_data = dict(\n semester_list=semester_list,\n show_delegated=show_delegated,\n delegate_selection_form=DelegateSelectionForm(),\n )\n return render(request, \"contributor_index.html\", template_data)\n\n\n@editor_required\ndef settings_edit(request):\n user = request.user\n form = DelegatesForm(request.POST or None, request.FILES or None, instance=user)\n\n if form.is_valid():\n form.save()\n\n messages.success(request, _(\"Successfully updated your settings.\"))\n return redirect('contributor:settings_edit')\n\n return render(request, \"contributor_settings.html\", dict(\n form=form,\n delegate_of=user.represented_users.all(),\n cc_users=user.cc_users.all(),\n ccing_users=user.ccing_users.all(),\n ))\n\n\n@editor_or_delegate_required\ndef evaluation_view(request, evaluation_id):\n user = request.user\n evaluation = get_object_or_404(Evaluation, id=evaluation_id)\n\n # check rights\n if not evaluation.is_user_editor_or_delegate(user) or evaluation.state not in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']:\n raise PermissionDenied\n\n InlineContributionFormset = inlineformset_factory(Evaluation, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=0)\n\n form = EvaluationForm(request.POST or None, instance=evaluation)\n formset = InlineContributionFormset(request.POST or None, instance=evaluation)\n\n # make everything read-only\n for cform in formset.forms + [form]:\n for field in cform.fields.values():\n field.disabled = 
True\n\n template_data = dict(form=form, formset=formset, evaluation=evaluation, editable=False)\n return render(request, \"contributor_evaluation_form.html\", template_data)\n\n\ndef render_preview(request, formset, evaluation_form, evaluation):\n # open transaction to not let any other requests see anything of what we're doing here\n try:\n with transaction.atomic():\n evaluation = evaluation_form.save()\n evaluation.set_last_modified(request.user)\n evaluation.save()\n formset.save()\n request.POST = None # this prevents errors rendered in the vote form\n\n preview_response = get_valid_form_groups_or_render_vote_page(request, evaluation, preview=True, for_rendering_in_modal=True)[1].content.decode()\n raise IntegrityError # rollback transaction to discard the database writes\n except IntegrityError:\n pass\n\n return preview_response\n\n\n@editor_or_delegate_required\ndef evaluation_edit(request, evaluation_id):\n evaluation = get_object_or_404(Evaluation, id=evaluation_id)\n\n # check rights\n if not (evaluation.is_user_editor_or_delegate(request.user) and evaluation.state == 'prepared'):\n raise PermissionDenied\n\n post_operation = request.POST.get('operation') if request.POST else None\n preview = post_operation == 'preview'\n\n InlineContributionFormset = inlineformset_factory(Evaluation, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=1)\n evaluation_form = EvaluationForm(request.POST or None, instance=evaluation)\n formset = InlineContributionFormset(request.POST or None, instance=evaluation, form_kwargs={'evaluation': evaluation})\n\n forms_are_valid = evaluation_form.is_valid() and formset.is_valid()\n if forms_are_valid and not preview:\n if post_operation not in ('save', 'approve'):\n raise SuspiciousOperation(\"Invalid POST operation\")\n\n form_has_changed = evaluation_form.has_changed() or formset.has_changed()\n\n if form_has_changed:\n evaluation.set_last_modified(request.user)\n evaluation_form.save()\n formset.save()\n\n if post_operation == 'approve':\n evaluation.editor_approve()\n evaluation.save()\n if form_has_changed:\n messages.success(request, _(\"Successfully updated and approved evaluation.\"))\n else:\n messages.success(request, _(\"Successfully approved evaluation.\"))\n else:\n messages.success(request, _(\"Successfully updated evaluation.\"))\n\n return redirect('contributor:index')\n\n preview_html = None\n if preview and forms_are_valid:\n preview_html = render_preview(request, formset, evaluation_form, evaluation)\n\n if not forms_are_valid and (evaluation_form.errors or formset.errors):\n if preview:\n messages.error(request, _(\"The preview could not be rendered. Please resolve the errors shown below.\"))\n else:\n messages.error(request, _(\"The form was not saved. 
Please resolve the errors shown below.\"))\n\n sort_formset(request, formset)\n template_data = dict(form=evaluation_form, formset=formset, evaluation=evaluation, editable=True, preview_html=preview_html)\n return render(request, \"contributor_evaluation_form.html\", template_data)\n\n\n@responsible_or_contributor_or_delegate_required\ndef evaluation_preview(request, evaluation_id):\n user = request.user\n evaluation = get_object_or_404(Evaluation, id=evaluation_id)\n\n # check rights\n if not (evaluation.is_user_responsible_or_contributor_or_delegate(user) and evaluation.state in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']):\n raise PermissionDenied\n\n return get_valid_form_groups_or_render_vote_page(request, evaluation, preview=True)[1]\n\n\n@require_POST\n@editor_or_delegate_required\ndef evaluation_direct_delegation(request, evaluation_id):\n delegate_user_id = request.POST.get(\"delegate_to\")\n\n evaluation = get_object_or_404(Evaluation, id=evaluation_id)\n delegate_user = get_object_or_404(UserProfile, id=delegate_user_id)\n\n contribution, created = Contribution.objects.update_or_create(\n evaluation=evaluation,\n contributor=delegate_user,\n defaults={'role': Contribution.Role.EDITOR},\n )\n if created:\n contribution.order = evaluation.contributions.all().aggregate(Max('order'))['order__max'] + 1\n contribution.save()\n\n template = EmailTemplate.objects.get(name=EmailTemplate.DIRECT_DELEGATION)\n subject_params = {\"evaluation\": evaluation, \"user\": request.user, \"delegate_user\": delegate_user}\n body_params = subject_params\n\n # we don't provide the request here since send_to_user only uses it to display a warning message in case the user does not have\n # an email address. In this special case, we don't want that warning. 
Instead, we want a mail to the admins.\n template.send_to_user(delegate_user, subject_params, body_params, use_cc=True, additional_cc_users=[request.user])\n\n messages.add_message(\n request,\n messages.SUCCESS,\n _('{} was added as a contributor for evaluation \"{}\" and was sent an email with further information.').format(str(delegate_user), str(evaluation))\n )\n\n return redirect('contributor:index')\n\n\ndef export_contributor_results(contributor):\n filename = \"Evaluation_{}.xls\".format(contributor.full_name)\n response = FileResponse(filename, content_type=\"application/vnd.ms-excel\")\n ResultsExporter().export(\n response,\n Semester.objects.all(),\n [(Degree.objects.all(), CourseType.objects.all())],\n include_not_enough_voters=True,\n include_unpublished=False,\n contributor=contributor\n )\n return response\n\n\n@responsible_or_contributor_or_delegate_required\ndef export(request):\n return export_contributor_results(request.user)\n", "path": "evap/contributor/views.py"}], "after_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied, SuspiciousOperation\nfrom django.db import IntegrityError, transaction\nfrom django.db.models import Max, Q\nfrom django.forms.models import inlineformset_factory\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import gettext as _\nfrom django.views.decorators.http import require_POST\n\nfrom evap.contributor.forms import EvaluationForm, DelegatesForm, EditorContributionForm, DelegateSelectionForm\nfrom evap.evaluation.auth import responsible_or_contributor_or_delegate_required, editor_or_delegate_required, editor_required\nfrom evap.evaluation.models import Contribution, Course, CourseType, Degree, Evaluation, Semester, UserProfile, EmailTemplate\nfrom evap.evaluation.tools import get_parameter_from_url_or_session, sort_formset, FileResponse\nfrom evap.results.exporters import ResultsExporter\nfrom evap.results.tools import (calculate_average_distribution, distribution_to_grade,\n get_evaluations_with_course_result_attributes, get_single_result_rating_result,\n normalized_distribution)\nfrom evap.staff.forms import ContributionFormSet\nfrom evap.student.views import get_valid_form_groups_or_render_vote_page\n\n\n@responsible_or_contributor_or_delegate_required\ndef index(request):\n user = request.user\n show_delegated = get_parameter_from_url_or_session(request, \"show_delegated\", True)\n\n represented_proxy_users = user.represented_users.filter(is_proxy_user=True)\n contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']\n own_courses = Course.objects.filter(\n Q(evaluations__state__in=contributor_visible_states) & (\n Q(responsibles=user) |\n Q(evaluations__contributions__contributor=user) |\n Q(evaluations__contributions__contributor__in=represented_proxy_users) |\n Q(responsibles__in=represented_proxy_users)\n )\n )\n own_evaluations = [evaluation for course in own_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user)]\n for evaluation in own_evaluations:\n evaluation.contributes_to = evaluation.contributions.filter(contributor=user).exists()\n\n displayed_evaluations = set(own_evaluations)\n if show_delegated:\n represented_users = user.represented_users.exclude(is_proxy_user=True)\n delegated_courses = Course.objects.filter(\n Q(evaluations__state__in=contributor_visible_states) & (\n Q(responsibles__in=represented_users) |\n Q(\n 
evaluations__contributions__role=Contribution.Role.EDITOR,\n evaluations__contributions__contributor__in=represented_users,\n )\n )\n )\n delegated_evaluations = set(evaluation for course in delegated_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user))\n for evaluation in delegated_evaluations:\n evaluation.delegated_evaluation = True\n displayed_evaluations |= delegated_evaluations - displayed_evaluations\n displayed_evaluations = list(displayed_evaluations)\n displayed_evaluations.sort(key=lambda evaluation: (evaluation.course.name, evaluation.name)) # evaluations must be sorted for regrouping them in the template\n\n for evaluation in displayed_evaluations:\n if evaluation.state == \"published\":\n if not evaluation.is_single_result:\n evaluation.distribution = calculate_average_distribution(evaluation)\n else:\n evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)\n evaluation.distribution = normalized_distribution(evaluation.single_result_rating_result.counts)\n evaluation.avg_grade = distribution_to_grade(evaluation.distribution)\n displayed_evaluations = get_evaluations_with_course_result_attributes(displayed_evaluations)\n\n semesters = Semester.objects.all()\n semester_list = [dict(\n semester_name=semester.name,\n id=semester.id,\n is_active=semester.is_active,\n evaluations=[evaluation for evaluation in displayed_evaluations if evaluation.course.semester_id == semester.id]\n ) for semester in semesters]\n\n template_data = dict(\n semester_list=semester_list,\n show_delegated=show_delegated,\n delegate_selection_form=DelegateSelectionForm(),\n )\n return render(request, \"contributor_index.html\", template_data)\n\n\n@editor_required\ndef settings_edit(request):\n user = request.user\n form = DelegatesForm(request.POST or None, request.FILES or None, instance=user)\n\n if form.is_valid():\n form.save()\n\n messages.success(request, _(\"Successfully updated your settings.\"))\n return redirect('contributor:settings_edit')\n\n return render(request, \"contributor_settings.html\", dict(\n form=form,\n delegate_of=user.represented_users.all(),\n cc_users=user.cc_users.all(),\n ccing_users=user.ccing_users.all(),\n ))\n\n\n@editor_or_delegate_required\ndef evaluation_view(request, evaluation_id):\n user = request.user\n evaluation = get_object_or_404(Evaluation, id=evaluation_id)\n\n # check rights\n if not evaluation.is_user_editor_or_delegate(user) or evaluation.state not in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']:\n raise PermissionDenied\n\n InlineContributionFormset = inlineformset_factory(Evaluation, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=0)\n\n form = EvaluationForm(request.POST or None, instance=evaluation)\n formset = InlineContributionFormset(request.POST or None, instance=evaluation)\n\n # make everything read-only\n for cform in formset.forms + [form]:\n for field in cform.fields.values():\n field.disabled = True\n\n template_data = dict(form=form, formset=formset, evaluation=evaluation, editable=False)\n return render(request, \"contributor_evaluation_form.html\", template_data)\n\n\ndef render_preview(request, formset, evaluation_form, evaluation):\n # open transaction to not let any other requests see anything of what we're doing here\n try:\n with transaction.atomic():\n evaluation = evaluation_form.save()\n evaluation.set_last_modified(request.user)\n evaluation.save()\n formset.save()\n request.POST = None # this 
prevents errors rendered in the vote form\n\n preview_response = get_valid_form_groups_or_render_vote_page(request, evaluation, preview=True, for_rendering_in_modal=True)[1].content.decode()\n raise IntegrityError # rollback transaction to discard the database writes\n except IntegrityError:\n pass\n\n return preview_response\n\n\n@editor_or_delegate_required\ndef evaluation_edit(request, evaluation_id):\n evaluation = get_object_or_404(Evaluation, id=evaluation_id)\n\n # check rights\n if not (evaluation.is_user_editor_or_delegate(request.user) and evaluation.state == 'prepared'):\n raise PermissionDenied\n\n post_operation = request.POST.get('operation') if request.POST else None\n preview = post_operation == 'preview'\n\n InlineContributionFormset = inlineformset_factory(Evaluation, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=1)\n evaluation_form = EvaluationForm(request.POST or None, instance=evaluation)\n formset = InlineContributionFormset(request.POST or None, instance=evaluation, form_kwargs={'evaluation': evaluation})\n\n forms_are_valid = evaluation_form.is_valid() and formset.is_valid()\n if forms_are_valid and not preview:\n if post_operation not in ('save', 'approve'):\n raise SuspiciousOperation(\"Invalid POST operation\")\n\n form_has_changed = evaluation_form.has_changed() or formset.has_changed()\n\n if form_has_changed:\n evaluation.set_last_modified(request.user)\n evaluation_form.save()\n formset.save()\n\n if post_operation == 'approve':\n evaluation.editor_approve()\n evaluation.save()\n if form_has_changed:\n messages.success(request, _(\"Successfully updated and approved evaluation.\"))\n else:\n messages.success(request, _(\"Successfully approved evaluation.\"))\n else:\n messages.success(request, _(\"Successfully updated evaluation.\"))\n\n return redirect('contributor:index')\n\n preview_html = None\n if preview and forms_are_valid:\n preview_html = render_preview(request, formset, evaluation_form, evaluation)\n\n if not forms_are_valid and (evaluation_form.errors or formset.errors):\n if preview:\n messages.error(request, _(\"The preview could not be rendered. Please resolve the errors shown below.\"))\n else:\n messages.error(request, _(\"The form was not saved. 
Please resolve the errors shown below.\"))\n\n sort_formset(request, formset)\n template_data = dict(form=evaluation_form, formset=formset, evaluation=evaluation, editable=True, preview_html=preview_html)\n return render(request, \"contributor_evaluation_form.html\", template_data)\n\n\n@responsible_or_contributor_or_delegate_required\ndef evaluation_preview(request, evaluation_id):\n user = request.user\n evaluation = get_object_or_404(Evaluation, id=evaluation_id)\n\n # check rights\n if not (evaluation.is_user_responsible_or_contributor_or_delegate(user) and evaluation.state in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']):\n raise PermissionDenied\n\n return get_valid_form_groups_or_render_vote_page(request, evaluation, preview=True)[1]\n\n\n@require_POST\n@editor_or_delegate_required\ndef evaluation_direct_delegation(request, evaluation_id):\n delegate_user_id = request.POST.get(\"delegate_to\")\n\n evaluation = get_object_or_404(Evaluation, id=evaluation_id)\n delegate_user = get_object_or_404(UserProfile, id=delegate_user_id)\n\n contribution, created = Contribution.objects.update_or_create(\n evaluation=evaluation,\n contributor=delegate_user,\n defaults={'role': Contribution.Role.EDITOR},\n )\n if created:\n contribution.order = evaluation.contributions.all().aggregate(Max('order'))['order__max'] + 1\n contribution.save()\n\n template = EmailTemplate.objects.get(name=EmailTemplate.DIRECT_DELEGATION)\n subject_params = {\"evaluation\": evaluation, \"user\": request.user, \"delegate_user\": delegate_user}\n body_params = subject_params\n\n # we don't provide the request here since send_to_user only uses it to display a warning message in case the user does not have\n # an email address. In this special case, we don't want that warning. Instead, we want a mail to the admins.\n template.send_to_user(delegate_user, subject_params, body_params, use_cc=True, additional_cc_users=[request.user])\n\n messages.add_message(\n request,\n messages.SUCCESS,\n _('{} was added as a contributor for evaluation \"{}\" and was sent an email with further information.').format(str(delegate_user), str(evaluation))\n )\n\n return redirect('contributor:index')\n\n\ndef export_contributor_results(contributor):\n filename = \"Evaluation_{}.xls\".format(contributor.full_name)\n response = FileResponse(filename, content_type=\"application/vnd.ms-excel\")\n ResultsExporter().export(\n response,\n Semester.objects.all(),\n [(Degree.objects.all(), CourseType.objects.all())],\n include_not_enough_voters=True,\n include_unpublished=False,\n contributor=contributor\n )\n return response\n\n\n@responsible_or_contributor_or_delegate_required\ndef export(request):\n return export_contributor_results(request.user)\n", "path": "evap/contributor/views.py"}]}
| 3,381 | 358 |
gh_patches_debug_4853
|
rasdani/github-patches
|
git_diff
|
google__flax-3886
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve SEO for docs pages
When I Google a Flax module to pull up its API documentation, I get broken or irrelevant pages. Examples:
- [flax dense](https://www.google.com/search?q=flax%20dense) | [top result](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/_autosummary/flax.linen.Dense.html) | [desired page](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/layers.html#flax.linen.Dense)
- [flax layernorm](https://www.google.com/search?q=flax%20layernorm) | [top result](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/_autosummary/flax.linen.LayerNorm.html) | [desired page](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/layers.html#flax.linen.LayerNorm)
- [flax multi head attention](https://www.google.com/search?q=flax%20attention) | [top result](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/_autosummary/flax.linen.MultiHeadDotProductAttention.html) | [desired page](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/layers.html#flax.linen.MultiHeadAttention)
- [flax conv](https://www.google.com/search?q=flax%20conv) | [top result](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/_autosummary/flax.linen.Conv.html) | [desired page](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/layers.html#flax.linen.Conv)
Is there something that can be done on the website side to improve this?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # Copyright 2024 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Configuration file for the Sphinx documentation builder."""
16
17
18 # This file only contains a selection of the most common options. For a full
19 # list see the documentation:
20 # https://www.sphinx-doc.org/en/master/usage/configuration.html
21
22 # -- Path setup --------------------------------------------------------------
23
24 # If extensions (or modules to document with autodoc) are in another directory,
25 # add these directories to sys.path here. If the directory is relative to the
26 # documentation root, use os.path.abspath to make it absolute, like shown here.
27 #
28 # import os
29 # import sys
30 # sys.path.insert(0, os.path.abspath('.'))
31
32 import os
33 import sys
34
35 sys.path.insert(0, os.path.abspath('..'))
36 # Include local extension.
37 sys.path.append(os.path.abspath('./_ext'))
38
39 # patch sphinx
40 # -- Project information -----------------------------------------------------
41
42 project = 'Flax'
43 copyright = '2023, The Flax authors' # pylint: disable=redefined-builtin
44 author = 'The Flax authors'
45
46
47 # -- General configuration ---------------------------------------------------
48
49 # Add any Sphinx extension module names here, as strings. They can be
50 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
51 # ones.
52 extensions = [
53 'sphinx.ext.autodoc',
54 'sphinx.ext.autosummary',
55 'sphinx.ext.autosectionlabel',
56 'sphinx.ext.doctest',
57 'sphinx.ext.intersphinx',
58 'sphinx.ext.mathjax',
59 'sphinx.ext.napoleon',
60 'sphinx.ext.viewcode',
61 'myst_nb',
62 'codediff',
63 'flax_module',
64 'sphinx_design',
65 ]
66
67 # Add any paths that contain templates here, relative to this directory.
68 templates_path = ['_templates']
69
70 # List of patterns, relative to source directory, that match files and
71 # directories to ignore when looking for source files.
72 # This pattern also affects html_static_path and html_extra_path.
73 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
74
75 # The suffix(es) of source filenames.
76 # Note: important to list ipynb before md here: we have both md and ipynb
77 # copies of each notebook, and myst will choose which to convert based on
78 # the order in the source_suffix list. Notebooks which are not executed have
79 # outputs stored in ipynb but not in md, so we must convert the ipynb.
80 source_suffix = ['.rst', '.ipynb', '.md']
81
82 autosummary_generate = True
83
84 master_doc = 'index'
85
86 autodoc_typehints = 'none'
87
88
89 # -- Options for HTML output -------------------------------------------------
90
91 # The theme to use for HTML and HTML Help pages. See the documentation for
92 # a list of builtin themes.
93 #
94 # html_theme = 'pydata_sphinx_theme'
95 html_theme = 'sphinx_book_theme'
96 html_css_files = ['css/flax_theme.css']
97
98 # The name of an image file (relative to this directory) to place at the top
99 # of the sidebar.
100 html_logo = './flax.png'
101 html_favicon = './flax.png'
102
103 # title of the website
104 html_title = ''
105
106 # Add any paths that contain custom static files (such as style sheets) here,
107 # relative to this directory. They are copied after the builtin static files,
108 # so a file named 'default.css' will overwrite the builtin 'default.css'.
109 html_static_path = ['_static']
110
111 html_theme_options = {
112 'repository_url': 'https://github.com/google/flax',
113 'use_repository_button': True, # add a 'link to repository' button
114 'use_issues_button': False, # add an 'Open an Issue' button
115 'path_to_docs': (
116 'docs'
117 ), # used to compute the path to launch notebooks in colab
118 'launch_buttons': {
119 'colab_url': 'https://colab.research.google.com/',
120 },
121 'prev_next_buttons_location': None,
122 'show_navbar_depth': 1,
123 }
124
125 # -- Options for myst ----------------------------------------------
126 # uncomment line below to avoid running notebooks during development
127 nb_execution_mode = 'off'
128 # Notebook cell execution timeout; defaults to 30.
129 nb_execution_timeout = 100
130 # List of patterns, relative to source directory, that match notebook
131 # files that will not be executed.
132 myst_enable_extensions = ['dollarmath']
133 nb_execution_excludepatterns = [
134 'quick_start.ipynb', # <-- times out
135 'transfer_learning.ipynb', # <-- transformers requires flax<=0.7.0
136 'flax/experimental/nnx', # exclude nnx
137 ]
138 # raise exceptions on execution so CI can catch errors
139 nb_execution_allow_errors = False
140 nb_execution_raise_on_error = True
141
142 # -- Extension configuration -------------------------------------------------
143
144 # Tell sphinx-autodoc-typehints to generate stub parameter annotations including
145 # types, even if the parameters aren't explicitly documented.
146 always_document_param_types = True
147
148 # -- doctest configuration -------------------------------------------------
149 doctest_global_setup = """
150 import jax
151 import jax.numpy as jnp
152 from flax.experimental import nnx
153
154 import logging as slog
155 from absl import logging as alog
156
157 # Avoid certain absl logging messages to break doctest
158 filtered_message = [
159 'SaveArgs.aggregate is deprecated',
160 '',
161 ]
162
163 class _CustomLogFilter(slog.Formatter):
164 def format(self, record):
165 message = super(_CustomLogFilter, self).format(record)
166 for m in filtered_message:
167 if m in message:
168 return ''
169 return message
170
171 alog.use_absl_handler()
172 alog.get_absl_handler().setFormatter(_CustomLogFilter())
173 """
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -108,6 +108,8 @@
# so a file named 'default.css' will overwrite the builtin 'default.css'.
html_static_path = ['_static']
+html_extra_path = ['robots.txt']
+
html_theme_options = {
'repository_url': 'https://github.com/google/flax',
'use_repository_button': True, # add a 'link to repository' button
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -108,6 +108,8 @@\n # so a file named 'default.css' will overwrite the builtin 'default.css'.\n html_static_path = ['_static']\n \n+html_extra_path = ['robots.txt']\n+\n html_theme_options = {\n 'repository_url': 'https://github.com/google/flax',\n 'use_repository_button': True, # add a 'link to repository' button\n", "issue": "Improve SEO for docs pages\nWhen I Google a Flax module to pull up its API documentation, I get broken or irrelevant pages. Examples:\r\n\r\n- [flax dense](https://www.google.com/search?q=flax%20dense) | [top result](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/_autosummary/flax.linen.Dense.html) | [desired page](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/layers.html#flax.linen.Dense)\r\n- [flax layernorm](https://www.google.com/search?q=flax%20layernorm) | [top result](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/_autosummary/flax.linen.LayerNorm.html) | [desired page](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/layers.html#flax.linen.LayerNorm)\r\n- [flax multi head attention](https://www.google.com/search?q=flax%20attention) | [top result](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/_autosummary/flax.linen.MultiHeadDotProductAttention.html) | [desired page](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/layers.html#flax.linen.MultiHeadAttention)\r\n- [flax conv](https://www.google.com/search?q=flax%20conv) | [top result](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/_autosummary/flax.linen.Conv.html) | [desired page](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/layers.html#flax.linen.Conv)\r\n\r\nIs there something that can be done on the website side to improve this?\n", "before_files": [{"content": "# Copyright 2024 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Configuration file for the Sphinx documentation builder.\"\"\"\n\n\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath('..'))\n# Include local extension.\nsys.path.append(os.path.abspath('./_ext'))\n\n# patch sphinx\n# -- Project information -----------------------------------------------------\n\nproject = 'Flax'\ncopyright = '2023, The Flax authors' # pylint: disable=redefined-builtin\nauthor = 'The Flax authors'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'myst_nb',\n 'codediff',\n 'flax_module',\n 'sphinx_design',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The suffix(es) of source filenames.\n# Note: important to list ipynb before md here: we have both md and ipynb\n# copies of each notebook, and myst will choose which to convert based on\n# the order in the source_suffix list. Notebooks which are not executed have\n# outputs stored in ipynb but not in md, so we must convert the ipynb.\nsource_suffix = ['.rst', '.ipynb', '.md']\n\nautosummary_generate = True\n\nmaster_doc = 'index'\n\nautodoc_typehints = 'none'\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n# html_theme = 'pydata_sphinx_theme'\nhtml_theme = 'sphinx_book_theme'\nhtml_css_files = ['css/flax_theme.css']\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = './flax.png'\nhtml_favicon = './flax.png'\n\n# title of the website\nhtml_title = ''\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named 'default.css' will overwrite the builtin 'default.css'.\nhtml_static_path = ['_static']\n\nhtml_theme_options = {\n 'repository_url': 'https://github.com/google/flax',\n 'use_repository_button': True, # add a 'link to repository' button\n 'use_issues_button': False, # add an 'Open an Issue' button\n 'path_to_docs': (\n 'docs'\n ), # used to compute the path to launch notebooks in colab\n 'launch_buttons': {\n 'colab_url': 'https://colab.research.google.com/',\n },\n 'prev_next_buttons_location': None,\n 'show_navbar_depth': 1,\n}\n\n# -- Options for myst ----------------------------------------------\n# uncomment line below to avoid running notebooks during development\nnb_execution_mode = 'off'\n# Notebook cell execution timeout; defaults to 30.\nnb_execution_timeout = 100\n# List of patterns, relative to source directory, that match notebook\n# files that will not be executed.\nmyst_enable_extensions = ['dollarmath']\nnb_execution_excludepatterns = [\n 'quick_start.ipynb', # <-- times out\n 'transfer_learning.ipynb', # <-- transformers requires flax<=0.7.0\n 'flax/experimental/nnx', # exclude nnx\n]\n# raise exceptions on execution so CI can catch errors\nnb_execution_allow_errors = False\nnb_execution_raise_on_error = True\n\n# -- Extension configuration -------------------------------------------------\n\n# Tell sphinx-autodoc-typehints to generate stub parameter annotations including\n# types, even if the parameters aren't explicitly documented.\nalways_document_param_types = True\n\n# -- doctest configuration -------------------------------------------------\ndoctest_global_setup = \"\"\"\nimport jax\nimport jax.numpy as jnp\nfrom flax.experimental import nnx\n\nimport logging as slog\nfrom absl import logging as alog\n\n# Avoid certain absl logging messages to break doctest\nfiltered_message = [\n 'SaveArgs.aggregate is deprecated',\n '',\n]\n\nclass _CustomLogFilter(slog.Formatter):\n def format(self, record):\n message = super(_CustomLogFilter, self).format(record)\n for m in filtered_message:\n if m in message:\n return ''\n return message\n\nalog.use_absl_handler()\nalog.get_absl_handler().setFormatter(_CustomLogFilter())\n\"\"\"\n", "path": "docs/conf.py"}], "after_files": [{"content": "# Copyright 2024 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Configuration file for the Sphinx documentation builder.\"\"\"\n\n\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath('..'))\n# Include local extension.\nsys.path.append(os.path.abspath('./_ext'))\n\n# patch sphinx\n# -- Project information -----------------------------------------------------\n\nproject = 'Flax'\ncopyright = '2023, The Flax authors' # pylint: disable=redefined-builtin\nauthor = 'The Flax authors'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'myst_nb',\n 'codediff',\n 'flax_module',\n 'sphinx_design',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The suffix(es) of source filenames.\n# Note: important to list ipynb before md here: we have both md and ipynb\n# copies of each notebook, and myst will choose which to convert based on\n# the order in the source_suffix list. Notebooks which are not executed have\n# outputs stored in ipynb but not in md, so we must convert the ipynb.\nsource_suffix = ['.rst', '.ipynb', '.md']\n\nautosummary_generate = True\n\nmaster_doc = 'index'\n\nautodoc_typehints = 'none'\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n# html_theme = 'pydata_sphinx_theme'\nhtml_theme = 'sphinx_book_theme'\nhtml_css_files = ['css/flax_theme.css']\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = './flax.png'\nhtml_favicon = './flax.png'\n\n# title of the website\nhtml_title = ''\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named 'default.css' will overwrite the builtin 'default.css'.\nhtml_static_path = ['_static']\n\nhtml_extra_path = ['robots.txt']\n\nhtml_theme_options = {\n 'repository_url': 'https://github.com/google/flax',\n 'use_repository_button': True, # add a 'link to repository' button\n 'use_issues_button': False, # add an 'Open an Issue' button\n 'path_to_docs': (\n 'docs'\n ), # used to compute the path to launch notebooks in colab\n 'launch_buttons': {\n 'colab_url': 'https://colab.research.google.com/',\n },\n 'prev_next_buttons_location': None,\n 'show_navbar_depth': 1,\n}\n\n# -- Options for myst ----------------------------------------------\n# uncomment line below to avoid running notebooks during development\nnb_execution_mode = 'off'\n# Notebook cell execution timeout; defaults to 30.\nnb_execution_timeout = 100\n# List of patterns, relative to source directory, that match notebook\n# files that will not be executed.\nmyst_enable_extensions = ['dollarmath']\nnb_execution_excludepatterns = [\n 'quick_start.ipynb', # <-- times out\n 'transfer_learning.ipynb', # <-- transformers requires flax<=0.7.0\n 'flax/experimental/nnx', # exclude nnx\n]\n# raise exceptions on execution so CI can catch errors\nnb_execution_allow_errors = False\nnb_execution_raise_on_error = True\n\n# -- Extension configuration -------------------------------------------------\n\n# Tell sphinx-autodoc-typehints to generate stub parameter annotations including\n# types, even if the parameters aren't explicitly documented.\nalways_document_param_types = True\n\n# -- doctest configuration -------------------------------------------------\ndoctest_global_setup = \"\"\"\nimport jax\nimport jax.numpy as jnp\nfrom flax.experimental import nnx\n\nimport logging as slog\nfrom absl import logging as alog\n\n# Avoid certain absl logging messages to break doctest\nfiltered_message = [\n 'SaveArgs.aggregate is deprecated',\n '',\n]\n\nclass _CustomLogFilter(slog.Formatter):\n def format(self, record):\n message = super(_CustomLogFilter, self).format(record)\n for m in filtered_message:\n if m in message:\n return ''\n return message\n\nalog.use_absl_handler()\nalog.get_absl_handler().setFormatter(_CustomLogFilter())\n\"\"\"\n", "path": "docs/conf.py"}]}
| 2,422 | 112 |
gh_patches_debug_17591
|
rasdani/github-patches
|
git_diff
|
yt-project__yt-3423
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Weird styling in docs navbar
### Bug report
**Bug summary**
Since 3.6.1, there's a weird offset (or lack of it) for first item in the docs main navbar. See below:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/source/conf.py`
Content:
```
1 #
2 # yt documentation build configuration file, created by
3 # sphinx-quickstart on Tue Jan 11 09:46:53 2011.
4 #
5 # This file is execfile()d with the current directory set to its containing dir.
6 #
7 # Note that not all possible configuration values are present in this
8 # autogenerated file.
9 #
10 # All configuration values have a default; values that are commented out
11 # serve to show the default.
12
13 import glob
14 import os
15 import sys
16
17 import sphinx_bootstrap_theme
18
19 on_rtd = os.environ.get("READTHEDOCS", None) == "True"
20
21 # If extensions (or modules to document with autodoc) are in another directory,
22 # add these directories to sys.path here. If the directory is relative to the
23 # documentation root, use os.path.abspath to make it absolute, like shown here.
24 sys.path.insert(0, os.path.abspath("../extensions/"))
25
26 # -- General configuration -----------------------------------------------------
27
28 # If your documentation needs a minimal Sphinx version, state it here.
29 # needs_sphinx = '1.0'
30
31 # Add any Sphinx extension module names here, as strings. They can be extensions
32 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
33 extensions = [
34 "sphinx.ext.autodoc",
35 "sphinx.ext.intersphinx",
36 "sphinx.ext.mathjax",
37 "sphinx.ext.viewcode",
38 "sphinx.ext.napoleon",
39 "yt_cookbook",
40 "yt_colormaps",
41 "config_help",
42 "yt_showfields",
43 ]
44
45 if not on_rtd:
46 extensions.append("sphinx.ext.autosummary")
47 extensions.append("pythonscript_sphinxext")
48
49 try:
50 import nbconvert # noqa: F401
51 import RunNotebook # noqa: F401
52
53 if not on_rtd:
54 extensions.append("RunNotebook.notebook_sphinxext")
55 extensions.append("RunNotebook.notebookcell_sphinxext")
56 except ImportError:
57 pass
58
59 # Add any paths that contain templates here, relative to this directory.
60 templates_path = ["_templates"]
61
62 # The suffix of source filenames.
63 source_suffix = ".rst"
64
65 # The encoding of source files.
66 # source_encoding = 'utf-8-sig'
67
68 # The master toctree document.
69 master_doc = "index"
70
71 # General information about the project.
72 project = "The yt Project"
73 copyright = "2013-2020, the yt Project"
74
75 # The version info for the project you're documenting, acts as replacement for
76 # |version| and |release|, also used in various other places throughout the
77 # built documents.
78 #
79 # The short X.Y version.
80 version = "4.1-dev"
81 # The full version, including alpha/beta/rc tags.
82 release = "4.1-dev"
83
84 # The language for content autogenerated by Sphinx. Refer to documentation
85 # for a list of supported languages.
86 # language = None
87
88 # There are two options for replacing |today|: either, you set today to some
89 # non-false value, then it is used:
90 # today = ''
91 # Else, today_fmt is used as the format for a strftime call.
92 # today_fmt = '%B %d, %Y'
93
94 # List of patterns, relative to source directory, that match files and
95 # directories to ignore when looking for source files.
96 exclude_patterns = []
97
98 # The reST default role (used for this markup: `text`) to use for all documents.
99 # default_role = None
100
101 # If true, '()' will be appended to :func: etc. cross-reference text.
102 # add_function_parentheses = True
103
104 # If true, the current module name will be prepended to all description
105 # unit titles (such as .. function::).
106 # add_module_names = True
107
108 # If true, sectionauthor and moduleauthor directives will be shown in the
109 # output. They are ignored by default.
110 show_authors = False
111
112 # The name of the Pygments (syntax highlighting) style to use.
113 pygments_style = "sphinx"
114
115 # A list of ignored prefixes for module index sorting.
116 # modindex_common_prefix = []
117
118
119 # -- Options for HTML output ---------------------------------------------------
120
121 # The theme to use for HTML and HTML Help pages. See the documentation for
122 # a list of builtin themes.
123 html_theme = "bootstrap"
124 html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
125
126 # Theme options are theme-specific and customize the look and feel of a theme
127 # further. For a list of options available for each theme, see the
128 # documentation.
129 html_theme_options = dict(
130 bootstrap_version="3",
131 bootswatch_theme="readable",
132 navbar_links=[
133 ("How to get help", "help/index"),
134 ("Quickstart notebooks", "quickstart/index"),
135 ("Cookbook", "cookbook/index"),
136 ],
137 navbar_sidebarrel=False,
138 globaltoc_depth=2,
139 )
140
141 # Add any paths that contain custom themes here, relative to this directory.
142 # html_theme_path = []
143
144 # The name for this set of Sphinx documents. If None, it defaults to
145 # "<project> v<release> documentation".
146 # html_title = None
147
148 # A shorter title for the navigation bar. Default is the same as html_title.
149 # html_short_title = None
150
151 # The name of an image file (relative to this directory) to place at the top
152 # of the sidebar.
153 html_logo = "_static/yt_icon.png"
154
155 # The name of an image file (within the static path) to use as favicon of the
156 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
157 # pixels large.
158 # html_favicon = None
159
160 # Add any paths that contain custom static files (such as style sheets) here,
161 # relative to this directory. They are copied after the builtin static files,
162 # so a file named "default.css" will overwrite the builtin "default.css".
163 html_static_path = ["_static", "analyzing/_static"]
164
165 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
166 # using the given strftime format.
167 # html_last_updated_fmt = '%b %d, %Y'
168
169 # If true, SmartyPants will be used to convert quotes and dashes to
170 # typographically correct entities.
171 # html_use_smartypants = True
172
173 # Custom sidebar templates, maps document names to template names.
174 # html_sidebars = {}
175
176 # Additional templates that should be rendered to pages, maps page names to
177 # template names.
178 # html_additional_pages = {}
179
180 # If false, no module index is generated.
181 html_domain_indices = False
182
183 # If false, no index is generated.
184 html_use_index = True
185
186 # If true, the index is split into individual pages for each letter.
187 # html_split_index = False
188
189 # If true, links to the reST sources are added to the pages.
190 html_show_sourcelink = False
191
192 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
193 # html_show_sphinx = True
194
195 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
196 # html_show_copyright = True
197
198 # If true, an OpenSearch description file will be output, and all pages will
199 # contain a <link> tag referring to it. The value of this option must be the
200 # base URL from which the finished HTML is served.
201 # html_use_opensearch = ''
202
203 # This is the file name suffix for HTML files (e.g. ".xhtml").
204 # html_file_suffix = None
205
206 # Output file base name for HTML help builder.
207 htmlhelp_basename = "ytdoc"
208
209
210 # -- Options for LaTeX output --------------------------------------------------
211
212 # The paper size ('letter' or 'a4').
213 # latex_paper_size = 'letter'
214
215 # The font size ('10pt', '11pt' or '12pt').
216 # latex_font_size = '10pt'
217
218 # Grouping the document tree into LaTeX files. List of tuples
219 # (source start file, target name, title, author, documentclass [howto/manual]).
220 latex_documents = [
221 ("index", "yt.tex", "yt Documentation", "The yt Project", "manual"),
222 ]
223
224 # The name of an image file (relative to this directory) to place at the top of
225 # the title page.
226 # latex_logo = None
227
228 # For "manual" documents, if this is true, then toplevel headings are parts,
229 # not chapters.
230 # latex_use_parts = False
231
232 # If true, show page references after internal links.
233 # latex_show_pagerefs = False
234
235 # If true, show URL addresses after external links.
236 # latex_show_urls = False
237
238 # Documents to append as an appendix to all manuals.
239 # latex_appendices = []
240
241 # If false, no module index is generated.
242 # latex_domain_indices = True
243
244
245 # -- Options for manual page output --------------------------------------------
246
247 # One entry per manual page. List of tuples
248 # (source start file, name, description, authors, manual section).
249 man_pages = [("index", "yt", "yt Documentation", ["The yt Project"], 1)]
250
251
252 # Example configuration for intersphinx: refer to the Python standard library.
253 intersphinx_mapping = {
254 "https://docs.python.org/3/": None,
255 "https://ipython.readthedocs.io/en/stable/": None,
256 "https://numpy.org/doc/stable/": None,
257 "https://matplotlib.org/stable/": None,
258 "https://docs.astropy.org/en/stable": None,
259 "https://pandas.pydata.org/pandas-docs/stable": None,
260 "trident": ("https://trident.readthedocs.io/en/latest/", None),
261 "yt_astro_analysis": ("https://yt-astro-analysis.readthedocs.io/en/latest/", None),
262 "yt_attic": ("https://yt-attic.readthedocs.io/en/latest/", None),
263 }
264
265 if not on_rtd:
266 autosummary_generate = glob.glob("reference/api/api.rst")
267
268 # as of Sphinx 3.1.2 this is the supported way to link custom style sheets
269 def setup(app):
270 app.add_css_file("custom.css")
271
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/doc/source/conf.py b/doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -70,7 +70,7 @@
# General information about the project.
project = "The yt Project"
-copyright = "2013-2020, the yt Project"
+copyright = "2013-2021, the yt Project"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -130,6 +130,7 @@
bootstrap_version="3",
bootswatch_theme="readable",
navbar_links=[
+ ("", ""), # see https://github.com/yt-project/yt/pull/3423
("How to get help", "help/index"),
("Quickstart notebooks", "quickstart/index"),
("Cookbook", "cookbook/index"),
|
{"golden_diff": "diff --git a/doc/source/conf.py b/doc/source/conf.py\n--- a/doc/source/conf.py\n+++ b/doc/source/conf.py\n@@ -70,7 +70,7 @@\n \n # General information about the project.\n project = \"The yt Project\"\n-copyright = \"2013-2020, the yt Project\"\n+copyright = \"2013-2021, the yt Project\"\n \n # The version info for the project you're documenting, acts as replacement for\n # |version| and |release|, also used in various other places throughout the\n@@ -130,6 +130,7 @@\n bootstrap_version=\"3\",\n bootswatch_theme=\"readable\",\n navbar_links=[\n+ (\"\", \"\"), # see https://github.com/yt-project/yt/pull/3423\n (\"How to get help\", \"help/index\"),\n (\"Quickstart notebooks\", \"quickstart/index\"),\n (\"Cookbook\", \"cookbook/index\"),\n", "issue": "Weird styling in docs navbar\n### Bug report\r\n\r\n**Bug summary**\r\n\r\nSince 3.6.1, there's a weird offset (or lack of it) for first item in the docs main navbar. See below:\r\n\r\n\r\n\n", "before_files": [{"content": "#\n# yt documentation build configuration file, created by\n# sphinx-quickstart on Tue Jan 11 09:46:53 2011.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport glob\nimport os\nimport sys\n\nimport sphinx_bootstrap_theme\n\non_rtd = os.environ.get(\"READTHEDOCS\", None) == \"True\"\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"../extensions/\"))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.napoleon\",\n \"yt_cookbook\",\n \"yt_colormaps\",\n \"config_help\",\n \"yt_showfields\",\n]\n\nif not on_rtd:\n extensions.append(\"sphinx.ext.autosummary\")\n extensions.append(\"pythonscript_sphinxext\")\n\ntry:\n import nbconvert # noqa: F401\n import RunNotebook # noqa: F401\n\n if not on_rtd:\n extensions.append(\"RunNotebook.notebook_sphinxext\")\n extensions.append(\"RunNotebook.notebookcell_sphinxext\")\nexcept ImportError:\n pass\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"The yt Project\"\ncopyright = \"2013-2020, the yt Project\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \"4.1-dev\"\n# The full version, including alpha/beta/rc tags.\nrelease = \"4.1-dev\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n# language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\nshow_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"bootstrap\"\nhtml_theme_path = sphinx_bootstrap_theme.get_html_theme_path()\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = dict(\n bootstrap_version=\"3\",\n bootswatch_theme=\"readable\",\n navbar_links=[\n (\"How to get help\", \"help/index\"),\n (\"Quickstart notebooks\", \"quickstart/index\"),\n (\"Cookbook\", \"cookbook/index\"),\n ],\n navbar_sidebarrel=False,\n globaltoc_depth=2,\n)\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = \"_static/yt_icon.png\"\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\", \"analyzing/_static\"]\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\nhtml_domain_indices = False\n\n# If false, no index is generated.\nhtml_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = False\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"ytdoc\"\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\n# The paper size ('letter' or 'a4').\n# latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n# latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n (\"index\", \"yt.tex\", \"yt Documentation\", \"The yt Project\", \"manual\"),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(\"index\", \"yt\", \"yt Documentation\", [\"The yt Project\"], 1)]\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"https://docs.python.org/3/\": None,\n \"https://ipython.readthedocs.io/en/stable/\": None,\n \"https://numpy.org/doc/stable/\": None,\n \"https://matplotlib.org/stable/\": None,\n \"https://docs.astropy.org/en/stable\": None,\n \"https://pandas.pydata.org/pandas-docs/stable\": None,\n \"trident\": (\"https://trident.readthedocs.io/en/latest/\", None),\n \"yt_astro_analysis\": (\"https://yt-astro-analysis.readthedocs.io/en/latest/\", None),\n \"yt_attic\": (\"https://yt-attic.readthedocs.io/en/latest/\", None),\n}\n\nif not on_rtd:\n autosummary_generate = glob.glob(\"reference/api/api.rst\")\n\n# as of Sphinx 3.1.2 this is the supported way to link custom style sheets\ndef setup(app):\n app.add_css_file(\"custom.css\")\n", "path": "doc/source/conf.py"}], "after_files": [{"content": "#\n# yt documentation build configuration file, created by\n# sphinx-quickstart on Tue Jan 11 09:46:53 2011.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport glob\nimport os\nimport sys\n\nimport sphinx_bootstrap_theme\n\non_rtd = os.environ.get(\"READTHEDOCS\", None) == \"True\"\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"../extensions/\"))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.napoleon\",\n \"yt_cookbook\",\n \"yt_colormaps\",\n \"config_help\",\n \"yt_showfields\",\n]\n\nif not on_rtd:\n extensions.append(\"sphinx.ext.autosummary\")\n extensions.append(\"pythonscript_sphinxext\")\n\ntry:\n import nbconvert # noqa: F401\n import RunNotebook # noqa: F401\n\n if not on_rtd:\n extensions.append(\"RunNotebook.notebook_sphinxext\")\n extensions.append(\"RunNotebook.notebookcell_sphinxext\")\nexcept ImportError:\n pass\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"The yt Project\"\ncopyright = \"2013-2021, the yt Project\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \"4.1-dev\"\n# The full version, including alpha/beta/rc tags.\nrelease = \"4.1-dev\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n# language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\nshow_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"bootstrap\"\nhtml_theme_path = sphinx_bootstrap_theme.get_html_theme_path()\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = dict(\n bootstrap_version=\"3\",\n bootswatch_theme=\"readable\",\n navbar_links=[\n (\"\", \"\"), # see https://github.com/yt-project/yt/pull/3423\n (\"How to get help\", \"help/index\"),\n (\"Quickstart notebooks\", \"quickstart/index\"),\n (\"Cookbook\", \"cookbook/index\"),\n ],\n navbar_sidebarrel=False,\n globaltoc_depth=2,\n)\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. 
If None, it defaults to\n# \"<project> v<release> documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = \"_static/yt_icon.png\"\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\", \"analyzing/_static\"]\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\nhtml_domain_indices = False\n\n# If false, no index is generated.\nhtml_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = False\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"ytdoc\"\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\n# The paper size ('letter' or 'a4').\n# latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n# latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n (\"index\", \"yt.tex\", \"yt Documentation\", \"The yt Project\", \"manual\"),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(\"index\", \"yt\", \"yt Documentation\", [\"The yt Project\"], 1)]\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"https://docs.python.org/3/\": None,\n \"https://ipython.readthedocs.io/en/stable/\": None,\n \"https://numpy.org/doc/stable/\": None,\n \"https://matplotlib.org/stable/\": None,\n \"https://docs.astropy.org/en/stable\": None,\n \"https://pandas.pydata.org/pandas-docs/stable\": None,\n \"trident\": (\"https://trident.readthedocs.io/en/latest/\", None),\n \"yt_astro_analysis\": (\"https://yt-astro-analysis.readthedocs.io/en/latest/\", None),\n \"yt_attic\": (\"https://yt-attic.readthedocs.io/en/latest/\", None),\n}\n\nif not on_rtd:\n autosummary_generate = glob.glob(\"reference/api/api.rst\")\n\n# as of Sphinx 3.1.2 this is the supported way to link custom style sheets\ndef setup(app):\n app.add_css_file(\"custom.css\")\n", "path": "doc/source/conf.py"}]}
| 3,299 | 216 |
gh_patches_debug_13220
|
rasdani/github-patches
|
git_diff
|
Textualize__textual-3396
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pilot.click can't use `Screen` as selector.
If you try something like `pilot.click(Screen)`, you get a `NoMatches` exception from the query.
Pilot.click can't use `Screen` as selector.
If you try something like `pilot.click(Screen)`, you get a `NoMatches` exception from the query.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/textual/pilot.py`
Content:
```
1 """
2
3 The pilot object is used by [App.run_test][textual.app.App.run_test] to programmatically operate an app.
4
5 See the guide on how to [test Textual apps](/guide/testing).
6
7 """
8
9 from __future__ import annotations
10
11 import asyncio
12 from typing import Any, Generic
13
14 import rich.repr
15
16 from ._wait import wait_for_idle
17 from .app import App, ReturnType
18 from .events import Click, MouseDown, MouseMove, MouseUp
19 from .widget import Widget
20
21
22 def _get_mouse_message_arguments(
23 target: Widget,
24 offset: tuple[int, int] = (0, 0),
25 button: int = 0,
26 shift: bool = False,
27 meta: bool = False,
28 control: bool = False,
29 ) -> dict[str, Any]:
30 """Get the arguments to pass into mouse messages for the click and hover methods."""
31 click_x, click_y = target.region.offset + offset
32 message_arguments = {
33 "x": click_x,
34 "y": click_y,
35 "delta_x": 0,
36 "delta_y": 0,
37 "button": button,
38 "shift": shift,
39 "meta": meta,
40 "ctrl": control,
41 "screen_x": click_x,
42 "screen_y": click_y,
43 }
44 return message_arguments
45
46
47 class WaitForScreenTimeout(Exception):
48 """Exception raised if messages aren't being processed quickly enough.
49
50 If this occurs, the most likely explanation is some kind of deadlock in the app code.
51 """
52
53
54 @rich.repr.auto(angular=True)
55 class Pilot(Generic[ReturnType]):
56 """Pilot object to drive an app."""
57
58 def __init__(self, app: App[ReturnType]) -> None:
59 self._app = app
60
61 def __rich_repr__(self) -> rich.repr.Result:
62 yield "app", self._app
63
64 @property
65 def app(self) -> App[ReturnType]:
66 """App: A reference to the application."""
67 return self._app
68
69 async def press(self, *keys: str) -> None:
70 """Simulate key-presses.
71
72 Args:
73 *keys: Keys to press.
74 """
75 if keys:
76 await self._app._press_keys(keys)
77 await self._wait_for_screen()
78
79 async def click(
80 self,
81 selector: type[Widget] | str | None = None,
82 offset: tuple[int, int] = (0, 0),
83 shift: bool = False,
84 meta: bool = False,
85 control: bool = False,
86 ) -> None:
87 """Simulate clicking with the mouse.
88
89 Args:
90 selector: The widget that should be clicked. If None, then the click
91 will occur relative to the screen. Note that this simply causes
92 a click to occur at the location of the widget. If the widget is
93 currently hidden or obscured by another widget, then the click may
94 not land on it.
95 offset: The offset to click within the selected widget.
96 shift: Click with the shift key held down.
97 meta: Click with the meta key held down.
98 control: Click with the control key held down.
99 """
100 app = self.app
101 screen = app.screen
102 if selector is not None:
103 target_widget = screen.query_one(selector)
104 else:
105 target_widget = screen
106
107 message_arguments = _get_mouse_message_arguments(
108 target_widget, offset, button=1, shift=shift, meta=meta, control=control
109 )
110 app.post_message(MouseDown(**message_arguments))
111 await self.pause(0.1)
112 app.post_message(MouseUp(**message_arguments))
113 await self.pause(0.1)
114 app.post_message(Click(**message_arguments))
115 await self.pause(0.1)
116
117 async def hover(
118 self,
119 selector: type[Widget] | str | None | None = None,
120 offset: tuple[int, int] = (0, 0),
121 ) -> None:
122 """Simulate hovering with the mouse cursor.
123
124 Args:
125 selector: The widget that should be hovered. If None, then the click
126 will occur relative to the screen. Note that this simply causes
127 a hover to occur at the location of the widget. If the widget is
128 currently hidden or obscured by another widget, then the hover may
129 not land on it.
130 offset: The offset to hover over within the selected widget.
131 """
132 app = self.app
133 screen = app.screen
134 if selector is not None:
135 target_widget = screen.query_one(selector)
136 else:
137 target_widget = screen
138
139 message_arguments = _get_mouse_message_arguments(
140 target_widget, offset, button=0
141 )
142 await self.pause()
143 app.post_message(MouseMove(**message_arguments))
144 await self.pause()
145
146 async def _wait_for_screen(self, timeout: float = 30.0) -> bool:
147 """Wait for the current screen and its children to have processed all pending events.
148
149 Args:
150 timeout: A timeout in seconds to wait.
151
152 Returns:
153 `True` if all events were processed. `False` if an exception occurred,
154 meaning that not all events could be processed.
155
156 Raises:
157 WaitForScreenTimeout: If the screen and its children didn't finish processing within the timeout.
158 """
159 children = [self.app, *self.app.screen.walk_children(with_self=True)]
160 count = 0
161 count_zero_event = asyncio.Event()
162
163 def decrement_counter() -> None:
164 """Decrement internal counter, and set an event if it reaches zero."""
165 nonlocal count
166 count -= 1
167 if count == 0:
168 # When count is zero, all messages queued at the start of the method have been processed
169 count_zero_event.set()
170
171 # Increase the count for every successful call_later
172 for child in children:
173 if child.call_later(decrement_counter):
174 count += 1
175
176 if count:
177 # Wait for the count to return to zero, or a timeout, or an exception
178 wait_for = [
179 asyncio.create_task(count_zero_event.wait()),
180 asyncio.create_task(self.app._exception_event.wait()),
181 ]
182 _, pending = await asyncio.wait(
183 wait_for,
184 timeout=timeout,
185 return_when=asyncio.FIRST_COMPLETED,
186 )
187
188 for task in pending:
189 task.cancel()
190
191 timed_out = len(wait_for) == len(pending)
192 if timed_out:
193 raise WaitForScreenTimeout(
194 "Timed out while waiting for widgets to process pending messages."
195 )
196
197 # We've either timed out, encountered an exception, or we've finished
198 # decrementing all the counters (all events processed in children).
199 if count > 0:
200 return False
201
202 return True
203
204 async def pause(self, delay: float | None = None) -> None:
205 """Insert a pause.
206
207 Args:
208 delay: Seconds to pause, or None to wait for cpu idle.
209 """
210 # These sleep zeros, are to force asyncio to give up a time-slice.
211 await self._wait_for_screen()
212 if delay is None:
213 await wait_for_idle(0)
214 else:
215 await asyncio.sleep(delay)
216 self.app.screen._on_timer_update()
217
218 async def wait_for_animation(self) -> None:
219 """Wait for any current animation to complete."""
220 await self._app.animator.wait_for_idle()
221 self.app.screen._on_timer_update()
222
223 async def wait_for_scheduled_animations(self) -> None:
224 """Wait for any current and scheduled animations to complete."""
225 await self._wait_for_screen()
226 await self._app.animator.wait_until_complete()
227 await self._wait_for_screen()
228 await wait_for_idle()
229 self.app.screen._on_timer_update()
230
231 async def exit(self, result: ReturnType) -> None:
232 """Exit the app with the given result.
233
234 Args:
235 result: The app result returned by `run` or `run_async`.
236 """
237 await self._wait_for_screen()
238 await wait_for_idle()
239 self.app.exit(result)
240
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/textual/pilot.py b/src/textual/pilot.py
--- a/src/textual/pilot.py
+++ b/src/textual/pilot.py
@@ -100,7 +100,7 @@
app = self.app
screen = app.screen
if selector is not None:
- target_widget = screen.query_one(selector)
+ target_widget = app.query_one(selector)
else:
target_widget = screen
@@ -132,7 +132,7 @@
app = self.app
screen = app.screen
if selector is not None:
- target_widget = screen.query_one(selector)
+ target_widget = app.query_one(selector)
else:
target_widget = screen
|
{"golden_diff": "diff --git a/src/textual/pilot.py b/src/textual/pilot.py\n--- a/src/textual/pilot.py\n+++ b/src/textual/pilot.py\n@@ -100,7 +100,7 @@\n app = self.app\n screen = app.screen\n if selector is not None:\n- target_widget = screen.query_one(selector)\n+ target_widget = app.query_one(selector)\n else:\n target_widget = screen\n \n@@ -132,7 +132,7 @@\n app = self.app\n screen = app.screen\n if selector is not None:\n- target_widget = screen.query_one(selector)\n+ target_widget = app.query_one(selector)\n else:\n target_widget = screen\n", "issue": "Pilot.click can't use `Screen` as selector.\nIf you try something like `pilot.click(Screen)`, you get a `NoMatches` exception from the query.\nPilot.click can't use `Screen` as selector.\nIf you try something like `pilot.click(Screen)`, you get a `NoMatches` exception from the query.\n", "before_files": [{"content": "\"\"\"\n\nThe pilot object is used by [App.run_test][textual.app.App.run_test] to programmatically operate an app.\n\nSee the guide on how to [test Textual apps](/guide/testing).\n\n\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nfrom typing import Any, Generic\n\nimport rich.repr\n\nfrom ._wait import wait_for_idle\nfrom .app import App, ReturnType\nfrom .events import Click, MouseDown, MouseMove, MouseUp\nfrom .widget import Widget\n\n\ndef _get_mouse_message_arguments(\n target: Widget,\n offset: tuple[int, int] = (0, 0),\n button: int = 0,\n shift: bool = False,\n meta: bool = False,\n control: bool = False,\n) -> dict[str, Any]:\n \"\"\"Get the arguments to pass into mouse messages for the click and hover methods.\"\"\"\n click_x, click_y = target.region.offset + offset\n message_arguments = {\n \"x\": click_x,\n \"y\": click_y,\n \"delta_x\": 0,\n \"delta_y\": 0,\n \"button\": button,\n \"shift\": shift,\n \"meta\": meta,\n \"ctrl\": control,\n \"screen_x\": click_x,\n \"screen_y\": click_y,\n }\n return message_arguments\n\n\nclass WaitForScreenTimeout(Exception):\n \"\"\"Exception raised if messages aren't being processed quickly enough.\n\n If this occurs, the most likely explanation is some kind of deadlock in the app code.\n \"\"\"\n\n\[email protected](angular=True)\nclass Pilot(Generic[ReturnType]):\n \"\"\"Pilot object to drive an app.\"\"\"\n\n def __init__(self, app: App[ReturnType]) -> None:\n self._app = app\n\n def __rich_repr__(self) -> rich.repr.Result:\n yield \"app\", self._app\n\n @property\n def app(self) -> App[ReturnType]:\n \"\"\"App: A reference to the application.\"\"\"\n return self._app\n\n async def press(self, *keys: str) -> None:\n \"\"\"Simulate key-presses.\n\n Args:\n *keys: Keys to press.\n \"\"\"\n if keys:\n await self._app._press_keys(keys)\n await self._wait_for_screen()\n\n async def click(\n self,\n selector: type[Widget] | str | None = None,\n offset: tuple[int, int] = (0, 0),\n shift: bool = False,\n meta: bool = False,\n control: bool = False,\n ) -> None:\n \"\"\"Simulate clicking with the mouse.\n\n Args:\n selector: The widget that should be clicked. If None, then the click\n will occur relative to the screen. Note that this simply causes\n a click to occur at the location of the widget. 
If the widget is\n currently hidden or obscured by another widget, then the click may\n not land on it.\n offset: The offset to click within the selected widget.\n shift: Click with the shift key held down.\n meta: Click with the meta key held down.\n control: Click with the control key held down.\n \"\"\"\n app = self.app\n screen = app.screen\n if selector is not None:\n target_widget = screen.query_one(selector)\n else:\n target_widget = screen\n\n message_arguments = _get_mouse_message_arguments(\n target_widget, offset, button=1, shift=shift, meta=meta, control=control\n )\n app.post_message(MouseDown(**message_arguments))\n await self.pause(0.1)\n app.post_message(MouseUp(**message_arguments))\n await self.pause(0.1)\n app.post_message(Click(**message_arguments))\n await self.pause(0.1)\n\n async def hover(\n self,\n selector: type[Widget] | str | None | None = None,\n offset: tuple[int, int] = (0, 0),\n ) -> None:\n \"\"\"Simulate hovering with the mouse cursor.\n\n Args:\n selector: The widget that should be hovered. If None, then the click\n will occur relative to the screen. Note that this simply causes\n a hover to occur at the location of the widget. If the widget is\n currently hidden or obscured by another widget, then the hover may\n not land on it.\n offset: The offset to hover over within the selected widget.\n \"\"\"\n app = self.app\n screen = app.screen\n if selector is not None:\n target_widget = screen.query_one(selector)\n else:\n target_widget = screen\n\n message_arguments = _get_mouse_message_arguments(\n target_widget, offset, button=0\n )\n await self.pause()\n app.post_message(MouseMove(**message_arguments))\n await self.pause()\n\n async def _wait_for_screen(self, timeout: float = 30.0) -> bool:\n \"\"\"Wait for the current screen and its children to have processed all pending events.\n\n Args:\n timeout: A timeout in seconds to wait.\n\n Returns:\n `True` if all events were processed. 
`False` if an exception occurred,\n meaning that not all events could be processed.\n\n Raises:\n WaitForScreenTimeout: If the screen and its children didn't finish processing within the timeout.\n \"\"\"\n children = [self.app, *self.app.screen.walk_children(with_self=True)]\n count = 0\n count_zero_event = asyncio.Event()\n\n def decrement_counter() -> None:\n \"\"\"Decrement internal counter, and set an event if it reaches zero.\"\"\"\n nonlocal count\n count -= 1\n if count == 0:\n # When count is zero, all messages queued at the start of the method have been processed\n count_zero_event.set()\n\n # Increase the count for every successful call_later\n for child in children:\n if child.call_later(decrement_counter):\n count += 1\n\n if count:\n # Wait for the count to return to zero, or a timeout, or an exception\n wait_for = [\n asyncio.create_task(count_zero_event.wait()),\n asyncio.create_task(self.app._exception_event.wait()),\n ]\n _, pending = await asyncio.wait(\n wait_for,\n timeout=timeout,\n return_when=asyncio.FIRST_COMPLETED,\n )\n\n for task in pending:\n task.cancel()\n\n timed_out = len(wait_for) == len(pending)\n if timed_out:\n raise WaitForScreenTimeout(\n \"Timed out while waiting for widgets to process pending messages.\"\n )\n\n # We've either timed out, encountered an exception, or we've finished\n # decrementing all the counters (all events processed in children).\n if count > 0:\n return False\n\n return True\n\n async def pause(self, delay: float | None = None) -> None:\n \"\"\"Insert a pause.\n\n Args:\n delay: Seconds to pause, or None to wait for cpu idle.\n \"\"\"\n # These sleep zeros, are to force asyncio to give up a time-slice.\n await self._wait_for_screen()\n if delay is None:\n await wait_for_idle(0)\n else:\n await asyncio.sleep(delay)\n self.app.screen._on_timer_update()\n\n async def wait_for_animation(self) -> None:\n \"\"\"Wait for any current animation to complete.\"\"\"\n await self._app.animator.wait_for_idle()\n self.app.screen._on_timer_update()\n\n async def wait_for_scheduled_animations(self) -> None:\n \"\"\"Wait for any current and scheduled animations to complete.\"\"\"\n await self._wait_for_screen()\n await self._app.animator.wait_until_complete()\n await self._wait_for_screen()\n await wait_for_idle()\n self.app.screen._on_timer_update()\n\n async def exit(self, result: ReturnType) -> None:\n \"\"\"Exit the app with the given result.\n\n Args:\n result: The app result returned by `run` or `run_async`.\n \"\"\"\n await self._wait_for_screen()\n await wait_for_idle()\n self.app.exit(result)\n", "path": "src/textual/pilot.py"}], "after_files": [{"content": "\"\"\"\n\nThe pilot object is used by [App.run_test][textual.app.App.run_test] to programmatically operate an app.\n\nSee the guide on how to [test Textual apps](/guide/testing).\n\n\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nfrom typing import Any, Generic\n\nimport rich.repr\n\nfrom ._wait import wait_for_idle\nfrom .app import App, ReturnType\nfrom .events import Click, MouseDown, MouseMove, MouseUp\nfrom .widget import Widget\n\n\ndef _get_mouse_message_arguments(\n target: Widget,\n offset: tuple[int, int] = (0, 0),\n button: int = 0,\n shift: bool = False,\n meta: bool = False,\n control: bool = False,\n) -> dict[str, Any]:\n \"\"\"Get the arguments to pass into mouse messages for the click and hover methods.\"\"\"\n click_x, click_y = target.region.offset + offset\n message_arguments = {\n \"x\": click_x,\n \"y\": click_y,\n \"delta_x\": 0,\n \"delta_y\": 
0,\n \"button\": button,\n \"shift\": shift,\n \"meta\": meta,\n \"ctrl\": control,\n \"screen_x\": click_x,\n \"screen_y\": click_y,\n }\n return message_arguments\n\n\nclass WaitForScreenTimeout(Exception):\n \"\"\"Exception raised if messages aren't being processed quickly enough.\n\n If this occurs, the most likely explanation is some kind of deadlock in the app code.\n \"\"\"\n\n\[email protected](angular=True)\nclass Pilot(Generic[ReturnType]):\n \"\"\"Pilot object to drive an app.\"\"\"\n\n def __init__(self, app: App[ReturnType]) -> None:\n self._app = app\n\n def __rich_repr__(self) -> rich.repr.Result:\n yield \"app\", self._app\n\n @property\n def app(self) -> App[ReturnType]:\n \"\"\"App: A reference to the application.\"\"\"\n return self._app\n\n async def press(self, *keys: str) -> None:\n \"\"\"Simulate key-presses.\n\n Args:\n *keys: Keys to press.\n \"\"\"\n if keys:\n await self._app._press_keys(keys)\n await self._wait_for_screen()\n\n async def click(\n self,\n selector: type[Widget] | str | None = None,\n offset: tuple[int, int] = (0, 0),\n shift: bool = False,\n meta: bool = False,\n control: bool = False,\n ) -> None:\n \"\"\"Simulate clicking with the mouse.\n\n Args:\n selector: The widget that should be clicked. If None, then the click\n will occur relative to the screen. Note that this simply causes\n a click to occur at the location of the widget. If the widget is\n currently hidden or obscured by another widget, then the click may\n not land on it.\n offset: The offset to click within the selected widget.\n shift: Click with the shift key held down.\n meta: Click with the meta key held down.\n control: Click with the control key held down.\n \"\"\"\n app = self.app\n screen = app.screen\n if selector is not None:\n target_widget = app.query_one(selector)\n else:\n target_widget = screen\n\n message_arguments = _get_mouse_message_arguments(\n target_widget, offset, button=1, shift=shift, meta=meta, control=control\n )\n app.post_message(MouseDown(**message_arguments))\n await self.pause(0.1)\n app.post_message(MouseUp(**message_arguments))\n await self.pause(0.1)\n app.post_message(Click(**message_arguments))\n await self.pause(0.1)\n\n async def hover(\n self,\n selector: type[Widget] | str | None | None = None,\n offset: tuple[int, int] = (0, 0),\n ) -> None:\n \"\"\"Simulate hovering with the mouse cursor.\n\n Args:\n selector: The widget that should be hovered. If None, then the click\n will occur relative to the screen. Note that this simply causes\n a hover to occur at the location of the widget. If the widget is\n currently hidden or obscured by another widget, then the hover may\n not land on it.\n offset: The offset to hover over within the selected widget.\n \"\"\"\n app = self.app\n screen = app.screen\n if selector is not None:\n target_widget = app.query_one(selector)\n else:\n target_widget = screen\n\n message_arguments = _get_mouse_message_arguments(\n target_widget, offset, button=0\n )\n await self.pause()\n app.post_message(MouseMove(**message_arguments))\n await self.pause()\n\n async def _wait_for_screen(self, timeout: float = 30.0) -> bool:\n \"\"\"Wait for the current screen and its children to have processed all pending events.\n\n Args:\n timeout: A timeout in seconds to wait.\n\n Returns:\n `True` if all events were processed. 
`False` if an exception occurred,\n meaning that not all events could be processed.\n\n Raises:\n WaitForScreenTimeout: If the screen and its children didn't finish processing within the timeout.\n \"\"\"\n children = [self.app, *self.app.screen.walk_children(with_self=True)]\n count = 0\n count_zero_event = asyncio.Event()\n\n def decrement_counter() -> None:\n \"\"\"Decrement internal counter, and set an event if it reaches zero.\"\"\"\n nonlocal count\n count -= 1\n if count == 0:\n # When count is zero, all messages queued at the start of the method have been processed\n count_zero_event.set()\n\n # Increase the count for every successful call_later\n for child in children:\n if child.call_later(decrement_counter):\n count += 1\n\n if count:\n # Wait for the count to return to zero, or a timeout, or an exception\n wait_for = [\n asyncio.create_task(count_zero_event.wait()),\n asyncio.create_task(self.app._exception_event.wait()),\n ]\n _, pending = await asyncio.wait(\n wait_for,\n timeout=timeout,\n return_when=asyncio.FIRST_COMPLETED,\n )\n\n for task in pending:\n task.cancel()\n\n timed_out = len(wait_for) == len(pending)\n if timed_out:\n raise WaitForScreenTimeout(\n \"Timed out while waiting for widgets to process pending messages.\"\n )\n\n # We've either timed out, encountered an exception, or we've finished\n # decrementing all the counters (all events processed in children).\n if count > 0:\n return False\n\n return True\n\n async def pause(self, delay: float | None = None) -> None:\n \"\"\"Insert a pause.\n\n Args:\n delay: Seconds to pause, or None to wait for cpu idle.\n \"\"\"\n # These sleep zeros, are to force asyncio to give up a time-slice.\n await self._wait_for_screen()\n if delay is None:\n await wait_for_idle(0)\n else:\n await asyncio.sleep(delay)\n self.app.screen._on_timer_update()\n\n async def wait_for_animation(self) -> None:\n \"\"\"Wait for any current animation to complete.\"\"\"\n await self._app.animator.wait_for_idle()\n self.app.screen._on_timer_update()\n\n async def wait_for_scheduled_animations(self) -> None:\n \"\"\"Wait for any current and scheduled animations to complete.\"\"\"\n await self._wait_for_screen()\n await self._app.animator.wait_until_complete()\n await self._wait_for_screen()\n await wait_for_idle()\n self.app.screen._on_timer_update()\n\n async def exit(self, result: ReturnType) -> None:\n \"\"\"Exit the app with the given result.\n\n Args:\n result: The app result returned by `run` or `run_async`.\n \"\"\"\n await self._wait_for_screen()\n await wait_for_idle()\n self.app.exit(result)\n", "path": "src/textual/pilot.py"}]}
| 2,689 | 160 |
gh_patches_debug_624
|
rasdani/github-patches
|
git_diff
|
codespell-project__codespell-89
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Makefile is broken
The makefile is no longer working, since there is no longer a codespell.py
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #! /usr/bin/env python
2
3 # adapted from mne-python
4
5 import os
6 from os import path as op
7
8 try:
9 import setuptools # noqa to allow --develop
10 except Exception:
11 pass
12 from distutils.core import setup
13
14 from codespell_lib import __version__
15
16 DISTNAME = 'codespell'
17 DESCRIPTION = """Codespell"""
18 MAINTAINER = 'Lucas De Marchi'
19 MAINTAINER_EMAIL = '[email protected]'
20 URL = 'https://github.com/lucasdemarchi/codespell/'
21 LICENSE = 'GPL v2'
22 DOWNLOAD_URL = 'https://github.com/lucasdemarchi/codespell/'
23 with open('README.rst', 'r') as f:
24 LONG_DESCRIPTION = f.read()
25
26 if __name__ == "__main__":
27 if os.path.exists('MANIFEST'):
28 os.remove('MANIFEST')
29
30 setup(name=DISTNAME,
31 maintainer=MAINTAINER,
32 include_package_data=True,
33 maintainer_email=MAINTAINER_EMAIL,
34 description=DESCRIPTION,
35 license=LICENSE,
36 url=URL,
37 version=__version__,
38 download_url=DOWNLOAD_URL,
39 long_description=LONG_DESCRIPTION,
40 zip_safe=False,
41 classifiers=['Intended Audience :: Developers',
42 'License :: OSI Approved',
43 'Programming Language :: Python',
44 'Topic :: Software Development',
45 'Operating System :: Microsoft :: Windows',
46 'Operating System :: POSIX',
47 'Operating System :: Unix',
48 'Operating System :: MacOS'],
49 platforms='any',
50 packages=[
51 'codespell_lib', 'codespell_lib.tests',
52 'codespell_lib.data',
53 ],
54 package_data={'codespell_lib': [
55 op.join('data', 'dictionary.txt'),
56 op.join('data', 'linux-kernel.exclude'),
57 ]},
58 scripts=['bin/codespell.py'])
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -55,4 +55,4 @@
op.join('data', 'dictionary.txt'),
op.join('data', 'linux-kernel.exclude'),
]},
- scripts=['bin/codespell.py'])
+ scripts=['bin/codespell'])
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -55,4 +55,4 @@\n op.join('data', 'dictionary.txt'),\n op.join('data', 'linux-kernel.exclude'),\n ]},\n- scripts=['bin/codespell.py'])\n+ scripts=['bin/codespell'])\n", "issue": "Makefile is broken\nThe makefile is no longer working, since there is no longer a codespell.py\n\n", "before_files": [{"content": "#! /usr/bin/env python\n\n# adapted from mne-python\n\nimport os\nfrom os import path as op\n\ntry:\n import setuptools # noqa to allow --develop\nexcept Exception:\n pass\nfrom distutils.core import setup\n\nfrom codespell_lib import __version__\n\nDISTNAME = 'codespell'\nDESCRIPTION = \"\"\"Codespell\"\"\"\nMAINTAINER = 'Lucas De Marchi'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://github.com/lucasdemarchi/codespell/'\nLICENSE = 'GPL v2'\nDOWNLOAD_URL = 'https://github.com/lucasdemarchi/codespell/'\nwith open('README.rst', 'r') as f:\n LONG_DESCRIPTION = f.read()\n\nif __name__ == \"__main__\":\n if os.path.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n include_package_data=True,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=__version__,\n download_url=DOWNLOAD_URL,\n long_description=LONG_DESCRIPTION,\n zip_safe=False,\n classifiers=['Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS'],\n platforms='any',\n packages=[\n 'codespell_lib', 'codespell_lib.tests',\n 'codespell_lib.data',\n ],\n package_data={'codespell_lib': [\n op.join('data', 'dictionary.txt'),\n op.join('data', 'linux-kernel.exclude'),\n ]},\n scripts=['bin/codespell.py'])\n", "path": "setup.py"}], "after_files": [{"content": "#! /usr/bin/env python\n\n# adapted from mne-python\n\nimport os\nfrom os import path as op\n\ntry:\n import setuptools # noqa to allow --develop\nexcept Exception:\n pass\nfrom distutils.core import setup\n\nfrom codespell_lib import __version__\n\nDISTNAME = 'codespell'\nDESCRIPTION = \"\"\"Codespell\"\"\"\nMAINTAINER = 'Lucas De Marchi'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://github.com/lucasdemarchi/codespell/'\nLICENSE = 'GPL v2'\nDOWNLOAD_URL = 'https://github.com/lucasdemarchi/codespell/'\nwith open('README.rst', 'r') as f:\n LONG_DESCRIPTION = f.read()\n\nif __name__ == \"__main__\":\n if os.path.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n include_package_data=True,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=__version__,\n download_url=DOWNLOAD_URL,\n long_description=LONG_DESCRIPTION,\n zip_safe=False,\n classifiers=['Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS'],\n platforms='any',\n packages=[\n 'codespell_lib', 'codespell_lib.tests',\n 'codespell_lib.data',\n ],\n package_data={'codespell_lib': [\n op.join('data', 'dictionary.txt'),\n op.join('data', 'linux-kernel.exclude'),\n ]},\n scripts=['bin/codespell'])\n", "path": "setup.py"}]}
| 779 | 76 |
gh_patches_debug_28536
|
rasdani/github-patches
|
git_diff
|
systemd__mkosi-499
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Include man page in Python package
When building the man page with
```
pandoc -t man -o mkosi.1 mkosi.md
```
the output looks rather unpleasing:
```
mkosi ‐ Build Legacy‐Free OS Images mkosi [options...] build
mkosi [options...] clean mkosi [options...] summary mkosi [op‐
tions...] shell [command line...] mkosi [options...] boot [com‐
mand line...] mkosi [options...] qemu mkosi is a tool for easily
building legacy‐free OS images. It’s a fancy wrapper around dnf
‐‐installroot, debootstrap, pacstrap and zypper that may generate
disk images with a number of bells and whistles. The following
output formats are supported: Raw GPT disk image, with ext4 as
[...]
```
We should fix that and maybe generate the man page automatically via a commit hook, so that the man page can be included (and installed) when doing a pip install.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkosi/__main__.py`
Content:
```
1 # SPDX-License-Identifier: LGPL-2.1+
2 # PYTHON_ARGCOMPLETE_OK
3 import os
4 import sys
5
6 from . import parse_args, complete_step, run_verb, die, MkosiException
7
8
9 try:
10 args = parse_args()
11
12 for job_name, a in args.items():
13 # Change working directory if --directory is passed
14 if a.directory:
15 work_dir = a.directory
16 if os.path.isdir(work_dir):
17 os.chdir(work_dir)
18 else:
19 die(f"Error: {work_dir} is not a directory!")
20 with complete_step(f"Processing {job_name}"):
21 run_verb(a)
22 except MkosiException:
23 sys.exit(1)
24
```
Path: `setup.py`
Content:
```
1 #!/usr/bin/python3
2 # SPDX-License-Identifier: LGPL-2.1+
3
4 from setuptools import setup, Command
5
6 class BuildManpage(Command):
7 description = ('builds the manpage')
8 user_options = []
9
10 def initialize_options(self):
11 pass
12 def finalize_options(self):
13 pass
14
15 def run(self):
16 self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'mkosi.1', 'mkosi.md'])
17
18
19 setup(
20 name="mkosi",
21 version="5",
22 description="Create legacy-free OS images",
23 url="https://github.com/systemd/mkosi",
24 maintainer="mkosi contributors",
25 maintainer_email="[email protected]",
26 license="LGPLv2+",
27 python_requires=">=3.6",
28 packages = ["mkosi"],
29 cmdclass = { "man": BuildManpage },
30 entry_points = {"console_scripts": ["mkosi=mkosi.__main__"]},
31 )
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mkosi/__main__.py b/mkosi/__main__.py
--- a/mkosi/__main__.py
+++ b/mkosi/__main__.py
@@ -6,18 +6,23 @@
from . import parse_args, complete_step, run_verb, die, MkosiException
-try:
- args = parse_args()
+def main() -> None:
+ try:
+ args = parse_args()
- for job_name, a in args.items():
- # Change working directory if --directory is passed
- if a.directory:
- work_dir = a.directory
- if os.path.isdir(work_dir):
- os.chdir(work_dir)
- else:
- die(f"Error: {work_dir} is not a directory!")
- with complete_step(f"Processing {job_name}"):
- run_verb(a)
-except MkosiException:
- sys.exit(1)
+ for job_name, a in args.items():
+ # Change working directory if --directory is passed
+ if a.directory:
+ work_dir = a.directory
+ if os.path.isdir(work_dir):
+ os.chdir(work_dir)
+ else:
+ die(f"Error: {work_dir} is not a directory!")
+ with complete_step(f"Processing {job_name}"):
+ run_verb(a)
+ except MkosiException:
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -13,7 +13,7 @@
pass
def run(self):
- self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'mkosi.1', 'mkosi.md'])
+ self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'man/mkosi.1', 'mkosi.md'])
setup(
@@ -27,5 +27,6 @@
python_requires=">=3.6",
packages = ["mkosi"],
cmdclass = { "man": BuildManpage },
- entry_points = {"console_scripts": ["mkosi=mkosi.__main__"]},
+ data_files = [('share/man/man1', ["man/mkosi.1"])],
+ entry_points = {"console_scripts": ["mkosi=mkosi.__main__:main"]},
)
|
{"golden_diff": "diff --git a/mkosi/__main__.py b/mkosi/__main__.py\n--- a/mkosi/__main__.py\n+++ b/mkosi/__main__.py\n@@ -6,18 +6,23 @@\n from . import parse_args, complete_step, run_verb, die, MkosiException\n \n \n-try:\n- args = parse_args()\n+def main() -> None:\n+ try:\n+ args = parse_args()\n \n- for job_name, a in args.items():\n- # Change working directory if --directory is passed\n- if a.directory:\n- work_dir = a.directory\n- if os.path.isdir(work_dir):\n- os.chdir(work_dir)\n- else:\n- die(f\"Error: {work_dir} is not a directory!\")\n- with complete_step(f\"Processing {job_name}\"):\n- run_verb(a)\n-except MkosiException:\n- sys.exit(1)\n+ for job_name, a in args.items():\n+ # Change working directory if --directory is passed\n+ if a.directory:\n+ work_dir = a.directory\n+ if os.path.isdir(work_dir):\n+ os.chdir(work_dir)\n+ else:\n+ die(f\"Error: {work_dir} is not a directory!\")\n+ with complete_step(f\"Processing {job_name}\"):\n+ run_verb(a)\n+ except MkosiException:\n+ sys.exit(1)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -13,7 +13,7 @@\n pass\n \n def run(self):\n- self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'mkosi.1', 'mkosi.md'])\n+ self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'man/mkosi.1', 'mkosi.md'])\n \n \n setup(\n@@ -27,5 +27,6 @@\n python_requires=\">=3.6\",\n packages = [\"mkosi\"],\n cmdclass = { \"man\": BuildManpage },\n- entry_points = {\"console_scripts\": [\"mkosi=mkosi.__main__\"]},\n+ data_files = [('share/man/man1', [\"man/mkosi.1\"])],\n+ entry_points = {\"console_scripts\": [\"mkosi=mkosi.__main__:main\"]},\n )\n", "issue": "Include man page in Python package\nWhen building the man page with\r\n```\r\npandoc -t man -o mkosi.1 mkosi.md\r\n```\r\nthe output looks rather unpleasing:\r\n```\r\nmkosi \u2010 Build Legacy\u2010Free OS Images mkosi [options...] build\r\nmkosi [options...] clean mkosi [options...] summary mkosi [op\u2010\r\ntions...] shell [command line...] mkosi [options...] boot [com\u2010\r\nmand line...] mkosi [options...] qemu mkosi is a tool for easily\r\nbuilding legacy\u2010free OS images. It\u2019s a fancy wrapper around dnf\r\n\u2010\u2010installroot, debootstrap, pacstrap and zypper that may generate\r\ndisk images with a number of bells and whistles. The following\r\noutput formats are supported: Raw GPT disk image, with ext4 as\r\n[...]\r\n```\r\n\r\nWe should fix that and maybe generate the man page automatically via a commit hook, so that the man page can be included (and installed) when doing a pip install.\n", "before_files": [{"content": "# SPDX-License-Identifier: LGPL-2.1+\n# PYTHON_ARGCOMPLETE_OK\nimport os\nimport sys\n\nfrom . 
import parse_args, complete_step, run_verb, die, MkosiException\n\n\ntry:\n args = parse_args()\n\n for job_name, a in args.items():\n # Change working directory if --directory is passed\n if a.directory:\n work_dir = a.directory\n if os.path.isdir(work_dir):\n os.chdir(work_dir)\n else:\n die(f\"Error: {work_dir} is not a directory!\")\n with complete_step(f\"Processing {job_name}\"):\n run_verb(a)\nexcept MkosiException:\n sys.exit(1)\n", "path": "mkosi/__main__.py"}, {"content": "#!/usr/bin/python3\n# SPDX-License-Identifier: LGPL-2.1+\n\nfrom setuptools import setup, Command\n\nclass BuildManpage(Command):\n description = ('builds the manpage')\n user_options = []\n\n def initialize_options(self):\n pass\n def finalize_options(self):\n pass\n\n def run(self):\n self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'mkosi.1', 'mkosi.md'])\n\n\nsetup(\n name=\"mkosi\",\n version=\"5\",\n description=\"Create legacy-free OS images\",\n url=\"https://github.com/systemd/mkosi\",\n maintainer=\"mkosi contributors\",\n maintainer_email=\"[email protected]\",\n license=\"LGPLv2+\",\n python_requires=\">=3.6\",\n packages = [\"mkosi\"],\n cmdclass = { \"man\": BuildManpage },\n entry_points = {\"console_scripts\": [\"mkosi=mkosi.__main__\"]},\n)\n", "path": "setup.py"}], "after_files": [{"content": "# SPDX-License-Identifier: LGPL-2.1+\n# PYTHON_ARGCOMPLETE_OK\nimport os\nimport sys\n\nfrom . import parse_args, complete_step, run_verb, die, MkosiException\n\n\ndef main() -> None:\n try:\n args = parse_args()\n\n for job_name, a in args.items():\n # Change working directory if --directory is passed\n if a.directory:\n work_dir = a.directory\n if os.path.isdir(work_dir):\n os.chdir(work_dir)\n else:\n die(f\"Error: {work_dir} is not a directory!\")\n with complete_step(f\"Processing {job_name}\"):\n run_verb(a)\n except MkosiException:\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "mkosi/__main__.py"}, {"content": "#!/usr/bin/python3\n# SPDX-License-Identifier: LGPL-2.1+\n\nfrom setuptools import setup, Command\n\nclass BuildManpage(Command):\n description = ('builds the manpage')\n user_options = []\n\n def initialize_options(self):\n pass\n def finalize_options(self):\n pass\n\n def run(self):\n self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'man/mkosi.1', 'mkosi.md'])\n\n\nsetup(\n name=\"mkosi\",\n version=\"5\",\n description=\"Create legacy-free OS images\",\n url=\"https://github.com/systemd/mkosi\",\n maintainer=\"mkosi contributors\",\n maintainer_email=\"[email protected]\",\n license=\"LGPLv2+\",\n python_requires=\">=3.6\",\n packages = [\"mkosi\"],\n cmdclass = { \"man\": BuildManpage },\n data_files = [('share/man/man1', [\"man/mkosi.1\"])],\n entry_points = {\"console_scripts\": [\"mkosi=mkosi.__main__:main\"]},\n)\n", "path": "setup.py"}]}
| 972 | 540 |
gh_patches_debug_419
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-8651
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
grid_axis_alignment_no_toolbar is broken in Windows (32-bit)
`examples/integration/layout/grid_axis_alignment_no_toolbar`:

All axes should be in 0 to 10, 10^3, 10^6, 10^9 ranges. All major browsers are affected the same way. I suspect this may be due to 32-bit system architecture. It would be good if someone with 64-bit windows could run this example for comparison.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/integration/layout/grid_axis_alignment_no_toolbar.py`
Content:
```
1 from __future__ import absolute_import
2
3 import numpy as np
4
5 from bokeh.plotting import figure, save
6 from bokeh.layouts import gridplot
7
8 coeffs = [10**0, 10**3, 10**6, 10**9]
9 V = np.arange(10)
10
11 figs = []
12
13 for ycoeff in coeffs:
14 row = []
15 for xcoeff in coeffs:
16 fig = figure(plot_height=200, plot_width=200)
17 fig.xaxis[0].formatter.use_scientific = False
18 fig.yaxis[0].formatter.use_scientific = False
19 fig.xaxis[0].major_label_orientation = "vertical"
20 fig.yaxis[0].major_label_orientation = "horizontal"
21 fig.scatter(V*xcoeff, V*ycoeff)
22 row.append(fig)
23 figs.append(row)
24
25 grid = gridplot(figs, toolbar_location=None)
26
27 save(grid)
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/integration/layout/grid_axis_alignment_no_toolbar.py b/examples/integration/layout/grid_axis_alignment_no_toolbar.py
--- a/examples/integration/layout/grid_axis_alignment_no_toolbar.py
+++ b/examples/integration/layout/grid_axis_alignment_no_toolbar.py
@@ -6,7 +6,7 @@
from bokeh.layouts import gridplot
coeffs = [10**0, 10**3, 10**6, 10**9]
-V = np.arange(10)
+V = np.arange(10, dtype="int64")
figs = []
|
{"golden_diff": "diff --git a/examples/integration/layout/grid_axis_alignment_no_toolbar.py b/examples/integration/layout/grid_axis_alignment_no_toolbar.py\n--- a/examples/integration/layout/grid_axis_alignment_no_toolbar.py\n+++ b/examples/integration/layout/grid_axis_alignment_no_toolbar.py\n@@ -6,7 +6,7 @@\n from bokeh.layouts import gridplot\n \n coeffs = [10**0, 10**3, 10**6, 10**9]\n-V = np.arange(10)\n+V = np.arange(10, dtype=\"int64\")\n \n figs = []\n", "issue": "grid_axis_alignment_no_toolbar is broken in Windows (32-bit)\n`examples/integration/layout/grid_axis_alignment_no_toolbar`:\r\n\r\n\r\n\r\nAll axes should be in 0 to 10, 10^3, 10^6, 10^9 ranges. All major browsers are affected the same way. I suspect this may be due to 32-bit system architecture. It would be good if someone with 64-bit windows could run this example for comparison.\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport numpy as np\n\nfrom bokeh.plotting import figure, save\nfrom bokeh.layouts import gridplot\n\ncoeffs = [10**0, 10**3, 10**6, 10**9]\nV = np.arange(10)\n\nfigs = []\n\nfor ycoeff in coeffs:\n row = []\n for xcoeff in coeffs:\n fig = figure(plot_height=200, plot_width=200)\n fig.xaxis[0].formatter.use_scientific = False\n fig.yaxis[0].formatter.use_scientific = False\n fig.xaxis[0].major_label_orientation = \"vertical\"\n fig.yaxis[0].major_label_orientation = \"horizontal\"\n fig.scatter(V*xcoeff, V*ycoeff)\n row.append(fig)\n figs.append(row)\n\ngrid = gridplot(figs, toolbar_location=None)\n\nsave(grid)\n", "path": "examples/integration/layout/grid_axis_alignment_no_toolbar.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport numpy as np\n\nfrom bokeh.plotting import figure, save\nfrom bokeh.layouts import gridplot\n\ncoeffs = [10**0, 10**3, 10**6, 10**9]\nV = np.arange(10, dtype=\"int64\")\n\nfigs = []\n\nfor ycoeff in coeffs:\n row = []\n for xcoeff in coeffs:\n fig = figure(plot_height=200, plot_width=200)\n fig.xaxis[0].formatter.use_scientific = False\n fig.yaxis[0].formatter.use_scientific = False\n fig.xaxis[0].major_label_orientation = \"vertical\"\n fig.yaxis[0].major_label_orientation = \"horizontal\"\n fig.scatter(V*xcoeff, V*ycoeff)\n row.append(fig)\n figs.append(row)\n\ngrid = gridplot(figs, toolbar_location=None)\n\nsave(grid)\n", "path": "examples/integration/layout/grid_axis_alignment_no_toolbar.py"}]}
| 674 | 124 |
gh_patches_debug_17856
|
rasdani/github-patches
|
git_diff
|
mdn__kuma-5636
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unnecessary SQL select for react home landing page
The React landing page view extracts the default filters. Every time but it's never used.
<img width="1502" alt="Screen Shot 2019-08-13 at 2 44 39 PM" src="https://user-images.githubusercontent.com/26739/62968514-34a08a00-bdd9-11e9-92fb-c584683c7085.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/landing/views.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from django.conf import settings
4 from django.http import HttpResponse
5 from django.shortcuts import redirect, render
6 from django.views import static
7 from django.views.decorators.cache import never_cache
8 from django.views.generic import RedirectView
9
10 from kuma.core.decorators import ensure_wiki_domain, shared_cache_control
11 from kuma.core.utils import is_wiki
12 from kuma.feeder.models import Bundle
13 from kuma.feeder.sections import SECTION_HACKS
14 from kuma.search.models import Filter
15
16 from .utils import favicon_url
17
18
19 @shared_cache_control
20 def contribute_json(request):
21 return static.serve(request, 'contribute.json', document_root=settings.ROOT)
22
23
24 @shared_cache_control
25 def home(request):
26 """Home page."""
27 if is_wiki(request):
28 return render_home(request, 'landing/homepage.html')
29 return render_home(request, 'landing/react_homepage.html')
30
31
32 def render_home(request, template_name):
33 """Render the home page with the template named "template_name"."""
34 updates = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])
35 default_filters = Filter.objects.default_filters()
36 context = {
37 'updates': updates,
38 'default_filters': default_filters,
39 }
40 return render(request, template_name, context)
41
42
43 @ensure_wiki_domain
44 @never_cache
45 def maintenance_mode(request):
46 if settings.MAINTENANCE_MODE:
47 return render(request, 'landing/maintenance-mode.html')
48 else:
49 return redirect('home')
50
51
52 @ensure_wiki_domain
53 @shared_cache_control
54 def promote_buttons(request):
55 """Bug 646192: MDN affiliate buttons"""
56 return render(request, 'landing/promote_buttons.html')
57
58
59 ROBOTS_ALLOWED_TXT = '''\
60 User-agent: *
61 Sitemap: https://developer.mozilla.org/sitemap.xml
62
63 Disallow: /admin/
64 Disallow: /api/
65 Disallow: /*/dashboards/*
66 Disallow: /*docs/feeds
67 Disallow: /*docs/templates
68 Disallow: /*docs*Template:
69 Disallow: /*docs/all
70 Disallow: /*docs/tag*
71 Disallow: /*docs/needs-review*
72 Disallow: /*docs/localization-tag*
73 Disallow: /*docs/with-errors
74 Disallow: /*docs/without-parent
75 Disallow: /*docs/top-level
76 Disallow: /*docs/new
77 Disallow: /*docs/get-documents
78 Disallow: /*docs/submit_akismet_spam
79 Disallow: /*docs/load*
80 Disallow: /*docs/Experiment:*
81 Disallow: /*$api
82 Disallow: /*$compare
83 Disallow: /*$revision
84 Disallow: /*$history
85 Disallow: /*$children
86 Disallow: /*$flag
87 Disallow: /*$locales
88 Disallow: /*$toc
89 Disallow: /*$move
90 Disallow: /*$quick-review
91 Disallow: /*$samples
92 Disallow: /*$revert
93 Disallow: /*$repair_breadcrumbs
94 Disallow: /*$delete
95 Disallow: /*$restore
96 Disallow: /*$purge
97 Disallow: /*$subscribe
98 Disallow: /*$subscribe_to_tree
99 Disallow: /*$vote
100 Disallow: /*docs.json
101 Disallow: /*docs/ckeditor_config.js
102 Disallow: /*/files/
103 Disallow: /media
104 Disallow: /*move-requested
105 Disallow: /*preview-wiki-content
106 Disallow: /*profiles*/edit
107 Disallow: /skins
108 Disallow: /*type=feed
109 Disallow: /*users/
110 ''' + '\n'.join('Disallow: /{locale}/search'.format(locale=locale)
111 for locale in settings.ENABLED_LOCALES)
112
113 ROBOTS_GO_AWAY_TXT = '''\
114 User-Agent: *
115 Disallow: /
116 '''
117
118
119 @shared_cache_control
120 def robots_txt(request):
121 """Serve robots.txt that allows or forbids robots."""
122 host = request.get_host()
123 if host in settings.ALLOW_ROBOTS_DOMAINS:
124 robots = ""
125 elif host in settings.ALLOW_ROBOTS_WEB_DOMAINS:
126 robots = ROBOTS_ALLOWED_TXT
127 else:
128 robots = ROBOTS_GO_AWAY_TXT
129 return HttpResponse(robots, content_type='text/plain')
130
131
132 class FaviconRedirect(RedirectView):
133 """Redirect to the favicon in the static img folder (bug 1402497)"""
134
135 def get_redirect_url(self, *args, **kwargs):
136 return favicon_url()
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kuma/landing/views.py b/kuma/landing/views.py
--- a/kuma/landing/views.py
+++ b/kuma/landing/views.py
@@ -24,19 +24,16 @@
@shared_cache_control
def home(request):
"""Home page."""
+ context = {}
+ # Need for both wiki and react homepage
+ context['updates'] = list(
+ Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])
+
+ # The default template name
+ template_name = 'landing/react_homepage.html'
if is_wiki(request):
- return render_home(request, 'landing/homepage.html')
- return render_home(request, 'landing/react_homepage.html')
-
-
-def render_home(request, template_name):
- """Render the home page with the template named "template_name"."""
- updates = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])
- default_filters = Filter.objects.default_filters()
- context = {
- 'updates': updates,
- 'default_filters': default_filters,
- }
+ template_name = 'landing/homepage.html'
+ context['default_filters'] = Filter.objects.default_filters()
return render(request, template_name, context)
|
{"golden_diff": "diff --git a/kuma/landing/views.py b/kuma/landing/views.py\n--- a/kuma/landing/views.py\n+++ b/kuma/landing/views.py\n@@ -24,19 +24,16 @@\n @shared_cache_control\n def home(request):\n \"\"\"Home page.\"\"\"\n+ context = {}\n+ # Need for both wiki and react homepage\n+ context['updates'] = list(\n+ Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])\n+\n+ # The default template name\n+ template_name = 'landing/react_homepage.html'\n if is_wiki(request):\n- return render_home(request, 'landing/homepage.html')\n- return render_home(request, 'landing/react_homepage.html')\n-\n-\n-def render_home(request, template_name):\n- \"\"\"Render the home page with the template named \"template_name\".\"\"\"\n- updates = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])\n- default_filters = Filter.objects.default_filters()\n- context = {\n- 'updates': updates,\n- 'default_filters': default_filters,\n- }\n+ template_name = 'landing/homepage.html'\n+ context['default_filters'] = Filter.objects.default_filters()\n return render(request, template_name, context)\n", "issue": "Unnecessary SQL select for react home landing page\nThe React landing page view extracts the default filters. Every time but it's never used. \r\n<img width=\"1502\" alt=\"Screen Shot 2019-08-13 at 2 44 39 PM\" src=\"https://user-images.githubusercontent.com/26739/62968514-34a08a00-bdd9-11e9-92fb-c584683c7085.png\">\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom django.views import static\nfrom django.views.decorators.cache import never_cache\nfrom django.views.generic import RedirectView\n\nfrom kuma.core.decorators import ensure_wiki_domain, shared_cache_control\nfrom kuma.core.utils import is_wiki\nfrom kuma.feeder.models import Bundle\nfrom kuma.feeder.sections import SECTION_HACKS\nfrom kuma.search.models import Filter\n\nfrom .utils import favicon_url\n\n\n@shared_cache_control\ndef contribute_json(request):\n return static.serve(request, 'contribute.json', document_root=settings.ROOT)\n\n\n@shared_cache_control\ndef home(request):\n \"\"\"Home page.\"\"\"\n if is_wiki(request):\n return render_home(request, 'landing/homepage.html')\n return render_home(request, 'landing/react_homepage.html')\n\n\ndef render_home(request, template_name):\n \"\"\"Render the home page with the template named \"template_name\".\"\"\"\n updates = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])\n default_filters = Filter.objects.default_filters()\n context = {\n 'updates': updates,\n 'default_filters': default_filters,\n }\n return render(request, template_name, context)\n\n\n@ensure_wiki_domain\n@never_cache\ndef maintenance_mode(request):\n if settings.MAINTENANCE_MODE:\n return render(request, 'landing/maintenance-mode.html')\n else:\n return redirect('home')\n\n\n@ensure_wiki_domain\n@shared_cache_control\ndef promote_buttons(request):\n \"\"\"Bug 646192: MDN affiliate buttons\"\"\"\n return render(request, 'landing/promote_buttons.html')\n\n\nROBOTS_ALLOWED_TXT = '''\\\nUser-agent: *\nSitemap: https://developer.mozilla.org/sitemap.xml\n\nDisallow: /admin/\nDisallow: /api/\nDisallow: /*/dashboards/*\nDisallow: /*docs/feeds\nDisallow: /*docs/templates\nDisallow: /*docs*Template:\nDisallow: /*docs/all\nDisallow: /*docs/tag*\nDisallow: /*docs/needs-review*\nDisallow: /*docs/localization-tag*\nDisallow: /*docs/with-errors\nDisallow: 
/*docs/without-parent\nDisallow: /*docs/top-level\nDisallow: /*docs/new\nDisallow: /*docs/get-documents\nDisallow: /*docs/submit_akismet_spam\nDisallow: /*docs/load*\nDisallow: /*docs/Experiment:*\nDisallow: /*$api\nDisallow: /*$compare\nDisallow: /*$revision\nDisallow: /*$history\nDisallow: /*$children\nDisallow: /*$flag\nDisallow: /*$locales\nDisallow: /*$toc\nDisallow: /*$move\nDisallow: /*$quick-review\nDisallow: /*$samples\nDisallow: /*$revert\nDisallow: /*$repair_breadcrumbs\nDisallow: /*$delete\nDisallow: /*$restore\nDisallow: /*$purge\nDisallow: /*$subscribe\nDisallow: /*$subscribe_to_tree\nDisallow: /*$vote\nDisallow: /*docs.json\nDisallow: /*docs/ckeditor_config.js\nDisallow: /*/files/\nDisallow: /media\nDisallow: /*move-requested\nDisallow: /*preview-wiki-content\nDisallow: /*profiles*/edit\nDisallow: /skins\nDisallow: /*type=feed\nDisallow: /*users/\n''' + '\\n'.join('Disallow: /{locale}/search'.format(locale=locale)\n for locale in settings.ENABLED_LOCALES)\n\nROBOTS_GO_AWAY_TXT = '''\\\nUser-Agent: *\nDisallow: /\n'''\n\n\n@shared_cache_control\ndef robots_txt(request):\n \"\"\"Serve robots.txt that allows or forbids robots.\"\"\"\n host = request.get_host()\n if host in settings.ALLOW_ROBOTS_DOMAINS:\n robots = \"\"\n elif host in settings.ALLOW_ROBOTS_WEB_DOMAINS:\n robots = ROBOTS_ALLOWED_TXT\n else:\n robots = ROBOTS_GO_AWAY_TXT\n return HttpResponse(robots, content_type='text/plain')\n\n\nclass FaviconRedirect(RedirectView):\n \"\"\"Redirect to the favicon in the static img folder (bug 1402497)\"\"\"\n\n def get_redirect_url(self, *args, **kwargs):\n return favicon_url()\n", "path": "kuma/landing/views.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom django.views import static\nfrom django.views.decorators.cache import never_cache\nfrom django.views.generic import RedirectView\n\nfrom kuma.core.decorators import ensure_wiki_domain, shared_cache_control\nfrom kuma.core.utils import is_wiki\nfrom kuma.feeder.models import Bundle\nfrom kuma.feeder.sections import SECTION_HACKS\nfrom kuma.search.models import Filter\n\nfrom .utils import favicon_url\n\n\n@shared_cache_control\ndef contribute_json(request):\n return static.serve(request, 'contribute.json', document_root=settings.ROOT)\n\n\n@shared_cache_control\ndef home(request):\n \"\"\"Home page.\"\"\"\n context = {}\n # Need for both wiki and react homepage\n context['updates'] = list(\n Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])\n\n # The default template name\n template_name = 'landing/react_homepage.html'\n if is_wiki(request):\n template_name = 'landing/homepage.html'\n context['default_filters'] = Filter.objects.default_filters()\n return render(request, template_name, context)\n\n\n@ensure_wiki_domain\n@never_cache\ndef maintenance_mode(request):\n if settings.MAINTENANCE_MODE:\n return render(request, 'landing/maintenance-mode.html')\n else:\n return redirect('home')\n\n\n@ensure_wiki_domain\n@shared_cache_control\ndef promote_buttons(request):\n \"\"\"Bug 646192: MDN affiliate buttons\"\"\"\n return render(request, 'landing/promote_buttons.html')\n\n\nROBOTS_ALLOWED_TXT = '''\\\nUser-agent: *\nSitemap: https://developer.mozilla.org/sitemap.xml\n\nDisallow: /admin/\nDisallow: /api/\nDisallow: /*/dashboards/*\nDisallow: /*docs/feeds\nDisallow: /*docs/templates\nDisallow: /*docs*Template:\nDisallow: /*docs/all\nDisallow: /*docs/tag*\nDisallow: 
/*docs/needs-review*\nDisallow: /*docs/localization-tag*\nDisallow: /*docs/with-errors\nDisallow: /*docs/without-parent\nDisallow: /*docs/top-level\nDisallow: /*docs/new\nDisallow: /*docs/get-documents\nDisallow: /*docs/submit_akismet_spam\nDisallow: /*docs/load*\nDisallow: /*docs/Experiment:*\nDisallow: /*$api\nDisallow: /*$compare\nDisallow: /*$revision\nDisallow: /*$history\nDisallow: /*$children\nDisallow: /*$flag\nDisallow: /*$locales\nDisallow: /*$toc\nDisallow: /*$move\nDisallow: /*$quick-review\nDisallow: /*$samples\nDisallow: /*$revert\nDisallow: /*$repair_breadcrumbs\nDisallow: /*$delete\nDisallow: /*$restore\nDisallow: /*$purge\nDisallow: /*$subscribe\nDisallow: /*$subscribe_to_tree\nDisallow: /*$vote\nDisallow: /*docs.json\nDisallow: /*docs/ckeditor_config.js\nDisallow: /*/files/\nDisallow: /media\nDisallow: /*move-requested\nDisallow: /*preview-wiki-content\nDisallow: /*profiles*/edit\nDisallow: /skins\nDisallow: /*type=feed\nDisallow: /*users/\n''' + '\\n'.join('Disallow: /{locale}/search'.format(locale=locale)\n for locale in settings.ENABLED_LOCALES)\n\nROBOTS_GO_AWAY_TXT = '''\\\nUser-Agent: *\nDisallow: /\n'''\n\n\n@shared_cache_control\ndef robots_txt(request):\n \"\"\"Serve robots.txt that allows or forbids robots.\"\"\"\n host = request.get_host()\n if host in settings.ALLOW_ROBOTS_DOMAINS:\n robots = \"\"\n elif host in settings.ALLOW_ROBOTS_WEB_DOMAINS:\n robots = ROBOTS_ALLOWED_TXT\n else:\n robots = ROBOTS_GO_AWAY_TXT\n return HttpResponse(robots, content_type='text/plain')\n\n\nclass FaviconRedirect(RedirectView):\n \"\"\"Redirect to the favicon in the static img folder (bug 1402497)\"\"\"\n\n def get_redirect_url(self, *args, **kwargs):\n return favicon_url()\n", "path": "kuma/landing/views.py"}]}
| 1,645 | 275 |
gh_patches_debug_1310
|
rasdani/github-patches
|
git_diff
|
hylang__hy-161
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
LIST-COMP breaks with certain variable names
Try compiling:
```
(list-comp (, i j) (i [-1 0 1] j [-1 0 1]))
```
With hy and you'll get some strange errors. If you replace "i" and "j" with "x" and "y" respectively, the same piece of code works as expected.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hy/lex/states.py`
Content:
```
1 # Copyright (c) 2013 Paul Tagliamonte <[email protected]>
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a
4 # copy of this software and associated documentation files (the "Software"),
5 # to deal in the Software without restriction, including without limitation
6 # the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 # and/or sell copies of the Software, and to permit persons to whom the
8 # Software is furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
19 # DEALINGS IN THE SOFTWARE.
20
21 from hy.models.expression import HyExpression
22 from hy.models.integer import HyInteger
23 from hy.models.lambdalist import HyLambdaListKeyword
24 from hy.models.float import HyFloat
25 from hy.models.complex import HyComplex
26 from hy.models.symbol import HySymbol
27 from hy.models.string import HyString
28 from hy.models.keyword import HyKeyword
29 from hy.models.dict import HyDict
30 from hy.models.list import HyList
31
32 from hy.errors import HyError
33
34 from abc import ABCMeta, abstractmethod
35
36
37 WHITESPACE = [" ", "\t", "\n", "\r"]
38
39
40 class LexException(HyError):
41 """
42 Error during the Lexing of a Hython expression.
43 """
44 pass
45
46
47 def _resolve_atom(obj):
48 """
49 Resolve a bare atom into one of the following (in order):
50
51 - Integer
52 - LambdaListKeyword
53 - Float
54 - Complex
55 - Symbol
56 """
57 try:
58 return HyInteger(obj)
59 except ValueError:
60 pass
61
62 if obj.startswith("&"):
63 return HyLambdaListKeyword(obj)
64
65 try:
66 return HyFloat(obj)
67 except ValueError:
68 pass
69
70 try:
71 return HyComplex(obj)
72 except ValueError:
73 pass
74
75 table = {
76 "true": "True",
77 "false": "False",
78 "null": "None",
79 }
80
81 if obj in table:
82 return HySymbol(table[obj])
83
84 if obj.startswith(":"):
85 return HyKeyword(obj)
86
87 if obj.startswith("*") and obj.endswith("*") and obj not in ("*", "**"):
88 obj = obj[1:-1].upper()
89
90 if "-" in obj and obj != "-":
91 obj = obj.replace("-", "_")
92
93 return HySymbol(obj)
94
95
96 class State(object):
97 """
98 Generic State model.
99 """
100
101 __slots__ = ("nodes", "machine")
102 __metaclass__ = ABCMeta
103
104 def __init__(self, machine):
105 self.machine = machine
106
107 def _enter(self):
108 """ Internal shim for running global ``enter`` code """
109 self.result = None
110 self.nodes = []
111 self.enter()
112
113 def _exit(self):
114 """ Internal shim for running global ``exit`` code """
115 self.exit()
116
117 def enter(self):
118 """
119 Overridable ``enter`` routines. Subclasses may implement this.
120 """
121 pass
122
123 def exit(self):
124 """
125 Overridable ``exit`` routines. Subclasses may implement this.
126 """
127 pass
128
129 @abstractmethod
130 def process(self, char):
131 """
132 Overridable ``process`` routines. Subclasses must implement this to be
133 useful.
134 """
135 pass # ABC
136
137
138 class ListeyThing(State):
139
140 def enter(self):
141 self.buf = ""
142
143 def commit(self):
144 if self.buf != "":
145 ret = _resolve_atom(self.buf)
146 ret.start_line = self._start_line
147 ret.start_column = self._start_column
148 ret.end_line = self.machine.line
149 ret.end_column = (self.machine.column - 1)
150
151 self.nodes.append(ret)
152 self.buf = ""
153
154 def exit(self):
155 self.commit()
156 self.result = self.result_type(self.nodes)
157
158 def process(self, char):
159 if char == "(":
160 self.commit()
161 self.machine.sub(Expression)
162 return
163
164 if char == "{":
165 self.commit()
166 self.machine.sub(Dict)
167 return
168
169 if char == "[":
170 self.commit()
171 self.machine.sub(List)
172 return
173
174 if char == "\"":
175 self.commit()
176 self.machine.sub(String)
177 return
178
179 if char == ";":
180 self.commit()
181 self.machine.sub(Comment)
182 return
183
184 if char == self.end_char:
185 return Idle
186
187 if char in ")]}":
188 raise LexException("Unexpected closing character: `%s'" % (char))
189
190 if char in WHITESPACE:
191 self.commit()
192 return
193
194 if self.buf == "":
195 self._start_line = self.machine.line
196 self._start_column = self.machine.column
197
198 self.buf += char
199
200
201 class List(ListeyThing):
202 """
203 This state parses a Hy list (like a Clojure vector) for use in native
204 Python interop.
205
206 [foo 1 2 3 4] is a good example.
207 """
208
209 result_type = HyList
210 end_char = "]"
211
212
213 class Expression(ListeyThing):
214 """
215 This state parses a Hy expression (statement, to be evaluated at runtime)
216 for running things & stuff.
217 """
218
219 result_type = HyExpression
220 end_char = ")"
221
222
223 class Dict(ListeyThing):
224 """
225 This state parses a Hy dict for things.
226 """
227
228 def exit(self):
229 self.commit()
230 it = iter(self.nodes)
231 result = dict(zip(it, it))
232 self.result = HyDict(result)
233
234 end_char = "}"
235
236
237 class String(State):
238 """
239 String state. This will handle stuff like:
240
241 (println "foobar")
242 ^^^^^^^^ -- String
243 """
244
245 def enter(self):
246 self.escaped = False
247
248 def exit(self):
249 self.result = HyString("".join(self.nodes))
250
251 def process(self, char):
252 """
253 State transitions:
254
255 - " - Idle
256 """
257 if self.escaped:
258 self.escaped = False
259 if char == "n":
260 self.nodes.append("\n")
261 return
262 if char == "\\":
263 self.nodes.append("\\")
264 return
265 if char == "\"":
266 self.nodes.append("\"")
267 return
268
269 raise LexException("Unknown modifier: `%s'" % (char))
270
271 if char == "\"":
272 return Idle
273
274 if char == "\\":
275 self.escaped = True
276 return
277
278 self.nodes.append(char)
279
280
281 class Atom(State):
282 """
283 This state parses integer constants, boolean constants, and symbols
284 """
285
286 def __init__(self, machine):
287 State.__init__(self, machine)
288 self.initial_buf = ''
289
290 def enter(self):
291 self.buf = self.initial_buf
292
293 def exit(self):
294 self.result = _resolve_atom(self.buf)
295
296 def process(self, char):
297 """
298 State transitions:
299
300 - WHITESPACE - Idle
301 - ; - Comment
302 """
303
304 if char in WHITESPACE:
305 return Idle
306
307 if char == ";":
308 return Comment
309
310 self.buf += char
311
312
313 def AtomStartingWith(initial_char):
314 def AtomFactory(machine):
315 state = Atom(machine)
316 state.initial_buf = initial_char
317 return state
318 return AtomFactory
319
320
321 class Idle(State):
322 """
323 Idle state. This is the first (and last) thing that we should
324 be in.
325 """
326
327 def process(self, char):
328 """
329 State transitions:
330
331 - ( - Expression
332 - [ - List
333 - { - Dict
334 - \" - String
335 - ; - Comment
336 - # - Hash
337 - (default) - Atom
338 """
339
340 if char == "(":
341 return Expression
342
343 if char == "[":
344 return List
345
346 if char == "{":
347 return Dict
348
349 if char == "\"":
350 return String
351
352 if char == ";":
353 return Comment
354
355 if char == "#":
356 return Hash
357
358 if char in WHITESPACE:
359 return
360
361 return AtomStartingWith(char)
362
363
364 class Comment(State):
365 """
366 Comment state.
367 """
368
369 def process(self, char):
370 """
371 State transitions:
372
373 - \n - Idle
374 - (default) - disregard.
375 """
376
377 if char == "\n":
378 return Idle
379
380
381 class Hash(State):
382 """
383 Hash state
384 """
385
386 def process(self, char):
387 """
388 State transitions:
389
390 - ! - Comment
391 """
392
393 if char == "!":
394 return Comment
395
396 raise LexException("Unknown char (Hash state): `%s'" % (char))
397
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/hy/lex/states.py b/hy/lex/states.py
--- a/hy/lex/states.py
+++ b/hy/lex/states.py
@@ -67,10 +67,11 @@
except ValueError:
pass
- try:
- return HyComplex(obj)
- except ValueError:
- pass
+ if obj != "j":
+ try:
+ return HyComplex(obj)
+ except ValueError:
+ pass
table = {
"true": "True",
|
{"golden_diff": "diff --git a/hy/lex/states.py b/hy/lex/states.py\n--- a/hy/lex/states.py\n+++ b/hy/lex/states.py\n@@ -67,10 +67,11 @@\n except ValueError:\n pass\n \n- try:\n- return HyComplex(obj)\n- except ValueError:\n- pass\n+ if obj != \"j\":\n+ try:\n+ return HyComplex(obj)\n+ except ValueError:\n+ pass\n \n table = {\n \"true\": \"True\",\n", "issue": "LIST-COMP breaks with certain variable names\nTry compiling:\n\n```\n(list-comp (, i j) (i [-1 0 1] j [-1 0 1]))\n```\n\nWith hy and you'll get some strange errors. If you replace \"i\" and \"j\" with \"x\" and \"y\" respectively, the same piece of code works as expected.\n\n", "before_files": [{"content": "# Copyright (c) 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom hy.models.expression import HyExpression\nfrom hy.models.integer import HyInteger\nfrom hy.models.lambdalist import HyLambdaListKeyword\nfrom hy.models.float import HyFloat\nfrom hy.models.complex import HyComplex\nfrom hy.models.symbol import HySymbol\nfrom hy.models.string import HyString\nfrom hy.models.keyword import HyKeyword\nfrom hy.models.dict import HyDict\nfrom hy.models.list import HyList\n\nfrom hy.errors import HyError\n\nfrom abc import ABCMeta, abstractmethod\n\n\nWHITESPACE = [\" \", \"\\t\", \"\\n\", \"\\r\"]\n\n\nclass LexException(HyError):\n \"\"\"\n Error during the Lexing of a Hython expression.\n \"\"\"\n pass\n\n\ndef _resolve_atom(obj):\n \"\"\"\n Resolve a bare atom into one of the following (in order):\n\n - Integer\n - LambdaListKeyword\n - Float\n - Complex\n - Symbol\n \"\"\"\n try:\n return HyInteger(obj)\n except ValueError:\n pass\n\n if obj.startswith(\"&\"):\n return HyLambdaListKeyword(obj)\n\n try:\n return HyFloat(obj)\n except ValueError:\n pass\n\n try:\n return HyComplex(obj)\n except ValueError:\n pass\n\n table = {\n \"true\": \"True\",\n \"false\": \"False\",\n \"null\": \"None\",\n }\n\n if obj in table:\n return HySymbol(table[obj])\n\n if obj.startswith(\":\"):\n return HyKeyword(obj)\n\n if obj.startswith(\"*\") and obj.endswith(\"*\") and obj not in (\"*\", \"**\"):\n obj = obj[1:-1].upper()\n\n if \"-\" in obj and obj != \"-\":\n obj = obj.replace(\"-\", \"_\")\n\n return HySymbol(obj)\n\n\nclass State(object):\n \"\"\"\n Generic State model.\n \"\"\"\n\n __slots__ = (\"nodes\", \"machine\")\n __metaclass__ = ABCMeta\n\n def __init__(self, machine):\n self.machine = machine\n\n def _enter(self):\n \"\"\" Internal shim for running global ``enter`` code 
\"\"\"\n self.result = None\n self.nodes = []\n self.enter()\n\n def _exit(self):\n \"\"\" Internal shim for running global ``exit`` code \"\"\"\n self.exit()\n\n def enter(self):\n \"\"\"\n Overridable ``enter`` routines. Subclasses may implement this.\n \"\"\"\n pass\n\n def exit(self):\n \"\"\"\n Overridable ``exit`` routines. Subclasses may implement this.\n \"\"\"\n pass\n\n @abstractmethod\n def process(self, char):\n \"\"\"\n Overridable ``process`` routines. Subclasses must implement this to be\n useful.\n \"\"\"\n pass # ABC\n\n\nclass ListeyThing(State):\n\n def enter(self):\n self.buf = \"\"\n\n def commit(self):\n if self.buf != \"\":\n ret = _resolve_atom(self.buf)\n ret.start_line = self._start_line\n ret.start_column = self._start_column\n ret.end_line = self.machine.line\n ret.end_column = (self.machine.column - 1)\n\n self.nodes.append(ret)\n self.buf = \"\"\n\n def exit(self):\n self.commit()\n self.result = self.result_type(self.nodes)\n\n def process(self, char):\n if char == \"(\":\n self.commit()\n self.machine.sub(Expression)\n return\n\n if char == \"{\":\n self.commit()\n self.machine.sub(Dict)\n return\n\n if char == \"[\":\n self.commit()\n self.machine.sub(List)\n return\n\n if char == \"\\\"\":\n self.commit()\n self.machine.sub(String)\n return\n\n if char == \";\":\n self.commit()\n self.machine.sub(Comment)\n return\n\n if char == self.end_char:\n return Idle\n\n if char in \")]}\":\n raise LexException(\"Unexpected closing character: `%s'\" % (char))\n\n if char in WHITESPACE:\n self.commit()\n return\n\n if self.buf == \"\":\n self._start_line = self.machine.line\n self._start_column = self.machine.column\n\n self.buf += char\n\n\nclass List(ListeyThing):\n \"\"\"\n This state parses a Hy list (like a Clojure vector) for use in native\n Python interop.\n\n [foo 1 2 3 4] is a good example.\n \"\"\"\n\n result_type = HyList\n end_char = \"]\"\n\n\nclass Expression(ListeyThing):\n \"\"\"\n This state parses a Hy expression (statement, to be evaluated at runtime)\n for running things & stuff.\n \"\"\"\n\n result_type = HyExpression\n end_char = \")\"\n\n\nclass Dict(ListeyThing):\n \"\"\"\n This state parses a Hy dict for things.\n \"\"\"\n\n def exit(self):\n self.commit()\n it = iter(self.nodes)\n result = dict(zip(it, it))\n self.result = HyDict(result)\n\n end_char = \"}\"\n\n\nclass String(State):\n \"\"\"\n String state. 
This will handle stuff like:\n\n (println \"foobar\")\n ^^^^^^^^ -- String\n \"\"\"\n\n def enter(self):\n self.escaped = False\n\n def exit(self):\n self.result = HyString(\"\".join(self.nodes))\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - \" - Idle\n \"\"\"\n if self.escaped:\n self.escaped = False\n if char == \"n\":\n self.nodes.append(\"\\n\")\n return\n if char == \"\\\\\":\n self.nodes.append(\"\\\\\")\n return\n if char == \"\\\"\":\n self.nodes.append(\"\\\"\")\n return\n\n raise LexException(\"Unknown modifier: `%s'\" % (char))\n\n if char == \"\\\"\":\n return Idle\n\n if char == \"\\\\\":\n self.escaped = True\n return\n\n self.nodes.append(char)\n\n\nclass Atom(State):\n \"\"\"\n This state parses integer constants, boolean constants, and symbols\n \"\"\"\n\n def __init__(self, machine):\n State.__init__(self, machine)\n self.initial_buf = ''\n\n def enter(self):\n self.buf = self.initial_buf\n\n def exit(self):\n self.result = _resolve_atom(self.buf)\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - WHITESPACE - Idle\n - ; - Comment\n \"\"\"\n\n if char in WHITESPACE:\n return Idle\n\n if char == \";\":\n return Comment\n\n self.buf += char\n\n\ndef AtomStartingWith(initial_char):\n def AtomFactory(machine):\n state = Atom(machine)\n state.initial_buf = initial_char\n return state\n return AtomFactory\n\n\nclass Idle(State):\n \"\"\"\n Idle state. This is the first (and last) thing that we should\n be in.\n \"\"\"\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - ( - Expression\n - [ - List\n - { - Dict\n - \\\" - String\n - ; - Comment\n - # - Hash\n - (default) - Atom\n \"\"\"\n\n if char == \"(\":\n return Expression\n\n if char == \"[\":\n return List\n\n if char == \"{\":\n return Dict\n\n if char == \"\\\"\":\n return String\n\n if char == \";\":\n return Comment\n\n if char == \"#\":\n return Hash\n\n if char in WHITESPACE:\n return\n\n return AtomStartingWith(char)\n\n\nclass Comment(State):\n \"\"\"\n Comment state.\n \"\"\"\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - \\n - Idle\n - (default) - disregard.\n \"\"\"\n\n if char == \"\\n\":\n return Idle\n\n\nclass Hash(State):\n \"\"\"\n Hash state\n \"\"\"\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - ! - Comment\n \"\"\"\n\n if char == \"!\":\n return Comment\n\n raise LexException(\"Unknown char (Hash state): `%s'\" % (char))\n", "path": "hy/lex/states.py"}], "after_files": [{"content": "# Copyright (c) 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom hy.models.expression import HyExpression\nfrom hy.models.integer import HyInteger\nfrom hy.models.lambdalist import HyLambdaListKeyword\nfrom hy.models.float import HyFloat\nfrom hy.models.complex import HyComplex\nfrom hy.models.symbol import HySymbol\nfrom hy.models.string import HyString\nfrom hy.models.keyword import HyKeyword\nfrom hy.models.dict import HyDict\nfrom hy.models.list import HyList\n\nfrom hy.errors import HyError\n\nfrom abc import ABCMeta, abstractmethod\n\n\nWHITESPACE = [\" \", \"\\t\", \"\\n\", \"\\r\"]\n\n\nclass LexException(HyError):\n \"\"\"\n Error during the Lexing of a Hython expression.\n \"\"\"\n pass\n\n\ndef _resolve_atom(obj):\n \"\"\"\n Resolve a bare atom into one of the following (in order):\n\n - Integer\n - LambdaListKeyword\n - Float\n - Complex\n - Symbol\n \"\"\"\n try:\n return HyInteger(obj)\n except ValueError:\n pass\n\n if obj.startswith(\"&\"):\n return HyLambdaListKeyword(obj)\n\n try:\n return HyFloat(obj)\n except ValueError:\n pass\n\n if obj != \"j\":\n try:\n return HyComplex(obj)\n except ValueError:\n pass\n\n table = {\n \"true\": \"True\",\n \"false\": \"False\",\n \"null\": \"None\",\n }\n\n if obj in table:\n return HySymbol(table[obj])\n\n if obj.startswith(\":\"):\n return HyKeyword(obj)\n\n if obj.startswith(\"*\") and obj.endswith(\"*\") and obj not in (\"*\", \"**\"):\n obj = obj[1:-1].upper()\n\n if \"-\" in obj and obj != \"-\":\n obj = obj.replace(\"-\", \"_\")\n\n return HySymbol(obj)\n\n\nclass State(object):\n \"\"\"\n Generic State model.\n \"\"\"\n\n __slots__ = (\"nodes\", \"machine\")\n __metaclass__ = ABCMeta\n\n def __init__(self, machine):\n self.machine = machine\n\n def _enter(self):\n \"\"\" Internal shim for running global ``enter`` code \"\"\"\n self.result = None\n self.nodes = []\n self.enter()\n\n def _exit(self):\n \"\"\" Internal shim for running global ``exit`` code \"\"\"\n self.exit()\n\n def enter(self):\n \"\"\"\n Overridable ``enter`` routines. Subclasses may implement this.\n \"\"\"\n pass\n\n def exit(self):\n \"\"\"\n Overridable ``exit`` routines. Subclasses may implement this.\n \"\"\"\n pass\n\n @abstractmethod\n def process(self, char):\n \"\"\"\n Overridable ``process`` routines. 
Subclasses must implement this to be\n useful.\n \"\"\"\n pass # ABC\n\n\nclass ListeyThing(State):\n\n def enter(self):\n self.buf = \"\"\n\n def commit(self):\n if self.buf != \"\":\n ret = _resolve_atom(self.buf)\n ret.start_line = self._start_line\n ret.start_column = self._start_column\n ret.end_line = self.machine.line\n ret.end_column = (self.machine.column - 1)\n\n self.nodes.append(ret)\n self.buf = \"\"\n\n def exit(self):\n self.commit()\n self.result = self.result_type(self.nodes)\n\n def process(self, char):\n if char == \"(\":\n self.commit()\n self.machine.sub(Expression)\n return\n\n if char == \"{\":\n self.commit()\n self.machine.sub(Dict)\n return\n\n if char == \"[\":\n self.commit()\n self.machine.sub(List)\n return\n\n if char == \"\\\"\":\n self.commit()\n self.machine.sub(String)\n return\n\n if char == \";\":\n self.commit()\n self.machine.sub(Comment)\n return\n\n if char == self.end_char:\n return Idle\n\n if char in \")]}\":\n raise LexException(\"Unexpected closing character: `%s'\" % (char))\n\n if char in WHITESPACE:\n self.commit()\n return\n\n if self.buf == \"\":\n self._start_line = self.machine.line\n self._start_column = self.machine.column\n\n self.buf += char\n\n\nclass List(ListeyThing):\n \"\"\"\n This state parses a Hy list (like a Clojure vector) for use in native\n Python interop.\n\n [foo 1 2 3 4] is a good example.\n \"\"\"\n\n result_type = HyList\n end_char = \"]\"\n\n\nclass Expression(ListeyThing):\n \"\"\"\n This state parses a Hy expression (statement, to be evaluated at runtime)\n for running things & stuff.\n \"\"\"\n\n result_type = HyExpression\n end_char = \")\"\n\n\nclass Dict(ListeyThing):\n \"\"\"\n This state parses a Hy dict for things.\n \"\"\"\n\n def exit(self):\n self.commit()\n it = iter(self.nodes)\n result = dict(zip(it, it))\n self.result = HyDict(result)\n\n end_char = \"}\"\n\n\nclass String(State):\n \"\"\"\n String state. This will handle stuff like:\n\n (println \"foobar\")\n ^^^^^^^^ -- String\n \"\"\"\n\n def enter(self):\n self.escaped = False\n\n def exit(self):\n self.result = HyString(\"\".join(self.nodes))\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - \" - Idle\n \"\"\"\n if self.escaped:\n self.escaped = False\n if char == \"n\":\n self.nodes.append(\"\\n\")\n return\n if char == \"\\\\\":\n self.nodes.append(\"\\\\\")\n return\n if char == \"\\\"\":\n self.nodes.append(\"\\\"\")\n return\n\n raise LexException(\"Unknown modifier: `%s'\" % (char))\n\n if char == \"\\\"\":\n return Idle\n\n if char == \"\\\\\":\n self.escaped = True\n return\n\n self.nodes.append(char)\n\n\nclass Atom(State):\n \"\"\"\n This state parses integer constants, boolean constants, and symbols\n \"\"\"\n\n def __init__(self, machine):\n State.__init__(self, machine)\n self.initial_buf = ''\n\n def enter(self):\n self.buf = self.initial_buf\n\n def exit(self):\n self.result = _resolve_atom(self.buf)\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - WHITESPACE - Idle\n - ; - Comment\n \"\"\"\n\n if char in WHITESPACE:\n return Idle\n\n if char == \";\":\n return Comment\n\n self.buf += char\n\n\ndef AtomStartingWith(initial_char):\n def AtomFactory(machine):\n state = Atom(machine)\n state.initial_buf = initial_char\n return state\n return AtomFactory\n\n\nclass Idle(State):\n \"\"\"\n Idle state. 
This is the first (and last) thing that we should\n be in.\n \"\"\"\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - ( - Expression\n - [ - List\n - { - Dict\n - \\\" - String\n - ; - Comment\n - # - Hash\n - (default) - Atom\n \"\"\"\n\n if char == \"(\":\n return Expression\n\n if char == \"[\":\n return List\n\n if char == \"{\":\n return Dict\n\n if char == \"\\\"\":\n return String\n\n if char == \";\":\n return Comment\n\n if char == \"#\":\n return Hash\n\n if char in WHITESPACE:\n return\n\n return AtomStartingWith(char)\n\n\nclass Comment(State):\n \"\"\"\n Comment state.\n \"\"\"\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - \\n - Idle\n - (default) - disregard.\n \"\"\"\n\n if char == \"\\n\":\n return Idle\n\n\nclass Hash(State):\n \"\"\"\n Hash state\n \"\"\"\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - ! - Comment\n \"\"\"\n\n if char == \"!\":\n return Comment\n\n raise LexException(\"Unknown char (Hash state): `%s'\" % (char))\n", "path": "hy/lex/states.py"}]}
| 3,420 | 120 |
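The `hy/lex/states.py` payload embedded in the record above drives lexing with small `State` classes whose `process(char)` method either returns the next state class or returns nothing to stay in place. A stripped-down sketch of that dispatch pattern, using made-up class names and none of Hy's model types, might look like this:

```python
# Minimal sketch of a character-driven state-machine dispatch, in the spirit
# of the lexer above; class names here are illustrative, not taken from Hy.
class State:
    def __init__(self, machine):
        self.machine = machine

    def process(self, char):
        """Return the next State class to switch to, or None to stay put."""
        raise NotImplementedError


class Comment(State):
    def process(self, char):
        # A newline ends the comment and hands control back to Idle.
        return Idle if char == "\n" else None


class Idle(State):
    def process(self, char):
        # A semicolon starts a comment; everything else is ignored here.
        return Comment if char == ";" else None


class Machine:
    def __init__(self):
        self.state = Idle(self)

    def feed(self, text):
        for char in text:
            nxt = self.state.process(char)
            if nxt is not None:
                self.state = nxt(self)  # transition by instantiating the class


m = Machine()
m.feed("; skipped\n")
print(type(m.state).__name__)  # -> Idle
```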
gh_patches_debug_32694
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-6324
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider.logger not logging custom extra information
I noticed the implicit behavior of the Spider.logger: when logging with `extra`, the extra fields ultimately do not end up in the log because they are overwritten by the default `process` method of [LoggerAdapter](https://github.com/scrapy/scrapy/blob/master/scrapy/spiders/__init__.py#L47)
Current logic:
```py
>>> self.logger.info("test log", extra={"test": "very important information"})
{"message": "test log", "spider": "spider_name"}
```
Expected logic:
```py
>>> self.logger.info("test log", extra={"test": "very important information"})
{"message": "test log", "spider": "spider_name", "test": "very important information"}
```
--- END ISSUE ---
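For context on the behaviour described in the issue: the stock `logging.LoggerAdapter.process` assigns the adapter's own dict over whatever `extra` the caller passed, so the caller's keys never reach the `LogRecord`. A minimal adapter reproducing that stock behaviour (paraphrased from the standard library, not Scrapy code) is sketched below; merging instead of overwriting is what the fix further down amounts to.

```python
import logging

class DefaultLikeAdapter(logging.LoggerAdapter):
    def process(self, msg, kwargs):
        # Mirrors the default LoggerAdapter.process: the caller's 'extra'
        # is replaced wholesale by the adapter's own dict, which is why
        # {"test": "..."} never shows up on the emitted record.
        kwargs["extra"] = self.extra
        return msg, kwargs
```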
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/spiders/__init__.py`
Content:
```
1 """
2 Base class for Scrapy spiders
3
4 See documentation in docs/topics/spiders.rst
5 """
6
7 from __future__ import annotations
8
9 import logging
10 from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Union, cast
11
12 from twisted.internet.defer import Deferred
13
14 from scrapy import signals
15 from scrapy.http import Request, Response
16 from scrapy.utils.trackref import object_ref
17 from scrapy.utils.url import url_is_from_spider
18
19 if TYPE_CHECKING:
20 # typing.Self requires Python 3.11
21 from typing_extensions import Self
22
23 from scrapy.crawler import Crawler
24 from scrapy.settings import BaseSettings
25
26
27 class Spider(object_ref):
28 """Base class for scrapy spiders. All spiders must inherit from this
29 class.
30 """
31
32 name: str
33 custom_settings: Optional[dict] = None
34
35 def __init__(self, name: Optional[str] = None, **kwargs: Any):
36 if name is not None:
37 self.name = name
38 elif not getattr(self, "name", None):
39 raise ValueError(f"{type(self).__name__} must have a name")
40 self.__dict__.update(kwargs)
41 if not hasattr(self, "start_urls"):
42 self.start_urls: List[str] = []
43
44 @property
45 def logger(self) -> logging.LoggerAdapter:
46 logger = logging.getLogger(self.name)
47 return logging.LoggerAdapter(logger, {"spider": self})
48
49 def log(self, message: Any, level: int = logging.DEBUG, **kw: Any) -> None:
50 """Log the given message at the given log level
51
52 This helper wraps a log call to the logger within the spider, but you
53 can use it directly (e.g. Spider.logger.info('msg')) or use any other
54 Python logger too.
55 """
56 self.logger.log(level, message, **kw)
57
58 @classmethod
59 def from_crawler(cls, crawler: Crawler, *args: Any, **kwargs: Any) -> Self:
60 spider = cls(*args, **kwargs)
61 spider._set_crawler(crawler)
62 return spider
63
64 def _set_crawler(self, crawler: Crawler) -> None:
65 self.crawler = crawler
66 self.settings = crawler.settings
67 crawler.signals.connect(self.close, signals.spider_closed)
68
69 def start_requests(self) -> Iterable[Request]:
70 if not self.start_urls and hasattr(self, "start_url"):
71 raise AttributeError(
72 "Crawling could not start: 'start_urls' not found "
73 "or empty (but found 'start_url' attribute instead, "
74 "did you miss an 's'?)"
75 )
76 for url in self.start_urls:
77 yield Request(url, dont_filter=True)
78
79 def _parse(self, response: Response, **kwargs: Any) -> Any:
80 return self.parse(response, **kwargs)
81
82 def parse(self, response: Response, **kwargs: Any) -> Any:
83 raise NotImplementedError(
84 f"{self.__class__.__name__}.parse callback is not defined"
85 )
86
87 @classmethod
88 def update_settings(cls, settings: BaseSettings) -> None:
89 settings.setdict(cls.custom_settings or {}, priority="spider")
90
91 @classmethod
92 def handles_request(cls, request: Request) -> bool:
93 return url_is_from_spider(request.url, cls)
94
95 @staticmethod
96 def close(spider: Spider, reason: str) -> Union[Deferred, None]:
97 closed = getattr(spider, "closed", None)
98 if callable(closed):
99 return cast(Union[Deferred, None], closed(reason))
100 return None
101
102 def __repr__(self) -> str:
103 return f"<{type(self).__name__} {self.name!r} at 0x{id(self):0x}>"
104
105
106 # Top-level imports
107 from scrapy.spiders.crawl import CrawlSpider, Rule
108 from scrapy.spiders.feed import CSVFeedSpider, XMLFeedSpider
109 from scrapy.spiders.sitemap import SitemapSpider
110
```
Path: `scrapy/utils/log.py`
Content:
```
1 from __future__ import annotations
2
3 import logging
4 import sys
5 from logging.config import dictConfig
6 from types import TracebackType
7 from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type, Union, cast
8
9 from twisted.python import log as twisted_log
10 from twisted.python.failure import Failure
11
12 import scrapy
13 from scrapy.settings import Settings
14 from scrapy.utils.versions import scrapy_components_versions
15
16 if TYPE_CHECKING:
17 from scrapy.crawler import Crawler
18
19 logger = logging.getLogger(__name__)
20
21
22 def failure_to_exc_info(
23 failure: Failure,
24 ) -> Optional[Tuple[Type[BaseException], BaseException, Optional[TracebackType]]]:
25 """Extract exc_info from Failure instances"""
26 if isinstance(failure, Failure):
27 assert failure.type
28 assert failure.value
29 return (
30 failure.type,
31 failure.value,
32 cast(Optional[TracebackType], failure.getTracebackObject()),
33 )
34 return None
35
36
37 class TopLevelFormatter(logging.Filter):
38 """Keep only top level loggers's name (direct children from root) from
39 records.
40
41 This filter will replace Scrapy loggers' names with 'scrapy'. This mimics
42 the old Scrapy log behaviour and helps shortening long names.
43
44 Since it can't be set for just one logger (it won't propagate for its
45 children), it's going to be set in the root handler, with a parametrized
46 ``loggers`` list where it should act.
47 """
48
49 def __init__(self, loggers: Optional[List[str]] = None):
50 self.loggers: List[str] = loggers or []
51
52 def filter(self, record: logging.LogRecord) -> bool:
53 if any(record.name.startswith(logger + ".") for logger in self.loggers):
54 record.name = record.name.split(".", 1)[0]
55 return True
56
57
58 DEFAULT_LOGGING = {
59 "version": 1,
60 "disable_existing_loggers": False,
61 "loggers": {
62 "filelock": {
63 "level": "ERROR",
64 },
65 "hpack": {
66 "level": "ERROR",
67 },
68 "scrapy": {
69 "level": "DEBUG",
70 },
71 "twisted": {
72 "level": "ERROR",
73 },
74 },
75 }
76
77
78 def configure_logging(
79 settings: Union[Settings, dict, None] = None, install_root_handler: bool = True
80 ) -> None:
81 """
82 Initialize logging defaults for Scrapy.
83
84 :param settings: settings used to create and configure a handler for the
85 root logger (default: None).
86 :type settings: dict, :class:`~scrapy.settings.Settings` object or ``None``
87
88 :param install_root_handler: whether to install root logging handler
89 (default: True)
90 :type install_root_handler: bool
91
92 This function does:
93
94 - Route warnings and twisted logging through Python standard logging
95 - Assign DEBUG and ERROR level to Scrapy and Twisted loggers respectively
96 - Route stdout to log if LOG_STDOUT setting is True
97
98 When ``install_root_handler`` is True (default), this function also
99 creates a handler for the root logger according to given settings
100 (see :ref:`topics-logging-settings`). You can override default options
101 using ``settings`` argument. When ``settings`` is empty or None, defaults
102 are used.
103 """
104 if not sys.warnoptions:
105 # Route warnings through python logging
106 logging.captureWarnings(True)
107
108 observer = twisted_log.PythonLoggingObserver("twisted")
109 observer.start()
110
111 dictConfig(DEFAULT_LOGGING)
112
113 if isinstance(settings, dict) or settings is None:
114 settings = Settings(settings)
115
116 if settings.getbool("LOG_STDOUT"):
117 sys.stdout = StreamLogger(logging.getLogger("stdout")) # type: ignore[assignment]
118
119 if install_root_handler:
120 install_scrapy_root_handler(settings)
121
122
123 _scrapy_root_handler: Optional[logging.Handler] = None
124
125
126 def install_scrapy_root_handler(settings: Settings) -> None:
127 global _scrapy_root_handler
128
129 if (
130 _scrapy_root_handler is not None
131 and _scrapy_root_handler in logging.root.handlers
132 ):
133 logging.root.removeHandler(_scrapy_root_handler)
134 logging.root.setLevel(logging.NOTSET)
135 _scrapy_root_handler = _get_handler(settings)
136 logging.root.addHandler(_scrapy_root_handler)
137
138
139 def get_scrapy_root_handler() -> Optional[logging.Handler]:
140 return _scrapy_root_handler
141
142
143 def _get_handler(settings: Settings) -> logging.Handler:
144 """Return a log handler object according to settings"""
145 filename = settings.get("LOG_FILE")
146 handler: logging.Handler
147 if filename:
148 mode = "a" if settings.getbool("LOG_FILE_APPEND") else "w"
149 encoding = settings.get("LOG_ENCODING")
150 handler = logging.FileHandler(filename, mode=mode, encoding=encoding)
151 elif settings.getbool("LOG_ENABLED"):
152 handler = logging.StreamHandler()
153 else:
154 handler = logging.NullHandler()
155
156 formatter = logging.Formatter(
157 fmt=settings.get("LOG_FORMAT"), datefmt=settings.get("LOG_DATEFORMAT")
158 )
159 handler.setFormatter(formatter)
160 handler.setLevel(settings.get("LOG_LEVEL"))
161 if settings.getbool("LOG_SHORT_NAMES"):
162 handler.addFilter(TopLevelFormatter(["scrapy"]))
163 return handler
164
165
166 def log_scrapy_info(settings: Settings) -> None:
167 logger.info(
168 "Scrapy %(version)s started (bot: %(bot)s)",
169 {"version": scrapy.__version__, "bot": settings["BOT_NAME"]},
170 )
171 versions = [
172 f"{name} {version}"
173 for name, version in scrapy_components_versions()
174 if name != "Scrapy"
175 ]
176 logger.info("Versions: %(versions)s", {"versions": ", ".join(versions)})
177
178
179 def log_reactor_info() -> None:
180 from twisted.internet import reactor
181
182 logger.debug("Using reactor: %s.%s", reactor.__module__, reactor.__class__.__name__)
183 from twisted.internet import asyncioreactor
184
185 if isinstance(reactor, asyncioreactor.AsyncioSelectorReactor):
186 logger.debug(
187 "Using asyncio event loop: %s.%s",
188 reactor._asyncioEventloop.__module__,
189 reactor._asyncioEventloop.__class__.__name__,
190 )
191
192
193 class StreamLogger:
194 """Fake file-like stream object that redirects writes to a logger instance
195
196 Taken from:
197 https://www.electricmonk.nl/log/2011/08/14/redirect-stdout-and-stderr-to-a-logger-in-python/
198 """
199
200 def __init__(self, logger: logging.Logger, log_level: int = logging.INFO):
201 self.logger: logging.Logger = logger
202 self.log_level: int = log_level
203 self.linebuf: str = ""
204
205 def write(self, buf: str) -> None:
206 for line in buf.rstrip().splitlines():
207 self.logger.log(self.log_level, line.rstrip())
208
209 def flush(self) -> None:
210 for h in self.logger.handlers:
211 h.flush()
212
213
214 class LogCounterHandler(logging.Handler):
215 """Record log levels count into a crawler stats"""
216
217 def __init__(self, crawler: Crawler, *args: Any, **kwargs: Any):
218 super().__init__(*args, **kwargs)
219 self.crawler: Crawler = crawler
220
221 def emit(self, record: logging.LogRecord) -> None:
222 sname = f"log_count/{record.levelname}"
223 assert self.crawler.stats
224 self.crawler.stats.inc_value(sname)
225
226
227 def logformatter_adapter(logkws: dict) -> Tuple[int, str, dict]:
228 """
229 Helper that takes the dictionary output from the methods in LogFormatter
230 and adapts it into a tuple of positional arguments for logger.log calls,
231 handling backward compatibility as well.
232 """
233
234 level = logkws.get("level", logging.INFO)
235 message = logkws.get("msg") or ""
236 # NOTE: This also handles 'args' being an empty dict, that case doesn't
237 # play well in logger.log calls
238 args = logkws if not logkws.get("args") else logkws["args"]
239
240 return (level, message, args)
241
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/spiders/__init__.py b/scrapy/spiders/__init__.py
--- a/scrapy/spiders/__init__.py
+++ b/scrapy/spiders/__init__.py
@@ -22,6 +22,7 @@
from scrapy.crawler import Crawler
from scrapy.settings import BaseSettings
+ from scrapy.utils.log import SpiderLoggerAdapter
class Spider(object_ref):
@@ -42,9 +43,11 @@
self.start_urls: List[str] = []
@property
- def logger(self) -> logging.LoggerAdapter:
+ def logger(self) -> SpiderLoggerAdapter:
+ from scrapy.utils.log import SpiderLoggerAdapter
+
logger = logging.getLogger(self.name)
- return logging.LoggerAdapter(logger, {"spider": self})
+ return SpiderLoggerAdapter(logger, {"spider": self})
def log(self, message: Any, level: int = logging.DEBUG, **kw: Any) -> None:
"""Log the given message at the given log level
diff --git a/scrapy/utils/log.py b/scrapy/utils/log.py
--- a/scrapy/utils/log.py
+++ b/scrapy/utils/log.py
@@ -4,7 +4,17 @@
import sys
from logging.config import dictConfig
from types import TracebackType
-from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type, Union, cast
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ List,
+ MutableMapping,
+ Optional,
+ Tuple,
+ Type,
+ Union,
+ cast,
+)
from twisted.python import log as twisted_log
from twisted.python.failure import Failure
@@ -238,3 +248,16 @@
args = logkws if not logkws.get("args") else logkws["args"]
return (level, message, args)
+
+
+class SpiderLoggerAdapter(logging.LoggerAdapter):
+ def process(
+ self, msg: str, kwargs: MutableMapping[str, Any]
+ ) -> Tuple[str, MutableMapping[str, Any]]:
+ """Method that augments logging with additional 'extra' data"""
+ if isinstance(kwargs.get("extra"), MutableMapping):
+ kwargs["extra"].update(self.extra)
+ else:
+ kwargs["extra"] = self.extra
+
+ return msg, kwargs
|
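A rough way to exercise the patched behaviour without a Scrapy install is to reproduce the `process` override from the diff above in isolation and check that both the adapter's context and the caller's `extra` land on the emitted `LogRecord`. The handler scaffolding here is illustrative only:

```python
import logging
from collections.abc import MutableMapping

class SpiderLoggerAdapter(logging.LoggerAdapter):
    # process() reproduced in spirit from the diff above so the snippet
    # runs standalone, without Scrapy installed.
    def process(self, msg, kwargs):
        if isinstance(kwargs.get("extra"), MutableMapping):
            kwargs["extra"].update(self.extra)
        else:
            kwargs["extra"] = self.extra
        return msg, kwargs

captured = []

class ListHandler(logging.Handler):
    def emit(self, record):
        captured.append(record)

logger = logging.getLogger("spider_name")
logger.addHandler(ListHandler())
logger.setLevel(logging.INFO)

adapter = SpiderLoggerAdapter(logger, {"spider": "spider_name"})
adapter.info("test log", extra={"test": "very important information"})

record = captured[-1]
assert record.test == "very important information"
assert record.spider == "spider_name"
```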
{"golden_diff": "diff --git a/scrapy/spiders/__init__.py b/scrapy/spiders/__init__.py\n--- a/scrapy/spiders/__init__.py\n+++ b/scrapy/spiders/__init__.py\n@@ -22,6 +22,7 @@\n \n from scrapy.crawler import Crawler\n from scrapy.settings import BaseSettings\n+ from scrapy.utils.log import SpiderLoggerAdapter\n \n \n class Spider(object_ref):\n@@ -42,9 +43,11 @@\n self.start_urls: List[str] = []\n \n @property\n- def logger(self) -> logging.LoggerAdapter:\n+ def logger(self) -> SpiderLoggerAdapter:\n+ from scrapy.utils.log import SpiderLoggerAdapter\n+\n logger = logging.getLogger(self.name)\n- return logging.LoggerAdapter(logger, {\"spider\": self})\n+ return SpiderLoggerAdapter(logger, {\"spider\": self})\n \n def log(self, message: Any, level: int = logging.DEBUG, **kw: Any) -> None:\n \"\"\"Log the given message at the given log level\ndiff --git a/scrapy/utils/log.py b/scrapy/utils/log.py\n--- a/scrapy/utils/log.py\n+++ b/scrapy/utils/log.py\n@@ -4,7 +4,17 @@\n import sys\n from logging.config import dictConfig\n from types import TracebackType\n-from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type, Union, cast\n+from typing import (\n+ TYPE_CHECKING,\n+ Any,\n+ List,\n+ MutableMapping,\n+ Optional,\n+ Tuple,\n+ Type,\n+ Union,\n+ cast,\n+)\n \n from twisted.python import log as twisted_log\n from twisted.python.failure import Failure\n@@ -238,3 +248,16 @@\n args = logkws if not logkws.get(\"args\") else logkws[\"args\"]\n \n return (level, message, args)\n+\n+\n+class SpiderLoggerAdapter(logging.LoggerAdapter):\n+ def process(\n+ self, msg: str, kwargs: MutableMapping[str, Any]\n+ ) -> Tuple[str, MutableMapping[str, Any]]:\n+ \"\"\"Method that augments logging with additional 'extra' data\"\"\"\n+ if isinstance(kwargs.get(\"extra\"), MutableMapping):\n+ kwargs[\"extra\"].update(self.extra)\n+ else:\n+ kwargs[\"extra\"] = self.extra\n+\n+ return msg, kwargs\n", "issue": "Spider.logger not logging custom extra information\nI noticed the implicit behavior of the Spider.logger: when logging with extra, extra ultimately do not end up in the log because they are overwritten by default `process` method of [LoggerAdapter](https://github.com/scrapy/scrapy/blob/master/scrapy/spiders/__init__.py#L47)\r\n\r\nCurrent logic:\r\n```py\r\n>>> self.logger.info(\"test log\", extra={\"test\": \"very important information\"})\r\n{\"message\": \"test log\", \"spider\": \"spider_name\"}\r\n```\r\n\r\n\r\n\r\nExpected logic:\r\n```py\r\n>>> self.logger.info(\"test log\", extra={\"test\": \"very important information\"})\r\n{\"message\": \"test log\", \"spider\": \"spider_name\", \"test\": \"very important information\"}\r\n```\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nBase class for Scrapy spiders\n\nSee documentation in docs/topics/spiders.rst\n\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nfrom typing import TYPE_CHECKING, Any, Iterable, List, Optional, Union, cast\n\nfrom twisted.internet.defer import Deferred\n\nfrom scrapy import signals\nfrom scrapy.http import Request, Response\nfrom scrapy.utils.trackref import object_ref\nfrom scrapy.utils.url import url_is_from_spider\n\nif TYPE_CHECKING:\n # typing.Self requires Python 3.11\n from typing_extensions import Self\n\n from scrapy.crawler import Crawler\n from scrapy.settings import BaseSettings\n\n\nclass Spider(object_ref):\n \"\"\"Base class for scrapy spiders. 
All spiders must inherit from this\n class.\n \"\"\"\n\n name: str\n custom_settings: Optional[dict] = None\n\n def __init__(self, name: Optional[str] = None, **kwargs: Any):\n if name is not None:\n self.name = name\n elif not getattr(self, \"name\", None):\n raise ValueError(f\"{type(self).__name__} must have a name\")\n self.__dict__.update(kwargs)\n if not hasattr(self, \"start_urls\"):\n self.start_urls: List[str] = []\n\n @property\n def logger(self) -> logging.LoggerAdapter:\n logger = logging.getLogger(self.name)\n return logging.LoggerAdapter(logger, {\"spider\": self})\n\n def log(self, message: Any, level: int = logging.DEBUG, **kw: Any) -> None:\n \"\"\"Log the given message at the given log level\n\n This helper wraps a log call to the logger within the spider, but you\n can use it directly (e.g. Spider.logger.info('msg')) or use any other\n Python logger too.\n \"\"\"\n self.logger.log(level, message, **kw)\n\n @classmethod\n def from_crawler(cls, crawler: Crawler, *args: Any, **kwargs: Any) -> Self:\n spider = cls(*args, **kwargs)\n spider._set_crawler(crawler)\n return spider\n\n def _set_crawler(self, crawler: Crawler) -> None:\n self.crawler = crawler\n self.settings = crawler.settings\n crawler.signals.connect(self.close, signals.spider_closed)\n\n def start_requests(self) -> Iterable[Request]:\n if not self.start_urls and hasattr(self, \"start_url\"):\n raise AttributeError(\n \"Crawling could not start: 'start_urls' not found \"\n \"or empty (but found 'start_url' attribute instead, \"\n \"did you miss an 's'?)\"\n )\n for url in self.start_urls:\n yield Request(url, dont_filter=True)\n\n def _parse(self, response: Response, **kwargs: Any) -> Any:\n return self.parse(response, **kwargs)\n\n def parse(self, response: Response, **kwargs: Any) -> Any:\n raise NotImplementedError(\n f\"{self.__class__.__name__}.parse callback is not defined\"\n )\n\n @classmethod\n def update_settings(cls, settings: BaseSettings) -> None:\n settings.setdict(cls.custom_settings or {}, priority=\"spider\")\n\n @classmethod\n def handles_request(cls, request: Request) -> bool:\n return url_is_from_spider(request.url, cls)\n\n @staticmethod\n def close(spider: Spider, reason: str) -> Union[Deferred, None]:\n closed = getattr(spider, \"closed\", None)\n if callable(closed):\n return cast(Union[Deferred, None], closed(reason))\n return None\n\n def __repr__(self) -> str:\n return f\"<{type(self).__name__} {self.name!r} at 0x{id(self):0x}>\"\n\n\n# Top-level imports\nfrom scrapy.spiders.crawl import CrawlSpider, Rule\nfrom scrapy.spiders.feed import CSVFeedSpider, XMLFeedSpider\nfrom scrapy.spiders.sitemap import SitemapSpider\n", "path": "scrapy/spiders/__init__.py"}, {"content": "from __future__ import annotations\n\nimport logging\nimport sys\nfrom logging.config import dictConfig\nfrom types import TracebackType\nfrom typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type, Union, cast\n\nfrom twisted.python import log as twisted_log\nfrom twisted.python.failure import Failure\n\nimport scrapy\nfrom scrapy.settings import Settings\nfrom scrapy.utils.versions import scrapy_components_versions\n\nif TYPE_CHECKING:\n from scrapy.crawler import Crawler\n\nlogger = logging.getLogger(__name__)\n\n\ndef failure_to_exc_info(\n failure: Failure,\n) -> Optional[Tuple[Type[BaseException], BaseException, Optional[TracebackType]]]:\n \"\"\"Extract exc_info from Failure instances\"\"\"\n if isinstance(failure, Failure):\n assert failure.type\n assert failure.value\n return (\n failure.type,\n 
failure.value,\n cast(Optional[TracebackType], failure.getTracebackObject()),\n )\n return None\n\n\nclass TopLevelFormatter(logging.Filter):\n \"\"\"Keep only top level loggers's name (direct children from root) from\n records.\n\n This filter will replace Scrapy loggers' names with 'scrapy'. This mimics\n the old Scrapy log behaviour and helps shortening long names.\n\n Since it can't be set for just one logger (it won't propagate for its\n children), it's going to be set in the root handler, with a parametrized\n ``loggers`` list where it should act.\n \"\"\"\n\n def __init__(self, loggers: Optional[List[str]] = None):\n self.loggers: List[str] = loggers or []\n\n def filter(self, record: logging.LogRecord) -> bool:\n if any(record.name.startswith(logger + \".\") for logger in self.loggers):\n record.name = record.name.split(\".\", 1)[0]\n return True\n\n\nDEFAULT_LOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"loggers\": {\n \"filelock\": {\n \"level\": \"ERROR\",\n },\n \"hpack\": {\n \"level\": \"ERROR\",\n },\n \"scrapy\": {\n \"level\": \"DEBUG\",\n },\n \"twisted\": {\n \"level\": \"ERROR\",\n },\n },\n}\n\n\ndef configure_logging(\n settings: Union[Settings, dict, None] = None, install_root_handler: bool = True\n) -> None:\n \"\"\"\n Initialize logging defaults for Scrapy.\n\n :param settings: settings used to create and configure a handler for the\n root logger (default: None).\n :type settings: dict, :class:`~scrapy.settings.Settings` object or ``None``\n\n :param install_root_handler: whether to install root logging handler\n (default: True)\n :type install_root_handler: bool\n\n This function does:\n\n - Route warnings and twisted logging through Python standard logging\n - Assign DEBUG and ERROR level to Scrapy and Twisted loggers respectively\n - Route stdout to log if LOG_STDOUT setting is True\n\n When ``install_root_handler`` is True (default), this function also\n creates a handler for the root logger according to given settings\n (see :ref:`topics-logging-settings`). You can override default options\n using ``settings`` argument. 
When ``settings`` is empty or None, defaults\n are used.\n \"\"\"\n if not sys.warnoptions:\n # Route warnings through python logging\n logging.captureWarnings(True)\n\n observer = twisted_log.PythonLoggingObserver(\"twisted\")\n observer.start()\n\n dictConfig(DEFAULT_LOGGING)\n\n if isinstance(settings, dict) or settings is None:\n settings = Settings(settings)\n\n if settings.getbool(\"LOG_STDOUT\"):\n sys.stdout = StreamLogger(logging.getLogger(\"stdout\")) # type: ignore[assignment]\n\n if install_root_handler:\n install_scrapy_root_handler(settings)\n\n\n_scrapy_root_handler: Optional[logging.Handler] = None\n\n\ndef install_scrapy_root_handler(settings: Settings) -> None:\n global _scrapy_root_handler\n\n if (\n _scrapy_root_handler is not None\n and _scrapy_root_handler in logging.root.handlers\n ):\n logging.root.removeHandler(_scrapy_root_handler)\n logging.root.setLevel(logging.NOTSET)\n _scrapy_root_handler = _get_handler(settings)\n logging.root.addHandler(_scrapy_root_handler)\n\n\ndef get_scrapy_root_handler() -> Optional[logging.Handler]:\n return _scrapy_root_handler\n\n\ndef _get_handler(settings: Settings) -> logging.Handler:\n \"\"\"Return a log handler object according to settings\"\"\"\n filename = settings.get(\"LOG_FILE\")\n handler: logging.Handler\n if filename:\n mode = \"a\" if settings.getbool(\"LOG_FILE_APPEND\") else \"w\"\n encoding = settings.get(\"LOG_ENCODING\")\n handler = logging.FileHandler(filename, mode=mode, encoding=encoding)\n elif settings.getbool(\"LOG_ENABLED\"):\n handler = logging.StreamHandler()\n else:\n handler = logging.NullHandler()\n\n formatter = logging.Formatter(\n fmt=settings.get(\"LOG_FORMAT\"), datefmt=settings.get(\"LOG_DATEFORMAT\")\n )\n handler.setFormatter(formatter)\n handler.setLevel(settings.get(\"LOG_LEVEL\"))\n if settings.getbool(\"LOG_SHORT_NAMES\"):\n handler.addFilter(TopLevelFormatter([\"scrapy\"]))\n return handler\n\n\ndef log_scrapy_info(settings: Settings) -> None:\n logger.info(\n \"Scrapy %(version)s started (bot: %(bot)s)\",\n {\"version\": scrapy.__version__, \"bot\": settings[\"BOT_NAME\"]},\n )\n versions = [\n f\"{name} {version}\"\n for name, version in scrapy_components_versions()\n if name != \"Scrapy\"\n ]\n logger.info(\"Versions: %(versions)s\", {\"versions\": \", \".join(versions)})\n\n\ndef log_reactor_info() -> None:\n from twisted.internet import reactor\n\n logger.debug(\"Using reactor: %s.%s\", reactor.__module__, reactor.__class__.__name__)\n from twisted.internet import asyncioreactor\n\n if isinstance(reactor, asyncioreactor.AsyncioSelectorReactor):\n logger.debug(\n \"Using asyncio event loop: %s.%s\",\n reactor._asyncioEventloop.__module__,\n reactor._asyncioEventloop.__class__.__name__,\n )\n\n\nclass StreamLogger:\n \"\"\"Fake file-like stream object that redirects writes to a logger instance\n\n Taken from:\n https://www.electricmonk.nl/log/2011/08/14/redirect-stdout-and-stderr-to-a-logger-in-python/\n \"\"\"\n\n def __init__(self, logger: logging.Logger, log_level: int = logging.INFO):\n self.logger: logging.Logger = logger\n self.log_level: int = log_level\n self.linebuf: str = \"\"\n\n def write(self, buf: str) -> None:\n for line in buf.rstrip().splitlines():\n self.logger.log(self.log_level, line.rstrip())\n\n def flush(self) -> None:\n for h in self.logger.handlers:\n h.flush()\n\n\nclass LogCounterHandler(logging.Handler):\n \"\"\"Record log levels count into a crawler stats\"\"\"\n\n def __init__(self, crawler: Crawler, *args: Any, **kwargs: Any):\n super().__init__(*args, 
**kwargs)\n self.crawler: Crawler = crawler\n\n def emit(self, record: logging.LogRecord) -> None:\n sname = f\"log_count/{record.levelname}\"\n assert self.crawler.stats\n self.crawler.stats.inc_value(sname)\n\n\ndef logformatter_adapter(logkws: dict) -> Tuple[int, str, dict]:\n \"\"\"\n Helper that takes the dictionary output from the methods in LogFormatter\n and adapts it into a tuple of positional arguments for logger.log calls,\n handling backward compatibility as well.\n \"\"\"\n\n level = logkws.get(\"level\", logging.INFO)\n message = logkws.get(\"msg\") or \"\"\n # NOTE: This also handles 'args' being an empty dict, that case doesn't\n # play well in logger.log calls\n args = logkws if not logkws.get(\"args\") else logkws[\"args\"]\n\n return (level, message, args)\n", "path": "scrapy/utils/log.py"}], "after_files": [{"content": "\"\"\"\nBase class for Scrapy spiders\n\nSee documentation in docs/topics/spiders.rst\n\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nfrom typing import TYPE_CHECKING, Any, Iterable, List, Optional, Union, cast\n\nfrom twisted.internet.defer import Deferred\n\nfrom scrapy import signals\nfrom scrapy.http import Request, Response\nfrom scrapy.utils.trackref import object_ref\nfrom scrapy.utils.url import url_is_from_spider\n\nif TYPE_CHECKING:\n # typing.Self requires Python 3.11\n from typing_extensions import Self\n\n from scrapy.crawler import Crawler\n from scrapy.settings import BaseSettings\n from scrapy.utils.log import SpiderLoggerAdapter\n\n\nclass Spider(object_ref):\n \"\"\"Base class for scrapy spiders. All spiders must inherit from this\n class.\n \"\"\"\n\n name: str\n custom_settings: Optional[dict] = None\n\n def __init__(self, name: Optional[str] = None, **kwargs: Any):\n if name is not None:\n self.name = name\n elif not getattr(self, \"name\", None):\n raise ValueError(f\"{type(self).__name__} must have a name\")\n self.__dict__.update(kwargs)\n if not hasattr(self, \"start_urls\"):\n self.start_urls: List[str] = []\n\n @property\n def logger(self) -> SpiderLoggerAdapter:\n from scrapy.utils.log import SpiderLoggerAdapter\n\n logger = logging.getLogger(self.name)\n return SpiderLoggerAdapter(logger, {\"spider\": self})\n\n def log(self, message: Any, level: int = logging.DEBUG, **kw: Any) -> None:\n \"\"\"Log the given message at the given log level\n\n This helper wraps a log call to the logger within the spider, but you\n can use it directly (e.g. 
Spider.logger.info('msg')) or use any other\n Python logger too.\n \"\"\"\n self.logger.log(level, message, **kw)\n\n @classmethod\n def from_crawler(cls, crawler: Crawler, *args: Any, **kwargs: Any) -> Self:\n spider = cls(*args, **kwargs)\n spider._set_crawler(crawler)\n return spider\n\n def _set_crawler(self, crawler: Crawler) -> None:\n self.crawler = crawler\n self.settings = crawler.settings\n crawler.signals.connect(self.close, signals.spider_closed)\n\n def start_requests(self) -> Iterable[Request]:\n if not self.start_urls and hasattr(self, \"start_url\"):\n raise AttributeError(\n \"Crawling could not start: 'start_urls' not found \"\n \"or empty (but found 'start_url' attribute instead, \"\n \"did you miss an 's'?)\"\n )\n for url in self.start_urls:\n yield Request(url, dont_filter=True)\n\n def _parse(self, response: Response, **kwargs: Any) -> Any:\n return self.parse(response, **kwargs)\n\n def parse(self, response: Response, **kwargs: Any) -> Any:\n raise NotImplementedError(\n f\"{self.__class__.__name__}.parse callback is not defined\"\n )\n\n @classmethod\n def update_settings(cls, settings: BaseSettings) -> None:\n settings.setdict(cls.custom_settings or {}, priority=\"spider\")\n\n @classmethod\n def handles_request(cls, request: Request) -> bool:\n return url_is_from_spider(request.url, cls)\n\n @staticmethod\n def close(spider: Spider, reason: str) -> Union[Deferred, None]:\n closed = getattr(spider, \"closed\", None)\n if callable(closed):\n return cast(Union[Deferred, None], closed(reason))\n return None\n\n def __repr__(self) -> str:\n return f\"<{type(self).__name__} {self.name!r} at 0x{id(self):0x}>\"\n\n\n# Top-level imports\nfrom scrapy.spiders.crawl import CrawlSpider, Rule\nfrom scrapy.spiders.feed import CSVFeedSpider, XMLFeedSpider\nfrom scrapy.spiders.sitemap import SitemapSpider\n", "path": "scrapy/spiders/__init__.py"}, {"content": "from __future__ import annotations\n\nimport logging\nimport sys\nfrom logging.config import dictConfig\nfrom types import TracebackType\nfrom typing import (\n TYPE_CHECKING,\n Any,\n List,\n MutableMapping,\n Optional,\n Tuple,\n Type,\n Union,\n cast,\n)\n\nfrom twisted.python import log as twisted_log\nfrom twisted.python.failure import Failure\n\nimport scrapy\nfrom scrapy.settings import Settings\nfrom scrapy.utils.versions import scrapy_components_versions\n\nif TYPE_CHECKING:\n from scrapy.crawler import Crawler\n\nlogger = logging.getLogger(__name__)\n\n\ndef failure_to_exc_info(\n failure: Failure,\n) -> Optional[Tuple[Type[BaseException], BaseException, Optional[TracebackType]]]:\n \"\"\"Extract exc_info from Failure instances\"\"\"\n if isinstance(failure, Failure):\n assert failure.type\n assert failure.value\n return (\n failure.type,\n failure.value,\n cast(Optional[TracebackType], failure.getTracebackObject()),\n )\n return None\n\n\nclass TopLevelFormatter(logging.Filter):\n \"\"\"Keep only top level loggers's name (direct children from root) from\n records.\n\n This filter will replace Scrapy loggers' names with 'scrapy'. 
This mimics\n the old Scrapy log behaviour and helps shortening long names.\n\n Since it can't be set for just one logger (it won't propagate for its\n children), it's going to be set in the root handler, with a parametrized\n ``loggers`` list where it should act.\n \"\"\"\n\n def __init__(self, loggers: Optional[List[str]] = None):\n self.loggers: List[str] = loggers or []\n\n def filter(self, record: logging.LogRecord) -> bool:\n if any(record.name.startswith(logger + \".\") for logger in self.loggers):\n record.name = record.name.split(\".\", 1)[0]\n return True\n\n\nDEFAULT_LOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"loggers\": {\n \"filelock\": {\n \"level\": \"ERROR\",\n },\n \"hpack\": {\n \"level\": \"ERROR\",\n },\n \"scrapy\": {\n \"level\": \"DEBUG\",\n },\n \"twisted\": {\n \"level\": \"ERROR\",\n },\n },\n}\n\n\ndef configure_logging(\n settings: Union[Settings, dict, None] = None, install_root_handler: bool = True\n) -> None:\n \"\"\"\n Initialize logging defaults for Scrapy.\n\n :param settings: settings used to create and configure a handler for the\n root logger (default: None).\n :type settings: dict, :class:`~scrapy.settings.Settings` object or ``None``\n\n :param install_root_handler: whether to install root logging handler\n (default: True)\n :type install_root_handler: bool\n\n This function does:\n\n - Route warnings and twisted logging through Python standard logging\n - Assign DEBUG and ERROR level to Scrapy and Twisted loggers respectively\n - Route stdout to log if LOG_STDOUT setting is True\n\n When ``install_root_handler`` is True (default), this function also\n creates a handler for the root logger according to given settings\n (see :ref:`topics-logging-settings`). You can override default options\n using ``settings`` argument. 
When ``settings`` is empty or None, defaults\n are used.\n \"\"\"\n if not sys.warnoptions:\n # Route warnings through python logging\n logging.captureWarnings(True)\n\n observer = twisted_log.PythonLoggingObserver(\"twisted\")\n observer.start()\n\n dictConfig(DEFAULT_LOGGING)\n\n if isinstance(settings, dict) or settings is None:\n settings = Settings(settings)\n\n if settings.getbool(\"LOG_STDOUT\"):\n sys.stdout = StreamLogger(logging.getLogger(\"stdout\")) # type: ignore[assignment]\n\n if install_root_handler:\n install_scrapy_root_handler(settings)\n\n\n_scrapy_root_handler: Optional[logging.Handler] = None\n\n\ndef install_scrapy_root_handler(settings: Settings) -> None:\n global _scrapy_root_handler\n\n if (\n _scrapy_root_handler is not None\n and _scrapy_root_handler in logging.root.handlers\n ):\n logging.root.removeHandler(_scrapy_root_handler)\n logging.root.setLevel(logging.NOTSET)\n _scrapy_root_handler = _get_handler(settings)\n logging.root.addHandler(_scrapy_root_handler)\n\n\ndef get_scrapy_root_handler() -> Optional[logging.Handler]:\n return _scrapy_root_handler\n\n\ndef _get_handler(settings: Settings) -> logging.Handler:\n \"\"\"Return a log handler object according to settings\"\"\"\n filename = settings.get(\"LOG_FILE\")\n handler: logging.Handler\n if filename:\n mode = \"a\" if settings.getbool(\"LOG_FILE_APPEND\") else \"w\"\n encoding = settings.get(\"LOG_ENCODING\")\n handler = logging.FileHandler(filename, mode=mode, encoding=encoding)\n elif settings.getbool(\"LOG_ENABLED\"):\n handler = logging.StreamHandler()\n else:\n handler = logging.NullHandler()\n\n formatter = logging.Formatter(\n fmt=settings.get(\"LOG_FORMAT\"), datefmt=settings.get(\"LOG_DATEFORMAT\")\n )\n handler.setFormatter(formatter)\n handler.setLevel(settings.get(\"LOG_LEVEL\"))\n if settings.getbool(\"LOG_SHORT_NAMES\"):\n handler.addFilter(TopLevelFormatter([\"scrapy\"]))\n return handler\n\n\ndef log_scrapy_info(settings: Settings) -> None:\n logger.info(\n \"Scrapy %(version)s started (bot: %(bot)s)\",\n {\"version\": scrapy.__version__, \"bot\": settings[\"BOT_NAME\"]},\n )\n versions = [\n f\"{name} {version}\"\n for name, version in scrapy_components_versions()\n if name != \"Scrapy\"\n ]\n logger.info(\"Versions: %(versions)s\", {\"versions\": \", \".join(versions)})\n\n\ndef log_reactor_info() -> None:\n from twisted.internet import reactor\n\n logger.debug(\"Using reactor: %s.%s\", reactor.__module__, reactor.__class__.__name__)\n from twisted.internet import asyncioreactor\n\n if isinstance(reactor, asyncioreactor.AsyncioSelectorReactor):\n logger.debug(\n \"Using asyncio event loop: %s.%s\",\n reactor._asyncioEventloop.__module__,\n reactor._asyncioEventloop.__class__.__name__,\n )\n\n\nclass StreamLogger:\n \"\"\"Fake file-like stream object that redirects writes to a logger instance\n\n Taken from:\n https://www.electricmonk.nl/log/2011/08/14/redirect-stdout-and-stderr-to-a-logger-in-python/\n \"\"\"\n\n def __init__(self, logger: logging.Logger, log_level: int = logging.INFO):\n self.logger: logging.Logger = logger\n self.log_level: int = log_level\n self.linebuf: str = \"\"\n\n def write(self, buf: str) -> None:\n for line in buf.rstrip().splitlines():\n self.logger.log(self.log_level, line.rstrip())\n\n def flush(self) -> None:\n for h in self.logger.handlers:\n h.flush()\n\n\nclass LogCounterHandler(logging.Handler):\n \"\"\"Record log levels count into a crawler stats\"\"\"\n\n def __init__(self, crawler: Crawler, *args: Any, **kwargs: Any):\n super().__init__(*args, 
**kwargs)\n self.crawler: Crawler = crawler\n\n def emit(self, record: logging.LogRecord) -> None:\n sname = f\"log_count/{record.levelname}\"\n assert self.crawler.stats\n self.crawler.stats.inc_value(sname)\n\n\ndef logformatter_adapter(logkws: dict) -> Tuple[int, str, dict]:\n \"\"\"\n Helper that takes the dictionary output from the methods in LogFormatter\n and adapts it into a tuple of positional arguments for logger.log calls,\n handling backward compatibility as well.\n \"\"\"\n\n level = logkws.get(\"level\", logging.INFO)\n message = logkws.get(\"msg\") or \"\"\n # NOTE: This also handles 'args' being an empty dict, that case doesn't\n # play well in logger.log calls\n args = logkws if not logkws.get(\"args\") else logkws[\"args\"]\n\n return (level, message, args)\n\n\nclass SpiderLoggerAdapter(logging.LoggerAdapter):\n def process(\n self, msg: str, kwargs: MutableMapping[str, Any]\n ) -> Tuple[str, MutableMapping[str, Any]]:\n \"\"\"Method that augments logging with additional 'extra' data\"\"\"\n if isinstance(kwargs.get(\"extra\"), MutableMapping):\n kwargs[\"extra\"].update(self.extra)\n else:\n kwargs[\"extra\"] = self.extra\n\n return msg, kwargs\n", "path": "scrapy/utils/log.py"}]}
| 3,924 | 521 |
gh_patches_debug_24846
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-952
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Flask: Capture request/response headers as span attributes
part of #906
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # Note: This package is not named "flask" because of
16 # https://github.com/PyCQA/pylint/issues/2648
17
18 """
19 This library builds on the OpenTelemetry WSGI middleware to track web requests
20 in Flask applications. In addition to opentelemetry-util-http, it
21 supports Flask-specific features such as:
22
23 * The Flask url rule pattern is used as the Span name.
24 * The ``http.route`` Span attribute is set so that one can see which URL rule
25 matched a request.
26
27 Usage
28 -----
29
30 .. code-block:: python
31
32 from flask import Flask
33 from opentelemetry.instrumentation.flask import FlaskInstrumentor
34
35 app = Flask(__name__)
36
37 FlaskInstrumentor().instrument_app(app)
38
39 @app.route("/")
40 def hello():
41 return "Hello!"
42
43 if __name__ == "__main__":
44 app.run(debug=True)
45
46 Configuration
47 -------------
48
49 Exclude lists
50 *************
51 To exclude certain URLs from being tracked, set the environment variable ``OTEL_PYTHON_FLASK_EXCLUDED_URLS``
52 (or ``OTEL_PYTHON_EXCLUDED_URLS`` as fallback) with comma delimited regexes representing which URLs to exclude.
53
54 For example,
55
56 ::
57
58 export OTEL_PYTHON_FLASK_EXCLUDED_URLS="client/.*/info,healthcheck"
59
60 will exclude requests such as ``https://site/client/123/info`` and ``https://site/xyz/healthcheck``.
61
62 You can also pass the comma delimited regexes to the ``instrument_app`` method directly:
63
64 .. code-block:: python
65
66 FlaskInstrumentor().instrument_app(app, excluded_urls="client/.*/info,healthcheck")
67
68 Request/Response hooks
69 **********************
70
71 Utilize request/reponse hooks to execute custom logic to be performed before/after performing a request. Environ is an instance of WSGIEnvironment (flask.request.environ).
72 Response_headers is a list of key-value (tuples) representing the response headers returned from the response.
73
74 .. code-block:: python
75
76 def request_hook(span: Span, environ: WSGIEnvironment):
77 if span and span.is_recording():
78 span.set_attribute("custom_user_attribute_from_request_hook", "some-value")
79
80 def response_hook(span: Span, status: str, response_headers: List):
81 if span and span.is_recording():
82 span.set_attribute("custom_user_attribute_from_response_hook", "some-value")
83
84 FlaskInstrumentation().instrument(request_hook=request_hook, response_hook=response_hook)
85
86 Flask Request object reference: https://flask.palletsprojects.com/en/2.0.x/api/#flask.Request
87
88 API
89 ---
90 """
91
92 from logging import getLogger
93 from typing import Collection
94
95 import flask
96
97 import opentelemetry.instrumentation.wsgi as otel_wsgi
98 from opentelemetry import context, trace
99 from opentelemetry.instrumentation.flask.package import _instruments
100 from opentelemetry.instrumentation.flask.version import __version__
101 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
102 from opentelemetry.instrumentation.propagators import (
103 get_global_response_propagator,
104 )
105 from opentelemetry.instrumentation.utils import _start_internal_or_server_span
106 from opentelemetry.semconv.trace import SpanAttributes
107 from opentelemetry.util._time import _time_ns
108 from opentelemetry.util.http import get_excluded_urls, parse_excluded_urls
109
110 _logger = getLogger(__name__)
111
112 _ENVIRON_STARTTIME_KEY = "opentelemetry-flask.starttime_key"
113 _ENVIRON_SPAN_KEY = "opentelemetry-flask.span_key"
114 _ENVIRON_ACTIVATION_KEY = "opentelemetry-flask.activation_key"
115 _ENVIRON_TOKEN = "opentelemetry-flask.token"
116
117
118 _excluded_urls_from_env = get_excluded_urls("FLASK")
119
120
121 def get_default_span_name():
122 span_name = ""
123 try:
124 span_name = flask.request.url_rule.rule
125 except AttributeError:
126 span_name = otel_wsgi.get_default_span_name(flask.request.environ)
127 return span_name
128
129
130 def _rewrapped_app(wsgi_app, response_hook=None, excluded_urls=None):
131 def _wrapped_app(wrapped_app_environ, start_response):
132 # We want to measure the time for route matching, etc.
133 # In theory, we could start the span here and use
134 # update_name later but that API is "highly discouraged" so
135 # we better avoid it.
136 wrapped_app_environ[_ENVIRON_STARTTIME_KEY] = _time_ns()
137
138 def _start_response(status, response_headers, *args, **kwargs):
139 if flask.request and (
140 excluded_urls is None
141 or not excluded_urls.url_disabled(flask.request.url)
142 ):
143 span = flask.request.environ.get(_ENVIRON_SPAN_KEY)
144
145 propagator = get_global_response_propagator()
146 if propagator:
147 propagator.inject(
148 response_headers,
149 setter=otel_wsgi.default_response_propagation_setter,
150 )
151
152 if span:
153 otel_wsgi.add_response_attributes(
154 span, status, response_headers
155 )
156 else:
157 _logger.warning(
158 "Flask environ's OpenTelemetry span "
159 "missing at _start_response(%s)",
160 status,
161 )
162 if response_hook is not None:
163 response_hook(span, status, response_headers)
164 return start_response(status, response_headers, *args, **kwargs)
165
166 return wsgi_app(wrapped_app_environ, _start_response)
167
168 return _wrapped_app
169
170
171 def _wrapped_before_request(
172 request_hook=None, tracer=None, excluded_urls=None
173 ):
174 def _before_request():
175 if excluded_urls and excluded_urls.url_disabled(flask.request.url):
176 return
177 flask_request_environ = flask.request.environ
178 span_name = get_default_span_name()
179
180 span, token = _start_internal_or_server_span(
181 tracer=tracer,
182 span_name=span_name,
183 start_time=flask_request_environ.get(_ENVIRON_STARTTIME_KEY),
184 context_carrier=flask_request_environ,
185 context_getter=otel_wsgi.wsgi_getter,
186 )
187
188 if request_hook:
189 request_hook(span, flask_request_environ)
190
191 if span.is_recording():
192 attributes = otel_wsgi.collect_request_attributes(
193 flask_request_environ
194 )
195 if flask.request.url_rule:
196 # For 404 that result from no route found, etc, we
197 # don't have a url_rule.
198 attributes[
199 SpanAttributes.HTTP_ROUTE
200 ] = flask.request.url_rule.rule
201 for key, value in attributes.items():
202 span.set_attribute(key, value)
203
204 activation = trace.use_span(span, end_on_exit=True)
205 activation.__enter__() # pylint: disable=E1101
206 flask_request_environ[_ENVIRON_ACTIVATION_KEY] = activation
207 flask_request_environ[_ENVIRON_SPAN_KEY] = span
208 flask_request_environ[_ENVIRON_TOKEN] = token
209
210 return _before_request
211
212
213 def _wrapped_teardown_request(excluded_urls=None):
214 def _teardown_request(exc):
215 # pylint: disable=E1101
216 if excluded_urls and excluded_urls.url_disabled(flask.request.url):
217 return
218
219 activation = flask.request.environ.get(_ENVIRON_ACTIVATION_KEY)
220 if not activation:
221 # This request didn't start a span, maybe because it was created in
222 # a way that doesn't run `before_request`, like when it is created
223 # with `app.test_request_context`.
224 return
225
226 if exc is None:
227 activation.__exit__(None, None, None)
228 else:
229 activation.__exit__(
230 type(exc), exc, getattr(exc, "__traceback__", None)
231 )
232
233 if flask.request.environ.get(_ENVIRON_TOKEN, None):
234 context.detach(flask.request.environ.get(_ENVIRON_TOKEN))
235
236 return _teardown_request
237
238
239 class _InstrumentedFlask(flask.Flask):
240
241 _excluded_urls = None
242 _tracer_provider = None
243 _request_hook = None
244 _response_hook = None
245
246 def __init__(self, *args, **kwargs):
247 super().__init__(*args, **kwargs)
248
249 self._original_wsgi_app = self.wsgi_app
250 self._is_instrumented_by_opentelemetry = True
251
252 self.wsgi_app = _rewrapped_app(
253 self.wsgi_app,
254 _InstrumentedFlask._response_hook,
255 excluded_urls=_InstrumentedFlask._excluded_urls,
256 )
257
258 tracer = trace.get_tracer(
259 __name__, __version__, _InstrumentedFlask._tracer_provider
260 )
261
262 _before_request = _wrapped_before_request(
263 _InstrumentedFlask._request_hook,
264 tracer,
265 excluded_urls=_InstrumentedFlask._excluded_urls,
266 )
267 self._before_request = _before_request
268 self.before_request(_before_request)
269
270 _teardown_request = _wrapped_teardown_request(
271 excluded_urls=_InstrumentedFlask._excluded_urls,
272 )
273 self.teardown_request(_teardown_request)
274
275
276 class FlaskInstrumentor(BaseInstrumentor):
277 # pylint: disable=protected-access,attribute-defined-outside-init
278 """An instrumentor for flask.Flask
279
280 See `BaseInstrumentor`
281 """
282
283 def instrumentation_dependencies(self) -> Collection[str]:
284 return _instruments
285
286 def _instrument(self, **kwargs):
287 self._original_flask = flask.Flask
288 request_hook = kwargs.get("request_hook")
289 response_hook = kwargs.get("response_hook")
290 if callable(request_hook):
291 _InstrumentedFlask._request_hook = request_hook
292 if callable(response_hook):
293 _InstrumentedFlask._response_hook = response_hook
294 tracer_provider = kwargs.get("tracer_provider")
295 _InstrumentedFlask._tracer_provider = tracer_provider
296 excluded_urls = kwargs.get("excluded_urls")
297 _InstrumentedFlask._excluded_urls = (
298 _excluded_urls_from_env
299 if excluded_urls is None
300 else parse_excluded_urls(excluded_urls)
301 )
302 flask.Flask = _InstrumentedFlask
303
304 def _uninstrument(self, **kwargs):
305 flask.Flask = self._original_flask
306
307 @staticmethod
308 def instrument_app(
309 app,
310 request_hook=None,
311 response_hook=None,
312 tracer_provider=None,
313 excluded_urls=None,
314 ):
315 if not hasattr(app, "_is_instrumented_by_opentelemetry"):
316 app._is_instrumented_by_opentelemetry = False
317
318 if not app._is_instrumented_by_opentelemetry:
319 excluded_urls = (
320 parse_excluded_urls(excluded_urls)
321 if excluded_urls is not None
322 else _excluded_urls_from_env
323 )
324 app._original_wsgi_app = app.wsgi_app
325 app.wsgi_app = _rewrapped_app(
326 app.wsgi_app, response_hook, excluded_urls=excluded_urls
327 )
328
329 tracer = trace.get_tracer(__name__, __version__, tracer_provider)
330
331 _before_request = _wrapped_before_request(
332 request_hook,
333 tracer,
334 excluded_urls=excluded_urls,
335 )
336 app._before_request = _before_request
337 app.before_request(_before_request)
338
339 _teardown_request = _wrapped_teardown_request(
340 excluded_urls=excluded_urls,
341 )
342 app._teardown_request = _teardown_request
343 app.teardown_request(_teardown_request)
344 app._is_instrumented_by_opentelemetry = True
345 else:
346 _logger.warning(
347 "Attempting to instrument Flask app while already instrumented"
348 )
349
350 @staticmethod
351 def uninstrument_app(app):
352 if hasattr(app, "_original_wsgi_app"):
353 app.wsgi_app = app._original_wsgi_app
354
355 # FIXME add support for other Flask blueprints that are not None
356 app.before_request_funcs[None].remove(app._before_request)
357 app.teardown_request_funcs[None].remove(app._teardown_request)
358 del app._original_wsgi_app
359 app._is_instrumented_by_opentelemetry = False
360 else:
361 _logger.warning(
362 "Attempting to uninstrument Flask "
363 "app while already uninstrumented"
364 )
365
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py b/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py
@@ -153,6 +153,10 @@
otel_wsgi.add_response_attributes(
span, status, response_headers
)
+ if span.kind == trace.SpanKind.SERVER:
+ otel_wsgi.add_custom_response_headers(
+ span, response_headers
+ )
else:
_logger.warning(
"Flask environ's OpenTelemetry span "
@@ -200,6 +204,10 @@
] = flask.request.url_rule.rule
for key, value in attributes.items():
span.set_attribute(key, value)
+ if span.kind == trace.SpanKind.SERVER:
+ otel_wsgi.add_custom_request_headers(
+ span, flask_request_environ
+ )
activation = trace.use_span(span, end_on_exit=True)
activation.__enter__() # pylint: disable=E1101
|
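Independent of the built-in header capture that the diff above wires in, the request/response hooks documented in the module docstring can already copy selected headers onto the span. The attribute names prefixed with `demo.` below are ad hoc choices for illustration, not semantic-convention names:

```python
from flask import Flask
from opentelemetry.instrumentation.flask import FlaskInstrumentor

def request_hook(span, environ):
    # WSGI exposes incoming request headers as HTTP_* keys in the environ dict.
    if span and span.is_recording():
        ua = environ.get("HTTP_USER_AGENT")
        if ua is not None:
            span.set_attribute("demo.request.header.user_agent", ua)

def response_hook(span, status, response_headers):
    # response_headers is a list of (name, value) tuples.
    if span and span.is_recording():
        for name, value in response_headers:
            if name.lower() == "content-type":
                span.set_attribute("demo.response.header.content_type", value)

app = Flask(__name__)
FlaskInstrumentor().instrument_app(
    app, request_hook=request_hook, response_hook=response_hook
)
```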
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py b/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py\n@@ -153,6 +153,10 @@\n otel_wsgi.add_response_attributes(\n span, status, response_headers\n )\n+ if span.kind == trace.SpanKind.SERVER:\n+ otel_wsgi.add_custom_response_headers(\n+ span, response_headers\n+ )\n else:\n _logger.warning(\n \"Flask environ's OpenTelemetry span \"\n@@ -200,6 +204,10 @@\n ] = flask.request.url_rule.rule\n for key, value in attributes.items():\n span.set_attribute(key, value)\n+ if span.kind == trace.SpanKind.SERVER:\n+ otel_wsgi.add_custom_request_headers(\n+ span, flask_request_environ\n+ )\n \n activation = trace.use_span(span, end_on_exit=True)\n activation.__enter__() # pylint: disable=E1101\n", "issue": "Flask: Capture request/response headers as span attributes\npart of #906 \r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Note: This package is not named \"flask\" because of\n# https://github.com/PyCQA/pylint/issues/2648\n\n\"\"\"\nThis library builds on the OpenTelemetry WSGI middleware to track web requests\nin Flask applications. In addition to opentelemetry-util-http, it\nsupports Flask-specific features such as:\n\n* The Flask url rule pattern is used as the Span name.\n* The ``http.route`` Span attribute is set so that one can see which URL rule\n matched a request.\n\nUsage\n-----\n\n.. code-block:: python\n\n from flask import Flask\n from opentelemetry.instrumentation.flask import FlaskInstrumentor\n\n app = Flask(__name__)\n\n FlaskInstrumentor().instrument_app(app)\n\n @app.route(\"/\")\n def hello():\n return \"Hello!\"\n\n if __name__ == \"__main__\":\n app.run(debug=True)\n\nConfiguration\n-------------\n\nExclude lists\n*************\nTo exclude certain URLs from being tracked, set the environment variable ``OTEL_PYTHON_FLASK_EXCLUDED_URLS``\n(or ``OTEL_PYTHON_EXCLUDED_URLS`` as fallback) with comma delimited regexes representing which URLs to exclude.\n\nFor example,\n\n::\n\n export OTEL_PYTHON_FLASK_EXCLUDED_URLS=\"client/.*/info,healthcheck\"\n\nwill exclude requests such as ``https://site/client/123/info`` and ``https://site/xyz/healthcheck``.\n\nYou can also pass the comma delimited regexes to the ``instrument_app`` method directly:\n\n.. code-block:: python\n\n FlaskInstrumentor().instrument_app(app, excluded_urls=\"client/.*/info,healthcheck\")\n\nRequest/Response hooks\n**********************\n\nUtilize request/reponse hooks to execute custom logic to be performed before/after performing a request. 
Environ is an instance of WSGIEnvironment (flask.request.environ).\nResponse_headers is a list of key-value (tuples) representing the response headers returned from the response.\n\n.. code-block:: python\n\n def request_hook(span: Span, environ: WSGIEnvironment):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_request_hook\", \"some-value\")\n\n def response_hook(span: Span, status: str, response_headers: List):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_response_hook\", \"some-value\")\n\n FlaskInstrumentation().instrument(request_hook=request_hook, response_hook=response_hook)\n\nFlask Request object reference: https://flask.palletsprojects.com/en/2.0.x/api/#flask.Request\n\nAPI\n---\n\"\"\"\n\nfrom logging import getLogger\nfrom typing import Collection\n\nimport flask\n\nimport opentelemetry.instrumentation.wsgi as otel_wsgi\nfrom opentelemetry import context, trace\nfrom opentelemetry.instrumentation.flask.package import _instruments\nfrom opentelemetry.instrumentation.flask.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.propagators import (\n get_global_response_propagator,\n)\nfrom opentelemetry.instrumentation.utils import _start_internal_or_server_span\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.util._time import _time_ns\nfrom opentelemetry.util.http import get_excluded_urls, parse_excluded_urls\n\n_logger = getLogger(__name__)\n\n_ENVIRON_STARTTIME_KEY = \"opentelemetry-flask.starttime_key\"\n_ENVIRON_SPAN_KEY = \"opentelemetry-flask.span_key\"\n_ENVIRON_ACTIVATION_KEY = \"opentelemetry-flask.activation_key\"\n_ENVIRON_TOKEN = \"opentelemetry-flask.token\"\n\n\n_excluded_urls_from_env = get_excluded_urls(\"FLASK\")\n\n\ndef get_default_span_name():\n span_name = \"\"\n try:\n span_name = flask.request.url_rule.rule\n except AttributeError:\n span_name = otel_wsgi.get_default_span_name(flask.request.environ)\n return span_name\n\n\ndef _rewrapped_app(wsgi_app, response_hook=None, excluded_urls=None):\n def _wrapped_app(wrapped_app_environ, start_response):\n # We want to measure the time for route matching, etc.\n # In theory, we could start the span here and use\n # update_name later but that API is \"highly discouraged\" so\n # we better avoid it.\n wrapped_app_environ[_ENVIRON_STARTTIME_KEY] = _time_ns()\n\n def _start_response(status, response_headers, *args, **kwargs):\n if flask.request and (\n excluded_urls is None\n or not excluded_urls.url_disabled(flask.request.url)\n ):\n span = flask.request.environ.get(_ENVIRON_SPAN_KEY)\n\n propagator = get_global_response_propagator()\n if propagator:\n propagator.inject(\n response_headers,\n setter=otel_wsgi.default_response_propagation_setter,\n )\n\n if span:\n otel_wsgi.add_response_attributes(\n span, status, response_headers\n )\n else:\n _logger.warning(\n \"Flask environ's OpenTelemetry span \"\n \"missing at _start_response(%s)\",\n status,\n )\n if response_hook is not None:\n response_hook(span, status, response_headers)\n return start_response(status, response_headers, *args, **kwargs)\n\n return wsgi_app(wrapped_app_environ, _start_response)\n\n return _wrapped_app\n\n\ndef _wrapped_before_request(\n request_hook=None, tracer=None, excluded_urls=None\n):\n def _before_request():\n if excluded_urls and excluded_urls.url_disabled(flask.request.url):\n return\n flask_request_environ = flask.request.environ\n span_name = 
get_default_span_name()\n\n span, token = _start_internal_or_server_span(\n tracer=tracer,\n span_name=span_name,\n start_time=flask_request_environ.get(_ENVIRON_STARTTIME_KEY),\n context_carrier=flask_request_environ,\n context_getter=otel_wsgi.wsgi_getter,\n )\n\n if request_hook:\n request_hook(span, flask_request_environ)\n\n if span.is_recording():\n attributes = otel_wsgi.collect_request_attributes(\n flask_request_environ\n )\n if flask.request.url_rule:\n # For 404 that result from no route found, etc, we\n # don't have a url_rule.\n attributes[\n SpanAttributes.HTTP_ROUTE\n ] = flask.request.url_rule.rule\n for key, value in attributes.items():\n span.set_attribute(key, value)\n\n activation = trace.use_span(span, end_on_exit=True)\n activation.__enter__() # pylint: disable=E1101\n flask_request_environ[_ENVIRON_ACTIVATION_KEY] = activation\n flask_request_environ[_ENVIRON_SPAN_KEY] = span\n flask_request_environ[_ENVIRON_TOKEN] = token\n\n return _before_request\n\n\ndef _wrapped_teardown_request(excluded_urls=None):\n def _teardown_request(exc):\n # pylint: disable=E1101\n if excluded_urls and excluded_urls.url_disabled(flask.request.url):\n return\n\n activation = flask.request.environ.get(_ENVIRON_ACTIVATION_KEY)\n if not activation:\n # This request didn't start a span, maybe because it was created in\n # a way that doesn't run `before_request`, like when it is created\n # with `app.test_request_context`.\n return\n\n if exc is None:\n activation.__exit__(None, None, None)\n else:\n activation.__exit__(\n type(exc), exc, getattr(exc, \"__traceback__\", None)\n )\n\n if flask.request.environ.get(_ENVIRON_TOKEN, None):\n context.detach(flask.request.environ.get(_ENVIRON_TOKEN))\n\n return _teardown_request\n\n\nclass _InstrumentedFlask(flask.Flask):\n\n _excluded_urls = None\n _tracer_provider = None\n _request_hook = None\n _response_hook = None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self._original_wsgi_app = self.wsgi_app\n self._is_instrumented_by_opentelemetry = True\n\n self.wsgi_app = _rewrapped_app(\n self.wsgi_app,\n _InstrumentedFlask._response_hook,\n excluded_urls=_InstrumentedFlask._excluded_urls,\n )\n\n tracer = trace.get_tracer(\n __name__, __version__, _InstrumentedFlask._tracer_provider\n )\n\n _before_request = _wrapped_before_request(\n _InstrumentedFlask._request_hook,\n tracer,\n excluded_urls=_InstrumentedFlask._excluded_urls,\n )\n self._before_request = _before_request\n self.before_request(_before_request)\n\n _teardown_request = _wrapped_teardown_request(\n excluded_urls=_InstrumentedFlask._excluded_urls,\n )\n self.teardown_request(_teardown_request)\n\n\nclass FlaskInstrumentor(BaseInstrumentor):\n # pylint: disable=protected-access,attribute-defined-outside-init\n \"\"\"An instrumentor for flask.Flask\n\n See `BaseInstrumentor`\n \"\"\"\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n self._original_flask = flask.Flask\n request_hook = kwargs.get(\"request_hook\")\n response_hook = kwargs.get(\"response_hook\")\n if callable(request_hook):\n _InstrumentedFlask._request_hook = request_hook\n if callable(response_hook):\n _InstrumentedFlask._response_hook = response_hook\n tracer_provider = kwargs.get(\"tracer_provider\")\n _InstrumentedFlask._tracer_provider = tracer_provider\n excluded_urls = kwargs.get(\"excluded_urls\")\n _InstrumentedFlask._excluded_urls = (\n _excluded_urls_from_env\n if excluded_urls is None\n else 
parse_excluded_urls(excluded_urls)\n )\n flask.Flask = _InstrumentedFlask\n\n def _uninstrument(self, **kwargs):\n flask.Flask = self._original_flask\n\n @staticmethod\n def instrument_app(\n app,\n request_hook=None,\n response_hook=None,\n tracer_provider=None,\n excluded_urls=None,\n ):\n if not hasattr(app, \"_is_instrumented_by_opentelemetry\"):\n app._is_instrumented_by_opentelemetry = False\n\n if not app._is_instrumented_by_opentelemetry:\n excluded_urls = (\n parse_excluded_urls(excluded_urls)\n if excluded_urls is not None\n else _excluded_urls_from_env\n )\n app._original_wsgi_app = app.wsgi_app\n app.wsgi_app = _rewrapped_app(\n app.wsgi_app, response_hook, excluded_urls=excluded_urls\n )\n\n tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n\n _before_request = _wrapped_before_request(\n request_hook,\n tracer,\n excluded_urls=excluded_urls,\n )\n app._before_request = _before_request\n app.before_request(_before_request)\n\n _teardown_request = _wrapped_teardown_request(\n excluded_urls=excluded_urls,\n )\n app._teardown_request = _teardown_request\n app.teardown_request(_teardown_request)\n app._is_instrumented_by_opentelemetry = True\n else:\n _logger.warning(\n \"Attempting to instrument Flask app while already instrumented\"\n )\n\n @staticmethod\n def uninstrument_app(app):\n if hasattr(app, \"_original_wsgi_app\"):\n app.wsgi_app = app._original_wsgi_app\n\n # FIXME add support for other Flask blueprints that are not None\n app.before_request_funcs[None].remove(app._before_request)\n app.teardown_request_funcs[None].remove(app._teardown_request)\n del app._original_wsgi_app\n app._is_instrumented_by_opentelemetry = False\n else:\n _logger.warning(\n \"Attempting to uninstrument Flask \"\n \"app while already uninstrumented\"\n )\n", "path": "instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Note: This package is not named \"flask\" because of\n# https://github.com/PyCQA/pylint/issues/2648\n\n\"\"\"\nThis library builds on the OpenTelemetry WSGI middleware to track web requests\nin Flask applications. In addition to opentelemetry-util-http, it\nsupports Flask-specific features such as:\n\n* The Flask url rule pattern is used as the Span name.\n* The ``http.route`` Span attribute is set so that one can see which URL rule\n matched a request.\n\nUsage\n-----\n\n.. 
code-block:: python\n\n from flask import Flask\n from opentelemetry.instrumentation.flask import FlaskInstrumentor\n\n app = Flask(__name__)\n\n FlaskInstrumentor().instrument_app(app)\n\n @app.route(\"/\")\n def hello():\n return \"Hello!\"\n\n if __name__ == \"__main__\":\n app.run(debug=True)\n\nConfiguration\n-------------\n\nExclude lists\n*************\nTo exclude certain URLs from being tracked, set the environment variable ``OTEL_PYTHON_FLASK_EXCLUDED_URLS``\n(or ``OTEL_PYTHON_EXCLUDED_URLS`` as fallback) with comma delimited regexes representing which URLs to exclude.\n\nFor example,\n\n::\n\n export OTEL_PYTHON_FLASK_EXCLUDED_URLS=\"client/.*/info,healthcheck\"\n\nwill exclude requests such as ``https://site/client/123/info`` and ``https://site/xyz/healthcheck``.\n\nYou can also pass the comma delimited regexes to the ``instrument_app`` method directly:\n\n.. code-block:: python\n\n FlaskInstrumentor().instrument_app(app, excluded_urls=\"client/.*/info,healthcheck\")\n\nRequest/Response hooks\n**********************\n\nUtilize request/reponse hooks to execute custom logic to be performed before/after performing a request. Environ is an instance of WSGIEnvironment (flask.request.environ).\nResponse_headers is a list of key-value (tuples) representing the response headers returned from the response.\n\n.. code-block:: python\n\n def request_hook(span: Span, environ: WSGIEnvironment):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_request_hook\", \"some-value\")\n\n def response_hook(span: Span, status: str, response_headers: List):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_response_hook\", \"some-value\")\n\n FlaskInstrumentation().instrument(request_hook=request_hook, response_hook=response_hook)\n\nFlask Request object reference: https://flask.palletsprojects.com/en/2.0.x/api/#flask.Request\n\nAPI\n---\n\"\"\"\n\nfrom logging import getLogger\nfrom typing import Collection\n\nimport flask\n\nimport opentelemetry.instrumentation.wsgi as otel_wsgi\nfrom opentelemetry import context, trace\nfrom opentelemetry.instrumentation.flask.package import _instruments\nfrom opentelemetry.instrumentation.flask.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.propagators import (\n get_global_response_propagator,\n)\nfrom opentelemetry.instrumentation.utils import _start_internal_or_server_span\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.util._time import _time_ns\nfrom opentelemetry.util.http import get_excluded_urls, parse_excluded_urls\n\n_logger = getLogger(__name__)\n\n_ENVIRON_STARTTIME_KEY = \"opentelemetry-flask.starttime_key\"\n_ENVIRON_SPAN_KEY = \"opentelemetry-flask.span_key\"\n_ENVIRON_ACTIVATION_KEY = \"opentelemetry-flask.activation_key\"\n_ENVIRON_TOKEN = \"opentelemetry-flask.token\"\n\n\n_excluded_urls_from_env = get_excluded_urls(\"FLASK\")\n\n\ndef get_default_span_name():\n span_name = \"\"\n try:\n span_name = flask.request.url_rule.rule\n except AttributeError:\n span_name = otel_wsgi.get_default_span_name(flask.request.environ)\n return span_name\n\n\ndef _rewrapped_app(wsgi_app, response_hook=None, excluded_urls=None):\n def _wrapped_app(wrapped_app_environ, start_response):\n # We want to measure the time for route matching, etc.\n # In theory, we could start the span here and use\n # update_name later but that API is \"highly discouraged\" so\n # we better avoid it.\n 
wrapped_app_environ[_ENVIRON_STARTTIME_KEY] = _time_ns()\n\n def _start_response(status, response_headers, *args, **kwargs):\n if flask.request and (\n excluded_urls is None\n or not excluded_urls.url_disabled(flask.request.url)\n ):\n span = flask.request.environ.get(_ENVIRON_SPAN_KEY)\n\n propagator = get_global_response_propagator()\n if propagator:\n propagator.inject(\n response_headers,\n setter=otel_wsgi.default_response_propagation_setter,\n )\n\n if span:\n otel_wsgi.add_response_attributes(\n span, status, response_headers\n )\n if span.kind == trace.SpanKind.SERVER:\n otel_wsgi.add_custom_response_headers(\n span, response_headers\n )\n else:\n _logger.warning(\n \"Flask environ's OpenTelemetry span \"\n \"missing at _start_response(%s)\",\n status,\n )\n if response_hook is not None:\n response_hook(span, status, response_headers)\n return start_response(status, response_headers, *args, **kwargs)\n\n return wsgi_app(wrapped_app_environ, _start_response)\n\n return _wrapped_app\n\n\ndef _wrapped_before_request(\n request_hook=None, tracer=None, excluded_urls=None\n):\n def _before_request():\n if excluded_urls and excluded_urls.url_disabled(flask.request.url):\n return\n flask_request_environ = flask.request.environ\n span_name = get_default_span_name()\n\n span, token = _start_internal_or_server_span(\n tracer=tracer,\n span_name=span_name,\n start_time=flask_request_environ.get(_ENVIRON_STARTTIME_KEY),\n context_carrier=flask_request_environ,\n context_getter=otel_wsgi.wsgi_getter,\n )\n\n if request_hook:\n request_hook(span, flask_request_environ)\n\n if span.is_recording():\n attributes = otel_wsgi.collect_request_attributes(\n flask_request_environ\n )\n if flask.request.url_rule:\n # For 404 that result from no route found, etc, we\n # don't have a url_rule.\n attributes[\n SpanAttributes.HTTP_ROUTE\n ] = flask.request.url_rule.rule\n for key, value in attributes.items():\n span.set_attribute(key, value)\n if span.kind == trace.SpanKind.SERVER:\n otel_wsgi.add_custom_request_headers(\n span, flask_request_environ\n )\n\n activation = trace.use_span(span, end_on_exit=True)\n activation.__enter__() # pylint: disable=E1101\n flask_request_environ[_ENVIRON_ACTIVATION_KEY] = activation\n flask_request_environ[_ENVIRON_SPAN_KEY] = span\n flask_request_environ[_ENVIRON_TOKEN] = token\n\n return _before_request\n\n\ndef _wrapped_teardown_request(excluded_urls=None):\n def _teardown_request(exc):\n # pylint: disable=E1101\n if excluded_urls and excluded_urls.url_disabled(flask.request.url):\n return\n\n activation = flask.request.environ.get(_ENVIRON_ACTIVATION_KEY)\n if not activation:\n # This request didn't start a span, maybe because it was created in\n # a way that doesn't run `before_request`, like when it is created\n # with `app.test_request_context`.\n return\n\n if exc is None:\n activation.__exit__(None, None, None)\n else:\n activation.__exit__(\n type(exc), exc, getattr(exc, \"__traceback__\", None)\n )\n\n if flask.request.environ.get(_ENVIRON_TOKEN, None):\n context.detach(flask.request.environ.get(_ENVIRON_TOKEN))\n\n return _teardown_request\n\n\nclass _InstrumentedFlask(flask.Flask):\n\n _excluded_urls = None\n _tracer_provider = None\n _request_hook = None\n _response_hook = None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self._original_wsgi_app = self.wsgi_app\n self._is_instrumented_by_opentelemetry = True\n\n self.wsgi_app = _rewrapped_app(\n self.wsgi_app,\n _InstrumentedFlask._response_hook,\n 
excluded_urls=_InstrumentedFlask._excluded_urls,\n )\n\n tracer = trace.get_tracer(\n __name__, __version__, _InstrumentedFlask._tracer_provider\n )\n\n _before_request = _wrapped_before_request(\n _InstrumentedFlask._request_hook,\n tracer,\n excluded_urls=_InstrumentedFlask._excluded_urls,\n )\n self._before_request = _before_request\n self.before_request(_before_request)\n\n _teardown_request = _wrapped_teardown_request(\n excluded_urls=_InstrumentedFlask._excluded_urls,\n )\n self.teardown_request(_teardown_request)\n\n\nclass FlaskInstrumentor(BaseInstrumentor):\n # pylint: disable=protected-access,attribute-defined-outside-init\n \"\"\"An instrumentor for flask.Flask\n\n See `BaseInstrumentor`\n \"\"\"\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n self._original_flask = flask.Flask\n request_hook = kwargs.get(\"request_hook\")\n response_hook = kwargs.get(\"response_hook\")\n if callable(request_hook):\n _InstrumentedFlask._request_hook = request_hook\n if callable(response_hook):\n _InstrumentedFlask._response_hook = response_hook\n tracer_provider = kwargs.get(\"tracer_provider\")\n _InstrumentedFlask._tracer_provider = tracer_provider\n excluded_urls = kwargs.get(\"excluded_urls\")\n _InstrumentedFlask._excluded_urls = (\n _excluded_urls_from_env\n if excluded_urls is None\n else parse_excluded_urls(excluded_urls)\n )\n flask.Flask = _InstrumentedFlask\n\n def _uninstrument(self, **kwargs):\n flask.Flask = self._original_flask\n\n @staticmethod\n def instrument_app(\n app,\n request_hook=None,\n response_hook=None,\n tracer_provider=None,\n excluded_urls=None,\n ):\n if not hasattr(app, \"_is_instrumented_by_opentelemetry\"):\n app._is_instrumented_by_opentelemetry = False\n\n if not app._is_instrumented_by_opentelemetry:\n excluded_urls = (\n parse_excluded_urls(excluded_urls)\n if excluded_urls is not None\n else _excluded_urls_from_env\n )\n app._original_wsgi_app = app.wsgi_app\n app.wsgi_app = _rewrapped_app(\n app.wsgi_app, response_hook, excluded_urls=excluded_urls\n )\n\n tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n\n _before_request = _wrapped_before_request(\n request_hook,\n tracer,\n excluded_urls=excluded_urls,\n )\n app._before_request = _before_request\n app.before_request(_before_request)\n\n _teardown_request = _wrapped_teardown_request(\n excluded_urls=excluded_urls,\n )\n app._teardown_request = _teardown_request\n app.teardown_request(_teardown_request)\n app._is_instrumented_by_opentelemetry = True\n else:\n _logger.warning(\n \"Attempting to instrument Flask app while already instrumented\"\n )\n\n @staticmethod\n def uninstrument_app(app):\n if hasattr(app, \"_original_wsgi_app\"):\n app.wsgi_app = app._original_wsgi_app\n\n # FIXME add support for other Flask blueprints that are not None\n app.before_request_funcs[None].remove(app._before_request)\n app.teardown_request_funcs[None].remove(app._teardown_request)\n del app._original_wsgi_app\n app._is_instrumented_by_opentelemetry = False\n else:\n _logger.warning(\n \"Attempting to uninstrument Flask \"\n \"app while already uninstrumented\"\n )\n", "path": "instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py"}]}
| 4,041 | 298 |
gh_patches_debug_41801
|
rasdani/github-patches
|
git_diff
|
pydantic__pydantic-1620
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add encoding to `read_env_file()`
# Feature Request
Output of `import pydantic.utils; print(pydantic.utils.version_info())`:
```
pydantic version: 1.5.1
pydantic compiled: True
python version: 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]
platform: Windows-7-6.1.7601-SP1
optional deps. installed: ['email-validator']
```
## Description
Hi, there's a known problem on Windows with parsing dotenv files - https://github.com/pypa/pipenv/issues/1963. `python-dotenv` parses files with the default encoding (cp1251 on Cyrillic Windows). As a result we get `Р›РёСЃС‚ 1` instead of `Лист 1`.
It looks like [this](https://github.com/samuelcolvin/pydantic/blob/960b24a5aab7ae0631bfbfbe0047b4d8600c6012/pydantic/env_settings.py#L111) function need to fetch encoding from `Config` class somehow.
<!-- Where possible please include a self-contained code snippet describing your feature request: -->
## Example
`.env` file (UTF-8):
```
foo=Лист 1
```
Code snippet:
```py
import pydantic
class Settings(pydantic.BaseSettings):
foo: str
class Config:
env_file_encoding = 'utf-8'
settings = Settings(_env_file='.env')
print(settings)
# foo='Лист 1'
```
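
For context, the request boils down to passing an encoding through to python-dotenv; below is a minimal sketch of the underlying call (hedged illustration, assuming python-dotenv is installed and the `.env` file above is saved as UTF-8):

```py
from dotenv import dotenv_values

# Reading with an explicit encoding sidesteps the cp1251 default on a
# Cyrillic-locale Windows machine, so the value survives as 'Лист 1'.
values = dotenv_values('.env', encoding='utf-8')
print(values)  # {'foo': 'Лист 1'}
```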
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pydantic/env_settings.py`
Content:
```
1 import os
2 import warnings
3 from pathlib import Path
4 from typing import AbstractSet, Any, Dict, List, Mapping, Optional, Union
5
6 from .fields import ModelField
7 from .main import BaseModel, Extra
8 from .typing import display_as_type
9 from .utils import deep_update, sequence_like
10
11 env_file_sentinel = str(object())
12
13
14 class SettingsError(ValueError):
15 pass
16
17
18 class BaseSettings(BaseModel):
19 """
20 Base class for settings, allowing values to be overridden by environment variables.
21
22 This is useful in production for secrets you do not wish to save in code, it plays nicely with docker(-compose),
23 Heroku and any 12 factor app design.
24 """
25
26 def __init__(__pydantic_self__, _env_file: Union[Path, str, None] = env_file_sentinel, **values: Any) -> None:
27 # Uses something other than `self` the first arg to allow "self" as a settable attribute
28 super().__init__(**__pydantic_self__._build_values(values, _env_file=_env_file))
29
30 def _build_values(self, init_kwargs: Dict[str, Any], _env_file: Union[Path, str, None] = None) -> Dict[str, Any]:
31 return deep_update(self._build_environ(_env_file), init_kwargs)
32
33 def _build_environ(self, _env_file: Union[Path, str, None] = None) -> Dict[str, Optional[str]]:
34 """
35 Build environment variables suitable for passing to the Model.
36 """
37 d: Dict[str, Optional[str]] = {}
38
39 if self.__config__.case_sensitive:
40 env_vars: Mapping[str, Optional[str]] = os.environ
41 else:
42 env_vars = {k.lower(): v for k, v in os.environ.items()}
43
44 env_file = _env_file if _env_file != env_file_sentinel else self.__config__.env_file
45 if env_file is not None:
46 env_path = Path(env_file)
47 if env_path.is_file():
48 env_vars = {**read_env_file(env_path, case_sensitive=self.__config__.case_sensitive), **env_vars}
49
50 for field in self.__fields__.values():
51 env_val: Optional[str] = None
52 for env_name in field.field_info.extra['env_names']:
53 env_val = env_vars.get(env_name)
54 if env_val is not None:
55 break
56
57 if env_val is None:
58 continue
59
60 if field.is_complex():
61 try:
62 env_val = self.__config__.json_loads(env_val) # type: ignore
63 except ValueError as e:
64 raise SettingsError(f'error parsing JSON for "{env_name}"') from e
65 d[field.alias] = env_val
66 return d
67
68 class Config:
69 env_prefix = ''
70 env_file = None
71 validate_all = True
72 extra = Extra.forbid
73 arbitrary_types_allowed = True
74 case_sensitive = False
75
76 @classmethod
77 def prepare_field(cls, field: ModelField) -> None:
78 env_names: Union[List[str], AbstractSet[str]]
79 env = field.field_info.extra.get('env')
80 if env is None:
81 if field.has_alias:
82 warnings.warn(
83 'aliases are no longer used by BaseSettings to define which environment variables to read. '
84 'Instead use the "env" field setting. '
85 'See https://pydantic-docs.helpmanual.io/usage/settings/#environment-variable-names',
86 FutureWarning,
87 )
88 env_names = {cls.env_prefix + field.name}
89 elif isinstance(env, str):
90 env_names = {env}
91 elif isinstance(env, (set, frozenset)):
92 env_names = env
93 elif sequence_like(env):
94 env_names = list(env)
95 else:
96 raise TypeError(f'invalid field env: {env!r} ({display_as_type(env)}); should be string, list or set')
97
98 if not cls.case_sensitive:
99 env_names = env_names.__class__(n.lower() for n in env_names)
100 field.field_info.extra['env_names'] = env_names
101
102 __config__: Config # type: ignore
103
104
105 def read_env_file(file_path: Path, *, case_sensitive: bool = False) -> Dict[str, Optional[str]]:
106 try:
107 from dotenv import dotenv_values
108 except ImportError as e:
109 raise ImportError('python-dotenv is not installed, run `pip install pydantic[dotenv]`') from e
110
111 file_vars: Dict[str, Optional[str]] = dotenv_values(file_path)
112 if not case_sensitive:
113 return {k.lower(): v for k, v in file_vars.items()}
114 else:
115 return file_vars
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pydantic/env_settings.py b/pydantic/env_settings.py
--- a/pydantic/env_settings.py
+++ b/pydantic/env_settings.py
@@ -23,14 +23,28 @@
Heroku and any 12 factor app design.
"""
- def __init__(__pydantic_self__, _env_file: Union[Path, str, None] = env_file_sentinel, **values: Any) -> None:
+ def __init__(
+ __pydantic_self__,
+ _env_file: Union[Path, str, None] = env_file_sentinel,
+ _env_file_encoding: Optional[str] = None,
+ **values: Any,
+ ) -> None:
# Uses something other than `self` the first arg to allow "self" as a settable attribute
- super().__init__(**__pydantic_self__._build_values(values, _env_file=_env_file))
-
- def _build_values(self, init_kwargs: Dict[str, Any], _env_file: Union[Path, str, None] = None) -> Dict[str, Any]:
- return deep_update(self._build_environ(_env_file), init_kwargs)
-
- def _build_environ(self, _env_file: Union[Path, str, None] = None) -> Dict[str, Optional[str]]:
+ super().__init__(
+ **__pydantic_self__._build_values(values, _env_file=_env_file, _env_file_encoding=_env_file_encoding)
+ )
+
+ def _build_values(
+ self,
+ init_kwargs: Dict[str, Any],
+ _env_file: Union[Path, str, None] = None,
+ _env_file_encoding: Optional[str] = None,
+ ) -> Dict[str, Any]:
+ return deep_update(self._build_environ(_env_file, _env_file_encoding), init_kwargs)
+
+ def _build_environ(
+ self, _env_file: Union[Path, str, None] = None, _env_file_encoding: Optional[str] = None
+ ) -> Dict[str, Optional[str]]:
"""
Build environment variables suitable for passing to the Model.
"""
@@ -42,10 +56,16 @@
env_vars = {k.lower(): v for k, v in os.environ.items()}
env_file = _env_file if _env_file != env_file_sentinel else self.__config__.env_file
+ env_file_encoding = _env_file_encoding if _env_file_encoding is not None else self.__config__.env_file_encoding
if env_file is not None:
env_path = Path(env_file)
if env_path.is_file():
- env_vars = {**read_env_file(env_path, case_sensitive=self.__config__.case_sensitive), **env_vars}
+ env_vars = {
+ **read_env_file(
+ env_path, encoding=env_file_encoding, case_sensitive=self.__config__.case_sensitive
+ ),
+ **env_vars,
+ }
for field in self.__fields__.values():
env_val: Optional[str] = None
@@ -68,6 +88,7 @@
class Config:
env_prefix = ''
env_file = None
+ env_file_encoding = None
validate_all = True
extra = Extra.forbid
arbitrary_types_allowed = True
@@ -102,13 +123,13 @@
__config__: Config # type: ignore
-def read_env_file(file_path: Path, *, case_sensitive: bool = False) -> Dict[str, Optional[str]]:
+def read_env_file(file_path: Path, *, encoding: str = None, case_sensitive: bool = False) -> Dict[str, Optional[str]]:
try:
from dotenv import dotenv_values
except ImportError as e:
raise ImportError('python-dotenv is not installed, run `pip install pydantic[dotenv]`') from e
- file_vars: Dict[str, Optional[str]] = dotenv_values(file_path)
+ file_vars: Dict[str, Optional[str]] = dotenv_values(file_path, encoding=encoding)
if not case_sensitive:
return {k.lower(): v for k, v in file_vars.items()}
else:
|
{"golden_diff": "diff --git a/pydantic/env_settings.py b/pydantic/env_settings.py\n--- a/pydantic/env_settings.py\n+++ b/pydantic/env_settings.py\n@@ -23,14 +23,28 @@\n Heroku and any 12 factor app design.\n \"\"\"\n \n- def __init__(__pydantic_self__, _env_file: Union[Path, str, None] = env_file_sentinel, **values: Any) -> None:\n+ def __init__(\n+ __pydantic_self__,\n+ _env_file: Union[Path, str, None] = env_file_sentinel,\n+ _env_file_encoding: Optional[str] = None,\n+ **values: Any,\n+ ) -> None:\n # Uses something other than `self` the first arg to allow \"self\" as a settable attribute\n- super().__init__(**__pydantic_self__._build_values(values, _env_file=_env_file))\n-\n- def _build_values(self, init_kwargs: Dict[str, Any], _env_file: Union[Path, str, None] = None) -> Dict[str, Any]:\n- return deep_update(self._build_environ(_env_file), init_kwargs)\n-\n- def _build_environ(self, _env_file: Union[Path, str, None] = None) -> Dict[str, Optional[str]]:\n+ super().__init__(\n+ **__pydantic_self__._build_values(values, _env_file=_env_file, _env_file_encoding=_env_file_encoding)\n+ )\n+\n+ def _build_values(\n+ self,\n+ init_kwargs: Dict[str, Any],\n+ _env_file: Union[Path, str, None] = None,\n+ _env_file_encoding: Optional[str] = None,\n+ ) -> Dict[str, Any]:\n+ return deep_update(self._build_environ(_env_file, _env_file_encoding), init_kwargs)\n+\n+ def _build_environ(\n+ self, _env_file: Union[Path, str, None] = None, _env_file_encoding: Optional[str] = None\n+ ) -> Dict[str, Optional[str]]:\n \"\"\"\n Build environment variables suitable for passing to the Model.\n \"\"\"\n@@ -42,10 +56,16 @@\n env_vars = {k.lower(): v for k, v in os.environ.items()}\n \n env_file = _env_file if _env_file != env_file_sentinel else self.__config__.env_file\n+ env_file_encoding = _env_file_encoding if _env_file_encoding is not None else self.__config__.env_file_encoding\n if env_file is not None:\n env_path = Path(env_file)\n if env_path.is_file():\n- env_vars = {**read_env_file(env_path, case_sensitive=self.__config__.case_sensitive), **env_vars}\n+ env_vars = {\n+ **read_env_file(\n+ env_path, encoding=env_file_encoding, case_sensitive=self.__config__.case_sensitive\n+ ),\n+ **env_vars,\n+ }\n \n for field in self.__fields__.values():\n env_val: Optional[str] = None\n@@ -68,6 +88,7 @@\n class Config:\n env_prefix = ''\n env_file = None\n+ env_file_encoding = None\n validate_all = True\n extra = Extra.forbid\n arbitrary_types_allowed = True\n@@ -102,13 +123,13 @@\n __config__: Config # type: ignore\n \n \n-def read_env_file(file_path: Path, *, case_sensitive: bool = False) -> Dict[str, Optional[str]]:\n+def read_env_file(file_path: Path, *, encoding: str = None, case_sensitive: bool = False) -> Dict[str, Optional[str]]:\n try:\n from dotenv import dotenv_values\n except ImportError as e:\n raise ImportError('python-dotenv is not installed, run `pip install pydantic[dotenv]`') from e\n \n- file_vars: Dict[str, Optional[str]] = dotenv_values(file_path)\n+ file_vars: Dict[str, Optional[str]] = dotenv_values(file_path, encoding=encoding)\n if not case_sensitive:\n return {k.lower(): v for k, v in file_vars.items()}\n else:\n", "issue": "Add encoding to `read_env_file()`\n# Feature Request\r\n\r\nOutput of `import pydantic.utils; print(pydantic.utils.version_info())`:\r\n\r\n```\r\n pydantic version: 1.5.1\r\n pydantic compiled: True\r\n python version: 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\r\n platform: Windows-7-6.1.7601-SP1\r\n optional deps. 
installed: ['email-validator']\r\n```\r\n\r\n## Description\r\n\r\nHi, there's known problem on Windows with parsing dotenv files - https://github.com/pypa/pipenv/issues/1963. `python-dotenv` would parse files with default encoding (cp1251 for Cyrillic Windows). As a result we get `\u0420\u203a\u0420\u0451\u0421\u0403\u0421\u201a 1` instead of `\u041b\u0438\u0441\u0442 1`.\r\n\r\nIt looks like [this](https://github.com/samuelcolvin/pydantic/blob/960b24a5aab7ae0631bfbfbe0047b4d8600c6012/pydantic/env_settings.py#L111) function need to fetch encoding from `Config` class somehow.\r\n\r\n<!-- Where possible please include a self-contained code snippet describing your feature request: -->\r\n\r\n## Example\r\n\r\n`.env` file (UTF-8):\r\n\r\n```\r\nfoo=\u041b\u0438\u0441\u0442 1\r\n```\r\n\r\nCode snippet:\r\n\r\n```py\r\nimport pydantic\r\n\r\nclass Settings(pydantic.BaseSettings):\r\n foo: str\r\n\r\n class Config:\r\n env_file_encoding = 'utf-8'\r\n\r\nsettings = Settings(_env_file='.env')\r\nprint(settings)\r\n# foo='\u041b\u0438\u0441\u0442 1'\r\n```\r\n\n", "before_files": [{"content": "import os\nimport warnings\nfrom pathlib import Path\nfrom typing import AbstractSet, Any, Dict, List, Mapping, Optional, Union\n\nfrom .fields import ModelField\nfrom .main import BaseModel, Extra\nfrom .typing import display_as_type\nfrom .utils import deep_update, sequence_like\n\nenv_file_sentinel = str(object())\n\n\nclass SettingsError(ValueError):\n pass\n\n\nclass BaseSettings(BaseModel):\n \"\"\"\n Base class for settings, allowing values to be overridden by environment variables.\n\n This is useful in production for secrets you do not wish to save in code, it plays nicely with docker(-compose),\n Heroku and any 12 factor app design.\n \"\"\"\n\n def __init__(__pydantic_self__, _env_file: Union[Path, str, None] = env_file_sentinel, **values: Any) -> None:\n # Uses something other than `self` the first arg to allow \"self\" as a settable attribute\n super().__init__(**__pydantic_self__._build_values(values, _env_file=_env_file))\n\n def _build_values(self, init_kwargs: Dict[str, Any], _env_file: Union[Path, str, None] = None) -> Dict[str, Any]:\n return deep_update(self._build_environ(_env_file), init_kwargs)\n\n def _build_environ(self, _env_file: Union[Path, str, None] = None) -> Dict[str, Optional[str]]:\n \"\"\"\n Build environment variables suitable for passing to the Model.\n \"\"\"\n d: Dict[str, Optional[str]] = {}\n\n if self.__config__.case_sensitive:\n env_vars: Mapping[str, Optional[str]] = os.environ\n else:\n env_vars = {k.lower(): v for k, v in os.environ.items()}\n\n env_file = _env_file if _env_file != env_file_sentinel else self.__config__.env_file\n if env_file is not None:\n env_path = Path(env_file)\n if env_path.is_file():\n env_vars = {**read_env_file(env_path, case_sensitive=self.__config__.case_sensitive), **env_vars}\n\n for field in self.__fields__.values():\n env_val: Optional[str] = None\n for env_name in field.field_info.extra['env_names']:\n env_val = env_vars.get(env_name)\n if env_val is not None:\n break\n\n if env_val is None:\n continue\n\n if field.is_complex():\n try:\n env_val = self.__config__.json_loads(env_val) # type: ignore\n except ValueError as e:\n raise SettingsError(f'error parsing JSON for \"{env_name}\"') from e\n d[field.alias] = env_val\n return d\n\n class Config:\n env_prefix = ''\n env_file = None\n validate_all = True\n extra = Extra.forbid\n arbitrary_types_allowed = True\n case_sensitive = False\n\n @classmethod\n def prepare_field(cls, 
field: ModelField) -> None:\n env_names: Union[List[str], AbstractSet[str]]\n env = field.field_info.extra.get('env')\n if env is None:\n if field.has_alias:\n warnings.warn(\n 'aliases are no longer used by BaseSettings to define which environment variables to read. '\n 'Instead use the \"env\" field setting. '\n 'See https://pydantic-docs.helpmanual.io/usage/settings/#environment-variable-names',\n FutureWarning,\n )\n env_names = {cls.env_prefix + field.name}\n elif isinstance(env, str):\n env_names = {env}\n elif isinstance(env, (set, frozenset)):\n env_names = env\n elif sequence_like(env):\n env_names = list(env)\n else:\n raise TypeError(f'invalid field env: {env!r} ({display_as_type(env)}); should be string, list or set')\n\n if not cls.case_sensitive:\n env_names = env_names.__class__(n.lower() for n in env_names)\n field.field_info.extra['env_names'] = env_names\n\n __config__: Config # type: ignore\n\n\ndef read_env_file(file_path: Path, *, case_sensitive: bool = False) -> Dict[str, Optional[str]]:\n try:\n from dotenv import dotenv_values\n except ImportError as e:\n raise ImportError('python-dotenv is not installed, run `pip install pydantic[dotenv]`') from e\n\n file_vars: Dict[str, Optional[str]] = dotenv_values(file_path)\n if not case_sensitive:\n return {k.lower(): v for k, v in file_vars.items()}\n else:\n return file_vars\n", "path": "pydantic/env_settings.py"}], "after_files": [{"content": "import os\nimport warnings\nfrom pathlib import Path\nfrom typing import AbstractSet, Any, Dict, List, Mapping, Optional, Union\n\nfrom .fields import ModelField\nfrom .main import BaseModel, Extra\nfrom .typing import display_as_type\nfrom .utils import deep_update, sequence_like\n\nenv_file_sentinel = str(object())\n\n\nclass SettingsError(ValueError):\n pass\n\n\nclass BaseSettings(BaseModel):\n \"\"\"\n Base class for settings, allowing values to be overridden by environment variables.\n\n This is useful in production for secrets you do not wish to save in code, it plays nicely with docker(-compose),\n Heroku and any 12 factor app design.\n \"\"\"\n\n def __init__(\n __pydantic_self__,\n _env_file: Union[Path, str, None] = env_file_sentinel,\n _env_file_encoding: Optional[str] = None,\n **values: Any,\n ) -> None:\n # Uses something other than `self` the first arg to allow \"self\" as a settable attribute\n super().__init__(\n **__pydantic_self__._build_values(values, _env_file=_env_file, _env_file_encoding=_env_file_encoding)\n )\n\n def _build_values(\n self,\n init_kwargs: Dict[str, Any],\n _env_file: Union[Path, str, None] = None,\n _env_file_encoding: Optional[str] = None,\n ) -> Dict[str, Any]:\n return deep_update(self._build_environ(_env_file, _env_file_encoding), init_kwargs)\n\n def _build_environ(\n self, _env_file: Union[Path, str, None] = None, _env_file_encoding: Optional[str] = None\n ) -> Dict[str, Optional[str]]:\n \"\"\"\n Build environment variables suitable for passing to the Model.\n \"\"\"\n d: Dict[str, Optional[str]] = {}\n\n if self.__config__.case_sensitive:\n env_vars: Mapping[str, Optional[str]] = os.environ\n else:\n env_vars = {k.lower(): v for k, v in os.environ.items()}\n\n env_file = _env_file if _env_file != env_file_sentinel else self.__config__.env_file\n env_file_encoding = _env_file_encoding if _env_file_encoding is not None else self.__config__.env_file_encoding\n if env_file is not None:\n env_path = Path(env_file)\n if env_path.is_file():\n env_vars = {\n **read_env_file(\n env_path, encoding=env_file_encoding, 
case_sensitive=self.__config__.case_sensitive\n ),\n **env_vars,\n }\n\n for field in self.__fields__.values():\n env_val: Optional[str] = None\n for env_name in field.field_info.extra['env_names']:\n env_val = env_vars.get(env_name)\n if env_val is not None:\n break\n\n if env_val is None:\n continue\n\n if field.is_complex():\n try:\n env_val = self.__config__.json_loads(env_val) # type: ignore\n except ValueError as e:\n raise SettingsError(f'error parsing JSON for \"{env_name}\"') from e\n d[field.alias] = env_val\n return d\n\n class Config:\n env_prefix = ''\n env_file = None\n env_file_encoding = None\n validate_all = True\n extra = Extra.forbid\n arbitrary_types_allowed = True\n case_sensitive = False\n\n @classmethod\n def prepare_field(cls, field: ModelField) -> None:\n env_names: Union[List[str], AbstractSet[str]]\n env = field.field_info.extra.get('env')\n if env is None:\n if field.has_alias:\n warnings.warn(\n 'aliases are no longer used by BaseSettings to define which environment variables to read. '\n 'Instead use the \"env\" field setting. '\n 'See https://pydantic-docs.helpmanual.io/usage/settings/#environment-variable-names',\n FutureWarning,\n )\n env_names = {cls.env_prefix + field.name}\n elif isinstance(env, str):\n env_names = {env}\n elif isinstance(env, (set, frozenset)):\n env_names = env\n elif sequence_like(env):\n env_names = list(env)\n else:\n raise TypeError(f'invalid field env: {env!r} ({display_as_type(env)}); should be string, list or set')\n\n if not cls.case_sensitive:\n env_names = env_names.__class__(n.lower() for n in env_names)\n field.field_info.extra['env_names'] = env_names\n\n __config__: Config # type: ignore\n\n\ndef read_env_file(file_path: Path, *, encoding: str = None, case_sensitive: bool = False) -> Dict[str, Optional[str]]:\n try:\n from dotenv import dotenv_values\n except ImportError as e:\n raise ImportError('python-dotenv is not installed, run `pip install pydantic[dotenv]`') from e\n\n file_vars: Dict[str, Optional[str]] = dotenv_values(file_path, encoding=encoding)\n if not case_sensitive:\n return {k.lower(): v for k, v in file_vars.items()}\n else:\n return file_vars\n", "path": "pydantic/env_settings.py"}]}
| 1,914 | 926 |
gh_patches_debug_9183
|
rasdani/github-patches
|
git_diff
|
PaddlePaddle__models-492
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
model/text_classification: after training a model on the built-in corpus, inference fails with the error below
```text
I1123 09:23:54.320137 84 Util.cpp:166] commandline: --use_gpu=False --trainer_count=1
Traceback (most recent call last):
File "infer.py", line 91, in <module>
batch_size=10)
File "infer.py", line 58, in infer
for idx, item in enumerate(test_reader):
TypeError: 'function' object is not iterable
```
The paddle image is the latest from https://hub.docker.com/r/paddlepaddle/paddle/tags/ and the model is the latest download.
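
A hedged side note (not part of the original report): in the Paddle v2 API, `paddle.dataset.imdb.test(word_dict)` returns a reader *creator* — a function that has to be called to obtain the iterable of samples — which is why `enumerate(test_reader)` fails above.

```py
import paddle.v2 as paddle

word_dict = paddle.dataset.imdb.word_dict()
reader_creator = paddle.dataset.imdb.test(word_dict)  # a function, not an iterable
for idx, item in enumerate(reader_creator()):  # calling it yields the samples
    pass
```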
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `text_classification/infer.py`
Content:
```
1 import sys
2 import os
3 import gzip
4
5 import paddle.v2 as paddle
6
7 import reader
8 from network_conf import fc_net, convolution_net
9 from utils import logger, load_dict, load_reverse_dict
10
11
12 def infer(topology, data_dir, model_path, word_dict_path, label_dict_path,
13 batch_size):
14 def _infer_a_batch(inferer, test_batch, ids_2_word, ids_2_label):
15 probs = inferer.infer(input=test_batch, field=["value"])
16 assert len(probs) == len(test_batch)
17 for word_ids, prob in zip(test_batch, probs):
18 word_text = " ".join([ids_2_word[id] for id in word_ids[0]])
19 print("%s\t%s\t%s" % (ids_2_label[prob.argmax()],
20 " ".join(["{:0.4f}".format(p)
21 for p in prob]), word_text))
22
23 logger.info("begin to predict...")
24 use_default_data = (data_dir is None)
25
26 if use_default_data:
27 word_dict = paddle.dataset.imdb.word_dict()
28 word_reverse_dict = dict((value, key)
29 for key, value in word_dict.iteritems())
30 label_reverse_dict = {0: "positive", 1: "negative"}
31 test_reader = paddle.dataset.imdb.test(word_dict)
32 else:
33 assert os.path.exists(
34 word_dict_path), "the word dictionary file does not exist"
35 assert os.path.exists(
36 label_dict_path), "the label dictionary file does not exist"
37
38 word_dict = load_dict(word_dict_path)
39 word_reverse_dict = load_reverse_dict(word_dict_path)
40 label_reverse_dict = load_reverse_dict(label_dict_path)
41
42 test_reader = reader.test_reader(data_dir, word_dict)()
43
44 dict_dim = len(word_dict)
45 class_num = len(label_reverse_dict)
46 prob_layer = topology(dict_dim, class_num, is_infer=True)
47
48 # initialize PaddlePaddle
49 paddle.init(use_gpu=False, trainer_count=1)
50
51 # load the trained models
52 parameters = paddle.parameters.Parameters.from_tar(
53 gzip.open(model_path, "r"))
54 inferer = paddle.inference.Inference(
55 output_layer=prob_layer, parameters=parameters)
56
57 test_batch = []
58 for idx, item in enumerate(test_reader):
59 test_batch.append([item[0]])
60 if len(test_batch) == batch_size:
61 _infer_a_batch(inferer, test_batch, word_reverse_dict,
62 label_reverse_dict)
63 test_batch = []
64
65 if len(test_batch):
66 _infer_a_batch(inferer, test_batch, word_reverse_dict,
67 label_reverse_dict)
68 test_batch = []
69
70
71 if __name__ == "__main__":
72 model_path = "models/dnn_params_pass_00000.tar.gz"
73 assert os.path.exists(model_path), "the trained model does not exist."
74
75 nn_type = "dnn"
76 test_dir = None
77 word_dict = None
78 label_dict = None
79
80 if nn_type == "dnn":
81 topology = fc_net
82 elif nn_type == "cnn":
83 topology = convolution_net
84
85 infer(
86 topology=topology,
87 data_dir=test_dir,
88 word_dict_path=word_dict,
89 label_dict_path=label_dict,
90 model_path=model_path,
91 batch_size=10)
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/text_classification/infer.py b/text_classification/infer.py
--- a/text_classification/infer.py
+++ b/text_classification/infer.py
@@ -28,7 +28,7 @@
word_reverse_dict = dict((value, key)
for key, value in word_dict.iteritems())
label_reverse_dict = {0: "positive", 1: "negative"}
- test_reader = paddle.dataset.imdb.test(word_dict)
+ test_reader = paddle.dataset.imdb.test(word_dict)()
else:
assert os.path.exists(
word_dict_path), "the word dictionary file does not exist"
|
{"golden_diff": "diff --git a/text_classification/infer.py b/text_classification/infer.py\n--- a/text_classification/infer.py\n+++ b/text_classification/infer.py\n@@ -28,7 +28,7 @@\n word_reverse_dict = dict((value, key)\n for key, value in word_dict.iteritems())\n label_reverse_dict = {0: \"positive\", 1: \"negative\"}\n- test_reader = paddle.dataset.imdb.test(word_dict)\n+ test_reader = paddle.dataset.imdb.test(word_dict)()\n else:\n assert os.path.exists(\n word_dict_path), \"the word dictionary file does not exist\"\n", "issue": "model/text_classification \u4f7f\u7528\u5185\u7f6e\u8bed\u6599\u96c6\u8bad\u7ec3\u6a21\u578b\u540e\uff0c\u9884\u6d4b\u65f6\u51fa\u9519\uff0c\u5177\u4f53\u5982\u4e0b\n```text\r\nI1123 09:23:54.320137 84 Util.cpp:166] commandline: --use_gpu=False --trainer_count=1 \r\nTraceback (most recent call last):\r\n File \"infer.py\", line 91, in <module>\r\n batch_size=10)\r\n File \"infer.py\", line 58, in infer\r\n for idx, item in enumerate(test_reader):\r\nTypeError: 'function' object is not iterable\r\n```\r\npaddle\u955c\u50cf\u7528\u7684\u662fhttps://hub.docker.com/r/paddlepaddle/paddle/tags/ latest\u7684\uff0cmodel\u4e0b\u8f7d\u7684\u6700\u65b0\u7684\n", "before_files": [{"content": "import sys\nimport os\nimport gzip\n\nimport paddle.v2 as paddle\n\nimport reader\nfrom network_conf import fc_net, convolution_net\nfrom utils import logger, load_dict, load_reverse_dict\n\n\ndef infer(topology, data_dir, model_path, word_dict_path, label_dict_path,\n batch_size):\n def _infer_a_batch(inferer, test_batch, ids_2_word, ids_2_label):\n probs = inferer.infer(input=test_batch, field=[\"value\"])\n assert len(probs) == len(test_batch)\n for word_ids, prob in zip(test_batch, probs):\n word_text = \" \".join([ids_2_word[id] for id in word_ids[0]])\n print(\"%s\\t%s\\t%s\" % (ids_2_label[prob.argmax()],\n \" \".join([\"{:0.4f}\".format(p)\n for p in prob]), word_text))\n\n logger.info(\"begin to predict...\")\n use_default_data = (data_dir is None)\n\n if use_default_data:\n word_dict = paddle.dataset.imdb.word_dict()\n word_reverse_dict = dict((value, key)\n for key, value in word_dict.iteritems())\n label_reverse_dict = {0: \"positive\", 1: \"negative\"}\n test_reader = paddle.dataset.imdb.test(word_dict)\n else:\n assert os.path.exists(\n word_dict_path), \"the word dictionary file does not exist\"\n assert os.path.exists(\n label_dict_path), \"the label dictionary file does not exist\"\n\n word_dict = load_dict(word_dict_path)\n word_reverse_dict = load_reverse_dict(word_dict_path)\n label_reverse_dict = load_reverse_dict(label_dict_path)\n\n test_reader = reader.test_reader(data_dir, word_dict)()\n\n dict_dim = len(word_dict)\n class_num = len(label_reverse_dict)\n prob_layer = topology(dict_dim, class_num, is_infer=True)\n\n # initialize PaddlePaddle\n paddle.init(use_gpu=False, trainer_count=1)\n\n # load the trained models\n parameters = paddle.parameters.Parameters.from_tar(\n gzip.open(model_path, \"r\"))\n inferer = paddle.inference.Inference(\n output_layer=prob_layer, parameters=parameters)\n\n test_batch = []\n for idx, item in enumerate(test_reader):\n test_batch.append([item[0]])\n if len(test_batch) == batch_size:\n _infer_a_batch(inferer, test_batch, word_reverse_dict,\n label_reverse_dict)\n test_batch = []\n\n if len(test_batch):\n _infer_a_batch(inferer, test_batch, word_reverse_dict,\n label_reverse_dict)\n test_batch = []\n\n\nif __name__ == \"__main__\":\n model_path = \"models/dnn_params_pass_00000.tar.gz\"\n assert os.path.exists(model_path), \"the 
trained model does not exist.\"\n\n nn_type = \"dnn\"\n test_dir = None\n word_dict = None\n label_dict = None\n\n if nn_type == \"dnn\":\n topology = fc_net\n elif nn_type == \"cnn\":\n topology = convolution_net\n\n infer(\n topology=topology,\n data_dir=test_dir,\n word_dict_path=word_dict,\n label_dict_path=label_dict,\n model_path=model_path,\n batch_size=10)\n", "path": "text_classification/infer.py"}], "after_files": [{"content": "import sys\nimport os\nimport gzip\n\nimport paddle.v2 as paddle\n\nimport reader\nfrom network_conf import fc_net, convolution_net\nfrom utils import logger, load_dict, load_reverse_dict\n\n\ndef infer(topology, data_dir, model_path, word_dict_path, label_dict_path,\n batch_size):\n def _infer_a_batch(inferer, test_batch, ids_2_word, ids_2_label):\n probs = inferer.infer(input=test_batch, field=[\"value\"])\n assert len(probs) == len(test_batch)\n for word_ids, prob in zip(test_batch, probs):\n word_text = \" \".join([ids_2_word[id] for id in word_ids[0]])\n print(\"%s\\t%s\\t%s\" % (ids_2_label[prob.argmax()],\n \" \".join([\"{:0.4f}\".format(p)\n for p in prob]), word_text))\n\n logger.info(\"begin to predict...\")\n use_default_data = (data_dir is None)\n\n if use_default_data:\n word_dict = paddle.dataset.imdb.word_dict()\n word_reverse_dict = dict((value, key)\n for key, value in word_dict.iteritems())\n label_reverse_dict = {0: \"positive\", 1: \"negative\"}\n test_reader = paddle.dataset.imdb.test(word_dict)()\n else:\n assert os.path.exists(\n word_dict_path), \"the word dictionary file does not exist\"\n assert os.path.exists(\n label_dict_path), \"the label dictionary file does not exist\"\n\n word_dict = load_dict(word_dict_path)\n word_reverse_dict = load_reverse_dict(word_dict_path)\n label_reverse_dict = load_reverse_dict(label_dict_path)\n\n test_reader = reader.test_reader(data_dir, word_dict)()\n\n dict_dim = len(word_dict)\n class_num = len(label_reverse_dict)\n prob_layer = topology(dict_dim, class_num, is_infer=True)\n\n # initialize PaddlePaddle\n paddle.init(use_gpu=False, trainer_count=1)\n\n # load the trained models\n parameters = paddle.parameters.Parameters.from_tar(\n gzip.open(model_path, \"r\"))\n inferer = paddle.inference.Inference(\n output_layer=prob_layer, parameters=parameters)\n\n test_batch = []\n for idx, item in enumerate(test_reader):\n test_batch.append([item[0]])\n if len(test_batch) == batch_size:\n _infer_a_batch(inferer, test_batch, word_reverse_dict,\n label_reverse_dict)\n test_batch = []\n\n if len(test_batch):\n _infer_a_batch(inferer, test_batch, word_reverse_dict,\n label_reverse_dict)\n test_batch = []\n\n\nif __name__ == \"__main__\":\n model_path = \"models/dnn_params_pass_00000.tar.gz\"\n assert os.path.exists(model_path), \"the trained model does not exist.\"\n\n nn_type = \"dnn\"\n test_dir = None\n word_dict = None\n label_dict = None\n\n if nn_type == \"dnn\":\n topology = fc_net\n elif nn_type == \"cnn\":\n topology = convolution_net\n\n infer(\n topology=topology,\n data_dir=test_dir,\n word_dict_path=word_dict,\n label_dict_path=label_dict,\n model_path=model_path,\n batch_size=10)\n", "path": "text_classification/infer.py"}]}
| 1,314 | 132 |
gh_patches_debug_55295
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-799
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docker Python no longer working as soon as elastic-apm is installed
**Describe the bug**
It seems like some patches to requests destroy the functionality of the docker python package.
ValueError: invalid literal for int() with base 10: 'None'
```
Traceback (most recent call last):
File "project\venv\lib\site-packages\django\core\handlers\exception.py", line 34, in inner
response = get_response(request)
File "project\venv\lib\site-packages\django\core\handlers\base.py", line 115, in _get_response
response = self.process_exception_by_middleware(e, request)
File "project\venv\lib\site-packages\django\core\handlers\base.py", line 113, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "project\tool\src\company_tool\web\api\decorators.py", line 21, in wrapper_view_func
return view_func(request, json_data=data, *args, **kwargs)
File "project\venv\lib\site-packages\django\views\decorators\csrf.py", line 54, in wrapped_view
return view_func(*args, **kwargs)
File "project\venv\lib\site-packages\django\views\decorators\http.py", line 40, in inner
return func(request, *args, **kwargs)
File "project\tool\src\company_tool\web\api\views.py", line 27, in start
container_status = client.containers.run(image=container, command=arguments, detach=True, stdin_open=True, tty=True)
File "project\venv\lib\site-packages\docker\models\containers.py", line 802, in run
container = self.create(image=image, command=command,
File "project\venv\lib\site-packages\docker\models\containers.py", line 861, in create
resp = self.client.api.create_container(**create_kwargs)
File "project\venv\lib\site-packages\docker\api\container.py", line 430, in create_container
return self.create_container_from_config(config, name)
File "project\venv\lib\site-packages\docker\api\container.py", line 440, in create_container_from_config
res = self._post_json(u, data=config, params=params)
File "project\venv\lib\site-packages\docker\api\client.py", line 289, in _post_json
return self._post(url, data=json.dumps(data2), **kwargs)
File "project\venv\lib\site-packages\docker\utils\decorators.py", line 46, in inner
return f(self, *args, **kwargs)
File "project\venv\lib\site-packages\docker\api\client.py", line 226, in _post
return self.post(url, **self._set_request_timeout(kwargs))
File "project\venv\lib\site-packages\requests\sessions.py", line 578, in post
return self.request('POST', url, data=data, json=json, **kwargs)
File "project\venv\lib\site-packages\requests\sessions.py", line 530, in request
resp = self.send(prep, **send_kwargs)
File "project\venv\lib\site-packages\elasticapm\utils\wrapt\wrappers.py", line 561, in __call__
return self._self_wrapper(self.__wrapped__, self._self_instance,
File "project\venv\lib\site-packages\elasticapm\instrumentation\packages\base.py", line 210, in call_if_sampling
return self.call(module, method, wrapped, instance, args, kwargs)
File "project\venv\lib\site-packages\elasticapm\instrumentation\packages\requests.py", line 59, in call
return wrapped(*args, **kwargs)
File "project\venv\lib\site-packages\requests\sessions.py", line 643, in send
r = adapter.send(request, **kwargs)
File "project\venv\lib\site-packages\requests\adapters.py", line 439, in send
resp = conn.urlopen(
File "project\venv\lib\site-packages\elasticapm\utils\wrapt\wrappers.py", line 561, in __call__
return self._self_wrapper(self.__wrapped__, self._self_instance,
File "project\venv\lib\site-packages\elasticapm\instrumentation\packages\base.py", line 210, in call_if_sampling
return self.call(module, method, wrapped, instance, args, kwargs)
File "project\venv\lib\site-packages\elasticapm\instrumentation\packages\urllib3.py", line 74, in call
destination = url_to_destination(url)
File "project\venv\lib\site-packages\elasticapm\utils\__init__.py", line 146, in url_to_destination
port = parts.port
File "C:\Python38\lib\urllib\parse.py", line 174, in port
raise ValueError(message) from None
ValueError: Port could not be cast to integer value as 'None'
```
**To Reproduce**
Steps to reproduce the behavior:
1. Install "docker" and "elastic-apm" python package within a django app
2. Load APM using default django config
3. Try to use docker
**Expected behavior**
Docker still works
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/utils/__init__.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
4 # Copyright (c) 2019, Elasticsearch BV
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
31 import base64
32 import os
33 import re
34 from functools import partial
35
36 from elasticapm.conf import constants
37 from elasticapm.utils import compat, encoding
38
39 try:
40 from functools import partialmethod
41
42 partial_types = (partial, partialmethod)
43 except ImportError:
44 # Python 2
45 partial_types = (partial,)
46
47
48 default_ports = {"https": 443, "http": 80, "postgresql": 5432, "mysql": 3306, "mssql": 1433}
49
50
51 def varmap(func, var, context=None, name=None):
52 """
53 Executes ``func(key_name, value)`` on all values,
54 recursively discovering dict and list scoped
55 values.
56 """
57 if context is None:
58 context = set()
59 objid = id(var)
60 if objid in context:
61 return func(name, "<...>")
62 context.add(objid)
63 if isinstance(var, dict):
64 ret = func(name, dict((k, varmap(func, v, context, k)) for k, v in compat.iteritems(var)))
65 elif isinstance(var, (list, tuple)):
66 ret = func(name, [varmap(func, f, context, name) for f in var])
67 else:
68 ret = func(name, var)
69 context.remove(objid)
70 return ret
71
72
73 def get_name_from_func(func):
74 # partials don't have `__module__` or `__name__`, so we use the values from the "inner" function
75 if isinstance(func, partial_types):
76 return "partial({})".format(get_name_from_func(func.func))
77 elif hasattr(func, "_partialmethod") and hasattr(func._partialmethod, "func"):
78 return "partial({})".format(get_name_from_func(func._partialmethod.func))
79
80 module = func.__module__
81
82 if hasattr(func, "__name__"):
83 view_name = func.__name__
84 else: # Fall back if there's no __name__
85 view_name = func.__class__.__name__
86
87 return "{0}.{1}".format(module, view_name)
88
89
90 def build_name_with_http_method_prefix(name, request):
91 return " ".join((request.method, name)) if name else name
92
93
94 def is_master_process():
95 # currently only recognizes uwsgi master process
96 try:
97 import uwsgi
98
99 return os.getpid() == uwsgi.masterpid()
100 except ImportError:
101 return False
102
103
104 def get_url_dict(url):
105 parse_result = compat.urlparse.urlparse(url)
106
107 url_dict = {
108 "full": encoding.keyword_field(url),
109 "protocol": parse_result.scheme + ":",
110 "hostname": encoding.keyword_field(parse_result.hostname),
111 "pathname": encoding.keyword_field(parse_result.path),
112 }
113
114 port = None if parse_result.port is None else str(parse_result.port)
115
116 if port:
117 url_dict["port"] = port
118 if parse_result.query:
119 url_dict["search"] = encoding.keyword_field("?" + parse_result.query)
120 return url_dict
121
122
123 def sanitize_url(url):
124 if "@" not in url:
125 return url
126 parts = compat.urlparse.urlparse(url)
127 return url.replace("%s:%s" % (parts.username, parts.password), "%s:%s" % (parts.username, constants.MASK))
128
129
130 def get_host_from_url(url):
131 parsed_url = compat.urlparse.urlparse(url)
132 host = parsed_url.hostname or " "
133
134 if parsed_url.port and default_ports.get(parsed_url.scheme) != parsed_url.port:
135 host += ":" + str(parsed_url.port)
136
137 return host
138
139
140 def url_to_destination(url, service_type="external"):
141 parts = compat.urlparse.urlsplit(url)
142 hostname = parts.hostname
143 # preserve brackets for IPv6 URLs
144 if "://[" in url:
145 hostname = "[%s]" % hostname
146 port = parts.port
147 default_port = default_ports.get(parts.scheme, None)
148 name = "%s://%s" % (parts.scheme, hostname)
149 resource = hostname
150 if not port and parts.scheme in default_ports:
151 port = default_ports[parts.scheme]
152 if port:
153 if port != default_port:
154 name += ":%d" % port
155 resource += ":%d" % port
156 return {"service": {"name": name, "resource": resource, "type": service_type}}
157
158
159 def read_pem_file(file_obj):
160 cert = b""
161 for line in file_obj:
162 if line.startswith(b"-----BEGIN CERTIFICATE-----"):
163 break
164 for line in file_obj:
165 if not line.startswith(b"-----END CERTIFICATE-----"):
166 cert += line.strip()
167 return base64.b64decode(cert)
168
169
170 def starmatch_to_regex(pattern):
171 i, n = 0, len(pattern)
172 res = []
173 while i < n:
174 c = pattern[i]
175 i = i + 1
176 if c == "*":
177 res.append(".*")
178 else:
179 res.append(re.escape(c))
180 return re.compile(r"(?:%s)\Z" % "".join(res), re.IGNORECASE | re.DOTALL)
181
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/elasticapm/utils/__init__.py b/elasticapm/utils/__init__.py
--- a/elasticapm/utils/__init__.py
+++ b/elasticapm/utils/__init__.py
@@ -143,7 +143,11 @@
# preserve brackets for IPv6 URLs
if "://[" in url:
hostname = "[%s]" % hostname
- port = parts.port
+ try:
+ port = parts.port
+ except ValueError:
+ # Malformed port, just use None rather than raising an exception
+ port = None
default_port = default_ports.get(parts.scheme, None)
name = "%s://%s" % (parts.scheme, hostname)
resource = hostname
|
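The patch above hinges on one stdlib behaviour: accessing `.port` on a split URL raises `ValueError` when the port text is not an integer, which is exactly what the docker client's request URL triggers. A minimal, self-contained sketch of that failure mode and of the fallback the fix introduces is below; the docker-style URL and the `port_or_none` helper are illustrative assumptions, not code from the elastic-apm package.

```python
from urllib.parse import urlsplit

def port_or_none(url):
    """Return the URL's port, or None when the port text cannot be parsed as an integer."""
    parts = urlsplit(url)
    try:
        return parts.port
    except ValueError:
        # Same fallback as the patched url_to_destination(): treat a malformed port as absent
        return None

# Hypothetical docker-style URL with a literal "None" port, as in the reported traceback
print(port_or_none("http+docker://localhost:None/v1.40/containers/create"))  # -> None
print(port_or_none("https://example.com:8443/path"))                          # -> 8443
```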
{"golden_diff": "diff --git a/elasticapm/utils/__init__.py b/elasticapm/utils/__init__.py\n--- a/elasticapm/utils/__init__.py\n+++ b/elasticapm/utils/__init__.py\n@@ -143,7 +143,11 @@\n # preserve brackets for IPv6 URLs\n if \"://[\" in url:\n hostname = \"[%s]\" % hostname\n- port = parts.port\n+ try:\n+ port = parts.port\n+ except ValueError:\n+ # Malformed port, just use None rather than raising an exception\n+ port = None\n default_port = default_ports.get(parts.scheme, None)\n name = \"%s://%s\" % (parts.scheme, hostname)\n resource = hostname\n", "issue": "Docker Python no longer working as soon as elastic-apm is installed\n**Describe the bug**\r\nIt seems like some patches to requests destroy the functionality of the docker python package.\r\n\r\nValueError: invalid literal for int() with base 10: 'None'\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"project\\venv\\lib\\site-packages\\django\\core\\handlers\\exception.py\", line 34, in inner\r\n response = get_response(request)\r\n File \"project\\venv\\lib\\site-packages\\django\\core\\handlers\\base.py\", line 115, in _get_response\r\n response = self.process_exception_by_middleware(e, request)\r\n File \"project\\venv\\lib\\site-packages\\django\\core\\handlers\\base.py\", line 113, in _get_response\r\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n File \"project\\tool\\src\\company_tool\\web\\api\\decorators.py\", line 21, in wrapper_view_func\r\n return view_func(request, json_data=data, *args, **kwargs)\r\n File \"project\\venv\\lib\\site-packages\\django\\views\\decorators\\csrf.py\", line 54, in wrapped_view\r\n return view_func(*args, **kwargs)\r\n File \"project\\venv\\lib\\site-packages\\django\\views\\decorators\\http.py\", line 40, in inner\r\n return func(request, *args, **kwargs)\r\n File \"project\\tool\\src\\company_tool\\web\\api\\views.py\", line 27, in start\r\n container_status = client.containers.run(image=container, command=arguments, detach=True, stdin_open=True, tty=True)\r\n File \"project\\venv\\lib\\site-packages\\docker\\models\\containers.py\", line 802, in run\r\n container = self.create(image=image, command=command,\r\n File \"project\\venv\\lib\\site-packages\\docker\\models\\containers.py\", line 861, in create\r\n resp = self.client.api.create_container(**create_kwargs)\r\n File \"project\\venv\\lib\\site-packages\\docker\\api\\container.py\", line 430, in create_container\r\n return self.create_container_from_config(config, name)\r\n File \"project\\venv\\lib\\site-packages\\docker\\api\\container.py\", line 440, in create_container_from_config\r\n res = self._post_json(u, data=config, params=params)\r\n File \"project\\venv\\lib\\site-packages\\docker\\api\\client.py\", line 289, in _post_json\r\n return self._post(url, data=json.dumps(data2), **kwargs)\r\n File \"project\\venv\\lib\\site-packages\\docker\\utils\\decorators.py\", line 46, in inner\r\n return f(self, *args, **kwargs)\r\n File \"project\\venv\\lib\\site-packages\\docker\\api\\client.py\", line 226, in _post\r\n return self.post(url, **self._set_request_timeout(kwargs))\r\n File \"project\\venv\\lib\\site-packages\\requests\\sessions.py\", line 578, in post\r\n return self.request('POST', url, data=data, json=json, **kwargs)\r\n File \"project\\venv\\lib\\site-packages\\requests\\sessions.py\", line 530, in request\r\n resp = self.send(prep, **send_kwargs)\r\n File \"project\\venv\\lib\\site-packages\\elasticapm\\utils\\wrapt\\wrappers.py\", line 561, in __call__\r\n return 
self._self_wrapper(self.__wrapped__, self._self_instance,\r\n File \"project\\venv\\lib\\site-packages\\elasticapm\\instrumentation\\packages\\base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"project\\venv\\lib\\site-packages\\elasticapm\\instrumentation\\packages\\requests.py\", line 59, in call\r\n return wrapped(*args, **kwargs)\r\n File \"project\\venv\\lib\\site-packages\\requests\\sessions.py\", line 643, in send\r\n r = adapter.send(request, **kwargs)\r\n File \"project\\venv\\lib\\site-packages\\requests\\adapters.py\", line 439, in send\r\n resp = conn.urlopen(\r\n File \"project\\venv\\lib\\site-packages\\elasticapm\\utils\\wrapt\\wrappers.py\", line 561, in __call__\r\n return self._self_wrapper(self.__wrapped__, self._self_instance,\r\n File \"project\\venv\\lib\\site-packages\\elasticapm\\instrumentation\\packages\\base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"project\\venv\\lib\\site-packages\\elasticapm\\instrumentation\\packages\\urllib3.py\", line 74, in call\r\n destination = url_to_destination(url)\r\n File \"project\\venv\\lib\\site-packages\\elasticapm\\utils\\__init__.py\", line 146, in url_to_destination\r\n port = parts.port\r\n File \"C:\\Python38\\lib\\urllib\\parse.py\", line 174, in port\r\n raise ValueError(message) from None\r\nValueError: Port could not be cast to integer value as 'None'\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Install \"docker\" and \"elastic-apm\" python package within a django app\r\n2. Load APM using default django config\r\n2. Try to use docker \r\n\r\n**Expected behavior**\r\nDocker still works\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\nimport base64\nimport os\nimport re\nfrom functools import partial\n\nfrom elasticapm.conf import constants\nfrom elasticapm.utils import compat, encoding\n\ntry:\n from functools import partialmethod\n\n partial_types = (partial, partialmethod)\nexcept ImportError:\n # Python 2\n partial_types = (partial,)\n\n\ndefault_ports = {\"https\": 443, \"http\": 80, \"postgresql\": 5432, \"mysql\": 3306, \"mssql\": 1433}\n\n\ndef varmap(func, var, context=None, name=None):\n \"\"\"\n Executes ``func(key_name, value)`` on all values,\n recursively discovering dict and list scoped\n values.\n \"\"\"\n if context is None:\n context = set()\n objid = id(var)\n if objid in context:\n return func(name, \"<...>\")\n context.add(objid)\n if isinstance(var, dict):\n ret = func(name, dict((k, varmap(func, v, context, k)) for k, v in compat.iteritems(var)))\n elif isinstance(var, (list, tuple)):\n ret = func(name, [varmap(func, f, context, name) for f in var])\n else:\n ret = func(name, var)\n context.remove(objid)\n return ret\n\n\ndef get_name_from_func(func):\n # partials don't have `__module__` or `__name__`, so we use the values from the \"inner\" function\n if isinstance(func, partial_types):\n return \"partial({})\".format(get_name_from_func(func.func))\n elif hasattr(func, \"_partialmethod\") and hasattr(func._partialmethod, \"func\"):\n return \"partial({})\".format(get_name_from_func(func._partialmethod.func))\n\n module = func.__module__\n\n if hasattr(func, \"__name__\"):\n view_name = func.__name__\n else: # Fall back if there's no __name__\n view_name = func.__class__.__name__\n\n return \"{0}.{1}\".format(module, view_name)\n\n\ndef build_name_with_http_method_prefix(name, request):\n return \" \".join((request.method, name)) if name else name\n\n\ndef is_master_process():\n # currently only recognizes uwsgi master process\n try:\n import uwsgi\n\n return os.getpid() == uwsgi.masterpid()\n except ImportError:\n return False\n\n\ndef get_url_dict(url):\n parse_result = compat.urlparse.urlparse(url)\n\n url_dict = {\n \"full\": encoding.keyword_field(url),\n \"protocol\": parse_result.scheme + \":\",\n \"hostname\": encoding.keyword_field(parse_result.hostname),\n \"pathname\": encoding.keyword_field(parse_result.path),\n }\n\n port = None if parse_result.port is None else str(parse_result.port)\n\n if port:\n url_dict[\"port\"] = port\n if parse_result.query:\n url_dict[\"search\"] = encoding.keyword_field(\"?\" + parse_result.query)\n return url_dict\n\n\ndef sanitize_url(url):\n if \"@\" not in url:\n return url\n parts = compat.urlparse.urlparse(url)\n return url.replace(\"%s:%s\" % (parts.username, parts.password), \"%s:%s\" % (parts.username, constants.MASK))\n\n\ndef get_host_from_url(url):\n parsed_url = compat.urlparse.urlparse(url)\n host = parsed_url.hostname or \" \"\n\n if parsed_url.port and default_ports.get(parsed_url.scheme) != parsed_url.port:\n host += \":\" + str(parsed_url.port)\n\n return host\n\n\ndef url_to_destination(url, service_type=\"external\"):\n parts = compat.urlparse.urlsplit(url)\n hostname = 
parts.hostname\n # preserve brackets for IPv6 URLs\n if \"://[\" in url:\n hostname = \"[%s]\" % hostname\n port = parts.port\n default_port = default_ports.get(parts.scheme, None)\n name = \"%s://%s\" % (parts.scheme, hostname)\n resource = hostname\n if not port and parts.scheme in default_ports:\n port = default_ports[parts.scheme]\n if port:\n if port != default_port:\n name += \":%d\" % port\n resource += \":%d\" % port\n return {\"service\": {\"name\": name, \"resource\": resource, \"type\": service_type}}\n\n\ndef read_pem_file(file_obj):\n cert = b\"\"\n for line in file_obj:\n if line.startswith(b\"-----BEGIN CERTIFICATE-----\"):\n break\n for line in file_obj:\n if not line.startswith(b\"-----END CERTIFICATE-----\"):\n cert += line.strip()\n return base64.b64decode(cert)\n\n\ndef starmatch_to_regex(pattern):\n i, n = 0, len(pattern)\n res = []\n while i < n:\n c = pattern[i]\n i = i + 1\n if c == \"*\":\n res.append(\".*\")\n else:\n res.append(re.escape(c))\n return re.compile(r\"(?:%s)\\Z\" % \"\".join(res), re.IGNORECASE | re.DOTALL)\n", "path": "elasticapm/utils/__init__.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\nimport base64\nimport os\nimport re\nfrom functools import partial\n\nfrom elasticapm.conf import constants\nfrom elasticapm.utils import compat, encoding\n\ntry:\n from functools import partialmethod\n\n partial_types = (partial, partialmethod)\nexcept ImportError:\n # Python 2\n partial_types = (partial,)\n\n\ndefault_ports = {\"https\": 443, \"http\": 80, \"postgresql\": 5432, \"mysql\": 3306, \"mssql\": 1433}\n\n\ndef varmap(func, var, context=None, name=None):\n \"\"\"\n Executes ``func(key_name, value)`` on all values,\n recursively discovering dict and list scoped\n values.\n \"\"\"\n if context is None:\n context = set()\n objid = id(var)\n if objid in context:\n return func(name, \"<...>\")\n context.add(objid)\n if isinstance(var, dict):\n ret = func(name, dict((k, varmap(func, v, context, k)) for k, v in compat.iteritems(var)))\n elif isinstance(var, (list, tuple)):\n ret = func(name, [varmap(func, f, context, name) for f in var])\n else:\n ret = func(name, var)\n context.remove(objid)\n return ret\n\n\ndef get_name_from_func(func):\n # partials don't have `__module__` or `__name__`, so we use the values from the \"inner\" function\n if isinstance(func, partial_types):\n return \"partial({})\".format(get_name_from_func(func.func))\n elif hasattr(func, \"_partialmethod\") and hasattr(func._partialmethod, \"func\"):\n return \"partial({})\".format(get_name_from_func(func._partialmethod.func))\n\n module = func.__module__\n\n if hasattr(func, \"__name__\"):\n view_name = func.__name__\n else: # Fall back if there's no __name__\n view_name = func.__class__.__name__\n\n return \"{0}.{1}\".format(module, view_name)\n\n\ndef build_name_with_http_method_prefix(name, request):\n return \" \".join((request.method, name)) if name else name\n\n\ndef is_master_process():\n # currently only recognizes uwsgi master process\n try:\n import uwsgi\n\n return os.getpid() == uwsgi.masterpid()\n except ImportError:\n return False\n\n\ndef get_url_dict(url):\n parse_result = compat.urlparse.urlparse(url)\n\n url_dict = {\n \"full\": encoding.keyword_field(url),\n \"protocol\": parse_result.scheme + \":\",\n \"hostname\": encoding.keyword_field(parse_result.hostname),\n \"pathname\": encoding.keyword_field(parse_result.path),\n }\n\n port = None if parse_result.port is None else str(parse_result.port)\n\n if port:\n url_dict[\"port\"] = port\n if parse_result.query:\n url_dict[\"search\"] = encoding.keyword_field(\"?\" + parse_result.query)\n return url_dict\n\n\ndef sanitize_url(url):\n if \"@\" not in url:\n return url\n parts = compat.urlparse.urlparse(url)\n return url.replace(\"%s:%s\" % (parts.username, parts.password), \"%s:%s\" % (parts.username, constants.MASK))\n\n\ndef get_host_from_url(url):\n parsed_url = compat.urlparse.urlparse(url)\n host = parsed_url.hostname or \" \"\n\n if parsed_url.port and default_ports.get(parsed_url.scheme) != parsed_url.port:\n host += \":\" + str(parsed_url.port)\n\n return host\n\n\ndef url_to_destination(url, service_type=\"external\"):\n parts = compat.urlparse.urlsplit(url)\n hostname = 
parts.hostname\n # preserve brackets for IPv6 URLs\n if \"://[\" in url:\n hostname = \"[%s]\" % hostname\n try:\n port = parts.port\n except ValueError:\n # Malformed port, just use None rather than raising an exception\n port = None\n default_port = default_ports.get(parts.scheme, None)\n name = \"%s://%s\" % (parts.scheme, hostname)\n resource = hostname\n if not port and parts.scheme in default_ports:\n port = default_ports[parts.scheme]\n if port:\n if port != default_port:\n name += \":%d\" % port\n resource += \":%d\" % port\n return {\"service\": {\"name\": name, \"resource\": resource, \"type\": service_type}}\n\n\ndef read_pem_file(file_obj):\n cert = b\"\"\n for line in file_obj:\n if line.startswith(b\"-----BEGIN CERTIFICATE-----\"):\n break\n for line in file_obj:\n if not line.startswith(b\"-----END CERTIFICATE-----\"):\n cert += line.strip()\n return base64.b64decode(cert)\n\n\ndef starmatch_to_regex(pattern):\n i, n = 0, len(pattern)\n res = []\n while i < n:\n c = pattern[i]\n i = i + 1\n if c == \"*\":\n res.append(\".*\")\n else:\n res.append(re.escape(c))\n return re.compile(r\"(?:%s)\\Z\" % \"\".join(res), re.IGNORECASE | re.DOTALL)\n", "path": "elasticapm/utils/__init__.py"}]}
| 3,409 | 166 |
gh_patches_debug_30819
|
rasdani/github-patches
|
git_diff
|
dbt-labs__dbt-core-2322
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use `show objects in schema` on Snowflake
### Describe the feature
dbt should leverage the `show terse objects in schema` query to list the tables and views in a database. Crucially, `show ....` queries _do not require a running warehouse_, so they will not queue in the virtual warehouse (leading to long perceived startup times).
Caveats:
- Snowflake's docs indicate that only 10k records can be returned from one of these queries. I'd like to talk to someone from Snowflake to get an understanding if this limit is still accurate, or if they have near-term plans to lift this restriction.
 - The existing info schema query gets away with a case-insensitive lookup using `ilike`, but we'll need to quote and case the schema name exactly for this query to succeed.
- Per [the docs](https://docs.snowflake.net/manuals/sql-reference/sql/show-objects.html), materialized views are returned with type `VIEW`, which is kind of unpleasant.
- We should post-process the results in Python instead of using `result_scan`, because selecting from `result_scan` can queue
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/snowflake/dbt/adapters/snowflake/impl.py`
Content:
```
1 from typing import Mapping, Any, Optional, List
2
3 import agate
4
5 from dbt.adapters.sql import SQLAdapter
6 from dbt.adapters.sql.impl import LIST_SCHEMAS_MACRO_NAME
7 from dbt.adapters.snowflake import SnowflakeConnectionManager
8 from dbt.adapters.snowflake import SnowflakeRelation
9 from dbt.adapters.snowflake import SnowflakeColumn
10 from dbt.contracts.graph.manifest import Manifest
11 from dbt.exceptions import RuntimeException, DatabaseException
12 from dbt.utils import filter_null_values
13
14
15 class SnowflakeAdapter(SQLAdapter):
16 Relation = SnowflakeRelation
17 Column = SnowflakeColumn
18 ConnectionManager = SnowflakeConnectionManager
19
20 AdapterSpecificConfigs = frozenset(
21 {"transient", "cluster_by", "automatic_clustering", "secure",
22 "copy_grants", "snowflake_warehouse"}
23 )
24
25 @classmethod
26 def date_function(cls):
27 return "CURRENT_TIMESTAMP()"
28
29 @classmethod
30 def _catalog_filter_table(
31 cls, table: agate.Table, manifest: Manifest
32 ) -> agate.Table:
33 # On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force
34 # the column names to their lowercased forms.
35 lowered = table.rename(
36 column_names=[c.lower() for c in table.column_names]
37 )
38 return super()._catalog_filter_table(lowered, manifest)
39
40 def _make_match_kwargs(self, database, schema, identifier):
41 quoting = self.config.quoting
42 if identifier is not None and quoting["identifier"] is False:
43 identifier = identifier.upper()
44
45 if schema is not None and quoting["schema"] is False:
46 schema = schema.upper()
47
48 if database is not None and quoting["database"] is False:
49 database = database.upper()
50
51 return filter_null_values(
52 {"identifier": identifier, "schema": schema, "database": database}
53 )
54
55 def _get_warehouse(self) -> str:
56 _, table = self.execute(
57 'select current_warehouse() as warehouse',
58 fetch=True
59 )
60 if len(table) == 0 or len(table[0]) == 0:
61 # can this happen?
62 raise RuntimeException(
63 'Could not get current warehouse: no results'
64 )
65 return str(table[0][0])
66
67 def _use_warehouse(self, warehouse: str):
68 """Use the given warehouse. Quotes are never applied."""
69 self.execute('use warehouse {}'.format(warehouse))
70
71 def pre_model_hook(self, config: Mapping[str, Any]) -> Optional[str]:
72 default_warehouse = self.config.credentials.warehouse
73 warehouse = config.get('snowflake_warehouse', default_warehouse)
74 if warehouse == default_warehouse or warehouse is None:
75 return None
76 previous = self._get_warehouse()
77 self._use_warehouse(warehouse)
78 return previous
79
80 def post_model_hook(
81 self, config: Mapping[str, Any], context: Optional[str]
82 ) -> None:
83 if context is not None:
84 self._use_warehouse(context)
85
86 def list_schemas(self, database: str) -> List[str]:
87 try:
88 results = self.execute_macro(
89 LIST_SCHEMAS_MACRO_NAME,
90 kwargs={'database': database}
91 )
92 except DatabaseException as exc:
93 msg = (
94 f'Database error while listing schemas in database '
95 f'"{database}"\n{exc}'
96 )
97 raise RuntimeException(msg)
98 # this uses 'show terse schemas in database', and the column name we
99 # want is 'name'
100
101 return [row['name'] for row in results]
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugins/snowflake/dbt/adapters/snowflake/impl.py b/plugins/snowflake/dbt/adapters/snowflake/impl.py
--- a/plugins/snowflake/dbt/adapters/snowflake/impl.py
+++ b/plugins/snowflake/dbt/adapters/snowflake/impl.py
@@ -3,7 +3,10 @@
import agate
from dbt.adapters.sql import SQLAdapter
-from dbt.adapters.sql.impl import LIST_SCHEMAS_MACRO_NAME
+from dbt.adapters.sql.impl import (
+ LIST_SCHEMAS_MACRO_NAME,
+ LIST_RELATIONS_MACRO_NAME,
+)
from dbt.adapters.snowflake import SnowflakeConnectionManager
from dbt.adapters.snowflake import SnowflakeRelation
from dbt.adapters.snowflake import SnowflakeColumn
@@ -99,3 +102,43 @@
# want is 'name'
return [row['name'] for row in results]
+
+ def list_relations_without_caching(
+ self, information_schema, schema
+ ) -> List[SnowflakeRelation]:
+ kwargs = {'information_schema': information_schema, 'schema': schema}
+ try:
+ results = self.execute_macro(
+ LIST_RELATIONS_MACRO_NAME,
+ kwargs=kwargs
+ )
+ except DatabaseException as exc:
+ # if the schema doesn't exist, we just want to return.
+ # Alternatively, we could query the list of schemas before we start
+ # and skip listing the missing ones, which sounds expensive.
+ if 'Object does not exist' in str(exc):
+ return []
+ raise
+
+ relations = []
+ quote_policy = {
+ 'database': True,
+ 'schema': True,
+ 'identifier': True
+ }
+
+ columns = ['database_name', 'schema_name', 'name', 'kind']
+ for _database, _schema, _identifier, _type in results.select(columns):
+ try:
+ _type = self.Relation.get_relation_type(_type.lower())
+ except ValueError:
+ _type = self.Relation.External
+ relations.append(self.Relation.create(
+ database=_database,
+ schema=_schema,
+ identifier=_identifier,
+ quote_policy=quote_policy,
+ type=_type
+ ))
+
+ return relations
|
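The added `list_relations_without_caching` follows the post-processing approach the issue asks for: read the rows that `show terse objects in schema` returns and classify them in Python rather than via `result_scan`. A framework-free sketch of that classification step is below; the column order, the `classify_objects` helper and the sample rows are assumptions for illustration and do not reflect dbt's actual adapter API.

```python
# Rows as returned by `show terse objects in schema`, in the column order assumed here:
# (database_name, schema_name, name, kind). Note materialized views also report kind VIEW.
KNOWN_KINDS = {"table", "view"}

def classify_objects(rows):
    relations = []
    for database_name, schema_name, name, kind in rows:
        # Anything Snowflake reports that we do not model explicitly falls back to "external"
        rel_type = kind.lower() if kind.lower() in KNOWN_KINDS else "external"
        relations.append({
            "database": database_name,
            "schema": schema_name,
            "identifier": name,
            "type": rel_type,
        })
    return relations

print(classify_objects([
    ("ANALYTICS", "PUBLIC", "ORDERS", "TABLE"),
    ("ANALYTICS", "PUBLIC", "ORDERS_V", "VIEW"),
    ("ANALYTICS", "PUBLIC", "ORDERS_EXT", "EXTERNAL TABLE"),
]))
```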
{"golden_diff": "diff --git a/plugins/snowflake/dbt/adapters/snowflake/impl.py b/plugins/snowflake/dbt/adapters/snowflake/impl.py\n--- a/plugins/snowflake/dbt/adapters/snowflake/impl.py\n+++ b/plugins/snowflake/dbt/adapters/snowflake/impl.py\n@@ -3,7 +3,10 @@\n import agate\n \n from dbt.adapters.sql import SQLAdapter\n-from dbt.adapters.sql.impl import LIST_SCHEMAS_MACRO_NAME\n+from dbt.adapters.sql.impl import (\n+ LIST_SCHEMAS_MACRO_NAME,\n+ LIST_RELATIONS_MACRO_NAME,\n+)\n from dbt.adapters.snowflake import SnowflakeConnectionManager\n from dbt.adapters.snowflake import SnowflakeRelation\n from dbt.adapters.snowflake import SnowflakeColumn\n@@ -99,3 +102,43 @@\n # want is 'name'\n \n return [row['name'] for row in results]\n+\n+ def list_relations_without_caching(\n+ self, information_schema, schema\n+ ) -> List[SnowflakeRelation]:\n+ kwargs = {'information_schema': information_schema, 'schema': schema}\n+ try:\n+ results = self.execute_macro(\n+ LIST_RELATIONS_MACRO_NAME,\n+ kwargs=kwargs\n+ )\n+ except DatabaseException as exc:\n+ # if the schema doesn't exist, we just want to return.\n+ # Alternatively, we could query the list of schemas before we start\n+ # and skip listing the missing ones, which sounds expensive.\n+ if 'Object does not exist' in str(exc):\n+ return []\n+ raise\n+\n+ relations = []\n+ quote_policy = {\n+ 'database': True,\n+ 'schema': True,\n+ 'identifier': True\n+ }\n+\n+ columns = ['database_name', 'schema_name', 'name', 'kind']\n+ for _database, _schema, _identifier, _type in results.select(columns):\n+ try:\n+ _type = self.Relation.get_relation_type(_type.lower())\n+ except ValueError:\n+ _type = self.Relation.External\n+ relations.append(self.Relation.create(\n+ database=_database,\n+ schema=_schema,\n+ identifier=_identifier,\n+ quote_policy=quote_policy,\n+ type=_type\n+ ))\n+\n+ return relations\n", "issue": "Use `show objects in schema` on Snowflake\n### Describe the feature\r\ndbt should leverage the `show terse objects in schema` query to list the tables and views in a database. Crucially, `show ....` queries _do not require a running warehouse_, so they will not queue in the virtual warehouse (leading to long perceived startup times).\r\n\r\nCaveats:\r\n - Snowflake's docs indicate that only 10k records can be returned from one of these queries. 
I'd like to talk to someone from Snowflake to get an understanding if this limit is still accurate, or if they have near-term plans to lift this restriction.\r\n - The existing info schema query gets away with a case-insensitive lookup using `ilike, but we'll need to quote and case the schema name exactly for this query to succeed.\r\n- Per [the docs](https://docs.snowflake.net/manuals/sql-reference/sql/show-objects.html), materialized views are returned with type `VIEW`, which is kind of unpleasant.\r\n- We should post-process the results in Python instead of using `result_scan`, because selecting from `result_scan` can queue\n", "before_files": [{"content": "from typing import Mapping, Any, Optional, List\n\nimport agate\n\nfrom dbt.adapters.sql import SQLAdapter\nfrom dbt.adapters.sql.impl import LIST_SCHEMAS_MACRO_NAME\nfrom dbt.adapters.snowflake import SnowflakeConnectionManager\nfrom dbt.adapters.snowflake import SnowflakeRelation\nfrom dbt.adapters.snowflake import SnowflakeColumn\nfrom dbt.contracts.graph.manifest import Manifest\nfrom dbt.exceptions import RuntimeException, DatabaseException\nfrom dbt.utils import filter_null_values\n\n\nclass SnowflakeAdapter(SQLAdapter):\n Relation = SnowflakeRelation\n Column = SnowflakeColumn\n ConnectionManager = SnowflakeConnectionManager\n\n AdapterSpecificConfigs = frozenset(\n {\"transient\", \"cluster_by\", \"automatic_clustering\", \"secure\",\n \"copy_grants\", \"snowflake_warehouse\"}\n )\n\n @classmethod\n def date_function(cls):\n return \"CURRENT_TIMESTAMP()\"\n\n @classmethod\n def _catalog_filter_table(\n cls, table: agate.Table, manifest: Manifest\n ) -> agate.Table:\n # On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force\n # the column names to their lowercased forms.\n lowered = table.rename(\n column_names=[c.lower() for c in table.column_names]\n )\n return super()._catalog_filter_table(lowered, manifest)\n\n def _make_match_kwargs(self, database, schema, identifier):\n quoting = self.config.quoting\n if identifier is not None and quoting[\"identifier\"] is False:\n identifier = identifier.upper()\n\n if schema is not None and quoting[\"schema\"] is False:\n schema = schema.upper()\n\n if database is not None and quoting[\"database\"] is False:\n database = database.upper()\n\n return filter_null_values(\n {\"identifier\": identifier, \"schema\": schema, \"database\": database}\n )\n\n def _get_warehouse(self) -> str:\n _, table = self.execute(\n 'select current_warehouse() as warehouse',\n fetch=True\n )\n if len(table) == 0 or len(table[0]) == 0:\n # can this happen?\n raise RuntimeException(\n 'Could not get current warehouse: no results'\n )\n return str(table[0][0])\n\n def _use_warehouse(self, warehouse: str):\n \"\"\"Use the given warehouse. 
Quotes are never applied.\"\"\"\n self.execute('use warehouse {}'.format(warehouse))\n\n def pre_model_hook(self, config: Mapping[str, Any]) -> Optional[str]:\n default_warehouse = self.config.credentials.warehouse\n warehouse = config.get('snowflake_warehouse', default_warehouse)\n if warehouse == default_warehouse or warehouse is None:\n return None\n previous = self._get_warehouse()\n self._use_warehouse(warehouse)\n return previous\n\n def post_model_hook(\n self, config: Mapping[str, Any], context: Optional[str]\n ) -> None:\n if context is not None:\n self._use_warehouse(context)\n\n def list_schemas(self, database: str) -> List[str]:\n try:\n results = self.execute_macro(\n LIST_SCHEMAS_MACRO_NAME,\n kwargs={'database': database}\n )\n except DatabaseException as exc:\n msg = (\n f'Database error while listing schemas in database '\n f'\"{database}\"\\n{exc}'\n )\n raise RuntimeException(msg)\n # this uses 'show terse schemas in database', and the column name we\n # want is 'name'\n\n return [row['name'] for row in results]\n", "path": "plugins/snowflake/dbt/adapters/snowflake/impl.py"}], "after_files": [{"content": "from typing import Mapping, Any, Optional, List\n\nimport agate\n\nfrom dbt.adapters.sql import SQLAdapter\nfrom dbt.adapters.sql.impl import (\n LIST_SCHEMAS_MACRO_NAME,\n LIST_RELATIONS_MACRO_NAME,\n)\nfrom dbt.adapters.snowflake import SnowflakeConnectionManager\nfrom dbt.adapters.snowflake import SnowflakeRelation\nfrom dbt.adapters.snowflake import SnowflakeColumn\nfrom dbt.contracts.graph.manifest import Manifest\nfrom dbt.exceptions import RuntimeException, DatabaseException\nfrom dbt.utils import filter_null_values\n\n\nclass SnowflakeAdapter(SQLAdapter):\n Relation = SnowflakeRelation\n Column = SnowflakeColumn\n ConnectionManager = SnowflakeConnectionManager\n\n AdapterSpecificConfigs = frozenset(\n {\"transient\", \"cluster_by\", \"automatic_clustering\", \"secure\",\n \"copy_grants\", \"snowflake_warehouse\"}\n )\n\n @classmethod\n def date_function(cls):\n return \"CURRENT_TIMESTAMP()\"\n\n @classmethod\n def _catalog_filter_table(\n cls, table: agate.Table, manifest: Manifest\n ) -> agate.Table:\n # On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force\n # the column names to their lowercased forms.\n lowered = table.rename(\n column_names=[c.lower() for c in table.column_names]\n )\n return super()._catalog_filter_table(lowered, manifest)\n\n def _make_match_kwargs(self, database, schema, identifier):\n quoting = self.config.quoting\n if identifier is not None and quoting[\"identifier\"] is False:\n identifier = identifier.upper()\n\n if schema is not None and quoting[\"schema\"] is False:\n schema = schema.upper()\n\n if database is not None and quoting[\"database\"] is False:\n database = database.upper()\n\n return filter_null_values(\n {\"identifier\": identifier, \"schema\": schema, \"database\": database}\n )\n\n def _get_warehouse(self) -> str:\n _, table = self.execute(\n 'select current_warehouse() as warehouse',\n fetch=True\n )\n if len(table) == 0 or len(table[0]) == 0:\n # can this happen?\n raise RuntimeException(\n 'Could not get current warehouse: no results'\n )\n return str(table[0][0])\n\n def _use_warehouse(self, warehouse: str):\n \"\"\"Use the given warehouse. 
Quotes are never applied.\"\"\"\n self.execute('use warehouse {}'.format(warehouse))\n\n def pre_model_hook(self, config: Mapping[str, Any]) -> Optional[str]:\n default_warehouse = self.config.credentials.warehouse\n warehouse = config.get('snowflake_warehouse', default_warehouse)\n if warehouse == default_warehouse or warehouse is None:\n return None\n previous = self._get_warehouse()\n self._use_warehouse(warehouse)\n return previous\n\n def post_model_hook(\n self, config: Mapping[str, Any], context: Optional[str]\n ) -> None:\n if context is not None:\n self._use_warehouse(context)\n\n def list_schemas(self, database: str) -> List[str]:\n try:\n results = self.execute_macro(\n LIST_SCHEMAS_MACRO_NAME,\n kwargs={'database': database}\n )\n except DatabaseException as exc:\n msg = (\n f'Database error while listing schemas in database '\n f'\"{database}\"\\n{exc}'\n )\n raise RuntimeException(msg)\n # this uses 'show terse schemas in database', and the column name we\n # want is 'name'\n\n return [row['name'] for row in results]\n\n def list_relations_without_caching(\n self, information_schema, schema\n ) -> List[SnowflakeRelation]:\n kwargs = {'information_schema': information_schema, 'schema': schema}\n try:\n results = self.execute_macro(\n LIST_RELATIONS_MACRO_NAME,\n kwargs=kwargs\n )\n except DatabaseException as exc:\n # if the schema doesn't exist, we just want to return.\n # Alternatively, we could query the list of schemas before we start\n # and skip listing the missing ones, which sounds expensive.\n if 'Object does not exist' in str(exc):\n return []\n raise\n\n relations = []\n quote_policy = {\n 'database': True,\n 'schema': True,\n 'identifier': True\n }\n\n columns = ['database_name', 'schema_name', 'name', 'kind']\n for _database, _schema, _identifier, _type in results.select(columns):\n try:\n _type = self.Relation.get_relation_type(_type.lower())\n except ValueError:\n _type = self.Relation.External\n relations.append(self.Relation.create(\n database=_database,\n schema=_schema,\n identifier=_identifier,\n quote_policy=quote_policy,\n type=_type\n ))\n\n return relations\n", "path": "plugins/snowflake/dbt/adapters/snowflake/impl.py"}]}
| 1,471 | 513 |
gh_patches_debug_19963
|
rasdani/github-patches
|
git_diff
|
ansible__ansible-35737
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
nxos_vpc_interface peer_link is not idempotent
<!---
Verify first that your issue/request is not already reported on GitHub.
Also test if the latest release, and master branch are affected too.
-->
##### ISSUE TYPE
<!--- Pick one below and delete the rest -->
- Bug Report
##### COMPONENT NAME
<!---
Name of the module, plugin, task or feature
Do not include extra details here, e.g. "vyos_command" not "the network module vyos_command" or the full path
-->
nxos_vpc_interface
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes below -->
```
ansible 2.5.0 (devel 788010d0f0) last updated 2018/01/08 11:49:21 (GMT -400)
config file = /etc/ansible/ansible.cfg
configured module search path = [u'/root/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
ansible python module location = /root/agents-ci/ansible/lib/ansible
executable location = /root/agents-ci/ansible/bin/ansible
python version = 2.7.6 (default, Oct 26 2016, 20:30:19) [GCC 4.8.4]
```
##### OS / ENVIRONMENT
<!---
Mention the OS you are running Ansible from, and the OS you are
managing, or say "N/A" for anything that is not platform-specific.
Also mention the specific version of what you are trying to control,
e.g. if this is a network bug the version of firmware on the network device.
-->
Ansible Server : Ubuntu 14.04
Device: N9K running 7.0(3)I5(2)
##### SUMMARY
<!--- Explain the problem briefly -->
If we set peer_link to False, it is not idempotent. Setting it to True is ok.
Note: I will open a PR shortly to address the above issue
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/ansible/modules/network/nxos/nxos_vpc_interface.py`
Content:
```
1 #!/usr/bin/python
2 #
3 # This file is part of Ansible
4 #
5 # Ansible is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Ansible is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
17 #
18
19 ANSIBLE_METADATA = {'metadata_version': '1.1',
20 'status': ['preview'],
21 'supported_by': 'network'}
22
23 DOCUMENTATION = '''
24 ---
25 module: nxos_vpc_interface
26 extends_documentation_fragment: nxos
27 version_added: "2.2"
28 short_description: Manages interface VPC configuration
29 description:
30 - Manages interface VPC configuration
31 author:
32 - Jason Edelman (@jedelman8)
33 - Gabriele Gerbino (@GGabriele)
34 notes:
35 - Tested against NXOSv 7.3.(0)D1(1) on VIRL
36 - Either vpc or peer_link param is required, but not both.
37 - C(state=absent) removes whatever VPC config is on a port-channel
38 if one exists.
39 - Re-assigning a vpc or peerlink from one portchannel to another is not
40 supported. The module will force the user to unconfigure an existing
41 vpc/pl before configuring the same value on a new portchannel
42 options:
43 portchannel:
44 description:
45 - Group number of the portchannel that will be configured.
46 required: true
47 vpc:
48 description:
49 - VPC group/id that will be configured on associated portchannel.
50 required: false
51 default: null
52 peer_link:
53 description:
54 - Set to true/false for peer link config on associated portchannel.
55 required: false
56 default: null
57 state:
58 description:
59 - Manages desired state of the resource.
60 required: true
61 choices: ['present','absent']
62 '''
63
64 EXAMPLES = '''
65 - nxos_vpc_interface:
66 portchannel: 10
67 vpc: 100
68 '''
69
70 RETURN = '''
71 commands:
72 description: commands sent to the device
73 returned: always
74 type: list
75 sample: ["interface port-channel100", "vpc 10"]
76 '''
77
78 from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
79 from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
80 from ansible.module_utils.basic import AnsibleModule
81
82
83 def flatten_list(command_lists):
84 flat_command_list = []
85 for command in command_lists:
86 if isinstance(command, list):
87 flat_command_list.extend(command)
88 else:
89 flat_command_list.append(command)
90 return flat_command_list
91
92
93 def get_portchannel_list(module):
94 portchannels = []
95 pc_list = []
96
97 try:
98 body = run_commands(module, ['show port-channel summary | json'])[0]
99 pc_list = body['TABLE_channel']['ROW_channel']
100 except (KeyError, AttributeError, TypeError):
101 return portchannels
102
103 if pc_list:
104 if isinstance(pc_list, dict):
105 pc_list = [pc_list]
106
107 for pc in pc_list:
108 portchannels.append(pc['group'])
109
110 return portchannels
111
112
113 def get_existing_portchannel_to_vpc_mappings(module):
114 pc_vpc_mapping = {}
115
116 try:
117 body = run_commands(module, ['show vpc brief | json'])[0]
118 vpc_table = body['TABLE_vpc']['ROW_vpc']
119 except (KeyError, AttributeError, TypeError):
120 vpc_table = None
121
122 if vpc_table:
123 if isinstance(vpc_table, dict):
124 vpc_table = [vpc_table]
125
126 for vpc in vpc_table:
127 pc_vpc_mapping[str(vpc['vpc-id'])] = str(vpc['vpc-ifindex'])
128
129 return pc_vpc_mapping
130
131
132 def peer_link_exists(module):
133 found = False
134 run = get_config(module, flags=['vpc'])
135
136 vpc_list = run.split('\n')
137 for each in vpc_list:
138 if 'peer-link' in each:
139 found = True
140 return found
141
142
143 def get_active_vpc_peer_link(module):
144 peer_link = None
145
146 try:
147 body = run_commands(module, ['show vpc brief | json'])[0]
148 peer_link = body['TABLE_peerlink']['ROW_peerlink']['peerlink-ifindex']
149 except (KeyError, AttributeError, TypeError):
150 return peer_link
151
152 return peer_link
153
154
155 def get_portchannel_vpc_config(module, portchannel):
156 peer_link_pc = None
157 peer_link = False
158 vpc = ""
159 pc = ""
160 config = {}
161
162 try:
163 body = run_commands(module, ['show vpc brief | json'])[0]
164 table = body['TABLE_peerlink']['ROW_peerlink']
165 except (KeyError, AttributeError, TypeError):
166 table = {}
167
168 if table:
169 peer_link_pc = table.get('peerlink-ifindex', None)
170
171 if peer_link_pc:
172 plpc = str(peer_link_pc[2:])
173 if portchannel == plpc:
174 config['portchannel'] = portchannel
175 config['peer-link'] = True
176 config['vpc'] = vpc
177
178 mapping = get_existing_portchannel_to_vpc_mappings(module)
179
180 for existing_vpc, port_channel in mapping.items():
181 port_ch = str(port_channel[2:])
182 if port_ch == portchannel:
183 pc = port_ch
184 vpc = str(existing_vpc)
185
186 config['portchannel'] = pc
187 config['peer-link'] = peer_link
188 config['vpc'] = vpc
189
190 return config
191
192
193 def get_commands_to_config_vpc_interface(portchannel, delta, config_value, existing):
194 commands = []
195
196 if delta.get('peer-link') is False and existing.get('peer-link') is True:
197 command = 'no vpc peer-link'
198 commands.append('no vpc peer-link')
199 commands.insert(0, 'interface port-channel{0}'.format(portchannel))
200
201 elif delta.get('peer-link') or not existing.get('vpc'):
202 command = 'vpc {0}'.format(config_value)
203 commands.append(command)
204 commands.insert(0, 'interface port-channel{0}'.format(portchannel))
205
206 return commands
207
208
209 def state_present(portchannel, delta, config_value, existing):
210 commands = []
211
212 command = get_commands_to_config_vpc_interface(
213 portchannel,
214 delta,
215 config_value,
216 existing
217 )
218 commands.append(command)
219
220 return commands
221
222
223 def state_absent(portchannel, existing):
224 commands = []
225 if existing.get('vpc'):
226 command = 'no vpc'
227 commands.append(command)
228 elif existing.get('peer-link'):
229 command = 'no vpc peer-link'
230 commands.append(command)
231 if commands:
232 commands.insert(0, 'interface port-channel{0}'.format(portchannel))
233
234 return commands
235
236
237 def main():
238 argument_spec = dict(
239 portchannel=dict(required=True, type='str'),
240 vpc=dict(required=False, type='str'),
241 peer_link=dict(required=False, type='bool'),
242 state=dict(choices=['absent', 'present'], default='present')
243 )
244
245 argument_spec.update(nxos_argument_spec)
246
247 module = AnsibleModule(argument_spec=argument_spec,
248 mutually_exclusive=[['vpc', 'peer_link']],
249 supports_check_mode=True)
250
251 warnings = list()
252 commands = []
253 check_args(module, warnings)
254 results = {'changed': False, 'warnings': warnings}
255
256 portchannel = module.params['portchannel']
257 vpc = module.params['vpc']
258 peer_link = module.params['peer_link']
259 state = module.params['state']
260
261 args = {'portchannel': portchannel, 'vpc': vpc, 'peer-link': peer_link}
262 active_peer_link = None
263
264 if portchannel not in get_portchannel_list(module):
265 if not portchannel.isdigit() or int(portchannel) not in get_portchannel_list(module):
266 module.fail_json(msg="The portchannel you are trying to make a"
267 " VPC or PL is not created yet. "
268 "Create it first!")
269 if vpc:
270 mapping = get_existing_portchannel_to_vpc_mappings(module)
271
272 if vpc in mapping and portchannel != mapping[vpc].strip('Po'):
273 module.fail_json(msg="This vpc is already configured on "
274 "another portchannel. Remove it first "
275 "before trying to assign it here. ",
276 existing_portchannel=mapping[vpc])
277
278 for vpcid, existing_pc in mapping.items():
279 if portchannel == existing_pc.strip('Po') and vpcid != vpc:
280 module.fail_json(msg="This portchannel already has another"
281 " VPC configured. Remove it first "
282 "before assigning this one",
283 existing_vpc=vpcid)
284
285 if peer_link_exists(module):
286 active_peer_link = get_active_vpc_peer_link(module)
287 if active_peer_link[-2:] == portchannel:
288 module.fail_json(msg="That port channel is the current "
289 "PEER LINK. Remove it if you want it"
290 " to be a VPC")
291 config_value = vpc
292
293 elif peer_link is not None:
294 if peer_link_exists(module):
295 active_peer_link = get_active_vpc_peer_link(module)[2::]
296 if active_peer_link != portchannel:
297 if peer_link:
298 module.fail_json(msg="A peer link already exists on"
299 " the device. Remove it first",
300 current_peer_link='Po{0}'.format(active_peer_link))
301 config_value = 'peer-link'
302
303 proposed = dict((k, v) for k, v in args.items() if v is not None)
304 existing = get_portchannel_vpc_config(module, portchannel)
305
306 if state == 'present':
307 delta = dict(set(proposed.items()).difference(existing.items()))
308 if delta:
309 commands = state_present(portchannel, delta, config_value, existing)
310
311 elif state == 'absent' and existing:
312 commands = state_absent(portchannel, existing)
313
314 cmds = flatten_list(commands)
315 if cmds:
316 if module.check_mode:
317 module.exit_json(changed=True, commands=cmds)
318 else:
319 load_config(module, cmds)
320 results['changed'] = True
321 if 'configure' in cmds:
322 cmds.pop(0)
323
324 results['commands'] = cmds
325 module.exit_json(**results)
326
327
328 if __name__ == '__main__':
329 main()
330
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/ansible/modules/network/nxos/nxos_vpc_interface.py b/lib/ansible/modules/network/nxos/nxos_vpc_interface.py
--- a/lib/ansible/modules/network/nxos/nxos_vpc_interface.py
+++ b/lib/ansible/modules/network/nxos/nxos_vpc_interface.py
@@ -193,12 +193,15 @@
def get_commands_to_config_vpc_interface(portchannel, delta, config_value, existing):
commands = []
- if delta.get('peer-link') is False and existing.get('peer-link') is True:
- command = 'no vpc peer-link'
+ if not delta.get('peer-link') and existing.get('peer-link'):
commands.append('no vpc peer-link')
commands.insert(0, 'interface port-channel{0}'.format(portchannel))
- elif delta.get('peer-link') or not existing.get('vpc'):
+ elif delta.get('peer-link') and not existing.get('peer-link'):
+ commands.append('vpc peer-link')
+ commands.insert(0, 'interface port-channel{0}'.format(portchannel))
+
+ elif delta.get('vpc') and not existing.get('vpc'):
command = 'vpc {0}'.format(config_value)
commands.append(command)
commands.insert(0, 'interface port-channel{0}'.format(portchannel))
verification_info:
{"golden_diff": "diff --git a/lib/ansible/modules/network/nxos/nxos_vpc_interface.py b/lib/ansible/modules/network/nxos/nxos_vpc_interface.py\n--- a/lib/ansible/modules/network/nxos/nxos_vpc_interface.py\n+++ b/lib/ansible/modules/network/nxos/nxos_vpc_interface.py\n@@ -193,12 +193,15 @@\n def get_commands_to_config_vpc_interface(portchannel, delta, config_value, existing):\n commands = []\n \n- if delta.get('peer-link') is False and existing.get('peer-link') is True:\n- command = 'no vpc peer-link'\n+ if not delta.get('peer-link') and existing.get('peer-link'):\n commands.append('no vpc peer-link')\n commands.insert(0, 'interface port-channel{0}'.format(portchannel))\n \n- elif delta.get('peer-link') or not existing.get('vpc'):\n+ elif delta.get('peer-link') and not existing.get('peer-link'):\n+ commands.append('vpc peer-link')\n+ commands.insert(0, 'interface port-channel{0}'.format(portchannel))\n+\n+ elif delta.get('vpc') and not existing.get('vpc'):\n command = 'vpc {0}'.format(config_value)\n commands.append(command)\n commands.insert(0, 'interface port-channel{0}'.format(portchannel))\n", "issue": "nxos_vpc_interface peer_link is not idempotent\n<!---\r\nVerify first that your issue/request is not already reported on GitHub.\r\nAlso test if the latest release, and master branch are affected too.\r\n-->\r\n\r\n##### ISSUE TYPE\r\n<!--- Pick one below and delete the rest -->\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\n<!---\r\nName of the module, plugin, task or feature\r\nDo not include extra details here, e.g. \"vyos_command\" not \"the network module vyos_command\" or the full path\r\n-->\r\nnxos_vpc_Interface\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \"ansible --version\" between quotes below -->\r\n```\r\nansible 2.5.0 (devel 788010d0f0) last updated 2018/01/08 11:49:21 (GMT -400)\r\n config file = /etc/ansible/ansible.cfg\r\n configured module search path = [u'/root/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']\r\n ansible python module location = /root/agents-ci/ansible/lib/ansible\r\n executable location = /root/agents-ci/ansible/bin/ansible\r\n python version = 2.7.6 (default, Oct 26 2016, 20:30:19) [GCC 4.8.4]\r\n```\r\n\r\n##### OS / ENVIRONMENT\r\n<!---\r\nMention the OS you are running Ansible from, and the OS you are\r\nmanaging, or say \"N/A\" for anything that is not platform-specific.\r\nAlso mention the specific version of what you are trying to control,\r\ne.g. if this is a network bug the version of firmware on the network device.\r\n-->\r\nAnsible Server : Ubuntu 14.04\r\nDevice: N9K running 7.0(3)I5(2)\r\n\r\n##### SUMMARY\r\n<!--- Explain the problem briefly -->\r\n\r\nIf we set peer_link to False, it is not idempotent. Setting it to True is ok.\r\n\r\nNote: I will open a PR shortly to address the above issue\n", "before_files": [{"content": "#!/usr/bin/python\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n#\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'network'}\n\nDOCUMENTATION = '''\n---\nmodule: nxos_vpc_interface\nextends_documentation_fragment: nxos\nversion_added: \"2.2\"\nshort_description: Manages interface VPC configuration\ndescription:\n - Manages interface VPC configuration\nauthor:\n - Jason Edelman (@jedelman8)\n - Gabriele Gerbino (@GGabriele)\nnotes:\n - Tested against NXOSv 7.3.(0)D1(1) on VIRL\n - Either vpc or peer_link param is required, but not both.\n - C(state=absent) removes whatever VPC config is on a port-channel\n if one exists.\n - Re-assigning a vpc or peerlink from one portchannel to another is not\n supported. The module will force the user to unconfigure an existing\n vpc/pl before configuring the same value on a new portchannel\noptions:\n portchannel:\n description:\n - Group number of the portchannel that will be configured.\n required: true\n vpc:\n description:\n - VPC group/id that will be configured on associated portchannel.\n required: false\n default: null\n peer_link:\n description:\n - Set to true/false for peer link config on associated portchannel.\n required: false\n default: null\n state:\n description:\n - Manages desired state of the resource.\n required: true\n choices: ['present','absent']\n'''\n\nEXAMPLES = '''\n- nxos_vpc_interface:\n portchannel: 10\n vpc: 100\n'''\n\nRETURN = '''\ncommands:\n description: commands sent to the device\n returned: always\n type: list\n sample: [\"interface port-channel100\", \"vpc 10\"]\n'''\n\nfrom ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands\nfrom ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args\nfrom ansible.module_utils.basic import AnsibleModule\n\n\ndef flatten_list(command_lists):\n flat_command_list = []\n for command in command_lists:\n if isinstance(command, list):\n flat_command_list.extend(command)\n else:\n flat_command_list.append(command)\n return flat_command_list\n\n\ndef get_portchannel_list(module):\n portchannels = []\n pc_list = []\n\n try:\n body = run_commands(module, ['show port-channel summary | json'])[0]\n pc_list = body['TABLE_channel']['ROW_channel']\n except (KeyError, AttributeError, TypeError):\n return portchannels\n\n if pc_list:\n if isinstance(pc_list, dict):\n pc_list = [pc_list]\n\n for pc in pc_list:\n portchannels.append(pc['group'])\n\n return portchannels\n\n\ndef get_existing_portchannel_to_vpc_mappings(module):\n pc_vpc_mapping = {}\n\n try:\n body = run_commands(module, ['show vpc brief | json'])[0]\n vpc_table = body['TABLE_vpc']['ROW_vpc']\n except (KeyError, AttributeError, TypeError):\n vpc_table = None\n\n if vpc_table:\n if isinstance(vpc_table, dict):\n vpc_table = [vpc_table]\n\n for vpc in vpc_table:\n pc_vpc_mapping[str(vpc['vpc-id'])] = str(vpc['vpc-ifindex'])\n\n return pc_vpc_mapping\n\n\ndef peer_link_exists(module):\n found = False\n run = get_config(module, flags=['vpc'])\n\n vpc_list = run.split('\\n')\n for each in vpc_list:\n if 'peer-link' in each:\n found = True\n return found\n\n\ndef get_active_vpc_peer_link(module):\n peer_link = None\n\n try:\n body = run_commands(module, ['show vpc brief | json'])[0]\n peer_link = body['TABLE_peerlink']['ROW_peerlink']['peerlink-ifindex']\n except (KeyError, AttributeError, TypeError):\n return peer_link\n\n return peer_link\n\n\ndef get_portchannel_vpc_config(module, portchannel):\n peer_link_pc = None\n peer_link = False\n vpc = \"\"\n pc = 
\"\"\n config = {}\n\n try:\n body = run_commands(module, ['show vpc brief | json'])[0]\n table = body['TABLE_peerlink']['ROW_peerlink']\n except (KeyError, AttributeError, TypeError):\n table = {}\n\n if table:\n peer_link_pc = table.get('peerlink-ifindex', None)\n\n if peer_link_pc:\n plpc = str(peer_link_pc[2:])\n if portchannel == plpc:\n config['portchannel'] = portchannel\n config['peer-link'] = True\n config['vpc'] = vpc\n\n mapping = get_existing_portchannel_to_vpc_mappings(module)\n\n for existing_vpc, port_channel in mapping.items():\n port_ch = str(port_channel[2:])\n if port_ch == portchannel:\n pc = port_ch\n vpc = str(existing_vpc)\n\n config['portchannel'] = pc\n config['peer-link'] = peer_link\n config['vpc'] = vpc\n\n return config\n\n\ndef get_commands_to_config_vpc_interface(portchannel, delta, config_value, existing):\n commands = []\n\n if delta.get('peer-link') is False and existing.get('peer-link') is True:\n command = 'no vpc peer-link'\n commands.append('no vpc peer-link')\n commands.insert(0, 'interface port-channel{0}'.format(portchannel))\n\n elif delta.get('peer-link') or not existing.get('vpc'):\n command = 'vpc {0}'.format(config_value)\n commands.append(command)\n commands.insert(0, 'interface port-channel{0}'.format(portchannel))\n\n return commands\n\n\ndef state_present(portchannel, delta, config_value, existing):\n commands = []\n\n command = get_commands_to_config_vpc_interface(\n portchannel,\n delta,\n config_value,\n existing\n )\n commands.append(command)\n\n return commands\n\n\ndef state_absent(portchannel, existing):\n commands = []\n if existing.get('vpc'):\n command = 'no vpc'\n commands.append(command)\n elif existing.get('peer-link'):\n command = 'no vpc peer-link'\n commands.append(command)\n if commands:\n commands.insert(0, 'interface port-channel{0}'.format(portchannel))\n\n return commands\n\n\ndef main():\n argument_spec = dict(\n portchannel=dict(required=True, type='str'),\n vpc=dict(required=False, type='str'),\n peer_link=dict(required=False, type='bool'),\n state=dict(choices=['absent', 'present'], default='present')\n )\n\n argument_spec.update(nxos_argument_spec)\n\n module = AnsibleModule(argument_spec=argument_spec,\n mutually_exclusive=[['vpc', 'peer_link']],\n supports_check_mode=True)\n\n warnings = list()\n commands = []\n check_args(module, warnings)\n results = {'changed': False, 'warnings': warnings}\n\n portchannel = module.params['portchannel']\n vpc = module.params['vpc']\n peer_link = module.params['peer_link']\n state = module.params['state']\n\n args = {'portchannel': portchannel, 'vpc': vpc, 'peer-link': peer_link}\n active_peer_link = None\n\n if portchannel not in get_portchannel_list(module):\n if not portchannel.isdigit() or int(portchannel) not in get_portchannel_list(module):\n module.fail_json(msg=\"The portchannel you are trying to make a\"\n \" VPC or PL is not created yet. \"\n \"Create it first!\")\n if vpc:\n mapping = get_existing_portchannel_to_vpc_mappings(module)\n\n if vpc in mapping and portchannel != mapping[vpc].strip('Po'):\n module.fail_json(msg=\"This vpc is already configured on \"\n \"another portchannel. Remove it first \"\n \"before trying to assign it here. \",\n existing_portchannel=mapping[vpc])\n\n for vpcid, existing_pc in mapping.items():\n if portchannel == existing_pc.strip('Po') and vpcid != vpc:\n module.fail_json(msg=\"This portchannel already has another\"\n \" VPC configured. 
Remove it first \"\n \"before assigning this one\",\n existing_vpc=vpcid)\n\n if peer_link_exists(module):\n active_peer_link = get_active_vpc_peer_link(module)\n if active_peer_link[-2:] == portchannel:\n module.fail_json(msg=\"That port channel is the current \"\n \"PEER LINK. Remove it if you want it\"\n \" to be a VPC\")\n config_value = vpc\n\n elif peer_link is not None:\n if peer_link_exists(module):\n active_peer_link = get_active_vpc_peer_link(module)[2::]\n if active_peer_link != portchannel:\n if peer_link:\n module.fail_json(msg=\"A peer link already exists on\"\n \" the device. Remove it first\",\n current_peer_link='Po{0}'.format(active_peer_link))\n config_value = 'peer-link'\n\n proposed = dict((k, v) for k, v in args.items() if v is not None)\n existing = get_portchannel_vpc_config(module, portchannel)\n\n if state == 'present':\n delta = dict(set(proposed.items()).difference(existing.items()))\n if delta:\n commands = state_present(portchannel, delta, config_value, existing)\n\n elif state == 'absent' and existing:\n commands = state_absent(portchannel, existing)\n\n cmds = flatten_list(commands)\n if cmds:\n if module.check_mode:\n module.exit_json(changed=True, commands=cmds)\n else:\n load_config(module, cmds)\n results['changed'] = True\n if 'configure' in cmds:\n cmds.pop(0)\n\n results['commands'] = cmds\n module.exit_json(**results)\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/network/nxos/nxos_vpc_interface.py"}], "after_files": [{"content": "#!/usr/bin/python\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'network'}\n\nDOCUMENTATION = '''\n---\nmodule: nxos_vpc_interface\nextends_documentation_fragment: nxos\nversion_added: \"2.2\"\nshort_description: Manages interface VPC configuration\ndescription:\n - Manages interface VPC configuration\nauthor:\n - Jason Edelman (@jedelman8)\n - Gabriele Gerbino (@GGabriele)\nnotes:\n - Tested against NXOSv 7.3.(0)D1(1) on VIRL\n - Either vpc or peer_link param is required, but not both.\n - C(state=absent) removes whatever VPC config is on a port-channel\n if one exists.\n - Re-assigning a vpc or peerlink from one portchannel to another is not\n supported. 
The module will force the user to unconfigure an existing\n vpc/pl before configuring the same value on a new portchannel\noptions:\n portchannel:\n description:\n - Group number of the portchannel that will be configured.\n required: true\n vpc:\n description:\n - VPC group/id that will be configured on associated portchannel.\n required: false\n default: null\n peer_link:\n description:\n - Set to true/false for peer link config on associated portchannel.\n required: false\n default: null\n state:\n description:\n - Manages desired state of the resource.\n required: true\n choices: ['present','absent']\n'''\n\nEXAMPLES = '''\n- nxos_vpc_interface:\n portchannel: 10\n vpc: 100\n'''\n\nRETURN = '''\ncommands:\n description: commands sent to the device\n returned: always\n type: list\n sample: [\"interface port-channel100\", \"vpc 10\"]\n'''\n\nfrom ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands\nfrom ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args\nfrom ansible.module_utils.basic import AnsibleModule\n\n\ndef flatten_list(command_lists):\n flat_command_list = []\n for command in command_lists:\n if isinstance(command, list):\n flat_command_list.extend(command)\n else:\n flat_command_list.append(command)\n return flat_command_list\n\n\ndef get_portchannel_list(module):\n portchannels = []\n pc_list = []\n\n try:\n body = run_commands(module, ['show port-channel summary | json'])[0]\n pc_list = body['TABLE_channel']['ROW_channel']\n except (KeyError, AttributeError, TypeError):\n return portchannels\n\n if pc_list:\n if isinstance(pc_list, dict):\n pc_list = [pc_list]\n\n for pc in pc_list:\n portchannels.append(pc['group'])\n\n return portchannels\n\n\ndef get_existing_portchannel_to_vpc_mappings(module):\n pc_vpc_mapping = {}\n\n try:\n body = run_commands(module, ['show vpc brief | json'])[0]\n vpc_table = body['TABLE_vpc']['ROW_vpc']\n except (KeyError, AttributeError, TypeError):\n vpc_table = None\n\n if vpc_table:\n if isinstance(vpc_table, dict):\n vpc_table = [vpc_table]\n\n for vpc in vpc_table:\n pc_vpc_mapping[str(vpc['vpc-id'])] = str(vpc['vpc-ifindex'])\n\n return pc_vpc_mapping\n\n\ndef peer_link_exists(module):\n found = False\n run = get_config(module, flags=['vpc'])\n\n vpc_list = run.split('\\n')\n for each in vpc_list:\n if 'peer-link' in each:\n found = True\n return found\n\n\ndef get_active_vpc_peer_link(module):\n peer_link = None\n\n try:\n body = run_commands(module, ['show vpc brief | json'])[0]\n peer_link = body['TABLE_peerlink']['ROW_peerlink']['peerlink-ifindex']\n except (KeyError, AttributeError, TypeError):\n return peer_link\n\n return peer_link\n\n\ndef get_portchannel_vpc_config(module, portchannel):\n peer_link_pc = None\n peer_link = False\n vpc = \"\"\n pc = \"\"\n config = {}\n\n try:\n body = run_commands(module, ['show vpc brief | json'])[0]\n table = body['TABLE_peerlink']['ROW_peerlink']\n except (KeyError, AttributeError, TypeError):\n table = {}\n\n if table:\n peer_link_pc = table.get('peerlink-ifindex', None)\n\n if peer_link_pc:\n plpc = str(peer_link_pc[2:])\n if portchannel == plpc:\n config['portchannel'] = portchannel\n config['peer-link'] = True\n config['vpc'] = vpc\n\n mapping = get_existing_portchannel_to_vpc_mappings(module)\n\n for existing_vpc, port_channel in mapping.items():\n port_ch = str(port_channel[2:])\n if port_ch == portchannel:\n pc = port_ch\n vpc = str(existing_vpc)\n\n config['portchannel'] = pc\n config['peer-link'] = peer_link\n config['vpc'] = 
vpc\n\n return config\n\n\ndef get_commands_to_config_vpc_interface(portchannel, delta, config_value, existing):\n commands = []\n\n if not delta.get('peer-link') and existing.get('peer-link'):\n commands.append('no vpc peer-link')\n commands.insert(0, 'interface port-channel{0}'.format(portchannel))\n\n elif delta.get('peer-link') and not existing.get('peer-link'):\n commands.append('vpc peer-link')\n commands.insert(0, 'interface port-channel{0}'.format(portchannel))\n\n elif delta.get('vpc') and not existing.get('vpc'):\n command = 'vpc {0}'.format(config_value)\n commands.append(command)\n commands.insert(0, 'interface port-channel{0}'.format(portchannel))\n\n return commands\n\n\ndef state_present(portchannel, delta, config_value, existing):\n commands = []\n\n command = get_commands_to_config_vpc_interface(\n portchannel,\n delta,\n config_value,\n existing\n )\n commands.append(command)\n\n return commands\n\n\ndef state_absent(portchannel, existing):\n commands = []\n if existing.get('vpc'):\n command = 'no vpc'\n commands.append(command)\n elif existing.get('peer-link'):\n command = 'no vpc peer-link'\n commands.append(command)\n if commands:\n commands.insert(0, 'interface port-channel{0}'.format(portchannel))\n\n return commands\n\n\ndef main():\n argument_spec = dict(\n portchannel=dict(required=True, type='str'),\n vpc=dict(required=False, type='str'),\n peer_link=dict(required=False, type='bool'),\n state=dict(choices=['absent', 'present'], default='present')\n )\n\n argument_spec.update(nxos_argument_spec)\n\n module = AnsibleModule(argument_spec=argument_spec,\n mutually_exclusive=[['vpc', 'peer_link']],\n supports_check_mode=True)\n\n warnings = list()\n commands = []\n check_args(module, warnings)\n results = {'changed': False, 'warnings': warnings}\n\n portchannel = module.params['portchannel']\n vpc = module.params['vpc']\n peer_link = module.params['peer_link']\n state = module.params['state']\n\n args = {'portchannel': portchannel, 'vpc': vpc, 'peer-link': peer_link}\n active_peer_link = None\n\n if portchannel not in get_portchannel_list(module):\n if not portchannel.isdigit() or int(portchannel) not in get_portchannel_list(module):\n module.fail_json(msg=\"The portchannel you are trying to make a\"\n \" VPC or PL is not created yet. \"\n \"Create it first!\")\n if vpc:\n mapping = get_existing_portchannel_to_vpc_mappings(module)\n\n if vpc in mapping and portchannel != mapping[vpc].strip('Po'):\n module.fail_json(msg=\"This vpc is already configured on \"\n \"another portchannel. Remove it first \"\n \"before trying to assign it here. \",\n existing_portchannel=mapping[vpc])\n\n for vpcid, existing_pc in mapping.items():\n if portchannel == existing_pc.strip('Po') and vpcid != vpc:\n module.fail_json(msg=\"This portchannel already has another\"\n \" VPC configured. Remove it first \"\n \"before assigning this one\",\n existing_vpc=vpcid)\n\n if peer_link_exists(module):\n active_peer_link = get_active_vpc_peer_link(module)\n if active_peer_link[-2:] == portchannel:\n module.fail_json(msg=\"That port channel is the current \"\n \"PEER LINK. Remove it if you want it\"\n \" to be a VPC\")\n config_value = vpc\n\n elif peer_link is not None:\n if peer_link_exists(module):\n active_peer_link = get_active_vpc_peer_link(module)[2::]\n if active_peer_link != portchannel:\n if peer_link:\n module.fail_json(msg=\"A peer link already exists on\"\n \" the device. 
Remove it first\",\n current_peer_link='Po{0}'.format(active_peer_link))\n config_value = 'peer-link'\n\n proposed = dict((k, v) for k, v in args.items() if v is not None)\n existing = get_portchannel_vpc_config(module, portchannel)\n\n if state == 'present':\n delta = dict(set(proposed.items()).difference(existing.items()))\n if delta:\n commands = state_present(portchannel, delta, config_value, existing)\n\n elif state == 'absent' and existing:\n commands = state_absent(portchannel, existing)\n\n cmds = flatten_list(commands)\n if cmds:\n if module.check_mode:\n module.exit_json(changed=True, commands=cmds)\n else:\n load_config(module, cmds)\n results['changed'] = True\n if 'configure' in cmds:\n cmds.pop(0)\n\n results['commands'] = cmds\n module.exit_json(**results)\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/network/nxos/nxos_vpc_interface.py"}]}
num_tokens: 3,993 | num_tokens_diff: 303
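
The fix in the record above hinges on replacing identity checks such as `delta.get('peer-link') is False` with plain truthiness tests and splitting the peer-link and vpc branches apart. A minimal, self-contained sketch (not taken from the module itself) of why the two styles of check diverge when the `delta` dict simply lacks the key:

```python
# Hypothetical delta dicts, shaped like dict(set(proposed.items()).difference(existing.items()))
delta_key_absent = {}                   # 'peer-link' did not differ, so the key is missing
delta_key_false = {'peer-link': False}  # 'peer-link' was explicitly proposed as False

for delta in (delta_key_absent, delta_key_false):
    value = delta.get('peer-link')
    # identity check vs. truthiness check
    print(value is False, not value)

# delta_key_absent -> False True   (only the truthiness form treats "missing" like "disabled")
# delta_key_false  -> True  True
```
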
problem_id: gh_patches_debug_32064 | source: rasdani/github-patches | task_type: git_diff | in_source_id: apache__airflow-14274
prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
upgrade_check fails db version check
**Apache Airflow version**: 1.10.14 with AWS RDS mysql 5.7.26 as metastore db
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): v1.16.15
**Environment**: DEV
- **Cloud provider or hardware configuration**: AWS
- **OS** (e.g. from /etc/os-release): Debian GNU/Linux 10 (buster)
- **Kernel** (e.g. `uname -a`): Linux airflow-scheduler-765f664c56-4bsfq 4.14.186-146.268.amzn2.x86_64 #1 SMP Tue Jul 14 18:16:52 UTC 2020 x86_64 GNU/Linux
- **Install tools**:
- **Others**: Running on K8S as docker container with apache/airflow:1.10.14 as base
**What happened**: Running `airflow upgrade_check` returns the following error:
```
airflow@airflow-web-54d6577c8b-g9vcn:/opt/airflow$ airflow upgrade_check
==================================================== STATUS ====================================================
Check for latest versions of apache-airflow and checker...............................................SUCCESS
Remove airflow.AirflowMacroPlugin class...............................................................SUCCESS
/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/helpers.py:442: DeprecationWarning: Importing 'DummyOperator' directly from 'airflow.operators' has been deprecated. Please import from 'airflow.operators.[operator_module]' instead. Support for direct imports will be dropped entirely in Airflow 2.0.
DeprecationWarning)
Ensure users are not using custom metaclasses in custom operators.....................................SUCCESS
Chain between DAG and operator not allowed............................................................SUCCESS
Connection.conn_type is not nullable..................................................................SUCCESS
Custom Executors now require full path................................................................SUCCESS
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 37, in <module>
args.func(args)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/checker.py", line 118, in run
all_problems = check_upgrade(formatter, rules)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/checker.py", line 38, in check_upgrade
rule_status = RuleStatus.from_rule(rule)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/problem.py", line 44, in from_rule
result = rule.check()
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/db.py", line 74, in wrapper
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py", line 56, in check
installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())
File "/home/airflow/.local/lib/python3.6/site-packages/packaging/version.py", line 298, in __init__
raise InvalidVersion("Invalid version: '{0}'".format(version))
packaging.version.InvalidVersion: Invalid version: '5.7.26-log'
airflow@airflow-web-54d6577c8b-g9vcn:/opt/airflow$
```
**What you expected to happen**: commands runs through and prints helpful messages
<!-- What do you think went wrong? -->
Running `SELECT VERSION();').scalar()` against the metastore db returns "5.7.26-log' which is possibly not a valid value for Version class `__init__` function because of the "-log" ending?
```
mysql> select VERSION();
+------------+
| VERSION() |
+------------+
| 5.7.26-log |
+------------+
1 row in set (0.00 sec)
```
**How to reproduce it**: Run `airflow upgrade_check` again.
**Anything else we need to know**:
Dockerfile snippet:
```
FROM apache/airflow:1.10.14
...
USER ${AIRFLOW_UID}
RUN pip install --user \
airflow-kubernetes-job-operator \
apache-airflow-backport-providers-cncf-kubernetes \
apache-airflow-backport-providers-ssh \
apache-airflow-upgrade-check
```
How often does this problem occur? Once? Every time etc?: Every time since last week. Has worked before.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py`
Content:
```
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17
18 from packaging.version import Version
19
20 from airflow.configuration import conf
21 from airflow.upgrade.rules.base_rule import BaseRule
22 from airflow.utils.db import provide_session
23
24
25 class DatabaseVersionCheckRule(BaseRule):
26 title = "Check versions of PostgreSQL, MySQL, and SQLite to ease upgrade to Airflow 2.0"
27
28 description = """\
29 From Airflow 2.0, the following database versions are supported:
30 PostgreSQl - 9.6, 10, 11, 12, 13;
31 MySQL - 5.7, 8;
32 SQLite - 3.15+
33 """
34
35 @provide_session
36 def check(self, session=None):
37
38 more_info = "See link below for more details: https://github.com/apache/airflow#requirements"
39
40 conn_str = conf.get(section="core", key="sql_alchemy_conn")
41
42 if "sqlite" in conn_str:
43 min_req_sqlite_version = Version('3.15')
44 installed_sqlite_version = Version(session.execute('select sqlite_version();').scalar())
45 if installed_sqlite_version < min_req_sqlite_version:
46 return "From Airflow 2.0, SQLite version below 3.15 is no longer supported. \n" + more_info
47
48 elif "postgres" in conn_str:
49 min_req_postgres_version = Version('9.6')
50 installed_postgres_version = Version(session.execute('SHOW server_version;').scalar())
51 if installed_postgres_version < min_req_postgres_version:
52 return "From Airflow 2.0, PostgreSQL version below 9.6 is no longer supported. \n" + more_info
53
54 elif "mysql" in conn_str:
55 min_req_mysql_version = Version('5.7')
56 installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())
57 if installed_mysql_version < min_req_mysql_version:
58 return "From Airflow 2.0, MySQL version below 5.7 is no longer supported. \n" + more_info
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py b/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py
--- a/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py
+++ b/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py
@@ -43,16 +43,23 @@
min_req_sqlite_version = Version('3.15')
installed_sqlite_version = Version(session.execute('select sqlite_version();').scalar())
if installed_sqlite_version < min_req_sqlite_version:
- return "From Airflow 2.0, SQLite version below 3.15 is no longer supported. \n" + more_info
+ return "From Airflow 2.0, SQLite version below {} is no longer supported. \n{}".format(
+ min_req_sqlite_version, more_info
+ )
elif "postgres" in conn_str:
min_req_postgres_version = Version('9.6')
installed_postgres_version = Version(session.execute('SHOW server_version;').scalar())
if installed_postgres_version < min_req_postgres_version:
- return "From Airflow 2.0, PostgreSQL version below 9.6 is no longer supported. \n" + more_info
+ return "From Airflow 2.0, PostgreSQL version below {} is no longer supported. \n{}".format(
+ min_req_postgres_version, more_info
+ )
elif "mysql" in conn_str:
min_req_mysql_version = Version('5.7')
- installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())
+ # special treatment is needed here, because MySQL version may include a suffix like '-log'
+ installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar().split('-')[0])
if installed_mysql_version < min_req_mysql_version:
- return "From Airflow 2.0, MySQL version below 5.7 is no longer supported. \n" + more_info
+ return "From Airflow 2.0, MySQL version below {} is no longer supported. \n{}".format(
+ min_req_mysql_version, more_info
+ )
verification_info:
{"golden_diff": "diff --git a/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py b/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py\n--- a/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py\n+++ b/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py\n@@ -43,16 +43,23 @@\n min_req_sqlite_version = Version('3.15')\n installed_sqlite_version = Version(session.execute('select sqlite_version();').scalar())\n if installed_sqlite_version < min_req_sqlite_version:\n- return \"From Airflow 2.0, SQLite version below 3.15 is no longer supported. \\n\" + more_info\n+ return \"From Airflow 2.0, SQLite version below {} is no longer supported. \\n{}\".format(\n+ min_req_sqlite_version, more_info\n+ )\n \n elif \"postgres\" in conn_str:\n min_req_postgres_version = Version('9.6')\n installed_postgres_version = Version(session.execute('SHOW server_version;').scalar())\n if installed_postgres_version < min_req_postgres_version:\n- return \"From Airflow 2.0, PostgreSQL version below 9.6 is no longer supported. \\n\" + more_info\n+ return \"From Airflow 2.0, PostgreSQL version below {} is no longer supported. \\n{}\".format(\n+ min_req_postgres_version, more_info\n+ )\n \n elif \"mysql\" in conn_str:\n min_req_mysql_version = Version('5.7')\n- installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())\n+ # special treatment is needed here, because MySQL version may include a suffix like '-log'\n+ installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar().split('-')[0])\n if installed_mysql_version < min_req_mysql_version:\n- return \"From Airflow 2.0, MySQL version below 5.7 is no longer supported. \\n\" + more_info\n+ return \"From Airflow 2.0, MySQL version below {} is no longer supported. \\n{}\".format(\n+ min_req_mysql_version, more_info\n+ )\n", "issue": "upgrade_check fails db version check\n**Apache Airflow version**: 1.10.14 with AWS RDS mysql 5.7.26 as metastore db\r\n\r\n\r\n**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): v1.16.15\r\n\r\n**Environment**: DEV\r\n\r\n- **Cloud provider or hardware configuration**: AWS\r\n- **OS** (e.g. from /etc/os-release): Debian GNU/Linux 10 (buster)\r\n- **Kernel** (e.g. `uname -a`): Linux airflow-scheduler-765f664c56-4bsfq 4.14.186-146.268.amzn2.x86_64 #1 SMP Tue Jul 14 18:16:52 UTC 2020 x86_64 GNU/Linux\r\n- **Install tools**: \r\n- **Others**: Running on K8S as docker container with apache/airflow:1.10.14 as base\r\n\r\n**What happened**: Running `airflow upgrade_check` returns the following error:\r\n```\r\nairflow@airflow-web-54d6577c8b-g9vcn:/opt/airflow$ airflow upgrade_check\r\n\r\n==================================================== STATUS ====================================================\r\nCheck for latest versions of apache-airflow and checker...............................................SUCCESS\r\nRemove airflow.AirflowMacroPlugin class...............................................................SUCCESS\r\n/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/helpers.py:442: DeprecationWarning: Importing 'DummyOperator' directly from 'airflow.operators' has been deprecated. Please import from 'airflow.operators.[operator_module]' instead. 
Support for direct imports will be dropped entirely in Airflow 2.0.\r\n DeprecationWarning)\r\nEnsure users are not using custom metaclasses in custom operators.....................................SUCCESS\r\nChain between DAG and operator not allowed............................................................SUCCESS\r\nConnection.conn_type is not nullable..................................................................SUCCESS\r\nCustom Executors now require full path................................................................SUCCESS\r\nTraceback (most recent call last):\r\n File \"/home/airflow/.local/bin/airflow\", line 37, in <module>\r\n args.func(args)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/checker.py\", line 118, in run\r\n all_problems = check_upgrade(formatter, rules)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/checker.py\", line 38, in check_upgrade\r\n rule_status = RuleStatus.from_rule(rule)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/problem.py\", line 44, in from_rule\r\n result = rule.check()\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/db.py\", line 74, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py\", line 56, in check\r\n installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/packaging/version.py\", line 298, in __init__\r\n raise InvalidVersion(\"Invalid version: '{0}'\".format(version))\r\npackaging.version.InvalidVersion: Invalid version: '5.7.26-log'\r\nairflow@airflow-web-54d6577c8b-g9vcn:/opt/airflow$\r\n```\r\n\r\n**What you expected to happen**: commands runs through and prints helpful messages\r\n\r\n<!-- What do you think went wrong? -->\r\nRunning `SELECT VERSION();').scalar()` against the metastore db returns \"5.7.26-log' which is possibly not a valid value for Version class `__init__` function because of the \"-log\" ending?\r\n\r\n```\r\nmysql> select VERSION();\r\n+------------+\r\n| VERSION() |\r\n+------------+\r\n| 5.7.26-log |\r\n+------------+\r\n1 row in set (0.00 sec)\r\n```\r\n\r\n**How to reproduce it**: Run `airflow upgrade_check` again.\r\n\r\n**Anything else we need to know**: \r\nDockerfile snippet:\r\n```\r\nFROM apache/airflow:1.10.14\r\n...\r\nUSER ${AIRFLOW_UID}\r\nRUN pip install --user \\\r\n airflow-kubernetes-job-operator \\\r\n apache-airflow-backport-providers-cncf-kubernetes \\\r\n apache-airflow-backport-providers-ssh \\\r\n apache-airflow-upgrade-check\r\n```\r\n\r\nHow often does this problem occur? Once? Every time etc?: Every time since last week. Has worked before.\r\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom packaging.version import Version\n\nfrom airflow.configuration import conf\nfrom airflow.upgrade.rules.base_rule import BaseRule\nfrom airflow.utils.db import provide_session\n\n\nclass DatabaseVersionCheckRule(BaseRule):\n title = \"Check versions of PostgreSQL, MySQL, and SQLite to ease upgrade to Airflow 2.0\"\n\n description = \"\"\"\\\nFrom Airflow 2.0, the following database versions are supported:\nPostgreSQl - 9.6, 10, 11, 12, 13;\nMySQL - 5.7, 8;\nSQLite - 3.15+\n \"\"\"\n\n @provide_session\n def check(self, session=None):\n\n more_info = \"See link below for more details: https://github.com/apache/airflow#requirements\"\n\n conn_str = conf.get(section=\"core\", key=\"sql_alchemy_conn\")\n\n if \"sqlite\" in conn_str:\n min_req_sqlite_version = Version('3.15')\n installed_sqlite_version = Version(session.execute('select sqlite_version();').scalar())\n if installed_sqlite_version < min_req_sqlite_version:\n return \"From Airflow 2.0, SQLite version below 3.15 is no longer supported. \\n\" + more_info\n\n elif \"postgres\" in conn_str:\n min_req_postgres_version = Version('9.6')\n installed_postgres_version = Version(session.execute('SHOW server_version;').scalar())\n if installed_postgres_version < min_req_postgres_version:\n return \"From Airflow 2.0, PostgreSQL version below 9.6 is no longer supported. \\n\" + more_info\n\n elif \"mysql\" in conn_str:\n min_req_mysql_version = Version('5.7')\n installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())\n if installed_mysql_version < min_req_mysql_version:\n return \"From Airflow 2.0, MySQL version below 5.7 is no longer supported. \\n\" + more_info\n", "path": "airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py"}], "after_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom packaging.version import Version\n\nfrom airflow.configuration import conf\nfrom airflow.upgrade.rules.base_rule import BaseRule\nfrom airflow.utils.db import provide_session\n\n\nclass DatabaseVersionCheckRule(BaseRule):\n title = \"Check versions of PostgreSQL, MySQL, and SQLite to ease upgrade to Airflow 2.0\"\n\n description = \"\"\"\\\nFrom Airflow 2.0, the following database versions are supported:\nPostgreSQl - 9.6, 10, 11, 12, 13;\nMySQL - 5.7, 8;\nSQLite - 3.15+\n \"\"\"\n\n @provide_session\n def check(self, session=None):\n\n more_info = \"See link below for more details: https://github.com/apache/airflow#requirements\"\n\n conn_str = conf.get(section=\"core\", key=\"sql_alchemy_conn\")\n\n if \"sqlite\" in conn_str:\n min_req_sqlite_version = Version('3.15')\n installed_sqlite_version = Version(session.execute('select sqlite_version();').scalar())\n if installed_sqlite_version < min_req_sqlite_version:\n return \"From Airflow 2.0, SQLite version below {} is no longer supported. \\n{}\".format(\n min_req_sqlite_version, more_info\n )\n\n elif \"postgres\" in conn_str:\n min_req_postgres_version = Version('9.6')\n installed_postgres_version = Version(session.execute('SHOW server_version;').scalar())\n if installed_postgres_version < min_req_postgres_version:\n return \"From Airflow 2.0, PostgreSQL version below {} is no longer supported. \\n{}\".format(\n min_req_postgres_version, more_info\n )\n\n elif \"mysql\" in conn_str:\n min_req_mysql_version = Version('5.7')\n # special treatment is needed here, because MySQL version may include a suffix like '-log'\n installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar().split('-')[0])\n if installed_mysql_version < min_req_mysql_version:\n return \"From Airflow 2.0, MySQL version below {} is no longer supported. \\n{}\".format(\n min_req_mysql_version, more_info\n )\n", "path": "airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py"}]}
num_tokens: 2,025 | num_tokens_diff: 486
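
The failure in the record above comes from handing a vendor-suffixed MySQL version string straight to PEP 440 parsing; the golden diff strips everything after the first `-` before constructing `Version`. A small standalone reproduction, assuming only that the `packaging` library is installed:

```python
from packaging.version import InvalidVersion, Version

raw = "5.7.26-log"  # as returned by `SELECT VERSION();` in the report above

try:
    Version(raw)
except InvalidVersion as exc:
    print(exc)  # the '-log' suffix is not valid PEP 440, so parsing fails

cleaned = Version(raw.split("-")[0])  # '5.7.26'
print(cleaned >= Version("5.7"))      # True, so the upgrade check can proceed
```
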
problem_id: gh_patches_debug_9223 | source: rasdani/github-patches | task_type: git_diff | in_source_id: pex-tool__pex-685
prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Restore pex.pex_bootstrapper.is_compressed API
#680 broke https://github.com/wickman/lambdex which has a dependency on this removed function. https://github.com/wickman/lambdex/issues/5 is filed to remove this un-necessary dependence going forward.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/pex_bootstrapper.py`
Content:
```
1 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import absolute_import, print_function
5
6 import os
7 import sys
8
9 from pex import pex_warnings
10 from pex.common import die
11 from pex.executor import Executor
12 from pex.interpreter import PythonInterpreter
13 from pex.interpreter_constraints import matched_interpreters
14 from pex.tracer import TRACER
15 from pex.variables import ENV
16
17 __all__ = ('bootstrap_pex',)
18
19
20 def find_in_path(target_interpreter):
21 if os.path.exists(target_interpreter):
22 return target_interpreter
23
24 for directory in os.getenv('PATH', '').split(os.pathsep):
25 try_path = os.path.join(directory, target_interpreter)
26 if os.path.exists(try_path):
27 return try_path
28
29
30 def find_compatible_interpreters(pex_python_path, compatibility_constraints):
31 """Find all compatible interpreters on the system within the supplied constraints and use
32 PEX_PYTHON_PATH if it is set. If not, fall back to interpreters on $PATH.
33 """
34 if pex_python_path:
35 interpreters = []
36 for binary in pex_python_path.split(os.pathsep):
37 try:
38 interpreters.append(PythonInterpreter.from_binary(binary))
39 except Executor.ExecutionError:
40 print("Python interpreter %s in PEX_PYTHON_PATH failed to load properly." % binary,
41 file=sys.stderr)
42 if not interpreters:
43 die('PEX_PYTHON_PATH was defined, but no valid interpreters could be identified. Exiting.')
44 else:
45 if not os.getenv('PATH', ''):
46 # no $PATH, use sys.executable
47 interpreters = [PythonInterpreter.get()]
48 else:
49 # get all qualifying interpreters found in $PATH
50 interpreters = PythonInterpreter.all()
51
52 return list(
53 matched_interpreters(interpreters, compatibility_constraints)
54 if compatibility_constraints
55 else interpreters
56 )
57
58
59 def _select_pex_python_interpreter(target_python, compatibility_constraints):
60 target = find_in_path(target_python)
61
62 if not target:
63 die('Failed to find interpreter specified by PEX_PYTHON: %s' % target)
64 if compatibility_constraints:
65 pi = PythonInterpreter.from_binary(target)
66 if not list(matched_interpreters([pi], compatibility_constraints)):
67 die('Interpreter specified by PEX_PYTHON (%s) is not compatible with specified '
68 'interpreter constraints: %s' % (target, str(compatibility_constraints)))
69 if not os.path.exists(target):
70 die('Target interpreter specified by PEX_PYTHON %s does not exist. Exiting.' % target)
71 return target
72
73
74 def _select_interpreter(pex_python_path, compatibility_constraints):
75 compatible_interpreters = find_compatible_interpreters(
76 pex_python_path, compatibility_constraints)
77
78 if not compatible_interpreters:
79 die('Failed to find compatible interpreter for constraints: %s'
80 % str(compatibility_constraints))
81 # TODO: https://github.com/pantsbuild/pex/issues/430
82 target = min(compatible_interpreters).binary
83
84 if os.path.exists(target):
85 return target
86
87
88 def maybe_reexec_pex(compatibility_constraints):
89 """
90 Handle environment overrides for the Python interpreter to use when executing this pex.
91
92 This function supports interpreter filtering based on interpreter constraints stored in PEX-INFO
93 metadata. If PEX_PYTHON is set in a pexrc, it attempts to obtain the binary location of the
94 interpreter specified by PEX_PYTHON. If PEX_PYTHON_PATH is set, it attempts to search the path for
95 a matching interpreter in accordance with the interpreter constraints. If both variables are
96 present in a pexrc, this function gives precedence to PEX_PYTHON_PATH and errors out if no
97 compatible interpreters can be found on said path.
98
99 If neither variable is set, we fall back to plain PEX execution using PATH searching or the
100 currently executing interpreter. If compatibility constraints are used, we match those constraints
101 against these interpreters.
102
103 :param compatibility_constraints: list of requirements-style strings that constrain the
104 Python interpreter to re-exec this pex with.
105 """
106 if os.environ.pop('SHOULD_EXIT_BOOTSTRAP_REEXEC', None):
107 # We've already been here and selected an interpreter. Continue to execution.
108 return
109
110 target = None
111 with TRACER.timed('Selecting runtime interpreter based on pexrc', V=3):
112 if ENV.PEX_PYTHON and not ENV.PEX_PYTHON_PATH:
113 # preserve PEX_PYTHON re-exec for backwards compatibility
114 # TODO: Kill this off completely in favor of PEX_PYTHON_PATH
115 # https://github.com/pantsbuild/pex/issues/431
116 target = _select_pex_python_interpreter(ENV.PEX_PYTHON,
117 compatibility_constraints)
118 elif ENV.PEX_PYTHON_PATH:
119 target = _select_interpreter(ENV.PEX_PYTHON_PATH, compatibility_constraints)
120
121 elif compatibility_constraints:
122 # Apply constraints to target using regular PATH
123 target = _select_interpreter(
124 pex_python_path=None,
125 compatibility_constraints=compatibility_constraints
126 )
127
128 if target and os.path.realpath(target) != os.path.realpath(sys.executable):
129 cmdline = [target] + sys.argv
130 TRACER.log('Re-executing: cmdline="%s", sys.executable="%s", PEX_PYTHON="%s", '
131 'PEX_PYTHON_PATH="%s", COMPATIBILITY_CONSTRAINTS="%s"'
132 % (cmdline, sys.executable, ENV.PEX_PYTHON, ENV.PEX_PYTHON_PATH,
133 compatibility_constraints))
134 ENV.delete('PEX_PYTHON')
135 ENV.delete('PEX_PYTHON_PATH')
136 os.environ['SHOULD_EXIT_BOOTSTRAP_REEXEC'] = '1'
137 os.execve(target, cmdline, ENV.copy())
138
139
140 def _bootstrap(entry_point):
141 from .pex_info import PexInfo
142 pex_info = PexInfo.from_pex(entry_point)
143 pex_warnings.configure_warnings(pex_info)
144
145 from .finders import register_finders
146 register_finders()
147
148 return pex_info
149
150
151 def bootstrap_pex(entry_point):
152 pex_info = _bootstrap(entry_point)
153 maybe_reexec_pex(pex_info.interpreter_constraints)
154
155 from . import pex
156 pex.PEX(entry_point).execute()
157
158
159 def bootstrap_pex_env(entry_point):
160 """Bootstrap the current runtime environment using a given pex."""
161 pex_info = _bootstrap(entry_point)
162
163 from .environment import PEXEnvironment
164 PEXEnvironment(entry_point, pex_info).activate()
165
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/pex/pex_bootstrapper.py b/pex/pex_bootstrapper.py
--- a/pex/pex_bootstrapper.py
+++ b/pex/pex_bootstrapper.py
@@ -156,6 +156,13 @@
pex.PEX(entry_point).execute()
+# NB: This helper is used by third party libs - namely https://github.com/wickman/lambdex.
+# TODO(John Sirois): Kill once https://github.com/wickman/lambdex/issues/5 is resolved.
+def is_compressed(entry_point):
+ from .pex_info import PexInfo
+ return os.path.exists(entry_point) and not os.path.exists(os.path.join(entry_point, PexInfo.PATH))
+
+
def bootstrap_pex_env(entry_point):
"""Bootstrap the current runtime environment using a given pex."""
pex_info = _bootstrap(entry_point)
verification_info:
{"golden_diff": "diff --git a/pex/pex_bootstrapper.py b/pex/pex_bootstrapper.py\n--- a/pex/pex_bootstrapper.py\n+++ b/pex/pex_bootstrapper.py\n@@ -156,6 +156,13 @@\n pex.PEX(entry_point).execute()\n \n \n+# NB: This helper is used by third party libs - namely https://github.com/wickman/lambdex.\n+# TODO(John Sirois): Kill once https://github.com/wickman/lambdex/issues/5 is resolved.\n+def is_compressed(entry_point):\n+ from .pex_info import PexInfo\n+ return os.path.exists(entry_point) and not os.path.exists(os.path.join(entry_point, PexInfo.PATH))\n+\n+\n def bootstrap_pex_env(entry_point):\n \"\"\"Bootstrap the current runtime environment using a given pex.\"\"\"\n pex_info = _bootstrap(entry_point)\n", "issue": "Restore pex.pex_bootstrapper.is_compressed API\n#680 broke https://github.com/wickman/lambdex which has a dependency on this removed function. https://github.com/wickman/lambdex/issues/5 is filed to remove this un-necessary dependence going forward.\n", "before_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport sys\n\nfrom pex import pex_warnings\nfrom pex.common import die\nfrom pex.executor import Executor\nfrom pex.interpreter import PythonInterpreter\nfrom pex.interpreter_constraints import matched_interpreters\nfrom pex.tracer import TRACER\nfrom pex.variables import ENV\n\n__all__ = ('bootstrap_pex',)\n\n\ndef find_in_path(target_interpreter):\n if os.path.exists(target_interpreter):\n return target_interpreter\n\n for directory in os.getenv('PATH', '').split(os.pathsep):\n try_path = os.path.join(directory, target_interpreter)\n if os.path.exists(try_path):\n return try_path\n\n\ndef find_compatible_interpreters(pex_python_path, compatibility_constraints):\n \"\"\"Find all compatible interpreters on the system within the supplied constraints and use\n PEX_PYTHON_PATH if it is set. If not, fall back to interpreters on $PATH.\n \"\"\"\n if pex_python_path:\n interpreters = []\n for binary in pex_python_path.split(os.pathsep):\n try:\n interpreters.append(PythonInterpreter.from_binary(binary))\n except Executor.ExecutionError:\n print(\"Python interpreter %s in PEX_PYTHON_PATH failed to load properly.\" % binary,\n file=sys.stderr)\n if not interpreters:\n die('PEX_PYTHON_PATH was defined, but no valid interpreters could be identified. Exiting.')\n else:\n if not os.getenv('PATH', ''):\n # no $PATH, use sys.executable\n interpreters = [PythonInterpreter.get()]\n else:\n # get all qualifying interpreters found in $PATH\n interpreters = PythonInterpreter.all()\n\n return list(\n matched_interpreters(interpreters, compatibility_constraints)\n if compatibility_constraints\n else interpreters\n )\n\n\ndef _select_pex_python_interpreter(target_python, compatibility_constraints):\n target = find_in_path(target_python)\n\n if not target:\n die('Failed to find interpreter specified by PEX_PYTHON: %s' % target)\n if compatibility_constraints:\n pi = PythonInterpreter.from_binary(target)\n if not list(matched_interpreters([pi], compatibility_constraints)):\n die('Interpreter specified by PEX_PYTHON (%s) is not compatible with specified '\n 'interpreter constraints: %s' % (target, str(compatibility_constraints)))\n if not os.path.exists(target):\n die('Target interpreter specified by PEX_PYTHON %s does not exist. Exiting.' 
% target)\n return target\n\n\ndef _select_interpreter(pex_python_path, compatibility_constraints):\n compatible_interpreters = find_compatible_interpreters(\n pex_python_path, compatibility_constraints)\n\n if not compatible_interpreters:\n die('Failed to find compatible interpreter for constraints: %s'\n % str(compatibility_constraints))\n # TODO: https://github.com/pantsbuild/pex/issues/430\n target = min(compatible_interpreters).binary\n\n if os.path.exists(target):\n return target\n\n\ndef maybe_reexec_pex(compatibility_constraints):\n \"\"\"\n Handle environment overrides for the Python interpreter to use when executing this pex.\n\n This function supports interpreter filtering based on interpreter constraints stored in PEX-INFO\n metadata. If PEX_PYTHON is set in a pexrc, it attempts to obtain the binary location of the\n interpreter specified by PEX_PYTHON. If PEX_PYTHON_PATH is set, it attempts to search the path for\n a matching interpreter in accordance with the interpreter constraints. If both variables are\n present in a pexrc, this function gives precedence to PEX_PYTHON_PATH and errors out if no\n compatible interpreters can be found on said path.\n\n If neither variable is set, we fall back to plain PEX execution using PATH searching or the\n currently executing interpreter. If compatibility constraints are used, we match those constraints\n against these interpreters.\n\n :param compatibility_constraints: list of requirements-style strings that constrain the\n Python interpreter to re-exec this pex with.\n \"\"\"\n if os.environ.pop('SHOULD_EXIT_BOOTSTRAP_REEXEC', None):\n # We've already been here and selected an interpreter. Continue to execution.\n return\n\n target = None\n with TRACER.timed('Selecting runtime interpreter based on pexrc', V=3):\n if ENV.PEX_PYTHON and not ENV.PEX_PYTHON_PATH:\n # preserve PEX_PYTHON re-exec for backwards compatibility\n # TODO: Kill this off completely in favor of PEX_PYTHON_PATH\n # https://github.com/pantsbuild/pex/issues/431\n target = _select_pex_python_interpreter(ENV.PEX_PYTHON,\n compatibility_constraints)\n elif ENV.PEX_PYTHON_PATH:\n target = _select_interpreter(ENV.PEX_PYTHON_PATH, compatibility_constraints)\n\n elif compatibility_constraints:\n # Apply constraints to target using regular PATH\n target = _select_interpreter(\n pex_python_path=None,\n compatibility_constraints=compatibility_constraints\n )\n\n if target and os.path.realpath(target) != os.path.realpath(sys.executable):\n cmdline = [target] + sys.argv\n TRACER.log('Re-executing: cmdline=\"%s\", sys.executable=\"%s\", PEX_PYTHON=\"%s\", '\n 'PEX_PYTHON_PATH=\"%s\", COMPATIBILITY_CONSTRAINTS=\"%s\"'\n % (cmdline, sys.executable, ENV.PEX_PYTHON, ENV.PEX_PYTHON_PATH,\n compatibility_constraints))\n ENV.delete('PEX_PYTHON')\n ENV.delete('PEX_PYTHON_PATH')\n os.environ['SHOULD_EXIT_BOOTSTRAP_REEXEC'] = '1'\n os.execve(target, cmdline, ENV.copy())\n\n\ndef _bootstrap(entry_point):\n from .pex_info import PexInfo\n pex_info = PexInfo.from_pex(entry_point)\n pex_warnings.configure_warnings(pex_info)\n\n from .finders import register_finders\n register_finders()\n\n return pex_info\n\n\ndef bootstrap_pex(entry_point):\n pex_info = _bootstrap(entry_point)\n maybe_reexec_pex(pex_info.interpreter_constraints)\n\n from . 
import pex\n pex.PEX(entry_point).execute()\n\n\ndef bootstrap_pex_env(entry_point):\n \"\"\"Bootstrap the current runtime environment using a given pex.\"\"\"\n pex_info = _bootstrap(entry_point)\n\n from .environment import PEXEnvironment\n PEXEnvironment(entry_point, pex_info).activate()\n", "path": "pex/pex_bootstrapper.py"}], "after_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport sys\n\nfrom pex import pex_warnings\nfrom pex.common import die\nfrom pex.executor import Executor\nfrom pex.interpreter import PythonInterpreter\nfrom pex.interpreter_constraints import matched_interpreters\nfrom pex.tracer import TRACER\nfrom pex.variables import ENV\n\n__all__ = ('bootstrap_pex',)\n\n\ndef find_in_path(target_interpreter):\n if os.path.exists(target_interpreter):\n return target_interpreter\n\n for directory in os.getenv('PATH', '').split(os.pathsep):\n try_path = os.path.join(directory, target_interpreter)\n if os.path.exists(try_path):\n return try_path\n\n\ndef find_compatible_interpreters(pex_python_path, compatibility_constraints):\n \"\"\"Find all compatible interpreters on the system within the supplied constraints and use\n PEX_PYTHON_PATH if it is set. If not, fall back to interpreters on $PATH.\n \"\"\"\n if pex_python_path:\n interpreters = []\n for binary in pex_python_path.split(os.pathsep):\n try:\n interpreters.append(PythonInterpreter.from_binary(binary))\n except Executor.ExecutionError:\n print(\"Python interpreter %s in PEX_PYTHON_PATH failed to load properly.\" % binary,\n file=sys.stderr)\n if not interpreters:\n die('PEX_PYTHON_PATH was defined, but no valid interpreters could be identified. Exiting.')\n else:\n if not os.getenv('PATH', ''):\n # no $PATH, use sys.executable\n interpreters = [PythonInterpreter.get()]\n else:\n # get all qualifying interpreters found in $PATH\n interpreters = PythonInterpreter.all()\n\n return list(\n matched_interpreters(interpreters, compatibility_constraints)\n if compatibility_constraints\n else interpreters\n )\n\n\ndef _select_pex_python_interpreter(target_python, compatibility_constraints):\n target = find_in_path(target_python)\n\n if not target:\n die('Failed to find interpreter specified by PEX_PYTHON: %s' % target)\n if compatibility_constraints:\n pi = PythonInterpreter.from_binary(target)\n if not list(matched_interpreters([pi], compatibility_constraints)):\n die('Interpreter specified by PEX_PYTHON (%s) is not compatible with specified '\n 'interpreter constraints: %s' % (target, str(compatibility_constraints)))\n if not os.path.exists(target):\n die('Target interpreter specified by PEX_PYTHON %s does not exist. Exiting.' 
% target)\n return target\n\n\ndef _select_interpreter(pex_python_path, compatibility_constraints):\n compatible_interpreters = find_compatible_interpreters(\n pex_python_path, compatibility_constraints)\n\n if not compatible_interpreters:\n die('Failed to find compatible interpreter for constraints: %s'\n % str(compatibility_constraints))\n # TODO: https://github.com/pantsbuild/pex/issues/430\n target = min(compatible_interpreters).binary\n\n if os.path.exists(target):\n return target\n\n\ndef maybe_reexec_pex(compatibility_constraints):\n \"\"\"\n Handle environment overrides for the Python interpreter to use when executing this pex.\n\n This function supports interpreter filtering based on interpreter constraints stored in PEX-INFO\n metadata. If PEX_PYTHON is set in a pexrc, it attempts to obtain the binary location of the\n interpreter specified by PEX_PYTHON. If PEX_PYTHON_PATH is set, it attempts to search the path for\n a matching interpreter in accordance with the interpreter constraints. If both variables are\n present in a pexrc, this function gives precedence to PEX_PYTHON_PATH and errors out if no\n compatible interpreters can be found on said path.\n\n If neither variable is set, we fall back to plain PEX execution using PATH searching or the\n currently executing interpreter. If compatibility constraints are used, we match those constraints\n against these interpreters.\n\n :param compatibility_constraints: list of requirements-style strings that constrain the\n Python interpreter to re-exec this pex with.\n \"\"\"\n if os.environ.pop('SHOULD_EXIT_BOOTSTRAP_REEXEC', None):\n # We've already been here and selected an interpreter. Continue to execution.\n return\n\n target = None\n with TRACER.timed('Selecting runtime interpreter based on pexrc', V=3):\n if ENV.PEX_PYTHON and not ENV.PEX_PYTHON_PATH:\n # preserve PEX_PYTHON re-exec for backwards compatibility\n # TODO: Kill this off completely in favor of PEX_PYTHON_PATH\n # https://github.com/pantsbuild/pex/issues/431\n target = _select_pex_python_interpreter(ENV.PEX_PYTHON,\n compatibility_constraints)\n elif ENV.PEX_PYTHON_PATH:\n target = _select_interpreter(ENV.PEX_PYTHON_PATH, compatibility_constraints)\n\n elif compatibility_constraints:\n # Apply constraints to target using regular PATH\n target = _select_interpreter(\n pex_python_path=None,\n compatibility_constraints=compatibility_constraints\n )\n\n if target and os.path.realpath(target) != os.path.realpath(sys.executable):\n cmdline = [target] + sys.argv\n TRACER.log('Re-executing: cmdline=\"%s\", sys.executable=\"%s\", PEX_PYTHON=\"%s\", '\n 'PEX_PYTHON_PATH=\"%s\", COMPATIBILITY_CONSTRAINTS=\"%s\"'\n % (cmdline, sys.executable, ENV.PEX_PYTHON, ENV.PEX_PYTHON_PATH,\n compatibility_constraints))\n ENV.delete('PEX_PYTHON')\n ENV.delete('PEX_PYTHON_PATH')\n os.environ['SHOULD_EXIT_BOOTSTRAP_REEXEC'] = '1'\n os.execve(target, cmdline, ENV.copy())\n\n\ndef _bootstrap(entry_point):\n from .pex_info import PexInfo\n pex_info = PexInfo.from_pex(entry_point)\n pex_warnings.configure_warnings(pex_info)\n\n from .finders import register_finders\n register_finders()\n\n return pex_info\n\n\ndef bootstrap_pex(entry_point):\n pex_info = _bootstrap(entry_point)\n maybe_reexec_pex(pex_info.interpreter_constraints)\n\n from . 
import pex\n pex.PEX(entry_point).execute()\n\n\n# NB: This helper is used by third party libs - namely https://github.com/wickman/lambdex.\n# TODO(John Sirois): Kill once https://github.com/wickman/lambdex/issues/5 is resolved.\ndef is_compressed(entry_point):\n from .pex_info import PexInfo\n return os.path.exists(entry_point) and not os.path.exists(os.path.join(entry_point, PexInfo.PATH))\n\n\ndef bootstrap_pex_env(entry_point):\n \"\"\"Bootstrap the current runtime environment using a given pex.\"\"\"\n pex_info = _bootstrap(entry_point)\n\n from .environment import PEXEnvironment\n PEXEnvironment(entry_point, pex_info).activate()\n", "path": "pex/pex_bootstrapper.py"}]}
| 2,164 | 199 |
gh_patches_debug_12748
|
rasdani/github-patches
|
git_diff
|
searx__searx-233
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Requests does not follow on an HTTP 301
We had already an issue when kickass redirected from https to http I seem to remember. Or it was on another engine. And the request wasn't followed so we had an empty response with status_code 301 in the engine.
Anyway, that is still the case in kickass if we try to perform a search when the query contains capitalized letters.
So why isn't 301 followed ? Is there a way to force it ?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/kickass.py`
Content:
```
1 ## Kickass Torrent (Videos, Music, Files)
2 #
3 # @website https://kickass.so
4 # @provide-api no (nothing found)
5 #
6 # @using-api no
7 # @results HTML (using search portal)
8 # @stable yes (HTML can change)
9 # @parse url, title, content, seed, leech, magnetlink
10
11 from urlparse import urljoin
12 from cgi import escape
13 from urllib import quote
14 from lxml import html
15 from operator import itemgetter
16 from searx.engines.xpath import extract_text
17
18 # engine dependent config
19 categories = ['videos', 'music', 'files']
20 paging = True
21
22 # search-url
23 url = 'https://kickass.so/'
24 search_url = url + 'search/{search_term}/{pageno}/'
25
26 # specific xpath variables
27 magnet_xpath = './/a[@title="Torrent magnet link"]'
28 torrent_xpath = './/a[@title="Download torrent file"]'
29 content_xpath = './/span[@class="font11px lightgrey block"]'
30
31
32 # do search-request
33 def request(query, params):
34 params['url'] = search_url.format(search_term=quote(query),
35 pageno=params['pageno'])
36
37 # FIX: SSLError: hostname 'kickass.so'
38 # doesn't match either of '*.kickass.to', 'kickass.to'
39 params['verify'] = False
40
41 return params
42
43
44 # get response from search-request
45 def response(resp):
46 results = []
47
48 dom = html.fromstring(resp.text)
49
50 search_res = dom.xpath('//table[@class="data"]//tr')
51
52 # return empty array if nothing is found
53 if not search_res:
54 return []
55
56 # parse results
57 for result in search_res[1:]:
58 link = result.xpath('.//a[@class="cellMainLink"]')[0]
59 href = urljoin(url, link.attrib['href'])
60 title = extract_text(link)
61 content = escape(extract_text(result.xpath(content_xpath)))
62 seed = result.xpath('.//td[contains(@class, "green")]/text()')[0]
63 leech = result.xpath('.//td[contains(@class, "red")]/text()')[0]
64 filesize = result.xpath('.//td[contains(@class, "nobr")]/text()')[0]
65 filesize_multiplier = result.xpath('.//td[contains(@class, "nobr")]//span/text()')[0]
66 files = result.xpath('.//td[contains(@class, "center")][2]/text()')[0]
67
68 # convert seed to int if possible
69 if seed.isdigit():
70 seed = int(seed)
71 else:
72 seed = 0
73
74 # convert leech to int if possible
75 if leech.isdigit():
76 leech = int(leech)
77 else:
78 leech = 0
79
80 # convert filesize to byte if possible
81 try:
82 filesize = float(filesize)
83
84 # convert filesize to byte
85 if filesize_multiplier == 'TB':
86 filesize = int(filesize * 1024 * 1024 * 1024 * 1024)
87 elif filesize_multiplier == 'GB':
88 filesize = int(filesize * 1024 * 1024 * 1024)
89 elif filesize_multiplier == 'MB':
90 filesize = int(filesize * 1024 * 1024)
91 elif filesize_multiplier == 'KB':
92 filesize = int(filesize * 1024)
93 except:
94 filesize = None
95
96 # convert files to int if possible
97 if files.isdigit():
98 files = int(files)
99 else:
100 files = None
101
102 magnetlink = result.xpath(magnet_xpath)[0].attrib['href']
103
104 torrentfile = result.xpath(torrent_xpath)[0].attrib['href']
105 torrentfileurl = quote(torrentfile, safe="%/:=&?~#+!$,;'@()*")
106
107 # append result
108 results.append({'url': href,
109 'title': title,
110 'content': content,
111 'seed': seed,
112 'leech': leech,
113 'filesize': filesize,
114 'files': files,
115 'magnetlink': magnetlink,
116 'torrentfile': torrentfileurl,
117 'template': 'torrent.html'})
118
119 # return results sorted by seeder
120 return sorted(results, key=itemgetter('seed'), reverse=True)
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/searx/engines/kickass.py b/searx/engines/kickass.py
--- a/searx/engines/kickass.py
+++ b/searx/engines/kickass.py
@@ -20,7 +20,7 @@
paging = True
# search-url
-url = 'https://kickass.so/'
+url = 'https://kickass.to/'
search_url = url + 'search/{search_term}/{pageno}/'
# specific xpath variables
@@ -45,6 +45,11 @@
def response(resp):
results = []
+ # check if redirect comparing to the True value,
+ # because resp can be a Mock object, and any attribut name returns something.
+ if resp.is_redirect is True:
+ return results
+
dom = html.fromstring(resp.text)
search_res = dom.xpath('//table[@class="data"]//tr')
|
{"golden_diff": "diff --git a/searx/engines/kickass.py b/searx/engines/kickass.py\n--- a/searx/engines/kickass.py\n+++ b/searx/engines/kickass.py\n@@ -20,7 +20,7 @@\n paging = True\n \n # search-url\n-url = 'https://kickass.so/'\n+url = 'https://kickass.to/'\n search_url = url + 'search/{search_term}/{pageno}/'\n \n # specific xpath variables\n@@ -45,6 +45,11 @@\n def response(resp):\n results = []\n \n+ # check if redirect comparing to the True value,\n+ # because resp can be a Mock object, and any attribut name returns something.\n+ if resp.is_redirect is True:\n+ return results\n+\n dom = html.fromstring(resp.text)\n \n search_res = dom.xpath('//table[@class=\"data\"]//tr')\n", "issue": "Requests does not follow on an HTTP 301\nWe had already an issue when kickass redirected from https to http I seem to remember. Or it was on another engine. And the request wasn't followed so we had an empty response with status_code 301 in the engine.\n\nAnyway, that is still the case in kickass if we try to perform a search when the query contains capitalized letters.\n\nSo why isn't 301 followed ? Is there a way to force it ?\n\n", "before_files": [{"content": "## Kickass Torrent (Videos, Music, Files)\n#\n# @website https://kickass.so\n# @provide-api no (nothing found)\n#\n# @using-api no\n# @results HTML (using search portal)\n# @stable yes (HTML can change)\n# @parse url, title, content, seed, leech, magnetlink\n\nfrom urlparse import urljoin\nfrom cgi import escape\nfrom urllib import quote\nfrom lxml import html\nfrom operator import itemgetter\nfrom searx.engines.xpath import extract_text\n\n# engine dependent config\ncategories = ['videos', 'music', 'files']\npaging = True\n\n# search-url\nurl = 'https://kickass.so/'\nsearch_url = url + 'search/{search_term}/{pageno}/'\n\n# specific xpath variables\nmagnet_xpath = './/a[@title=\"Torrent magnet link\"]'\ntorrent_xpath = './/a[@title=\"Download torrent file\"]'\ncontent_xpath = './/span[@class=\"font11px lightgrey block\"]'\n\n\n# do search-request\ndef request(query, params):\n params['url'] = search_url.format(search_term=quote(query),\n pageno=params['pageno'])\n\n # FIX: SSLError: hostname 'kickass.so'\n # doesn't match either of '*.kickass.to', 'kickass.to'\n params['verify'] = False\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.text)\n\n search_res = dom.xpath('//table[@class=\"data\"]//tr')\n\n # return empty array if nothing is found\n if not search_res:\n return []\n\n # parse results\n for result in search_res[1:]:\n link = result.xpath('.//a[@class=\"cellMainLink\"]')[0]\n href = urljoin(url, link.attrib['href'])\n title = extract_text(link)\n content = escape(extract_text(result.xpath(content_xpath)))\n seed = result.xpath('.//td[contains(@class, \"green\")]/text()')[0]\n leech = result.xpath('.//td[contains(@class, \"red\")]/text()')[0]\n filesize = result.xpath('.//td[contains(@class, \"nobr\")]/text()')[0]\n filesize_multiplier = result.xpath('.//td[contains(@class, \"nobr\")]//span/text()')[0]\n files = result.xpath('.//td[contains(@class, \"center\")][2]/text()')[0]\n\n # convert seed to int if possible\n if seed.isdigit():\n seed = int(seed)\n else:\n seed = 0\n\n # convert leech to int if possible\n if leech.isdigit():\n leech = int(leech)\n else:\n leech = 0\n\n # convert filesize to byte if possible\n try:\n filesize = float(filesize)\n\n # convert filesize to byte\n if filesize_multiplier == 'TB':\n filesize = int(filesize * 1024 * 1024 * 1024 * 
1024)\n elif filesize_multiplier == 'GB':\n filesize = int(filesize * 1024 * 1024 * 1024)\n elif filesize_multiplier == 'MB':\n filesize = int(filesize * 1024 * 1024)\n elif filesize_multiplier == 'KB':\n filesize = int(filesize * 1024)\n except:\n filesize = None\n\n # convert files to int if possible\n if files.isdigit():\n files = int(files)\n else:\n files = None\n\n magnetlink = result.xpath(magnet_xpath)[0].attrib['href']\n\n torrentfile = result.xpath(torrent_xpath)[0].attrib['href']\n torrentfileurl = quote(torrentfile, safe=\"%/:=&?~#+!$,;'@()*\")\n\n # append result\n results.append({'url': href,\n 'title': title,\n 'content': content,\n 'seed': seed,\n 'leech': leech,\n 'filesize': filesize,\n 'files': files,\n 'magnetlink': magnetlink,\n 'torrentfile': torrentfileurl,\n 'template': 'torrent.html'})\n\n # return results sorted by seeder\n return sorted(results, key=itemgetter('seed'), reverse=True)\n", "path": "searx/engines/kickass.py"}], "after_files": [{"content": "## Kickass Torrent (Videos, Music, Files)\n#\n# @website https://kickass.so\n# @provide-api no (nothing found)\n#\n# @using-api no\n# @results HTML (using search portal)\n# @stable yes (HTML can change)\n# @parse url, title, content, seed, leech, magnetlink\n\nfrom urlparse import urljoin\nfrom cgi import escape\nfrom urllib import quote\nfrom lxml import html\nfrom operator import itemgetter\nfrom searx.engines.xpath import extract_text\n\n# engine dependent config\ncategories = ['videos', 'music', 'files']\npaging = True\n\n# search-url\nurl = 'https://kickass.to/'\nsearch_url = url + 'search/{search_term}/{pageno}/'\n\n# specific xpath variables\nmagnet_xpath = './/a[@title=\"Torrent magnet link\"]'\ntorrent_xpath = './/a[@title=\"Download torrent file\"]'\ncontent_xpath = './/span[@class=\"font11px lightgrey block\"]'\n\n\n# do search-request\ndef request(query, params):\n params['url'] = search_url.format(search_term=quote(query),\n pageno=params['pageno'])\n\n # FIX: SSLError: hostname 'kickass.so'\n # doesn't match either of '*.kickass.to', 'kickass.to'\n params['verify'] = False\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n # check if redirect comparing to the True value,\n # because resp can be a Mock object, and any attribut name returns something.\n if resp.is_redirect is True:\n return results\n\n dom = html.fromstring(resp.text)\n\n search_res = dom.xpath('//table[@class=\"data\"]//tr')\n\n # return empty array if nothing is found\n if not search_res:\n return []\n\n # parse results\n for result in search_res[1:]:\n link = result.xpath('.//a[@class=\"cellMainLink\"]')[0]\n href = urljoin(url, link.attrib['href'])\n title = extract_text(link)\n content = escape(extract_text(result.xpath(content_xpath)))\n seed = result.xpath('.//td[contains(@class, \"green\")]/text()')[0]\n leech = result.xpath('.//td[contains(@class, \"red\")]/text()')[0]\n filesize = result.xpath('.//td[contains(@class, \"nobr\")]/text()')[0]\n filesize_multiplier = result.xpath('.//td[contains(@class, \"nobr\")]//span/text()')[0]\n files = result.xpath('.//td[contains(@class, \"center\")][2]/text()')[0]\n\n # convert seed to int if possible\n if seed.isdigit():\n seed = int(seed)\n else:\n seed = 0\n\n # convert leech to int if possible\n if leech.isdigit():\n leech = int(leech)\n else:\n leech = 0\n\n # convert filesize to byte if possible\n try:\n filesize = float(filesize)\n\n # convert filesize to byte\n if filesize_multiplier == 'TB':\n filesize = int(filesize * 1024 * 1024 * 
1024 * 1024)\n elif filesize_multiplier == 'GB':\n filesize = int(filesize * 1024 * 1024 * 1024)\n elif filesize_multiplier == 'MB':\n filesize = int(filesize * 1024 * 1024)\n elif filesize_multiplier == 'KB':\n filesize = int(filesize * 1024)\n except:\n filesize = None\n\n # convert files to int if possible\n if files.isdigit():\n files = int(files)\n else:\n files = None\n\n magnetlink = result.xpath(magnet_xpath)[0].attrib['href']\n\n torrentfile = result.xpath(torrent_xpath)[0].attrib['href']\n torrentfileurl = quote(torrentfile, safe=\"%/:=&?~#+!$,;'@()*\")\n\n # append result\n results.append({'url': href,\n 'title': title,\n 'content': content,\n 'seed': seed,\n 'leech': leech,\n 'filesize': filesize,\n 'files': files,\n 'magnetlink': magnetlink,\n 'torrentfile': torrentfileurl,\n 'template': 'torrent.html'})\n\n # return results sorted by seeder\n return sorted(results, key=itemgetter('seed'), reverse=True)\n", "path": "searx/engines/kickass.py"}]}
| 1,570 | 206 |
gh_patches_debug_5809
|
rasdani/github-patches
|
git_diff
|
geopandas__geopandas-728
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ValueError when saving geodataframe with pandas timestamps to shapefile
Is there any proper way to fix this error inside geopandas (not fiona)?
(Or maybe I'm trying to do the wrong thing and for example should convert my timestamps into strings before writing?)
```
C:\Users\kkrasnoschekov\_soft\WinPython-64bit-3.4.3.2_1\python-3.4.3.amd64\lib\site-packages\geopandas-0.1.0.dev_6163bc9-py3.4.egg\geopandas\geodataframe.py in to_file(self, filename, driver, schema, **kwargs)
326 """
327 from geopandas.io.file import to_file
--> 328 to_file(self, filename, driver, schema, **kwargs)
329
330 def to_crs(self, crs=None, epsg=None, inplace=False):
C:\Users\kkrasnoschekov\_soft\WinPython-64bit-3.4.3.2_1\python-3.4.3.amd64\lib\site-packages\geopandas-0.1.0.dev_6163bc9-py3.4.egg\geopandas\io\file.py in to_file(df, filename, driver, schema, **kwargs)
60 schema=schema, **kwargs) as c:
61 for feature in df.iterfeatures():
---> 62 c.write(feature)
63
64
C:\Users\kkrasnoschekov\_soft\WinPython-64bit-3.4.3.2_1\python-3.4.3.amd64\lib\site-packages\fiona\collection.py in write(self, record)
323 def write(self, record):
324 """Stages a record for writing to disk."""
--> 325 self.writerecords([record])
326
327 def validate_record(self, record):
C:\Users\kkrasnoschekov\_soft\WinPython-64bit-3.4.3.2_1\python-3.4.3.amd64\lib\site-packages\fiona\collection.py in writerecords(self, records)
317 if self.mode not in ('a', 'w'):
318 raise IOError("collection not open for writing")
--> 319 self.session.writerecs(records, self)
320 self._len = self.session.get_length()
321 self._bounds = self.session.get_extent()
fiona/ogrext.pyx in fiona.ogrext.WritingSession.writerecs (fiona/ogrext.c:15333)()
fiona/ogrext.pyx in fiona.ogrext.OGRFeatureBuilder.build (fiona/ogrext.c:5751)()
ValueError: Invalid field type <class 'pandas.tslib.Timestamp'>
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geopandas/io/file.py`
Content:
```
1 import os
2 from distutils.version import LooseVersion
3
4 import fiona
5 import numpy as np
6
7 import six
8
9 try:
10 from fiona import Env as fiona_env
11 except ImportError:
12 from fiona import drivers as fiona_env
13
14 from geopandas import GeoDataFrame, GeoSeries
15
16
17 _FIONA18 = LooseVersion(fiona.__version__) >= LooseVersion('1.8')
18
19
20 # Adapted from pandas.io.common
21 if six.PY3:
22 from urllib.request import urlopen as _urlopen
23 from urllib.parse import urlparse as parse_url
24 from urllib.parse import uses_relative, uses_netloc, uses_params
25 else:
26 from urllib2 import urlopen as _urlopen
27 from urlparse import urlparse as parse_url
28 from urlparse import uses_relative, uses_netloc, uses_params
29
30 _VALID_URLS = set(uses_relative + uses_netloc + uses_params)
31 _VALID_URLS.discard('')
32
33
34 def _is_url(url):
35 """Check to see if *url* has a valid protocol."""
36 try:
37 return parse_url(url).scheme in _VALID_URLS
38 except:
39 return False
40
41
42 def read_file(filename, bbox=None, **kwargs):
43 """
44 Returns a GeoDataFrame from a file or URL.
45
46 Parameters
47 ----------
48 filename: str
49 Either the absolute or relative path to the file or URL to
50 be opened.
51 bbox : tuple | GeoDataFrame or GeoSeries, default None
52 Filter features by given bounding box, GeoSeries, or GeoDataFrame.
53 CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame.
54 **kwargs:
55 Keyword args to be passed to the `open` or `BytesCollection` method
56 in the fiona library when opening the file. For more information on
57 possible keywords, type:
58 ``import fiona; help(fiona.open)``
59
60 Examples
61 --------
62 >>> df = geopandas.read_file("nybb.shp")
63
64 Returns
65 -------
66 geodataframe : GeoDataFrame
67 """
68 if _is_url(filename):
69 req = _urlopen(filename)
70 path_or_bytes = req.read()
71 reader = fiona.BytesCollection
72 else:
73 path_or_bytes = filename
74 reader = fiona.open
75
76 with fiona_env():
77 with reader(path_or_bytes, **kwargs) as features:
78
79 # In a future Fiona release the crs attribute of features will
80 # no longer be a dict. The following code will be both forward
81 # and backward compatible.
82 if hasattr(features.crs, 'to_dict'):
83 crs = features.crs.to_dict()
84 else:
85 crs = features.crs
86
87 if bbox is not None:
88 if isinstance(bbox, GeoDataFrame) or isinstance(bbox, GeoSeries):
89 bbox = tuple(bbox.to_crs(crs).total_bounds)
90 assert len(bbox) == 4
91 f_filt = features.filter(bbox=bbox)
92 else:
93 f_filt = features
94
95 columns = list(features.meta["schema"]["properties"]) + ["geometry"]
96 gdf = GeoDataFrame.from_features(f_filt, crs=crs, columns=columns)
97
98 return gdf
99
100
101 def to_file(df, filename, driver="ESRI Shapefile", schema=None,
102 **kwargs):
103 """
104 Write this GeoDataFrame to an OGR data source
105
106 A dictionary of supported OGR providers is available via:
107 >>> import fiona
108 >>> fiona.supported_drivers
109
110 Parameters
111 ----------
112 df : GeoDataFrame to be written
113 filename : string
114 File path or file handle to write to.
115 driver : string, default 'ESRI Shapefile'
116 The OGR format driver used to write the vector file.
117 schema : dict, default None
118 If specified, the schema dictionary is passed to Fiona to
119 better control how the file is written. If None, GeoPandas
120 will determine the schema based on each column's dtype
121
122 The *kwargs* are passed to fiona.open and can be used to write
123 to multi-layer data, store data within archives (zip files), etc.
124 """
125 if schema is None:
126 schema = infer_schema(df)
127 filename = os.path.abspath(os.path.expanduser(filename))
128 with fiona_env():
129 with fiona.open(filename, 'w', driver=driver, crs=df.crs,
130 schema=schema, **kwargs) as colxn:
131 colxn.writerecords(df.iterfeatures())
132
133
134 def infer_schema(df):
135 try:
136 from collections import OrderedDict
137 except ImportError:
138 from ordereddict import OrderedDict
139
140 def convert_type(column, in_type):
141 if in_type == object:
142 return 'str'
143 out_type = type(np.zeros(1, in_type).item()).__name__
144 if out_type == 'long':
145 out_type = 'int'
146 if not _FIONA18 and out_type == 'bool':
147 raise ValueError('column "{}" is boolean type, '.format(column) +
148 'which is unsupported in file writing with fiona '
149 '< 1.8. Consider casting the column to int type.')
150 return out_type
151
152 properties = OrderedDict([
153 (col, convert_type(col, _type)) for col, _type in
154 zip(df.columns, df.dtypes) if col != df._geometry_column_name
155 ])
156
157 if df.empty:
158 raise ValueError("Cannot write empty DataFrame to file.")
159
160 # Since https://github.com/Toblerity/Fiona/issues/446 resolution,
161 # Fiona allows a list of geometry types
162 geom_types = _geometry_types(df)
163
164 schema = {'geometry': geom_types, 'properties': properties}
165
166 return schema
167
168
169 def _geometry_types(df):
170 """
171 Determine the geometry types in the GeoDataFrame for the schema.
172 """
173 if _FIONA18:
174 # Starting from Fiona 1.8, schema submitted to fiona to write a gdf
175 # can have mixed geometries:
176 # - 3D and 2D shapes can coexist in inferred schema
177 # - Shape and MultiShape types can (and must) coexist in inferred
178 # schema
179 geom_types_2D = df[~df.geometry.has_z].geometry.geom_type.unique()
180 geom_types_2D = [gtype for gtype in geom_types_2D if gtype is not None]
181 geom_types_3D = df[df.geometry.has_z].geometry.geom_type.unique()
182 geom_types_3D = ["3D " + gtype for gtype in geom_types_3D
183 if gtype is not None]
184 geom_types = geom_types_3D + geom_types_2D
185
186 else:
187 # Before Fiona 1.8, schema submitted to write a gdf should have
188 # one single geometry type whenever possible:
189 # - 3D and 2D shapes cannot coexist in inferred schema
190 # - Shape and MultiShape can not coexist in inferred schema
191 geom_types = _geometry_types_back_compat(df)
192
193 if len(geom_types) == 0:
194 # Default geometry type supported by Fiona
195 # (Since https://github.com/Toblerity/Fiona/issues/446 resolution)
196 return 'Unknown'
197
198 if len(geom_types) == 1:
199 geom_types = geom_types[0]
200
201 return geom_types
202
203
204 def _geometry_types_back_compat(df):
205 """
206 for backward compatibility with Fiona<1.8 only
207 """
208 unique_geom_types = df.geometry.geom_type.unique()
209 unique_geom_types = [
210 gtype for gtype in unique_geom_types if gtype is not None]
211
212 # merge single and Multi types (eg Polygon and MultiPolygon)
213 unique_geom_types = [
214 gtype for gtype in unique_geom_types
215 if not gtype.startswith('Multi') or gtype[5:] not in unique_geom_types]
216
217 if df.geometry.has_z.any():
218 # declare all geometries as 3D geometries
219 unique_geom_types = ["3D " + type for type in unique_geom_types]
220 # by default, all geometries are 2D geometries
221
222 return unique_geom_types
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/geopandas/io/file.py b/geopandas/io/file.py
--- a/geopandas/io/file.py
+++ b/geopandas/io/file.py
@@ -140,6 +140,9 @@
def convert_type(column, in_type):
if in_type == object:
return 'str'
+ if in_type.name.startswith('datetime64'):
+ # numpy datetime type regardless of frequency
+ return 'datetime'
out_type = type(np.zeros(1, in_type).item()).__name__
if out_type == 'long':
out_type = 'int'
|
{"golden_diff": "diff --git a/geopandas/io/file.py b/geopandas/io/file.py\n--- a/geopandas/io/file.py\n+++ b/geopandas/io/file.py\n@@ -140,6 +140,9 @@\n def convert_type(column, in_type):\n if in_type == object:\n return 'str'\n+ if in_type.name.startswith('datetime64'):\n+ # numpy datetime type regardless of frequency\n+ return 'datetime'\n out_type = type(np.zeros(1, in_type).item()).__name__\n if out_type == 'long':\n out_type = 'int'\n", "issue": "ValueError when saving geodataframe with pandas timestamps to shapefile\nIs there any proper way to fix this error inside geopandas (not fiona)?\n\n(Or maybe I'm trying to do the wrong thing and for example should convert my timestamps into strings before writing?)\n\n```\nC:\\Users\\kkrasnoschekov\\_soft\\WinPython-64bit-3.4.3.2_1\\python-3.4.3.amd64\\lib\\site-packages\\geopandas-0.1.0.dev_6163bc9-py3.4.egg\\geopandas\\geodataframe.py in to_file(self, filename, driver, schema, **kwargs)\n 326 \"\"\"\n 327 from geopandas.io.file import to_file\n--> 328 to_file(self, filename, driver, schema, **kwargs)\n 329 \n 330 def to_crs(self, crs=None, epsg=None, inplace=False):\n\nC:\\Users\\kkrasnoschekov\\_soft\\WinPython-64bit-3.4.3.2_1\\python-3.4.3.amd64\\lib\\site-packages\\geopandas-0.1.0.dev_6163bc9-py3.4.egg\\geopandas\\io\\file.py in to_file(df, filename, driver, schema, **kwargs)\n 60 schema=schema, **kwargs) as c:\n 61 for feature in df.iterfeatures():\n---> 62 c.write(feature)\n 63 \n 64 \n\nC:\\Users\\kkrasnoschekov\\_soft\\WinPython-64bit-3.4.3.2_1\\python-3.4.3.amd64\\lib\\site-packages\\fiona\\collection.py in write(self, record)\n 323 def write(self, record):\n 324 \"\"\"Stages a record for writing to disk.\"\"\"\n--> 325 self.writerecords([record])\n 326 \n 327 def validate_record(self, record):\n\nC:\\Users\\kkrasnoschekov\\_soft\\WinPython-64bit-3.4.3.2_1\\python-3.4.3.amd64\\lib\\site-packages\\fiona\\collection.py in writerecords(self, records)\n 317 if self.mode not in ('a', 'w'):\n 318 raise IOError(\"collection not open for writing\")\n--> 319 self.session.writerecs(records, self)\n 320 self._len = self.session.get_length()\n 321 self._bounds = self.session.get_extent()\n\nfiona/ogrext.pyx in fiona.ogrext.WritingSession.writerecs (fiona/ogrext.c:15333)()\n\nfiona/ogrext.pyx in fiona.ogrext.OGRFeatureBuilder.build (fiona/ogrext.c:5751)()\n\nValueError: Invalid field type <class 'pandas.tslib.Timestamp'>\n```\n\n", "before_files": [{"content": "import os\nfrom distutils.version import LooseVersion\n\nimport fiona\nimport numpy as np\n\nimport six\n\ntry:\n from fiona import Env as fiona_env\nexcept ImportError:\n from fiona import drivers as fiona_env\n\nfrom geopandas import GeoDataFrame, GeoSeries\n\n\n_FIONA18 = LooseVersion(fiona.__version__) >= LooseVersion('1.8')\n\n\n# Adapted from pandas.io.common\nif six.PY3:\n from urllib.request import urlopen as _urlopen\n from urllib.parse import urlparse as parse_url\n from urllib.parse import uses_relative, uses_netloc, uses_params\nelse:\n from urllib2 import urlopen as _urlopen\n from urlparse import urlparse as parse_url\n from urlparse import uses_relative, uses_netloc, uses_params\n\n_VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n_VALID_URLS.discard('')\n\n\ndef _is_url(url):\n \"\"\"Check to see if *url* has a valid protocol.\"\"\"\n try:\n return parse_url(url).scheme in _VALID_URLS\n except:\n return False\n\n\ndef read_file(filename, bbox=None, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file or URL.\n\n Parameters\n ----------\n filename: str\n 
Either the absolute or relative path to the file or URL to\n be opened.\n bbox : tuple | GeoDataFrame or GeoSeries, default None\n Filter features by given bounding box, GeoSeries, or GeoDataFrame.\n CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame.\n **kwargs:\n Keyword args to be passed to the `open` or `BytesCollection` method\n in the fiona library when opening the file. For more information on\n possible keywords, type:\n ``import fiona; help(fiona.open)``\n\n Examples\n --------\n >>> df = geopandas.read_file(\"nybb.shp\")\n\n Returns\n -------\n geodataframe : GeoDataFrame\n \"\"\"\n if _is_url(filename):\n req = _urlopen(filename)\n path_or_bytes = req.read()\n reader = fiona.BytesCollection\n else:\n path_or_bytes = filename\n reader = fiona.open\n\n with fiona_env():\n with reader(path_or_bytes, **kwargs) as features:\n\n # In a future Fiona release the crs attribute of features will\n # no longer be a dict. The following code will be both forward\n # and backward compatible.\n if hasattr(features.crs, 'to_dict'):\n crs = features.crs.to_dict()\n else:\n crs = features.crs\n\n if bbox is not None:\n if isinstance(bbox, GeoDataFrame) or isinstance(bbox, GeoSeries):\n bbox = tuple(bbox.to_crs(crs).total_bounds)\n assert len(bbox) == 4\n f_filt = features.filter(bbox=bbox)\n else:\n f_filt = features\n\n columns = list(features.meta[\"schema\"][\"properties\"]) + [\"geometry\"]\n gdf = GeoDataFrame.from_features(f_filt, crs=crs, columns=columns)\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None,\n **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona_env():\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as colxn:\n colxn.writerecords(df.iterfeatures())\n\n\ndef infer_schema(df):\n try:\n from collections import OrderedDict\n except ImportError:\n from ordereddict import OrderedDict\n\n def convert_type(column, in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.zeros(1, in_type).item()).__name__\n if out_type == 'long':\n out_type = 'int'\n if not _FIONA18 and out_type == 'bool':\n raise ValueError('column \"{}\" is boolean type, '.format(column) +\n 'which is unsupported in file writing with fiona '\n '< 1.8. 
Consider casting the column to int type.')\n return out_type\n\n properties = OrderedDict([\n (col, convert_type(col, _type)) for col, _type in\n zip(df.columns, df.dtypes) if col != df._geometry_column_name\n ])\n\n if df.empty:\n raise ValueError(\"Cannot write empty DataFrame to file.\")\n\n # Since https://github.com/Toblerity/Fiona/issues/446 resolution,\n # Fiona allows a list of geometry types\n geom_types = _geometry_types(df)\n\n schema = {'geometry': geom_types, 'properties': properties}\n\n return schema\n\n\ndef _geometry_types(df):\n \"\"\"\n Determine the geometry types in the GeoDataFrame for the schema.\n \"\"\"\n if _FIONA18:\n # Starting from Fiona 1.8, schema submitted to fiona to write a gdf\n # can have mixed geometries:\n # - 3D and 2D shapes can coexist in inferred schema\n # - Shape and MultiShape types can (and must) coexist in inferred\n # schema\n geom_types_2D = df[~df.geometry.has_z].geometry.geom_type.unique()\n geom_types_2D = [gtype for gtype in geom_types_2D if gtype is not None]\n geom_types_3D = df[df.geometry.has_z].geometry.geom_type.unique()\n geom_types_3D = [\"3D \" + gtype for gtype in geom_types_3D\n if gtype is not None]\n geom_types = geom_types_3D + geom_types_2D\n\n else:\n # Before Fiona 1.8, schema submitted to write a gdf should have\n # one single geometry type whenever possible:\n # - 3D and 2D shapes cannot coexist in inferred schema\n # - Shape and MultiShape can not coexist in inferred schema\n geom_types = _geometry_types_back_compat(df)\n\n if len(geom_types) == 0:\n # Default geometry type supported by Fiona\n # (Since https://github.com/Toblerity/Fiona/issues/446 resolution)\n return 'Unknown'\n\n if len(geom_types) == 1:\n geom_types = geom_types[0]\n\n return geom_types\n\n\ndef _geometry_types_back_compat(df):\n \"\"\"\n for backward compatibility with Fiona<1.8 only\n \"\"\"\n unique_geom_types = df.geometry.geom_type.unique()\n unique_geom_types = [\n gtype for gtype in unique_geom_types if gtype is not None]\n\n # merge single and Multi types (eg Polygon and MultiPolygon)\n unique_geom_types = [\n gtype for gtype in unique_geom_types\n if not gtype.startswith('Multi') or gtype[5:] not in unique_geom_types]\n\n if df.geometry.has_z.any():\n # declare all geometries as 3D geometries\n unique_geom_types = [\"3D \" + type for type in unique_geom_types]\n # by default, all geometries are 2D geometries\n\n return unique_geom_types\n", "path": "geopandas/io/file.py"}], "after_files": [{"content": "import os\nfrom distutils.version import LooseVersion\n\nimport fiona\nimport numpy as np\n\nimport six\n\ntry:\n from fiona import Env as fiona_env\nexcept ImportError:\n from fiona import drivers as fiona_env\n\nfrom geopandas import GeoDataFrame, GeoSeries\n\n\n_FIONA18 = LooseVersion(fiona.__version__) >= LooseVersion('1.8')\n\n\n# Adapted from pandas.io.common\nif six.PY3:\n from urllib.request import urlopen as _urlopen\n from urllib.parse import urlparse as parse_url\n from urllib.parse import uses_relative, uses_netloc, uses_params\nelse:\n from urllib2 import urlopen as _urlopen\n from urlparse import urlparse as parse_url\n from urlparse import uses_relative, uses_netloc, uses_params\n\n_VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n_VALID_URLS.discard('')\n\n\ndef _is_url(url):\n \"\"\"Check to see if *url* has a valid protocol.\"\"\"\n try:\n return parse_url(url).scheme in _VALID_URLS\n except:\n return False\n\n\ndef read_file(filename, bbox=None, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file or 
URL.\n\n Parameters\n ----------\n filename: str\n Either the absolute or relative path to the file or URL to\n be opened.\n bbox : tuple | GeoDataFrame or GeoSeries, default None\n Filter features by given bounding box, GeoSeries, or GeoDataFrame.\n CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame.\n **kwargs:\n Keyword args to be passed to the `open` or `BytesCollection` method\n in the fiona library when opening the file. For more information on\n possible keywords, type:\n ``import fiona; help(fiona.open)``\n\n Examples\n --------\n >>> df = geopandas.read_file(\"nybb.shp\")\n\n Returns\n -------\n geodataframe : GeoDataFrame\n \"\"\"\n if _is_url(filename):\n req = _urlopen(filename)\n path_or_bytes = req.read()\n reader = fiona.BytesCollection\n else:\n path_or_bytes = filename\n reader = fiona.open\n\n with fiona_env():\n with reader(path_or_bytes, **kwargs) as features:\n\n # In a future Fiona release the crs attribute of features will\n # no longer be a dict. The following code will be both forward\n # and backward compatible.\n if hasattr(features.crs, 'to_dict'):\n crs = features.crs.to_dict()\n else:\n crs = features.crs\n\n if bbox is not None:\n if isinstance(bbox, GeoDataFrame) or isinstance(bbox, GeoSeries):\n bbox = tuple(bbox.to_crs(crs).total_bounds)\n assert len(bbox) == 4\n f_filt = features.filter(bbox=bbox)\n else:\n f_filt = features\n\n columns = list(features.meta[\"schema\"][\"properties\"]) + [\"geometry\"]\n gdf = GeoDataFrame.from_features(f_filt, crs=crs, columns=columns)\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None,\n **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona_env():\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as colxn:\n colxn.writerecords(df.iterfeatures())\n\n\ndef infer_schema(df):\n try:\n from collections import OrderedDict\n except ImportError:\n from ordereddict import OrderedDict\n\n def convert_type(column, in_type):\n if in_type == object:\n return 'str'\n if in_type.name.startswith('datetime64'):\n # numpy datetime type regardless of frequency\n return 'datetime'\n out_type = type(np.zeros(1, in_type).item()).__name__\n if out_type == 'long':\n out_type = 'int'\n if not _FIONA18 and out_type == 'bool':\n raise ValueError('column \"{}\" is boolean type, '.format(column) +\n 'which is unsupported in file writing with fiona '\n '< 1.8. 
Consider casting the column to int type.')\n return out_type\n\n properties = OrderedDict([\n (col, convert_type(col, _type)) for col, _type in\n zip(df.columns, df.dtypes) if col != df._geometry_column_name\n ])\n\n if df.empty:\n raise ValueError(\"Cannot write empty DataFrame to file.\")\n\n # Since https://github.com/Toblerity/Fiona/issues/446 resolution,\n # Fiona allows a list of geometry types\n geom_types = _geometry_types(df)\n\n schema = {'geometry': geom_types, 'properties': properties}\n\n return schema\n\n\ndef _geometry_types(df):\n \"\"\"\n Determine the geometry types in the GeoDataFrame for the schema.\n \"\"\"\n if _FIONA18:\n # Starting from Fiona 1.8, schema submitted to fiona to write a gdf\n # can have mixed geometries:\n # - 3D and 2D shapes can coexist in inferred schema\n # - Shape and MultiShape types can (and must) coexist in inferred\n # schema\n geom_types_2D = df[~df.geometry.has_z].geometry.geom_type.unique()\n geom_types_2D = [gtype for gtype in geom_types_2D if gtype is not None]\n geom_types_3D = df[df.geometry.has_z].geometry.geom_type.unique()\n geom_types_3D = [\"3D \" + gtype for gtype in geom_types_3D\n if gtype is not None]\n geom_types = geom_types_3D + geom_types_2D\n\n else:\n # Before Fiona 1.8, schema submitted to write a gdf should have\n # one single geometry type whenever possible:\n # - 3D and 2D shapes cannot coexist in inferred schema\n # - Shape and MultiShape can not coexist in inferred schema\n geom_types = _geometry_types_back_compat(df)\n\n if len(geom_types) == 0:\n # Default geometry type supported by Fiona\n # (Since https://github.com/Toblerity/Fiona/issues/446 resolution)\n return 'Unknown'\n\n if len(geom_types) == 1:\n geom_types = geom_types[0]\n\n return geom_types\n\n\ndef _geometry_types_back_compat(df):\n \"\"\"\n for backward compatibility with Fiona<1.8 only\n \"\"\"\n unique_geom_types = df.geometry.geom_type.unique()\n unique_geom_types = [\n gtype for gtype in unique_geom_types if gtype is not None]\n\n # merge single and Multi types (eg Polygon and MultiPolygon)\n unique_geom_types = [\n gtype for gtype in unique_geom_types\n if not gtype.startswith('Multi') or gtype[5:] not in unique_geom_types]\n\n if df.geometry.has_z.any():\n # declare all geometries as 3D geometries\n unique_geom_types = [\"3D \" + type for type in unique_geom_types]\n # by default, all geometries are 2D geometries\n\n return unique_geom_types\n", "path": "geopandas/io/file.py"}]}
| 3,308 | 132 |
gh_patches_debug_21270
|
rasdani/github-patches
|
git_diff
|
uccser__cs-unplugged-210
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add 'About' page content
Using content from existing website.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `csunplugged/general/urls.py`
Content:
```
1 """URL routing for the general application."""
2
3 from django.conf.urls import url
4
5 from . import views
6
7 urlpatterns = [
8 url(r'^about/$', views.GeneralAboutView.as_view(), name='about'),
9 url(r'^$', views.GeneralIndexView.as_view(), name='home'),
10 ]
11
```
Path: `csunplugged/general/views.py`
Content:
```
1 """Views for the general application."""
2
3 from django.views.generic import TemplateView
4 from django.http import HttpResponse
5
6
7 class GeneralIndexView(TemplateView):
8 """View for the homepage that renders from a template."""
9
10 template_name = 'general/index.html'
11
12
13 class GeneralAboutView(TemplateView):
14 """View for the about page that renders from a template."""
15
16 template_name = 'general/about.html'
17
18
19 def health_check(request):
20 """Return heath check response for Google App Engine.
21
22 Returns a 200 HTTP response for Google App Engine to detect the system
23 is running.
24 """
25 return HttpResponse(status=200)
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/csunplugged/general/urls.py b/csunplugged/general/urls.py
--- a/csunplugged/general/urls.py
+++ b/csunplugged/general/urls.py
@@ -6,5 +6,8 @@
urlpatterns = [
url(r'^about/$', views.GeneralAboutView.as_view(), name='about'),
+ url(r'^contact/$', views.GeneralContactView.as_view(), name='contact'),
+ url(r'^people/$', views.GeneralPeopleView.as_view(), name='people'),
+ url(r'^principles/$', views.GeneralPrinciplesView.as_view(), name='principles'),
url(r'^$', views.GeneralIndexView.as_view(), name='home'),
]
diff --git a/csunplugged/general/views.py b/csunplugged/general/views.py
--- a/csunplugged/general/views.py
+++ b/csunplugged/general/views.py
@@ -16,6 +16,24 @@
template_name = 'general/about.html'
+class GeneralContactView(TemplateView):
+ """View for the contact page that renders from a template."""
+
+ template_name = 'general/contact.html'
+
+
+class GeneralPeopleView(TemplateView):
+ """View for the people page that renders from a template."""
+
+ template_name = 'general/people.html'
+
+
+class GeneralPrinciplesView(TemplateView):
+ """View for the princples page that renders from a template."""
+
+ template_name = 'general/principles.html'
+
+
def health_check(request):
"""Return heath check response for Google App Engine.
|
{"golden_diff": "diff --git a/csunplugged/general/urls.py b/csunplugged/general/urls.py\n--- a/csunplugged/general/urls.py\n+++ b/csunplugged/general/urls.py\n@@ -6,5 +6,8 @@\n \n urlpatterns = [\n url(r'^about/$', views.GeneralAboutView.as_view(), name='about'),\n+ url(r'^contact/$', views.GeneralContactView.as_view(), name='contact'),\n+ url(r'^people/$', views.GeneralPeopleView.as_view(), name='people'),\n+ url(r'^principles/$', views.GeneralPrinciplesView.as_view(), name='principles'),\n url(r'^$', views.GeneralIndexView.as_view(), name='home'),\n ]\ndiff --git a/csunplugged/general/views.py b/csunplugged/general/views.py\n--- a/csunplugged/general/views.py\n+++ b/csunplugged/general/views.py\n@@ -16,6 +16,24 @@\n template_name = 'general/about.html'\n \n \n+class GeneralContactView(TemplateView):\n+ \"\"\"View for the contact page that renders from a template.\"\"\"\n+\n+ template_name = 'general/contact.html'\n+\n+\n+class GeneralPeopleView(TemplateView):\n+ \"\"\"View for the people page that renders from a template.\"\"\"\n+\n+ template_name = 'general/people.html'\n+\n+\n+class GeneralPrinciplesView(TemplateView):\n+ \"\"\"View for the princples page that renders from a template.\"\"\"\n+\n+ template_name = 'general/principles.html'\n+\n+\n def health_check(request):\n \"\"\"Return heath check response for Google App Engine.\n", "issue": "Add 'About' page content\nUsing content from existing website.\n", "before_files": [{"content": "\"\"\"URL routing for the general application.\"\"\"\n\nfrom django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^about/$', views.GeneralAboutView.as_view(), name='about'),\n url(r'^$', views.GeneralIndexView.as_view(), name='home'),\n]\n", "path": "csunplugged/general/urls.py"}, {"content": "\"\"\"Views for the general application.\"\"\"\n\nfrom django.views.generic import TemplateView\nfrom django.http import HttpResponse\n\n\nclass GeneralIndexView(TemplateView):\n \"\"\"View for the homepage that renders from a template.\"\"\"\n\n template_name = 'general/index.html'\n\n\nclass GeneralAboutView(TemplateView):\n \"\"\"View for the about page that renders from a template.\"\"\"\n\n template_name = 'general/about.html'\n\n\ndef health_check(request):\n \"\"\"Return heath check response for Google App Engine.\n\n Returns a 200 HTTP response for Google App Engine to detect the system\n is running.\n \"\"\"\n return HttpResponse(status=200)\n", "path": "csunplugged/general/views.py"}], "after_files": [{"content": "\"\"\"URL routing for the general application.\"\"\"\n\nfrom django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(r'^about/$', views.GeneralAboutView.as_view(), name='about'),\n url(r'^contact/$', views.GeneralContactView.as_view(), name='contact'),\n url(r'^people/$', views.GeneralPeopleView.as_view(), name='people'),\n url(r'^principles/$', views.GeneralPrinciplesView.as_view(), name='principles'),\n url(r'^$', views.GeneralIndexView.as_view(), name='home'),\n]\n", "path": "csunplugged/general/urls.py"}, {"content": "\"\"\"Views for the general application.\"\"\"\n\nfrom django.views.generic import TemplateView\nfrom django.http import HttpResponse\n\n\nclass GeneralIndexView(TemplateView):\n \"\"\"View for the homepage that renders from a template.\"\"\"\n\n template_name = 'general/index.html'\n\n\nclass GeneralAboutView(TemplateView):\n \"\"\"View for the about page that renders from a template.\"\"\"\n\n template_name = 'general/about.html'\n\n\nclass GeneralContactView(TemplateView):\n \"\"\"View for the contact page that renders from a template.\"\"\"\n\n template_name = 'general/contact.html'\n\n\nclass GeneralPeopleView(TemplateView):\n \"\"\"View for the people page that renders from a template.\"\"\"\n\n template_name = 'general/people.html'\n\n\nclass GeneralPrinciplesView(TemplateView):\n \"\"\"View for the princples page that renders from a template.\"\"\"\n\n template_name = 'general/principles.html'\n\n\ndef health_check(request):\n \"\"\"Return heath check response for Google App Engine.\n\n Returns a 200 HTTP response for Google App Engine to detect the system\n is running.\n \"\"\"\n return HttpResponse(status=200)\n", "path": "csunplugged/general/views.py"}]}
| 548 | 345 |
gh_patches_debug_4715
|
rasdani/github-patches
|
git_diff
|
encode__uvicorn-110
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Catch `HttpParserInvalidMethodError` exception
`uvicorn==0.1.1` is throwing these errors
```
uvicorn[7423]: Unhandled exception in event loop
uvicorn[7423]: Traceback (most recent call last):
uvicorn[7423]: File "uvloop/handles/stream.pyx", line 784, in uvloop.loop.__uv_stream_on_read_impl
uvicorn[7423]: File "uvloop/handles/stream.pyx", line 563, in uvloop.loop.UVStream._on_read
uvicorn[7423]: File "/home/web/.virtualenvs/mesh/lib/python3.5/site-packages/uvicorn/protocols/http.py", line 212, in data_received
uvicorn[7423]: self.request_parser.feed_data(data)
uvicorn[7423]: File "httptools/parser/parser.pyx", line 193, in httptools.parser.parser.HttpParser.feed_data
uvicorn[7423]: httptools.parser.errors.HttpParserInvalidMethodError: invalid HTTP method
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `uvicorn/protocols/http/httptools.py`
Content:
```
1 import asyncio
2 from email.utils import formatdate
3 import http
4 import logging
5 import time
6 import traceback
7 from urllib.parse import unquote
8 from uvicorn.protocols.websockets.websockets import websocket_upgrade
9
10 import httptools
11
12
13 def _get_default_headers():
14 current_time = time.time()
15 current_date = formatdate(current_time, usegmt=True).encode()
16 return b"".join([b"server: uvicorn\r\ndate: ", current_date, b"\r\n"])
17
18
19 def _get_status_line(status_code):
20 try:
21 phrase = http.HTTPStatus(status_code).phrase.encode()
22 except ValueError:
23 phrase = b""
24 return b"".join([b"HTTP/1.1 ", str(status_code).encode(), b" ", phrase, b"\r\n"])
25
26
27 STATUS_LINE = {
28 status_code: _get_status_line(status_code) for status_code in range(100, 600)
29 }
30
31 DEFAULT_HEADERS = _get_default_headers()
32
33
34 class HttpToolsProtocol(asyncio.Protocol):
35 __slots__ = (
36 'app', 'loop', 'state', 'logger', 'access_logs', 'parser',
37 'transport', 'server', 'client', 'scheme',
38 'scope', 'headers', 'cycle', 'client_event',
39 'readable', 'writable', 'writable_event'
40 )
41
42 def __init__(self, app, loop=None, state=None, logger=None):
43 self.app = app
44 self.loop = loop or asyncio.get_event_loop()
45 self.state = {"total_requests": 0} if state is None else state
46 self.logger = logger or logging.getLogger()
47 self.access_logs = self.logger.level >= logging.INFO
48 self.parser = httptools.HttpRequestParser(self)
49
50 # Per-connection state
51 self.transport = None
52 self.server = None
53 self.client = None
54 self.scheme = None
55
56 # Per-request state
57 self.scope = None
58 self.headers = None
59 self.cycle = None
60 self.client_event = asyncio.Event()
61
62 # Flow control
63 self.readable = True
64 self.writable = True
65 self.writable_event = asyncio.Event()
66 self.writable_event.set()
67
68 @classmethod
69 def tick(cls):
70 global DEFAULT_HEADERS
71 DEFAULT_HEADERS = _get_default_headers()
72
73 # Protocol interface
74 def connection_made(self, transport):
75 self.transport = transport
76 self.server = transport.get_extra_info("sockname")
77 self.client = transport.get_extra_info("peername")
78 self.scheme = "https" if transport.get_extra_info("sslcontext") else "http"
79 if self.access_logs:
80 self.logger.debug("%s - Connected", self.server[0])
81
82 def connection_lost(self, exc):
83 if self.access_logs:
84 self.logger.debug("%s - Disconnected", self.server[0])
85
86 if self.cycle and self.cycle.more_body:
87 self.cycle.disconnected = True
88 self.client_event.set()
89
90 def eof_received(self):
91 pass
92
93 def data_received(self, data):
94 try:
95 self.parser.feed_data(data)
96 except httptools.HttpParserUpgrade:
97 websocket_upgrade(self)
98
99 # Parser callbacks
100 def on_url(self, url):
101 method = self.parser.get_method()
102 parsed_url = httptools.parse_url(url)
103 self.headers = []
104 self.scope = {
105 "type": "http",
106 "http_version": "1.1",
107 "server": self.server,
108 "client": self.client,
109 "scheme": self.scheme,
110 "method": method.decode("ascii"),
111 "path": parsed_url.path.decode("ascii"),
112 "query_string": parsed_url.query if parsed_url.query else b"",
113 "headers": self.headers,
114 }
115
116 def on_header(self, name: bytes, value: bytes):
117 self.headers.append((name.lower(), value))
118
119 def on_headers_complete(self):
120 http_version = self.parser.get_http_version()
121 if http_version != "1.1":
122 self.scope["http_version"] = http_version
123 if self.parser.should_upgrade():
124 return
125 self.cycle = RequestResponseCycle(self.scope, self)
126 self.loop.create_task(self.cycle.run_asgi(self.app))
127
128 def on_body(self, body: bytes):
129 if self.parser.should_upgrade() or self.cycle.response_complete:
130 return
131 self.cycle.body += body
132 self.pause_reading()
133 self.client_event.set()
134
135 def on_message_complete(self):
136 if self.parser.should_upgrade() or self.cycle.response_complete:
137 return
138 self.cycle.more_body = False
139 self.pause_reading()
140 self.client_event.set()
141
142 # Flow control
143 def pause_reading(self):
144 if self.readable:
145 self.readable = False
146 self.transport.pause_reading()
147
148 def resume_reading(self):
149 if not self.readable:
150 self.readable = True
151 self.transport.resume_reading()
152
153 def pause_writing(self):
154 if self.writable:
155 self.writable = False
156 self.writable_event.clear()
157
158 def resume_writing(self):
159 if not self.writable:
160 self.writable = True
161 self.writable_event.set()
162
163
164 class RequestResponseCycle:
165 __slots__ = (
166 'scope', 'protocol',
167 'body', 'more_body', 'disconnected', 'receive_finished',
168 'response_started', 'response_complete', 'keep_alive', 'chunked_encoding', 'expected_content_length'
169 )
170
171 def __init__(self, scope, protocol):
172 self.scope = scope
173 self.protocol = protocol
174
175 # Request state
176 self.body = b""
177 self.more_body = True
178 self.disconnected = False
179 self.receive_finished = False
180
181 # Response state
182 self.response_started = False
183 self.response_complete = False
184 self.keep_alive = True
185 self.chunked_encoding = None
186 self.expected_content_length = 0
187
188 # ASGI exception wrapper
189 async def run_asgi(self, app):
190 try:
191 asgi = app(self.scope)
192 result = await asgi(self.receive, self.send)
193 except:
194 msg = "Exception in ASGI application\n%s"
195 traceback_text = "".join(traceback.format_exc())
196 self.protocol.logger.error(msg, traceback_text)
197 if not self.response_started:
198 await self.send_500_response()
199 else:
200 self.protocol.transport.close()
201 else:
202 if not self.response_started:
203 msg = "ASGI callable returned without starting response."
204 self.protocol.logger.error(msg)
205 await self.send_500_response()
206 elif not self.response_complete:
207 msg = "ASGI callable returned without completing response."
208 self.protocol.logger.error(msg)
209 self.protocol.transport.close()
210 elif result is not None:
211 msg = "ASGI callable should return None, but returned '%s'."
212 self.protocol.logger.error(msg, result)
213 self.protocol.transport.close()
214 finally:
215 self.protocol.state["total_requests"] += 1
216
217 async def send_500_response(self):
218 await self.send(
219 {
220 "type": "http.response.start",
221 "status": 500,
222 "headers": [
223 (b"content-type", b"text/plain; charset=utf-8"),
224 (b"connection", b"close"),
225 ],
226 }
227 )
228 await self.send(
229 {"type": "http.response.body", "body": b"Internal Server Error"}
230 )
231
232 # ASGI interface
233 async def send(self, message):
234 protocol = self.protocol
235 message_type = message["type"]
236
237 if not protocol.writable:
238 await protocol.writable_event.wait()
239
240 if not self.response_started:
241 # Sending response status line and headers
242 if message_type != "http.response.start":
243 msg = "Expected ASGI message 'http.response.start', but got '%s'."
244 raise RuntimeError(msg % message_type)
245
246 self.response_started = True
247
248 status_code = message["status"]
249 headers = message.get("headers", [])
250
251 if protocol.access_logs:
252 protocol.logger.info(
253 '%s - "%s %s HTTP/%s" %d',
254 self.scope["server"][0],
255 self.scope["method"],
256 self.scope["path"],
257 self.scope["http_version"],
258 status_code,
259 )
260
261 # Write response status line and headers
262 content = [STATUS_LINE[status_code], DEFAULT_HEADERS]
263
264 for name, value in headers:
265 name = name.lower()
266 if name == b"content-length" and self.chunked_encoding is None:
267 self.expected_content_length = int(value.decode())
268 self.chunked_encoding = False
269 elif name == b"transfer-encoding" and value.lower() == b"chunked":
270 self.chunked_encoding = True
271 elif name == b"connection" and value.lower() == b"close":
272 self.keep_alive = False
273 content.extend([name, b": ", value, b"\r\n"])
274
275 if self.chunked_encoding is None:
276 # Neither content-length nor transfer-encoding specified
277 self.chunked_encoding = True
278 content.append(b"transfer-encoding: chunked\r\n")
279
280 content.append(b"\r\n")
281 protocol.transport.write(b"".join(content))
282
283 elif not self.response_complete:
284 # Sending response body
285 if message_type != "http.response.body":
286 msg = "Expected ASGI message 'http.response.body', but got '%s'."
287 raise RuntimeError(msg % message_type)
288
289 body = message.get("body", b"")
290 more_body = message.get("more_body", False)
291
292 # Write response body
293 if self.chunked_encoding:
294 content = [b"%x\r\n" % len(body), body, b"\r\n"]
295 if not more_body:
296 content.append(b"0\r\n\r\n")
297 protocol.transport.write(b"".join(content))
298 else:
299 num_bytes = len(body)
300 if num_bytes > self.expected_content_length:
301 raise RuntimeError("Response content longer than Content-Length")
302 else:
303 self.expected_content_length -= num_bytes
304 protocol.transport.write(body)
305
306 # Handle response completion
307 if not more_body:
308 if self.expected_content_length != 0:
309 raise RuntimeError("Response content shorter than Content-Length")
310 self.response_complete = True
311 if not self.keep_alive:
312 protocol.transport.close()
313 else:
314 protocol.resume_reading()
315 else:
316 # Response already sent
317 msg = "Unexpected ASGI message '%s' sent, after response already completed."
318 raise RuntimeError(msg % message_type)
319
320 async def receive(self):
321 protocol = self.protocol
322
323 if self.receive_finished:
324 msg = "Receive channel fully consumed."
325 raise RuntimeError(msg)
326
327 if self.more_body and not self.body and not self.disconnected:
328 protocol.resume_reading()
329 await protocol.client_event.wait()
330 protocol.client_event.clear()
331
332 if self.disconnected:
333 message = {"type": "http.disconnect"}
334 self.receive_finished = True
335 else:
336 message = {
337 "type": "http.request",
338 "body": self.body,
339 "more_body": self.more_body,
340 }
341 self.receive_finished = not (self.more_body)
342 self.body = b""
343 protocol.resume_reading()
344
345 return message
346
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/uvicorn/protocols/http/httptools.py b/uvicorn/protocols/http/httptools.py
--- a/uvicorn/protocols/http/httptools.py
+++ b/uvicorn/protocols/http/httptools.py
@@ -93,6 +93,10 @@
def data_received(self, data):
try:
self.parser.feed_data(data)
+ except httptools.parser.errors.HttpParserError:
+ msg = "Invalid HTTP request received."
+ self.logger.warn(msg)
+ self.transport.close()
except httptools.HttpParserUpgrade:
websocket_upgrade(self)
|
{"golden_diff": "diff --git a/uvicorn/protocols/http/httptools.py b/uvicorn/protocols/http/httptools.py\n--- a/uvicorn/protocols/http/httptools.py\n+++ b/uvicorn/protocols/http/httptools.py\n@@ -93,6 +93,10 @@\n def data_received(self, data):\n try:\n self.parser.feed_data(data)\n+ except httptools.parser.errors.HttpParserError:\n+ msg = \"Invalid HTTP request received.\"\n+ self.logger.warn(msg)\n+ self.transport.close()\n except httptools.HttpParserUpgrade:\n websocket_upgrade(self)\n", "issue": "Catch `HttpParserInvalidMethodError` exception\n`uvicorn==0.1.1` is throwing these errors\r\n\r\n```\r\nuvicorn[7423]: Unhandled exception in event loop\r\nuvicorn[7423]: Traceback (most recent call last):\r\nuvicorn[7423]: File \"uvloop/handles/stream.pyx\", line 784, in uvloop.loop.__uv_stream_on_read_impl\r\nuvicorn[7423]: File \"uvloop/handles/stream.pyx\", line 563, in uvloop.loop.UVStream._on_read\r\nuvicorn[7423]: File \"/home/web/.virtualenvs/mesh/lib/python3.5/site-packages/uvicorn/protocols/http.py\", line 212, in data_received\r\nuvicorn[7423]: self.request_parser.feed_data(data)\r\nuvicorn[7423]: File \"httptools/parser/parser.pyx\", line 193, in httptools.parser.parser.HttpParser.feed_data\r\nuvicorn[7423]: httptools.parser.errors.HttpParserInvalidMethodError: invalid HTTP method\r\n```\n", "before_files": [{"content": "import asyncio\nfrom email.utils import formatdate\nimport http\nimport logging\nimport time\nimport traceback\nfrom urllib.parse import unquote\nfrom uvicorn.protocols.websockets.websockets import websocket_upgrade\n\nimport httptools\n\n\ndef _get_default_headers():\n current_time = time.time()\n current_date = formatdate(current_time, usegmt=True).encode()\n return b\"\".join([b\"server: uvicorn\\r\\ndate: \", current_date, b\"\\r\\n\"])\n\n\ndef _get_status_line(status_code):\n try:\n phrase = http.HTTPStatus(status_code).phrase.encode()\n except ValueError:\n phrase = b\"\"\n return b\"\".join([b\"HTTP/1.1 \", str(status_code).encode(), b\" \", phrase, b\"\\r\\n\"])\n\n\nSTATUS_LINE = {\n status_code: _get_status_line(status_code) for status_code in range(100, 600)\n}\n\nDEFAULT_HEADERS = _get_default_headers()\n\n\nclass HttpToolsProtocol(asyncio.Protocol):\n __slots__ = (\n 'app', 'loop', 'state', 'logger', 'access_logs', 'parser',\n 'transport', 'server', 'client', 'scheme',\n 'scope', 'headers', 'cycle', 'client_event',\n 'readable', 'writable', 'writable_event'\n )\n\n def __init__(self, app, loop=None, state=None, logger=None):\n self.app = app\n self.loop = loop or asyncio.get_event_loop()\n self.state = {\"total_requests\": 0} if state is None else state\n self.logger = logger or logging.getLogger()\n self.access_logs = self.logger.level >= logging.INFO\n self.parser = httptools.HttpRequestParser(self)\n\n # Per-connection state\n self.transport = None\n self.server = None\n self.client = None\n self.scheme = None\n\n # Per-request state\n self.scope = None\n self.headers = None\n self.cycle = None\n self.client_event = asyncio.Event()\n\n # Flow control\n self.readable = True\n self.writable = True\n self.writable_event = asyncio.Event()\n self.writable_event.set()\n\n @classmethod\n def tick(cls):\n global DEFAULT_HEADERS\n DEFAULT_HEADERS = _get_default_headers()\n\n # Protocol interface\n def connection_made(self, transport):\n self.transport = transport\n self.server = transport.get_extra_info(\"sockname\")\n self.client = transport.get_extra_info(\"peername\")\n self.scheme = \"https\" if transport.get_extra_info(\"sslcontext\") else \"http\"\n if 
self.access_logs:\n self.logger.debug(\"%s - Connected\", self.server[0])\n\n def connection_lost(self, exc):\n if self.access_logs:\n self.logger.debug(\"%s - Disconnected\", self.server[0])\n\n if self.cycle and self.cycle.more_body:\n self.cycle.disconnected = True\n self.client_event.set()\n\n def eof_received(self):\n pass\n\n def data_received(self, data):\n try:\n self.parser.feed_data(data)\n except httptools.HttpParserUpgrade:\n websocket_upgrade(self)\n\n # Parser callbacks\n def on_url(self, url):\n method = self.parser.get_method()\n parsed_url = httptools.parse_url(url)\n self.headers = []\n self.scope = {\n \"type\": \"http\",\n \"http_version\": \"1.1\",\n \"server\": self.server,\n \"client\": self.client,\n \"scheme\": self.scheme,\n \"method\": method.decode(\"ascii\"),\n \"path\": parsed_url.path.decode(\"ascii\"),\n \"query_string\": parsed_url.query if parsed_url.query else b\"\",\n \"headers\": self.headers,\n }\n\n def on_header(self, name: bytes, value: bytes):\n self.headers.append((name.lower(), value))\n\n def on_headers_complete(self):\n http_version = self.parser.get_http_version()\n if http_version != \"1.1\":\n self.scope[\"http_version\"] = http_version\n if self.parser.should_upgrade():\n return\n self.cycle = RequestResponseCycle(self.scope, self)\n self.loop.create_task(self.cycle.run_asgi(self.app))\n\n def on_body(self, body: bytes):\n if self.parser.should_upgrade() or self.cycle.response_complete:\n return\n self.cycle.body += body\n self.pause_reading()\n self.client_event.set()\n\n def on_message_complete(self):\n if self.parser.should_upgrade() or self.cycle.response_complete:\n return\n self.cycle.more_body = False\n self.pause_reading()\n self.client_event.set()\n\n # Flow control\n def pause_reading(self):\n if self.readable:\n self.readable = False\n self.transport.pause_reading()\n\n def resume_reading(self):\n if not self.readable:\n self.readable = True\n self.transport.resume_reading()\n\n def pause_writing(self):\n if self.writable:\n self.writable = False\n self.writable_event.clear()\n\n def resume_writing(self):\n if not self.writable:\n self.writable = True\n self.writable_event.set()\n\n\nclass RequestResponseCycle:\n __slots__ = (\n 'scope', 'protocol',\n 'body', 'more_body', 'disconnected', 'receive_finished',\n 'response_started', 'response_complete', 'keep_alive', 'chunked_encoding', 'expected_content_length'\n )\n\n def __init__(self, scope, protocol):\n self.scope = scope\n self.protocol = protocol\n\n # Request state\n self.body = b\"\"\n self.more_body = True\n self.disconnected = False\n self.receive_finished = False\n\n # Response state\n self.response_started = False\n self.response_complete = False\n self.keep_alive = True\n self.chunked_encoding = None\n self.expected_content_length = 0\n\n # ASGI exception wrapper\n async def run_asgi(self, app):\n try:\n asgi = app(self.scope)\n result = await asgi(self.receive, self.send)\n except:\n msg = \"Exception in ASGI application\\n%s\"\n traceback_text = \"\".join(traceback.format_exc())\n self.protocol.logger.error(msg, traceback_text)\n if not self.response_started:\n await self.send_500_response()\n else:\n self.protocol.transport.close()\n else:\n if not self.response_started:\n msg = \"ASGI callable returned without starting response.\"\n self.protocol.logger.error(msg)\n await self.send_500_response()\n elif not self.response_complete:\n msg = \"ASGI callable returned without completing response.\"\n self.protocol.logger.error(msg)\n self.protocol.transport.close()\n 
elif result is not None:\n msg = \"ASGI callable should return None, but returned '%s'.\"\n self.protocol.logger.error(msg, result)\n self.protocol.transport.close()\n finally:\n self.protocol.state[\"total_requests\"] += 1\n\n async def send_500_response(self):\n await self.send(\n {\n \"type\": \"http.response.start\",\n \"status\": 500,\n \"headers\": [\n (b\"content-type\", b\"text/plain; charset=utf-8\"),\n (b\"connection\", b\"close\"),\n ],\n }\n )\n await self.send(\n {\"type\": \"http.response.body\", \"body\": b\"Internal Server Error\"}\n )\n\n # ASGI interface\n async def send(self, message):\n protocol = self.protocol\n message_type = message[\"type\"]\n\n if not protocol.writable:\n await protocol.writable_event.wait()\n\n if not self.response_started:\n # Sending response status line and headers\n if message_type != \"http.response.start\":\n msg = \"Expected ASGI message 'http.response.start', but got '%s'.\"\n raise RuntimeError(msg % message_type)\n\n self.response_started = True\n\n status_code = message[\"status\"]\n headers = message.get(\"headers\", [])\n\n if protocol.access_logs:\n protocol.logger.info(\n '%s - \"%s %s HTTP/%s\" %d',\n self.scope[\"server\"][0],\n self.scope[\"method\"],\n self.scope[\"path\"],\n self.scope[\"http_version\"],\n status_code,\n )\n\n # Write response status line and headers\n content = [STATUS_LINE[status_code], DEFAULT_HEADERS]\n\n for name, value in headers:\n name = name.lower()\n if name == b\"content-length\" and self.chunked_encoding is None:\n self.expected_content_length = int(value.decode())\n self.chunked_encoding = False\n elif name == b\"transfer-encoding\" and value.lower() == b\"chunked\":\n self.chunked_encoding = True\n elif name == b\"connection\" and value.lower() == b\"close\":\n self.keep_alive = False\n content.extend([name, b\": \", value, b\"\\r\\n\"])\n\n if self.chunked_encoding is None:\n # Neither content-length nor transfer-encoding specified\n self.chunked_encoding = True\n content.append(b\"transfer-encoding: chunked\\r\\n\")\n\n content.append(b\"\\r\\n\")\n protocol.transport.write(b\"\".join(content))\n\n elif not self.response_complete:\n # Sending response body\n if message_type != \"http.response.body\":\n msg = \"Expected ASGI message 'http.response.body', but got '%s'.\"\n raise RuntimeError(msg % message_type)\n\n body = message.get(\"body\", b\"\")\n more_body = message.get(\"more_body\", False)\n\n # Write response body\n if self.chunked_encoding:\n content = [b\"%x\\r\\n\" % len(body), body, b\"\\r\\n\"]\n if not more_body:\n content.append(b\"0\\r\\n\\r\\n\")\n protocol.transport.write(b\"\".join(content))\n else:\n num_bytes = len(body)\n if num_bytes > self.expected_content_length:\n raise RuntimeError(\"Response content longer than Content-Length\")\n else:\n self.expected_content_length -= num_bytes\n protocol.transport.write(body)\n\n # Handle response completion\n if not more_body:\n if self.expected_content_length != 0:\n raise RuntimeError(\"Response content shorter than Content-Length\")\n self.response_complete = True\n if not self.keep_alive:\n protocol.transport.close()\n else:\n protocol.resume_reading()\n else:\n # Response already sent\n msg = \"Unexpected ASGI message '%s' sent, after response already completed.\"\n raise RuntimeError(msg % message_type)\n\n async def receive(self):\n protocol = self.protocol\n\n if self.receive_finished:\n msg = \"Receive channel fully consumed.\"\n raise RuntimeError(msg)\n\n if self.more_body and not self.body and not self.disconnected:\n 
protocol.resume_reading()\n await protocol.client_event.wait()\n protocol.client_event.clear()\n\n if self.disconnected:\n message = {\"type\": \"http.disconnect\"}\n self.receive_finished = True\n else:\n message = {\n \"type\": \"http.request\",\n \"body\": self.body,\n \"more_body\": self.more_body,\n }\n self.receive_finished = not (self.more_body)\n self.body = b\"\"\n protocol.resume_reading()\n\n return message\n", "path": "uvicorn/protocols/http/httptools.py"}], "after_files": [{"content": "import asyncio\nfrom email.utils import formatdate\nimport http\nimport logging\nimport time\nimport traceback\nfrom urllib.parse import unquote\nfrom uvicorn.protocols.websockets.websockets import websocket_upgrade\n\nimport httptools\n\n\ndef _get_default_headers():\n current_time = time.time()\n current_date = formatdate(current_time, usegmt=True).encode()\n return b\"\".join([b\"server: uvicorn\\r\\ndate: \", current_date, b\"\\r\\n\"])\n\n\ndef _get_status_line(status_code):\n try:\n phrase = http.HTTPStatus(status_code).phrase.encode()\n except ValueError:\n phrase = b\"\"\n return b\"\".join([b\"HTTP/1.1 \", str(status_code).encode(), b\" \", phrase, b\"\\r\\n\"])\n\n\nSTATUS_LINE = {\n status_code: _get_status_line(status_code) for status_code in range(100, 600)\n}\n\nDEFAULT_HEADERS = _get_default_headers()\n\n\nclass HttpToolsProtocol(asyncio.Protocol):\n __slots__ = (\n 'app', 'loop', 'state', 'logger', 'access_logs', 'parser',\n 'transport', 'server', 'client', 'scheme',\n 'scope', 'headers', 'cycle', 'client_event',\n 'readable', 'writable', 'writable_event'\n )\n\n def __init__(self, app, loop=None, state=None, logger=None):\n self.app = app\n self.loop = loop or asyncio.get_event_loop()\n self.state = {\"total_requests\": 0} if state is None else state\n self.logger = logger or logging.getLogger()\n self.access_logs = self.logger.level >= logging.INFO\n self.parser = httptools.HttpRequestParser(self)\n\n # Per-connection state\n self.transport = None\n self.server = None\n self.client = None\n self.scheme = None\n\n # Per-request state\n self.scope = None\n self.headers = None\n self.cycle = None\n self.client_event = asyncio.Event()\n\n # Flow control\n self.readable = True\n self.writable = True\n self.writable_event = asyncio.Event()\n self.writable_event.set()\n\n @classmethod\n def tick(cls):\n global DEFAULT_HEADERS\n DEFAULT_HEADERS = _get_default_headers()\n\n # Protocol interface\n def connection_made(self, transport):\n self.transport = transport\n self.server = transport.get_extra_info(\"sockname\")\n self.client = transport.get_extra_info(\"peername\")\n self.scheme = \"https\" if transport.get_extra_info(\"sslcontext\") else \"http\"\n if self.access_logs:\n self.logger.debug(\"%s - Connected\", self.server[0])\n\n def connection_lost(self, exc):\n if self.access_logs:\n self.logger.debug(\"%s - Disconnected\", self.server[0])\n\n if self.cycle and self.cycle.more_body:\n self.cycle.disconnected = True\n self.client_event.set()\n\n def eof_received(self):\n pass\n\n def data_received(self, data):\n try:\n self.parser.feed_data(data)\n except httptools.parser.errors.HttpParserError:\n msg = \"Invalid HTTP request received.\"\n self.logger.warn(msg)\n self.transport.close()\n except httptools.HttpParserUpgrade:\n websocket_upgrade(self)\n\n # Parser callbacks\n def on_url(self, url):\n method = self.parser.get_method()\n parsed_url = httptools.parse_url(url)\n self.headers = []\n self.scope = {\n \"type\": \"http\",\n \"http_version\": \"1.1\",\n \"server\": 
self.server,\n \"client\": self.client,\n \"scheme\": self.scheme,\n \"method\": method.decode(\"ascii\"),\n \"path\": parsed_url.path.decode(\"ascii\"),\n \"query_string\": parsed_url.query if parsed_url.query else b\"\",\n \"headers\": self.headers,\n }\n\n def on_header(self, name: bytes, value: bytes):\n self.headers.append((name.lower(), value))\n\n def on_headers_complete(self):\n http_version = self.parser.get_http_version()\n if http_version != \"1.1\":\n self.scope[\"http_version\"] = http_version\n if self.parser.should_upgrade():\n return\n self.cycle = RequestResponseCycle(self.scope, self)\n self.loop.create_task(self.cycle.run_asgi(self.app))\n\n def on_body(self, body: bytes):\n if self.parser.should_upgrade() or self.cycle.response_complete:\n return\n self.cycle.body += body\n self.pause_reading()\n self.client_event.set()\n\n def on_message_complete(self):\n if self.parser.should_upgrade() or self.cycle.response_complete:\n return\n self.cycle.more_body = False\n self.pause_reading()\n self.client_event.set()\n\n # Flow control\n def pause_reading(self):\n if self.readable:\n self.readable = False\n self.transport.pause_reading()\n\n def resume_reading(self):\n if not self.readable:\n self.readable = True\n self.transport.resume_reading()\n\n def pause_writing(self):\n if self.writable:\n self.writable = False\n self.writable_event.clear()\n\n def resume_writing(self):\n if not self.writable:\n self.writable = True\n self.writable_event.set()\n\n\nclass RequestResponseCycle:\n __slots__ = (\n 'scope', 'protocol',\n 'body', 'more_body', 'disconnected', 'receive_finished',\n 'response_started', 'response_complete', 'keep_alive', 'chunked_encoding', 'expected_content_length'\n )\n\n def __init__(self, scope, protocol):\n self.scope = scope\n self.protocol = protocol\n\n # Request state\n self.body = b\"\"\n self.more_body = True\n self.disconnected = False\n self.receive_finished = False\n\n # Response state\n self.response_started = False\n self.response_complete = False\n self.keep_alive = True\n self.chunked_encoding = None\n self.expected_content_length = 0\n\n # ASGI exception wrapper\n async def run_asgi(self, app):\n try:\n asgi = app(self.scope)\n result = await asgi(self.receive, self.send)\n except:\n msg = \"Exception in ASGI application\\n%s\"\n traceback_text = \"\".join(traceback.format_exc())\n self.protocol.logger.error(msg, traceback_text)\n if not self.response_started:\n await self.send_500_response()\n else:\n self.protocol.transport.close()\n else:\n if not self.response_started:\n msg = \"ASGI callable returned without starting response.\"\n self.protocol.logger.error(msg)\n await self.send_500_response()\n elif not self.response_complete:\n msg = \"ASGI callable returned without completing response.\"\n self.protocol.logger.error(msg)\n self.protocol.transport.close()\n elif result is not None:\n msg = \"ASGI callable should return None, but returned '%s'.\"\n self.protocol.logger.error(msg, result)\n self.protocol.transport.close()\n finally:\n self.protocol.state[\"total_requests\"] += 1\n\n async def send_500_response(self):\n await self.send(\n {\n \"type\": \"http.response.start\",\n \"status\": 500,\n \"headers\": [\n (b\"content-type\", b\"text/plain; charset=utf-8\"),\n (b\"connection\", b\"close\"),\n ],\n }\n )\n await self.send(\n {\"type\": \"http.response.body\", \"body\": b\"Internal Server Error\"}\n )\n\n # ASGI interface\n async def send(self, message):\n protocol = self.protocol\n message_type = message[\"type\"]\n\n if not 
protocol.writable:\n await protocol.writable_event.wait()\n\n if not self.response_started:\n # Sending response status line and headers\n if message_type != \"http.response.start\":\n msg = \"Expected ASGI message 'http.response.start', but got '%s'.\"\n raise RuntimeError(msg % message_type)\n\n self.response_started = True\n\n status_code = message[\"status\"]\n headers = message.get(\"headers\", [])\n\n if protocol.access_logs:\n protocol.logger.info(\n '%s - \"%s %s HTTP/%s\" %d',\n self.scope[\"server\"][0],\n self.scope[\"method\"],\n self.scope[\"path\"],\n self.scope[\"http_version\"],\n status_code,\n )\n\n # Write response status line and headers\n content = [STATUS_LINE[status_code], DEFAULT_HEADERS]\n\n for name, value in headers:\n name = name.lower()\n if name == b\"content-length\" and self.chunked_encoding is None:\n self.expected_content_length = int(value.decode())\n self.chunked_encoding = False\n elif name == b\"transfer-encoding\" and value.lower() == b\"chunked\":\n self.chunked_encoding = True\n elif name == b\"connection\" and value.lower() == b\"close\":\n self.keep_alive = False\n content.extend([name, b\": \", value, b\"\\r\\n\"])\n\n if self.chunked_encoding is None:\n # Neither content-length nor transfer-encoding specified\n self.chunked_encoding = True\n content.append(b\"transfer-encoding: chunked\\r\\n\")\n\n content.append(b\"\\r\\n\")\n protocol.transport.write(b\"\".join(content))\n\n elif not self.response_complete:\n # Sending response body\n if message_type != \"http.response.body\":\n msg = \"Expected ASGI message 'http.response.body', but got '%s'.\"\n raise RuntimeError(msg % message_type)\n\n body = message.get(\"body\", b\"\")\n more_body = message.get(\"more_body\", False)\n\n # Write response body\n if self.chunked_encoding:\n content = [b\"%x\\r\\n\" % len(body), body, b\"\\r\\n\"]\n if not more_body:\n content.append(b\"0\\r\\n\\r\\n\")\n protocol.transport.write(b\"\".join(content))\n else:\n num_bytes = len(body)\n if num_bytes > self.expected_content_length:\n raise RuntimeError(\"Response content longer than Content-Length\")\n else:\n self.expected_content_length -= num_bytes\n protocol.transport.write(body)\n\n # Handle response completion\n if not more_body:\n if self.expected_content_length != 0:\n raise RuntimeError(\"Response content shorter than Content-Length\")\n self.response_complete = True\n if not self.keep_alive:\n protocol.transport.close()\n else:\n protocol.resume_reading()\n else:\n # Response already sent\n msg = \"Unexpected ASGI message '%s' sent, after response already completed.\"\n raise RuntimeError(msg % message_type)\n\n async def receive(self):\n protocol = self.protocol\n\n if self.receive_finished:\n msg = \"Receive channel fully consumed.\"\n raise RuntimeError(msg)\n\n if self.more_body and not self.body and not self.disconnected:\n protocol.resume_reading()\n await protocol.client_event.wait()\n protocol.client_event.clear()\n\n if self.disconnected:\n message = {\"type\": \"http.disconnect\"}\n self.receive_finished = True\n else:\n message = {\n \"type\": \"http.request\",\n \"body\": self.body,\n \"more_body\": self.more_body,\n }\n self.receive_finished = not (self.more_body)\n self.body = b\"\"\n protocol.resume_reading()\n\n return message\n", "path": "uvicorn/protocols/http/httptools.py"}]}
| 3,865 | 136 |
gh_patches_debug_26686
|
rasdani/github-patches
|
git_diff
|
frappe__frappe-24878
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ImportError in bench get-untranslated: Circular Import Issue
I encountered an ImportError while running the '**bench get-untranslated**' command in Frappe. This issue seems to be related to a circular import in the '**frappe.translate**' module. When I execute '**bench get-untranslated RU RU_UNTRANSLATED.TXT**', the system throws an ImportError, stating that it cannot import '**get_all_translations**' from the partially initialized module '**frappe.translate**'. This error points towards a potential circular import problem.
### Steps to Reproduce:
1. Run in Frappe Bench environment:
> bench get-untranslated RU RU_UNTRANSLATED.TXT
2. Observe the ImportError related to 'get_all_translations'.
I suspect this might be due to a cyclical dependency within Frappe's translate module. Any insights or fixes would be greatly appreciated.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `frappe/model/__init__.py`
Content:
```
1 # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
2 # License: MIT. See LICENSE
3
4 # model __init__.py
5 import frappe
6 from frappe import _
7
8 data_fieldtypes = (
9 "Currency",
10 "Int",
11 "Long Int",
12 "Float",
13 "Percent",
14 "Check",
15 "Small Text",
16 "Long Text",
17 "Code",
18 "Text Editor",
19 "Markdown Editor",
20 "HTML Editor",
21 "Date",
22 "Datetime",
23 "Time",
24 "Text",
25 "Data",
26 "Link",
27 "Dynamic Link",
28 "Password",
29 "Select",
30 "Rating",
31 "Read Only",
32 "Attach",
33 "Attach Image",
34 "Signature",
35 "Color",
36 "Barcode",
37 "Geolocation",
38 "Duration",
39 "Icon",
40 "Phone",
41 "Autocomplete",
42 "JSON",
43 )
44
45 float_like_fields = {"Float", "Currency", "Percent"}
46 datetime_fields = {"Datetime", "Date", "Time"}
47
48 attachment_fieldtypes = (
49 "Attach",
50 "Attach Image",
51 )
52
53 no_value_fields = (
54 "Section Break",
55 "Column Break",
56 "Tab Break",
57 "HTML",
58 "Table",
59 "Table MultiSelect",
60 "Button",
61 "Image",
62 "Fold",
63 "Heading",
64 )
65
66 display_fieldtypes = (
67 "Section Break",
68 "Column Break",
69 "Tab Break",
70 "HTML",
71 "Button",
72 "Image",
73 "Fold",
74 "Heading",
75 )
76
77 numeric_fieldtypes = ("Currency", "Int", "Long Int", "Float", "Percent", "Check")
78
79 data_field_options = ("Email", "Name", "Phone", "URL", "Barcode")
80
81 default_fields = (
82 "doctype",
83 "name",
84 "owner",
85 "creation",
86 "modified",
87 "modified_by",
88 "docstatus",
89 "idx",
90 )
91
92 child_table_fields = ("parent", "parentfield", "parenttype")
93
94 optional_fields = ("_user_tags", "_comments", "_assign", "_liked_by", "_seen")
95
96 table_fields = ("Table", "Table MultiSelect")
97
98 core_doctypes_list = (
99 "DefaultValue",
100 "DocType",
101 "DocField",
102 "DocPerm",
103 "DocType Action",
104 "DocType Link",
105 "User",
106 "Role",
107 "Has Role",
108 "Page",
109 "Module Def",
110 "Print Format",
111 "Report",
112 "Customize Form",
113 "Customize Form Field",
114 "Property Setter",
115 "Custom Field",
116 "Client Script",
117 )
118
119 log_types = (
120 "Version",
121 "Error Log",
122 "Scheduled Job Log",
123 "Event Sync Log",
124 "Event Update Log",
125 "Access Log",
126 "View Log",
127 "Activity Log",
128 "Energy Point Log",
129 "Notification Log",
130 "Email Queue",
131 "DocShare",
132 "Document Follow",
133 "Console Log",
134 )
135
136 std_fields = [
137 {"fieldname": "name", "fieldtype": "Link", "label": _("ID")},
138 {"fieldname": "owner", "fieldtype": "Link", "label": _("Created By"), "options": "User"},
139 {"fieldname": "idx", "fieldtype": "Int", "label": _("Index")},
140 {"fieldname": "creation", "fieldtype": "Datetime", "label": _("Created On")},
141 {"fieldname": "modified", "fieldtype": "Datetime", "label": _("Last Updated On")},
142 {
143 "fieldname": "modified_by",
144 "fieldtype": "Link",
145 "label": _("Last Updated By"),
146 "options": "User",
147 },
148 {"fieldname": "_user_tags", "fieldtype": "Data", "label": _("Tags")},
149 {"fieldname": "_liked_by", "fieldtype": "Data", "label": _("Liked By")},
150 {"fieldname": "_comments", "fieldtype": "Text", "label": _("Comments")},
151 {"fieldname": "_assign", "fieldtype": "Text", "label": _("Assigned To")},
152 {"fieldname": "docstatus", "fieldtype": "Int", "label": _("Document Status")},
153 ]
154
155
156 def delete_fields(args_dict, delete=0):
157 """
158 Delete a field.
159 * Deletes record from `tabDocField`
160 * If not single doctype: Drops column from table
161 * If single, deletes record from `tabSingles`
162 args_dict = { dt: [field names] }
163 """
164 import frappe.utils
165
166 for dt in args_dict:
167 fields = args_dict[dt]
168 if not fields:
169 continue
170
171 frappe.db.delete(
172 "DocField",
173 {
174 "parent": dt,
175 "fieldname": ("in", fields),
176 },
177 )
178
179 # Delete the data/column only if delete is specified
180 if not delete:
181 continue
182
183 if frappe.db.get_value("DocType", dt, "issingle"):
184 frappe.db.delete(
185 "Singles",
186 {
187 "doctype": dt,
188 "field": ("in", fields),
189 },
190 )
191 else:
192 existing_fields = frappe.db.describe(dt)
193 existing_fields = existing_fields and [e[0] for e in existing_fields] or []
194 fields_need_to_delete = set(fields) & set(existing_fields)
195 if not fields_need_to_delete:
196 continue
197
198 if frappe.db.db_type == "mariadb":
199 # mariadb implicitly commits before DDL, make it explicit
200 frappe.db.commit()
201
202 query = "ALTER TABLE `tab%s` " % dt + ", ".join(
203 "DROP COLUMN `%s`" % f for f in fields_need_to_delete
204 )
205 frappe.db.sql(query)
206
207 if frappe.db.db_type == "postgres":
208 # commit the results to db
209 frappe.db.commit()
210
211
212 def get_permitted_fields(
213 doctype: str,
214 parenttype: str | None = None,
215 user: str | None = None,
216 permission_type: str | None = None,
217 *,
218 ignore_virtual=False,
219 ) -> list[str]:
220 meta = frappe.get_meta(doctype)
221 valid_columns = meta.get_valid_columns()
222
223 if doctype in core_doctypes_list:
224 return valid_columns
225
226 # DocType has only fields of type Table (Table, Table MultiSelect)
227 if set(valid_columns).issubset(default_fields):
228 return valid_columns
229
230 if permission_type is None:
231 permission_type = "select" if frappe.only_has_select_perm(doctype, user=user) else "read"
232
233 meta_fields = meta.default_fields.copy()
234 optional_meta_fields = [x for x in optional_fields if x in valid_columns]
235
236 if permitted_fields := meta.get_permitted_fieldnames(
237 parenttype=parenttype,
238 user=user,
239 permission_type=permission_type,
240 with_virtual_fields=not ignore_virtual,
241 ):
242 if permission_type == "select":
243 return permitted_fields
244
245 if meta.istable:
246 meta_fields.extend(child_table_fields)
247
248 return meta_fields + permitted_fields + optional_meta_fields
249
250 return meta_fields + optional_meta_fields
251
252
253 def is_default_field(fieldname: str) -> bool:
254 return fieldname in default_fields
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/frappe/model/__init__.py b/frappe/model/__init__.py
--- a/frappe/model/__init__.py
+++ b/frappe/model/__init__.py
@@ -134,22 +134,22 @@
)
std_fields = [
- {"fieldname": "name", "fieldtype": "Link", "label": _("ID")},
- {"fieldname": "owner", "fieldtype": "Link", "label": _("Created By"), "options": "User"},
- {"fieldname": "idx", "fieldtype": "Int", "label": _("Index")},
- {"fieldname": "creation", "fieldtype": "Datetime", "label": _("Created On")},
- {"fieldname": "modified", "fieldtype": "Datetime", "label": _("Last Updated On")},
+ {"fieldname": "name", "fieldtype": "Link", "label": "ID"},
+ {"fieldname": "owner", "fieldtype": "Link", "label": "Created By", "options": "User"},
+ {"fieldname": "idx", "fieldtype": "Int", "label": "Index"},
+ {"fieldname": "creation", "fieldtype": "Datetime", "label": "Created On"},
+ {"fieldname": "modified", "fieldtype": "Datetime", "label": "Last Updated On"},
{
"fieldname": "modified_by",
"fieldtype": "Link",
- "label": _("Last Updated By"),
+ "label": "Last Updated By",
"options": "User",
},
- {"fieldname": "_user_tags", "fieldtype": "Data", "label": _("Tags")},
- {"fieldname": "_liked_by", "fieldtype": "Data", "label": _("Liked By")},
- {"fieldname": "_comments", "fieldtype": "Text", "label": _("Comments")},
- {"fieldname": "_assign", "fieldtype": "Text", "label": _("Assigned To")},
- {"fieldname": "docstatus", "fieldtype": "Int", "label": _("Document Status")},
+ {"fieldname": "_user_tags", "fieldtype": "Data", "label": "Tags"},
+ {"fieldname": "_liked_by", "fieldtype": "Data", "label": "Liked By"},
+ {"fieldname": "_comments", "fieldtype": "Text", "label": "Comments"},
+ {"fieldname": "_assign", "fieldtype": "Text", "label": "Assigned To"},
+ {"fieldname": "docstatus", "fieldtype": "Int", "label": "Document Status"},
]
|
{"golden_diff": "diff --git a/frappe/model/__init__.py b/frappe/model/__init__.py\n--- a/frappe/model/__init__.py\n+++ b/frappe/model/__init__.py\n@@ -134,22 +134,22 @@\n )\n \n std_fields = [\n-\t{\"fieldname\": \"name\", \"fieldtype\": \"Link\", \"label\": _(\"ID\")},\n-\t{\"fieldname\": \"owner\", \"fieldtype\": \"Link\", \"label\": _(\"Created By\"), \"options\": \"User\"},\n-\t{\"fieldname\": \"idx\", \"fieldtype\": \"Int\", \"label\": _(\"Index\")},\n-\t{\"fieldname\": \"creation\", \"fieldtype\": \"Datetime\", \"label\": _(\"Created On\")},\n-\t{\"fieldname\": \"modified\", \"fieldtype\": \"Datetime\", \"label\": _(\"Last Updated On\")},\n+\t{\"fieldname\": \"name\", \"fieldtype\": \"Link\", \"label\": \"ID\"},\n+\t{\"fieldname\": \"owner\", \"fieldtype\": \"Link\", \"label\": \"Created By\", \"options\": \"User\"},\n+\t{\"fieldname\": \"idx\", \"fieldtype\": \"Int\", \"label\": \"Index\"},\n+\t{\"fieldname\": \"creation\", \"fieldtype\": \"Datetime\", \"label\": \"Created On\"},\n+\t{\"fieldname\": \"modified\", \"fieldtype\": \"Datetime\", \"label\": \"Last Updated On\"},\n \t{\n \t\t\"fieldname\": \"modified_by\",\n \t\t\"fieldtype\": \"Link\",\n-\t\t\"label\": _(\"Last Updated By\"),\n+\t\t\"label\": \"Last Updated By\",\n \t\t\"options\": \"User\",\n \t},\n-\t{\"fieldname\": \"_user_tags\", \"fieldtype\": \"Data\", \"label\": _(\"Tags\")},\n-\t{\"fieldname\": \"_liked_by\", \"fieldtype\": \"Data\", \"label\": _(\"Liked By\")},\n-\t{\"fieldname\": \"_comments\", \"fieldtype\": \"Text\", \"label\": _(\"Comments\")},\n-\t{\"fieldname\": \"_assign\", \"fieldtype\": \"Text\", \"label\": _(\"Assigned To\")},\n-\t{\"fieldname\": \"docstatus\", \"fieldtype\": \"Int\", \"label\": _(\"Document Status\")},\n+\t{\"fieldname\": \"_user_tags\", \"fieldtype\": \"Data\", \"label\": \"Tags\"},\n+\t{\"fieldname\": \"_liked_by\", \"fieldtype\": \"Data\", \"label\": \"Liked By\"},\n+\t{\"fieldname\": \"_comments\", \"fieldtype\": \"Text\", \"label\": \"Comments\"},\n+\t{\"fieldname\": \"_assign\", \"fieldtype\": \"Text\", \"label\": \"Assigned To\"},\n+\t{\"fieldname\": \"docstatus\", \"fieldtype\": \"Int\", \"label\": \"Document Status\"},\n ]\n", "issue": "ImportError in bench get-untranslated: Circular Import Issue\nI encountered an ImportError while running the '**bench get-untranslated**' command in Frappe. This issue seems to be related to a circular import in the '**frappe.translate**' module. When I execute '**bench get-untranslated RU RU_UNTRANSLATED.TXT**', the system throws an ImportError, stating that it cannot import '**get_all_translations**' from the partially initialized module '**frappe.translate**'. This error points towards a potential circular import problem.\r\n\r\n### Steps to Reproduce:\r\n\r\n1. Run in Frappe Bench environment:\r\n\r\n> bench get-untranslated RU RU_UNTRANSLATED.TXT\r\n\r\n2. Observe the ImportError related to 'get_all_translations'.\r\n\r\nI suspect this might be due to a cyclical dependency within the Frappe's translate module. Any insights or fixes would be greatly appreciated.\n", "before_files": [{"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: MIT. 
See LICENSE\n\n# model __init__.py\nimport frappe\nfrom frappe import _\n\ndata_fieldtypes = (\n\t\"Currency\",\n\t\"Int\",\n\t\"Long Int\",\n\t\"Float\",\n\t\"Percent\",\n\t\"Check\",\n\t\"Small Text\",\n\t\"Long Text\",\n\t\"Code\",\n\t\"Text Editor\",\n\t\"Markdown Editor\",\n\t\"HTML Editor\",\n\t\"Date\",\n\t\"Datetime\",\n\t\"Time\",\n\t\"Text\",\n\t\"Data\",\n\t\"Link\",\n\t\"Dynamic Link\",\n\t\"Password\",\n\t\"Select\",\n\t\"Rating\",\n\t\"Read Only\",\n\t\"Attach\",\n\t\"Attach Image\",\n\t\"Signature\",\n\t\"Color\",\n\t\"Barcode\",\n\t\"Geolocation\",\n\t\"Duration\",\n\t\"Icon\",\n\t\"Phone\",\n\t\"Autocomplete\",\n\t\"JSON\",\n)\n\nfloat_like_fields = {\"Float\", \"Currency\", \"Percent\"}\ndatetime_fields = {\"Datetime\", \"Date\", \"Time\"}\n\nattachment_fieldtypes = (\n\t\"Attach\",\n\t\"Attach Image\",\n)\n\nno_value_fields = (\n\t\"Section Break\",\n\t\"Column Break\",\n\t\"Tab Break\",\n\t\"HTML\",\n\t\"Table\",\n\t\"Table MultiSelect\",\n\t\"Button\",\n\t\"Image\",\n\t\"Fold\",\n\t\"Heading\",\n)\n\ndisplay_fieldtypes = (\n\t\"Section Break\",\n\t\"Column Break\",\n\t\"Tab Break\",\n\t\"HTML\",\n\t\"Button\",\n\t\"Image\",\n\t\"Fold\",\n\t\"Heading\",\n)\n\nnumeric_fieldtypes = (\"Currency\", \"Int\", \"Long Int\", \"Float\", \"Percent\", \"Check\")\n\ndata_field_options = (\"Email\", \"Name\", \"Phone\", \"URL\", \"Barcode\")\n\ndefault_fields = (\n\t\"doctype\",\n\t\"name\",\n\t\"owner\",\n\t\"creation\",\n\t\"modified\",\n\t\"modified_by\",\n\t\"docstatus\",\n\t\"idx\",\n)\n\nchild_table_fields = (\"parent\", \"parentfield\", \"parenttype\")\n\noptional_fields = (\"_user_tags\", \"_comments\", \"_assign\", \"_liked_by\", \"_seen\")\n\ntable_fields = (\"Table\", \"Table MultiSelect\")\n\ncore_doctypes_list = (\n\t\"DefaultValue\",\n\t\"DocType\",\n\t\"DocField\",\n\t\"DocPerm\",\n\t\"DocType Action\",\n\t\"DocType Link\",\n\t\"User\",\n\t\"Role\",\n\t\"Has Role\",\n\t\"Page\",\n\t\"Module Def\",\n\t\"Print Format\",\n\t\"Report\",\n\t\"Customize Form\",\n\t\"Customize Form Field\",\n\t\"Property Setter\",\n\t\"Custom Field\",\n\t\"Client Script\",\n)\n\nlog_types = (\n\t\"Version\",\n\t\"Error Log\",\n\t\"Scheduled Job Log\",\n\t\"Event Sync Log\",\n\t\"Event Update Log\",\n\t\"Access Log\",\n\t\"View Log\",\n\t\"Activity Log\",\n\t\"Energy Point Log\",\n\t\"Notification Log\",\n\t\"Email Queue\",\n\t\"DocShare\",\n\t\"Document Follow\",\n\t\"Console Log\",\n)\n\nstd_fields = [\n\t{\"fieldname\": \"name\", \"fieldtype\": \"Link\", \"label\": _(\"ID\")},\n\t{\"fieldname\": \"owner\", \"fieldtype\": \"Link\", \"label\": _(\"Created By\"), \"options\": \"User\"},\n\t{\"fieldname\": \"idx\", \"fieldtype\": \"Int\", \"label\": _(\"Index\")},\n\t{\"fieldname\": \"creation\", \"fieldtype\": \"Datetime\", \"label\": _(\"Created On\")},\n\t{\"fieldname\": \"modified\", \"fieldtype\": \"Datetime\", \"label\": _(\"Last Updated On\")},\n\t{\n\t\t\"fieldname\": \"modified_by\",\n\t\t\"fieldtype\": \"Link\",\n\t\t\"label\": _(\"Last Updated By\"),\n\t\t\"options\": \"User\",\n\t},\n\t{\"fieldname\": \"_user_tags\", \"fieldtype\": \"Data\", \"label\": _(\"Tags\")},\n\t{\"fieldname\": \"_liked_by\", \"fieldtype\": \"Data\", \"label\": _(\"Liked By\")},\n\t{\"fieldname\": \"_comments\", \"fieldtype\": \"Text\", \"label\": _(\"Comments\")},\n\t{\"fieldname\": \"_assign\", \"fieldtype\": \"Text\", \"label\": _(\"Assigned To\")},\n\t{\"fieldname\": \"docstatus\", \"fieldtype\": \"Int\", \"label\": _(\"Document Status\")},\n]\n\n\ndef delete_fields(args_dict, 
delete=0):\n\t\"\"\"\n\tDelete a field.\n\t* Deletes record from `tabDocField`\n\t* If not single doctype: Drops column from table\n\t* If single, deletes record from `tabSingles`\n\targs_dict = { dt: [field names] }\n\t\"\"\"\n\timport frappe.utils\n\n\tfor dt in args_dict:\n\t\tfields = args_dict[dt]\n\t\tif not fields:\n\t\t\tcontinue\n\n\t\tfrappe.db.delete(\n\t\t\t\"DocField\",\n\t\t\t{\n\t\t\t\t\"parent\": dt,\n\t\t\t\t\"fieldname\": (\"in\", fields),\n\t\t\t},\n\t\t)\n\n\t\t# Delete the data/column only if delete is specified\n\t\tif not delete:\n\t\t\tcontinue\n\n\t\tif frappe.db.get_value(\"DocType\", dt, \"issingle\"):\n\t\t\tfrappe.db.delete(\n\t\t\t\t\"Singles\",\n\t\t\t\t{\n\t\t\t\t\t\"doctype\": dt,\n\t\t\t\t\t\"field\": (\"in\", fields),\n\t\t\t\t},\n\t\t\t)\n\t\telse:\n\t\t\texisting_fields = frappe.db.describe(dt)\n\t\t\texisting_fields = existing_fields and [e[0] for e in existing_fields] or []\n\t\t\tfields_need_to_delete = set(fields) & set(existing_fields)\n\t\t\tif not fields_need_to_delete:\n\t\t\t\tcontinue\n\n\t\t\tif frappe.db.db_type == \"mariadb\":\n\t\t\t\t# mariadb implicitly commits before DDL, make it explicit\n\t\t\t\tfrappe.db.commit()\n\n\t\t\tquery = \"ALTER TABLE `tab%s` \" % dt + \", \".join(\n\t\t\t\t\"DROP COLUMN `%s`\" % f for f in fields_need_to_delete\n\t\t\t)\n\t\t\tfrappe.db.sql(query)\n\n\t\tif frappe.db.db_type == \"postgres\":\n\t\t\t# commit the results to db\n\t\t\tfrappe.db.commit()\n\n\ndef get_permitted_fields(\n\tdoctype: str,\n\tparenttype: str | None = None,\n\tuser: str | None = None,\n\tpermission_type: str | None = None,\n\t*,\n\tignore_virtual=False,\n) -> list[str]:\n\tmeta = frappe.get_meta(doctype)\n\tvalid_columns = meta.get_valid_columns()\n\n\tif doctype in core_doctypes_list:\n\t\treturn valid_columns\n\n\t# DocType has only fields of type Table (Table, Table MultiSelect)\n\tif set(valid_columns).issubset(default_fields):\n\t\treturn valid_columns\n\n\tif permission_type is None:\n\t\tpermission_type = \"select\" if frappe.only_has_select_perm(doctype, user=user) else \"read\"\n\n\tmeta_fields = meta.default_fields.copy()\n\toptional_meta_fields = [x for x in optional_fields if x in valid_columns]\n\n\tif permitted_fields := meta.get_permitted_fieldnames(\n\t\tparenttype=parenttype,\n\t\tuser=user,\n\t\tpermission_type=permission_type,\n\t\twith_virtual_fields=not ignore_virtual,\n\t):\n\t\tif permission_type == \"select\":\n\t\t\treturn permitted_fields\n\n\t\tif meta.istable:\n\t\t\tmeta_fields.extend(child_table_fields)\n\n\t\treturn meta_fields + permitted_fields + optional_meta_fields\n\n\treturn meta_fields + optional_meta_fields\n\n\ndef is_default_field(fieldname: str) -> bool:\n\treturn fieldname in default_fields\n", "path": "frappe/model/__init__.py"}], "after_files": [{"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: MIT. 
See LICENSE\n\n# model __init__.py\nimport frappe\nfrom frappe import _\n\ndata_fieldtypes = (\n\t\"Currency\",\n\t\"Int\",\n\t\"Long Int\",\n\t\"Float\",\n\t\"Percent\",\n\t\"Check\",\n\t\"Small Text\",\n\t\"Long Text\",\n\t\"Code\",\n\t\"Text Editor\",\n\t\"Markdown Editor\",\n\t\"HTML Editor\",\n\t\"Date\",\n\t\"Datetime\",\n\t\"Time\",\n\t\"Text\",\n\t\"Data\",\n\t\"Link\",\n\t\"Dynamic Link\",\n\t\"Password\",\n\t\"Select\",\n\t\"Rating\",\n\t\"Read Only\",\n\t\"Attach\",\n\t\"Attach Image\",\n\t\"Signature\",\n\t\"Color\",\n\t\"Barcode\",\n\t\"Geolocation\",\n\t\"Duration\",\n\t\"Icon\",\n\t\"Phone\",\n\t\"Autocomplete\",\n\t\"JSON\",\n)\n\nfloat_like_fields = {\"Float\", \"Currency\", \"Percent\"}\ndatetime_fields = {\"Datetime\", \"Date\", \"Time\"}\n\nattachment_fieldtypes = (\n\t\"Attach\",\n\t\"Attach Image\",\n)\n\nno_value_fields = (\n\t\"Section Break\",\n\t\"Column Break\",\n\t\"Tab Break\",\n\t\"HTML\",\n\t\"Table\",\n\t\"Table MultiSelect\",\n\t\"Button\",\n\t\"Image\",\n\t\"Fold\",\n\t\"Heading\",\n)\n\ndisplay_fieldtypes = (\n\t\"Section Break\",\n\t\"Column Break\",\n\t\"Tab Break\",\n\t\"HTML\",\n\t\"Button\",\n\t\"Image\",\n\t\"Fold\",\n\t\"Heading\",\n)\n\nnumeric_fieldtypes = (\"Currency\", \"Int\", \"Long Int\", \"Float\", \"Percent\", \"Check\")\n\ndata_field_options = (\"Email\", \"Name\", \"Phone\", \"URL\", \"Barcode\")\n\ndefault_fields = (\n\t\"doctype\",\n\t\"name\",\n\t\"owner\",\n\t\"creation\",\n\t\"modified\",\n\t\"modified_by\",\n\t\"docstatus\",\n\t\"idx\",\n)\n\nchild_table_fields = (\"parent\", \"parentfield\", \"parenttype\")\n\noptional_fields = (\"_user_tags\", \"_comments\", \"_assign\", \"_liked_by\", \"_seen\")\n\ntable_fields = (\"Table\", \"Table MultiSelect\")\n\ncore_doctypes_list = (\n\t\"DefaultValue\",\n\t\"DocType\",\n\t\"DocField\",\n\t\"DocPerm\",\n\t\"DocType Action\",\n\t\"DocType Link\",\n\t\"User\",\n\t\"Role\",\n\t\"Has Role\",\n\t\"Page\",\n\t\"Module Def\",\n\t\"Print Format\",\n\t\"Report\",\n\t\"Customize Form\",\n\t\"Customize Form Field\",\n\t\"Property Setter\",\n\t\"Custom Field\",\n\t\"Client Script\",\n)\n\nlog_types = (\n\t\"Version\",\n\t\"Error Log\",\n\t\"Scheduled Job Log\",\n\t\"Event Sync Log\",\n\t\"Event Update Log\",\n\t\"Access Log\",\n\t\"View Log\",\n\t\"Activity Log\",\n\t\"Energy Point Log\",\n\t\"Notification Log\",\n\t\"Email Queue\",\n\t\"DocShare\",\n\t\"Document Follow\",\n\t\"Console Log\",\n)\n\nstd_fields = [\n\t{\"fieldname\": \"name\", \"fieldtype\": \"Link\", \"label\": \"ID\"},\n\t{\"fieldname\": \"owner\", \"fieldtype\": \"Link\", \"label\": \"Created By\", \"options\": \"User\"},\n\t{\"fieldname\": \"idx\", \"fieldtype\": \"Int\", \"label\": \"Index\"},\n\t{\"fieldname\": \"creation\", \"fieldtype\": \"Datetime\", \"label\": \"Created On\"},\n\t{\"fieldname\": \"modified\", \"fieldtype\": \"Datetime\", \"label\": \"Last Updated On\"},\n\t{\n\t\t\"fieldname\": \"modified_by\",\n\t\t\"fieldtype\": \"Link\",\n\t\t\"label\": \"Last Updated By\",\n\t\t\"options\": \"User\",\n\t},\n\t{\"fieldname\": \"_user_tags\", \"fieldtype\": \"Data\", \"label\": \"Tags\"},\n\t{\"fieldname\": \"_liked_by\", \"fieldtype\": \"Data\", \"label\": \"Liked By\"},\n\t{\"fieldname\": \"_comments\", \"fieldtype\": \"Text\", \"label\": \"Comments\"},\n\t{\"fieldname\": \"_assign\", \"fieldtype\": \"Text\", \"label\": \"Assigned To\"},\n\t{\"fieldname\": \"docstatus\", \"fieldtype\": \"Int\", \"label\": \"Document Status\"},\n]\n\n\ndef delete_fields(args_dict, delete=0):\n\t\"\"\"\n\tDelete a field.\n\t* Deletes record 
from `tabDocField`\n\t* If not single doctype: Drops column from table\n\t* If single, deletes record from `tabSingles`\n\targs_dict = { dt: [field names] }\n\t\"\"\"\n\timport frappe.utils\n\n\tfor dt in args_dict:\n\t\tfields = args_dict[dt]\n\t\tif not fields:\n\t\t\tcontinue\n\n\t\tfrappe.db.delete(\n\t\t\t\"DocField\",\n\t\t\t{\n\t\t\t\t\"parent\": dt,\n\t\t\t\t\"fieldname\": (\"in\", fields),\n\t\t\t},\n\t\t)\n\n\t\t# Delete the data/column only if delete is specified\n\t\tif not delete:\n\t\t\tcontinue\n\n\t\tif frappe.db.get_value(\"DocType\", dt, \"issingle\"):\n\t\t\tfrappe.db.delete(\n\t\t\t\t\"Singles\",\n\t\t\t\t{\n\t\t\t\t\t\"doctype\": dt,\n\t\t\t\t\t\"field\": (\"in\", fields),\n\t\t\t\t},\n\t\t\t)\n\t\telse:\n\t\t\texisting_fields = frappe.db.describe(dt)\n\t\t\texisting_fields = existing_fields and [e[0] for e in existing_fields] or []\n\t\t\tfields_need_to_delete = set(fields) & set(existing_fields)\n\t\t\tif not fields_need_to_delete:\n\t\t\t\tcontinue\n\n\t\t\tif frappe.db.db_type == \"mariadb\":\n\t\t\t\t# mariadb implicitly commits before DDL, make it explicit\n\t\t\t\tfrappe.db.commit()\n\n\t\t\tquery = \"ALTER TABLE `tab%s` \" % dt + \", \".join(\n\t\t\t\t\"DROP COLUMN `%s`\" % f for f in fields_need_to_delete\n\t\t\t)\n\t\t\tfrappe.db.sql(query)\n\n\t\tif frappe.db.db_type == \"postgres\":\n\t\t\t# commit the results to db\n\t\t\tfrappe.db.commit()\n\n\ndef get_permitted_fields(\n\tdoctype: str,\n\tparenttype: str | None = None,\n\tuser: str | None = None,\n\tpermission_type: str | None = None,\n\t*,\n\tignore_virtual=False,\n) -> list[str]:\n\tmeta = frappe.get_meta(doctype)\n\tvalid_columns = meta.get_valid_columns()\n\n\tif doctype in core_doctypes_list:\n\t\treturn valid_columns\n\n\t# DocType has only fields of type Table (Table, Table MultiSelect)\n\tif set(valid_columns).issubset(default_fields):\n\t\treturn valid_columns\n\n\tif permission_type is None:\n\t\tpermission_type = \"select\" if frappe.only_has_select_perm(doctype, user=user) else \"read\"\n\n\tmeta_fields = meta.default_fields.copy()\n\toptional_meta_fields = [x for x in optional_fields if x in valid_columns]\n\n\tif permitted_fields := meta.get_permitted_fieldnames(\n\t\tparenttype=parenttype,\n\t\tuser=user,\n\t\tpermission_type=permission_type,\n\t\twith_virtual_fields=not ignore_virtual,\n\t):\n\t\tif permission_type == \"select\":\n\t\t\treturn permitted_fields\n\n\t\tif meta.istable:\n\t\t\tmeta_fields.extend(child_table_fields)\n\n\t\treturn meta_fields + permitted_fields + optional_meta_fields\n\n\treturn meta_fields + optional_meta_fields\n\n\ndef is_default_field(fieldname: str) -> bool:\n\treturn fieldname in default_fields\n", "path": "frappe/model/__init__.py"}]}
| 2,817 | 567 |
gh_patches_debug_1284
|
rasdani/github-patches
|
git_diff
|
opendatacube__datacube-core-875
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
If DB_PORT is not set, config process sets port to an empty string
I have an existing environment that sets up the datacube connection using this:
```
- DB_HOSTNAME=host.docker.internal
- DB_USERNAME=opendatacube
- DB_PASSWORD=opendatacubepassword
- DB_DATABASE=opendatacube
```
and with the new changes that read config from environment variables in preference to the config file, the port now has to be set explicitly with `DB_PORT=5432`.
Expected behaviour was that if the port is blank it falls back to the default Postgres port.
https://github.com/opendatacube/datacube-core/blob/8481d907b198a1c8946326b8b70625a9a8523a12/datacube/config.py#L265
--- END ISSUE ---
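A minimal sketch of the failure mode described above; the empty-string scenario (docker-compose exporting a declared-but-blank `DB_PORT` as `''` rather than leaving it unset) is an assumption on my part, and the dict comprehension simply mirrors the filtering done in `parse_env_params` in the file below:

```python
import os

DB_KEYS = ('hostname', 'port', 'database', 'username', 'password')

# Simulate an environment where DB_PORT is declared but blank, which is how a
# compose file can pass an unset host variable through to the container.
os.environ.update(DB_HOSTNAME='host.docker.internal', DB_PORT='')

params = {k: os.environ.get('DB_{}'.format(k.upper()), None) for k in DB_KEYS}

# Current behaviour: the 'is not None' filter keeps the empty string, so the
# config ends up with port='' instead of falling back to the Postgres default.
current = {k: v for k, v in params.items() if v is not None}
print(current)   # {'hostname': 'host.docker.internal', 'port': ''}

# Expected behaviour: a blank value is treated the same as an unset one.
expected = {k: v for k, v in params.items() if v is not None and v != ''}
print(expected)  # {'hostname': 'host.docker.internal'}
```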
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `datacube/config.py`
Content:
```
1 # coding=utf-8
2 """
3 User configuration.
4 """
5
6 import os
7 from pathlib import Path
8 import configparser
9 from urllib.parse import unquote_plus, urlparse
10 from typing import Optional, Iterable, Union, Any, Tuple, Dict
11
12 PathLike = Union[str, 'os.PathLike[Any]']
13
14
15 ENVIRONMENT_VARNAME = 'DATACUBE_CONFIG_PATH'
16 #: Config locations in order. Properties found in latter locations override
17 #: earlier ones.
18 #:
19 #: - `/etc/datacube.conf`
20 #: - file at `$DATACUBE_CONFIG_PATH` environment variable
21 #: - `~/.datacube.conf`
22 #: - `datacube.conf`
23 DEFAULT_CONF_PATHS = tuple(p for p in ['/etc/datacube.conf',
24 os.environ.get(ENVIRONMENT_VARNAME, ''),
25 str(os.path.expanduser("~/.datacube.conf")),
26 'datacube.conf'] if len(p) > 0)
27
28 DEFAULT_ENV = 'default'
29
30 # Default configuration options.
31 _DEFAULT_CONF = """
32 [DEFAULT]
33 # Blank implies localhost
34 db_hostname:
35 db_database: datacube
36 index_driver: default
37 # If a connection is unused for this length of time, expect it to be invalidated.
38 db_connection_timeout: 60
39
40 [user]
41 # Which environment to use when none is specified explicitly.
42 # note: will fail if default_environment points to non-existent section
43 # default_environment: datacube
44 """
45
46 #: Used in place of None as a default, when None is a valid but not default parameter to a function
47 _UNSET = object()
48
49
50 def read_config(default_text: Optional[str] = None) -> configparser.ConfigParser:
51 config = configparser.ConfigParser()
52 if default_text is not None:
53 config.read_string(default_text)
54 return config
55
56
57 class LocalConfig(object):
58 """
59 System configuration for the user.
60
61 This loads from a set of possible configuration files which define the available environments.
62 An environment contains connection details for a Data Cube Index, which provides access to
63 available data.
64
65 """
66
67 def __init__(self, config: configparser.ConfigParser,
68 files_loaded: Optional[Iterable[str]] = None,
69 env: Optional[str] = None):
70 """
71 Datacube environment resolution precedence is:
72 1. Supplied as a function argument `env`
73 2. DATACUBE_ENVIRONMENT environment variable
74 3. user.default_environment option in the config
75 4. 'default' or 'datacube' whichever is present
76
77 If environment is supplied by any of the first 3 methods is not present
78 in the config, then throw an exception.
79 """
80 self._config = config
81 self.files_loaded = [] if files_loaded is None else list(iter(files_loaded))
82
83 if env is None:
84 env = os.environ.get('DATACUBE_ENVIRONMENT',
85 config.get('user', 'default_environment', fallback=None))
86
87 # If the user specifies a particular env, we either want to use it or Fail
88 if env:
89 if config.has_section(env):
90 self._env = env
91 # All is good
92 return
93 else:
94 raise ValueError('No config section found for environment %r' % (env,))
95 else:
96 # If an env hasn't been specifically selected, we can fall back defaults
97 fallbacks = [DEFAULT_ENV, 'datacube']
98 for fallback_env in fallbacks:
99 if config.has_section(fallback_env):
100 self._env = fallback_env
101 return
102 raise ValueError('No ODC environment, checked configurations for %s' % fallbacks)
103
104 @classmethod
105 def find(cls,
106 paths: Optional[Union[str, Iterable[PathLike]]] = None,
107 env: Optional[str] = None) -> 'LocalConfig':
108 """
109 Find config from environment variables or possible filesystem locations.
110
111 'env' is which environment to use from the config: it corresponds to the name of a
112 config section
113 """
114 config = read_config(_DEFAULT_CONF)
115
116 if paths is None:
117 if env is None:
118 env_opts = parse_env_params()
119 if env_opts:
120 return _cfg_from_env_opts(env_opts, config)
121
122 paths = DEFAULT_CONF_PATHS
123
124 if isinstance(paths, str) or hasattr(paths, '__fspath__'): # Use os.PathLike in 3.6+
125 paths = [str(paths)]
126
127 files_loaded = config.read(str(p) for p in paths if p)
128
129 return LocalConfig(
130 config,
131 files_loaded=files_loaded,
132 env=env,
133 )
134
135 def get(self, item: str, fallback=_UNSET):
136 if fallback == _UNSET:
137 return self._config.get(self._env, item)
138 else:
139 return self._config.get(self._env, item, fallback=fallback)
140
141 def __getitem__(self, item: str):
142 return self.get(item, fallback=None)
143
144 def __str__(self) -> str:
145 return "LocalConfig<loaded_from={}, environment={!r}, config={}>".format(
146 self.files_loaded or 'defaults',
147 self._env,
148 dict(self._config[self._env]),
149 )
150
151 def __repr__(self) -> str:
152 return str(self)
153
154
155 OPTIONS = {'reproject_threads': 4}
156
157
158 #: pylint: disable=invalid-name
159 class set_options(object):
160 """Set global state within a controlled context
161
162 Currently, the only supported options are:
163 * reproject_threads: The number of threads to use when reprojecting
164
165 You can use ``set_options`` either as a context manager::
166
167 with datacube.set_options(reproject_threads=16):
168 ...
169
170 Or to set global options::
171
172 datacube.set_options(reproject_threads=16)
173 """
174
175 def __init__(self, **kwargs):
176 self.old = OPTIONS.copy()
177 OPTIONS.update(kwargs)
178
179 def __enter__(self):
180 return
181
182 def __exit__(self, exc_type, value, traceback):
183 OPTIONS.clear()
184 OPTIONS.update(self.old)
185
186
187 DB_KEYS = ('hostname', 'port', 'database', 'username', 'password')
188
189
190 def parse_connect_url(url: str) -> Dict[str, str]:
191 """ Extract database,hostname,port,username,password from db URL.
192
193 Example: postgresql://username:password@hostname:port/database
194
195 For local password-less db use `postgresql:///<your db>`
196 """
197 def split2(s: str, separator: str) -> Tuple[str, str]:
198 i = s.find(separator)
199 return (s, '') if i < 0 else (s[:i], s[i+1:])
200
201 _, netloc, path, *_ = urlparse(url)
202
203 db = path[1:] if path else ''
204 if '@' in netloc:
205 (user, password), (host, port) = (split2(p, ':') for p in split2(netloc, '@'))
206 else:
207 user, password = '', ''
208 host, port = split2(netloc, ':')
209
210 oo = dict(hostname=host, database=db)
211
212 if port:
213 oo['port'] = port
214 if password:
215 oo['password'] = unquote_plus(password)
216 if user:
217 oo['username'] = user
218 return oo
219
220
221 def parse_env_params() -> Dict[str, str]:
222 """
223 - Extract parameters from DATACUBE_DB_URL if present
224 - Else look for DB_HOSTNAME, DB_USERNAME, DB_PASSWORD, DB_DATABASE
225 - Return {} otherwise
226 """
227
228 db_url = os.environ.get('DATACUBE_DB_URL', None)
229 if db_url is not None:
230 return parse_connect_url(db_url)
231
232 params = {k: os.environ.get('DB_{}'.format(k.upper()), None)
233 for k in DB_KEYS}
234 return {k: v
235 for k, v in params.items()
236 if v is not None}
237
238
239 def _cfg_from_env_opts(opts: Dict[str, str],
240 base: configparser.ConfigParser) -> LocalConfig:
241 base['default'] = {'db_'+k: v for k, v in opts.items()}
242 return LocalConfig(base, files_loaded=[], env='default')
243
244
245 def render_dc_config(params: Dict[str, Any],
246 section_name: str = 'default') -> str:
247 """ Render output of parse_env_params to a string that can be written to config file.
248 """
249 oo = '[{}]\n'.format(section_name)
250 for k in DB_KEYS:
251 v = params.get(k, None)
252 if v is not None:
253 oo += 'db_{k}: {v}\n'.format(k=k, v=v)
254 return oo
255
256
257 def auto_config() -> str:
258 """
259 Render config to $DATACUBE_CONFIG_PATH or ~/.datacube.conf, but only if doesn't exist.
260
261 option1:
262 DATACUBE_DB_URL postgresql://user:password@host/database
263
264 option2:
265 DB_{HOSTNAME|PORT|USERNAME|PASSWORD|DATABASE}
266
267 option3:
268 default config
269 """
270 cfg_path = os.environ.get('DATACUBE_CONFIG_PATH', None)
271 cfg_path = Path(cfg_path) if cfg_path else Path.home()/'.datacube.conf'
272
273 if cfg_path.exists():
274 return str(cfg_path)
275
276 opts = parse_env_params()
277
278 if len(opts) == 0:
279 opts['hostname'] = ''
280 opts['database'] = 'datacube'
281
282 cfg_text = render_dc_config(opts)
283 with open(str(cfg_path), 'wt') as f:
284 f.write(cfg_text)
285
286 return str(cfg_path)
287
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/datacube/config.py b/datacube/config.py
--- a/datacube/config.py
+++ b/datacube/config.py
@@ -233,7 +233,7 @@
for k in DB_KEYS}
return {k: v
for k, v in params.items()
- if v is not None}
+ if v is not None and v != ""}
def _cfg_from_env_opts(opts: Dict[str, str],
|
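One way to lock in the patched behaviour is a small pytest-style regression test. The sketch below is illustrative only (no such test exists in the repository); it assumes `datacube.config.parse_env_params` is importable and that `monkeypatch` is available as in a standard pytest setup:

```python
from datacube.config import parse_env_params


def test_blank_db_port_is_treated_as_unset(monkeypatch):
    # Make sure the URL shortcut and any stray DB_* variables do not interfere.
    monkeypatch.delenv('DATACUBE_DB_URL', raising=False)
    for key in ('HOSTNAME', 'PORT', 'USERNAME', 'PASSWORD', 'DATABASE'):
        monkeypatch.delenv('DB_' + key, raising=False)

    monkeypatch.setenv('DB_HOSTNAME', 'host.docker.internal')
    monkeypatch.setenv('DB_DATABASE', 'opendatacube')
    monkeypatch.setenv('DB_PORT', '')  # declared but blank, as in the report

    params = parse_env_params()
    assert 'port' not in params  # blank value no longer leaks into the config
    assert params['hostname'] == 'host.docker.internal'
    assert params['database'] == 'opendatacube'
```

Run with `pytest` from the repository root: the test passes with the patch applied and fails against the pre-patch `if v is not None` filter, which keeps the empty port.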
{"golden_diff": "diff --git a/datacube/config.py b/datacube/config.py\n--- a/datacube/config.py\n+++ b/datacube/config.py\n@@ -233,7 +233,7 @@\n for k in DB_KEYS}\n return {k: v\n for k, v in params.items()\n- if v is not None}\n+ if v is not None and v != \"\"}\n \n \n def _cfg_from_env_opts(opts: Dict[str, str],\n", "issue": "If DB_PORT is not set, config process sets port to an empty string\nI have an existing environment that sets up the datacube connection using this:\r\n```\r\n - DB_HOSTNAME=host.docker.internal\r\n - DB_USERNAME=opendatacube\r\n - DB_PASSWORD=opendatacubepassword\r\n - DB_DATABASE=opendatacube\r\n```\r\n\r\nand with the new changes to read config from environment variables over the config file, the port is required to be set with `DB_PORT=5432`.\r\n\r\nExpected behaviour was that if the port is blank it is set to the default for Postgres.\r\n\r\nhttps://github.com/opendatacube/datacube-core/blob/8481d907b198a1c8946326b8b70625a9a8523a12/datacube/config.py#L265\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nUser configuration.\n\"\"\"\n\nimport os\nfrom pathlib import Path\nimport configparser\nfrom urllib.parse import unquote_plus, urlparse\nfrom typing import Optional, Iterable, Union, Any, Tuple, Dict\n\nPathLike = Union[str, 'os.PathLike[Any]']\n\n\nENVIRONMENT_VARNAME = 'DATACUBE_CONFIG_PATH'\n#: Config locations in order. Properties found in latter locations override\n#: earlier ones.\n#:\n#: - `/etc/datacube.conf`\n#: - file at `$DATACUBE_CONFIG_PATH` environment variable\n#: - `~/.datacube.conf`\n#: - `datacube.conf`\nDEFAULT_CONF_PATHS = tuple(p for p in ['/etc/datacube.conf',\n os.environ.get(ENVIRONMENT_VARNAME, ''),\n str(os.path.expanduser(\"~/.datacube.conf\")),\n 'datacube.conf'] if len(p) > 0)\n\nDEFAULT_ENV = 'default'\n\n# Default configuration options.\n_DEFAULT_CONF = \"\"\"\n[DEFAULT]\n# Blank implies localhost\ndb_hostname:\ndb_database: datacube\nindex_driver: default\n# If a connection is unused for this length of time, expect it to be invalidated.\ndb_connection_timeout: 60\n\n[user]\n# Which environment to use when none is specified explicitly.\n# note: will fail if default_environment points to non-existent section\n# default_environment: datacube\n\"\"\"\n\n#: Used in place of None as a default, when None is a valid but not default parameter to a function\n_UNSET = object()\n\n\ndef read_config(default_text: Optional[str] = None) -> configparser.ConfigParser:\n config = configparser.ConfigParser()\n if default_text is not None:\n config.read_string(default_text)\n return config\n\n\nclass LocalConfig(object):\n \"\"\"\n System configuration for the user.\n\n This loads from a set of possible configuration files which define the available environments.\n An environment contains connection details for a Data Cube Index, which provides access to\n available data.\n\n \"\"\"\n\n def __init__(self, config: configparser.ConfigParser,\n files_loaded: Optional[Iterable[str]] = None,\n env: Optional[str] = None):\n \"\"\"\n Datacube environment resolution precedence is:\n 1. Supplied as a function argument `env`\n 2. DATACUBE_ENVIRONMENT environment variable\n 3. user.default_environment option in the config\n 4. 
'default' or 'datacube' whichever is present\n\n If environment is supplied by any of the first 3 methods is not present\n in the config, then throw an exception.\n \"\"\"\n self._config = config\n self.files_loaded = [] if files_loaded is None else list(iter(files_loaded))\n\n if env is None:\n env = os.environ.get('DATACUBE_ENVIRONMENT',\n config.get('user', 'default_environment', fallback=None))\n\n # If the user specifies a particular env, we either want to use it or Fail\n if env:\n if config.has_section(env):\n self._env = env\n # All is good\n return\n else:\n raise ValueError('No config section found for environment %r' % (env,))\n else:\n # If an env hasn't been specifically selected, we can fall back defaults\n fallbacks = [DEFAULT_ENV, 'datacube']\n for fallback_env in fallbacks:\n if config.has_section(fallback_env):\n self._env = fallback_env\n return\n raise ValueError('No ODC environment, checked configurations for %s' % fallbacks)\n\n @classmethod\n def find(cls,\n paths: Optional[Union[str, Iterable[PathLike]]] = None,\n env: Optional[str] = None) -> 'LocalConfig':\n \"\"\"\n Find config from environment variables or possible filesystem locations.\n\n 'env' is which environment to use from the config: it corresponds to the name of a\n config section\n \"\"\"\n config = read_config(_DEFAULT_CONF)\n\n if paths is None:\n if env is None:\n env_opts = parse_env_params()\n if env_opts:\n return _cfg_from_env_opts(env_opts, config)\n\n paths = DEFAULT_CONF_PATHS\n\n if isinstance(paths, str) or hasattr(paths, '__fspath__'): # Use os.PathLike in 3.6+\n paths = [str(paths)]\n\n files_loaded = config.read(str(p) for p in paths if p)\n\n return LocalConfig(\n config,\n files_loaded=files_loaded,\n env=env,\n )\n\n def get(self, item: str, fallback=_UNSET):\n if fallback == _UNSET:\n return self._config.get(self._env, item)\n else:\n return self._config.get(self._env, item, fallback=fallback)\n\n def __getitem__(self, item: str):\n return self.get(item, fallback=None)\n\n def __str__(self) -> str:\n return \"LocalConfig<loaded_from={}, environment={!r}, config={}>\".format(\n self.files_loaded or 'defaults',\n self._env,\n dict(self._config[self._env]),\n )\n\n def __repr__(self) -> str:\n return str(self)\n\n\nOPTIONS = {'reproject_threads': 4}\n\n\n#: pylint: disable=invalid-name\nclass set_options(object):\n \"\"\"Set global state within a controlled context\n\n Currently, the only supported options are:\n * reproject_threads: The number of threads to use when reprojecting\n\n You can use ``set_options`` either as a context manager::\n\n with datacube.set_options(reproject_threads=16):\n ...\n\n Or to set global options::\n\n datacube.set_options(reproject_threads=16)\n \"\"\"\n\n def __init__(self, **kwargs):\n self.old = OPTIONS.copy()\n OPTIONS.update(kwargs)\n\n def __enter__(self):\n return\n\n def __exit__(self, exc_type, value, traceback):\n OPTIONS.clear()\n OPTIONS.update(self.old)\n\n\nDB_KEYS = ('hostname', 'port', 'database', 'username', 'password')\n\n\ndef parse_connect_url(url: str) -> Dict[str, str]:\n \"\"\" Extract database,hostname,port,username,password from db URL.\n\n Example: postgresql://username:password@hostname:port/database\n\n For local password-less db use `postgresql:///<your db>`\n \"\"\"\n def split2(s: str, separator: str) -> Tuple[str, str]:\n i = s.find(separator)\n return (s, '') if i < 0 else (s[:i], s[i+1:])\n\n _, netloc, path, *_ = urlparse(url)\n\n db = path[1:] if path else ''\n if '@' in netloc:\n (user, password), (host, port) = 
(split2(p, ':') for p in split2(netloc, '@'))\n else:\n user, password = '', ''\n host, port = split2(netloc, ':')\n\n oo = dict(hostname=host, database=db)\n\n if port:\n oo['port'] = port\n if password:\n oo['password'] = unquote_plus(password)\n if user:\n oo['username'] = user\n return oo\n\n\ndef parse_env_params() -> Dict[str, str]:\n \"\"\"\n - Extract parameters from DATACUBE_DB_URL if present\n - Else look for DB_HOSTNAME, DB_USERNAME, DB_PASSWORD, DB_DATABASE\n - Return {} otherwise\n \"\"\"\n\n db_url = os.environ.get('DATACUBE_DB_URL', None)\n if db_url is not None:\n return parse_connect_url(db_url)\n\n params = {k: os.environ.get('DB_{}'.format(k.upper()), None)\n for k in DB_KEYS}\n return {k: v\n for k, v in params.items()\n if v is not None}\n\n\ndef _cfg_from_env_opts(opts: Dict[str, str],\n base: configparser.ConfigParser) -> LocalConfig:\n base['default'] = {'db_'+k: v for k, v in opts.items()}\n return LocalConfig(base, files_loaded=[], env='default')\n\n\ndef render_dc_config(params: Dict[str, Any],\n section_name: str = 'default') -> str:\n \"\"\" Render output of parse_env_params to a string that can be written to config file.\n \"\"\"\n oo = '[{}]\\n'.format(section_name)\n for k in DB_KEYS:\n v = params.get(k, None)\n if v is not None:\n oo += 'db_{k}: {v}\\n'.format(k=k, v=v)\n return oo\n\n\ndef auto_config() -> str:\n \"\"\"\n Render config to $DATACUBE_CONFIG_PATH or ~/.datacube.conf, but only if doesn't exist.\n\n option1:\n DATACUBE_DB_URL postgresql://user:password@host/database\n\n option2:\n DB_{HOSTNAME|PORT|USERNAME|PASSWORD|DATABASE}\n\n option3:\n default config\n \"\"\"\n cfg_path = os.environ.get('DATACUBE_CONFIG_PATH', None)\n cfg_path = Path(cfg_path) if cfg_path else Path.home()/'.datacube.conf'\n\n if cfg_path.exists():\n return str(cfg_path)\n\n opts = parse_env_params()\n\n if len(opts) == 0:\n opts['hostname'] = ''\n opts['database'] = 'datacube'\n\n cfg_text = render_dc_config(opts)\n with open(str(cfg_path), 'wt') as f:\n f.write(cfg_text)\n\n return str(cfg_path)\n", "path": "datacube/config.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"\nUser configuration.\n\"\"\"\n\nimport os\nfrom pathlib import Path\nimport configparser\nfrom urllib.parse import unquote_plus, urlparse\nfrom typing import Optional, Iterable, Union, Any, Tuple, Dict\n\nPathLike = Union[str, 'os.PathLike[Any]']\n\n\nENVIRONMENT_VARNAME = 'DATACUBE_CONFIG_PATH'\n#: Config locations in order. 
Properties found in latter locations override\n#: earlier ones.\n#:\n#: - `/etc/datacube.conf`\n#: - file at `$DATACUBE_CONFIG_PATH` environment variable\n#: - `~/.datacube.conf`\n#: - `datacube.conf`\nDEFAULT_CONF_PATHS = tuple(p for p in ['/etc/datacube.conf',\n os.environ.get(ENVIRONMENT_VARNAME, ''),\n str(os.path.expanduser(\"~/.datacube.conf\")),\n 'datacube.conf'] if len(p) > 0)\n\nDEFAULT_ENV = 'default'\n\n# Default configuration options.\n_DEFAULT_CONF = \"\"\"\n[DEFAULT]\n# Blank implies localhost\ndb_hostname:\ndb_database: datacube\nindex_driver: default\n# If a connection is unused for this length of time, expect it to be invalidated.\ndb_connection_timeout: 60\n\n[user]\n# Which environment to use when none is specified explicitly.\n# note: will fail if default_environment points to non-existent section\n# default_environment: datacube\n\"\"\"\n\n#: Used in place of None as a default, when None is a valid but not default parameter to a function\n_UNSET = object()\n\n\ndef read_config(default_text: Optional[str] = None) -> configparser.ConfigParser:\n config = configparser.ConfigParser()\n if default_text is not None:\n config.read_string(default_text)\n return config\n\n\nclass LocalConfig(object):\n \"\"\"\n System configuration for the user.\n\n This loads from a set of possible configuration files which define the available environments.\n An environment contains connection details for a Data Cube Index, which provides access to\n available data.\n\n \"\"\"\n\n def __init__(self, config: configparser.ConfigParser,\n files_loaded: Optional[Iterable[str]] = None,\n env: Optional[str] = None):\n \"\"\"\n Datacube environment resolution precedence is:\n 1. Supplied as a function argument `env`\n 2. DATACUBE_ENVIRONMENT environment variable\n 3. user.default_environment option in the config\n 4. 
'default' or 'datacube' whichever is present\n\n If environment is supplied by any of the first 3 methods is not present\n in the config, then throw an exception.\n \"\"\"\n self._config = config\n self.files_loaded = [] if files_loaded is None else list(iter(files_loaded))\n\n if env is None:\n env = os.environ.get('DATACUBE_ENVIRONMENT',\n config.get('user', 'default_environment', fallback=None))\n\n # If the user specifies a particular env, we either want to use it or Fail\n if env:\n if config.has_section(env):\n self._env = env\n # All is good\n return\n else:\n raise ValueError('No config section found for environment %r' % (env,))\n else:\n # If an env hasn't been specifically selected, we can fall back defaults\n fallbacks = [DEFAULT_ENV, 'datacube']\n for fallback_env in fallbacks:\n if config.has_section(fallback_env):\n self._env = fallback_env\n return\n raise ValueError('No ODC environment, checked configurations for %s' % fallbacks)\n\n @classmethod\n def find(cls,\n paths: Optional[Union[str, Iterable[PathLike]]] = None,\n env: Optional[str] = None) -> 'LocalConfig':\n \"\"\"\n Find config from environment variables or possible filesystem locations.\n\n 'env' is which environment to use from the config: it corresponds to the name of a\n config section\n \"\"\"\n config = read_config(_DEFAULT_CONF)\n\n if paths is None:\n if env is None:\n env_opts = parse_env_params()\n if env_opts:\n return _cfg_from_env_opts(env_opts, config)\n\n paths = DEFAULT_CONF_PATHS\n\n if isinstance(paths, str) or hasattr(paths, '__fspath__'): # Use os.PathLike in 3.6+\n paths = [str(paths)]\n\n files_loaded = config.read(str(p) for p in paths if p)\n\n return LocalConfig(\n config,\n files_loaded=files_loaded,\n env=env,\n )\n\n def get(self, item: str, fallback=_UNSET):\n if fallback == _UNSET:\n return self._config.get(self._env, item)\n else:\n return self._config.get(self._env, item, fallback=fallback)\n\n def __getitem__(self, item: str):\n return self.get(item, fallback=None)\n\n def __str__(self) -> str:\n return \"LocalConfig<loaded_from={}, environment={!r}, config={}>\".format(\n self.files_loaded or 'defaults',\n self._env,\n dict(self._config[self._env]),\n )\n\n def __repr__(self) -> str:\n return str(self)\n\n\nOPTIONS = {'reproject_threads': 4}\n\n\n#: pylint: disable=invalid-name\nclass set_options(object):\n \"\"\"Set global state within a controlled context\n\n Currently, the only supported options are:\n * reproject_threads: The number of threads to use when reprojecting\n\n You can use ``set_options`` either as a context manager::\n\n with datacube.set_options(reproject_threads=16):\n ...\n\n Or to set global options::\n\n datacube.set_options(reproject_threads=16)\n \"\"\"\n\n def __init__(self, **kwargs):\n self.old = OPTIONS.copy()\n OPTIONS.update(kwargs)\n\n def __enter__(self):\n return\n\n def __exit__(self, exc_type, value, traceback):\n OPTIONS.clear()\n OPTIONS.update(self.old)\n\n\nDB_KEYS = ('hostname', 'port', 'database', 'username', 'password')\n\n\ndef parse_connect_url(url: str) -> Dict[str, str]:\n \"\"\" Extract database,hostname,port,username,password from db URL.\n\n Example: postgresql://username:password@hostname:port/database\n\n For local password-less db use `postgresql:///<your db>`\n \"\"\"\n def split2(s: str, separator: str) -> Tuple[str, str]:\n i = s.find(separator)\n return (s, '') if i < 0 else (s[:i], s[i+1:])\n\n _, netloc, path, *_ = urlparse(url)\n\n db = path[1:] if path else ''\n if '@' in netloc:\n (user, password), (host, port) = 
(split2(p, ':') for p in split2(netloc, '@'))\n else:\n user, password = '', ''\n host, port = split2(netloc, ':')\n\n oo = dict(hostname=host, database=db)\n\n if port:\n oo['port'] = port\n if password:\n oo['password'] = unquote_plus(password)\n if user:\n oo['username'] = user\n return oo\n\n\ndef parse_env_params() -> Dict[str, str]:\n \"\"\"\n - Extract parameters from DATACUBE_DB_URL if present\n - Else look for DB_HOSTNAME, DB_USERNAME, DB_PASSWORD, DB_DATABASE\n - Return {} otherwise\n \"\"\"\n\n db_url = os.environ.get('DATACUBE_DB_URL', None)\n if db_url is not None:\n return parse_connect_url(db_url)\n\n params = {k: os.environ.get('DB_{}'.format(k.upper()), None)\n for k in DB_KEYS}\n return {k: v\n for k, v in params.items()\n if v is not None and v != \"\"}\n\n\ndef _cfg_from_env_opts(opts: Dict[str, str],\n base: configparser.ConfigParser) -> LocalConfig:\n base['default'] = {'db_'+k: v for k, v in opts.items()}\n return LocalConfig(base, files_loaded=[], env='default')\n\n\ndef render_dc_config(params: Dict[str, Any],\n section_name: str = 'default') -> str:\n \"\"\" Render output of parse_env_params to a string that can be written to config file.\n \"\"\"\n oo = '[{}]\\n'.format(section_name)\n for k in DB_KEYS:\n v = params.get(k, None)\n if v is not None:\n oo += 'db_{k}: {v}\\n'.format(k=k, v=v)\n return oo\n\n\ndef auto_config() -> str:\n \"\"\"\n Render config to $DATACUBE_CONFIG_PATH or ~/.datacube.conf, but only if doesn't exist.\n\n option1:\n DATACUBE_DB_URL postgresql://user:password@host/database\n\n option2:\n DB_{HOSTNAME|PORT|USERNAME|PASSWORD|DATABASE}\n\n option3:\n default config\n \"\"\"\n cfg_path = os.environ.get('DATACUBE_CONFIG_PATH', None)\n cfg_path = Path(cfg_path) if cfg_path else Path.home()/'.datacube.conf'\n\n if cfg_path.exists():\n return str(cfg_path)\n\n opts = parse_env_params()\n\n if len(opts) == 0:\n opts['hostname'] = ''\n opts['database'] = 'datacube'\n\n cfg_text = render_dc_config(opts)\n with open(str(cfg_path), 'wt') as f:\n f.write(cfg_text)\n\n return str(cfg_path)\n", "path": "datacube/config.py"}]}
| 3,271 | 98 |
gh_patches_debug_1938
|
rasdani/github-patches
|
git_diff
|
PrefectHQ__prefect-2959
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Undefined name: make_env in ./server/src/prefect_server/cli/dev.py
## Description
An _undefined name_ like #2235 and #1199
https://github.com/PrefectHQ/prefect/blob/master/server/src/prefect_server/cli/dev.py#L88
`make_env` is an undefined name in this context, which will raise a `NameError` at runtime. Should this be `make_dev_env()`, defined on line 36, or is `from prefect.cli.server import make_env` the right solution?
[flake8](http://flake8.pycqa.org) testing of https://github.com/PrefectHQ/prefect on Python 3.8.3
$ __flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics__
```
./server/src/prefect_server/cli/dev.py:88:11: F821 undefined name 'make_env'
env = make_env()
^
1 F821 undefined name 'make_env'
1
```
https://flake8.pycqa.org/en/latest/user/error-codes.html
On the flake8 test selection, this PR does _not_ focus on "_style violations_" (the majority of flake8 error codes that [__psf/black__](https://github.com/psf/black) can autocorrect). Instead these tests focus on runtime safety and correctness:
* E9 tests are about Python syntax errors, usually raised because flake8 cannot build an Abstract Syntax Tree (AST). Often these issues are a sign of unused code or code that has not been ported to Python 3. These would be compile-time errors in a compiled language, but in a dynamic language like Python they result in the script halting/crashing on the user.
* F63 tests are usually about the confusion between identity and equality in Python. Using ==/!= to compare str, bytes, and int literals is the classic case. These are areas where __a == b__ is True but __a is b__ is False (or vice versa). Python >= 3.8 will raise SyntaxWarnings on these instances.
* F7 tests cover logic errors and syntax errors in type hints.
* F82 tests are almost always _undefined names_, which are usually a sign of a typo, missing imports, or code that has not been ported to Python 3. These also would be compile-time errors in a compiled language, but in Python a __NameError__ is raised which will halt/crash the script on the user.
--- END ISSUE ---
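A stand-alone illustration of why this class of bug slips past import time and only surfaces when the command actually runs; the snippet is not taken from the Prefect code base, it just mimics the shape of the buggy function:

```python
def build():
    # F821: 'make_env' is never defined or imported in this module, but Python
    # only resolves the name when build() is actually called.
    env = make_env()
    return env


# Defining (or importing) build() raises nothing; invoking it does.
try:
    build()
except NameError as exc:
    print(exc)  # name 'make_env' is not defined
```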
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `server/src/prefect_server/cli/dev.py`
Content:
```
1 # Licensed under the Prefect Community License, available at
2 # https://www.prefect.io/legal/prefect-community-license
3
4
5 import glob
6 import os
7 import shutil
8 import signal
9 import subprocess
10 import time
11 from pathlib import Path
12
13 import click
14
15 import prefect
16 import prefect_server
17 from prefect_server import api, cli, config, utilities
18 from prefect_server.database import models
19
20
21 @click.group()
22 def dev():
23 """
24 Commands for developing Server
25
26 \b
27 Usage:
28 $ prefect-server ...
29
30 \b
31 Arguments:
32 build builds prefect server, ui, apollo from source
33 """
34
35
36 def make_dev_env(fname=None):
37
38 # replace localhost with postgres to use docker-compose dns
39 PREFECT_ENV = dict(
40 DB_CONNECTION_URL=config.database.connection_url.replace(
41 "localhost", "postgres"
42 )
43 )
44
45 APOLLO_ENV = dict(
46 HASURA_API_URL=f"http://{config.hasura.host}:{config.hasura.port}/v1alpha1/graphql",
47 HASURA_WS_URL=f"ws://{config.hasura.host}:{config.hasura.port}/v1alpha1/graphql",
48 PREFECT_API_URL=f"http://{config.services.graphql.host}:{config.services.graphql.port}{config.services.graphql.path}",
49 PREFECT_API_HEALTH_URL=f"http://{config.services.graphql.host}:{config.services.graphql.port}/health",
50 )
51
52 POSTGRES_ENV = dict(
53 POSTGRES_USER=config.database.username,
54 POSTGRES_PASSWORD=config.database.password,
55 POSTGRES_DB=config.database.name,
56 )
57
58 HASURA_ENV = dict()
59
60 UI_ENV = dict(GRAPHQL_URL=config.services.ui.graphql_url)
61
62 ENV = os.environ.copy()
63 ENV.update(**PREFECT_ENV, **APOLLO_ENV, **POSTGRES_ENV, **UI_ENV, **HASURA_ENV)
64
65 if fname is not None:
66 list_of_pairs = [
67 f"{k}={repr(v)}" if "\n" in v else f"{k}={v}" for k, v in ENV.items()
68 ]
69 with open(fname, "w") as f:
70 f.write("\n".join(list_of_pairs))
71 return ENV.copy()
72
73
74 @dev.command(hidden=True)
75 @click.option(
76 "--version",
77 "-v",
78 help="The server image versions to build (for example, '0.10.0' or 'master')",
79 # TODO: update this default to use prefect.__version__ logic
80 default="latest",
81 )
82 def build(version):
83 """
84 foobar
85 """
86 docker_dir = Path(prefect_server.__file__).parents[2] / "docker"
87
88 env = make_env()
89
90 if "PREFECT_SERVER_TAG" not in env:
91 env.update(PREFECT_SERVER_TAG=version)
92
93 proc = None
94 cmd = ["docker-compose", "build"]
95 proc = subprocess.Popen(cmd, cwd=docker_dir, env=env)
96
97
98 @dev.command()
99 @click.option("--tag", "-t", help="The server image/tag to use", default="latest")
100 @click.option(
101 "--skip-pull",
102 help="Pass this flag to skip pulling new images (if available)",
103 is_flag=True,
104 )
105 def infrastructure(tag, skip_pull):
106 """
107 This command:
108 - starts a PostgreSQL database
109 - starts Hasura
110 """
111 docker_dir = Path(prefect_server.__file__).parents[2] / "docker"
112
113 env = make_dev_env()
114
115 proc = None
116 try:
117 if not skip_pull:
118 subprocess.check_call(
119 ["docker-compose", "pull", "postgres", "hasura"],
120 cwd=docker_dir,
121 env=env,
122 )
123 proc = subprocess.Popen(
124 ["docker-compose", "up", "postgres", "hasura"], cwd=docker_dir, env=env
125 )
126
127 # if not initialize, just run hasura (and dependencies), which will skip the init step
128 while True:
129 time.sleep(0.5)
130 except:
131 click.secho(
132 "Exception caught; killing services (press ctrl-C to force)",
133 fg="white",
134 bg="red",
135 )
136 subprocess.check_output(["docker-compose", "down"], cwd=docker_dir, env=env)
137 if proc:
138 proc.kill()
139 raise
140
141
142 @dev.command()
143 @click.option("--skip-ui", help="Pass this flag to skip UI dependencies", is_flag=True)
144 @click.option(
145 "--skip-apollo", help="Pass this flag to skip Apollo dependencies", is_flag=True
146 )
147 def install_dependencies(skip_ui, skip_apollo):
148 """
149 This command:
150 - installs Apollo dependencies
151 - install UI dependencies
152 """
153 if not skip_ui:
154 click.secho("Installing UI dependencies...")
155 time.sleep(0.5)
156 install_ui_dependencies()
157
158 if not skip_apollo:
159 click.secho("Installing Apollo dependencies...")
160 time.sleep(0.5)
161 install_apollo_dependencies()
162
163 if skip_ui and skip_apollo:
164 click.secho("No dependencies were installed because all were skipped.")
165
166
167 def install_apollo_dependencies():
168 apollo_dir = Path(prefect_server.__file__).parents[2] / "services" / "apollo"
169
170 proc = None
171 try:
172 proc = subprocess.check_call(["npm", "install"], cwd=apollo_dir)
173 click.secho("Apollo dependencies installed! 🚀🚀🚀")
174 except:
175 click.secho(
176 "Exception caught while installing Apollo dependencies.",
177 fg="white",
178 bg="red",
179 )
180 if proc:
181 proc.kill()
182 raise
183
184
185 def install_ui_dependencies():
186 ui_dir = Path(prefect_server.__file__).parents[2] / "services" / "ui"
187
188 proc = None
189 try:
190 proc = subprocess.check_call(["npm", "install"], cwd=ui_dir)
191 click.secho("UI dependencies installed! 🕹🕹🕹")
192 except:
193 click.secho(
194 "Exception caught while installing UI dependencies.", fg="white", bg="red"
195 )
196 if proc:
197 proc.kill()
198 raise
199
200
201 def is_process_group_empty(pgid: int):
202 proc = subprocess.Popen(
203 ["pgrep", "-g", str(pgid)], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
204 )
205 proc.wait()
206 return proc.returncode != 0
207
208
209 def kill_process_group(proc, timeout: int = 3):
210 try:
211 pgid = os.getpgid(proc.pid)
212 os.killpg(pgid, signal.SIGTERM)
213 proc.terminate()
214
215 for _ in range(timeout):
216 if is_process_group_empty(pgid):
217 return
218 click.secho("Waiting for process group to exit...", fg="white", bg="blue")
219 time.sleep(1)
220
221 click.secho("Timeout while shutting down, killing!", fg="white", bg="red")
222 os.killpg(pgid, signal.SIGKILL)
223 proc.kill()
224 except Exception as exc:
225 click.secho(exc)
226
227
228 @dev.command()
229 @click.option(
230 "--include", "-i", help="A comma-seperated list of serivces that should be run"
231 )
232 @click.option(
233 "--exclude", "-e", help="A comma-seperated list of services that should not be run"
234 )
235 def services(include, exclude):
236 """
237 This command starts services
238 """
239
240 all_services = ["graphql", "scheduler", "apollo", "ui"]
241 if not include:
242 include = all_services
243 else:
244 include = include.split(",")
245 if not exclude:
246 exclude = ""
247 run_services = sorted(set(include).difference(exclude.split(",")))
248
249 click.secho(
250 f"\n\nStarting Prefect Server services: {' '.join(run_services)}\n\n",
251 fg="green",
252 )
253
254 procs = []
255 for service in run_services:
256 procs.append(
257 subprocess.Popen(
258 ["prefect-server", "services", service],
259 env=make_dev_env(),
260 preexec_fn=os.setsid,
261 )
262 )
263
264 try:
265 while True:
266 time.sleep(1)
267 except:
268 click.secho("Exception caught; shutting down!", fg="white", bg="red")
269 for proc in procs:
270 kill_process_group(proc)
271
272
273 def config_to_dict(config):
274 if isinstance(config, (list, tuple, set)):
275 return type(config)([config_to_dict(d) for d in config])
276 elif isinstance(config, prefect.configuration.Config):
277 return dict({k: config_to_dict(v) for k, v in config.items()})
278 return config
279
280
281 def set_nested(dictionary, path: str, value: str):
282 path = path.split(".")
283 for level in path[:-1]:
284 dictionary = dictionary.setdefault(level, {})
285 dictionary[path[-1]] = value
286
287
288 @dev.command()
289 @click.option("-m", "--migration-message", required=True)
290 def generate_migration(migration_message):
291 # ensure this is called from the root server directory
292 if Path(prefect_server.__file__).parents[2] != Path(os.getcwd()):
293 raise click.ClickException(
294 "generate-migration must be run from the server root directory."
295 )
296 # find the most recent revision
297 alembic_migrations_path = "../../../services/postgres/alembic/versions"
298 versions = glob.glob(
299 os.path.join(os.path.dirname(__file__), alembic_migrations_path, "*.py")
300 )
301 versions.sort()
302 most_recent_migration = versions[-1]
303 with open(
304 os.path.join(
305 os.path.dirname(__file__), alembic_migrations_path, most_recent_migration
306 )
307 ) as migration:
308 for line in migration.readlines():
309 if line.startswith("Revision ID:"):
310 revision = line.split(": ")[1].strip()
311 click.echo(f"Most recent Alembic revision is {revision}")
312 # copy metadata to a backup for corresponding revision
313 hasura_migrations_path = "../../../services/hasura/migrations"
314 backup_metadata_file = f"metadata-{revision}.yaml"
315 backup_metadata_destination = os.path.abspath(
316 os.path.join(
317 os.path.dirname(__file__),
318 hasura_migrations_path,
319 "versions",
320 backup_metadata_file,
321 )
322 )
323 shutil.copy(
324 os.path.join(
325 os.path.dirname(__file__), hasura_migrations_path, "metadata.yaml"
326 ),
327 backup_metadata_destination,
328 )
329 click.echo(f"Copied metadata to {backup_metadata_destination}")
330 # create a new revision
331 click.echo(
332 subprocess.check_output(["alembic", "revision", "-m", migration_message])
333 )
334 click.secho("Prefect Server migration generated!", fg="green")
335
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/server/src/prefect_server/cli/dev.py b/server/src/prefect_server/cli/dev.py
--- a/server/src/prefect_server/cli/dev.py
+++ b/server/src/prefect_server/cli/dev.py
@@ -85,7 +85,7 @@
"""
docker_dir = Path(prefect_server.__file__).parents[2] / "docker"
- env = make_env()
+ env = make_dev_env()
if "PREFECT_SERVER_TAG" not in env:
env.update(PREFECT_SERVER_TAG=version)
|
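Beyond the one-line rename, a cheap guard against the same regression is to run flake8's undefined-name check over the CLI package in CI. The wrapper below is only a sketch of how that might be wired up: the script and its location are assumptions, and only the target path comes from the issue text.

```python
import subprocess
import sys


def check_undefined_names(path: str) -> int:
    """Return flake8's exit code after scanning *path* for F821 violations."""
    cmd = [sys.executable, '-m', 'flake8', '--select=F821', path]
    return subprocess.call(cmd)


if __name__ == '__main__':
    sys.exit(check_undefined_names('server/src/prefect_server/cli/dev.py'))
```

Wired into the existing CI, a non-zero exit code would have flagged `make_env` before the undefined name reached master.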
{"golden_diff": "diff --git a/server/src/prefect_server/cli/dev.py b/server/src/prefect_server/cli/dev.py\n--- a/server/src/prefect_server/cli/dev.py\n+++ b/server/src/prefect_server/cli/dev.py\n@@ -85,7 +85,7 @@\n \"\"\"\n docker_dir = Path(prefect_server.__file__).parents[2] / \"docker\"\n \n- env = make_env()\n+ env = make_dev_env()\n \n if \"PREFECT_SERVER_TAG\" not in env:\n env.update(PREFECT_SERVER_TAG=version)\n", "issue": "Undefined name: make_env in ./server/src/prefect_server/cli/dev.py\n## Description\r\n*A clear description of the bug*\r\n\r\nAn _undefined name_ like #2235 and #1199\r\nhttps://github.com/PrefectHQ/prefect/blob/master/server/src/prefect_server/cli/dev.py#L88\r\n\r\n`make_env` is an undefined name in this context which will raise a `NameError` at runtime. Should this be `make_dev_env()` defined on line 36 or is `from prefect.cli.server import make_env` the right solution?\r\n\r\n[flake8](http://flake8.pycqa.org) testing of https://github.com/PrefectHQ/prefect on Python 3.8.3\r\n\r\n$ __flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics__\r\n```\r\n./server/src/prefect_server/cli/dev.py:88:11: F821 undefined name 'make_env'\r\n env = make_env()\r\n ^\r\n1 F821 undefined name 'make_env'\r\n1\r\n```\r\nhttps://flake8.pycqa.org/en/latest/user/error-codes.html\r\n\r\nOn the flake8 test selection, this PR does _not_ focus on \"_style violations_\" (the majority of flake8 error codes that [__psf/black__](https://github.com/psf/black) can autocorrect). Instead these tests are focus on runtime safety and correctness:\r\n* E9 tests are about Python syntax errors usually raised because flake8 can not build an Abstract Syntax Tree (AST). Often these issues are a sign of unused code or code that has not been ported to Python 3. These would be compile-time errors in a compiled language but in a dynamic language like Python they result in the script halting/crashing on the user.\r\n* F63 tests are usually about the confusion between identity and equality in Python. Use ==/!= to compare str, bytes, and int literals is the classic case. These are areas where __a == b__ is True but __a is b__ is False (or vice versa). Python >= 3.8 will raise SyntaxWarnings on these instances.\r\n* F7 tests logic errors and syntax errors in type hints\r\n* F82 tests are almost always _undefined names_ which are usually a sign of a typo, missing imports, or code that has not been ported to Python 3. 
These also would be compile-time errors in a compiled language but in Python a __NameError__ is raised which will halt/crash the script on the user.\r\n\r\n\r\n\r\n\r\n## Expected Behavior\r\n*What did you expect to happen instead?*\r\n\r\n\r\n\r\n\r\n## Reproduction\r\n*A minimal example that exhibits the behavior.*\r\n\r\n\r\n\r\n\r\n## Environment\r\n*Any additional information about your environment*\r\n\r\n*Optionally run `prefect diagnostics` from the command line and paste the information here*\r\n\n", "before_files": [{"content": "# Licensed under the Prefect Community License, available at\n# https://www.prefect.io/legal/prefect-community-license\n\n\nimport glob\nimport os\nimport shutil\nimport signal\nimport subprocess\nimport time\nfrom pathlib import Path\n\nimport click\n\nimport prefect\nimport prefect_server\nfrom prefect_server import api, cli, config, utilities\nfrom prefect_server.database import models\n\n\[email protected]()\ndef dev():\n \"\"\"\n Commands for developing Server\n\n \\b\n Usage:\n $ prefect-server ...\n \n \\b\n Arguments:\n build builds prefect server, ui, apollo from source\n \"\"\"\n\n\ndef make_dev_env(fname=None):\n\n # replace localhost with postgres to use docker-compose dns\n PREFECT_ENV = dict(\n DB_CONNECTION_URL=config.database.connection_url.replace(\n \"localhost\", \"postgres\"\n )\n )\n\n APOLLO_ENV = dict(\n HASURA_API_URL=f\"http://{config.hasura.host}:{config.hasura.port}/v1alpha1/graphql\",\n HASURA_WS_URL=f\"ws://{config.hasura.host}:{config.hasura.port}/v1alpha1/graphql\",\n PREFECT_API_URL=f\"http://{config.services.graphql.host}:{config.services.graphql.port}{config.services.graphql.path}\",\n PREFECT_API_HEALTH_URL=f\"http://{config.services.graphql.host}:{config.services.graphql.port}/health\",\n )\n\n POSTGRES_ENV = dict(\n POSTGRES_USER=config.database.username,\n POSTGRES_PASSWORD=config.database.password,\n POSTGRES_DB=config.database.name,\n )\n\n HASURA_ENV = dict()\n\n UI_ENV = dict(GRAPHQL_URL=config.services.ui.graphql_url)\n\n ENV = os.environ.copy()\n ENV.update(**PREFECT_ENV, **APOLLO_ENV, **POSTGRES_ENV, **UI_ENV, **HASURA_ENV)\n\n if fname is not None:\n list_of_pairs = [\n f\"{k}={repr(v)}\" if \"\\n\" in v else f\"{k}={v}\" for k, v in ENV.items()\n ]\n with open(fname, \"w\") as f:\n f.write(\"\\n\".join(list_of_pairs))\n return ENV.copy()\n\n\[email protected](hidden=True)\[email protected](\n \"--version\",\n \"-v\",\n help=\"The server image versions to build (for example, '0.10.0' or 'master')\",\n # TODO: update this default to use prefect.__version__ logic\n default=\"latest\",\n)\ndef build(version):\n \"\"\"\n foobar\n \"\"\"\n docker_dir = Path(prefect_server.__file__).parents[2] / \"docker\"\n\n env = make_env()\n\n if \"PREFECT_SERVER_TAG\" not in env:\n env.update(PREFECT_SERVER_TAG=version)\n\n proc = None\n cmd = [\"docker-compose\", \"build\"]\n proc = subprocess.Popen(cmd, cwd=docker_dir, env=env)\n\n\[email protected]()\[email protected](\"--tag\", \"-t\", help=\"The server image/tag to use\", default=\"latest\")\[email protected](\n \"--skip-pull\",\n help=\"Pass this flag to skip pulling new images (if available)\",\n is_flag=True,\n)\ndef infrastructure(tag, skip_pull):\n \"\"\"\n This command:\n - starts a PostgreSQL database\n - starts Hasura\n \"\"\"\n docker_dir = Path(prefect_server.__file__).parents[2] / \"docker\"\n\n env = make_dev_env()\n\n proc = None\n try:\n if not skip_pull:\n subprocess.check_call(\n [\"docker-compose\", \"pull\", \"postgres\", \"hasura\"],\n cwd=docker_dir,\n 
env=env,\n )\n proc = subprocess.Popen(\n [\"docker-compose\", \"up\", \"postgres\", \"hasura\"], cwd=docker_dir, env=env\n )\n\n # if not initialize, just run hasura (and dependencies), which will skip the init step\n while True:\n time.sleep(0.5)\n except:\n click.secho(\n \"Exception caught; killing services (press ctrl-C to force)\",\n fg=\"white\",\n bg=\"red\",\n )\n subprocess.check_output([\"docker-compose\", \"down\"], cwd=docker_dir, env=env)\n if proc:\n proc.kill()\n raise\n\n\[email protected]()\[email protected](\"--skip-ui\", help=\"Pass this flag to skip UI dependencies\", is_flag=True)\[email protected](\n \"--skip-apollo\", help=\"Pass this flag to skip Apollo dependencies\", is_flag=True\n)\ndef install_dependencies(skip_ui, skip_apollo):\n \"\"\"\n This command:\n - installs Apollo dependencies\n - install UI dependencies\n \"\"\"\n if not skip_ui:\n click.secho(\"Installing UI dependencies...\")\n time.sleep(0.5)\n install_ui_dependencies()\n\n if not skip_apollo:\n click.secho(\"Installing Apollo dependencies...\")\n time.sleep(0.5)\n install_apollo_dependencies()\n\n if skip_ui and skip_apollo:\n click.secho(\"No dependencies were installed because all were skipped.\")\n\n\ndef install_apollo_dependencies():\n apollo_dir = Path(prefect_server.__file__).parents[2] / \"services\" / \"apollo\"\n\n proc = None\n try:\n proc = subprocess.check_call([\"npm\", \"install\"], cwd=apollo_dir)\n click.secho(\"Apollo dependencies installed! \ud83d\ude80\ud83d\ude80\ud83d\ude80\")\n except:\n click.secho(\n \"Exception caught while installing Apollo dependencies.\",\n fg=\"white\",\n bg=\"red\",\n )\n if proc:\n proc.kill()\n raise\n\n\ndef install_ui_dependencies():\n ui_dir = Path(prefect_server.__file__).parents[2] / \"services\" / \"ui\"\n\n proc = None\n try:\n proc = subprocess.check_call([\"npm\", \"install\"], cwd=ui_dir)\n click.secho(\"UI dependencies installed! 
\ud83d\udd79\ud83d\udd79\ud83d\udd79\")\n except:\n click.secho(\n \"Exception caught while installing UI dependencies.\", fg=\"white\", bg=\"red\"\n )\n if proc:\n proc.kill()\n raise\n\n\ndef is_process_group_empty(pgid: int):\n proc = subprocess.Popen(\n [\"pgrep\", \"-g\", str(pgid)], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL\n )\n proc.wait()\n return proc.returncode != 0\n\n\ndef kill_process_group(proc, timeout: int = 3):\n try:\n pgid = os.getpgid(proc.pid)\n os.killpg(pgid, signal.SIGTERM)\n proc.terminate()\n\n for _ in range(timeout):\n if is_process_group_empty(pgid):\n return\n click.secho(\"Waiting for process group to exit...\", fg=\"white\", bg=\"blue\")\n time.sleep(1)\n\n click.secho(\"Timeout while shutting down, killing!\", fg=\"white\", bg=\"red\")\n os.killpg(pgid, signal.SIGKILL)\n proc.kill()\n except Exception as exc:\n click.secho(exc)\n\n\[email protected]()\[email protected](\n \"--include\", \"-i\", help=\"A comma-seperated list of serivces that should be run\"\n)\[email protected](\n \"--exclude\", \"-e\", help=\"A comma-seperated list of services that should not be run\"\n)\ndef services(include, exclude):\n \"\"\"\n This command starts services\n \"\"\"\n\n all_services = [\"graphql\", \"scheduler\", \"apollo\", \"ui\"]\n if not include:\n include = all_services\n else:\n include = include.split(\",\")\n if not exclude:\n exclude = \"\"\n run_services = sorted(set(include).difference(exclude.split(\",\")))\n\n click.secho(\n f\"\\n\\nStarting Prefect Server services: {' '.join(run_services)}\\n\\n\",\n fg=\"green\",\n )\n\n procs = []\n for service in run_services:\n procs.append(\n subprocess.Popen(\n [\"prefect-server\", \"services\", service],\n env=make_dev_env(),\n preexec_fn=os.setsid,\n )\n )\n\n try:\n while True:\n time.sleep(1)\n except:\n click.secho(\"Exception caught; shutting down!\", fg=\"white\", bg=\"red\")\n for proc in procs:\n kill_process_group(proc)\n\n\ndef config_to_dict(config):\n if isinstance(config, (list, tuple, set)):\n return type(config)([config_to_dict(d) for d in config])\n elif isinstance(config, prefect.configuration.Config):\n return dict({k: config_to_dict(v) for k, v in config.items()})\n return config\n\n\ndef set_nested(dictionary, path: str, value: str):\n path = path.split(\".\")\n for level in path[:-1]:\n dictionary = dictionary.setdefault(level, {})\n dictionary[path[-1]] = value\n\n\[email protected]()\[email protected](\"-m\", \"--migration-message\", required=True)\ndef generate_migration(migration_message):\n # ensure this is called from the root server directory\n if Path(prefect_server.__file__).parents[2] != Path(os.getcwd()):\n raise click.ClickException(\n \"generate-migration must be run from the server root directory.\"\n )\n # find the most recent revision\n alembic_migrations_path = \"../../../services/postgres/alembic/versions\"\n versions = glob.glob(\n os.path.join(os.path.dirname(__file__), alembic_migrations_path, \"*.py\")\n )\n versions.sort()\n most_recent_migration = versions[-1]\n with open(\n os.path.join(\n os.path.dirname(__file__), alembic_migrations_path, most_recent_migration\n )\n ) as migration:\n for line in migration.readlines():\n if line.startswith(\"Revision ID:\"):\n revision = line.split(\": \")[1].strip()\n click.echo(f\"Most recent Alembic revision is {revision}\")\n # copy metadata to a backup for corresponding revision\n hasura_migrations_path = \"../../../services/hasura/migrations\"\n backup_metadata_file = f\"metadata-{revision}.yaml\"\n 
backup_metadata_destination = os.path.abspath(\n os.path.join(\n os.path.dirname(__file__),\n hasura_migrations_path,\n \"versions\",\n backup_metadata_file,\n )\n )\n shutil.copy(\n os.path.join(\n os.path.dirname(__file__), hasura_migrations_path, \"metadata.yaml\"\n ),\n backup_metadata_destination,\n )\n click.echo(f\"Copied metadata to {backup_metadata_destination}\")\n # create a new revision\n click.echo(\n subprocess.check_output([\"alembic\", \"revision\", \"-m\", migration_message])\n )\n click.secho(\"Prefect Server migration generated!\", fg=\"green\")\n", "path": "server/src/prefect_server/cli/dev.py"}], "after_files": [{"content": "# Licensed under the Prefect Community License, available at\n# https://www.prefect.io/legal/prefect-community-license\n\n\nimport glob\nimport os\nimport shutil\nimport signal\nimport subprocess\nimport time\nfrom pathlib import Path\n\nimport click\n\nimport prefect\nimport prefect_server\nfrom prefect_server import api, cli, config, utilities\nfrom prefect_server.database import models\n\n\[email protected]()\ndef dev():\n \"\"\"\n Commands for developing Server\n\n \\b\n Usage:\n $ prefect-server ...\n \n \\b\n Arguments:\n build builds prefect server, ui, apollo from source\n \"\"\"\n\n\ndef make_dev_env(fname=None):\n\n # replace localhost with postgres to use docker-compose dns\n PREFECT_ENV = dict(\n DB_CONNECTION_URL=config.database.connection_url.replace(\n \"localhost\", \"postgres\"\n )\n )\n\n APOLLO_ENV = dict(\n HASURA_API_URL=f\"http://{config.hasura.host}:{config.hasura.port}/v1alpha1/graphql\",\n HASURA_WS_URL=f\"ws://{config.hasura.host}:{config.hasura.port}/v1alpha1/graphql\",\n PREFECT_API_URL=f\"http://{config.services.graphql.host}:{config.services.graphql.port}{config.services.graphql.path}\",\n PREFECT_API_HEALTH_URL=f\"http://{config.services.graphql.host}:{config.services.graphql.port}/health\",\n )\n\n POSTGRES_ENV = dict(\n POSTGRES_USER=config.database.username,\n POSTGRES_PASSWORD=config.database.password,\n POSTGRES_DB=config.database.name,\n )\n\n HASURA_ENV = dict()\n\n UI_ENV = dict(GRAPHQL_URL=config.services.ui.graphql_url)\n\n ENV = os.environ.copy()\n ENV.update(**PREFECT_ENV, **APOLLO_ENV, **POSTGRES_ENV, **UI_ENV, **HASURA_ENV)\n\n if fname is not None:\n list_of_pairs = [\n f\"{k}={repr(v)}\" if \"\\n\" in v else f\"{k}={v}\" for k, v in ENV.items()\n ]\n with open(fname, \"w\") as f:\n f.write(\"\\n\".join(list_of_pairs))\n return ENV.copy()\n\n\[email protected](hidden=True)\[email protected](\n \"--version\",\n \"-v\",\n help=\"The server image versions to build (for example, '0.10.0' or 'master')\",\n # TODO: update this default to use prefect.__version__ logic\n default=\"latest\",\n)\ndef build(version):\n \"\"\"\n foobar\n \"\"\"\n docker_dir = Path(prefect_server.__file__).parents[2] / \"docker\"\n\n env = make_dev_env()\n\n if \"PREFECT_SERVER_TAG\" not in env:\n env.update(PREFECT_SERVER_TAG=version)\n\n proc = None\n cmd = [\"docker-compose\", \"build\"]\n proc = subprocess.Popen(cmd, cwd=docker_dir, env=env)\n\n\[email protected]()\[email protected](\"--tag\", \"-t\", help=\"The server image/tag to use\", default=\"latest\")\[email protected](\n \"--skip-pull\",\n help=\"Pass this flag to skip pulling new images (if available)\",\n is_flag=True,\n)\ndef infrastructure(tag, skip_pull):\n \"\"\"\n This command:\n - starts a PostgreSQL database\n - starts Hasura\n \"\"\"\n docker_dir = Path(prefect_server.__file__).parents[2] / \"docker\"\n\n env = make_dev_env()\n\n proc = None\n try:\n if not 
skip_pull:\n subprocess.check_call(\n [\"docker-compose\", \"pull\", \"postgres\", \"hasura\"],\n cwd=docker_dir,\n env=env,\n )\n proc = subprocess.Popen(\n [\"docker-compose\", \"up\", \"postgres\", \"hasura\"], cwd=docker_dir, env=env\n )\n\n # if not initialize, just run hasura (and dependencies), which will skip the init step\n while True:\n time.sleep(0.5)\n except:\n click.secho(\n \"Exception caught; killing services (press ctrl-C to force)\",\n fg=\"white\",\n bg=\"red\",\n )\n subprocess.check_output([\"docker-compose\", \"down\"], cwd=docker_dir, env=env)\n if proc:\n proc.kill()\n raise\n\n\[email protected]()\[email protected](\"--skip-ui\", help=\"Pass this flag to skip UI dependencies\", is_flag=True)\[email protected](\n \"--skip-apollo\", help=\"Pass this flag to skip Apollo dependencies\", is_flag=True\n)\ndef install_dependencies(skip_ui, skip_apollo):\n \"\"\"\n This command:\n - installs Apollo dependencies\n - install UI dependencies\n \"\"\"\n if not skip_ui:\n click.secho(\"Installing UI dependencies...\")\n time.sleep(0.5)\n install_ui_dependencies()\n\n if not skip_apollo:\n click.secho(\"Installing Apollo dependencies...\")\n time.sleep(0.5)\n install_apollo_dependencies()\n\n if skip_ui and skip_apollo:\n click.secho(\"No dependencies were installed because all were skipped.\")\n\n\ndef install_apollo_dependencies():\n apollo_dir = Path(prefect_server.__file__).parents[2] / \"services\" / \"apollo\"\n\n proc = None\n try:\n proc = subprocess.check_call([\"npm\", \"install\"], cwd=apollo_dir)\n click.secho(\"Apollo dependencies installed! \ud83d\ude80\ud83d\ude80\ud83d\ude80\")\n except:\n click.secho(\n \"Exception caught while installing Apollo dependencies.\",\n fg=\"white\",\n bg=\"red\",\n )\n if proc:\n proc.kill()\n raise\n\n\ndef install_ui_dependencies():\n ui_dir = Path(prefect_server.__file__).parents[2] / \"services\" / \"ui\"\n\n proc = None\n try:\n proc = subprocess.check_call([\"npm\", \"install\"], cwd=ui_dir)\n click.secho(\"UI dependencies installed! 
\ud83d\udd79\ud83d\udd79\ud83d\udd79\")\n except:\n click.secho(\n \"Exception caught while installing UI dependencies.\", fg=\"white\", bg=\"red\"\n )\n if proc:\n proc.kill()\n raise\n\n\ndef is_process_group_empty(pgid: int):\n proc = subprocess.Popen(\n [\"pgrep\", \"-g\", str(pgid)], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL\n )\n proc.wait()\n return proc.returncode != 0\n\n\ndef kill_process_group(proc, timeout: int = 3):\n try:\n pgid = os.getpgid(proc.pid)\n os.killpg(pgid, signal.SIGTERM)\n proc.terminate()\n\n for _ in range(timeout):\n if is_process_group_empty(pgid):\n return\n click.secho(\"Waiting for process group to exit...\", fg=\"white\", bg=\"blue\")\n time.sleep(1)\n\n click.secho(\"Timeout while shutting down, killing!\", fg=\"white\", bg=\"red\")\n os.killpg(pgid, signal.SIGKILL)\n proc.kill()\n except Exception as exc:\n click.secho(exc)\n\n\[email protected]()\[email protected](\n \"--include\", \"-i\", help=\"A comma-seperated list of serivces that should be run\"\n)\[email protected](\n \"--exclude\", \"-e\", help=\"A comma-seperated list of services that should not be run\"\n)\ndef services(include, exclude):\n \"\"\"\n This command starts services\n \"\"\"\n\n all_services = [\"graphql\", \"scheduler\", \"apollo\", \"ui\"]\n if not include:\n include = all_services\n else:\n include = include.split(\",\")\n if not exclude:\n exclude = \"\"\n run_services = sorted(set(include).difference(exclude.split(\",\")))\n\n click.secho(\n f\"\\n\\nStarting Prefect Server services: {' '.join(run_services)}\\n\\n\",\n fg=\"green\",\n )\n\n procs = []\n for service in run_services:\n procs.append(\n subprocess.Popen(\n [\"prefect-server\", \"services\", service],\n env=make_dev_env(),\n preexec_fn=os.setsid,\n )\n )\n\n try:\n while True:\n time.sleep(1)\n except:\n click.secho(\"Exception caught; shutting down!\", fg=\"white\", bg=\"red\")\n for proc in procs:\n kill_process_group(proc)\n\n\ndef config_to_dict(config):\n if isinstance(config, (list, tuple, set)):\n return type(config)([config_to_dict(d) for d in config])\n elif isinstance(config, prefect.configuration.Config):\n return dict({k: config_to_dict(v) for k, v in config.items()})\n return config\n\n\ndef set_nested(dictionary, path: str, value: str):\n path = path.split(\".\")\n for level in path[:-1]:\n dictionary = dictionary.setdefault(level, {})\n dictionary[path[-1]] = value\n\n\[email protected]()\[email protected](\"-m\", \"--migration-message\", required=True)\ndef generate_migration(migration_message):\n # ensure this is called from the root server directory\n if Path(prefect_server.__file__).parents[2] != Path(os.getcwd()):\n raise click.ClickException(\n \"generate-migration must be run from the server root directory.\"\n )\n # find the most recent revision\n alembic_migrations_path = \"../../../services/postgres/alembic/versions\"\n versions = glob.glob(\n os.path.join(os.path.dirname(__file__), alembic_migrations_path, \"*.py\")\n )\n versions.sort()\n most_recent_migration = versions[-1]\n with open(\n os.path.join(\n os.path.dirname(__file__), alembic_migrations_path, most_recent_migration\n )\n ) as migration:\n for line in migration.readlines():\n if line.startswith(\"Revision ID:\"):\n revision = line.split(\": \")[1].strip()\n click.echo(f\"Most recent Alembic revision is {revision}\")\n # copy metadata to a backup for corresponding revision\n hasura_migrations_path = \"../../../services/hasura/migrations\"\n backup_metadata_file = f\"metadata-{revision}.yaml\"\n 
backup_metadata_destination = os.path.abspath(\n os.path.join(\n os.path.dirname(__file__),\n hasura_migrations_path,\n \"versions\",\n backup_metadata_file,\n )\n )\n shutil.copy(\n os.path.join(\n os.path.dirname(__file__), hasura_migrations_path, \"metadata.yaml\"\n ),\n backup_metadata_destination,\n )\n click.echo(f\"Copied metadata to {backup_metadata_destination}\")\n # create a new revision\n click.echo(\n subprocess.check_output([\"alembic\", \"revision\", \"-m\", migration_message])\n )\n click.secho(\"Prefect Server migration generated!\", fg=\"green\")\n", "path": "server/src/prefect_server/cli/dev.py"}]}
| 4,074 | 122 |
gh_patches_debug_12277
|
rasdani/github-patches
|
git_diff
|
kserve__kserve-2899
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make poetry version plugin to update the pyproject.toml as well not just the build
/kind bug
**What steps did you take and what happened:**
[A clear and concise description of what the bug is.]
Currently the poetry plugin updates the version during the build and install phases but does not update pyproject.toml.
This makes it harder to decipher the version of the package.
**What did you expect to happen:**
Running poetry build or install should set the version of the package in pyproject.toml
--- END ISSUE ---
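For orientation before looking at the code, here is a minimal, hedged sketch of the behavior the issue asks for — writing the dynamically resolved version back into pyproject.toml from inside the plugin. It assumes Poetry's `poetry.file.read()` / `poetry.file.write()` TOML helpers behave as in recent Poetry releases; the helper name and parameters below are illustrative, not the project's actual fix.

```python
# Hedged sketch only: persist the dynamically resolved version into pyproject.toml.
# Assumes `poetry.file` is Poetry's pyproject.toml wrapper exposing read()/write().
from typing import Any, Dict


def write_version_to_pyproject(poetry: Any, io: Any, version: str) -> None:
    content: Dict[str, Any] = poetry.file.read()      # parsed pyproject.toml document
    content["tool"]["poetry"]["version"] = version    # update [tool.poetry].version
    poetry.file.write(content)                        # write the document back to disk
    io.write_line(
        f"<b>poetry-version-plugin</b>: pyproject.toml version set to <b>{version}</b>"
    )
```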
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py`
Content:
```
1 import ast
2 import subprocess
3 from pathlib import Path
4 from typing import Any, Dict, Optional
5
6 from cleo.io.io import IO
7 from poetry.plugins.plugin import Plugin
8 from poetry.poetry import Poetry
9 from poetry.core.utils.helpers import module_name
10
11
12 class VersionPlugin(Plugin): # type: ignore
13 def activate(self, poetry: Poetry, io: IO) -> None:
14 poetry_version_config: Optional[Dict[str, Any]] = poetry.pyproject.data.get(
15 "tool", {}
16 ).get("poetry-version-plugin")
17 if poetry_version_config is None:
18 return
19 version_source = poetry_version_config.get("source")
20 version_file_path = poetry_version_config.get("file_path")
21 if not version_source:
22 message = (
23 "<b>poetry-version-plugin</b>: No <b>source</b> configuration found in "
24 "[tool.poetry-version-plugin] in pyproject.toml, not extracting "
25 "dynamic version"
26 )
27 io.write_error_line(message)
28 raise RuntimeError(message)
29
30 # Update version using file
31 if version_source == "file" and version_file_path:
32 with open(version_file_path, "r") as version_file:
33 version = version_file.read().strip()
34 poetry.package._set_version(version)
35 io.write_line(
36 "<b>poetry-version-plugin</b>: New package "
37 "version is updated from given file "
38 f"version is: <b>{version}</b>"
39 )
40
41 if version_source == "init":
42 packages = poetry.local_config.get("packages")
43 if packages:
44 if len(packages) == 1:
45 package_name = packages[0]["include"]
46 else:
47 message = (
48 "<b>poetry-version-plugin</b>: More than one package set, "
49 "cannot extract dynamic version"
50 )
51 io.write_error_line(message)
52 raise RuntimeError(message)
53 else:
54 package_name = module_name(poetry.package.name)
55 init_path = Path(package_name) / "__init__.py"
56 if not init_path.is_file():
57 message = (
58 "<b>poetry-version-plugin</b>: __init__.py file not found at "
59 f"{init_path} cannot extract dynamic version"
60 )
61 io.write_error_line(message)
62 raise RuntimeError(message)
63 else:
64 io.write_line(
65 "<b>poetry-version-plugin</b>: Using __init__.py file at "
66 f"{init_path} for dynamic version"
67 )
68 tree = ast.parse(init_path.read_text())
69 for el in tree.body:
70 if isinstance(el, ast.Assign):
71 if len(el.targets) == 1:
72 target = el.targets[0]
73 if isinstance(target, ast.Name):
74 if target.id == "__version__":
75 value_node = el.value
76 if isinstance(value_node, ast.Constant):
77 version = value_node.value
78 elif isinstance(value_node, ast.Str):
79 version = value_node.s
80 else: # pragma: nocover
81 # This is actually covered by tests, but can't be
82 # reported by Coverage
83 # Ref: https://github.com/nedbat/coveragepy/issues/198
84 continue
85 io.write_line(
86 "<b>poetry-version-plugin</b>: Setting package "
87 "dynamic version to __version__ "
88 f"variable from __init__.py: <b>{version}</b>"
89 )
90 poetry.package._set_version(version)
91 return
92 message = (
93 "<b>poetry-version-plugin</b>: No valid __version__ variable found "
94 "in __init__.py, cannot extract dynamic version"
95 )
96 io.write_error_line(message)
97 raise RuntimeError(message)
98 elif version_source == "git-tag":
99 result = subprocess.run(
100 ["git", "describe", "--exact-match", "--tags", "HEAD"],
101 stdin=subprocess.PIPE,
102 stdout=subprocess.PIPE,
103 universal_newlines=True,
104 )
105 if result.returncode == 0:
106 tag = result.stdout.strip()
107 io.write_line(
108 "<b>poetry-version-plugin</b>: Git tag found, setting "
109 f"dynamic version to: {tag}"
110 )
111 poetry.package._set_version(tag)
112 return
113 else:
114 message = (
115 "<b>poetry-version-plugin</b>: No Git tag found, not "
116 "extracting dynamic version"
117 )
118 io.write_error_line(message)
119 raise RuntimeError(message)
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py b/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py
--- a/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py
+++ b/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py
@@ -37,6 +37,13 @@
"version is updated from given file "
f"version is: <b>{version}</b>"
)
+ content: dict[str, Any] = poetry.file.read()
+ poetry_content = content["tool"]["poetry"]
+ poetry_content["version"] = version
+ poetry.file.write(content)
+ io.write_line(
+ "<b>poetry-version-plugin</b>: New version updated in toml file"
+ )
if version_source == "init":
packages = poetry.local_config.get("packages")
|
{"golden_diff": "diff --git a/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py b/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py\n--- a/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py\n+++ b/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py\n@@ -37,6 +37,13 @@\n \"version is updated from given file \"\n f\"version is: <b>{version}</b>\"\n )\n+ content: dict[str, Any] = poetry.file.read()\n+ poetry_content = content[\"tool\"][\"poetry\"]\n+ poetry_content[\"version\"] = version\n+ poetry.file.write(content)\n+ io.write_line(\n+ \"<b>poetry-version-plugin</b>: New version updated in toml file\"\n+ )\n \n if version_source == \"init\":\n packages = poetry.local_config.get(\"packages\")\n", "issue": "Make poetry version plugin to update the pyproject.toml as well not just the build\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\n[A clear and concise description of what the bug is.]\r\nCurrently the poetry plugin updates the version during the build, install phases but does not update the pyproject.toml\r\nThis makes it harder to easily decipher the version of the package.\r\n\r\n**What did you expect to happen:**\r\nRunning poetry build or install should set the version of the package in pyproject.toml\r\n\r\n\n", "before_files": [{"content": "import ast\nimport subprocess\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional\n\nfrom cleo.io.io import IO\nfrom poetry.plugins.plugin import Plugin\nfrom poetry.poetry import Poetry\nfrom poetry.core.utils.helpers import module_name\n\n\nclass VersionPlugin(Plugin): # type: ignore\n def activate(self, poetry: Poetry, io: IO) -> None:\n poetry_version_config: Optional[Dict[str, Any]] = poetry.pyproject.data.get(\n \"tool\", {}\n ).get(\"poetry-version-plugin\")\n if poetry_version_config is None:\n return\n version_source = poetry_version_config.get(\"source\")\n version_file_path = poetry_version_config.get(\"file_path\")\n if not version_source:\n message = (\n \"<b>poetry-version-plugin</b>: No <b>source</b> configuration found in \"\n \"[tool.poetry-version-plugin] in pyproject.toml, not extracting \"\n \"dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n\n # Update version using file\n if version_source == \"file\" and version_file_path:\n with open(version_file_path, \"r\") as version_file:\n version = version_file.read().strip()\n poetry.package._set_version(version)\n io.write_line(\n \"<b>poetry-version-plugin</b>: New package \"\n \"version is updated from given file \"\n f\"version is: <b>{version}</b>\"\n )\n\n if version_source == \"init\":\n packages = poetry.local_config.get(\"packages\")\n if packages:\n if len(packages) == 1:\n package_name = packages[0][\"include\"]\n else:\n message = (\n \"<b>poetry-version-plugin</b>: More than one package set, \"\n \"cannot extract dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n else:\n package_name = module_name(poetry.package.name)\n init_path = Path(package_name) / \"__init__.py\"\n if not init_path.is_file():\n message = (\n \"<b>poetry-version-plugin</b>: __init__.py file not found at \"\n f\"{init_path} cannot extract dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n else:\n io.write_line(\n \"<b>poetry-version-plugin</b>: Using __init__.py file at \"\n f\"{init_path} for dynamic version\"\n )\n tree = ast.parse(init_path.read_text())\n for el in tree.body:\n if isinstance(el, 
ast.Assign):\n if len(el.targets) == 1:\n target = el.targets[0]\n if isinstance(target, ast.Name):\n if target.id == \"__version__\":\n value_node = el.value\n if isinstance(value_node, ast.Constant):\n version = value_node.value\n elif isinstance(value_node, ast.Str):\n version = value_node.s\n else: # pragma: nocover\n # This is actually covered by tests, but can't be\n # reported by Coverage\n # Ref: https://github.com/nedbat/coveragepy/issues/198\n continue\n io.write_line(\n \"<b>poetry-version-plugin</b>: Setting package \"\n \"dynamic version to __version__ \"\n f\"variable from __init__.py: <b>{version}</b>\"\n )\n poetry.package._set_version(version)\n return\n message = (\n \"<b>poetry-version-plugin</b>: No valid __version__ variable found \"\n \"in __init__.py, cannot extract dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n elif version_source == \"git-tag\":\n result = subprocess.run(\n [\"git\", \"describe\", \"--exact-match\", \"--tags\", \"HEAD\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n universal_newlines=True,\n )\n if result.returncode == 0:\n tag = result.stdout.strip()\n io.write_line(\n \"<b>poetry-version-plugin</b>: Git tag found, setting \"\n f\"dynamic version to: {tag}\"\n )\n poetry.package._set_version(tag)\n return\n else:\n message = (\n \"<b>poetry-version-plugin</b>: No Git tag found, not \"\n \"extracting dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n", "path": "python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py"}], "after_files": [{"content": "import ast\nimport subprocess\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional\n\nfrom cleo.io.io import IO\nfrom poetry.plugins.plugin import Plugin\nfrom poetry.poetry import Poetry\nfrom poetry.core.utils.helpers import module_name\n\n\nclass VersionPlugin(Plugin): # type: ignore\n def activate(self, poetry: Poetry, io: IO) -> None:\n poetry_version_config: Optional[Dict[str, Any]] = poetry.pyproject.data.get(\n \"tool\", {}\n ).get(\"poetry-version-plugin\")\n if poetry_version_config is None:\n return\n version_source = poetry_version_config.get(\"source\")\n version_file_path = poetry_version_config.get(\"file_path\")\n if not version_source:\n message = (\n \"<b>poetry-version-plugin</b>: No <b>source</b> configuration found in \"\n \"[tool.poetry-version-plugin] in pyproject.toml, not extracting \"\n \"dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n\n # Update version using file\n if version_source == \"file\" and version_file_path:\n with open(version_file_path, \"r\") as version_file:\n version = version_file.read().strip()\n poetry.package._set_version(version)\n io.write_line(\n \"<b>poetry-version-plugin</b>: New package \"\n \"version is updated from given file \"\n f\"version is: <b>{version}</b>\"\n )\n content: dict[str, Any] = poetry.file.read()\n poetry_content = content[\"tool\"][\"poetry\"]\n poetry_content[\"version\"] = version\n poetry.file.write(content)\n io.write_line(\n \"<b>poetry-version-plugin</b>: New version updated in toml file\"\n )\n\n if version_source == \"init\":\n packages = poetry.local_config.get(\"packages\")\n if packages:\n if len(packages) == 1:\n package_name = packages[0][\"include\"]\n else:\n message = (\n \"<b>poetry-version-plugin</b>: More than one package set, \"\n \"cannot extract dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n else:\n package_name = 
module_name(poetry.package.name)\n init_path = Path(package_name) / \"__init__.py\"\n if not init_path.is_file():\n message = (\n \"<b>poetry-version-plugin</b>: __init__.py file not found at \"\n f\"{init_path} cannot extract dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n else:\n io.write_line(\n \"<b>poetry-version-plugin</b>: Using __init__.py file at \"\n f\"{init_path} for dynamic version\"\n )\n tree = ast.parse(init_path.read_text())\n for el in tree.body:\n if isinstance(el, ast.Assign):\n if len(el.targets) == 1:\n target = el.targets[0]\n if isinstance(target, ast.Name):\n if target.id == \"__version__\":\n value_node = el.value\n if isinstance(value_node, ast.Constant):\n version = value_node.value\n elif isinstance(value_node, ast.Str):\n version = value_node.s\n else: # pragma: nocover\n # This is actually covered by tests, but can't be\n # reported by Coverage\n # Ref: https://github.com/nedbat/coveragepy/issues/198\n continue\n io.write_line(\n \"<b>poetry-version-plugin</b>: Setting package \"\n \"dynamic version to __version__ \"\n f\"variable from __init__.py: <b>{version}</b>\"\n )\n poetry.package._set_version(version)\n return\n message = (\n \"<b>poetry-version-plugin</b>: No valid __version__ variable found \"\n \"in __init__.py, cannot extract dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n elif version_source == \"git-tag\":\n result = subprocess.run(\n [\"git\", \"describe\", \"--exact-match\", \"--tags\", \"HEAD\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n universal_newlines=True,\n )\n if result.returncode == 0:\n tag = result.stdout.strip()\n io.write_line(\n \"<b>poetry-version-plugin</b>: Git tag found, setting \"\n f\"dynamic version to: {tag}\"\n )\n poetry.package._set_version(tag)\n return\n else:\n message = (\n \"<b>poetry-version-plugin</b>: No Git tag found, not \"\n \"extracting dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n", "path": "python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py"}]}
| 1,573 | 196 |
gh_patches_debug_35241
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-26588
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Zoom support for audio-only calls
Following #25922, we now have support for audio calls in Zulip, but the Zoom integration doesn't yet support it.
We should figure out what parameters we need to pass when creating a Zoom call to support an "Audio call" button for Zoom users as well.
Tagged as an 8.0 release goal, since we'll want to document the audio calls feature as available for all call providers.
--- END ISSUE ---
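As a rough sketch of the direction (not necessarily the final change), Zoom's meeting-creation API accepts per-meeting `settings` that control whether video starts on for the host and participants, so an audio-only call can be requested by turning those off. The `is_video_call` flag below is an illustrative parameter name, and `oauth` is assumed to be the user's existing OAuth2 session.

```python
# Hedged sketch: create an audio-only Zoom meeting by disabling default video.
def create_zoom_meeting(oauth, is_video_call: bool = True):
    payload = {
        "settings": {
            "host_video": is_video_call,         # host starts with video only for video calls
            "participant_video": is_video_call,  # participants likewise default to audio-only
        }
    }
    # Same endpoint the existing integration already posts to, now with settings attached.
    return oauth.post("https://api.zoom.us/v2/users/me/meetings", json=payload)
```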
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `version.py`
Content:
```
1 import os
2
3 ZULIP_VERSION = "8.0-dev+git"
4
5 # Add information on number of commits and commit hash to version, if available
6 zulip_git_version_file = os.path.join(
7 os.path.dirname(os.path.abspath(__file__)), "zulip-git-version"
8 )
9 lines = [ZULIP_VERSION, ""]
10 if os.path.exists(zulip_git_version_file):
11 with open(zulip_git_version_file) as f:
12 lines = [*f, "", ""]
13 ZULIP_VERSION = lines.pop(0).strip()
14 ZULIP_MERGE_BASE = lines.pop(0).strip()
15
16 LATEST_MAJOR_VERSION = "7.0"
17 LATEST_RELEASE_VERSION = "7.3"
18 LATEST_RELEASE_ANNOUNCEMENT = "https://blog.zulip.com/2023/05/31/zulip-7-0-released/"
19
20 # Versions of the desktop app below DESKTOP_MINIMUM_VERSION will be
21 # prevented from connecting to the Zulip server. Versions above
22 # DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have
23 # a banner at the top of the page asking the user to upgrade.
24 DESKTOP_MINIMUM_VERSION = "5.4.3"
25 DESKTOP_WARNING_VERSION = "5.9.3"
26
27 # Bump the API_FEATURE_LEVEL whenever an API change is made
28 # that clients might want to condition on. If we forget at
29 # the time we make the change, then bump it later as soon
30 # as we notice; clients using API_FEATURE_LEVEL will just not
31 # use the new feature/API until the bump.
32 #
33 # Changes should be accompanied by documentation explaining what the
34 # new level means in api_docs/changelog.md, as well as "**Changes**"
35 # entries in the endpoint's documentation in `zulip.yaml`.
36 API_FEATURE_LEVEL = 205
37
38 # Bump the minor PROVISION_VERSION to indicate that folks should provision
39 # only when going from an old version of the code to a newer version. Bump
40 # the major version to indicate that folks should provision in both
41 # directions.
42
43 # Typically,
44 # * adding a dependency only requires a minor version bump;
45 # * removing a dependency requires a major version bump;
46 # * upgrading a dependency requires a major version bump, unless the
47 # upgraded dependency is backwards compatible with all of our
48 # historical commits sharing the same major version, in which case a
49 # minor version bump suffices.
50
51 PROVISION_VERSION = (247, 6)
52
```
Path: `zerver/views/video_calls.py`
Content:
```
1 import hashlib
2 import json
3 import random
4 import secrets
5 from base64 import b32encode
6 from functools import partial
7 from typing import Dict
8 from urllib.parse import quote, urlencode, urljoin
9
10 import requests
11 from defusedxml import ElementTree
12 from django.conf import settings
13 from django.core.signing import Signer
14 from django.http import HttpRequest, HttpResponse
15 from django.middleware import csrf
16 from django.shortcuts import redirect, render
17 from django.utils.crypto import constant_time_compare, salted_hmac
18 from django.utils.translation import gettext as _
19 from django.views.decorators.cache import never_cache
20 from django.views.decorators.csrf import csrf_exempt
21 from django.views.decorators.http import require_POST
22 from oauthlib.oauth2 import OAuth2Error
23 from requests_oauthlib import OAuth2Session
24
25 from zerver.actions.video_calls import do_set_zoom_token
26 from zerver.decorator import zulip_login_required
27 from zerver.lib.exceptions import ErrorCode, JsonableError
28 from zerver.lib.outgoing_http import OutgoingSession
29 from zerver.lib.pysa import mark_sanitized
30 from zerver.lib.request import REQ, has_request_variables
31 from zerver.lib.response import json_success
32 from zerver.lib.subdomains import get_subdomain
33 from zerver.lib.url_encoding import append_url_query_string
34 from zerver.lib.validator import check_dict, check_string
35 from zerver.models import UserProfile, get_realm
36
37
38 class VideoCallSession(OutgoingSession):
39 def __init__(self) -> None:
40 super().__init__(role="video_calls", timeout=5)
41
42
43 class InvalidZoomTokenError(JsonableError):
44 code = ErrorCode.INVALID_ZOOM_TOKEN
45
46 def __init__(self) -> None:
47 super().__init__(_("Invalid Zoom access token"))
48
49
50 def get_zoom_session(user: UserProfile) -> OAuth2Session:
51 if settings.VIDEO_ZOOM_CLIENT_ID is None:
52 raise JsonableError(_("Zoom credentials have not been configured"))
53
54 client_id = settings.VIDEO_ZOOM_CLIENT_ID
55 client_secret = settings.VIDEO_ZOOM_CLIENT_SECRET
56
57 return OAuth2Session(
58 client_id,
59 redirect_uri=urljoin(settings.ROOT_DOMAIN_URI, "/calls/zoom/complete"),
60 auto_refresh_url="https://zoom.us/oauth/token",
61 auto_refresh_kwargs={
62 "client_id": client_id,
63 "client_secret": client_secret,
64 },
65 token=user.zoom_token,
66 token_updater=partial(do_set_zoom_token, user),
67 )
68
69
70 def get_zoom_sid(request: HttpRequest) -> str:
71 # This is used to prevent CSRF attacks on the Zoom OAuth
72 # authentication flow. We want this value to be unpredictable and
73 # tied to the session, but we don’t want to expose the main CSRF
74 # token directly to the Zoom server.
75
76 csrf.get_token(request)
77 # Use 'mark_sanitized' to cause Pysa to ignore the flow of user controlled
78 # data out of this function. 'request.META' is indeed user controlled, but
79 # post-HMAC output is no longer meaningfully controllable.
80 return mark_sanitized(
81 ""
82 if getattr(request, "_dont_enforce_csrf_checks", False)
83 else salted_hmac("Zulip Zoom sid", request.META["CSRF_COOKIE"]).hexdigest()
84 )
85
86
87 @zulip_login_required
88 @never_cache
89 def register_zoom_user(request: HttpRequest) -> HttpResponse:
90 assert request.user.is_authenticated
91
92 oauth = get_zoom_session(request.user)
93 authorization_url, state = oauth.authorization_url(
94 "https://zoom.us/oauth/authorize",
95 state=json.dumps(
96 {"realm": get_subdomain(request), "sid": get_zoom_sid(request)},
97 ),
98 )
99 return redirect(authorization_url)
100
101
102 @never_cache
103 @has_request_variables
104 def complete_zoom_user(
105 request: HttpRequest,
106 state: Dict[str, str] = REQ(
107 json_validator=check_dict([("realm", check_string)], value_validator=check_string)
108 ),
109 ) -> HttpResponse:
110 if get_subdomain(request) != state["realm"]:
111 return redirect(urljoin(get_realm(state["realm"]).uri, request.get_full_path()))
112 return complete_zoom_user_in_realm(request)
113
114
115 @zulip_login_required
116 @has_request_variables
117 def complete_zoom_user_in_realm(
118 request: HttpRequest,
119 code: str = REQ(),
120 state: Dict[str, str] = REQ(
121 json_validator=check_dict([("sid", check_string)], value_validator=check_string)
122 ),
123 ) -> HttpResponse:
124 assert request.user.is_authenticated
125
126 if not constant_time_compare(state["sid"], get_zoom_sid(request)):
127 raise JsonableError(_("Invalid Zoom session identifier"))
128
129 client_secret = settings.VIDEO_ZOOM_CLIENT_SECRET
130
131 oauth = get_zoom_session(request.user)
132 try:
133 token = oauth.fetch_token(
134 "https://zoom.us/oauth/token",
135 code=code,
136 client_secret=client_secret,
137 )
138 except OAuth2Error:
139 raise JsonableError(_("Invalid Zoom credentials"))
140
141 do_set_zoom_token(request.user, token)
142 return render(request, "zerver/close_window.html")
143
144
145 def make_zoom_video_call(request: HttpRequest, user: UserProfile) -> HttpResponse:
146 oauth = get_zoom_session(user)
147 if not oauth.authorized:
148 raise InvalidZoomTokenError
149
150 try:
151 res = oauth.post("https://api.zoom.us/v2/users/me/meetings", json={})
152 except OAuth2Error:
153 do_set_zoom_token(user, None)
154 raise InvalidZoomTokenError
155
156 if res.status_code == 401:
157 do_set_zoom_token(user, None)
158 raise InvalidZoomTokenError
159 elif not res.ok:
160 raise JsonableError(_("Failed to create Zoom call"))
161
162 return json_success(request, data={"url": res.json()["join_url"]})
163
164
165 @csrf_exempt
166 @require_POST
167 @has_request_variables
168 def deauthorize_zoom_user(request: HttpRequest) -> HttpResponse:
169 return json_success(request)
170
171
172 @has_request_variables
173 def get_bigbluebutton_url(
174 request: HttpRequest, user_profile: UserProfile, meeting_name: str = REQ()
175 ) -> HttpResponse:
176 # https://docs.bigbluebutton.org/dev/api.html#create for reference on the API calls
177 # https://docs.bigbluebutton.org/dev/api.html#usage for reference for checksum
178 id = "zulip-" + str(random.randint(100000000000, 999999999999))
179 password = b32encode(secrets.token_bytes(20)).decode() # 20 bytes means 32 characters
180
181 # We sign our data here to ensure a Zulip user can not tamper with
182 # the join link to gain access to other meetings that are on the
183 # same bigbluebutton server.
184 signed = Signer().sign_object(
185 {
186 "meeting_id": id,
187 "name": meeting_name,
188 "password": password,
189 }
190 )
191 url = append_url_query_string("/calls/bigbluebutton/join", "bigbluebutton=" + signed)
192 return json_success(request, {"url": url})
193
194
195 # We use zulip_login_required here mainly to get access to the user's
196 # full name from Zulip to prepopulate the user's name in the
197 # BigBlueButton meeting. Since the meeting's details are encoded in
198 # the link the user is clicking, there is no validation tying this
199 # meeting to the Zulip organization it was created in.
200 @zulip_login_required
201 @never_cache
202 @has_request_variables
203 def join_bigbluebutton(request: HttpRequest, bigbluebutton: str = REQ()) -> HttpResponse:
204 assert request.user.is_authenticated
205
206 if settings.BIG_BLUE_BUTTON_URL is None or settings.BIG_BLUE_BUTTON_SECRET is None:
207 raise JsonableError(_("BigBlueButton is not configured."))
208
209 try:
210 bigbluebutton_data = Signer().unsign_object(bigbluebutton)
211 except Exception:
212 raise JsonableError(_("Invalid signature."))
213
214 create_params = urlencode(
215 {
216 "meetingID": bigbluebutton_data["meeting_id"],
217 "name": bigbluebutton_data["name"],
218 "moderatorPW": bigbluebutton_data["password"],
219 # We generate the attendee password from moderatorPW,
220 # because the BigBlueButton API requires a separate
221 # password. This integration is designed to have all users
222 # join as moderators, so we generate attendeePW by
223 # truncating the moderatorPW while keeping it long enough
224 # to not be vulnerable to brute force attacks.
225 "attendeePW": bigbluebutton_data["password"][:16],
226 },
227 quote_via=quote,
228 )
229
230 checksum = hashlib.sha256(
231 ("create" + create_params + settings.BIG_BLUE_BUTTON_SECRET).encode()
232 ).hexdigest()
233
234 try:
235 response = VideoCallSession().get(
236 append_url_query_string(settings.BIG_BLUE_BUTTON_URL + "api/create", create_params)
237 + "&checksum="
238 + checksum
239 )
240 response.raise_for_status()
241 except requests.RequestException:
242 raise JsonableError(_("Error connecting to the BigBlueButton server."))
243
244 payload = ElementTree.fromstring(response.text)
245 if payload.find("messageKey").text == "checksumError":
246 raise JsonableError(_("Error authenticating to the BigBlueButton server."))
247
248 if payload.find("returncode").text != "SUCCESS":
249 raise JsonableError(_("BigBlueButton server returned an unexpected error."))
250
251 join_params = urlencode(
252 {
253 "meetingID": bigbluebutton_data["meeting_id"],
254 # We use the moderator password here to grant ever user
255 # full moderator permissions to the bigbluebutton session.
256 "password": bigbluebutton_data["password"],
257 "fullName": request.user.full_name,
258 # https://docs.bigbluebutton.org/dev/api.html#create
259 # The createTime option is used to have the user redirected to a link
260 # that is only valid for this meeting.
261 #
262 # Even if the same link in Zulip is used again, a new
263 # createTime parameter will be created, as the meeting on
264 # the BigBlueButton server has to be recreated. (after a
265 # few minutes)
266 "createTime": payload.find("createTime").text,
267 },
268 quote_via=quote,
269 )
270
271 checksum = hashlib.sha256(
272 ("join" + join_params + settings.BIG_BLUE_BUTTON_SECRET).encode()
273 ).hexdigest()
274 redirect_url_base = append_url_query_string(
275 settings.BIG_BLUE_BUTTON_URL + "api/join", join_params
276 )
277 return redirect(append_url_query_string(redirect_url_base, "checksum=" + checksum))
278
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -33,7 +33,7 @@
# Changes should be accompanied by documentation explaining what the
# new level means in api_docs/changelog.md, as well as "**Changes**"
# entries in the endpoint's documentation in `zulip.yaml`.
-API_FEATURE_LEVEL = 205
+API_FEATURE_LEVEL = 206
# Bump the minor PROVISION_VERSION to indicate that folks should provision
# only when going from an old version of the code to a newer version. Bump
diff --git a/zerver/views/video_calls.py b/zerver/views/video_calls.py
--- a/zerver/views/video_calls.py
+++ b/zerver/views/video_calls.py
@@ -31,7 +31,7 @@
from zerver.lib.response import json_success
from zerver.lib.subdomains import get_subdomain
from zerver.lib.url_encoding import append_url_query_string
-from zerver.lib.validator import check_dict, check_string
+from zerver.lib.validator import check_bool, check_dict, check_string
from zerver.models import UserProfile, get_realm
@@ -142,13 +142,30 @@
return render(request, "zerver/close_window.html")
-def make_zoom_video_call(request: HttpRequest, user: UserProfile) -> HttpResponse:
+@has_request_variables
+def make_zoom_video_call(
+ request: HttpRequest,
+ user: UserProfile,
+ is_video_call: bool = REQ(json_validator=check_bool, default=True),
+) -> HttpResponse:
oauth = get_zoom_session(user)
if not oauth.authorized:
raise InvalidZoomTokenError
+ # The meeting host has the ability to configure both their own and
+ # participants' default video on/off state for the meeting. That's
+ # why when creating a meeting, configure the video on/off default
+ # according to the desired call type. Each Zoom user can still have
+ # their own personal setting to not start video by default.
+ payload = {
+ "settings": {
+ "host_video": is_video_call,
+ "participant_video": is_video_call,
+ }
+ }
+
try:
- res = oauth.post("https://api.zoom.us/v2/users/me/meetings", json={})
+ res = oauth.post("https://api.zoom.us/v2/users/me/meetings", json=payload)
except OAuth2Error:
do_set_zoom_token(user, None)
raise InvalidZoomTokenError
|
{"golden_diff": "diff --git a/version.py b/version.py\n--- a/version.py\n+++ b/version.py\n@@ -33,7 +33,7 @@\n # Changes should be accompanied by documentation explaining what the\n # new level means in api_docs/changelog.md, as well as \"**Changes**\"\n # entries in the endpoint's documentation in `zulip.yaml`.\n-API_FEATURE_LEVEL = 205\n+API_FEATURE_LEVEL = 206\n \n # Bump the minor PROVISION_VERSION to indicate that folks should provision\n # only when going from an old version of the code to a newer version. Bump\ndiff --git a/zerver/views/video_calls.py b/zerver/views/video_calls.py\n--- a/zerver/views/video_calls.py\n+++ b/zerver/views/video_calls.py\n@@ -31,7 +31,7 @@\n from zerver.lib.response import json_success\n from zerver.lib.subdomains import get_subdomain\n from zerver.lib.url_encoding import append_url_query_string\n-from zerver.lib.validator import check_dict, check_string\n+from zerver.lib.validator import check_bool, check_dict, check_string\n from zerver.models import UserProfile, get_realm\n \n \n@@ -142,13 +142,30 @@\n return render(request, \"zerver/close_window.html\")\n \n \n-def make_zoom_video_call(request: HttpRequest, user: UserProfile) -> HttpResponse:\n+@has_request_variables\n+def make_zoom_video_call(\n+ request: HttpRequest,\n+ user: UserProfile,\n+ is_video_call: bool = REQ(json_validator=check_bool, default=True),\n+) -> HttpResponse:\n oauth = get_zoom_session(user)\n if not oauth.authorized:\n raise InvalidZoomTokenError\n \n+ # The meeting host has the ability to configure both their own and\n+ # participants' default video on/off state for the meeting. That's\n+ # why when creating a meeting, configure the video on/off default\n+ # according to the desired call type. Each Zoom user can still have\n+ # their own personal setting to not start video by default.\n+ payload = {\n+ \"settings\": {\n+ \"host_video\": is_video_call,\n+ \"participant_video\": is_video_call,\n+ }\n+ }\n+\n try:\n- res = oauth.post(\"https://api.zoom.us/v2/users/me/meetings\", json={})\n+ res = oauth.post(\"https://api.zoom.us/v2/users/me/meetings\", json=payload)\n except OAuth2Error:\n do_set_zoom_token(user, None)\n raise InvalidZoomTokenError\n", "issue": "Add Zoom support for audio-only calls \nFollowing #25922, we now have support for audio calls in Zulip, but the Zoom integration doesn't yet support it.\r\n\r\nWe should figure out what parameters we need to pass when creating a Zoom call to support an \"Audio call\" button for Zoom users as well.\r\n\r\nTagged as an 8.0 release goal, since we'll want to document the audio calls feature as available for all call providers.\n", "before_files": [{"content": "import os\n\nZULIP_VERSION = \"8.0-dev+git\"\n\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"zulip-git-version\"\n)\nlines = [ZULIP_VERSION, \"\"]\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n lines = [*f, \"\", \"\"]\nZULIP_VERSION = lines.pop(0).strip()\nZULIP_MERGE_BASE = lines.pop(0).strip()\n\nLATEST_MAJOR_VERSION = \"7.0\"\nLATEST_RELEASE_VERSION = \"7.3\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.com/2023/05/31/zulip-7-0-released/\"\n\n# Versions of the desktop app below DESKTOP_MINIMUM_VERSION will be\n# prevented from connecting to the Zulip server. 
Versions above\n# DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have\n# a banner at the top of the page asking the user to upgrade.\nDESKTOP_MINIMUM_VERSION = \"5.4.3\"\nDESKTOP_WARNING_VERSION = \"5.9.3\"\n\n# Bump the API_FEATURE_LEVEL whenever an API change is made\n# that clients might want to condition on. If we forget at\n# the time we make the change, then bump it later as soon\n# as we notice; clients using API_FEATURE_LEVEL will just not\n# use the new feature/API until the bump.\n#\n# Changes should be accompanied by documentation explaining what the\n# new level means in api_docs/changelog.md, as well as \"**Changes**\"\n# entries in the endpoint's documentation in `zulip.yaml`.\nAPI_FEATURE_LEVEL = 205\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = (247, 6)\n", "path": "version.py"}, {"content": "import hashlib\nimport json\nimport random\nimport secrets\nfrom base64 import b32encode\nfrom functools import partial\nfrom typing import Dict\nfrom urllib.parse import quote, urlencode, urljoin\n\nimport requests\nfrom defusedxml import ElementTree\nfrom django.conf import settings\nfrom django.core.signing import Signer\nfrom django.http import HttpRequest, HttpResponse\nfrom django.middleware import csrf\nfrom django.shortcuts import redirect, render\nfrom django.utils.crypto import constant_time_compare, salted_hmac\nfrom django.utils.translation import gettext as _\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_POST\nfrom oauthlib.oauth2 import OAuth2Error\nfrom requests_oauthlib import OAuth2Session\n\nfrom zerver.actions.video_calls import do_set_zoom_token\nfrom zerver.decorator import zulip_login_required\nfrom zerver.lib.exceptions import ErrorCode, JsonableError\nfrom zerver.lib.outgoing_http import OutgoingSession\nfrom zerver.lib.pysa import mark_sanitized\nfrom zerver.lib.request import REQ, has_request_variables\nfrom zerver.lib.response import json_success\nfrom zerver.lib.subdomains import get_subdomain\nfrom zerver.lib.url_encoding import append_url_query_string\nfrom zerver.lib.validator import check_dict, check_string\nfrom zerver.models import UserProfile, get_realm\n\n\nclass VideoCallSession(OutgoingSession):\n def __init__(self) -> None:\n super().__init__(role=\"video_calls\", timeout=5)\n\n\nclass InvalidZoomTokenError(JsonableError):\n code = ErrorCode.INVALID_ZOOM_TOKEN\n\n def __init__(self) -> None:\n super().__init__(_(\"Invalid Zoom access token\"))\n\n\ndef get_zoom_session(user: UserProfile) -> OAuth2Session:\n if settings.VIDEO_ZOOM_CLIENT_ID is None:\n raise JsonableError(_(\"Zoom credentials have not been configured\"))\n\n client_id = settings.VIDEO_ZOOM_CLIENT_ID\n client_secret = settings.VIDEO_ZOOM_CLIENT_SECRET\n\n return OAuth2Session(\n client_id,\n redirect_uri=urljoin(settings.ROOT_DOMAIN_URI, \"/calls/zoom/complete\"),\n 
auto_refresh_url=\"https://zoom.us/oauth/token\",\n auto_refresh_kwargs={\n \"client_id\": client_id,\n \"client_secret\": client_secret,\n },\n token=user.zoom_token,\n token_updater=partial(do_set_zoom_token, user),\n )\n\n\ndef get_zoom_sid(request: HttpRequest) -> str:\n # This is used to prevent CSRF attacks on the Zoom OAuth\n # authentication flow. We want this value to be unpredictable and\n # tied to the session, but we don\u2019t want to expose the main CSRF\n # token directly to the Zoom server.\n\n csrf.get_token(request)\n # Use 'mark_sanitized' to cause Pysa to ignore the flow of user controlled\n # data out of this function. 'request.META' is indeed user controlled, but\n # post-HMAC output is no longer meaningfully controllable.\n return mark_sanitized(\n \"\"\n if getattr(request, \"_dont_enforce_csrf_checks\", False)\n else salted_hmac(\"Zulip Zoom sid\", request.META[\"CSRF_COOKIE\"]).hexdigest()\n )\n\n\n@zulip_login_required\n@never_cache\ndef register_zoom_user(request: HttpRequest) -> HttpResponse:\n assert request.user.is_authenticated\n\n oauth = get_zoom_session(request.user)\n authorization_url, state = oauth.authorization_url(\n \"https://zoom.us/oauth/authorize\",\n state=json.dumps(\n {\"realm\": get_subdomain(request), \"sid\": get_zoom_sid(request)},\n ),\n )\n return redirect(authorization_url)\n\n\n@never_cache\n@has_request_variables\ndef complete_zoom_user(\n request: HttpRequest,\n state: Dict[str, str] = REQ(\n json_validator=check_dict([(\"realm\", check_string)], value_validator=check_string)\n ),\n) -> HttpResponse:\n if get_subdomain(request) != state[\"realm\"]:\n return redirect(urljoin(get_realm(state[\"realm\"]).uri, request.get_full_path()))\n return complete_zoom_user_in_realm(request)\n\n\n@zulip_login_required\n@has_request_variables\ndef complete_zoom_user_in_realm(\n request: HttpRequest,\n code: str = REQ(),\n state: Dict[str, str] = REQ(\n json_validator=check_dict([(\"sid\", check_string)], value_validator=check_string)\n ),\n) -> HttpResponse:\n assert request.user.is_authenticated\n\n if not constant_time_compare(state[\"sid\"], get_zoom_sid(request)):\n raise JsonableError(_(\"Invalid Zoom session identifier\"))\n\n client_secret = settings.VIDEO_ZOOM_CLIENT_SECRET\n\n oauth = get_zoom_session(request.user)\n try:\n token = oauth.fetch_token(\n \"https://zoom.us/oauth/token\",\n code=code,\n client_secret=client_secret,\n )\n except OAuth2Error:\n raise JsonableError(_(\"Invalid Zoom credentials\"))\n\n do_set_zoom_token(request.user, token)\n return render(request, \"zerver/close_window.html\")\n\n\ndef make_zoom_video_call(request: HttpRequest, user: UserProfile) -> HttpResponse:\n oauth = get_zoom_session(user)\n if not oauth.authorized:\n raise InvalidZoomTokenError\n\n try:\n res = oauth.post(\"https://api.zoom.us/v2/users/me/meetings\", json={})\n except OAuth2Error:\n do_set_zoom_token(user, None)\n raise InvalidZoomTokenError\n\n if res.status_code == 401:\n do_set_zoom_token(user, None)\n raise InvalidZoomTokenError\n elif not res.ok:\n raise JsonableError(_(\"Failed to create Zoom call\"))\n\n return json_success(request, data={\"url\": res.json()[\"join_url\"]})\n\n\n@csrf_exempt\n@require_POST\n@has_request_variables\ndef deauthorize_zoom_user(request: HttpRequest) -> HttpResponse:\n return json_success(request)\n\n\n@has_request_variables\ndef get_bigbluebutton_url(\n request: HttpRequest, user_profile: UserProfile, meeting_name: str = REQ()\n) -> HttpResponse:\n # https://docs.bigbluebutton.org/dev/api.html#create for 
reference on the API calls\n # https://docs.bigbluebutton.org/dev/api.html#usage for reference for checksum\n id = \"zulip-\" + str(random.randint(100000000000, 999999999999))\n password = b32encode(secrets.token_bytes(20)).decode() # 20 bytes means 32 characters\n\n # We sign our data here to ensure a Zulip user can not tamper with\n # the join link to gain access to other meetings that are on the\n # same bigbluebutton server.\n signed = Signer().sign_object(\n {\n \"meeting_id\": id,\n \"name\": meeting_name,\n \"password\": password,\n }\n )\n url = append_url_query_string(\"/calls/bigbluebutton/join\", \"bigbluebutton=\" + signed)\n return json_success(request, {\"url\": url})\n\n\n# We use zulip_login_required here mainly to get access to the user's\n# full name from Zulip to prepopulate the user's name in the\n# BigBlueButton meeting. Since the meeting's details are encoded in\n# the link the user is clicking, there is no validation tying this\n# meeting to the Zulip organization it was created in.\n@zulip_login_required\n@never_cache\n@has_request_variables\ndef join_bigbluebutton(request: HttpRequest, bigbluebutton: str = REQ()) -> HttpResponse:\n assert request.user.is_authenticated\n\n if settings.BIG_BLUE_BUTTON_URL is None or settings.BIG_BLUE_BUTTON_SECRET is None:\n raise JsonableError(_(\"BigBlueButton is not configured.\"))\n\n try:\n bigbluebutton_data = Signer().unsign_object(bigbluebutton)\n except Exception:\n raise JsonableError(_(\"Invalid signature.\"))\n\n create_params = urlencode(\n {\n \"meetingID\": bigbluebutton_data[\"meeting_id\"],\n \"name\": bigbluebutton_data[\"name\"],\n \"moderatorPW\": bigbluebutton_data[\"password\"],\n # We generate the attendee password from moderatorPW,\n # because the BigBlueButton API requires a separate\n # password. This integration is designed to have all users\n # join as moderators, so we generate attendeePW by\n # truncating the moderatorPW while keeping it long enough\n # to not be vulnerable to brute force attacks.\n \"attendeePW\": bigbluebutton_data[\"password\"][:16],\n },\n quote_via=quote,\n )\n\n checksum = hashlib.sha256(\n (\"create\" + create_params + settings.BIG_BLUE_BUTTON_SECRET).encode()\n ).hexdigest()\n\n try:\n response = VideoCallSession().get(\n append_url_query_string(settings.BIG_BLUE_BUTTON_URL + \"api/create\", create_params)\n + \"&checksum=\"\n + checksum\n )\n response.raise_for_status()\n except requests.RequestException:\n raise JsonableError(_(\"Error connecting to the BigBlueButton server.\"))\n\n payload = ElementTree.fromstring(response.text)\n if payload.find(\"messageKey\").text == \"checksumError\":\n raise JsonableError(_(\"Error authenticating to the BigBlueButton server.\"))\n\n if payload.find(\"returncode\").text != \"SUCCESS\":\n raise JsonableError(_(\"BigBlueButton server returned an unexpected error.\"))\n\n join_params = urlencode(\n {\n \"meetingID\": bigbluebutton_data[\"meeting_id\"],\n # We use the moderator password here to grant ever user\n # full moderator permissions to the bigbluebutton session.\n \"password\": bigbluebutton_data[\"password\"],\n \"fullName\": request.user.full_name,\n # https://docs.bigbluebutton.org/dev/api.html#create\n # The createTime option is used to have the user redirected to a link\n # that is only valid for this meeting.\n #\n # Even if the same link in Zulip is used again, a new\n # createTime parameter will be created, as the meeting on\n # the BigBlueButton server has to be recreated. 
(after a\n # few minutes)\n \"createTime\": payload.find(\"createTime\").text,\n },\n quote_via=quote,\n )\n\n checksum = hashlib.sha256(\n (\"join\" + join_params + settings.BIG_BLUE_BUTTON_SECRET).encode()\n ).hexdigest()\n redirect_url_base = append_url_query_string(\n settings.BIG_BLUE_BUTTON_URL + \"api/join\", join_params\n )\n return redirect(append_url_query_string(redirect_url_base, \"checksum=\" + checksum))\n", "path": "zerver/views/video_calls.py"}], "after_files": [{"content": "import os\n\nZULIP_VERSION = \"8.0-dev+git\"\n\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"zulip-git-version\"\n)\nlines = [ZULIP_VERSION, \"\"]\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n lines = [*f, \"\", \"\"]\nZULIP_VERSION = lines.pop(0).strip()\nZULIP_MERGE_BASE = lines.pop(0).strip()\n\nLATEST_MAJOR_VERSION = \"7.0\"\nLATEST_RELEASE_VERSION = \"7.3\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.com/2023/05/31/zulip-7-0-released/\"\n\n# Versions of the desktop app below DESKTOP_MINIMUM_VERSION will be\n# prevented from connecting to the Zulip server. Versions above\n# DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have\n# a banner at the top of the page asking the user to upgrade.\nDESKTOP_MINIMUM_VERSION = \"5.4.3\"\nDESKTOP_WARNING_VERSION = \"5.9.3\"\n\n# Bump the API_FEATURE_LEVEL whenever an API change is made\n# that clients might want to condition on. If we forget at\n# the time we make the change, then bump it later as soon\n# as we notice; clients using API_FEATURE_LEVEL will just not\n# use the new feature/API until the bump.\n#\n# Changes should be accompanied by documentation explaining what the\n# new level means in api_docs/changelog.md, as well as \"**Changes**\"\n# entries in the endpoint's documentation in `zulip.yaml`.\nAPI_FEATURE_LEVEL = 206\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. 
Tail of a JSON record listing file contents: `version.py` — a comment describing when to bump the provision version (adding a dependency normally needs only a minor bump; removing or incompatibly upgrading one needs a major bump) followed by `PROVISION_VERSION = (247, 6)`; and `zerver/views/video_calls.py` — the Zoom OAuth integration (session setup with auto-refreshing tokens, a CSRF-protecting session identifier, user registration, OAuth completion, and meeting creation with host/participant video defaults) plus the BigBlueButton integration (meeting details signed to prevent tampering, and `create`/`join` API calls authenticated with a SHA-256 checksum over the API call name, the query string, and the server's shared secret).
num_tokens: 4,033 | num_tokens_diff: 552
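To make the BigBlueButton checksum scheme mentioned above concrete, here is a minimal sketch of how a `create` call could be authenticated. The server URL, meeting parameters, and shared secret below are made-up placeholder values, and the real view sends additional parameters; only the checksum construction (SHA-256 over the API call name, the exact query string, and the shared secret) mirrors the code summarized above.

```
import hashlib
from urllib.parse import quote, urlencode


def bbb_checksum(api_call: str, query_string: str, shared_secret: str) -> str:
    # BigBlueButton authenticates API calls by hashing the call name,
    # the exact query string, and the server's shared secret.
    return hashlib.sha256((api_call + query_string + shared_secret).encode()).hexdigest()


# Placeholder values for illustration only.
shared_secret = "example-shared-secret"
create_params = urlencode(
    {
        "meetingID": "zulip-123456789012",
        "name": "Example meeting",
        "moderatorPW": "example-moderator-password",
    },
    quote_via=quote,
)
checksum = bbb_checksum("create", create_params, shared_secret)
create_url = f"https://bbb.example.com/bigbluebutton/api/create?{create_params}&checksum={checksum}"
print(create_url)
```

The same construction would apply to the `join` call, with the join parameters' query string substituted for `create_params`.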