problem_id (string, 18-22 chars) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, 13-58 chars) | prompt (string, 1.1k-10.2k chars) | golden_diff (string, 151-4.94k chars) | verification_info (string, 582-21k chars) | num_tokens (int64, 271-2.05k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_10906 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-973 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Group administration: AttributeError: 'NoneType' object has no attribute 'getGroupTitleOrName'
/Plone2/@@usergroup-groupmembership?groupname=None
gives me
Here is the full error message:
Display traceback as text
Traceback (innermost last):
Module ZPublisher.Publish, line 138, in publish
Module ZPublisher.mapply, line 77, in mapply
Module ZPublisher.Publish, line 48, in call_object
Module Products.CMFPlone.controlpanel.browser.usergroups_groupmembership, line 69, in __call__
Module Products.CMFPlone.controlpanel.browser.usergroups_groupmembership, line 16, in update
AttributeError: 'NoneType' object has no attribute 'getGroupTitleOrName'
This happens when you click on "new group" and then on the "group members" tab.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py`
Content:
```
1 from Products.CMFPlone import PloneMessageFactory as _
2 from zExceptions import Forbidden
3 from Products.CMFCore.utils import getToolByName
4 from Products.CMFPlone.controlpanel.browser.usergroups import \
5 UsersGroupsControlPanelView
6 from Products.CMFPlone.utils import normalizeString
7
8
9 class GroupMembershipControlPanel(UsersGroupsControlPanelView):
10
11 def update(self):
12 self.groupname = getattr(self.request, 'groupname')
13 self.gtool = getToolByName(self, 'portal_groups')
14 self.mtool = getToolByName(self, 'portal_membership')
15 self.group = self.gtool.getGroupById(self.groupname)
16 self.grouptitle = self.group.getGroupTitleOrName() or self.groupname
17
18 self.request.set('grouproles', self.group.getRoles() if self.group else [])
19 self.canAddUsers = True
20 if 'Manager' in self.request.get('grouproles') and not self.is_zope_manager:
21 self.canAddUsers = False
22
23 self.groupquery = self.makeQuery(groupname=self.groupname)
24 self.groupkeyquery = self.makeQuery(key=self.groupname)
25
26 form = self.request.form
27 submitted = form.get('form.submitted', False)
28
29 self.searchResults = []
30 self.searchString = ''
31 self.newSearch = False
32
33 if submitted:
34 # add/delete before we search so we don't show stale results
35 toAdd = form.get('add', [])
36 if toAdd:
37 if not self.canAddUsers:
38 raise Forbidden
39
40 for u in toAdd:
41 self.gtool.addPrincipalToGroup(u, self.groupname, self.request)
42 self.context.plone_utils.addPortalMessage(_(u'Changes made.'))
43
44 toDelete = form.get('delete', [])
45 if toDelete:
46 for u in toDelete:
47 self.gtool.removePrincipalFromGroup(u, self.groupname, self.request)
48 self.context.plone_utils.addPortalMessage(_(u'Changes made.'))
49
50 search = form.get('form.button.Search', None) is not None
51 edit = form.get('form.button.Edit', None) is not None and toDelete
52 add = form.get('form.button.Add', None) is not None and toAdd
53 findAll = form.get('form.button.FindAll', None) is not None and \
54 not self.many_users
55 # The search string should be cleared when one of the
56 # non-search buttons has been clicked.
57 if findAll or edit or add:
58 form['searchstring'] = ''
59 self.searchString = form.get('searchstring', '')
60 if findAll or bool(self.searchString):
61 self.searchResults = self.getPotentialMembers(self.searchString)
62
63 if search or findAll:
64 self.newSearch = True
65
66 self.groupMembers = self.getMembers()
67
68 def __call__(self):
69 self.update()
70 return self.index()
71
72 def isGroup(self, itemName):
73 return self.gtool.isGroup(itemName)
74
75 def getMembers(self):
76 searchResults = self.gtool.getGroupMembers(self.groupname)
77
78 groupResults = [self.gtool.getGroupById(m) for m in searchResults]
79 groupResults.sort(key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName()))
80
81 userResults = [self.mtool.getMemberById(m) for m in searchResults]
82 userResults.sort(key=lambda x: x is not None and x.getProperty('fullname') is not None and normalizeString(x.getProperty('fullname')) or '')
83
84 mergedResults = groupResults + userResults
85 return filter(None, mergedResults)
86
87 def getPotentialMembers(self, searchString):
88 ignoredUsersGroups = [x.id for x in self.getMembers() + [self.group,] if x is not None]
89 return self.membershipSearch(searchString, ignore=ignoredUsersGroups)
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py b/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py
--- a/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py
+++ b/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py
@@ -13,6 +13,9 @@
self.gtool = getToolByName(self, 'portal_groups')
self.mtool = getToolByName(self, 'portal_membership')
self.group = self.gtool.getGroupById(self.groupname)
+ if self.group is None:
+ return
+
self.grouptitle = self.group.getGroupTitleOrName() or self.groupname
self.request.set('grouproles', self.group.getRoles() if self.group else [])
| {"golden_diff": "diff --git a/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py b/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py\n--- a/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py\n+++ b/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py\n@@ -13,6 +13,9 @@\n self.gtool = getToolByName(self, 'portal_groups')\n self.mtool = getToolByName(self, 'portal_membership')\n self.group = self.gtool.getGroupById(self.groupname)\n+ if self.group is None:\n+ return\n+\n self.grouptitle = self.group.getGroupTitleOrName() or self.groupname\n \n self.request.set('grouproles', self.group.getRoles() if self.group else [])\n", "issue": "Group administration: AttributeError: 'NoneType' object has no attribute 'getGroupTitleOrName'\n/Plone2/@@usergroup-groupmembership?groupname=None\n\ngives me\n\nHere is the full error message:\n\nDisplay traceback as text\n\nTraceback (innermost last):\n\nModule ZPublisher.Publish, line 138, in publish\nModule ZPublisher.mapply, line 77, in mapply\nModule ZPublisher.Publish, line 48, in call_object\nModule Products.CMFPlone.controlpanel.browser.usergroups_groupmembership, line 69, in **call**\nModule Products.CMFPlone.controlpanel.browser.usergroups_groupmembership, line 16, in update\nAttributeError: 'NoneType' object has no attribute 'getGroupTitleOrName'\n\nThis happens when you click on \"new group\" and then on the \"group members\" tab.\n\n", "before_files": [{"content": "from Products.CMFPlone import PloneMessageFactory as _\nfrom zExceptions import Forbidden\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.controlpanel.browser.usergroups import \\\n UsersGroupsControlPanelView\nfrom Products.CMFPlone.utils import normalizeString\n\n\nclass GroupMembershipControlPanel(UsersGroupsControlPanelView):\n\n def update(self):\n self.groupname = getattr(self.request, 'groupname')\n self.gtool = getToolByName(self, 'portal_groups')\n self.mtool = getToolByName(self, 'portal_membership')\n self.group = self.gtool.getGroupById(self.groupname)\n self.grouptitle = self.group.getGroupTitleOrName() or self.groupname\n\n self.request.set('grouproles', self.group.getRoles() if self.group else [])\n self.canAddUsers = True\n if 'Manager' in self.request.get('grouproles') and not self.is_zope_manager:\n self.canAddUsers = False\n\n self.groupquery = self.makeQuery(groupname=self.groupname)\n self.groupkeyquery = self.makeQuery(key=self.groupname)\n\n form = self.request.form\n submitted = form.get('form.submitted', False)\n\n self.searchResults = []\n self.searchString = ''\n self.newSearch = False\n\n if submitted:\n # add/delete before we search so we don't show stale results\n toAdd = form.get('add', [])\n if toAdd:\n if not self.canAddUsers:\n raise Forbidden\n\n for u in toAdd:\n self.gtool.addPrincipalToGroup(u, self.groupname, self.request)\n self.context.plone_utils.addPortalMessage(_(u'Changes made.'))\n\n toDelete = form.get('delete', [])\n if toDelete:\n for u in toDelete:\n self.gtool.removePrincipalFromGroup(u, self.groupname, self.request)\n self.context.plone_utils.addPortalMessage(_(u'Changes made.'))\n\n search = form.get('form.button.Search', None) is not None\n edit = form.get('form.button.Edit', None) is not None and toDelete\n add = form.get('form.button.Add', None) is not None and toAdd\n findAll = form.get('form.button.FindAll', None) is not None and \\\n not self.many_users\n # The search string should be cleared when one of the\n # non-search buttons has 
been clicked.\n if findAll or edit or add:\n form['searchstring'] = ''\n self.searchString = form.get('searchstring', '')\n if findAll or bool(self.searchString):\n self.searchResults = self.getPotentialMembers(self.searchString)\n\n if search or findAll:\n self.newSearch = True\n\n self.groupMembers = self.getMembers()\n\n def __call__(self):\n self.update()\n return self.index()\n\n def isGroup(self, itemName):\n return self.gtool.isGroup(itemName)\n\n def getMembers(self):\n searchResults = self.gtool.getGroupMembers(self.groupname)\n\n groupResults = [self.gtool.getGroupById(m) for m in searchResults]\n groupResults.sort(key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName()))\n\n userResults = [self.mtool.getMemberById(m) for m in searchResults]\n userResults.sort(key=lambda x: x is not None and x.getProperty('fullname') is not None and normalizeString(x.getProperty('fullname')) or '')\n\n mergedResults = groupResults + userResults\n return filter(None, mergedResults)\n\n def getPotentialMembers(self, searchString):\n ignoredUsersGroups = [x.id for x in self.getMembers() + [self.group,] if x is not None]\n return self.membershipSearch(searchString, ignore=ignoredUsersGroups)\n", "path": "Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py"}], "after_files": [{"content": "from Products.CMFPlone import PloneMessageFactory as _\nfrom zExceptions import Forbidden\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.controlpanel.browser.usergroups import \\\n UsersGroupsControlPanelView\nfrom Products.CMFPlone.utils import normalizeString\n\n\nclass GroupMembershipControlPanel(UsersGroupsControlPanelView):\n\n def update(self):\n self.groupname = getattr(self.request, 'groupname')\n self.gtool = getToolByName(self, 'portal_groups')\n self.mtool = getToolByName(self, 'portal_membership')\n self.group = self.gtool.getGroupById(self.groupname)\n if self.group is None:\n return\n\n self.grouptitle = self.group.getGroupTitleOrName() or self.groupname\n\n self.request.set('grouproles', self.group.getRoles() if self.group else [])\n self.canAddUsers = True\n if 'Manager' in self.request.get('grouproles') and not self.is_zope_manager:\n self.canAddUsers = False\n\n self.groupquery = self.makeQuery(groupname=self.groupname)\n self.groupkeyquery = self.makeQuery(key=self.groupname)\n\n form = self.request.form\n submitted = form.get('form.submitted', False)\n\n self.searchResults = []\n self.searchString = ''\n self.newSearch = False\n\n if submitted:\n # add/delete before we search so we don't show stale results\n toAdd = form.get('add', [])\n if toAdd:\n if not self.canAddUsers:\n raise Forbidden\n\n for u in toAdd:\n self.gtool.addPrincipalToGroup(u, self.groupname, self.request)\n self.context.plone_utils.addPortalMessage(_(u'Changes made.'))\n\n toDelete = form.get('delete', [])\n if toDelete:\n for u in toDelete:\n self.gtool.removePrincipalFromGroup(u, self.groupname, self.request)\n self.context.plone_utils.addPortalMessage(_(u'Changes made.'))\n\n search = form.get('form.button.Search', None) is not None\n edit = form.get('form.button.Edit', None) is not None and toDelete\n add = form.get('form.button.Add', None) is not None and toAdd\n findAll = form.get('form.button.FindAll', None) is not None and \\\n not self.many_users\n # The search string should be cleared when one of the\n # non-search buttons has been clicked.\n if findAll or edit or add:\n form['searchstring'] = ''\n self.searchString = form.get('searchstring', '')\n if 
findAll or bool(self.searchString):\n self.searchResults = self.getPotentialMembers(self.searchString)\n\n if search or findAll:\n self.newSearch = True\n\n self.groupMembers = self.getMembers()\n\n def __call__(self):\n self.update()\n return self.index()\n\n def isGroup(self, itemName):\n return self.gtool.isGroup(itemName)\n\n def getMembers(self):\n searchResults = self.gtool.getGroupMembers(self.groupname)\n\n groupResults = [self.gtool.getGroupById(m) for m in searchResults]\n groupResults.sort(key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName()))\n\n userResults = [self.mtool.getMemberById(m) for m in searchResults]\n userResults.sort(key=lambda x: x is not None and x.getProperty('fullname') is not None and normalizeString(x.getProperty('fullname')) or '')\n\n mergedResults = groupResults + userResults\n return filter(None, mergedResults)\n\n def getPotentialMembers(self, searchString):\n ignoredUsersGroups = [x.id for x in self.getMembers() + [self.group,] if x is not None]\n return self.membershipSearch(searchString, ignore=ignoredUsersGroups)\n", "path": "Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py"}]} | 1,451 | 179 |
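Each record above follows the column layout in the header: `prompt` holds the issue text plus the candidate source files, `golden_diff` holds the reference patch, and `verification_info` repeats that material as a JSON string. A minimal sketch of loading the data with the Hugging Face `datasets` library and reading one record; treating the `source` value as the Hub repository id and assuming a `train` split are both assumptions, since neither is stated explicitly above:

```python
# Minimal sketch: load the dataset and inspect one record.
# Assumptions: the repo id matches the `source` column ("rasdani/github-patches")
# and the data is published under a "train" split.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], "|", row["in_source_id"])  # e.g. gh_patches_debug_10906 | plone__Products.CMFPlone-973
print(row["prompt"][:400])         # issue text followed by the relevant file contents
print(row["golden_diff"])          # reference patch in `git diff` format
print(row["num_tokens"], row["num_tokens_diff"])
```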
gh_patches_debug_17529 | rasdani/github-patches | git_diff | talonhub__community-324 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Vocabulary functions do not strip whitespace from CSV
I had the misfortune the last few days to believe that the vocabulary csv files were not operating correctly.
Apparently whitespace is not stripped around fields. So "test, test2" leads a row which is parsed, but not turned into a functioning command.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `code/user_settings.py`
Content:
```
1 from talon import Module, fs, Context
2 import os
3 import csv
4 from pathlib import Path
5 from typing import Dict, List, Tuple
6 import threading
7
8
9 # NOTE: This method requires this module to be one folder below the top-level
10 # knausj folder.
11 SETTINGS_DIR = Path(__file__).parents[1] / "settings"
12
13 if not SETTINGS_DIR.is_dir():
14 os.mkdir(SETTINGS_DIR)
15
16 mod = Module()
17 ctx = Context()
18
19
20 def _load_csv_dict(
21 file_name: str, headers=Tuple[str, str], default: Dict[str, str] = {}
22 ) -> Dict[str, str]:
23 """Load a word mapping from a CSV file. If it doesn't exist, create it."""
24 assert file_name.endswith(".csv")
25 path = SETTINGS_DIR / file_name
26
27 # Create the file if it doesn't exist
28 if not SETTINGS_DIR.is_dir():
29 os.mkdir(SETTINGS_DIR)
30 if not path.is_file():
31 with open(path, "w", encoding="utf-8") as file:
32 writer = csv.writer(file)
33 writer.writerow(headers)
34 for key, value in default.items():
35 writer.writerow([key] if key == value else [value, key])
36
37 # Now read from disk
38 with open(path, "r", encoding="utf-8") as file:
39 rows = list(csv.reader(file))
40
41 mapping = {}
42 if len(rows) >= 2:
43 actual_headers = rows[0]
44 if not actual_headers == list(headers):
45 print(
46 f'"{file_name}": Malformed headers - {actual_headers}.'
47 + f" Should be {list(headers)}. Ignoring row."
48 )
49 for row in rows[1:]:
50 if len(row) == 0:
51 # Windows newlines are sometimes read as empty rows. :champagne:
52 continue
53 if len(row) == 1:
54 mapping[row[0]] = row[0]
55 else:
56 mapping[row[1]] = row[0]
57 if len(row) > 2:
58 print(
59 f'"{file_name}": More than two values in row: {row}.'
60 + " Ignoring the extras."
61 )
62 return mapping
63
64
65 _mapped_lists = {}
66 _settings_lock = threading.Lock()
67 _word_map_params = None
68
69
70 def _update_list(list_name: str, *csv_params):
71 """Update list with `list_name` from a csv on disk.
72
73 `csv_params` will be passed to `_load_csv_dict`.
74
75 """
76 global ctx
77 ctx.lists[list_name] = _load_csv_dict(*csv_params)
78
79
80 def _update_word_map(*csv_params):
81 """Update `dictate.word_map` from disk.
82
83 `csv_params` will be passed to `_load_csv_dict`.
84
85 """
86 global ctx
87 ctx.settings["dictate.word_map"] = _load_csv_dict(*csv_params)
88
89
90 def _update_lists(*_):
91 """Update all CSV lists from disk."""
92 print("Updating CSV lists...")
93 with _settings_lock:
94 for list_name, csv_params in _mapped_lists.items():
95 try:
96 _update_list(list_name, *csv_params)
97 except Exception as e:
98 print(f'Error loading list "{list_name}": {e}')
99 # Special case - `dictate.word_map` isn't a list.
100 if _word_map_params:
101 try:
102 _update_word_map(*_word_map_params)
103 except Exception as e:
104 print(f'Error updating "dictate.word_map": {e}')
105
106
107 def bind_list_to_csv(
108 list_name: str,
109 csv_name: str,
110 csv_headers: Tuple[str, str],
111 default_values: Dict[str, str] = {},
112 ) -> None:
113 """Register a Talon list that should be updated from a CSV on disk.
114
115 The CSV file will be created automatically in the "settings" dir if it
116 doesn't exist. This directory can be tracked independently to
117 `knausj_talon`, allowing the user to specify things like private vocab
118 separately.
119
120 Note the list must be declared separately.
121
122 """
123 global _mapped_lists
124 with _settings_lock:
125 _update_list(list_name, csv_name, csv_headers, default_values)
126 # If there were no errors, we can register it permanently.
127 _mapped_lists[list_name] = (csv_name, csv_headers, default_values)
128
129
130 def bind_word_map_to_csv(
131 csv_name: str, csv_headers: Tuple[str, str], default_values: Dict[str, str] = {}
132 ) -> None:
133 """Like `bind_list_to_csv`, but for the `dictate.word_map` setting.
134
135 Since it is a setting, not a list, it has to be handled separately.
136
137 """
138 global _word_map_params
139 # TODO: Maybe a generic system for binding the dicts to settings? Only
140 # implement if it's needed.
141 with _settings_lock:
142 _update_word_map(csv_name, csv_headers, default_values)
143 # If there were no errors, we can register it permanently.
144 _word_map_params = (csv_name, csv_headers, default_values)
145
146
147 fs.watch(str(SETTINGS_DIR), _update_lists)
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/code/user_settings.py b/code/user_settings.py
--- a/code/user_settings.py
+++ b/code/user_settings.py
@@ -51,14 +51,17 @@
# Windows newlines are sometimes read as empty rows. :champagne:
continue
if len(row) == 1:
- mapping[row[0]] = row[0]
+ output = spoken_form = row[0]
else:
- mapping[row[1]] = row[0]
+ output, spoken_form = row[:2]
if len(row) > 2:
print(
f'"{file_name}": More than two values in row: {row}.'
+ " Ignoring the extras."
)
+ # Leading/trailing whitespace in spoken form can prevent recognition.
+ spoken_form = spoken_form.strip()
+ mapping[spoken_form] = output
return mapping
| {"golden_diff": "diff --git a/code/user_settings.py b/code/user_settings.py\n--- a/code/user_settings.py\n+++ b/code/user_settings.py\n@@ -51,14 +51,17 @@\n # Windows newlines are sometimes read as empty rows. :champagne:\n continue\n if len(row) == 1:\n- mapping[row[0]] = row[0]\n+ output = spoken_form = row[0]\n else:\n- mapping[row[1]] = row[0]\n+ output, spoken_form = row[:2]\n if len(row) > 2:\n print(\n f'\"{file_name}\": More than two values in row: {row}.'\n + \" Ignoring the extras.\"\n )\n+ # Leading/trailing whitespace in spoken form can prevent recognition.\n+ spoken_form = spoken_form.strip()\n+ mapping[spoken_form] = output\n return mapping\n", "issue": "Vocabulary functions do not strip whitespace from CSV\nI had the misfortune the last few days to believe that the vocabulary csv files were not operating correctly.\r\n\r\nApparently whitespace is not stripped around fields. So \"test, test2\" leads a row which is parsed, but not turned into a functioning command.\n", "before_files": [{"content": "from talon import Module, fs, Context\nimport os\nimport csv\nfrom pathlib import Path\nfrom typing import Dict, List, Tuple\nimport threading\n\n\n# NOTE: This method requires this module to be one folder below the top-level\n# knausj folder.\nSETTINGS_DIR = Path(__file__).parents[1] / \"settings\"\n\nif not SETTINGS_DIR.is_dir():\n os.mkdir(SETTINGS_DIR)\n\nmod = Module()\nctx = Context()\n\n\ndef _load_csv_dict(\n file_name: str, headers=Tuple[str, str], default: Dict[str, str] = {}\n) -> Dict[str, str]:\n \"\"\"Load a word mapping from a CSV file. If it doesn't exist, create it.\"\"\"\n assert file_name.endswith(\".csv\")\n path = SETTINGS_DIR / file_name\n\n # Create the file if it doesn't exist\n if not SETTINGS_DIR.is_dir():\n os.mkdir(SETTINGS_DIR)\n if not path.is_file():\n with open(path, \"w\", encoding=\"utf-8\") as file:\n writer = csv.writer(file)\n writer.writerow(headers)\n for key, value in default.items():\n writer.writerow([key] if key == value else [value, key])\n\n # Now read from disk\n with open(path, \"r\", encoding=\"utf-8\") as file:\n rows = list(csv.reader(file))\n\n mapping = {}\n if len(rows) >= 2:\n actual_headers = rows[0]\n if not actual_headers == list(headers):\n print(\n f'\"{file_name}\": Malformed headers - {actual_headers}.'\n + f\" Should be {list(headers)}. Ignoring row.\"\n )\n for row in rows[1:]:\n if len(row) == 0:\n # Windows newlines are sometimes read as empty rows. 
:champagne:\n continue\n if len(row) == 1:\n mapping[row[0]] = row[0]\n else:\n mapping[row[1]] = row[0]\n if len(row) > 2:\n print(\n f'\"{file_name}\": More than two values in row: {row}.'\n + \" Ignoring the extras.\"\n )\n return mapping\n\n\n_mapped_lists = {}\n_settings_lock = threading.Lock()\n_word_map_params = None\n\n\ndef _update_list(list_name: str, *csv_params):\n \"\"\"Update list with `list_name` from a csv on disk.\n\n `csv_params` will be passed to `_load_csv_dict`.\n\n \"\"\"\n global ctx\n ctx.lists[list_name] = _load_csv_dict(*csv_params)\n\n\ndef _update_word_map(*csv_params):\n \"\"\"Update `dictate.word_map` from disk.\n\n `csv_params` will be passed to `_load_csv_dict`.\n\n \"\"\"\n global ctx\n ctx.settings[\"dictate.word_map\"] = _load_csv_dict(*csv_params)\n\n\ndef _update_lists(*_):\n \"\"\"Update all CSV lists from disk.\"\"\"\n print(\"Updating CSV lists...\")\n with _settings_lock:\n for list_name, csv_params in _mapped_lists.items():\n try:\n _update_list(list_name, *csv_params)\n except Exception as e:\n print(f'Error loading list \"{list_name}\": {e}')\n # Special case - `dictate.word_map` isn't a list.\n if _word_map_params:\n try:\n _update_word_map(*_word_map_params)\n except Exception as e:\n print(f'Error updating \"dictate.word_map\": {e}')\n\n\ndef bind_list_to_csv(\n list_name: str,\n csv_name: str,\n csv_headers: Tuple[str, str],\n default_values: Dict[str, str] = {},\n) -> None:\n \"\"\"Register a Talon list that should be updated from a CSV on disk.\n\n The CSV file will be created automatically in the \"settings\" dir if it\n doesn't exist. This directory can be tracked independently to\n `knausj_talon`, allowing the user to specify things like private vocab\n separately.\n\n Note the list must be declared separately.\n\n \"\"\"\n global _mapped_lists\n with _settings_lock:\n _update_list(list_name, csv_name, csv_headers, default_values)\n # If there were no errors, we can register it permanently.\n _mapped_lists[list_name] = (csv_name, csv_headers, default_values)\n\n\ndef bind_word_map_to_csv(\n csv_name: str, csv_headers: Tuple[str, str], default_values: Dict[str, str] = {}\n) -> None:\n \"\"\"Like `bind_list_to_csv`, but for the `dictate.word_map` setting.\n\n Since it is a setting, not a list, it has to be handled separately.\n\n \"\"\"\n global _word_map_params\n # TODO: Maybe a generic system for binding the dicts to settings? Only\n # implement if it's needed.\n with _settings_lock:\n _update_word_map(csv_name, csv_headers, default_values)\n # If there were no errors, we can register it permanently.\n _word_map_params = (csv_name, csv_headers, default_values)\n\n\nfs.watch(str(SETTINGS_DIR), _update_lists)\n", "path": "code/user_settings.py"}], "after_files": [{"content": "from talon import Module, fs, Context\nimport os\nimport csv\nfrom pathlib import Path\nfrom typing import Dict, List, Tuple\nimport threading\n\n\n# NOTE: This method requires this module to be one folder below the top-level\n# knausj folder.\nSETTINGS_DIR = Path(__file__).parents[1] / \"settings\"\n\nif not SETTINGS_DIR.is_dir():\n os.mkdir(SETTINGS_DIR)\n\nmod = Module()\nctx = Context()\n\n\ndef _load_csv_dict(\n file_name: str, headers=Tuple[str, str], default: Dict[str, str] = {}\n) -> Dict[str, str]:\n \"\"\"Load a word mapping from a CSV file. 
If it doesn't exist, create it.\"\"\"\n assert file_name.endswith(\".csv\")\n path = SETTINGS_DIR / file_name\n\n # Create the file if it doesn't exist\n if not SETTINGS_DIR.is_dir():\n os.mkdir(SETTINGS_DIR)\n if not path.is_file():\n with open(path, \"w\", encoding=\"utf-8\") as file:\n writer = csv.writer(file)\n writer.writerow(headers)\n for key, value in default.items():\n writer.writerow([key] if key == value else [value, key])\n\n # Now read from disk\n with open(path, \"r\", encoding=\"utf-8\") as file:\n rows = list(csv.reader(file))\n\n mapping = {}\n if len(rows) >= 2:\n actual_headers = rows[0]\n if not actual_headers == list(headers):\n print(\n f'\"{file_name}\": Malformed headers - {actual_headers}.'\n + f\" Should be {list(headers)}. Ignoring row.\"\n )\n for row in rows[1:]:\n if len(row) == 0:\n # Windows newlines are sometimes read as empty rows. :champagne:\n continue\n if len(row) == 1:\n output = spoken_form = row[0]\n else:\n output, spoken_form = row[:2]\n if len(row) > 2:\n print(\n f'\"{file_name}\": More than two values in row: {row}.'\n + \" Ignoring the extras.\"\n )\n # Leading/trailing whitespace in spoken form can prevent recognition.\n spoken_form = spoken_form.strip()\n mapping[spoken_form] = output\n return mapping\n\n\n_mapped_lists = {}\n_settings_lock = threading.Lock()\n_word_map_params = None\n\n\ndef _update_list(list_name: str, *csv_params):\n \"\"\"Update list with `list_name` from a csv on disk.\n\n `csv_params` will be passed to `_load_csv_dict`.\n\n \"\"\"\n global ctx\n ctx.lists[list_name] = _load_csv_dict(*csv_params)\n\n\ndef _update_word_map(*csv_params):\n \"\"\"Update `dictate.word_map` from disk.\n\n `csv_params` will be passed to `_load_csv_dict`.\n\n \"\"\"\n global ctx\n ctx.settings[\"dictate.word_map\"] = _load_csv_dict(*csv_params)\n\n\ndef _update_lists(*_):\n \"\"\"Update all CSV lists from disk.\"\"\"\n print(\"Updating CSV lists...\")\n with _settings_lock:\n for list_name, csv_params in _mapped_lists.items():\n try:\n _update_list(list_name, *csv_params)\n except Exception as e:\n print(f'Error loading list \"{list_name}\": {e}')\n # Special case - `dictate.word_map` isn't a list.\n if _word_map_params:\n try:\n _update_word_map(*_word_map_params)\n except Exception as e:\n print(f'Error updating \"dictate.word_map\": {e}')\n\n\ndef bind_list_to_csv(\n list_name: str,\n csv_name: str,\n csv_headers: Tuple[str, str],\n default_values: Dict[str, str] = {},\n) -> None:\n \"\"\"Register a Talon list that should be updated from a CSV on disk.\n\n The CSV file will be created automatically in the \"settings\" dir if it\n doesn't exist. This directory can be tracked independently to\n `knausj_talon`, allowing the user to specify things like private vocab\n separately.\n\n Note the list must be declared separately.\n\n \"\"\"\n global _mapped_lists\n with _settings_lock:\n _update_list(list_name, csv_name, csv_headers, default_values)\n # If there were no errors, we can register it permanently.\n _mapped_lists[list_name] = (csv_name, csv_headers, default_values)\n\n\ndef bind_word_map_to_csv(\n csv_name: str, csv_headers: Tuple[str, str], default_values: Dict[str, str] = {}\n) -> None:\n \"\"\"Like `bind_list_to_csv`, but for the `dictate.word_map` setting.\n\n Since it is a setting, not a list, it has to be handled separately.\n\n \"\"\"\n global _word_map_params\n # TODO: Maybe a generic system for binding the dicts to settings? 
Only\n # implement if it's needed.\n with _settings_lock:\n _update_word_map(csv_name, csv_headers, default_values)\n # If there were no errors, we can register it permanently.\n _word_map_params = (csv_name, csv_headers, default_values)\n\n\nfs.watch(str(SETTINGS_DIR), _update_lists)\n", "path": "code/user_settings.py"}]} | 1,781 | 198 |
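The `verification_info` column serializes the issue, the golden diff, and the complete file contents before and after the patch (`before_files`/`after_files`, each a list of objects with `content` and `path`). A hedged sketch of parsing it and rebuilding a unified diff from those file pairs with the standard library, which can be cross-checked against `golden_diff`; it assumes the two lists are aligned by position and that hunk context sizes match, neither of which is guaranteed by the header above:

```python
# Sketch: parse verification_info and regenerate a diff from the before/after files.
# Same loading assumptions as the snippet above.
import difflib
import json

from datasets import load_dataset

row = load_dataset("rasdani/github-patches", split="train")[0]
info = json.loads(row["verification_info"])
print(sorted(info))  # ['after_files', 'before_files', 'golden_diff', 'issue']

# Assumption: before_files and after_files are parallel lists (same order of paths).
for before, after in zip(info["before_files"], info["after_files"]):
    rebuilt = difflib.unified_diff(
        before["content"].splitlines(keepends=True),
        after["content"].splitlines(keepends=True),
        fromfile="a/" + before["path"],
        tofile="b/" + after["path"],
    )
    print("".join(rebuilt))  # should roughly match row["golden_diff"], modulo hunk headers
```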
gh_patches_debug_40475 | rasdani/github-patches | git_diff | aio-libs-abandoned__aioredis-py-846 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`HashCommandsMixin.hset()` doesn't support multiple field=value pairs
> As of Redis 4.0.0, HSET is variadic and allows for multiple field/value pairs.
https://redis.io/commands/hset
And also info about HMSET usage:
> As per Redis 4.0.0, HMSET is considered deprecated. Please use HSET in new code.
https://redis.io/commands/hmset
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aioredis/commands/hash.py`
Content:
```
1 from itertools import chain
2
3 from aioredis.util import (
4 wait_ok,
5 wait_convert,
6 wait_make_dict,
7 _NOTSET,
8 _ScanIter,
9 )
10
11
12 class HashCommandsMixin:
13 """Hash commands mixin.
14
15 For commands details see: http://redis.io/commands#hash
16 """
17
18 def hdel(self, key, field, *fields):
19 """Delete one or more hash fields."""
20 return self.execute(b"HDEL", key, field, *fields)
21
22 def hexists(self, key, field):
23 """Determine if hash field exists."""
24 fut = self.execute(b"HEXISTS", key, field)
25 return wait_convert(fut, bool)
26
27 def hget(self, key, field, *, encoding=_NOTSET):
28 """Get the value of a hash field."""
29 return self.execute(b"HGET", key, field, encoding=encoding)
30
31 def hgetall(self, key, *, encoding=_NOTSET):
32 """Get all the fields and values in a hash."""
33 fut = self.execute(b"HGETALL", key, encoding=encoding)
34 return wait_make_dict(fut)
35
36 def hincrby(self, key, field, increment=1):
37 """Increment the integer value of a hash field by the given number."""
38 return self.execute(b"HINCRBY", key, field, increment)
39
40 def hincrbyfloat(self, key, field, increment=1.0):
41 """Increment the float value of a hash field by the given number."""
42 fut = self.execute(b"HINCRBYFLOAT", key, field, increment)
43 return wait_convert(fut, float)
44
45 def hkeys(self, key, *, encoding=_NOTSET):
46 """Get all the fields in a hash."""
47 return self.execute(b"HKEYS", key, encoding=encoding)
48
49 def hlen(self, key):
50 """Get the number of fields in a hash."""
51 return self.execute(b"HLEN", key)
52
53 def hmget(self, key, field, *fields, encoding=_NOTSET):
54 """Get the values of all the given fields."""
55 return self.execute(b"HMGET", key, field, *fields, encoding=encoding)
56
57 def hmset(self, key, field, value, *pairs):
58 """Set multiple hash fields to multiple values."""
59 if len(pairs) % 2 != 0:
60 raise TypeError("length of pairs must be even number")
61 return wait_ok(self.execute(b"HMSET", key, field, value, *pairs))
62
63 def hmset_dict(self, key, *args, **kwargs):
64 """Set multiple hash fields to multiple values.
65
66 dict can be passed as first positional argument:
67
68 >>> await redis.hmset_dict(
69 ... 'key', {'field1': 'value1', 'field2': 'value2'})
70
71 or keyword arguments can be used:
72
73 >>> await redis.hmset_dict(
74 ... 'key', field1='value1', field2='value2')
75
76 or dict argument can be mixed with kwargs:
77
78 >>> await redis.hmset_dict(
79 ... 'key', {'field1': 'value1'}, field2='value2')
80
81 .. note:: ``dict`` and ``kwargs`` not get mixed into single dictionary,
82 if both specified and both have same key(s) -- ``kwargs`` will win:
83
84 >>> await redis.hmset_dict('key', {'foo': 'bar'}, foo='baz')
85 >>> await redis.hget('key', 'foo', encoding='utf-8')
86 'baz'
87
88 """
89 if not args and not kwargs:
90 raise TypeError("args or kwargs must be specified")
91 pairs = ()
92 if len(args) > 1:
93 raise TypeError("single positional argument allowed")
94 elif len(args) == 1:
95 if not isinstance(args[0], dict):
96 raise TypeError("args[0] must be dict")
97 elif not args[0] and not kwargs:
98 raise ValueError("args[0] is empty dict")
99 pairs = chain.from_iterable(args[0].items())
100 kwargs_pairs = chain.from_iterable(kwargs.items())
101 return wait_ok(self.execute(b"HMSET", key, *chain(pairs, kwargs_pairs)))
102
103 def hset(self, key, field, value):
104 """Set the string value of a hash field."""
105 return self.execute(b"HSET", key, field, value)
106
107 def hsetnx(self, key, field, value):
108 """Set the value of a hash field, only if the field does not exist."""
109 return self.execute(b"HSETNX", key, field, value)
110
111 def hvals(self, key, *, encoding=_NOTSET):
112 """Get all the values in a hash."""
113 return self.execute(b"HVALS", key, encoding=encoding)
114
115 def hscan(self, key, cursor=0, match=None, count=None):
116 """Incrementally iterate hash fields and associated values."""
117 args = [key, cursor]
118 match is not None and args.extend([b"MATCH", match])
119 count is not None and args.extend([b"COUNT", count])
120 fut = self.execute(b"HSCAN", *args)
121 return wait_convert(fut, _make_pairs)
122
123 def ihscan(self, key, *, match=None, count=None):
124 """Incrementally iterate sorted set items using async for.
125
126 Usage example:
127
128 >>> async for name, val in redis.ihscan(key, match='something*'):
129 ... print('Matched:', name, '->', val)
130
131 """
132 return _ScanIter(lambda cur: self.hscan(key, cur, match=match, count=count))
133
134 def hstrlen(self, key, field):
135 """Get the length of the value of a hash field."""
136 return self.execute(b"HSTRLEN", key, field)
137
138
139 def _make_pairs(obj):
140 it = iter(obj[1])
141 return (int(obj[0]), list(zip(it, it)))
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/aioredis/commands/hash.py b/aioredis/commands/hash.py
--- a/aioredis/commands/hash.py
+++ b/aioredis/commands/hash.py
@@ -1,3 +1,4 @@
+import warnings
from itertools import chain
from aioredis.util import (
@@ -55,7 +56,17 @@
return self.execute(b"HMGET", key, field, *fields, encoding=encoding)
def hmset(self, key, field, value, *pairs):
- """Set multiple hash fields to multiple values."""
+ """Set multiple hash fields to multiple values.
+
+ .. deprecated::
+ HMSET is deprecated since redis 4.0.0, use HSET instead.
+
+ """
+ warnings.warn(
+ "%s.hmset() is deprecated since redis 4.0.0, use %s.hset() instead"
+ % (self.__class__.__name__, self.__class__.__name__),
+ DeprecationWarning
+ )
if len(pairs) % 2 != 0:
raise TypeError("length of pairs must be even number")
return wait_ok(self.execute(b"HMSET", key, field, value, *pairs))
@@ -63,6 +74,9 @@
def hmset_dict(self, key, *args, **kwargs):
"""Set multiple hash fields to multiple values.
+ .. deprecated::
+ HMSET is deprecated since redis 4.0.0, use HSET instead.
+
dict can be passed as first positional argument:
>>> await redis.hmset_dict(
@@ -86,6 +100,12 @@
'baz'
"""
+ warnings.warn(
+ "%s.hmset() is deprecated since redis 4.0.0, use %s.hset() instead"
+ % (self.__class__.__name__, self.__class__.__name__),
+ DeprecationWarning
+ )
+
if not args and not kwargs:
raise TypeError("args or kwargs must be specified")
pairs = ()
@@ -100,9 +120,31 @@
kwargs_pairs = chain.from_iterable(kwargs.items())
return wait_ok(self.execute(b"HMSET", key, *chain(pairs, kwargs_pairs)))
- def hset(self, key, field, value):
- """Set the string value of a hash field."""
- return self.execute(b"HSET", key, field, value)
+ def hset(self, key, field=None, value=None, mapping=None):
+ """Set multiple hash fields to multiple values.
+
+ Setting a single hash field to a value:
+ >>> await redis.hset('key', 'some_field', 'some_value')
+
+ Setting values for multipe has fields at once:
+ >>> await redis.hset('key', mapping={'field1': 'abc', 'field2': 'def'})
+
+ .. note:: Using both the field/value pair and mapping at the same time
+ will also work.
+
+ """
+ if not field and not mapping:
+ raise ValueError("hset needs either a field/value pair or mapping")
+ if mapping and not isinstance(mapping, dict):
+ raise TypeError("'mapping' should be dict")
+
+ items = []
+ if field:
+ items.extend((field, value))
+ if mapping:
+ for item in mapping.items():
+ items.extend(item)
+ return self.execute(b"HSET", key, *items)
def hsetnx(self, key, field, value):
"""Set the value of a hash field, only if the field does not exist."""
| {"golden_diff": "diff --git a/aioredis/commands/hash.py b/aioredis/commands/hash.py\n--- a/aioredis/commands/hash.py\n+++ b/aioredis/commands/hash.py\n@@ -1,3 +1,4 @@\n+import warnings\n from itertools import chain\n \n from aioredis.util import (\n@@ -55,7 +56,17 @@\n return self.execute(b\"HMGET\", key, field, *fields, encoding=encoding)\n \n def hmset(self, key, field, value, *pairs):\n- \"\"\"Set multiple hash fields to multiple values.\"\"\"\n+ \"\"\"Set multiple hash fields to multiple values.\n+\n+ .. deprecated::\n+ HMSET is deprecated since redis 4.0.0, use HSET instead.\n+\n+ \"\"\"\n+ warnings.warn(\n+ \"%s.hmset() is deprecated since redis 4.0.0, use %s.hset() instead\"\n+ % (self.__class__.__name__, self.__class__.__name__),\n+ DeprecationWarning\n+ )\n if len(pairs) % 2 != 0:\n raise TypeError(\"length of pairs must be even number\")\n return wait_ok(self.execute(b\"HMSET\", key, field, value, *pairs))\n@@ -63,6 +74,9 @@\n def hmset_dict(self, key, *args, **kwargs):\n \"\"\"Set multiple hash fields to multiple values.\n \n+ .. deprecated::\n+ HMSET is deprecated since redis 4.0.0, use HSET instead.\n+\n dict can be passed as first positional argument:\n \n >>> await redis.hmset_dict(\n@@ -86,6 +100,12 @@\n 'baz'\n \n \"\"\"\n+ warnings.warn(\n+ \"%s.hmset() is deprecated since redis 4.0.0, use %s.hset() instead\"\n+ % (self.__class__.__name__, self.__class__.__name__),\n+ DeprecationWarning\n+ )\n+\n if not args and not kwargs:\n raise TypeError(\"args or kwargs must be specified\")\n pairs = ()\n@@ -100,9 +120,31 @@\n kwargs_pairs = chain.from_iterable(kwargs.items())\n return wait_ok(self.execute(b\"HMSET\", key, *chain(pairs, kwargs_pairs)))\n \n- def hset(self, key, field, value):\n- \"\"\"Set the string value of a hash field.\"\"\"\n- return self.execute(b\"HSET\", key, field, value)\n+ def hset(self, key, field=None, value=None, mapping=None):\n+ \"\"\"Set multiple hash fields to multiple values.\n+\n+ Setting a single hash field to a value:\n+ >>> await redis.hset('key', 'some_field', 'some_value')\n+\n+ Setting values for multipe has fields at once:\n+ >>> await redis.hset('key', mapping={'field1': 'abc', 'field2': 'def'})\n+\n+ .. note:: Using both the field/value pair and mapping at the same time\n+ will also work.\n+\n+ \"\"\"\n+ if not field and not mapping:\n+ raise ValueError(\"hset needs either a field/value pair or mapping\")\n+ if mapping and not isinstance(mapping, dict):\n+ raise TypeError(\"'mapping' should be dict\")\n+\n+ items = []\n+ if field:\n+ items.extend((field, value))\n+ if mapping:\n+ for item in mapping.items():\n+ items.extend(item)\n+ return self.execute(b\"HSET\", key, *items)\n \n def hsetnx(self, key, field, value):\n \"\"\"Set the value of a hash field, only if the field does not exist.\"\"\"\n", "issue": "`HashCommandsMixin.hset()` doesn't support multiple field=value pairs\n> As of Redis 4.0.0, HSET is variadic and allows for multiple field/value pairs.\r\n\r\nhttps://redis.io/commands/hset\r\n\r\nAnd also info about HMSET usage:\r\n> As per Redis 4.0.0, HMSET is considered deprecated. 
Please use HSET in new code.\r\n\r\nhttps://redis.io/commands/hmset\n", "before_files": [{"content": "from itertools import chain\n\nfrom aioredis.util import (\n wait_ok,\n wait_convert,\n wait_make_dict,\n _NOTSET,\n _ScanIter,\n)\n\n\nclass HashCommandsMixin:\n \"\"\"Hash commands mixin.\n\n For commands details see: http://redis.io/commands#hash\n \"\"\"\n\n def hdel(self, key, field, *fields):\n \"\"\"Delete one or more hash fields.\"\"\"\n return self.execute(b\"HDEL\", key, field, *fields)\n\n def hexists(self, key, field):\n \"\"\"Determine if hash field exists.\"\"\"\n fut = self.execute(b\"HEXISTS\", key, field)\n return wait_convert(fut, bool)\n\n def hget(self, key, field, *, encoding=_NOTSET):\n \"\"\"Get the value of a hash field.\"\"\"\n return self.execute(b\"HGET\", key, field, encoding=encoding)\n\n def hgetall(self, key, *, encoding=_NOTSET):\n \"\"\"Get all the fields and values in a hash.\"\"\"\n fut = self.execute(b\"HGETALL\", key, encoding=encoding)\n return wait_make_dict(fut)\n\n def hincrby(self, key, field, increment=1):\n \"\"\"Increment the integer value of a hash field by the given number.\"\"\"\n return self.execute(b\"HINCRBY\", key, field, increment)\n\n def hincrbyfloat(self, key, field, increment=1.0):\n \"\"\"Increment the float value of a hash field by the given number.\"\"\"\n fut = self.execute(b\"HINCRBYFLOAT\", key, field, increment)\n return wait_convert(fut, float)\n\n def hkeys(self, key, *, encoding=_NOTSET):\n \"\"\"Get all the fields in a hash.\"\"\"\n return self.execute(b\"HKEYS\", key, encoding=encoding)\n\n def hlen(self, key):\n \"\"\"Get the number of fields in a hash.\"\"\"\n return self.execute(b\"HLEN\", key)\n\n def hmget(self, key, field, *fields, encoding=_NOTSET):\n \"\"\"Get the values of all the given fields.\"\"\"\n return self.execute(b\"HMGET\", key, field, *fields, encoding=encoding)\n\n def hmset(self, key, field, value, *pairs):\n \"\"\"Set multiple hash fields to multiple values.\"\"\"\n if len(pairs) % 2 != 0:\n raise TypeError(\"length of pairs must be even number\")\n return wait_ok(self.execute(b\"HMSET\", key, field, value, *pairs))\n\n def hmset_dict(self, key, *args, **kwargs):\n \"\"\"Set multiple hash fields to multiple values.\n\n dict can be passed as first positional argument:\n\n >>> await redis.hmset_dict(\n ... 'key', {'field1': 'value1', 'field2': 'value2'})\n\n or keyword arguments can be used:\n\n >>> await redis.hmset_dict(\n ... 'key', field1='value1', field2='value2')\n\n or dict argument can be mixed with kwargs:\n\n >>> await redis.hmset_dict(\n ... 'key', {'field1': 'value1'}, field2='value2')\n\n .. 
note:: ``dict`` and ``kwargs`` not get mixed into single dictionary,\n if both specified and both have same key(s) -- ``kwargs`` will win:\n\n >>> await redis.hmset_dict('key', {'foo': 'bar'}, foo='baz')\n >>> await redis.hget('key', 'foo', encoding='utf-8')\n 'baz'\n\n \"\"\"\n if not args and not kwargs:\n raise TypeError(\"args or kwargs must be specified\")\n pairs = ()\n if len(args) > 1:\n raise TypeError(\"single positional argument allowed\")\n elif len(args) == 1:\n if not isinstance(args[0], dict):\n raise TypeError(\"args[0] must be dict\")\n elif not args[0] and not kwargs:\n raise ValueError(\"args[0] is empty dict\")\n pairs = chain.from_iterable(args[0].items())\n kwargs_pairs = chain.from_iterable(kwargs.items())\n return wait_ok(self.execute(b\"HMSET\", key, *chain(pairs, kwargs_pairs)))\n\n def hset(self, key, field, value):\n \"\"\"Set the string value of a hash field.\"\"\"\n return self.execute(b\"HSET\", key, field, value)\n\n def hsetnx(self, key, field, value):\n \"\"\"Set the value of a hash field, only if the field does not exist.\"\"\"\n return self.execute(b\"HSETNX\", key, field, value)\n\n def hvals(self, key, *, encoding=_NOTSET):\n \"\"\"Get all the values in a hash.\"\"\"\n return self.execute(b\"HVALS\", key, encoding=encoding)\n\n def hscan(self, key, cursor=0, match=None, count=None):\n \"\"\"Incrementally iterate hash fields and associated values.\"\"\"\n args = [key, cursor]\n match is not None and args.extend([b\"MATCH\", match])\n count is not None and args.extend([b\"COUNT\", count])\n fut = self.execute(b\"HSCAN\", *args)\n return wait_convert(fut, _make_pairs)\n\n def ihscan(self, key, *, match=None, count=None):\n \"\"\"Incrementally iterate sorted set items using async for.\n\n Usage example:\n\n >>> async for name, val in redis.ihscan(key, match='something*'):\n ... 
print('Matched:', name, '->', val)\n\n \"\"\"\n return _ScanIter(lambda cur: self.hscan(key, cur, match=match, count=count))\n\n def hstrlen(self, key, field):\n \"\"\"Get the length of the value of a hash field.\"\"\"\n return self.execute(b\"HSTRLEN\", key, field)\n\n\ndef _make_pairs(obj):\n it = iter(obj[1])\n return (int(obj[0]), list(zip(it, it)))\n", "path": "aioredis/commands/hash.py"}], "after_files": [{"content": "import warnings\nfrom itertools import chain\n\nfrom aioredis.util import (\n wait_ok,\n wait_convert,\n wait_make_dict,\n _NOTSET,\n _ScanIter,\n)\n\n\nclass HashCommandsMixin:\n \"\"\"Hash commands mixin.\n\n For commands details see: http://redis.io/commands#hash\n \"\"\"\n\n def hdel(self, key, field, *fields):\n \"\"\"Delete one or more hash fields.\"\"\"\n return self.execute(b\"HDEL\", key, field, *fields)\n\n def hexists(self, key, field):\n \"\"\"Determine if hash field exists.\"\"\"\n fut = self.execute(b\"HEXISTS\", key, field)\n return wait_convert(fut, bool)\n\n def hget(self, key, field, *, encoding=_NOTSET):\n \"\"\"Get the value of a hash field.\"\"\"\n return self.execute(b\"HGET\", key, field, encoding=encoding)\n\n def hgetall(self, key, *, encoding=_NOTSET):\n \"\"\"Get all the fields and values in a hash.\"\"\"\n fut = self.execute(b\"HGETALL\", key, encoding=encoding)\n return wait_make_dict(fut)\n\n def hincrby(self, key, field, increment=1):\n \"\"\"Increment the integer value of a hash field by the given number.\"\"\"\n return self.execute(b\"HINCRBY\", key, field, increment)\n\n def hincrbyfloat(self, key, field, increment=1.0):\n \"\"\"Increment the float value of a hash field by the given number.\"\"\"\n fut = self.execute(b\"HINCRBYFLOAT\", key, field, increment)\n return wait_convert(fut, float)\n\n def hkeys(self, key, *, encoding=_NOTSET):\n \"\"\"Get all the fields in a hash.\"\"\"\n return self.execute(b\"HKEYS\", key, encoding=encoding)\n\n def hlen(self, key):\n \"\"\"Get the number of fields in a hash.\"\"\"\n return self.execute(b\"HLEN\", key)\n\n def hmget(self, key, field, *fields, encoding=_NOTSET):\n \"\"\"Get the values of all the given fields.\"\"\"\n return self.execute(b\"HMGET\", key, field, *fields, encoding=encoding)\n\n def hmset(self, key, field, value, *pairs):\n \"\"\"Set multiple hash fields to multiple values.\n\n .. deprecated::\n HMSET is deprecated since redis 4.0.0, use HSET instead.\n\n \"\"\"\n warnings.warn(\n \"%s.hmset() is deprecated since redis 4.0.0, use %s.hset() instead\"\n % (self.__class__.__name__, self.__class__.__name__),\n DeprecationWarning\n )\n if len(pairs) % 2 != 0:\n raise TypeError(\"length of pairs must be even number\")\n return wait_ok(self.execute(b\"HMSET\", key, field, value, *pairs))\n\n def hmset_dict(self, key, *args, **kwargs):\n \"\"\"Set multiple hash fields to multiple values.\n\n .. deprecated::\n HMSET is deprecated since redis 4.0.0, use HSET instead.\n\n dict can be passed as first positional argument:\n\n >>> await redis.hmset_dict(\n ... 'key', {'field1': 'value1', 'field2': 'value2'})\n\n or keyword arguments can be used:\n\n >>> await redis.hmset_dict(\n ... 'key', field1='value1', field2='value2')\n\n or dict argument can be mixed with kwargs:\n\n >>> await redis.hmset_dict(\n ... 'key', {'field1': 'value1'}, field2='value2')\n\n .. 
note:: ``dict`` and ``kwargs`` not get mixed into single dictionary,\n if both specified and both have same key(s) -- ``kwargs`` will win:\n\n >>> await redis.hmset_dict('key', {'foo': 'bar'}, foo='baz')\n >>> await redis.hget('key', 'foo', encoding='utf-8')\n 'baz'\n\n \"\"\"\n warnings.warn(\n \"%s.hmset() is deprecated since redis 4.0.0, use %s.hset() instead\"\n % (self.__class__.__name__, self.__class__.__name__),\n DeprecationWarning\n )\n\n if not args and not kwargs:\n raise TypeError(\"args or kwargs must be specified\")\n pairs = ()\n if len(args) > 1:\n raise TypeError(\"single positional argument allowed\")\n elif len(args) == 1:\n if not isinstance(args[0], dict):\n raise TypeError(\"args[0] must be dict\")\n elif not args[0] and not kwargs:\n raise ValueError(\"args[0] is empty dict\")\n pairs = chain.from_iterable(args[0].items())\n kwargs_pairs = chain.from_iterable(kwargs.items())\n return wait_ok(self.execute(b\"HMSET\", key, *chain(pairs, kwargs_pairs)))\n\n def hset(self, key, field=None, value=None, mapping=None):\n \"\"\"Set multiple hash fields to multiple values.\n\n Setting a single hash field to a value:\n >>> await redis.hset('key', 'some_field', 'some_value')\n\n Setting values for multipe has fields at once:\n >>> await redis.hset('key', mapping={'field1': 'abc', 'field2': 'def'})\n\n .. note:: Using both the field/value pair and mapping at the same time\n will also work.\n\n \"\"\"\n if not field and not mapping:\n raise ValueError(\"hset needs either a field/value pair or mapping\")\n if mapping and not isinstance(mapping, dict):\n raise TypeError(\"'mapping' should be dict\")\n\n items = []\n if field:\n items.extend((field, value))\n if mapping:\n for item in mapping.items():\n items.extend(item)\n return self.execute(b\"HSET\", key, *items)\n\n def hsetnx(self, key, field, value):\n \"\"\"Set the value of a hash field, only if the field does not exist.\"\"\"\n return self.execute(b\"HSETNX\", key, field, value)\n\n def hvals(self, key, *, encoding=_NOTSET):\n \"\"\"Get all the values in a hash.\"\"\"\n return self.execute(b\"HVALS\", key, encoding=encoding)\n\n def hscan(self, key, cursor=0, match=None, count=None):\n \"\"\"Incrementally iterate hash fields and associated values.\"\"\"\n args = [key, cursor]\n match is not None and args.extend([b\"MATCH\", match])\n count is not None and args.extend([b\"COUNT\", count])\n fut = self.execute(b\"HSCAN\", *args)\n return wait_convert(fut, _make_pairs)\n\n def ihscan(self, key, *, match=None, count=None):\n \"\"\"Incrementally iterate sorted set items using async for.\n\n Usage example:\n\n >>> async for name, val in redis.ihscan(key, match='something*'):\n ... print('Matched:', name, '->', val)\n\n \"\"\"\n return _ScanIter(lambda cur: self.hscan(key, cur, match=match, count=count))\n\n def hstrlen(self, key, field):\n \"\"\"Get the length of the value of a hash field.\"\"\"\n return self.execute(b\"HSTRLEN\", key, field)\n\n\ndef _make_pairs(obj):\n it = iter(obj[1])\n return (int(obj[0]), list(zip(it, it)))\n", "path": "aioredis/commands/hash.py"}]} | 1,957 | 806 |
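The `num_tokens` and `num_tokens_diff` columns (ranges given in the header row) allow records to be restricted to a prompt and patch budget without re-tokenizing anything. A small sketch; the 1,024/256 thresholds are purely illustrative:

```python
# Sketch: keep only records whose prompt and patch fit a chosen token budget.
# Same loading assumptions as the snippets above; thresholds are illustrative.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

small = ds.filter(lambda r: r["num_tokens"] <= 1024 and r["num_tokens_diff"] <= 256)
print(len(ds), "->", len(small), "records within budget")
```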
gh_patches_debug_16347 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-506 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Organisations listed in filter on project overview unsorted
The list of organisations listed in filter on the project overview page is unsorted and determined by the order of creating the organisations. I think it would be best to sort them alphabetically.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/projects/views.py`
Content:
```
1 from datetime import datetime
2 import django_filters
3 from django.apps import apps
4 from django.conf import settings
5 from django.utils.translation import ugettext_lazy as _
6
7 from adhocracy4.filters import views as filter_views
8 from adhocracy4.filters.filters import DefaultsFilterSet
9 from adhocracy4.projects import models as project_models
10
11 from apps.contrib.widgets import DropdownLinkWidget
12 from apps.dashboard import blueprints
13
14
15 class OrderingWidget(DropdownLinkWidget):
16 label = _('Ordering')
17 right = True
18
19
20 class OrganisationWidget(DropdownLinkWidget):
21 label = _('Organisation')
22
23
24 class ArchivedWidget(DropdownLinkWidget):
25 label = _('Archived')
26
27 def __init__(self, attrs=None):
28 choices = (
29 ('', _('All')),
30 ('false', _('No')),
31 ('true', _('Yes')),
32 )
33 super().__init__(attrs, choices)
34
35
36 class YearWidget(DropdownLinkWidget):
37 label = _('Year')
38
39 def __init__(self, attrs=None):
40 choices = (('', _('Any')),)
41 now = datetime.now().year
42 try:
43 first_year = project_models.Project.objects.earliest('created').\
44 created.year
45 except project_models.Project.DoesNotExist:
46 first_year = now
47 for year in range(now, first_year - 1, -1):
48 choices += (year, year),
49 super().__init__(attrs, choices)
50
51
52 class TypeWidget(DropdownLinkWidget):
53 label = _('Project Type')
54
55 def __init__(self, attrs=None):
56 choices = (('', _('Any')),)
57 for blueprint_key, blueprint in blueprints.blueprints:
58 choices += (blueprint_key, blueprint.title),
59 super().__init__(attrs, choices)
60
61
62 class ProjectFilterSet(DefaultsFilterSet):
63
64 defaults = {
65 'is_archived': 'false'
66 }
67
68 ordering = django_filters.OrderingFilter(
69 choices=(
70 ('-created', _('Most recent')),
71 ),
72 empty_label=None,
73 widget=OrderingWidget,
74 )
75
76 organisation = django_filters.ModelChoiceFilter(
77 queryset=apps.get_model(settings.A4_ORGANISATIONS_MODEL).objects.all(),
78 widget=OrganisationWidget,
79 )
80
81 is_archived = django_filters.BooleanFilter(
82 widget=ArchivedWidget
83 )
84
85 created = django_filters.NumberFilter(
86 name='created',
87 lookup_expr='year',
88 widget=YearWidget,
89 )
90
91 typ = django_filters.CharFilter(
92 widget=TypeWidget,
93 )
94
95 class Meta:
96 model = project_models.Project
97 fields = ['organisation', 'is_archived', 'created', 'typ']
98
99
100 class ProjectListView(filter_views.FilteredListView):
101 model = project_models.Project
102 paginate_by = 16
103 filter_set = ProjectFilterSet
104
105 def get_queryset(self):
106 return super().get_queryset().filter(is_draft=False)
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/projects/views.py b/apps/projects/views.py
--- a/apps/projects/views.py
+++ b/apps/projects/views.py
@@ -54,7 +54,8 @@
def __init__(self, attrs=None):
choices = (('', _('Any')),)
- for blueprint_key, blueprint in blueprints.blueprints:
+ sorted_blueprints = sorted(blueprints.blueprints, key=lambda a: a[1])
+ for blueprint_key, blueprint in sorted_blueprints:
choices += (blueprint_key, blueprint.title),
super().__init__(attrs, choices)
@@ -74,7 +75,8 @@
)
organisation = django_filters.ModelChoiceFilter(
- queryset=apps.get_model(settings.A4_ORGANISATIONS_MODEL).objects.all(),
+ queryset=apps.get_model(settings.A4_ORGANISATIONS_MODEL).objects
+ .order_by('name'),
widget=OrganisationWidget,
)
| {"golden_diff": "diff --git a/apps/projects/views.py b/apps/projects/views.py\n--- a/apps/projects/views.py\n+++ b/apps/projects/views.py\n@@ -54,7 +54,8 @@\n \n def __init__(self, attrs=None):\n choices = (('', _('Any')),)\n- for blueprint_key, blueprint in blueprints.blueprints:\n+ sorted_blueprints = sorted(blueprints.blueprints, key=lambda a: a[1])\n+ for blueprint_key, blueprint in sorted_blueprints:\n choices += (blueprint_key, blueprint.title),\n super().__init__(attrs, choices)\n \n@@ -74,7 +75,8 @@\n )\n \n organisation = django_filters.ModelChoiceFilter(\n- queryset=apps.get_model(settings.A4_ORGANISATIONS_MODEL).objects.all(),\n+ queryset=apps.get_model(settings.A4_ORGANISATIONS_MODEL).objects\n+ .order_by('name'),\n widget=OrganisationWidget,\n )\n", "issue": "Organisations listed in filter on project overview unsorted\nThe list of organisations listed in filter on the project overview page is unsorted and determined by the order of creating the organisations. I think it would be best to sort them alphabetically.\n", "before_files": [{"content": "from datetime import datetime\nimport django_filters\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.filters import views as filter_views\nfrom adhocracy4.filters.filters import DefaultsFilterSet\nfrom adhocracy4.projects import models as project_models\n\nfrom apps.contrib.widgets import DropdownLinkWidget\nfrom apps.dashboard import blueprints\n\n\nclass OrderingWidget(DropdownLinkWidget):\n label = _('Ordering')\n right = True\n\n\nclass OrganisationWidget(DropdownLinkWidget):\n label = _('Organisation')\n\n\nclass ArchivedWidget(DropdownLinkWidget):\n label = _('Archived')\n\n def __init__(self, attrs=None):\n choices = (\n ('', _('All')),\n ('false', _('No')),\n ('true', _('Yes')),\n )\n super().__init__(attrs, choices)\n\n\nclass YearWidget(DropdownLinkWidget):\n label = _('Year')\n\n def __init__(self, attrs=None):\n choices = (('', _('Any')),)\n now = datetime.now().year\n try:\n first_year = project_models.Project.objects.earliest('created').\\\n created.year\n except project_models.Project.DoesNotExist:\n first_year = now\n for year in range(now, first_year - 1, -1):\n choices += (year, year),\n super().__init__(attrs, choices)\n\n\nclass TypeWidget(DropdownLinkWidget):\n label = _('Project Type')\n\n def __init__(self, attrs=None):\n choices = (('', _('Any')),)\n for blueprint_key, blueprint in blueprints.blueprints:\n choices += (blueprint_key, blueprint.title),\n super().__init__(attrs, choices)\n\n\nclass ProjectFilterSet(DefaultsFilterSet):\n\n defaults = {\n 'is_archived': 'false'\n }\n\n ordering = django_filters.OrderingFilter(\n choices=(\n ('-created', _('Most recent')),\n ),\n empty_label=None,\n widget=OrderingWidget,\n )\n\n organisation = django_filters.ModelChoiceFilter(\n queryset=apps.get_model(settings.A4_ORGANISATIONS_MODEL).objects.all(),\n widget=OrganisationWidget,\n )\n\n is_archived = django_filters.BooleanFilter(\n widget=ArchivedWidget\n )\n\n created = django_filters.NumberFilter(\n name='created',\n lookup_expr='year',\n widget=YearWidget,\n )\n\n typ = django_filters.CharFilter(\n widget=TypeWidget,\n )\n\n class Meta:\n model = project_models.Project\n fields = ['organisation', 'is_archived', 'created', 'typ']\n\n\nclass ProjectListView(filter_views.FilteredListView):\n model = project_models.Project\n paginate_by = 16\n filter_set = ProjectFilterSet\n\n def get_queryset(self):\n return 
super().get_queryset().filter(is_draft=False)\n", "path": "apps/projects/views.py"}], "after_files": [{"content": "from datetime import datetime\nimport django_filters\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.filters import views as filter_views\nfrom adhocracy4.filters.filters import DefaultsFilterSet\nfrom adhocracy4.projects import models as project_models\n\nfrom apps.contrib.widgets import DropdownLinkWidget\nfrom apps.dashboard import blueprints\n\n\nclass OrderingWidget(DropdownLinkWidget):\n label = _('Ordering')\n right = True\n\n\nclass OrganisationWidget(DropdownLinkWidget):\n label = _('Organisation')\n\n\nclass ArchivedWidget(DropdownLinkWidget):\n label = _('Archived')\n\n def __init__(self, attrs=None):\n choices = (\n ('', _('All')),\n ('false', _('No')),\n ('true', _('Yes')),\n )\n super().__init__(attrs, choices)\n\n\nclass YearWidget(DropdownLinkWidget):\n label = _('Year')\n\n def __init__(self, attrs=None):\n choices = (('', _('Any')),)\n now = datetime.now().year\n try:\n first_year = project_models.Project.objects.earliest('created').\\\n created.year\n except project_models.Project.DoesNotExist:\n first_year = now\n for year in range(now, first_year - 1, -1):\n choices += (year, year),\n super().__init__(attrs, choices)\n\n\nclass TypeWidget(DropdownLinkWidget):\n label = _('Project Type')\n\n def __init__(self, attrs=None):\n choices = (('', _('Any')),)\n sorted_blueprints = sorted(blueprints.blueprints, key=lambda a: a[1])\n for blueprint_key, blueprint in sorted_blueprints:\n choices += (blueprint_key, blueprint.title),\n super().__init__(attrs, choices)\n\n\nclass ProjectFilterSet(DefaultsFilterSet):\n\n defaults = {\n 'is_archived': 'false'\n }\n\n ordering = django_filters.OrderingFilter(\n choices=(\n ('-created', _('Most recent')),\n ),\n empty_label=None,\n widget=OrderingWidget,\n )\n\n organisation = django_filters.ModelChoiceFilter(\n queryset=apps.get_model(settings.A4_ORGANISATIONS_MODEL).objects\n .order_by('name'),\n widget=OrganisationWidget,\n )\n\n is_archived = django_filters.BooleanFilter(\n widget=ArchivedWidget\n )\n\n created = django_filters.NumberFilter(\n name='created',\n lookup_expr='year',\n widget=YearWidget,\n )\n\n typ = django_filters.CharFilter(\n widget=TypeWidget,\n )\n\n class Meta:\n model = project_models.Project\n fields = ['organisation', 'is_archived', 'created', 'typ']\n\n\nclass ProjectListView(filter_views.FilteredListView):\n model = project_models.Project\n paginate_by = 16\n filter_set = ProjectFilterSet\n\n def get_queryset(self):\n return super().get_queryset().filter(is_draft=False)\n", "path": "apps/projects/views.py"}]} | 1,136 | 205 |
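Both halves of the fix above are plain ordering idioms: `.order_by('name')` alphabetizes the organisation queryset that `ModelChoiceFilter` turns into dropdown choices, and `sorted(...)` does the same for the in-memory blueprint registry. A minimal standalone sketch of the list half, with invented blueprint data (the `Blueprint` namedtuple is a stand-in, not the real adhocracy4 structure):

```python
from collections import namedtuple

# Hypothetical stand-in for the dashboard's blueprint registry: a list of
# (key, blueprint) pairs whose order reflects registration, not the title.
Blueprint = namedtuple('Blueprint', ['title', 'description'])

blueprints = [
    ('poll', Blueprint('Poll', 'Ask a single question')),
    ('brainstorming', Blueprint('Brainstorming', 'Collect ideas')),
    ('idea-challenge', Blueprint('Idea Challenge', 'Run a contest')),
]

# Sort the pairs by the human-readable title so the dropdown choices are
# alphabetical instead of reflecting registration order.
sorted_blueprints = sorted(blueprints, key=lambda pair: pair[1].title)

choices = (('', 'Any'),)
for blueprint_key, blueprint in sorted_blueprints:
    choices += ((blueprint_key, blueprint.title),)

print(choices)
# (('', 'Any'), ('brainstorming', 'Brainstorming'),
#  ('idea-challenge', 'Idea Challenge'), ('poll', 'Poll'))
```

The queryset half needs no widget changes because a `ModelChoiceFilter` renders its choices in queryset iteration order, so ordering the queryset alone alphabetizes the organisation widget.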
gh_patches_debug_42454 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2641 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider holiday_stationstores is broken
During the global build at 2021-08-18-14-42-26, spider **holiday_stationstores** failed with **552 features** and **10 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/logs/holiday_stationstores.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/holiday_stationstores.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/holiday_stationstores.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/holiday_stationstores.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4 import re
5
6 from locations.items import GeojsonPointItem
7
8
9 class HolidayStationstoreSpider(scrapy.Spider):
10 name = "holiday_stationstores"
11 item_attributes = {'brand': 'Holiday Stationstores',
12 'brand_wikidata': 'Q5880490'}
13 allowed_domains = ["www.holidaystationstores.com"]
14 download_delay = 0.2
15
16 def start_requests(self):
17 yield scrapy.Request('https://www.holidaystationstores.com/Locations/GetAllStores',
18 method='POST',
19 callback=self.parse_all_stores)
20
21 def parse_all_stores(self, response):
22 all_stores = json.loads(response.body_as_unicode())
23
24 for store_id, store in all_stores.items():
25 # GET requests get blocked by their CDN, but POST works fine
26 yield scrapy.Request(f"https://www.holidaystationstores.com/Locations/Detail?storeNumber={store_id}",
27 method='POST',
28 meta={'store': store})
29
30 def parse(self, response):
31 store = response.meta['store']
32
33 address = response.css(
34 '.row.HolidayBackgroundColorBlue div::text').extract_first().strip()
35 phone = response.css(
36 '.body-content .col-lg-4 .HolidayFontColorRed::text').extract_first().strip()
37 services = '|'.join(response.css(
38 '.body-content .col-lg-4 ul li::text').extract()).lower()
39 open_24_hours = '24 hours' in response.css(
40 '.body-content .col-lg-4').get().lower()
41
42 properties = {
43 'name': f"Holiday #{store['Name']}",
44 'lon': store['Lng'],
45 'lat': store['Lat'],
46 'addr_full': address,
47 'phone': phone,
48 'ref': store['ID'],
49 'opening_hours': '24/7' if open_24_hours else self.opening_hours(response),
50 'extras': {
51 'amenity:fuel': True,
52 'fuel:diesel': 'diesel' in services or None,
53 'atm': 'atm' in services or None,
54 'fuel:e85': 'e85' in services or None,
55 'hgv': 'truck' in services or None,
56 'fuel:propane': 'propane' in services or None,
57 'car_wash': 'car wash' in services or None,
58 'fuel:cng': 'cng' in services or None
59 }
60 }
61
62 yield GeojsonPointItem(**properties)
63
64 def opening_hours(self, response):
65 hour_part_elems = response.css(
66 '.body-content .col-lg-4 .row div::text').extract()
67 day_groups = []
68 this_day_group = None
69
70 if hour_part_elems:
71 def slice(source, step):
72 return [source[i:i+step] for i in range(0, len(source), step)]
73
74 for day, hours in slice(hour_part_elems, 2):
75 day = day[:2]
76 match = re.search(
77 r'^(\d{1,2}):(\d{2})\s*(a|p)m - (\d{1,2}):(\d{2})\s*(a|p)m?$', hours.lower())
78 (f_hr, f_min, f_ampm, t_hr, t_min, t_ampm) = match.groups()
79
80 f_hr = int(f_hr)
81 if f_ampm == 'p':
82 f_hr += 12
83 elif f_ampm == 'a' and f_hr == 12:
84 f_hr = 0
85 t_hr = int(t_hr)
86 if t_ampm == 'p':
87 t_hr += 12
88 elif t_ampm == 'a' and t_hr == 12:
89 t_hr = 0
90
91 hours = '{:02d}:{}-{:02d}:{}'.format(
92 f_hr,
93 f_min,
94 t_hr,
95 t_min,
96 )
97
98 if not this_day_group:
99 this_day_group = {
100 'from_day': day,
101 'to_day': day,
102 'hours': hours
103 }
104 elif this_day_group['hours'] != hours:
105 day_groups.append(this_day_group)
106 this_day_group = {
107 'from_day': day,
108 'to_day': day,
109 'hours': hours
110 }
111 elif this_day_group['hours'] == hours:
112 this_day_group['to_day'] = day
113
114 day_groups.append(this_day_group)
115
116 hour_part_elems = response.xpath(
117 '//span[@style="font-size:90%"]/text()').extract()
118 if hour_part_elems:
119 day_groups.append(
120 {'from_day': 'Mo', 'to_day': 'Su', 'hours': '00:00-23:59'})
121
122 opening_hours = ""
123 if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
124 opening_hours = '24/7'
125 else:
126 for day_group in day_groups:
127 if day_group['from_day'] == day_group['to_day']:
128 opening_hours += '{from_day} {hours}; '.format(**day_group)
129 elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':
130 opening_hours += '{hours}; '.format(**day_group)
131 else:
132 opening_hours += '{from_day}-{to_day} {hours}; '.format(
133 **day_group)
134 opening_hours = opening_hours[:-2]
135
136 return opening_hours
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/holiday_stationstores.py b/locations/spiders/holiday_stationstores.py
--- a/locations/spiders/holiday_stationstores.py
+++ b/locations/spiders/holiday_stationstores.py
@@ -19,10 +19,10 @@
callback=self.parse_all_stores)
def parse_all_stores(self, response):
- all_stores = json.loads(response.body_as_unicode())
+ all_stores = json.loads(response.text)
for store_id, store in all_stores.items():
- # GET requests get blocked by their CDN, but POST works fine
+ # GET requests get blocked by their Incapsula bot protection, but POST works fine
yield scrapy.Request(f"https://www.holidaystationstores.com/Locations/Detail?storeNumber={store_id}",
method='POST',
meta={'store': store})
@@ -30,12 +30,9 @@
def parse(self, response):
store = response.meta['store']
- address = response.css(
- '.row.HolidayBackgroundColorBlue div::text').extract_first().strip()
- phone = response.css(
- '.body-content .col-lg-4 .HolidayFontColorRed::text').extract_first().strip()
- services = '|'.join(response.css(
- '.body-content .col-lg-4 ul li::text').extract()).lower()
+ address = response.xpath('//div[@class="col-lg-4 col-sm-12"]/text()')[1].extract().strip()
+ phone = response.xpath('//div[@class="HolidayFontColorRed"]/text()').extract_first().strip()
+ services = '|'.join(response.xpath('//ul[@style="list-style-type: none; padding-left: 1.0em; font-size: 12px;"]/li/text()').extract()).lower()
open_24_hours = '24 hours' in response.css(
'.body-content .col-lg-4').get().lower()
@@ -62,16 +59,18 @@
yield GeojsonPointItem(**properties)
def opening_hours(self, response):
- hour_part_elems = response.css(
- '.body-content .col-lg-4 .row div::text').extract()
+ hour_part_elems = response.xpath('//div[@class="row"][@style="font-size: 12px;"]')
day_groups = []
this_day_group = None
if hour_part_elems:
- def slice(source, step):
- return [source[i:i+step] for i in range(0, len(source), step)]
+ for hour_part_elem in hour_part_elems:
+ day = hour_part_elem.xpath('.//div[@class="col-3"]/text()').extract_first()
+ hours = hour_part_elem.xpath('.//div[@class="col-9"]/text()').extract_first()
+
+ if not hours:
+ continue
- for day, hours in slice(hour_part_elems, 2):
day = day[:2]
match = re.search(
r'^(\d{1,2}):(\d{2})\s*(a|p)m - (\d{1,2}):(\d{2})\s*(a|p)m?$', hours.lower())
@@ -111,13 +110,12 @@
elif this_day_group['hours'] == hours:
this_day_group['to_day'] = day
- day_groups.append(this_day_group)
+ if this_day_group:
+ day_groups.append(this_day_group)
- hour_part_elems = response.xpath(
- '//span[@style="font-size:90%"]/text()').extract()
+ hour_part_elems = response.xpath('//span[@style="font-size:90%"]/text()').extract()
if hour_part_elems:
- day_groups.append(
- {'from_day': 'Mo', 'to_day': 'Su', 'hours': '00:00-23:59'})
+ day_groups.append({'from_day': 'Mo', 'to_day': 'Su', 'hours': '00:00-23:59'})
opening_hours = ""
if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
| {"golden_diff": "diff --git a/locations/spiders/holiday_stationstores.py b/locations/spiders/holiday_stationstores.py\n--- a/locations/spiders/holiday_stationstores.py\n+++ b/locations/spiders/holiday_stationstores.py\n@@ -19,10 +19,10 @@\n callback=self.parse_all_stores)\n \n def parse_all_stores(self, response):\n- all_stores = json.loads(response.body_as_unicode())\n+ all_stores = json.loads(response.text)\n \n for store_id, store in all_stores.items():\n- # GET requests get blocked by their CDN, but POST works fine\n+ # GET requests get blocked by their Incapsula bot protection, but POST works fine\n yield scrapy.Request(f\"https://www.holidaystationstores.com/Locations/Detail?storeNumber={store_id}\",\n method='POST',\n meta={'store': store})\n@@ -30,12 +30,9 @@\n def parse(self, response):\n store = response.meta['store']\n \n- address = response.css(\n- '.row.HolidayBackgroundColorBlue div::text').extract_first().strip()\n- phone = response.css(\n- '.body-content .col-lg-4 .HolidayFontColorRed::text').extract_first().strip()\n- services = '|'.join(response.css(\n- '.body-content .col-lg-4 ul li::text').extract()).lower()\n+ address = response.xpath('//div[@class=\"col-lg-4 col-sm-12\"]/text()')[1].extract().strip()\n+ phone = response.xpath('//div[@class=\"HolidayFontColorRed\"]/text()').extract_first().strip()\n+ services = '|'.join(response.xpath('//ul[@style=\"list-style-type: none; padding-left: 1.0em; font-size: 12px;\"]/li/text()').extract()).lower()\n open_24_hours = '24 hours' in response.css(\n '.body-content .col-lg-4').get().lower()\n \n@@ -62,16 +59,18 @@\n yield GeojsonPointItem(**properties)\n \n def opening_hours(self, response):\n- hour_part_elems = response.css(\n- '.body-content .col-lg-4 .row div::text').extract()\n+ hour_part_elems = response.xpath('//div[@class=\"row\"][@style=\"font-size: 12px;\"]')\n day_groups = []\n this_day_group = None\n \n if hour_part_elems:\n- def slice(source, step):\n- return [source[i:i+step] for i in range(0, len(source), step)]\n+ for hour_part_elem in hour_part_elems:\n+ day = hour_part_elem.xpath('.//div[@class=\"col-3\"]/text()').extract_first()\n+ hours = hour_part_elem.xpath('.//div[@class=\"col-9\"]/text()').extract_first()\n+\n+ if not hours:\n+ continue\n \n- for day, hours in slice(hour_part_elems, 2):\n day = day[:2]\n match = re.search(\n r'^(\\d{1,2}):(\\d{2})\\s*(a|p)m - (\\d{1,2}):(\\d{2})\\s*(a|p)m?$', hours.lower())\n@@ -111,13 +110,12 @@\n elif this_day_group['hours'] == hours:\n this_day_group['to_day'] = day\n \n- day_groups.append(this_day_group)\n+ if this_day_group:\n+ day_groups.append(this_day_group)\n \n- hour_part_elems = response.xpath(\n- '//span[@style=\"font-size:90%\"]/text()').extract()\n+ hour_part_elems = response.xpath('//span[@style=\"font-size:90%\"]/text()').extract()\n if hour_part_elems:\n- day_groups.append(\n- {'from_day': 'Mo', 'to_day': 'Su', 'hours': '00:00-23:59'})\n+ day_groups.append({'from_day': 'Mo', 'to_day': 'Su', 'hours': '00:00-23:59'})\n \n opening_hours = \"\"\n if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):\n", "issue": "Spider holiday_stationstores is broken\nDuring the global build at 2021-08-18-14-42-26, spider **holiday_stationstores** failed with **552 features** and **10 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/logs/holiday_stationstores.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/holiday_stationstores.geojson) ([on a 
map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/holiday_stationstores.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\n\nfrom locations.items import GeojsonPointItem\n\n\nclass HolidayStationstoreSpider(scrapy.Spider):\n name = \"holiday_stationstores\"\n item_attributes = {'brand': 'Holiday Stationstores',\n 'brand_wikidata': 'Q5880490'}\n allowed_domains = [\"www.holidaystationstores.com\"]\n download_delay = 0.2\n\n def start_requests(self):\n yield scrapy.Request('https://www.holidaystationstores.com/Locations/GetAllStores',\n method='POST',\n callback=self.parse_all_stores)\n\n def parse_all_stores(self, response):\n all_stores = json.loads(response.body_as_unicode())\n\n for store_id, store in all_stores.items():\n # GET requests get blocked by their CDN, but POST works fine\n yield scrapy.Request(f\"https://www.holidaystationstores.com/Locations/Detail?storeNumber={store_id}\",\n method='POST',\n meta={'store': store})\n\n def parse(self, response):\n store = response.meta['store']\n\n address = response.css(\n '.row.HolidayBackgroundColorBlue div::text').extract_first().strip()\n phone = response.css(\n '.body-content .col-lg-4 .HolidayFontColorRed::text').extract_first().strip()\n services = '|'.join(response.css(\n '.body-content .col-lg-4 ul li::text').extract()).lower()\n open_24_hours = '24 hours' in response.css(\n '.body-content .col-lg-4').get().lower()\n\n properties = {\n 'name': f\"Holiday #{store['Name']}\",\n 'lon': store['Lng'],\n 'lat': store['Lat'],\n 'addr_full': address,\n 'phone': phone,\n 'ref': store['ID'],\n 'opening_hours': '24/7' if open_24_hours else self.opening_hours(response),\n 'extras': {\n 'amenity:fuel': True,\n 'fuel:diesel': 'diesel' in services or None,\n 'atm': 'atm' in services or None,\n 'fuel:e85': 'e85' in services or None,\n 'hgv': 'truck' in services or None,\n 'fuel:propane': 'propane' in services or None,\n 'car_wash': 'car wash' in services or None,\n 'fuel:cng': 'cng' in services or None\n }\n }\n\n yield GeojsonPointItem(**properties)\n\n def opening_hours(self, response):\n hour_part_elems = response.css(\n '.body-content .col-lg-4 .row div::text').extract()\n day_groups = []\n this_day_group = None\n\n if hour_part_elems:\n def slice(source, step):\n return [source[i:i+step] for i in range(0, len(source), step)]\n\n for day, hours in slice(hour_part_elems, 2):\n day = day[:2]\n match = re.search(\n r'^(\\d{1,2}):(\\d{2})\\s*(a|p)m - (\\d{1,2}):(\\d{2})\\s*(a|p)m?$', hours.lower())\n (f_hr, f_min, f_ampm, t_hr, t_min, t_ampm) = match.groups()\n\n f_hr = int(f_hr)\n if f_ampm == 'p':\n f_hr += 12\n elif f_ampm == 'a' and f_hr == 12:\n f_hr = 0\n t_hr = int(t_hr)\n if t_ampm == 'p':\n t_hr += 12\n elif t_ampm == 'a' and t_hr == 12:\n t_hr = 0\n\n hours = '{:02d}:{}-{:02d}:{}'.format(\n f_hr,\n f_min,\n t_hr,\n t_min,\n )\n\n if not this_day_group:\n this_day_group = {\n 'from_day': day,\n 'to_day': day,\n 'hours': hours\n }\n elif this_day_group['hours'] != hours:\n day_groups.append(this_day_group)\n this_day_group = {\n 'from_day': day,\n 'to_day': day,\n 'hours': hours\n }\n elif this_day_group['hours'] == hours:\n this_day_group['to_day'] = day\n\n day_groups.append(this_day_group)\n\n hour_part_elems = response.xpath(\n '//span[@style=\"font-size:90%\"]/text()').extract()\n if hour_part_elems:\n day_groups.append(\n {'from_day': 'Mo', 'to_day': 'Su', 'hours': '00:00-23:59'})\n\n opening_hours = \"\"\n if 
len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):\n opening_hours = '24/7'\n else:\n for day_group in day_groups:\n if day_group['from_day'] == day_group['to_day']:\n opening_hours += '{from_day} {hours}; '.format(**day_group)\n elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':\n opening_hours += '{hours}; '.format(**day_group)\n else:\n opening_hours += '{from_day}-{to_day} {hours}; '.format(\n **day_group)\n opening_hours = opening_hours[:-2]\n\n return opening_hours\n", "path": "locations/spiders/holiday_stationstores.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\n\nfrom locations.items import GeojsonPointItem\n\n\nclass HolidayStationstoreSpider(scrapy.Spider):\n name = \"holiday_stationstores\"\n item_attributes = {'brand': 'Holiday Stationstores',\n 'brand_wikidata': 'Q5880490'}\n allowed_domains = [\"www.holidaystationstores.com\"]\n download_delay = 0.2\n\n def start_requests(self):\n yield scrapy.Request('https://www.holidaystationstores.com/Locations/GetAllStores',\n method='POST',\n callback=self.parse_all_stores)\n\n def parse_all_stores(self, response):\n all_stores = json.loads(response.text)\n\n for store_id, store in all_stores.items():\n # GET requests get blocked by their Incapsula bot protection, but POST works fine\n yield scrapy.Request(f\"https://www.holidaystationstores.com/Locations/Detail?storeNumber={store_id}\",\n method='POST',\n meta={'store': store})\n\n def parse(self, response):\n store = response.meta['store']\n\n address = response.xpath('//div[@class=\"col-lg-4 col-sm-12\"]/text()')[1].extract().strip()\n phone = response.xpath('//div[@class=\"HolidayFontColorRed\"]/text()').extract_first().strip()\n services = '|'.join(response.xpath('//ul[@style=\"list-style-type: none; padding-left: 1.0em; font-size: 12px;\"]/li/text()').extract()).lower()\n open_24_hours = '24 hours' in response.css(\n '.body-content .col-lg-4').get().lower()\n\n properties = {\n 'name': f\"Holiday #{store['Name']}\",\n 'lon': store['Lng'],\n 'lat': store['Lat'],\n 'addr_full': address,\n 'phone': phone,\n 'ref': store['ID'],\n 'opening_hours': '24/7' if open_24_hours else self.opening_hours(response),\n 'extras': {\n 'amenity:fuel': True,\n 'fuel:diesel': 'diesel' in services or None,\n 'atm': 'atm' in services or None,\n 'fuel:e85': 'e85' in services or None,\n 'hgv': 'truck' in services or None,\n 'fuel:propane': 'propane' in services or None,\n 'car_wash': 'car wash' in services or None,\n 'fuel:cng': 'cng' in services or None\n }\n }\n\n yield GeojsonPointItem(**properties)\n\n def opening_hours(self, response):\n hour_part_elems = response.xpath('//div[@class=\"row\"][@style=\"font-size: 12px;\"]')\n day_groups = []\n this_day_group = None\n\n if hour_part_elems:\n for hour_part_elem in hour_part_elems:\n day = hour_part_elem.xpath('.//div[@class=\"col-3\"]/text()').extract_first()\n hours = hour_part_elem.xpath('.//div[@class=\"col-9\"]/text()').extract_first()\n\n if not hours:\n continue\n\n day = day[:2]\n match = re.search(\n r'^(\\d{1,2}):(\\d{2})\\s*(a|p)m - (\\d{1,2}):(\\d{2})\\s*(a|p)m?$', hours.lower())\n (f_hr, f_min, f_ampm, t_hr, t_min, t_ampm) = match.groups()\n\n f_hr = int(f_hr)\n if f_ampm == 'p':\n f_hr += 12\n elif f_ampm == 'a' and f_hr == 12:\n f_hr = 0\n t_hr = int(t_hr)\n if t_ampm == 'p':\n t_hr += 12\n elif t_ampm == 'a' and t_hr == 12:\n t_hr = 0\n\n hours = '{:02d}:{}-{:02d}:{}'.format(\n f_hr,\n f_min,\n t_hr,\n t_min,\n )\n\n if not 
this_day_group:\n this_day_group = {\n 'from_day': day,\n 'to_day': day,\n 'hours': hours\n }\n elif this_day_group['hours'] != hours:\n day_groups.append(this_day_group)\n this_day_group = {\n 'from_day': day,\n 'to_day': day,\n 'hours': hours\n }\n elif this_day_group['hours'] == hours:\n this_day_group['to_day'] = day\n\n if this_day_group:\n day_groups.append(this_day_group)\n\n hour_part_elems = response.xpath('//span[@style=\"font-size:90%\"]/text()').extract()\n if hour_part_elems:\n day_groups.append({'from_day': 'Mo', 'to_day': 'Su', 'hours': '00:00-23:59'})\n\n opening_hours = \"\"\n if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):\n opening_hours = '24/7'\n else:\n for day_group in day_groups:\n if day_group['from_day'] == day_group['to_day']:\n opening_hours += '{from_day} {hours}; '.format(**day_group)\n elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':\n opening_hours += '{hours}; '.format(**day_group)\n else:\n opening_hours += '{from_day}-{to_day} {hours}; '.format(\n **day_group)\n opening_hours = opening_hours[:-2]\n\n return opening_hours\n", "path": "locations/spiders/holiday_stationstores.py"}]} | 1,997 | 959 |
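Most of the spider fix above is selector plumbing (CSS to XPath), but the hour-parsing core it keeps is easy to check in isolation: a regex splits text such as '6:00 am - 10:00 pm' and the pieces are reassembled as a 24-hour 'HH:MM-HH:MM' string. A standalone sketch of that conversion follows; the sample strings are invented, and unlike the spider this version also guards the 12 o'clock edge cases:

```python
import re

HOURS_RE = re.compile(
    r'^(\d{1,2}):(\d{2})\s*(a|p)m - (\d{1,2}):(\d{2})\s*(a|p)m?$')


def to_24h(hours_text):
    """Convert '6:00 am - 10:00 pm' style text to '06:00-22:00'."""
    match = HOURS_RE.search(hours_text.lower())
    if match is None:
        return None
    f_hr, f_min, f_ampm, t_hr, t_min, t_ampm = match.groups()
    f_hr, t_hr = int(f_hr), int(t_hr)
    if f_ampm == 'p' and f_hr != 12:
        f_hr += 12
    elif f_ampm == 'a' and f_hr == 12:
        f_hr = 0
    if t_ampm == 'p' and t_hr != 12:
        t_hr += 12
    elif t_ampm == 'a' and t_hr == 12:
        t_hr = 0
    return '{:02d}:{}-{:02d}:{}'.format(f_hr, f_min, t_hr, t_min)


print(to_24h('6:00 am - 10:00 pm'))   # 06:00-22:00
print(to_24h('12:00 am - 12:00 pm'))  # 00:00-12:00
print(to_24h('Open 24 Hours'))        # None -> the spider falls back to '24/7'
```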
gh_patches_debug_13266 | rasdani/github-patches | git_diff | liqd__a4-opin-1158 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dashboard styling: spacing when entering the first information of project
We also need more space between texts, boxes and buttons on the page where I add the first information for a project.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `euth/projects/forms.py`
Content:
```
1 from django import forms
2
3 from adhocracy4.projects.models import Project
4 from euth.users.fields import UserSearchField
5
6
7 class AddModeratorForm(forms.ModelForm):
8 user = UserSearchField(required=False, identifier='moderators',)
9
10 class Meta:
11 model = Project
12 fields = ('user',)
13
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/euth/projects/forms.py b/euth/projects/forms.py
--- a/euth/projects/forms.py
+++ b/euth/projects/forms.py
@@ -1,11 +1,17 @@
from django import forms
+from django.utils.translation import ugettext_lazy as _
from adhocracy4.projects.models import Project
from euth.users.fields import UserSearchField
class AddModeratorForm(forms.ModelForm):
- user = UserSearchField(required=False, identifier='moderators',)
+ user = UserSearchField(required=False,
+ identifier='moderators',
+ help_text=_('Type in the username '
+ 'of a user you would '
+ 'like to add as moderator.'),
+ label=_('Search for username'))
class Meta:
model = Project
| {"golden_diff": "diff --git a/euth/projects/forms.py b/euth/projects/forms.py\n--- a/euth/projects/forms.py\n+++ b/euth/projects/forms.py\n@@ -1,11 +1,17 @@\n from django import forms\n+from django.utils.translation import ugettext_lazy as _\n \n from adhocracy4.projects.models import Project\n from euth.users.fields import UserSearchField\n \n \n class AddModeratorForm(forms.ModelForm):\n- user = UserSearchField(required=False, identifier='moderators',)\n+ user = UserSearchField(required=False,\n+ identifier='moderators',\n+ help_text=_('Type in the username '\n+ 'of a user you would '\n+ 'like to add as moderator.'),\n+ label=_('Search for username'))\n \n class Meta:\n model = Project\n", "issue": "dashboard styling: spacing when entering the first information of project\nWe also need more space between texts, boxes and bottons on the page where I add the first information for a project.\r\n\r\n\r\n\n", "before_files": [{"content": "from django import forms\n\nfrom adhocracy4.projects.models import Project\nfrom euth.users.fields import UserSearchField\n\n\nclass AddModeratorForm(forms.ModelForm):\n user = UserSearchField(required=False, identifier='moderators',)\n\n class Meta:\n model = Project\n fields = ('user',)\n", "path": "euth/projects/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.projects.models import Project\nfrom euth.users.fields import UserSearchField\n\n\nclass AddModeratorForm(forms.ModelForm):\n user = UserSearchField(required=False,\n identifier='moderators',\n help_text=_('Type in the username '\n 'of a user you would '\n 'like to add as moderator.'),\n label=_('Search for username'))\n\n class Meta:\n model = Project\n fields = ('user',)\n", "path": "euth/projects/forms.py"}]} | 468 | 169 |
gh_patches_debug_27829 | rasdani/github-patches | git_diff | pyg-team__pytorch_geometric-7655 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Broken link in the GNN Cheat sheet
### 📚 Describe the documentation issue
In the cheatsheet [https://pytorch-geometric.readthedocs.io/en/latest/cheatsheet/gnn_cheatsheet.html](https://pytorch-geometric.readthedocs.io/en/latest/cheatsheet/gnn_cheatsheet.html), the paper link for SimpleConv points to a non-existent page. There should not be invalid links on the page. 
[SimpleConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.SimpleConv.html#torch_geometric.nn.conv.SimpleConv) ([Paper](https://pytorch-geometric.readthedocs.io/en/latest/cheatsheet/None))
### Suggest a potential alternative/fix
I see the code does
```
not torch_geometric.nn.conv.utils.processes_point_clouds(cls) %}
* - :class:`~torch_geometric.nn.conv.{{ cls }}` (`Paper <{{ torch_geometric.nn.conv.utils.paper_link(cls) }}>`__)
- {% if torch_geometric.nn.conv.utils.supports_sparse_tensor(cls) %}✓{% endif %}
- {% if
```
If there is a valid, appropriate paper, then we should point to that; if not, then I suggest adding a new document in this repository as the target for this link. The document should describe what SimpleConv does and why there is no paper for it; I assume this is because it is a very simple example.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch_geometric/datasets/utils/cheatsheet.py`
Content:
```
1 import importlib
2 import inspect
3 import re
4 from typing import Any, List, Optional
5
6
7 def paper_link(cls: str) -> str:
8 cls = importlib.import_module('torch_geometric.datasets').__dict__[cls]
9 match = re.search('<.+?>', inspect.getdoc(cls), flags=re.DOTALL)
10 return None if match is None else match.group().replace('\n', ' ')[1:-1]
11
12
13 def get_stats_table(cls: str) -> str:
14 cls = importlib.import_module('torch_geometric.datasets').__dict__[cls]
15 match = re.search(r'\*\*STATS:\*\*\n.*$', inspect.getdoc(cls),
16 flags=re.DOTALL)
17 return '' if match is None else match.group()
18
19
20 def has_stats(cls: str) -> bool:
21 return len(get_stats_table(cls)) > 0
22
23
24 def get_type(cls: str) -> str:
25 return 'Edge' if '-' in cls else 'Node'
26
27
28 def get_stat(cls: str, name: str, child: Optional[str] = None,
29 default: Any = None) -> str:
30 if child is None and len(get_children(cls)) > 0:
31 return ''
32
33 stats_table = get_stats_table(cls)
34
35 if len(stats_table) > 0:
36 stats_table = '\n'.join(stats_table.split('\n')[2:])
37
38 match = re.search(f'^.*- {name}', stats_table, flags=re.DOTALL)
39 if match is None:
40 return default
41
42 column = match.group().count(' -')
43
44 if child is not None:
45 child = child.replace('(', r'\(').replace(')', r'\)')
46 match = re.search(f'[*] - {child}\n.*$', stats_table, flags=re.DOTALL)
47 stats_row = match.group()
48 else:
49 stats_row = '*' + stats_table.split('*')[2]
50
51 return stats_row.split(' -')[column].split('\n')[0].strip()
52
53
54 def get_children(cls: str) -> List[str]:
55 matches = re.findall('[*] -.*', get_stats_table(cls))
56 return [match[4:] for match in matches[1:]] if len(matches) > 2 else []
57
```
Path: `torch_geometric/nn/conv/utils/cheatsheet.py`
Content:
```
1 import importlib
2 import inspect
3 import re
4
5
6 def paper_title(cls: str) -> str:
7 cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]
8 match = re.search('`\".+?\"', inspect.getdoc(cls), flags=re.DOTALL)
9 return None if match is None else match.group().replace('\n', ' ')[2:-1]
10
11
12 def paper_link(cls: str) -> str:
13 cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]
14 match = re.search('<.+?>', inspect.getdoc(cls), flags=re.DOTALL)
15 return None if match is None else match.group().replace('\n', ' ')[1:-1]
16
17
18 def supports_sparse_tensor(cls: str) -> bool:
19 cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]
20 signature = inspect.signature(cls.forward)
21 return 'SparseTensor' in str(signature)
22
23
24 def supports_edge_weights(cls: str) -> bool:
25 cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]
26 signature = inspect.signature(cls.forward)
27 return 'edge_weight' in str(signature)
28
29
30 def supports_edge_features(cls: str) -> bool:
31 cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]
32 signature = inspect.signature(cls.forward)
33 return 'edge_attr' in str(signature)
34
35
36 def supports_bipartite_graphs(cls: str) -> bool:
37 cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]
38 signature = inspect.signature(cls.forward)
39 return 'Union[torch.Tensor, Tuple[torch.Tensor' in str(signature)
40
41
42 def supports_static_graphs(cls: str) -> bool:
43 cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]
44 return 'node_dim=' not in inspect.getsource(cls.__init__)
45
46
47 def supports_lazy_initialization(cls: str) -> bool:
48 cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]
49 doc = re.sub(' +', ' ', inspect.getdoc(cls).replace('\n', ' '))
50 match = re.search('or :obj:`-1` to derive the size from the first', doc)
51 return match is not None
52
53
54 def processes_heterogeneous_graphs(cls: str) -> bool:
55 cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]
56 signature = inspect.signature(cls.forward)
57 return 'edge_index_dict' in str(signature) or 'edge_type' in str(signature)
58
59
60 def processes_hypergraphs(cls: str) -> bool:
61 cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]
62 signature = inspect.signature(cls.forward)
63 return 'hyperedge_index' in str(signature)
64
65
66 def processes_point_clouds(cls: str) -> bool:
67 cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]
68 signature = inspect.signature(cls.forward)
69 return (('edge_index' not in str(signature)
70 and 'csc' not in str(signature)) or 'pos' in str(signature))
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torch_geometric/datasets/utils/cheatsheet.py b/torch_geometric/datasets/utils/cheatsheet.py
--- a/torch_geometric/datasets/utils/cheatsheet.py
+++ b/torch_geometric/datasets/utils/cheatsheet.py
@@ -4,7 +4,7 @@
from typing import Any, List, Optional
-def paper_link(cls: str) -> str:
+def paper_link(cls: str) -> Optional[str]:
cls = importlib.import_module('torch_geometric.datasets').__dict__[cls]
match = re.search('<.+?>', inspect.getdoc(cls), flags=re.DOTALL)
return None if match is None else match.group().replace('\n', ' ')[1:-1]
diff --git a/torch_geometric/nn/conv/utils/cheatsheet.py b/torch_geometric/nn/conv/utils/cheatsheet.py
--- a/torch_geometric/nn/conv/utils/cheatsheet.py
+++ b/torch_geometric/nn/conv/utils/cheatsheet.py
@@ -1,15 +1,16 @@
import importlib
import inspect
import re
+from typing import Optional
-def paper_title(cls: str) -> str:
+def paper_title(cls: str) -> Optional[str]:
cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]
match = re.search('`\".+?\"', inspect.getdoc(cls), flags=re.DOTALL)
return None if match is None else match.group().replace('\n', ' ')[2:-1]
-def paper_link(cls: str) -> str:
+def paper_link(cls: str) -> Optional[str]:
cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]
match = re.search('<.+?>', inspect.getdoc(cls), flags=re.DOTALL)
return None if match is None else match.group().replace('\n', ' ')[1:-1]
| {"golden_diff": "diff --git a/torch_geometric/datasets/utils/cheatsheet.py b/torch_geometric/datasets/utils/cheatsheet.py\n--- a/torch_geometric/datasets/utils/cheatsheet.py\n+++ b/torch_geometric/datasets/utils/cheatsheet.py\n@@ -4,7 +4,7 @@\n from typing import Any, List, Optional\n \n \n-def paper_link(cls: str) -> str:\n+def paper_link(cls: str) -> Optional[str]:\n cls = importlib.import_module('torch_geometric.datasets').__dict__[cls]\n match = re.search('<.+?>', inspect.getdoc(cls), flags=re.DOTALL)\n return None if match is None else match.group().replace('\\n', ' ')[1:-1]\ndiff --git a/torch_geometric/nn/conv/utils/cheatsheet.py b/torch_geometric/nn/conv/utils/cheatsheet.py\n--- a/torch_geometric/nn/conv/utils/cheatsheet.py\n+++ b/torch_geometric/nn/conv/utils/cheatsheet.py\n@@ -1,15 +1,16 @@\n import importlib\n import inspect\n import re\n+from typing import Optional\n \n \n-def paper_title(cls: str) -> str:\n+def paper_title(cls: str) -> Optional[str]:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n match = re.search('`\\\".+?\\\"', inspect.getdoc(cls), flags=re.DOTALL)\n return None if match is None else match.group().replace('\\n', ' ')[2:-1]\n \n \n-def paper_link(cls: str) -> str:\n+def paper_link(cls: str) -> Optional[str]:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n match = re.search('<.+?>', inspect.getdoc(cls), flags=re.DOTALL)\n return None if match is None else match.group().replace('\\n', ' ')[1:-1]\n", "issue": "Broken link in the GNN Cheat sheet\n### \ud83d\udcda Describe the documentation issue\r\n\r\nIn the cheatsheet [https://pytorch-geometric.readthedocs.io/en/latest/cheatsheet/gnn_cheatsheet.html](https://pytorch-geometric.readthedocs.io/en/latest/cheatsheet/gnn_cheatsheet.html). The paper link for SimpleConv points to a non-existant page. There should not be invalid links on the page. \r\n\r\n[SimpleConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.SimpleConv.html#torch_geometric.nn.conv.SimpleConv) ([Paper](https://pytorch-geometric.readthedocs.io/en/latest/cheatsheet/None))\r\n\r\n### Suggest a potential alternative/fix\r\n\r\n\r\nI see the code does\r\n\r\n```\r\n not torch_geometric.nn.conv.utils.processes_point_clouds(cls) %}\r\n * - :class:`~torch_geometric.nn.conv.{{ cls }}` (`Paper <{{ torch_geometric.nn.conv.utils.paper_link(cls) }}>`__)\r\n - {% if torch_geometric.nn.conv.utils.supports_sparse_tensor(cls) %}\u2713{% endif %}\r\n - {% if \r\n```\r\n\r\nIf there is a valid appropriate paper - then we should point to that; if not then I suggest having a new document in this repository as the target for this link. The document should describe what SimpleConv does and why there is not paper for it; I assume because it is a very simple example. 
\r\n\r\n\n", "before_files": [{"content": "import importlib\nimport inspect\nimport re\nfrom typing import Any, List, Optional\n\n\ndef paper_link(cls: str) -> str:\n cls = importlib.import_module('torch_geometric.datasets').__dict__[cls]\n match = re.search('<.+?>', inspect.getdoc(cls), flags=re.DOTALL)\n return None if match is None else match.group().replace('\\n', ' ')[1:-1]\n\n\ndef get_stats_table(cls: str) -> str:\n cls = importlib.import_module('torch_geometric.datasets').__dict__[cls]\n match = re.search(r'\\*\\*STATS:\\*\\*\\n.*$', inspect.getdoc(cls),\n flags=re.DOTALL)\n return '' if match is None else match.group()\n\n\ndef has_stats(cls: str) -> bool:\n return len(get_stats_table(cls)) > 0\n\n\ndef get_type(cls: str) -> str:\n return 'Edge' if '-' in cls else 'Node'\n\n\ndef get_stat(cls: str, name: str, child: Optional[str] = None,\n default: Any = None) -> str:\n if child is None and len(get_children(cls)) > 0:\n return ''\n\n stats_table = get_stats_table(cls)\n\n if len(stats_table) > 0:\n stats_table = '\\n'.join(stats_table.split('\\n')[2:])\n\n match = re.search(f'^.*- {name}', stats_table, flags=re.DOTALL)\n if match is None:\n return default\n\n column = match.group().count(' -')\n\n if child is not None:\n child = child.replace('(', r'\\(').replace(')', r'\\)')\n match = re.search(f'[*] - {child}\\n.*$', stats_table, flags=re.DOTALL)\n stats_row = match.group()\n else:\n stats_row = '*' + stats_table.split('*')[2]\n\n return stats_row.split(' -')[column].split('\\n')[0].strip()\n\n\ndef get_children(cls: str) -> List[str]:\n matches = re.findall('[*] -.*', get_stats_table(cls))\n return [match[4:] for match in matches[1:]] if len(matches) > 2 else []\n", "path": "torch_geometric/datasets/utils/cheatsheet.py"}, {"content": "import importlib\nimport inspect\nimport re\n\n\ndef paper_title(cls: str) -> str:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n match = re.search('`\\\".+?\\\"', inspect.getdoc(cls), flags=re.DOTALL)\n return None if match is None else match.group().replace('\\n', ' ')[2:-1]\n\n\ndef paper_link(cls: str) -> str:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n match = re.search('<.+?>', inspect.getdoc(cls), flags=re.DOTALL)\n return None if match is None else match.group().replace('\\n', ' ')[1:-1]\n\n\ndef supports_sparse_tensor(cls: str) -> bool:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n signature = inspect.signature(cls.forward)\n return 'SparseTensor' in str(signature)\n\n\ndef supports_edge_weights(cls: str) -> bool:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n signature = inspect.signature(cls.forward)\n return 'edge_weight' in str(signature)\n\n\ndef supports_edge_features(cls: str) -> bool:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n signature = inspect.signature(cls.forward)\n return 'edge_attr' in str(signature)\n\n\ndef supports_bipartite_graphs(cls: str) -> bool:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n signature = inspect.signature(cls.forward)\n return 'Union[torch.Tensor, Tuple[torch.Tensor' in str(signature)\n\n\ndef supports_static_graphs(cls: str) -> bool:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n return 'node_dim=' not in inspect.getsource(cls.__init__)\n\n\ndef supports_lazy_initialization(cls: str) -> bool:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n doc = re.sub(' +', ' ', 
inspect.getdoc(cls).replace('\\n', ' '))\n match = re.search('or :obj:`-1` to derive the size from the first', doc)\n return match is not None\n\n\ndef processes_heterogeneous_graphs(cls: str) -> bool:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n signature = inspect.signature(cls.forward)\n return 'edge_index_dict' in str(signature) or 'edge_type' in str(signature)\n\n\ndef processes_hypergraphs(cls: str) -> bool:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n signature = inspect.signature(cls.forward)\n return 'hyperedge_index' in str(signature)\n\n\ndef processes_point_clouds(cls: str) -> bool:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n signature = inspect.signature(cls.forward)\n return (('edge_index' not in str(signature)\n and 'csc' not in str(signature)) or 'pos' in str(signature))\n", "path": "torch_geometric/nn/conv/utils/cheatsheet.py"}], "after_files": [{"content": "import importlib\nimport inspect\nimport re\nfrom typing import Any, List, Optional\n\n\ndef paper_link(cls: str) -> Optional[str]:\n cls = importlib.import_module('torch_geometric.datasets').__dict__[cls]\n match = re.search('<.+?>', inspect.getdoc(cls), flags=re.DOTALL)\n return None if match is None else match.group().replace('\\n', ' ')[1:-1]\n\n\ndef get_stats_table(cls: str) -> str:\n cls = importlib.import_module('torch_geometric.datasets').__dict__[cls]\n match = re.search(r'\\*\\*STATS:\\*\\*\\n.*$', inspect.getdoc(cls),\n flags=re.DOTALL)\n return '' if match is None else match.group()\n\n\ndef has_stats(cls: str) -> bool:\n return len(get_stats_table(cls)) > 0\n\n\ndef get_type(cls: str) -> str:\n return 'Edge' if '-' in cls else 'Node'\n\n\ndef get_stat(cls: str, name: str, child: Optional[str] = None,\n default: Any = None) -> str:\n if child is None and len(get_children(cls)) > 0:\n return ''\n\n stats_table = get_stats_table(cls)\n\n if len(stats_table) > 0:\n stats_table = '\\n'.join(stats_table.split('\\n')[2:])\n\n match = re.search(f'^.*- {name}', stats_table, flags=re.DOTALL)\n if match is None:\n return default\n\n column = match.group().count(' -')\n\n if child is not None:\n child = child.replace('(', r'\\(').replace(')', r'\\)')\n match = re.search(f'[*] - {child}\\n.*$', stats_table, flags=re.DOTALL)\n stats_row = match.group()\n else:\n stats_row = '*' + stats_table.split('*')[2]\n\n return stats_row.split(' -')[column].split('\\n')[0].strip()\n\n\ndef get_children(cls: str) -> List[str]:\n matches = re.findall('[*] -.*', get_stats_table(cls))\n return [match[4:] for match in matches[1:]] if len(matches) > 2 else []\n", "path": "torch_geometric/datasets/utils/cheatsheet.py"}, {"content": "import importlib\nimport inspect\nimport re\nfrom typing import Optional\n\n\ndef paper_title(cls: str) -> Optional[str]:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n match = re.search('`\\\".+?\\\"', inspect.getdoc(cls), flags=re.DOTALL)\n return None if match is None else match.group().replace('\\n', ' ')[2:-1]\n\n\ndef paper_link(cls: str) -> Optional[str]:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n match = re.search('<.+?>', inspect.getdoc(cls), flags=re.DOTALL)\n return None if match is None else match.group().replace('\\n', ' ')[1:-1]\n\n\ndef supports_sparse_tensor(cls: str) -> bool:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n signature = inspect.signature(cls.forward)\n return 'SparseTensor' in str(signature)\n\n\ndef 
supports_edge_weights(cls: str) -> bool:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n signature = inspect.signature(cls.forward)\n return 'edge_weight' in str(signature)\n\n\ndef supports_edge_features(cls: str) -> bool:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n signature = inspect.signature(cls.forward)\n return 'edge_attr' in str(signature)\n\n\ndef supports_bipartite_graphs(cls: str) -> bool:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n signature = inspect.signature(cls.forward)\n return 'Union[torch.Tensor, Tuple[torch.Tensor' in str(signature)\n\n\ndef supports_static_graphs(cls: str) -> bool:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n return 'node_dim=' not in inspect.getsource(cls.__init__)\n\n\ndef supports_lazy_initialization(cls: str) -> bool:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n doc = re.sub(' +', ' ', inspect.getdoc(cls).replace('\\n', ' '))\n match = re.search('or :obj:`-1` to derive the size from the first', doc)\n return match is not None\n\n\ndef processes_heterogeneous_graphs(cls: str) -> bool:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n signature = inspect.signature(cls.forward)\n return 'edge_index_dict' in str(signature) or 'edge_type' in str(signature)\n\n\ndef processes_hypergraphs(cls: str) -> bool:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n signature = inspect.signature(cls.forward)\n return 'hyperedge_index' in str(signature)\n\n\ndef processes_point_clouds(cls: str) -> bool:\n cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls]\n signature = inspect.signature(cls.forward)\n return (('edge_index' not in str(signature)\n and 'csc' not in str(signature)) or 'pos' in str(signature))\n", "path": "torch_geometric/nn/conv/utils/cheatsheet.py"}]} | 2,013 | 426 |
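The hint change above encodes the behaviour the issue complains about: when a class docstring carries no reST link, `paper_link()` returns `None`, and a template that interpolates the result blindly renders a dead `Paper <None>` URL. A self-contained sketch with invented classes (the real helpers resolve the class by name from `torch_geometric.nn.conv` rather than taking it directly):

```python
import inspect
import re
from typing import Optional


class WithPaper:
    """A convolution from the `"Example Paper Title"
    <https://example.com/paper.pdf>`_ paper."""


class SimpleConvLike:
    """A simple message passing operator that has no associated paper."""


def paper_link(cls: type) -> Optional[str]:
    """Return the first reST link target in the class docstring, if any."""
    match = re.search('<.+?>', inspect.getdoc(cls), flags=re.DOTALL)
    return None if match is None else match.group().replace('\n', ' ')[1:-1]


print(paper_link(WithPaper))       # https://example.com/paper.pdf
print(paper_link(SimpleConvLike))  # None -- the template must guard this case
```

Typing the return as `Optional[str]` documents the gap; actually removing the dead link also requires the cheatsheet template to test the value (or the `SimpleConv` docstring to gain a target) before emitting the `(Paper <...>)` markup.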
gh_patches_debug_40608 | rasdani/github-patches | git_diff | enthought__chaco-501 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Demo functionplotter.py range editor error
**Problem Description**
functionplotter.py breaks when `auto` is chosen from the enum editor.
**Reproduction Steps:**
Run the file and change the enum to `auto`.
**Expected behavior:**
The plot disappears and the following error is raised:
```
functionplotter.py:47: RuntimeWarning: invalid value encountered in double_scalars
real_high = ceil(high/dx) * dx
/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/data_range_1d.py:124: RuntimeWarning: invalid value encountered in greater_equal
return ((data.view(ndarray) >= self._low_value) &
/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/data_range_1d.py:125: RuntimeWarning: invalid value encountered in less_equal
(data.view(ndarray) <= self._high_value))
functionplotter.py:46: RuntimeWarning: invalid value encountered in double_scalars
real_low = ceil(low/dx) * dx
functionplotter.py:52: RuntimeWarning: divide by zero encountered in divide
return sin(1.0/x)
functionplotter.py:52: RuntimeWarning: invalid value encountered in sin
return sin(1.0/x)
/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/data_range_1d.py:148: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
if low == 'track':
/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/data_range_1d.py:232: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
if self._low_setting != val:
/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/data_range_1d.py:239: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
if val == 'auto':
/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/data_range_1d.py:245: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
elif val == 'track':
/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/data_range_1d.py:285: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
if self._high_setting != val:
/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/data_range_1d.py:292: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
if val == 'auto':
/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/data_range_1d.py:298: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
elif val == 'track':
```
**OS, Python version:**
OSX, Python 2.7
splits from #385
--- END ISSUE ---
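The lines quoted in the warning trace implement grid-snapped sampling of the x range, which only behaves while the bounds are finite, distinct numbers. A standalone sketch of that intended behaviour, using the demo's default bounds; once `auto` leaves the bounds in a non-numeric state, the same arithmetic yields the NaN and divide warnings shown above:

```python
import numpy as np


def xfunc(low, high, numpoints=500):
    # Same grid-snapping as the demo: endpoints are moved onto multiples of
    # dx so that panning re-samples the function at stable x locations.
    dx = (high - low) / numpoints
    real_low = np.ceil(low / dx) * dx
    real_high = np.ceil(high / dx) * dx
    return np.linspace(real_low, real_high, numpoints)


x = xfunc(-5.0, 10.0)
print(round(float(x[0]), 2), round(float(x[-1]), 2))  # -4.98 10.02
print(np.isfinite(x).all())  # True, but only while the bounds are real numbers
```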
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/demo/functionplotter.py`
Content:
```
1 #!/usr/bin/env python
2 """
3 Demonstrates use of the FunctionDataSource that depends on an external range
4 and returns different data depending on that range.
5 """
6
7 # Major library imports
8 from numpy import linspace, sin, ceil
9
10 # Enthought library imports
11 from enable.api import Component, ComponentEditor
12 from traits.api import HasTraits, Instance, Int
13 from traitsui.api import Item, Group, HGroup, View
14
15 # Chaco imports
16 from chaco.api import ScatterPlot, DataView, LinePlot
17 from chaco.tools.api import PanTool, ZoomTool
18 from chaco.function_data_source import FunctionDataSource
19
20
21 class PlotExample(HasTraits):
22 plot = Instance(Component)
23 numpoints = Int(500)
24
25 traits_view = View(
26 Group(
27 Item('plot', editor=ComponentEditor(), show_label=False),
28 HGroup(
29 HGroup(
30 Item('object.plot.x_mapper.range.low_setting', label='Low'),
31 Item('object.plot.x_mapper.range.high_setting', label='High'),
32 label='X', show_border=True
33 ),
34 HGroup(
35 Item('object.plot.y_mapper.range.low_setting', label='Low'),
36 Item('object.plot.y_mapper.range.high_setting', label='High'),
37 label='Y', show_border=True
38 ),
39 ),
40 orientation = "vertical"), resizable=True, title="Function Plot",
41 width=900, height=600
42 )
43
44 def xfunc(self, low, high):
45 dx = (high - low) / self.numpoints
46 real_low = ceil(low/dx) * dx
47 real_high = ceil(high/dx) * dx
48 return linspace(real_low, real_high, self.numpoints)
49
50 def yfunc(self, low, high):
51 x = self.xfunc(low, high)
52 return sin(1.0/x)
53
54 def _plot_default(self):
55 container = DataView()
56
57 xds = FunctionDataSource(func = self.xfunc)
58 yds = FunctionDataSource(func = self.yfunc)
59
60 xmapper = container.x_mapper
61 ymapper = container.y_mapper
62
63 xds.data_range = xmapper.range
64 yds.data_range = xmapper.range
65
66 xmapper.range.set_bounds(-5, 10)
67 ymapper.range.set_bounds(-1, 1.2)
68
69 plot = ScatterPlot(index = xds, value = yds, index_mapper = xmapper,
70 value_mapper = ymapper,
71 color = "green",
72 marker = "circle",
73 marker_size = 3,
74 line_width = 0)
75
76 plot2 = LinePlot(index = xds, value = yds, index_mapper = xmapper,
77 value_mapper = ymapper,
78 color = "lightgray")
79
80 container.add(plot2, plot)
81 plot.tools.append(PanTool(plot, constrain_direction="x", constrain=True))
82 plot.tools.append(ZoomTool(plot, axis="index", tool_mode="range"))
83
84 return container
85
86
87 demo = PlotExample()
88
89 if __name__ == "__main__":
90 demo.configure_traits()
91
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/demo/functionplotter.py b/examples/demo/functionplotter.py
--- a/examples/demo/functionplotter.py
+++ b/examples/demo/functionplotter.py
@@ -9,8 +9,8 @@
# Enthought library imports
from enable.api import Component, ComponentEditor
-from traits.api import HasTraits, Instance, Int
-from traitsui.api import Item, Group, HGroup, View
+from traits.api import HasTraits, Instance, Int, Enum
+from traitsui.api import Item, Group, HGroup, View, TextEditor
# Chaco imports
from chaco.api import ScatterPlot, DataView, LinePlot
@@ -22,18 +22,35 @@
plot = Instance(Component)
numpoints = Int(500)
+ low_mode = Enum("value", "track")
+ high_mode = Enum("value", "track")
+
+
traits_view = View(
Group(
Item('plot', editor=ComponentEditor(), show_label=False),
HGroup(
HGroup(
- Item('object.plot.x_mapper.range.low_setting', label='Low'),
- Item('object.plot.x_mapper.range.high_setting', label='High'),
+ Item('object.plot.x_mapper.range.low_setting', label='Low',
+ editor=TextEditor(),
+ visible_when='object.low_mode == "value" ', ),
+ Item('low_mode', label='Low Mode'),
+ Item('object.plot.x_mapper.range.high_setting',
+ label='High', editor=TextEditor(),
+ visible_when='object.high_mode == "value" '),
+ Item('high_mode', label='High Mode'),
+ Item('object.plot.x_mapper.range.tracking_amount',
+ label='Tracking Amount',
+ editor=TextEditor(read_only=True),
+ visible_when='object.high_mode == "track" or '
+ 'object.low_mode == "track"'),
label='X', show_border=True
),
HGroup(
- Item('object.plot.y_mapper.range.low_setting', label='Low'),
- Item('object.plot.y_mapper.range.high_setting', label='High'),
+ Item('object.plot.y_mapper.range.low_setting',
+ label='Low', editor=TextEditor()),
+ Item('object.plot.y_mapper.range.high_setting',
+ label='High', editor=TextEditor()),
label='Y', show_border=True
),
),
@@ -51,6 +68,14 @@
x = self.xfunc(low, high)
return sin(1.0/x)
+ def _low_mode_changed(self, newvalue):
+ if newvalue != "value":
+ self.plot.x_mapper.range.low_setting = newvalue
+
+ def _high_mode_changed(self, newvalue):
+ if newvalue != "value":
+ self.plot.x_mapper.range.high_setting = newvalue
+
def _plot_default(self):
container = DataView()
@@ -78,7 +103,8 @@
color = "lightgray")
container.add(plot2, plot)
- plot.tools.append(PanTool(plot, constrain_direction="x", constrain=True))
+ plot.tools.append(PanTool(plot, constrain_direction="x",
+ constrain=True))
plot.tools.append(ZoomTool(plot, axis="index", tool_mode="range"))
return container
| {"golden_diff": "diff --git a/examples/demo/functionplotter.py b/examples/demo/functionplotter.py\n--- a/examples/demo/functionplotter.py\n+++ b/examples/demo/functionplotter.py\n@@ -9,8 +9,8 @@\n \n # Enthought library imports\n from enable.api import Component, ComponentEditor\n-from traits.api import HasTraits, Instance, Int\n-from traitsui.api import Item, Group, HGroup, View\n+from traits.api import HasTraits, Instance, Int, Enum\n+from traitsui.api import Item, Group, HGroup, View, TextEditor\n \n # Chaco imports\n from chaco.api import ScatterPlot, DataView, LinePlot\n@@ -22,18 +22,35 @@\n plot = Instance(Component)\n numpoints = Int(500)\n \n+ low_mode = Enum(\"value\", \"track\")\n+ high_mode = Enum(\"value\", \"track\")\n+\n+\n traits_view = View(\n Group(\n Item('plot', editor=ComponentEditor(), show_label=False),\n HGroup(\n HGroup(\n- Item('object.plot.x_mapper.range.low_setting', label='Low'),\n- Item('object.plot.x_mapper.range.high_setting', label='High'),\n+ Item('object.plot.x_mapper.range.low_setting', label='Low',\n+ editor=TextEditor(),\n+ visible_when='object.low_mode == \"value\" ', ),\n+ Item('low_mode', label='Low Mode'),\n+ Item('object.plot.x_mapper.range.high_setting',\n+ label='High', editor=TextEditor(),\n+ visible_when='object.high_mode == \"value\" '),\n+ Item('high_mode', label='High Mode'),\n+ Item('object.plot.x_mapper.range.tracking_amount',\n+ label='Tracking Amount',\n+ editor=TextEditor(read_only=True),\n+ visible_when='object.high_mode == \"track\" or '\n+ 'object.low_mode == \"track\"'),\n label='X', show_border=True\n ),\n HGroup(\n- Item('object.plot.y_mapper.range.low_setting', label='Low'),\n- Item('object.plot.y_mapper.range.high_setting', label='High'),\n+ Item('object.plot.y_mapper.range.low_setting',\n+ label='Low', editor=TextEditor()),\n+ Item('object.plot.y_mapper.range.high_setting',\n+ label='High', editor=TextEditor()),\n label='Y', show_border=True\n ),\n ),\n@@ -51,6 +68,14 @@\n x = self.xfunc(low, high)\n return sin(1.0/x)\n \n+ def _low_mode_changed(self, newvalue):\n+ if newvalue != \"value\":\n+ self.plot.x_mapper.range.low_setting = newvalue\n+\n+ def _high_mode_changed(self, newvalue):\n+ if newvalue != \"value\":\n+ self.plot.x_mapper.range.high_setting = newvalue\n+\n def _plot_default(self):\n container = DataView()\n \n@@ -78,7 +103,8 @@\n color = \"lightgray\")\n \n container.add(plot2, plot)\n- plot.tools.append(PanTool(plot, constrain_direction=\"x\", constrain=True))\n+ plot.tools.append(PanTool(plot, constrain_direction=\"x\",\n+ constrain=True))\n plot.tools.append(ZoomTool(plot, axis=\"index\", tool_mode=\"range\"))\n \n return container\n", "issue": "Demo functionplotter.py range editor error\n**Problem Description**\r\nfunctionplotter.py break when `auto` is chosen from enum editor.\r\n**Reproduction Steps:**\r\nRun the file and change the enum to `auto`.\r\n**Expected behavior:**\r\nPlot disappear and raise the error\r\n```\r\nfunctionplotter.py:47: RuntimeWarning: invalid value encountered in double_scalars\r\n real_high = ceil(high/dx) * dx\r\n/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/data_range_1d.py:124: RuntimeWarning: invalid value encountered in greater_equal\r\n return ((data.view(ndarray) >= self._low_value) &\r\n/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/data_range_1d.py:125: RuntimeWarning: invalid value encountered in less_equal\r\n (data.view(ndarray) <= self._high_value))\r\nfunctionplotter.py:46: RuntimeWarning: invalid value encountered in 
double_scalars\r\n real_low = ceil(low/dx) * dx\r\nfunctionplotter.py:52: RuntimeWarning: divide by zero encountered in divide\r\n return sin(1.0/x)\r\nfunctionplotter.py:52: RuntimeWarning: invalid value encountered in sin\r\n return sin(1.0/x)\r\n/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/data_range_1d.py:148: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\r\n if low == 'track':\r\n/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/data_range_1d.py:232: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\r\n if self._low_setting != val:\r\n/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/data_range_1d.py:239: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\r\n if val == 'auto':\r\n/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/data_range_1d.py:245: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\r\n elif val == 'track':\r\n/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/data_range_1d.py:285: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\r\n if self._high_setting != val:\r\n/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/data_range_1d.py:292: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\r\n if val == 'auto':\r\n/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/data_range_1d.py:298: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\r\n elif val == 'track':\r\n```\r\n\r\n**OS, Python version:** \r\nOSX, Python 2.7\r\n\r\nsplits from #385 \n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nDemonstrates use of the FunctionDataSource that depends on an external range\nand returns different data depending on that range.\n\"\"\"\n\n# Major library imports\nfrom numpy import linspace, sin, ceil\n\n# Enthought library imports\nfrom enable.api import Component, ComponentEditor\nfrom traits.api import HasTraits, Instance, Int\nfrom traitsui.api import Item, Group, HGroup, View\n\n# Chaco imports\nfrom chaco.api import ScatterPlot, DataView, LinePlot\nfrom chaco.tools.api import PanTool, ZoomTool\nfrom chaco.function_data_source import FunctionDataSource\n\n\nclass PlotExample(HasTraits):\n plot = Instance(Component)\n numpoints = Int(500)\n\n traits_view = View(\n Group(\n Item('plot', editor=ComponentEditor(), show_label=False),\n HGroup(\n HGroup(\n Item('object.plot.x_mapper.range.low_setting', label='Low'),\n Item('object.plot.x_mapper.range.high_setting', label='High'),\n label='X', show_border=True\n ),\n HGroup(\n Item('object.plot.y_mapper.range.low_setting', label='Low'),\n Item('object.plot.y_mapper.range.high_setting', label='High'),\n label='Y', show_border=True\n ),\n ),\n orientation = \"vertical\"), resizable=True, title=\"Function Plot\",\n width=900, height=600\n )\n\n def xfunc(self, low, high):\n dx = (high - low) / self.numpoints\n real_low = ceil(low/dx) * dx\n real_high = ceil(high/dx) * dx\n return linspace(real_low, real_high, self.numpoints)\n\n def yfunc(self, low, high):\n x = 
self.xfunc(low, high)\n return sin(1.0/x)\n\n def _plot_default(self):\n container = DataView()\n\n xds = FunctionDataSource(func = self.xfunc)\n yds = FunctionDataSource(func = self.yfunc)\n\n xmapper = container.x_mapper\n ymapper = container.y_mapper\n\n xds.data_range = xmapper.range\n yds.data_range = xmapper.range\n\n xmapper.range.set_bounds(-5, 10)\n ymapper.range.set_bounds(-1, 1.2)\n\n plot = ScatterPlot(index = xds, value = yds, index_mapper = xmapper,\n value_mapper = ymapper,\n color = \"green\",\n marker = \"circle\",\n marker_size = 3,\n line_width = 0)\n\n plot2 = LinePlot(index = xds, value = yds, index_mapper = xmapper,\n value_mapper = ymapper,\n color = \"lightgray\")\n\n container.add(plot2, plot)\n plot.tools.append(PanTool(plot, constrain_direction=\"x\", constrain=True))\n plot.tools.append(ZoomTool(plot, axis=\"index\", tool_mode=\"range\"))\n\n return container\n\n\ndemo = PlotExample()\n\nif __name__ == \"__main__\":\n demo.configure_traits()\n\n", "path": "examples/demo/functionplotter.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nDemonstrates use of the FunctionDataSource that depends on an external range\nand returns different data depending on that range.\n\"\"\"\n\n# Major library imports\nfrom numpy import linspace, sin, ceil\n\n# Enthought library imports\nfrom enable.api import Component, ComponentEditor\nfrom traits.api import HasTraits, Instance, Int, Enum\nfrom traitsui.api import Item, Group, HGroup, View, TextEditor\n\n# Chaco imports\nfrom chaco.api import ScatterPlot, DataView, LinePlot\nfrom chaco.tools.api import PanTool, ZoomTool\nfrom chaco.function_data_source import FunctionDataSource\n\n\nclass PlotExample(HasTraits):\n plot = Instance(Component)\n numpoints = Int(500)\n\n low_mode = Enum(\"value\", \"track\")\n high_mode = Enum(\"value\", \"track\")\n\n\n traits_view = View(\n Group(\n Item('plot', editor=ComponentEditor(), show_label=False),\n HGroup(\n HGroup(\n Item('object.plot.x_mapper.range.low_setting', label='Low',\n editor=TextEditor(),\n visible_when='object.low_mode == \"value\" ', ),\n Item('low_mode', label='Low Mode'),\n Item('object.plot.x_mapper.range.high_setting',\n label='High', editor=TextEditor(),\n visible_when='object.high_mode == \"value\" '),\n Item('high_mode', label='High Mode'),\n Item('object.plot.x_mapper.range.tracking_amount',\n label='Tracking Amount',\n editor=TextEditor(read_only=True),\n visible_when='object.high_mode == \"track\" or '\n 'object.low_mode == \"track\"'),\n label='X', show_border=True\n ),\n HGroup(\n Item('object.plot.y_mapper.range.low_setting',\n label='Low', editor=TextEditor()),\n Item('object.plot.y_mapper.range.high_setting',\n label='High', editor=TextEditor()),\n label='Y', show_border=True\n ),\n ),\n orientation = \"vertical\"), resizable=True, title=\"Function Plot\",\n width=900, height=600\n )\n\n def xfunc(self, low, high):\n dx = (high - low) / self.numpoints\n real_low = ceil(low/dx) * dx\n real_high = ceil(high/dx) * dx\n return linspace(real_low, real_high, self.numpoints)\n\n def yfunc(self, low, high):\n x = self.xfunc(low, high)\n return sin(1.0/x)\n\n def _low_mode_changed(self, newvalue):\n if newvalue != \"value\":\n self.plot.x_mapper.range.low_setting = newvalue\n\n def _high_mode_changed(self, newvalue):\n if newvalue != \"value\":\n self.plot.x_mapper.range.high_setting = newvalue\n\n def _plot_default(self):\n container = DataView()\n\n xds = FunctionDataSource(func = self.xfunc)\n yds = FunctionDataSource(func = self.yfunc)\n\n xmapper = 
container.x_mapper\n ymapper = container.y_mapper\n\n xds.data_range = xmapper.range\n yds.data_range = xmapper.range\n\n xmapper.range.set_bounds(-5, 10)\n ymapper.range.set_bounds(-1, 1.2)\n\n plot = ScatterPlot(index = xds, value = yds, index_mapper = xmapper,\n value_mapper = ymapper,\n color = \"green\",\n marker = \"circle\",\n marker_size = 3,\n line_width = 0)\n\n plot2 = LinePlot(index = xds, value = yds, index_mapper = xmapper,\n value_mapper = ymapper,\n color = \"lightgray\")\n\n container.add(plot2, plot)\n plot.tools.append(PanTool(plot, constrain_direction=\"x\",\n constrain=True))\n plot.tools.append(ZoomTool(plot, axis=\"index\", tool_mode=\"range\"))\n\n return container\n\n\ndemo = PlotExample()\n\nif __name__ == \"__main__\":\n demo.configure_traits()\n\n", "path": "examples/demo/functionplotter.py"}]} | 1,844 | 723 |
gh_patches_debug_2517 | rasdani/github-patches | git_diff | encode__uvicorn-436 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
--limit-max-requests not working
Hi! I'm trying to figure out why my workers are not restarting as expected when using the `--limit-max-requests` flag. I ran `uvicorn` in debug mode and noticed that the `self.server_state.total_requests` count is not increasing (stays at 0 after each request) so `self.server_state.total_requests >= self.config.limit_max_requests` never returns `True`.
When looking into where the `total_requests` was used, I noticed that the `protocols.http.[auto/h11/httptools]` were never getting called. I tried forcing the `--http h11` and `--http httptools` parameters, without any change in behavior. Any help would be appreciated!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `uvicorn/workers.py`
Content:
```
1 import asyncio
2
3 from gunicorn.workers.base import Worker
4
5 from uvicorn.config import Config
6 from uvicorn.main import Server
7
8
9 class UvicornWorker(Worker):
10 """
11 A worker class for Gunicorn that interfaces with an ASGI consumer callable,
12 rather than a WSGI callable.
13 """
14
15 CONFIG_KWARGS = {"loop": "uvloop", "http": "httptools"}
16
17 def __init__(self, *args, **kwargs):
18 super(UvicornWorker, self).__init__(*args, **kwargs)
19
20 self.log.level = self.log.loglevel
21
22 config_kwargs = {
23 "app": None,
24 "logger": self.log,
25 "timeout_keep_alive": self.cfg.keepalive,
26 "timeout_notify": self.timeout,
27 "callback_notify": self.callback_notify,
28 }
29
30 if self.cfg.is_ssl:
31 ssl_kwargs = {
32 "ssl_keyfile": self.cfg.ssl_options.get("keyfile"),
33 "ssl_certfile": self.cfg.ssl_options.get("certfile"),
34 "ssl_version": self.cfg.ssl_options.get("ssl_version"),
35 "ssl_cert_reqs": self.cfg.ssl_options.get("cert_reqs"),
36 "ssl_ca_certs": self.cfg.ssl_options.get("ca_certs"),
37 "ssl_ciphers": self.cfg.ssl_options.get("ciphers"),
38 }
39 config_kwargs.update(ssl_kwargs)
40
41 config_kwargs.update(self.CONFIG_KWARGS)
42
43 self.config = Config(**config_kwargs)
44
45 def init_process(self):
46 self.config.setup_event_loop()
47 super(UvicornWorker, self).init_process()
48
49 def init_signals(self):
50 pass
51
52 def run(self):
53 self.config.app = self.wsgi
54 server = Server(config=self.config)
55 loop = asyncio.get_event_loop()
56 loop.run_until_complete(
57 server.serve(sockets=self.sockets, shutdown_servers=False)
58 )
59
60 async def callback_notify(self):
61 self.notify()
62
63
64 class UvicornH11Worker(UvicornWorker):
65 CONFIG_KWARGS = {"loop": "asyncio", "http": "h11"}
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/uvicorn/workers.py b/uvicorn/workers.py
--- a/uvicorn/workers.py
+++ b/uvicorn/workers.py
@@ -25,6 +25,7 @@
"timeout_keep_alive": self.cfg.keepalive,
"timeout_notify": self.timeout,
"callback_notify": self.callback_notify,
+ "limit_max_requests": self.max_requests,
}
if self.cfg.is_ssl:
| {"golden_diff": "diff --git a/uvicorn/workers.py b/uvicorn/workers.py\n--- a/uvicorn/workers.py\n+++ b/uvicorn/workers.py\n@@ -25,6 +25,7 @@\n \"timeout_keep_alive\": self.cfg.keepalive,\n \"timeout_notify\": self.timeout,\n \"callback_notify\": self.callback_notify,\n+ \"limit_max_requests\": self.max_requests,\n }\n \n if self.cfg.is_ssl:\n", "issue": "--limit-max-requests not working\nHi! I'm trying to figure out why my workers are not restarting as expected when using the `--limit-max-requests` flag. I ran `uvicorn` in debug mode and noticed that the `self.server_state.total_requests` count is not increasing (stays at 0 after each request) so `self.server_state.total_requests >= self.config.limit_max_requests` never returns `True`. \r\nWhen looking into where the `total_requests` was used, I noticed that the `protocols.http.[auto/h11/httptools]` were never getting called. I tried forcing the `--http h11` and `--http httptools` parameters, without any change in behavior. Any help would be appreciated!\n", "before_files": [{"content": "import asyncio\n\nfrom gunicorn.workers.base import Worker\n\nfrom uvicorn.config import Config\nfrom uvicorn.main import Server\n\n\nclass UvicornWorker(Worker):\n \"\"\"\n A worker class for Gunicorn that interfaces with an ASGI consumer callable,\n rather than a WSGI callable.\n \"\"\"\n\n CONFIG_KWARGS = {\"loop\": \"uvloop\", \"http\": \"httptools\"}\n\n def __init__(self, *args, **kwargs):\n super(UvicornWorker, self).__init__(*args, **kwargs)\n\n self.log.level = self.log.loglevel\n\n config_kwargs = {\n \"app\": None,\n \"logger\": self.log,\n \"timeout_keep_alive\": self.cfg.keepalive,\n \"timeout_notify\": self.timeout,\n \"callback_notify\": self.callback_notify,\n }\n\n if self.cfg.is_ssl:\n ssl_kwargs = {\n \"ssl_keyfile\": self.cfg.ssl_options.get(\"keyfile\"),\n \"ssl_certfile\": self.cfg.ssl_options.get(\"certfile\"),\n \"ssl_version\": self.cfg.ssl_options.get(\"ssl_version\"),\n \"ssl_cert_reqs\": self.cfg.ssl_options.get(\"cert_reqs\"),\n \"ssl_ca_certs\": self.cfg.ssl_options.get(\"ca_certs\"),\n \"ssl_ciphers\": self.cfg.ssl_options.get(\"ciphers\"),\n }\n config_kwargs.update(ssl_kwargs)\n\n config_kwargs.update(self.CONFIG_KWARGS)\n\n self.config = Config(**config_kwargs)\n\n def init_process(self):\n self.config.setup_event_loop()\n super(UvicornWorker, self).init_process()\n\n def init_signals(self):\n pass\n\n def run(self):\n self.config.app = self.wsgi\n server = Server(config=self.config)\n loop = asyncio.get_event_loop()\n loop.run_until_complete(\n server.serve(sockets=self.sockets, shutdown_servers=False)\n )\n\n async def callback_notify(self):\n self.notify()\n\n\nclass UvicornH11Worker(UvicornWorker):\n CONFIG_KWARGS = {\"loop\": \"asyncio\", \"http\": \"h11\"}\n", "path": "uvicorn/workers.py"}], "after_files": [{"content": "import asyncio\n\nfrom gunicorn.workers.base import Worker\n\nfrom uvicorn.config import Config\nfrom uvicorn.main import Server\n\n\nclass UvicornWorker(Worker):\n \"\"\"\n A worker class for Gunicorn that interfaces with an ASGI consumer callable,\n rather than a WSGI callable.\n \"\"\"\n\n CONFIG_KWARGS = {\"loop\": \"uvloop\", \"http\": \"httptools\"}\n\n def __init__(self, *args, **kwargs):\n super(UvicornWorker, self).__init__(*args, **kwargs)\n\n self.log.level = self.log.loglevel\n\n config_kwargs = {\n \"app\": None,\n \"logger\": self.log,\n \"timeout_keep_alive\": self.cfg.keepalive,\n \"timeout_notify\": self.timeout,\n \"callback_notify\": self.callback_notify,\n \"limit_max_requests\": 
self.max_requests,\n }\n\n if self.cfg.is_ssl:\n ssl_kwargs = {\n \"ssl_keyfile\": self.cfg.ssl_options.get(\"keyfile\"),\n \"ssl_certfile\": self.cfg.ssl_options.get(\"certfile\"),\n \"ssl_version\": self.cfg.ssl_options.get(\"ssl_version\"),\n \"ssl_cert_reqs\": self.cfg.ssl_options.get(\"cert_reqs\"),\n \"ssl_ca_certs\": self.cfg.ssl_options.get(\"ca_certs\"),\n \"ssl_ciphers\": self.cfg.ssl_options.get(\"ciphers\"),\n }\n config_kwargs.update(ssl_kwargs)\n\n config_kwargs.update(self.CONFIG_KWARGS)\n\n self.config = Config(**config_kwargs)\n\n def init_process(self):\n self.config.setup_event_loop()\n super(UvicornWorker, self).init_process()\n\n def init_signals(self):\n pass\n\n def run(self):\n self.config.app = self.wsgi\n server = Server(config=self.config)\n loop = asyncio.get_event_loop()\n loop.run_until_complete(\n server.serve(sockets=self.sockets, shutdown_servers=False)\n )\n\n async def callback_notify(self):\n self.notify()\n\n\nclass UvicornH11Worker(UvicornWorker):\n CONFIG_KWARGS = {\"loop\": \"asyncio\", \"http\": \"h11\"}\n", "path": "uvicorn/workers.py"}]} | 993 | 97 |
gh_patches_debug_9521 | rasdani/github-patches | git_diff | mkdocs__mkdocs-272 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Show installed version in command-line
I'd expect `mkdocs help` to display the currently installed version; this would be nice to have for the next versions.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/main.py`
Content:
```
1 #!/usr/bin/env python
2 # coding: utf-8
3 from __future__ import print_function
4
5 import sys
6
7 from mkdocs.build import build
8 from mkdocs.config import load_config
9 from mkdocs.exceptions import ConfigurationError
10 from mkdocs.gh_deploy import gh_deploy
11 from mkdocs.new import new
12 from mkdocs.serve import serve
13
14
15 def arg_to_option(arg):
16 """
17 Convert command line arguments into two-tuples of config key/value pairs.
18 """
19 arg = arg.lstrip('--')
20 option = True
21 if '=' in arg:
22 arg, option = arg.split('=', 1)
23 return (arg.replace('-', '_'), option)
24
25
26 def main(cmd, args, options=None):
27 """
28 Build the documentation, and optionally start the devserver.
29 """
30 clean_site_dir = 'clean' in options
31 if cmd == 'serve':
32 config = load_config(options=options)
33 serve(config, options=options)
34 elif cmd == 'build':
35 config = load_config(options=options)
36 build(config, clean_site_dir=clean_site_dir)
37 elif cmd == 'json':
38 config = load_config(options=options)
39 build(config, dump_json=True, clean_site_dir=clean_site_dir)
40 elif cmd == 'gh-deploy':
41 config = load_config(options=options)
42 build(config, clean_site_dir=clean_site_dir)
43 gh_deploy(config)
44 elif cmd == 'new':
45 new(args, options)
46 else:
47 print('mkdocs [help|new|build|serve|gh-deploy|json] {options}')
48
49
50 def run_main():
51 """
52 Invokes main() with the contents of sys.argv
53
54 This is a separate function so it can be invoked
55 by a setuptools console_script.
56 """
57 cmd = sys.argv[1] if len(sys.argv) >= 2 else None
58 opts = [arg_to_option(arg) for arg in sys.argv[2:] if arg.startswith('--')]
59 try:
60 main(cmd, args=sys.argv[2:], options=dict(opts))
61 except ConfigurationError as e:
62 print(e.args[0], file=sys.stderr)
63
64
65 if __name__ == '__main__':
66 run_main()
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mkdocs/main.py b/mkdocs/main.py
--- a/mkdocs/main.py
+++ b/mkdocs/main.py
@@ -4,6 +4,7 @@
import sys
+from mkdocs import __version__
from mkdocs.build import build
from mkdocs.config import load_config
from mkdocs.exceptions import ConfigurationError
@@ -44,6 +45,7 @@
elif cmd == 'new':
new(args, options)
else:
+ print('MkDocs (version {0})'.format(__version__))
print('mkdocs [help|new|build|serve|gh-deploy|json] {options}')
| {"golden_diff": "diff --git a/mkdocs/main.py b/mkdocs/main.py\n--- a/mkdocs/main.py\n+++ b/mkdocs/main.py\n@@ -4,6 +4,7 @@\n \n import sys\n \n+from mkdocs import __version__\n from mkdocs.build import build\n from mkdocs.config import load_config\n from mkdocs.exceptions import ConfigurationError\n@@ -44,6 +45,7 @@\n elif cmd == 'new':\n new(args, options)\n else:\n+ print('MkDocs (version {0})'.format(__version__))\n print('mkdocs [help|new|build|serve|gh-deploy|json] {options}')\n", "issue": "Show installed version in command-line\nI'd expect `mkdocs help` to display the currently installed version, would be nice to have for the next versions\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding: utf-8\nfrom __future__ import print_function\n\nimport sys\n\nfrom mkdocs.build import build\nfrom mkdocs.config import load_config\nfrom mkdocs.exceptions import ConfigurationError\nfrom mkdocs.gh_deploy import gh_deploy\nfrom mkdocs.new import new\nfrom mkdocs.serve import serve\n\n\ndef arg_to_option(arg):\n \"\"\"\n Convert command line arguments into two-tuples of config key/value pairs.\n \"\"\"\n arg = arg.lstrip('--')\n option = True\n if '=' in arg:\n arg, option = arg.split('=', 1)\n return (arg.replace('-', '_'), option)\n\n\ndef main(cmd, args, options=None):\n \"\"\"\n Build the documentation, and optionally start the devserver.\n \"\"\"\n clean_site_dir = 'clean' in options\n if cmd == 'serve':\n config = load_config(options=options)\n serve(config, options=options)\n elif cmd == 'build':\n config = load_config(options=options)\n build(config, clean_site_dir=clean_site_dir)\n elif cmd == 'json':\n config = load_config(options=options)\n build(config, dump_json=True, clean_site_dir=clean_site_dir)\n elif cmd == 'gh-deploy':\n config = load_config(options=options)\n build(config, clean_site_dir=clean_site_dir)\n gh_deploy(config)\n elif cmd == 'new':\n new(args, options)\n else:\n print('mkdocs [help|new|build|serve|gh-deploy|json] {options}')\n\n\ndef run_main():\n \"\"\"\n Invokes main() with the contents of sys.argv\n\n This is a separate function so it can be invoked\n by a setuptools console_script.\n \"\"\"\n cmd = sys.argv[1] if len(sys.argv) >= 2 else None\n opts = [arg_to_option(arg) for arg in sys.argv[2:] if arg.startswith('--')]\n try:\n main(cmd, args=sys.argv[2:], options=dict(opts))\n except ConfigurationError as e:\n print(e.args[0], file=sys.stderr)\n\n\nif __name__ == '__main__':\n run_main()\n", "path": "mkdocs/main.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# coding: utf-8\nfrom __future__ import print_function\n\nimport sys\n\nfrom mkdocs import __version__\nfrom mkdocs.build import build\nfrom mkdocs.config import load_config\nfrom mkdocs.exceptions import ConfigurationError\nfrom mkdocs.gh_deploy import gh_deploy\nfrom mkdocs.new import new\nfrom mkdocs.serve import serve\n\n\ndef arg_to_option(arg):\n \"\"\"\n Convert command line arguments into two-tuples of config key/value pairs.\n \"\"\"\n arg = arg.lstrip('--')\n option = True\n if '=' in arg:\n arg, option = arg.split('=', 1)\n return (arg.replace('-', '_'), option)\n\n\ndef main(cmd, args, options=None):\n \"\"\"\n Build the documentation, and optionally start the devserver.\n \"\"\"\n clean_site_dir = 'clean' in options\n if cmd == 'serve':\n config = load_config(options=options)\n serve(config, options=options)\n elif cmd == 'build':\n config = load_config(options=options)\n build(config, clean_site_dir=clean_site_dir)\n elif cmd == 'json':\n config = 
load_config(options=options)\n build(config, dump_json=True, clean_site_dir=clean_site_dir)\n elif cmd == 'gh-deploy':\n config = load_config(options=options)\n build(config, clean_site_dir=clean_site_dir)\n gh_deploy(config)\n elif cmd == 'new':\n new(args, options)\n else:\n print('MkDocs (version {0})'.format(__version__))\n print('mkdocs [help|new|build|serve|gh-deploy|json] {options}')\n\n\ndef run_main():\n \"\"\"\n Invokes main() with the contents of sys.argv\n\n This is a separate function so it can be invoked\n by a setuptools console_script.\n \"\"\"\n cmd = sys.argv[1] if len(sys.argv) >= 2 else None\n opts = [arg_to_option(arg) for arg in sys.argv[2:] if arg.startswith('--')]\n try:\n main(cmd, args=sys.argv[2:], options=dict(opts))\n except ConfigurationError as e:\n print(e.args[0], file=sys.stderr)\n\n\nif __name__ == '__main__':\n run_main()\n", "path": "mkdocs/main.py"}]} | 881 | 145 |
gh_patches_debug_30007 | rasdani/github-patches | git_diff | vispy__vispy-335 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adding `elapsed` property to Timer event?
`event.elapsed` would be a shortcut to ~~`event.dt * event.iteration`~~. Actually it's a bit more complicated because `event.dt` is not constant, so it should rather be the sum of all `event.dt`s.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vispy/app/timer.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2014, Vispy Development Team.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
4
5 from __future__ import division
6
7 from ..util.event import Event, EmitterGroup
8 from ..util.ptime import time as precision_time
9 from ..ext.six import string_types
10 from .base import BaseTimerBackend as TimerBackend # noqa
11 from . import use_app, Application
12
13
14 class Timer(object):
15
16 """Timer used to schedule events in the future or on a repeating schedule
17
18 Parameters
19 ----------
20 interval : float
21 Time between events.
22 connect : function | None
23 The function to call.
24 iterations : int
25 Number of iterations. Can be -1 for infinite.
26 start : bool
27 Whether to start the timer.
28 app : instance of vispy.app.Application
29 The application to attach the timer to.
30 """
31
32 def __init__(self, interval=0.0, connect=None, iterations=-1, start=False,
33 app=None):
34 self.events = EmitterGroup(source=self,
35 start=Event,
36 stop=Event,
37 timeout=Event)
38 #self.connect = self.events.timeout.connect
39 #self.disconnect = self.events.timeout.disconnect
40
41 # Get app instance
42 if app is None:
43 self._app = use_app()
44 elif isinstance(app, Application):
45 self._app = app
46 elif isinstance(app, string_types):
47 self._app = Application(app)
48 else:
49 raise ValueError('Invalid value for app %r' % app)
50
51 # Ensure app has backend app object
52 self._app.native
53
54 # Instantiate the backed with the right class
55 self._backend = self._app.backend_module.TimerBackend(self)
56
57 self._interval = interval
58 self._running = False
59 self._last_emit_time = None
60 self.iter_count = 0
61 self.max_iterations = iterations
62 if connect is not None:
63 self.connect(connect)
64 if start:
65 self.start()
66
67 @property
68 def app(self):
69 """ The vispy Application instance on which this Timer is based.
70 """
71 return self._app
72
73 @property
74 def interval(self):
75 return self._interval
76
77 @interval.setter
78 def interval(self, val):
79 self._interval = val
80 if self.running:
81 self.stop()
82 self.start()
83
84 @property
85 def running(self):
86 return self._running
87
88 def start(self, interval=None, iterations=None):
89 """Start the timer.
90
91 A timeout event will be generated every *interval* seconds.
92 If *interval* is None, then self.interval will be used.
93
94 If *iterations* is specified, the timer will stop after
95 emitting that number of events. If unspecified, then
96 the previous value of self.iterations will be used. If the value is
97 negative, then the timer will continue running until stop() is called.
98 """
99 self.iter_count = 0
100 if interval is not None:
101 self.interval = interval
102 if iterations is not None:
103 self.max_iterations = iterations
104 self._backend._vispy_start(self.interval)
105 self._running = True
106 self._last_emit_time = precision_time()
107 self.events.start(type='timer_start')
108
109 def stop(self):
110 """Stop the timer."""
111 self._backend._vispy_stop()
112 self._running = False
113 self.events.stop(type='timer_stop')
114
115 # use timer.app.run() and .quit() instead.
116 # def run_event_loop(self):
117 #"""Execute the event loop for this Timer's backend.
118 #"""
119 # return self._backend._vispy_run()
120
121 # def quit_event_loop(self):
122 #"""Exit the event loop for this Timer's backend.
123 #"""
124 # return self._backend._vispy_quit()
125
126 @property
127 def native(self):
128 """ The native timer on which this Timer is based.
129 """
130 return self._backend._vispy_get_native_timer()
131
132 def _timeout(self, *args):
133 # called when the backend timer has triggered.
134 if not self.running:
135 return
136 if self.max_iterations >= 0 and self.iter_count >= self.max_iterations:
137 self.stop()
138 return
139
140 # compute dt since last event
141 now = precision_time()
142 dt = now - self._last_emit_time
143 self._last_emit_time = now
144
145 self.events.timeout(
146 type='timer_timeout',
147 iteration=self.iter_count,
148 dt=dt)
149 self.iter_count += 1
150
151 def connect(self, callback):
152 """ Alias for self.events.timeout.connect() """
153 return self.events.timeout.connect(callback)
154
155 def disconnect(self, callback=None):
156 """ Alias for self.events.timeout.disconnect() """
157 return self.events.timeout.disconnect(callback)
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vispy/app/timer.py b/vispy/app/timer.py
--- a/vispy/app/timer.py
+++ b/vispy/app/timer.py
@@ -56,6 +56,7 @@
self._interval = interval
self._running = False
+ self._first_emit_time = None
self._last_emit_time = None
self.iter_count = 0
self.max_iterations = iterations
@@ -81,6 +82,10 @@
self.stop()
self.start()
+ @property
+ def elapsed(self):
+ return precision_time() - self._first_emit_time
+
@property
def running(self):
return self._running
@@ -103,6 +108,7 @@
self.max_iterations = iterations
self._backend._vispy_start(self.interval)
self._running = True
+ self._first_emit_time = precision_time()
self._last_emit_time = precision_time()
self.events.start(type='timer_start')
@@ -140,11 +146,13 @@
# compute dt since last event
now = precision_time()
dt = now - self._last_emit_time
+ elapsed = now - self._first_emit_time
self._last_emit_time = now
self.events.timeout(
type='timer_timeout',
iteration=self.iter_count,
+ elapsed=elapsed,
dt=dt)
self.iter_count += 1
| {"golden_diff": "diff --git a/vispy/app/timer.py b/vispy/app/timer.py\n--- a/vispy/app/timer.py\n+++ b/vispy/app/timer.py\n@@ -56,6 +56,7 @@\n \n self._interval = interval\n self._running = False\n+ self._first_emit_time = None\n self._last_emit_time = None\n self.iter_count = 0\n self.max_iterations = iterations\n@@ -81,6 +82,10 @@\n self.stop()\n self.start()\n \n+ @property\n+ def elapsed(self):\n+ return precision_time() - self._first_emit_time\n+\n @property\n def running(self):\n return self._running\n@@ -103,6 +108,7 @@\n self.max_iterations = iterations\n self._backend._vispy_start(self.interval)\n self._running = True\n+ self._first_emit_time = precision_time()\n self._last_emit_time = precision_time()\n self.events.start(type='timer_start')\n \n@@ -140,11 +146,13 @@\n # compute dt since last event\n now = precision_time()\n dt = now - self._last_emit_time\n+ elapsed = now - self._first_emit_time\n self._last_emit_time = now\n \n self.events.timeout(\n type='timer_timeout',\n iteration=self.iter_count,\n+ elapsed=elapsed,\n dt=dt)\n self.iter_count += 1\n", "issue": "Adding `elapsed` property to Timer event?\n`event.elapsed` would be a shortcut to ~~`event.dt * event.iteration`~~. Actually it's a bit more complicated because `event.dt` is not constant, so it should rather be the sum of all `event.dt`s.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\nfrom __future__ import division\n\nfrom ..util.event import Event, EmitterGroup\nfrom ..util.ptime import time as precision_time\nfrom ..ext.six import string_types\nfrom .base import BaseTimerBackend as TimerBackend # noqa\nfrom . import use_app, Application\n\n\nclass Timer(object):\n\n \"\"\"Timer used to schedule events in the future or on a repeating schedule\n\n Parameters\n ----------\n interval : float\n Time between events.\n connect : function | None\n The function to call.\n iterations : int\n Number of iterations. 
Can be -1 for infinite.\n start : bool\n Whether to start the timer.\n app : instance of vispy.app.Application\n The application to attach the timer to.\n \"\"\"\n\n def __init__(self, interval=0.0, connect=None, iterations=-1, start=False,\n app=None):\n self.events = EmitterGroup(source=self,\n start=Event,\n stop=Event,\n timeout=Event)\n #self.connect = self.events.timeout.connect\n #self.disconnect = self.events.timeout.disconnect\n\n # Get app instance\n if app is None:\n self._app = use_app()\n elif isinstance(app, Application):\n self._app = app\n elif isinstance(app, string_types):\n self._app = Application(app)\n else:\n raise ValueError('Invalid value for app %r' % app)\n \n # Ensure app has backend app object\n self._app.native\n \n # Instantiate the backed with the right class\n self._backend = self._app.backend_module.TimerBackend(self)\n\n self._interval = interval\n self._running = False\n self._last_emit_time = None\n self.iter_count = 0\n self.max_iterations = iterations\n if connect is not None:\n self.connect(connect)\n if start:\n self.start()\n\n @property\n def app(self):\n \"\"\" The vispy Application instance on which this Timer is based.\n \"\"\"\n return self._app\n\n @property\n def interval(self):\n return self._interval\n\n @interval.setter\n def interval(self, val):\n self._interval = val\n if self.running:\n self.stop()\n self.start()\n\n @property\n def running(self):\n return self._running\n\n def start(self, interval=None, iterations=None):\n \"\"\"Start the timer.\n\n A timeout event will be generated every *interval* seconds.\n If *interval* is None, then self.interval will be used.\n\n If *iterations* is specified, the timer will stop after\n emitting that number of events. If unspecified, then\n the previous value of self.iterations will be used. 
If the value is\n negative, then the timer will continue running until stop() is called.\n \"\"\"\n self.iter_count = 0\n if interval is not None:\n self.interval = interval\n if iterations is not None:\n self.max_iterations = iterations\n self._backend._vispy_start(self.interval)\n self._running = True\n self._last_emit_time = precision_time()\n self.events.start(type='timer_start')\n\n def stop(self):\n \"\"\"Stop the timer.\"\"\"\n self._backend._vispy_stop()\n self._running = False\n self.events.stop(type='timer_stop')\n\n # use timer.app.run() and .quit() instead.\n # def run_event_loop(self):\n #\"\"\"Execute the event loop for this Timer's backend.\n #\"\"\"\n # return self._backend._vispy_run()\n\n # def quit_event_loop(self):\n #\"\"\"Exit the event loop for this Timer's backend.\n #\"\"\"\n # return self._backend._vispy_quit()\n\n @property\n def native(self):\n \"\"\" The native timer on which this Timer is based.\n \"\"\"\n return self._backend._vispy_get_native_timer()\n\n def _timeout(self, *args):\n # called when the backend timer has triggered.\n if not self.running:\n return\n if self.max_iterations >= 0 and self.iter_count >= self.max_iterations:\n self.stop()\n return\n\n # compute dt since last event\n now = precision_time()\n dt = now - self._last_emit_time\n self._last_emit_time = now\n\n self.events.timeout(\n type='timer_timeout',\n iteration=self.iter_count,\n dt=dt)\n self.iter_count += 1\n\n def connect(self, callback):\n \"\"\" Alias for self.events.timeout.connect() \"\"\"\n return self.events.timeout.connect(callback)\n\n def disconnect(self, callback=None):\n \"\"\" Alias for self.events.timeout.disconnect() \"\"\"\n return self.events.timeout.disconnect(callback)\n", "path": "vispy/app/timer.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\nfrom __future__ import division\n\nfrom ..util.event import Event, EmitterGroup\nfrom ..util.ptime import time as precision_time\nfrom ..ext.six import string_types\nfrom .base import BaseTimerBackend as TimerBackend # noqa\nfrom . import use_app, Application\n\n\nclass Timer(object):\n\n \"\"\"Timer used to schedule events in the future or on a repeating schedule\n\n Parameters\n ----------\n interval : float\n Time between events.\n connect : function | None\n The function to call.\n iterations : int\n Number of iterations. 
Can be -1 for infinite.\n start : bool\n Whether to start the timer.\n app : instance of vispy.app.Application\n The application to attach the timer to.\n \"\"\"\n\n def __init__(self, interval=0.0, connect=None, iterations=-1, start=False,\n app=None):\n self.events = EmitterGroup(source=self,\n start=Event,\n stop=Event,\n timeout=Event)\n #self.connect = self.events.timeout.connect\n #self.disconnect = self.events.timeout.disconnect\n\n # Get app instance\n if app is None:\n self._app = use_app()\n elif isinstance(app, Application):\n self._app = app\n elif isinstance(app, string_types):\n self._app = Application(app)\n else:\n raise ValueError('Invalid value for app %r' % app)\n \n # Ensure app has backend app object\n self._app.native\n \n # Instantiate the backed with the right class\n self._backend = self._app.backend_module.TimerBackend(self)\n\n self._interval = interval\n self._running = False\n self._first_emit_time = None\n self._last_emit_time = None\n self.iter_count = 0\n self.max_iterations = iterations\n if connect is not None:\n self.connect(connect)\n if start:\n self.start()\n\n @property\n def app(self):\n \"\"\" The vispy Application instance on which this Timer is based.\n \"\"\"\n return self._app\n\n @property\n def interval(self):\n return self._interval\n\n @interval.setter\n def interval(self, val):\n self._interval = val\n if self.running:\n self.stop()\n self.start()\n\n @property\n def elapsed(self):\n return precision_time() - self._first_emit_time\n\n @property\n def running(self):\n return self._running\n\n def start(self, interval=None, iterations=None):\n \"\"\"Start the timer.\n\n A timeout event will be generated every *interval* seconds.\n If *interval* is None, then self.interval will be used.\n\n If *iterations* is specified, the timer will stop after\n emitting that number of events. If unspecified, then\n the previous value of self.iterations will be used. 
If the value is\n negative, then the timer will continue running until stop() is called.\n \"\"\"\n self.iter_count = 0\n if interval is not None:\n self.interval = interval\n if iterations is not None:\n self.max_iterations = iterations\n self._backend._vispy_start(self.interval)\n self._running = True\n self._first_emit_time = precision_time()\n self._last_emit_time = precision_time()\n self.events.start(type='timer_start')\n\n def stop(self):\n \"\"\"Stop the timer.\"\"\"\n self._backend._vispy_stop()\n self._running = False\n self.events.stop(type='timer_stop')\n\n # use timer.app.run() and .quit() instead.\n # def run_event_loop(self):\n #\"\"\"Execute the event loop for this Timer's backend.\n #\"\"\"\n # return self._backend._vispy_run()\n\n # def quit_event_loop(self):\n #\"\"\"Exit the event loop for this Timer's backend.\n #\"\"\"\n # return self._backend._vispy_quit()\n\n @property\n def native(self):\n \"\"\" The native timer on which this Timer is based.\n \"\"\"\n return self._backend._vispy_get_native_timer()\n\n def _timeout(self, *args):\n # called when the backend timer has triggered.\n if not self.running:\n return\n if self.max_iterations >= 0 and self.iter_count >= self.max_iterations:\n self.stop()\n return\n\n # compute dt since last event\n now = precision_time()\n dt = now - self._last_emit_time\n elapsed = now - self._first_emit_time\n self._last_emit_time = now\n\n self.events.timeout(\n type='timer_timeout',\n iteration=self.iter_count,\n elapsed=elapsed,\n dt=dt)\n self.iter_count += 1\n\n def connect(self, callback):\n \"\"\" Alias for self.events.timeout.connect() \"\"\"\n return self.events.timeout.connect(callback)\n\n def disconnect(self, callback=None):\n \"\"\" Alias for self.events.timeout.disconnect() \"\"\"\n return self.events.timeout.disconnect(callback)\n", "path": "vispy/app/timer.py"}]} | 1,738 | 331 |
gh_patches_debug_15129 | rasdani/github-patches | git_diff | fossasia__open-event-server-5346 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sort message-settings by id
**Describe the bug**
<!-- A clear and concise description of what the bug is. -->
Sort message-settings by id
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
**Stacktrace**
<!-- If applicable, add stacktrace to help explain your problem. -->
**Additional details (please complete the following information):**
- OS: [e.g. MacOS, Ubuntu, CentOS]
- Python Version [e.g. `3.5`, `3.6`]
- `HEAD` Commit hash [e.g. `4629c62`]
**Additional context**
<!-- Add any other context about the problem here. -->
**Wanna work on this issue**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/message_settings.py`
Content:
```
1 from flask_rest_jsonapi import ResourceDetail, ResourceList
2
3 from app.api.bootstrap import api
4 from app.api.schema.message_settings import MessageSettingSchema
5 from app.models import db
6 from app.models.message_setting import MessageSettings
7
8
9 class MessageSettingsList(ResourceList):
10 """
11 List Events Role Permission
12 """
13 decorators = (api.has_permission('is_admin', methods="GET"),)
14 methods = ['GET']
15 schema = MessageSettingSchema
16 data_layer = {'session': db.session,
17 'model': MessageSettings}
18
19
20 class MessageSettingsDetail(ResourceDetail):
21 """
22 Events Role Permission detail by id
23 """
24 schema = MessageSettingSchema
25 decorators = (api.has_permission('is_admin', methods="PATCH"),)
26 methods = ['GET', 'PATCH']
27 data_layer = {'session': db.session,
28 'model': MessageSettings}
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/message_settings.py b/app/api/message_settings.py
--- a/app/api/message_settings.py
+++ b/app/api/message_settings.py
@@ -10,11 +10,23 @@
"""
List Events Role Permission
"""
+ def query(self, view_kwargs):
+ """
+ query method for Message Setting List
+ :param view_kwargs:
+ :return:
+ """
+ query_ = db.session.query(MessageSettings).order_by(MessageSettings.id)
+ return query_
+
decorators = (api.has_permission('is_admin', methods="GET"),)
methods = ['GET']
schema = MessageSettingSchema
data_layer = {'session': db.session,
- 'model': MessageSettings}
+ 'model': MessageSettings,
+ 'methods': {
+ 'query': query
+ }}
class MessageSettingsDetail(ResourceDetail):
| {"golden_diff": "diff --git a/app/api/message_settings.py b/app/api/message_settings.py\n--- a/app/api/message_settings.py\n+++ b/app/api/message_settings.py\n@@ -10,11 +10,23 @@\n \"\"\"\n List Events Role Permission\n \"\"\"\n+ def query(self, view_kwargs):\n+ \"\"\"\n+ query method for Message Setting List\n+ :param view_kwargs:\n+ :return:\n+ \"\"\"\n+ query_ = db.session.query(MessageSettings).order_by(MessageSettings.id)\n+ return query_\n+\n decorators = (api.has_permission('is_admin', methods=\"GET\"),)\n methods = ['GET']\n schema = MessageSettingSchema\n data_layer = {'session': db.session,\n- 'model': MessageSettings}\n+ 'model': MessageSettings,\n+ 'methods': {\n+ 'query': query\n+ }}\n \n \n class MessageSettingsDetail(ResourceDetail):\n", "issue": "Sort message-settings by id\n**Describe the bug**\r\n<!-- A clear and concise description of what the bug is. -->\r\nSort message-settings by id\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to '...'\r\n2. Click on '....'\r\n3. Scroll down to '....'\r\n4. See error\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n**Stacktrace**\r\n<!-- If applicable, add stacktrace to help explain your problem. -->\r\n\r\n**Additional details (please complete the following information):**\r\n - OS: [e.g. MacOS, Ubuntu, CentOS]\r\n - Python Version [e.g. `3.5`, `3.6`]\r\n - `HEAD` Commit hash [e.g. `4629c62`]\r\n\r\n**Additional context**\r\n<!-- Add any other context about the problem here. -->\r\n**Wanna work on this issue**\n", "before_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList\n\nfrom app.api.bootstrap import api\nfrom app.api.schema.message_settings import MessageSettingSchema\nfrom app.models import db\nfrom app.models.message_setting import MessageSettings\n\n\nclass MessageSettingsList(ResourceList):\n \"\"\"\n List Events Role Permission\n \"\"\"\n decorators = (api.has_permission('is_admin', methods=\"GET\"),)\n methods = ['GET']\n schema = MessageSettingSchema\n data_layer = {'session': db.session,\n 'model': MessageSettings}\n\n\nclass MessageSettingsDetail(ResourceDetail):\n \"\"\"\n Events Role Permission detail by id\n \"\"\"\n schema = MessageSettingSchema\n decorators = (api.has_permission('is_admin', methods=\"PATCH\"),)\n methods = ['GET', 'PATCH']\n data_layer = {'session': db.session,\n 'model': MessageSettings}\n", "path": "app/api/message_settings.py"}], "after_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList\n\nfrom app.api.bootstrap import api\nfrom app.api.schema.message_settings import MessageSettingSchema\nfrom app.models import db\nfrom app.models.message_setting import MessageSettings\n\n\nclass MessageSettingsList(ResourceList):\n \"\"\"\n List Events Role Permission\n \"\"\"\n def query(self, view_kwargs):\n \"\"\"\n query method for Message Setting List\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = db.session.query(MessageSettings).order_by(MessageSettings.id)\n return query_\n\n decorators = (api.has_permission('is_admin', methods=\"GET\"),)\n methods = ['GET']\n schema = MessageSettingSchema\n data_layer = {'session': db.session,\n 'model': MessageSettings,\n 'methods': {\n 'query': query\n }}\n\n\nclass MessageSettingsDetail(ResourceDetail):\n \"\"\"\n Events Role Permission detail by id\n \"\"\"\n schema = MessageSettingSchema\n decorators = (api.has_permission('is_admin', methods=\"PATCH\"),)\n methods = ['GET', 'PATCH']\n data_layer = {'session': 
db.session,\n 'model': MessageSettings}\n", "path": "app/api/message_settings.py"}]} | 678 | 194 |
gh_patches_debug_67410 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-4066 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mitmweb fails with addons/options-configure.py example.
I am new to mitmproxy and followed the official [demo][1], but it isn't working:
```python
Proxy server listening at http://*:8888
ERROR:tornado.application:Uncaught exception GET /options.json (127.0.0.1)
HTTPServerRequest(protocol='http', host='127.0.0.1:8081', method='GET', uri='/options.json', version='HTTP/1.1', remote_ip='127.0.0.1')
Traceback (most recent call last):
File "c:\users\jekoie\appdata\local\programs\python\python37-32\lib\site-packages\tornado\web.py", line 1697, in _execute
result = method(*self.path_args, **self.path_kwargs)
File "c:\users\jekoie\appdata\local\programs\python\python37-32\lib\site-packages\mitmproxy\tools\web\app.py", line 453, in get
self.write(optmanager.dump_dicts(self.master.options))
File "c:\users\jekoie\appdata\local\programs\python\python37-32\lib\site-packages\mitmproxy\optmanager.py", line 469, in dump_dicts
t = typecheck.typespec_to_str(o.typespec)
File "c:\users\jekoie\appdata\local\programs\python\python37-32\lib\site-packages\mitmproxy\utils\typecheck.py", line 85, in typespec_to_str
raise NotImplementedError
NotImplementedError
ERROR:tornado.access:500 GET /options.json (127.0.0.1) 3.91ms
````
[1]: https://docs.mitmproxy.org/stable/addons-options/#handling-configuration-updates
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/utils/typecheck.py`
Content:
```
1 import typing
2
3 Type = typing.Union[
4 typing.Any # anything more elaborate really fails with mypy at the moment.
5 ]
6
7
8 def sequence_type(typeinfo: typing.Type[typing.List]) -> Type:
9 """Return the type of a sequence, e.g. typing.List"""
10 return typeinfo.__args__[0] # type: ignore
11
12
13 def tuple_types(typeinfo: typing.Type[typing.Tuple]) -> typing.Sequence[Type]:
14 """Return the types of a typing.Tuple"""
15 return typeinfo.__args__ # type: ignore
16
17
18 def union_types(typeinfo: typing.Type[typing.Tuple]) -> typing.Sequence[Type]:
19 """return the types of a typing.Union"""
20 return typeinfo.__args__ # type: ignore
21
22
23 def mapping_types(typeinfo: typing.Type[typing.Mapping]) -> typing.Tuple[Type, Type]:
24 """return the types of a mapping, e.g. typing.Dict"""
25 return typeinfo.__args__ # type: ignore
26
27
28 def check_option_type(name: str, value: typing.Any, typeinfo: Type) -> None:
29 """
30 Check if the provided value is an instance of typeinfo and raises a
31 TypeError otherwise. This function supports only those types required for
32 options.
33 """
34 e = TypeError("Expected {} for {}, but got {}.".format(
35 typeinfo,
36 name,
37 type(value)
38 ))
39
40 typename = str(typeinfo)
41
42 if typename.startswith("typing.Union"):
43 for T in union_types(typeinfo):
44 try:
45 check_option_type(name, value, T)
46 except TypeError:
47 pass
48 else:
49 return
50 raise e
51 elif typename.startswith("typing.Tuple"):
52 types = tuple_types(typeinfo)
53 if not isinstance(value, (tuple, list)):
54 raise e
55 if len(types) != len(value):
56 raise e
57 for i, (x, T) in enumerate(zip(value, types)):
58 check_option_type("{}[{}]".format(name, i), x, T)
59 return
60 elif typename.startswith("typing.Sequence"):
61 T = sequence_type(typeinfo)
62 if not isinstance(value, (tuple, list)):
63 raise e
64 for v in value:
65 check_option_type(name, v, T)
66 elif typename.startswith("typing.IO"):
67 if hasattr(value, "read"):
68 return
69 else:
70 raise e
71 elif typename.startswith("typing.Any"):
72 return
73 elif not isinstance(value, typeinfo):
74 raise e
75
76
77 def typespec_to_str(typespec: typing.Any) -> str:
78 if typespec in (str, int, bool):
79 t = typespec.__name__
80 elif typespec == typing.Optional[str]:
81 t = 'optional str'
82 elif typespec == typing.Sequence[str]:
83 t = 'sequence of str'
84 else:
85 raise NotImplementedError
86 return t
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/utils/typecheck.py b/mitmproxy/utils/typecheck.py
--- a/mitmproxy/utils/typecheck.py
+++ b/mitmproxy/utils/typecheck.py
@@ -81,6 +81,8 @@
t = 'optional str'
elif typespec == typing.Sequence[str]:
t = 'sequence of str'
+ elif typespec == typing.Optional[int]:
+ t = 'optional int'
else:
raise NotImplementedError
return t
| {"golden_diff": "diff --git a/mitmproxy/utils/typecheck.py b/mitmproxy/utils/typecheck.py\n--- a/mitmproxy/utils/typecheck.py\n+++ b/mitmproxy/utils/typecheck.py\n@@ -81,6 +81,8 @@\n t = 'optional str'\n elif typespec == typing.Sequence[str]:\n t = 'sequence of str'\n+ elif typespec == typing.Optional[int]:\n+ t = 'optional int'\n else:\n raise NotImplementedError\n return t\n", "issue": "Mitmweb fails with addons/options-configure.py example.\nI am new to learn it, but i follow official [demo][1], it can't working?\r\n```python\r\nProxy server listening at http://*:8888\r\nERROR:tornado.application:Uncaught exception GET /options.json (127.0.0.1)\r\nHTTPServerRequest(protocol='http', host='127.0.0.1:8081', method='GET', uri='/options.json', version='HTTP/1.1', remote_ip='127.0.0.1')\r\nTraceback (most recent call last):\r\n File \"c:\\users\\jekoie\\appdata\\local\\programs\\python\\python37-32\\lib\\site-packages\\tornado\\web.py\", line 1697, in _execute\r\n result = method(*self.path_args, **self.path_kwargs)\r\n File \"c:\\users\\jekoie\\appdata\\local\\programs\\python\\python37-32\\lib\\site-packages\\mitmproxy\\tools\\web\\app.py\", line 453, in get\r\n self.write(optmanager.dump_dicts(self.master.options))\r\n File \"c:\\users\\jekoie\\appdata\\local\\programs\\python\\python37-32\\lib\\site-packages\\mitmproxy\\optmanager.py\", line 469, in dump_dicts\r\n t = typecheck.typespec_to_str(o.typespec)\r\n File \"c:\\users\\jekoie\\appdata\\local\\programs\\python\\python37-32\\lib\\site-packages\\mitmproxy\\utils\\typecheck.py\", line 85, in typespec_to_str\r\n raise NotImplementedError\r\nNotImplementedError\r\nERROR:tornado.access:500 GET /options.json (127.0.0.1) 3.91ms\r\n````\r\n\r\n[1]: https://docs.mitmproxy.org/stable/addons-options/#handling-configuration-updates\n", "before_files": [{"content": "import typing\n\nType = typing.Union[\n typing.Any # anything more elaborate really fails with mypy at the moment.\n]\n\n\ndef sequence_type(typeinfo: typing.Type[typing.List]) -> Type:\n \"\"\"Return the type of a sequence, e.g. typing.List\"\"\"\n return typeinfo.__args__[0] # type: ignore\n\n\ndef tuple_types(typeinfo: typing.Type[typing.Tuple]) -> typing.Sequence[Type]:\n \"\"\"Return the types of a typing.Tuple\"\"\"\n return typeinfo.__args__ # type: ignore\n\n\ndef union_types(typeinfo: typing.Type[typing.Tuple]) -> typing.Sequence[Type]:\n \"\"\"return the types of a typing.Union\"\"\"\n return typeinfo.__args__ # type: ignore\n\n\ndef mapping_types(typeinfo: typing.Type[typing.Mapping]) -> typing.Tuple[Type, Type]:\n \"\"\"return the types of a mapping, e.g. typing.Dict\"\"\"\n return typeinfo.__args__ # type: ignore\n\n\ndef check_option_type(name: str, value: typing.Any, typeinfo: Type) -> None:\n \"\"\"\n Check if the provided value is an instance of typeinfo and raises a\n TypeError otherwise. 
This function supports only those types required for\n options.\n \"\"\"\n e = TypeError(\"Expected {} for {}, but got {}.\".format(\n typeinfo,\n name,\n type(value)\n ))\n\n typename = str(typeinfo)\n\n if typename.startswith(\"typing.Union\"):\n for T in union_types(typeinfo):\n try:\n check_option_type(name, value, T)\n except TypeError:\n pass\n else:\n return\n raise e\n elif typename.startswith(\"typing.Tuple\"):\n types = tuple_types(typeinfo)\n if not isinstance(value, (tuple, list)):\n raise e\n if len(types) != len(value):\n raise e\n for i, (x, T) in enumerate(zip(value, types)):\n check_option_type(\"{}[{}]\".format(name, i), x, T)\n return\n elif typename.startswith(\"typing.Sequence\"):\n T = sequence_type(typeinfo)\n if not isinstance(value, (tuple, list)):\n raise e\n for v in value:\n check_option_type(name, v, T)\n elif typename.startswith(\"typing.IO\"):\n if hasattr(value, \"read\"):\n return\n else:\n raise e\n elif typename.startswith(\"typing.Any\"):\n return\n elif not isinstance(value, typeinfo):\n raise e\n\n\ndef typespec_to_str(typespec: typing.Any) -> str:\n if typespec in (str, int, bool):\n t = typespec.__name__\n elif typespec == typing.Optional[str]:\n t = 'optional str'\n elif typespec == typing.Sequence[str]:\n t = 'sequence of str'\n else:\n raise NotImplementedError\n return t\n", "path": "mitmproxy/utils/typecheck.py"}], "after_files": [{"content": "import typing\n\nType = typing.Union[\n typing.Any # anything more elaborate really fails with mypy at the moment.\n]\n\n\ndef sequence_type(typeinfo: typing.Type[typing.List]) -> Type:\n \"\"\"Return the type of a sequence, e.g. typing.List\"\"\"\n return typeinfo.__args__[0] # type: ignore\n\n\ndef tuple_types(typeinfo: typing.Type[typing.Tuple]) -> typing.Sequence[Type]:\n \"\"\"Return the types of a typing.Tuple\"\"\"\n return typeinfo.__args__ # type: ignore\n\n\ndef union_types(typeinfo: typing.Type[typing.Tuple]) -> typing.Sequence[Type]:\n \"\"\"return the types of a typing.Union\"\"\"\n return typeinfo.__args__ # type: ignore\n\n\ndef mapping_types(typeinfo: typing.Type[typing.Mapping]) -> typing.Tuple[Type, Type]:\n \"\"\"return the types of a mapping, e.g. typing.Dict\"\"\"\n return typeinfo.__args__ # type: ignore\n\n\ndef check_option_type(name: str, value: typing.Any, typeinfo: Type) -> None:\n \"\"\"\n Check if the provided value is an instance of typeinfo and raises a\n TypeError otherwise. 
This function supports only those types required for\n options.\n \"\"\"\n e = TypeError(\"Expected {} for {}, but got {}.\".format(\n typeinfo,\n name,\n type(value)\n ))\n\n typename = str(typeinfo)\n\n if typename.startswith(\"typing.Union\"):\n for T in union_types(typeinfo):\n try:\n check_option_type(name, value, T)\n except TypeError:\n pass\n else:\n return\n raise e\n elif typename.startswith(\"typing.Tuple\"):\n types = tuple_types(typeinfo)\n if not isinstance(value, (tuple, list)):\n raise e\n if len(types) != len(value):\n raise e\n for i, (x, T) in enumerate(zip(value, types)):\n check_option_type(\"{}[{}]\".format(name, i), x, T)\n return\n elif typename.startswith(\"typing.Sequence\"):\n T = sequence_type(typeinfo)\n if not isinstance(value, (tuple, list)):\n raise e\n for v in value:\n check_option_type(name, v, T)\n elif typename.startswith(\"typing.IO\"):\n if hasattr(value, \"read\"):\n return\n else:\n raise e\n elif typename.startswith(\"typing.Any\"):\n return\n elif not isinstance(value, typeinfo):\n raise e\n\n\ndef typespec_to_str(typespec: typing.Any) -> str:\n if typespec in (str, int, bool):\n t = typespec.__name__\n elif typespec == typing.Optional[str]:\n t = 'optional str'\n elif typespec == typing.Sequence[str]:\n t = 'sequence of str'\n elif typespec == typing.Optional[int]:\n t = 'optional int'\n else:\n raise NotImplementedError\n return t\n", "path": "mitmproxy/utils/typecheck.py"}]} | 1,479 | 105 |
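The one-line fix in this record extends mitmproxy's `typespec_to_str` helper so that `typing.Optional[int]` options can be serialized for the web UI instead of raising `NotImplementedError`. A self-contained, simplified sketch of the patched mapping (the real helper lives in `mitmproxy/utils/typecheck.py`):

```python
import typing

def typespec_to_str(typespec: typing.Any) -> str:
    # Simplified version of the patched helper: map option type specs to labels.
    if typespec in (str, int, bool):
        return typespec.__name__
    if typespec == typing.Optional[str]:
        return "optional str"
    if typespec == typing.Sequence[str]:
        return "sequence of str"
    if typespec == typing.Optional[int]:
        return "optional int"  # branch added by the fix
    raise NotImplementedError

print(typespec_to_str(typing.Optional[int]))  # -> "optional int", no longer an error
```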
gh_patches_debug_53877 | rasdani/github-patches | git_diff | pyca__cryptography-7406 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
release.py should link to GH create PAT page
We can pre-fill what permissions are needed to improve the UX of doing a release. Example URL: https://github.com/settings/tokens/new?description=foo&scopes=repo,workflow
@reaperhulk do you know what scopes are required?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `release.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 import getpass
6 import glob
7 import io
8 import os
9 import subprocess
10 import time
11 import zipfile
12
13 import click
14
15 import requests
16
17
18 def run(*args, **kwargs):
19 print("[running] {0}".format(list(args)))
20 subprocess.check_call(list(args), **kwargs)
21
22
23 def wait_for_build_complete_github_actions(session, token, run_url):
24 while True:
25 response = session.get(
26 run_url,
27 headers={
28 "Content-Type": "application/json",
29 "Authorization": "token {}".format(token),
30 },
31 )
32 response.raise_for_status()
33 if response.json()["conclusion"] is not None:
34 break
35 time.sleep(3)
36
37
38 def download_artifacts_github_actions(session, token, run_url):
39 response = session.get(
40 run_url,
41 headers={
42 "Content-Type": "application/json",
43 "Authorization": "token {}".format(token),
44 },
45 )
46 response.raise_for_status()
47
48 response = session.get(
49 response.json()["artifacts_url"],
50 headers={
51 "Content-Type": "application/json",
52 "Authorization": "token {}".format(token),
53 },
54 )
55 response.raise_for_status()
56 paths = []
57 for artifact in response.json()["artifacts"]:
58 response = session.get(
59 artifact["archive_download_url"],
60 headers={
61 "Content-Type": "application/json",
62 "Authorization": "token {}".format(token),
63 },
64 )
65 with zipfile.ZipFile(io.BytesIO(response.content)) as z:
66 for name in z.namelist():
67 if not name.endswith(".whl"):
68 continue
69 p = z.open(name)
70 out_path = os.path.join(
71 os.path.dirname(__file__),
72 "dist",
73 os.path.basename(name),
74 )
75 with open(out_path, "wb") as f:
76 f.write(p.read())
77 paths.append(out_path)
78 return paths
79
80
81 def fetch_github_actions_wheels(token, version):
82 session = requests.Session()
83
84 response = session.get(
85 (
86 "https://api.github.com/repos/pyca/cryptography/actions/workflows/"
87 "wheel-builder.yml/runs?event=push"
88 ),
89 headers={
90 "Content-Type": "application/json",
91 "Authorization": "token {}".format(token),
92 },
93 )
94 response.raise_for_status()
95 run_url = response.json()["workflow_runs"][0]["url"]
96 wait_for_build_complete_github_actions(session, token, run_url)
97 return download_artifacts_github_actions(session, token, run_url)
98
99
100 @click.command()
101 @click.argument("version")
102 def release(version):
103 """
104 ``version`` should be a string like '0.4' or '1.0'.
105 """
106 github_token = getpass.getpass("Github person access token: ")
107
108 # Tag and push the tag (this will trigger the wheel builder in Actions)
109 run("git", "tag", "-s", version, "-m", "{0} release".format(version))
110 run("git", "push", "--tags")
111
112 # Generate and upload vector packages
113 run("python", "setup.py", "sdist", "bdist_wheel", cwd="vectors/")
114 packages = glob.glob(
115 "vectors/dist/cryptography_vectors-{0}*".format(version)
116 )
117 run("twine", "upload", "-s", *packages)
118
119 # Generate sdist for upload
120 run("python", "setup.py", "sdist")
121 sdist = glob.glob("dist/cryptography-{0}*".format(version))
122
123 # Wait for Actions to complete and download the wheels
124 github_actions_wheel_paths = fetch_github_actions_wheels(
125 github_token, version
126 )
127
128 # Upload sdist and wheels
129 run("twine", "upload", "-s", *sdist)
130 run("twine", "upload", *github_actions_wheel_paths)
131
132
133 if __name__ == "__main__":
134 release()
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/release.py b/release.py
--- a/release.py
+++ b/release.py
@@ -103,6 +103,11 @@
"""
``version`` should be a string like '0.4' or '1.0'.
"""
+ print(
+ f"Create a new GH PAT at: "
+ f"https://github.com/settings/tokens/new?"
+ f"description={version}&scopes=repo"
+ )
github_token = getpass.getpass("Github person access token: ")
# Tag and push the tag (this will trigger the wheel builder in Actions)
| {"golden_diff": "diff --git a/release.py b/release.py\n--- a/release.py\n+++ b/release.py\n@@ -103,6 +103,11 @@\n \"\"\"\n ``version`` should be a string like '0.4' or '1.0'.\n \"\"\"\n+ print(\n+ f\"Create a new GH PAT at: \"\n+ f\"https://github.com/settings/tokens/new?\"\n+ f\"description={version}&scopes=repo\"\n+ )\n github_token = getpass.getpass(\"Github person access token: \")\n \n # Tag and push the tag (this will trigger the wheel builder in Actions)\n", "issue": "release.py should link to GH create PAT page\nWe can pre-fill what permissions are needed to improve the UX of doing a release. Example URL: https://github.com/settings/tokens/new?description=foo&scopes=repo,workflow\r\n\r\n@reaperhulk do you know what scopes are required?\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nimport getpass\nimport glob\nimport io\nimport os\nimport subprocess\nimport time\nimport zipfile\n\nimport click\n\nimport requests\n\n\ndef run(*args, **kwargs):\n print(\"[running] {0}\".format(list(args)))\n subprocess.check_call(list(args), **kwargs)\n\n\ndef wait_for_build_complete_github_actions(session, token, run_url):\n while True:\n response = session.get(\n run_url,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n if response.json()[\"conclusion\"] is not None:\n break\n time.sleep(3)\n\n\ndef download_artifacts_github_actions(session, token, run_url):\n response = session.get(\n run_url,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n\n response = session.get(\n response.json()[\"artifacts_url\"],\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n paths = []\n for artifact in response.json()[\"artifacts\"]:\n response = session.get(\n artifact[\"archive_download_url\"],\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n with zipfile.ZipFile(io.BytesIO(response.content)) as z:\n for name in z.namelist():\n if not name.endswith(\".whl\"):\n continue\n p = z.open(name)\n out_path = os.path.join(\n os.path.dirname(__file__),\n \"dist\",\n os.path.basename(name),\n )\n with open(out_path, \"wb\") as f:\n f.write(p.read())\n paths.append(out_path)\n return paths\n\n\ndef fetch_github_actions_wheels(token, version):\n session = requests.Session()\n\n response = session.get(\n (\n \"https://api.github.com/repos/pyca/cryptography/actions/workflows/\"\n \"wheel-builder.yml/runs?event=push\"\n ),\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n run_url = response.json()[\"workflow_runs\"][0][\"url\"]\n wait_for_build_complete_github_actions(session, token, run_url)\n return download_artifacts_github_actions(session, token, run_url)\n\n\[email protected]()\[email protected](\"version\")\ndef release(version):\n \"\"\"\n ``version`` should be a string like '0.4' or '1.0'.\n \"\"\"\n github_token = getpass.getpass(\"Github person access token: \")\n\n # Tag and push the tag (this will trigger the wheel builder in Actions)\n run(\"git\", \"tag\", \"-s\", version, \"-m\", \"{0} 
release\".format(version))\n run(\"git\", \"push\", \"--tags\")\n\n # Generate and upload vector packages\n run(\"python\", \"setup.py\", \"sdist\", \"bdist_wheel\", cwd=\"vectors/\")\n packages = glob.glob(\n \"vectors/dist/cryptography_vectors-{0}*\".format(version)\n )\n run(\"twine\", \"upload\", \"-s\", *packages)\n\n # Generate sdist for upload\n run(\"python\", \"setup.py\", \"sdist\")\n sdist = glob.glob(\"dist/cryptography-{0}*\".format(version))\n\n # Wait for Actions to complete and download the wheels\n github_actions_wheel_paths = fetch_github_actions_wheels(\n github_token, version\n )\n\n # Upload sdist and wheels\n run(\"twine\", \"upload\", \"-s\", *sdist)\n run(\"twine\", \"upload\", *github_actions_wheel_paths)\n\n\nif __name__ == \"__main__\":\n release()\n", "path": "release.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nimport getpass\nimport glob\nimport io\nimport os\nimport subprocess\nimport time\nimport zipfile\n\nimport click\n\nimport requests\n\n\ndef run(*args, **kwargs):\n print(\"[running] {0}\".format(list(args)))\n subprocess.check_call(list(args), **kwargs)\n\n\ndef wait_for_build_complete_github_actions(session, token, run_url):\n while True:\n response = session.get(\n run_url,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n if response.json()[\"conclusion\"] is not None:\n break\n time.sleep(3)\n\n\ndef download_artifacts_github_actions(session, token, run_url):\n response = session.get(\n run_url,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n\n response = session.get(\n response.json()[\"artifacts_url\"],\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n paths = []\n for artifact in response.json()[\"artifacts\"]:\n response = session.get(\n artifact[\"archive_download_url\"],\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n with zipfile.ZipFile(io.BytesIO(response.content)) as z:\n for name in z.namelist():\n if not name.endswith(\".whl\"):\n continue\n p = z.open(name)\n out_path = os.path.join(\n os.path.dirname(__file__),\n \"dist\",\n os.path.basename(name),\n )\n with open(out_path, \"wb\") as f:\n f.write(p.read())\n paths.append(out_path)\n return paths\n\n\ndef fetch_github_actions_wheels(token, version):\n session = requests.Session()\n\n response = session.get(\n (\n \"https://api.github.com/repos/pyca/cryptography/actions/workflows/\"\n \"wheel-builder.yml/runs?event=push\"\n ),\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"token {}\".format(token),\n },\n )\n response.raise_for_status()\n run_url = response.json()[\"workflow_runs\"][0][\"url\"]\n wait_for_build_complete_github_actions(session, token, run_url)\n return download_artifacts_github_actions(session, token, run_url)\n\n\[email protected]()\[email protected](\"version\")\ndef release(version):\n \"\"\"\n ``version`` should be a string like '0.4' or '1.0'.\n \"\"\"\n print(\n f\"Create a new GH PAT at: \"\n f\"https://github.com/settings/tokens/new?\"\n f\"description={version}&scopes=repo\"\n )\n github_token = 
getpass.getpass(\"Github person access token: \")\n\n # Tag and push the tag (this will trigger the wheel builder in Actions)\n run(\"git\", \"tag\", \"-s\", version, \"-m\", \"{0} release\".format(version))\n run(\"git\", \"push\", \"--tags\")\n\n # Generate and upload vector packages\n run(\"python\", \"setup.py\", \"sdist\", \"bdist_wheel\", cwd=\"vectors/\")\n packages = glob.glob(\n \"vectors/dist/cryptography_vectors-{0}*\".format(version)\n )\n run(\"twine\", \"upload\", \"-s\", *packages)\n\n # Generate sdist for upload\n run(\"python\", \"setup.py\", \"sdist\")\n sdist = glob.glob(\"dist/cryptography-{0}*\".format(version))\n\n # Wait for Actions to complete and download the wheels\n github_actions_wheel_paths = fetch_github_actions_wheels(\n github_token, version\n )\n\n # Upload sdist and wheels\n run(\"twine\", \"upload\", \"-s\", *sdist)\n run(\"twine\", \"upload\", *github_actions_wheel_paths)\n\n\nif __name__ == \"__main__\":\n release()\n", "path": "release.py"}]} | 1,499 | 134 |
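The release-script change in this record only prints a pre-filled GitHub token-creation link before prompting for the PAT. A small sketch of building such a URL programmatically (the `pat_url` helper and its defaults are assumptions for illustration; the `repo` scope mirrors the accepted patch):

```python
from urllib.parse import urlencode

def pat_url(version: str, scopes: str = "repo") -> str:
    # Pre-fill the token description and scopes via query parameters,
    # so the release manager only has to click "Generate token".
    return "https://github.com/settings/tokens/new?" + urlencode(
        {"description": version, "scopes": scopes}
    )

print(pat_url("41.0.0"))
# https://github.com/settings/tokens/new?description=41.0.0&scopes=repo
```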
gh_patches_debug_22074 | rasdani/github-patches | git_diff | GPflow__GPflow-165 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tests fail with tensorflow 10.0rc0
One test fails with the new tensorflow pre-release; it's related to the custom-op test:
``` python
ImportError: No module named 'tensorflow.python.kernel_tests'
```
In addition, there seems to be an issue with one of the notebooks (#161); I'm looking into this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 from __future__ import print_function
4 from setuptools import setup
5 import re
6 import os
7 import sys
8
9 # load version form _version.py
10 VERSIONFILE = "GPflow/_version.py"
11 verstrline = open(VERSIONFILE, "rt").read()
12 VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
13 mo = re.search(VSRE, verstrline, re.M)
14 if mo:
15 verstr = mo.group(1)
16 else:
17 raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
18
19 # Compile the bespoke TensorFlow ops in-place. Not sure how this would work if this script wasn't executed as `develop`.
20 compile_command = "g++ -std=c++11 -shared ./GPflow/tfops/vec_to_tri.cc " \
21 "GPflow/tfops/tri_to_vec.cc -o GPflow/tfops/matpackops.so " \
22 "-fPIC -I $(python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())')"
23 if sys.platform == "darwin":
24 # Additional command for Macs, as instructed by the TensorFlow docs
25 compile_command += " -undefined dynamic_lookup"
26 os.system(compile_command)
27
28 setup(name='GPflow',
29 version=verstr,
30 author="James Hensman, Alex Matthews",
31 author_email="[email protected]",
32 description=("Gaussian process methods in tensorflow"),
33 license="BSD 3-clause",
34 keywords="machine-learning gaussian-processes kernels tensorflow",
35 url="http://github.com/gpflow/gpflow",
36 ext_modules=[],
37 packages=["GPflow"],
38 package_dir={'GPflow': 'GPflow'},
39 py_modules=['GPflow.__init__'],
40 test_suite='testing',
41 install_requires=['numpy>=1.9', 'scipy>=0.16', 'tensorflow>=0.7.1'],
42 classifiers=['License :: OSI Approved :: BSD License',
43 'Natural Language :: English',
44 'Operating System :: MacOS :: MacOS X',
45 'Operating System :: Microsoft :: Windows',
46 'Operating System :: POSIX :: Linux',
47 'Programming Language :: Python :: 2.7',
48 'Topic :: Scientific/Engineering :: Artificial Intelligence']
49 )
50
```
Path: `GPflow/_version.py`
Content:
```
1 # Copyright 2016 James Hensman
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 __version__ = "0.2.1"
17
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/GPflow/_version.py b/GPflow/_version.py
--- a/GPflow/_version.py
+++ b/GPflow/_version.py
@@ -13,4 +13,4 @@
# limitations under the License.
-__version__ = "0.2.1"
+__version__ = "0.3.0"
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,12 +33,14 @@
license="BSD 3-clause",
keywords="machine-learning gaussian-processes kernels tensorflow",
url="http://github.com/gpflow/gpflow",
+ package_data={'GPflow': ['GPflow/tfops/*.so']},
+ include_package_data=True,
ext_modules=[],
packages=["GPflow"],
package_dir={'GPflow': 'GPflow'},
py_modules=['GPflow.__init__'],
test_suite='testing',
- install_requires=['numpy>=1.9', 'scipy>=0.16', 'tensorflow>=0.7.1'],
+ install_requires=['numpy>=1.9', 'scipy>=0.16', 'tensorflow>=0.10.0rc0'],
classifiers=['License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
| {"golden_diff": "diff --git a/GPflow/_version.py b/GPflow/_version.py\n--- a/GPflow/_version.py\n+++ b/GPflow/_version.py\n@@ -13,4 +13,4 @@\n # limitations under the License.\n \n \n-__version__ = \"0.2.1\"\n+__version__ = \"0.3.0\"\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,12 +33,14 @@\n license=\"BSD 3-clause\",\n keywords=\"machine-learning gaussian-processes kernels tensorflow\",\n url=\"http://github.com/gpflow/gpflow\",\n+ package_data={'GPflow': ['GPflow/tfops/*.so']},\n+ include_package_data=True,\n ext_modules=[],\n packages=[\"GPflow\"],\n package_dir={'GPflow': 'GPflow'},\n py_modules=['GPflow.__init__'],\n test_suite='testing',\n- install_requires=['numpy>=1.9', 'scipy>=0.16', 'tensorflow>=0.7.1'],\n+ install_requires=['numpy>=1.9', 'scipy>=0.16', 'tensorflow>=0.10.0rc0'],\n classifiers=['License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n", "issue": "tests fail with tensorflow 10.0rc0\nOne test fails with the new tensorflow pre-release, it's related to the custom-op test:\n\n``` python\nImportError: No module named 'tensorflow.python.kernel_tests'\n```\n\nIn addition, there sees to be an issue with one of the notebooks ( #161 ), I'm looking into this.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom setuptools import setup\nimport re\nimport os\nimport sys\n\n# load version form _version.py\nVERSIONFILE = \"GPflow/_version.py\"\nverstrline = open(VERSIONFILE, \"rt\").read()\nVSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\nmo = re.search(VSRE, verstrline, re.M)\nif mo:\n verstr = mo.group(1)\nelse:\n raise RuntimeError(\"Unable to find version string in %s.\" % (VERSIONFILE,))\n\n# Compile the bespoke TensorFlow ops in-place. 
Not sure how this would work if this script wasn't executed as `develop`.\ncompile_command = \"g++ -std=c++11 -shared ./GPflow/tfops/vec_to_tri.cc \" \\\n \"GPflow/tfops/tri_to_vec.cc -o GPflow/tfops/matpackops.so \" \\\n \"-fPIC -I $(python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())')\"\nif sys.platform == \"darwin\":\n # Additional command for Macs, as instructed by the TensorFlow docs\n compile_command += \" -undefined dynamic_lookup\"\nos.system(compile_command)\n\nsetup(name='GPflow',\n version=verstr,\n author=\"James Hensman, Alex Matthews\",\n author_email=\"[email protected]\",\n description=(\"Gaussian process methods in tensorflow\"),\n license=\"BSD 3-clause\",\n keywords=\"machine-learning gaussian-processes kernels tensorflow\",\n url=\"http://github.com/gpflow/gpflow\",\n ext_modules=[],\n packages=[\"GPflow\"],\n package_dir={'GPflow': 'GPflow'},\n py_modules=['GPflow.__init__'],\n test_suite='testing',\n install_requires=['numpy>=1.9', 'scipy>=0.16', 'tensorflow>=0.7.1'],\n classifiers=['License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence']\n )\n", "path": "setup.py"}, {"content": "# Copyright 2016 James Hensman\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n__version__ = \"0.2.1\"\n", "path": "GPflow/_version.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom setuptools import setup\nimport re\nimport os\nimport sys\n\n# load version form _version.py\nVERSIONFILE = \"GPflow/_version.py\"\nverstrline = open(VERSIONFILE, \"rt\").read()\nVSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\nmo = re.search(VSRE, verstrline, re.M)\nif mo:\n verstr = mo.group(1)\nelse:\n raise RuntimeError(\"Unable to find version string in %s.\" % (VERSIONFILE,))\n\n# Compile the bespoke TensorFlow ops in-place. 
Not sure how this would work if this script wasn't executed as `develop`.\ncompile_command = \"g++ -std=c++11 -shared ./GPflow/tfops/vec_to_tri.cc \" \\\n \"GPflow/tfops/tri_to_vec.cc -o GPflow/tfops/matpackops.so \" \\\n \"-fPIC -I $(python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())')\"\nif sys.platform == \"darwin\":\n # Additional command for Macs, as instructed by the TensorFlow docs\n compile_command += \" -undefined dynamic_lookup\"\nos.system(compile_command)\n\nsetup(name='GPflow',\n version=verstr,\n author=\"James Hensman, Alex Matthews\",\n author_email=\"[email protected]\",\n description=(\"Gaussian process methods in tensorflow\"),\n license=\"BSD 3-clause\",\n keywords=\"machine-learning gaussian-processes kernels tensorflow\",\n url=\"http://github.com/gpflow/gpflow\",\n package_data={'GPflow': ['GPflow/tfops/*.so']},\n include_package_data=True,\n ext_modules=[],\n packages=[\"GPflow\"],\n package_dir={'GPflow': 'GPflow'},\n py_modules=['GPflow.__init__'],\n test_suite='testing',\n install_requires=['numpy>=1.9', 'scipy>=0.16', 'tensorflow>=0.10.0rc0'],\n classifiers=['License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence']\n )\n", "path": "setup.py"}, {"content": "# Copyright 2016 James Hensman\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n__version__ = \"0.3.0\"\n", "path": "GPflow/_version.py"}]} | 1,101 | 298 |
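Besides bumping the TensorFlow pin, the accepted patch in this record ships the compiled custom-op library with the package via `package_data`. A minimal `setup.py` sketch of that packaging idea (paths in `package_data` are interpreted relative to the package directory; the exact glob here is an assumption for illustration):

```python
from setuptools import setup

# Minimal packaging sketch: bundle the compiled custom-op library so that
# installed copies of the package can load it at runtime.
setup(
    name="GPflow",
    packages=["GPflow"],
    package_data={"GPflow": ["tfops/*.so"]},  # relative to the GPflow package dir
    include_package_data=True,
    install_requires=["numpy>=1.9", "scipy>=0.16", "tensorflow>=0.10.0rc0"],
)
```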
gh_patches_debug_13731 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-4801 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Locally hosted RTD instance doesn't allow git file:/// URLs
## Details
I installed a local RTD instance according to the Installation guide and imported a test project. Now I want to import my own Git project manually.
## Expected Result
I expected that the instance should accept all valid Git URLs.
## Actual Result
When I enter a file:///.../../x.git URL, the manual import page shows an "Invalid scheme for URL" error. I checked that I can clone this URL from a terminal.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/projects/validators.py`
Content:
```
1 """Validators for projects app."""
2
3 # From https://github.com/django/django/pull/3477/files
4 from __future__ import absolute_import
5 import re
6
7 from django.conf import settings
8 from django.core.exceptions import ValidationError
9 from django.utils.deconstruct import deconstructible
10 from django.utils.translation import ugettext_lazy as _
11 from django.core.validators import RegexValidator
12 from future.backports.urllib.parse import urlparse
13
14
15 domain_regex = (
16 r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}(?<!-)\.?)|'
17 r'localhost|' # localhost...
18 r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
19 r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
20 )
21
22
23 @deconstructible
24 class DomainNameValidator(RegexValidator):
25 message = _('Enter a valid plain or internationalized domain name value')
26 regex = re.compile(domain_regex, re.IGNORECASE)
27
28 def __init__(self, accept_idna=True, **kwargs):
29 message = kwargs.get('message')
30 self.accept_idna = accept_idna
31 super(DomainNameValidator, self).__init__(**kwargs)
32 if not self.accept_idna and message is None:
33 self.message = _('Enter a valid domain name value')
34
35 def __call__(self, value):
36 try:
37 super(DomainNameValidator, self).__call__(value)
38 except ValidationError as exc:
39 if not self.accept_idna:
40 raise
41 if not value:
42 raise
43 try:
44 idnavalue = value.encode('idna')
45 except UnicodeError:
46 raise exc
47 super(DomainNameValidator, self).__call__(idnavalue)
48
49
50 validate_domain_name = DomainNameValidator()
51
52
53 @deconstructible
54 class RepositoryURLValidator(object):
55
56 disallow_relative_url = True
57
58 # Pattern for ``[email protected]:user/repo`` pattern
59 re_git_user = re.compile(r'^[\w]+@.+')
60
61 def __call__(self, value):
62 allow_private_repos = getattr(settings, 'ALLOW_PRIVATE_REPOS', False)
63 public_schemes = ['https', 'http', 'git', 'ftps', 'ftp']
64 private_schemes = ['ssh', 'ssh+git']
65 valid_schemes = public_schemes
66 if allow_private_repos:
67 valid_schemes += private_schemes
68 url = urlparse(value)
69
70 # Malicious characters go first
71 if '&&' in value or '|' in value:
72 raise ValidationError(_('Invalid character in the URL'))
73 elif url.scheme in valid_schemes:
74 return value
75
76 # Repo URL is not a supported scheme at this point, but there are
77 # several cases where we might support it
78 # Launchpad
79 elif value.startswith('lp:'):
80 return value
81 # Relative paths are conditionally supported
82 elif value.startswith('.') and not self.disallow_relative_url:
83 return value
84 # SSH cloning and ``[email protected]:user/project.git``
85 elif self.re_git_user.search(value) or url.scheme in private_schemes:
86 if allow_private_repos:
87 return value
88
89 # Throw a more helpful error message
90 raise ValidationError('Manual cloning via SSH is not supported')
91
92 # No more valid URLs without supported URL schemes
93 raise ValidationError(_('Invalid scheme for URL'))
94
95
96 class SubmoduleURLValidator(RepositoryURLValidator):
97
98 """
99 A URL validator for repository submodules
100
101 If a repository has a relative submodule, the URL path is effectively the
102 supermodule's remote ``origin`` URL with the relative path applied.
103
104 From the git docs::
105
106 ``<repository>`` is the URL of the new submodule's origin repository.
107 This may be either an absolute URL, or (if it begins with ``./`` or
108 ``../``), the location relative to the superproject's default remote
109 repository
110 """
111
112 disallow_relative_url = False
113
114
115 validate_repository_url = RepositoryURLValidator()
116 validate_submodule_url = SubmoduleURLValidator()
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/projects/validators.py b/readthedocs/projects/validators.py
--- a/readthedocs/projects/validators.py
+++ b/readthedocs/projects/validators.py
@@ -62,9 +62,12 @@
allow_private_repos = getattr(settings, 'ALLOW_PRIVATE_REPOS', False)
public_schemes = ['https', 'http', 'git', 'ftps', 'ftp']
private_schemes = ['ssh', 'ssh+git']
+ local_schemes = ['file']
valid_schemes = public_schemes
if allow_private_repos:
valid_schemes += private_schemes
+ if getattr(settings, 'DEBUG'): # allow `file://` urls in dev
+ valid_schemes += local_schemes
url = urlparse(value)
# Malicious characters go first
| {"golden_diff": "diff --git a/readthedocs/projects/validators.py b/readthedocs/projects/validators.py\n--- a/readthedocs/projects/validators.py\n+++ b/readthedocs/projects/validators.py\n@@ -62,9 +62,12 @@\n allow_private_repos = getattr(settings, 'ALLOW_PRIVATE_REPOS', False)\n public_schemes = ['https', 'http', 'git', 'ftps', 'ftp']\n private_schemes = ['ssh', 'ssh+git']\n+ local_schemes = ['file']\n valid_schemes = public_schemes\n if allow_private_repos:\n valid_schemes += private_schemes\n+ if getattr(settings, 'DEBUG'): # allow `file://` urls in dev\n+ valid_schemes += local_schemes\n url = urlparse(value)\n \n # Malicious characters go first\n", "issue": "Locally hosted RTD instance doesn't allow git file:/// URLs\n## Details\r\nI installed a local RTD instance according to the Installation guide and imported test project. Now, I want to import my git project manually, \r\n## Expected Result\r\n\r\nI expected that the instance should accept all valid Git URLs.\r\n\r\n## Actual Result\r\n\r\nWhen I enter file:///.../../x.git URL, the manual import page shows \"Invalid scheme for URL\" error. I checked that I can clone this URL from a terminal.\r\n\n", "before_files": [{"content": "\"\"\"Validators for projects app.\"\"\"\n\n# From https://github.com/django/django/pull/3477/files\nfrom __future__ import absolute_import\nimport re\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.utils.deconstruct import deconstructible\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.validators import RegexValidator\nfrom future.backports.urllib.parse import urlparse\n\n\ndomain_regex = (\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}(?<!-)\\.?)|'\n r'localhost|' # localhost...\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}|' # ...or ipv4\n r'\\[?[A-F0-9]*:[A-F0-9:]+\\]?)' # ...or ipv6\n)\n\n\n@deconstructible\nclass DomainNameValidator(RegexValidator):\n message = _('Enter a valid plain or internationalized domain name value')\n regex = re.compile(domain_regex, re.IGNORECASE)\n\n def __init__(self, accept_idna=True, **kwargs):\n message = kwargs.get('message')\n self.accept_idna = accept_idna\n super(DomainNameValidator, self).__init__(**kwargs)\n if not self.accept_idna and message is None:\n self.message = _('Enter a valid domain name value')\n\n def __call__(self, value):\n try:\n super(DomainNameValidator, self).__call__(value)\n except ValidationError as exc:\n if not self.accept_idna:\n raise\n if not value:\n raise\n try:\n idnavalue = value.encode('idna')\n except UnicodeError:\n raise exc\n super(DomainNameValidator, self).__call__(idnavalue)\n\n\nvalidate_domain_name = DomainNameValidator()\n\n\n@deconstructible\nclass RepositoryURLValidator(object):\n\n disallow_relative_url = True\n\n # Pattern for ``[email protected]:user/repo`` pattern\n re_git_user = re.compile(r'^[\\w]+@.+')\n\n def __call__(self, value):\n allow_private_repos = getattr(settings, 'ALLOW_PRIVATE_REPOS', False)\n public_schemes = ['https', 'http', 'git', 'ftps', 'ftp']\n private_schemes = ['ssh', 'ssh+git']\n valid_schemes = public_schemes\n if allow_private_repos:\n valid_schemes += private_schemes\n url = urlparse(value)\n\n # Malicious characters go first\n if '&&' in value or '|' in value:\n raise ValidationError(_('Invalid character in the URL'))\n elif url.scheme in valid_schemes:\n return value\n\n # Repo URL is not a supported scheme at this point, but there are\n # several cases where we might 
support it\n # Launchpad\n elif value.startswith('lp:'):\n return value\n # Relative paths are conditionally supported\n elif value.startswith('.') and not self.disallow_relative_url:\n return value\n # SSH cloning and ``[email protected]:user/project.git``\n elif self.re_git_user.search(value) or url.scheme in private_schemes:\n if allow_private_repos:\n return value\n\n # Throw a more helpful error message\n raise ValidationError('Manual cloning via SSH is not supported')\n\n # No more valid URLs without supported URL schemes\n raise ValidationError(_('Invalid scheme for URL'))\n\n\nclass SubmoduleURLValidator(RepositoryURLValidator):\n\n \"\"\"\n A URL validator for repository submodules\n\n If a repository has a relative submodule, the URL path is effectively the\n supermodule's remote ``origin`` URL with the relative path applied.\n\n From the git docs::\n\n ``<repository>`` is the URL of the new submodule's origin repository.\n This may be either an absolute URL, or (if it begins with ``./`` or\n ``../``), the location relative to the superproject's default remote\n repository\n \"\"\"\n\n disallow_relative_url = False\n\n\nvalidate_repository_url = RepositoryURLValidator()\nvalidate_submodule_url = SubmoduleURLValidator()\n", "path": "readthedocs/projects/validators.py"}], "after_files": [{"content": "\"\"\"Validators for projects app.\"\"\"\n\n# From https://github.com/django/django/pull/3477/files\nfrom __future__ import absolute_import\nimport re\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.utils.deconstruct import deconstructible\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.validators import RegexValidator\nfrom future.backports.urllib.parse import urlparse\n\n\ndomain_regex = (\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}(?<!-)\\.?)|'\n r'localhost|' # localhost...\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}|' # ...or ipv4\n r'\\[?[A-F0-9]*:[A-F0-9:]+\\]?)' # ...or ipv6\n)\n\n\n@deconstructible\nclass DomainNameValidator(RegexValidator):\n message = _('Enter a valid plain or internationalized domain name value')\n regex = re.compile(domain_regex, re.IGNORECASE)\n\n def __init__(self, accept_idna=True, **kwargs):\n message = kwargs.get('message')\n self.accept_idna = accept_idna\n super(DomainNameValidator, self).__init__(**kwargs)\n if not self.accept_idna and message is None:\n self.message = _('Enter a valid domain name value')\n\n def __call__(self, value):\n try:\n super(DomainNameValidator, self).__call__(value)\n except ValidationError as exc:\n if not self.accept_idna:\n raise\n if not value:\n raise\n try:\n idnavalue = value.encode('idna')\n except UnicodeError:\n raise exc\n super(DomainNameValidator, self).__call__(idnavalue)\n\n\nvalidate_domain_name = DomainNameValidator()\n\n\n@deconstructible\nclass RepositoryURLValidator(object):\n\n disallow_relative_url = True\n\n # Pattern for ``[email protected]:user/repo`` pattern\n re_git_user = re.compile(r'^[\\w]+@.+')\n\n def __call__(self, value):\n allow_private_repos = getattr(settings, 'ALLOW_PRIVATE_REPOS', False)\n public_schemes = ['https', 'http', 'git', 'ftps', 'ftp']\n private_schemes = ['ssh', 'ssh+git']\n local_schemes = ['file']\n valid_schemes = public_schemes\n if allow_private_repos:\n valid_schemes += private_schemes\n if getattr(settings, 'DEBUG'): # allow `file://` urls in dev\n valid_schemes += local_schemes\n url = urlparse(value)\n\n # Malicious characters go first\n if '&&' 
in value or '|' in value:\n raise ValidationError(_('Invalid character in the URL'))\n elif url.scheme in valid_schemes:\n return value\n\n # Repo URL is not a supported scheme at this point, but there are\n # several cases where we might support it\n # Launchpad\n elif value.startswith('lp:'):\n return value\n # Relative paths are conditionally supported\n elif value.startswith('.') and not self.disallow_relative_url:\n return value\n # SSH cloning and ``[email protected]:user/project.git``\n elif self.re_git_user.search(value) or url.scheme in private_schemes:\n if allow_private_repos:\n return value\n\n # Throw a more helpful error message\n raise ValidationError('Manual cloning via SSH is not supported')\n\n # No more valid URLs without supported URL schemes\n raise ValidationError(_('Invalid scheme for URL'))\n\n\nclass SubmoduleURLValidator(RepositoryURLValidator):\n\n \"\"\"\n A URL validator for repository submodules\n\n If a repository has a relative submodule, the URL path is effectively the\n supermodule's remote ``origin`` URL with the relative path applied.\n\n From the git docs::\n\n ``<repository>`` is the URL of the new submodule's origin repository.\n This may be either an absolute URL, or (if it begins with ``./`` or\n ``../``), the location relative to the superproject's default remote\n repository\n \"\"\"\n\n disallow_relative_url = False\n\n\nvalidate_repository_url = RepositoryURLValidator()\nvalidate_submodule_url = SubmoduleURLValidator()\n", "path": "readthedocs/projects/validators.py"}]} | 1,555 | 183 |
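The fix in this record whitelists the `file` scheme only when the instance runs with `DEBUG` enabled. A small, standalone sketch of that scheme-gating logic (the helper name and flag arguments are assumptions for illustration, not Read the Docs API):

```python
from urllib.parse import urlparse

def allowed_schemes(allow_private_repos: bool, debug: bool) -> list:
    schemes = ["https", "http", "git", "ftps", "ftp"]
    if allow_private_repos:
        schemes += ["ssh", "ssh+git"]
    if debug:
        schemes += ["file"]  # local clones only make sense on a dev instance
    return schemes

url = urlparse("file:///home/user/project.git")
print(url.scheme in allowed_schemes(allow_private_repos=False, debug=True))  # True
```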
gh_patches_debug_14591 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-144 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Function to order table by set of columns
**Problem**
<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->
We should be able to get the records of a table ordered by any set of its columns. The records should be paginated.
**Proposed solution**
<!-- A clear and concise description of your proposed solution or feature. -->
We need a function at the data layer (i.e., in the `db` library) that performs this query.
**Additional context**
<!-- Add any other context or screenshots about the feature request here.-->
The interesting bit will be figuring out how to paginate the results, but without having to reperform the (costly) ordering query each time.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/records.py`
Content:
```
1 from sqlalchemy import delete, select
2 from sqlalchemy.inspection import inspect
3
4
5 def _get_primary_key_column(table):
6 primary_key_list = list(inspect(table).primary_key)
7 # We do not support getting by composite primary keys
8 assert len(primary_key_list) == 1
9 return primary_key_list[0]
10
11
12 def get_record(table, engine, id_value):
13 primary_key_column = _get_primary_key_column(table)
14 query = select(table).where(primary_key_column == id_value)
15 with engine.begin() as conn:
16 result = conn.execute(query).fetchall()
17 assert len(result) <= 1
18 return result[0] if result else None
19
20
21 def get_records(table, engine, limit=None, offset=None):
22 query = select(table).limit(limit).offset(offset)
23 with engine.begin() as conn:
24 return conn.execute(query).fetchall()
25
26
27 def create_record_or_records(table, engine, record_data):
28 """
29 record_data can be a dictionary, tuple, or list of dictionaries or tuples.
30 if record_data is a list, it creates multiple records.
31 """
32 id_value = None
33 with engine.begin() as connection:
34 result = connection.execute(table.insert(), record_data)
35 # If there was only a single record created, return the record.
36 if result.rowcount == 1:
37 # We need to manually commit insertion so that we can retrieve the record.
38 connection.commit()
39 id_value = result.inserted_primary_key[0]
40 if id_value is not None:
41 return get_record(table, engine, id_value)
42 # Do not return any records if multiple rows were added.
43 return None
44
45
46 def update_record(table, engine, id_value, record_data):
47 primary_key_column = _get_primary_key_column(table)
48 with engine.begin() as connection:
49 connection.execute(
50 table.update().where(primary_key_column == id_value).values(record_data)
51 )
52 return get_record(table, engine, id_value)
53
54
55 def delete_record(table, engine, id_value):
56 primary_key_column = _get_primary_key_column(table)
57 query = delete(table).where(primary_key_column == id_value)
58 with engine.begin() as conn:
59 return conn.execute(query)
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/db/records.py b/db/records.py
--- a/db/records.py
+++ b/db/records.py
@@ -18,8 +18,20 @@
return result[0] if result else None
-def get_records(table, engine, limit=None, offset=None):
- query = select(table).limit(limit).offset(offset)
+def get_records(table, engine, limit=None, offset=None, order_by=[]):
+ """
+ Returns records from a table.
+
+ Args:
+ table: SQLAlchemy table object
+ engine: SQLAlchemy engine object
+ limit: int, gives number of rows to return
+ offset: int, gives number of rows to skip
+ order_by: list of SQLAlchemy ColumnElements to order by. Should
+ usually be either a list of string column names, or a
+ list of columns from the given table.
+ """
+ query = select(table).order_by(*order_by).limit(limit).offset(offset)
with engine.begin() as conn:
return conn.execute(query).fetchall()
| {"golden_diff": "diff --git a/db/records.py b/db/records.py\n--- a/db/records.py\n+++ b/db/records.py\n@@ -18,8 +18,20 @@\n return result[0] if result else None\n \n \n-def get_records(table, engine, limit=None, offset=None):\n- query = select(table).limit(limit).offset(offset)\n+def get_records(table, engine, limit=None, offset=None, order_by=[]):\n+ \"\"\"\n+ Returns records from a table.\n+\n+ Args:\n+ table: SQLAlchemy table object\n+ engine: SQLAlchemy engine object\n+ limit: int, gives number of rows to return\n+ offset: int, gives number of rows to skip\n+ order_by: list of SQLAlchemy ColumnElements to order by. Should\n+ usually be either a list of string column names, or a\n+ list of columns from the given table.\n+ \"\"\"\n+ query = select(table).order_by(*order_by).limit(limit).offset(offset)\n with engine.begin() as conn:\n return conn.execute(query).fetchall()\n", "issue": "Function to order table by set of columns\n**Problem**\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\n\r\nWe should be able to get the records of a table in an ordering by any set of its columns. The records should be paginated.\r\n\r\n**Proposed solution**\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\n\r\nWe need a function at the data layer (i.e., in the `db` library) that performs this query.\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\n\r\nThe interesting bit will be figuring out how to paginate the results, but without having to reperform the (costly) ordering query each time.\n", "before_files": [{"content": "from sqlalchemy import delete, select\nfrom sqlalchemy.inspection import inspect\n\n\ndef _get_primary_key_column(table):\n primary_key_list = list(inspect(table).primary_key)\n # We do not support getting by composite primary keys\n assert len(primary_key_list) == 1\n return primary_key_list[0]\n\n\ndef get_record(table, engine, id_value):\n primary_key_column = _get_primary_key_column(table)\n query = select(table).where(primary_key_column == id_value)\n with engine.begin() as conn:\n result = conn.execute(query).fetchall()\n assert len(result) <= 1\n return result[0] if result else None\n\n\ndef get_records(table, engine, limit=None, offset=None):\n query = select(table).limit(limit).offset(offset)\n with engine.begin() as conn:\n return conn.execute(query).fetchall()\n\n\ndef create_record_or_records(table, engine, record_data):\n \"\"\"\n record_data can be a dictionary, tuple, or list of dictionaries or tuples.\n if record_data is a list, it creates multiple records.\n \"\"\"\n id_value = None\n with engine.begin() as connection:\n result = connection.execute(table.insert(), record_data)\n # If there was only a single record created, return the record.\n if result.rowcount == 1:\n # We need to manually commit insertion so that we can retrieve the record.\n connection.commit()\n id_value = result.inserted_primary_key[0]\n if id_value is not None:\n return get_record(table, engine, id_value)\n # Do not return any records if multiple rows were added.\n return None\n\n\ndef update_record(table, engine, id_value, record_data):\n primary_key_column = _get_primary_key_column(table)\n with engine.begin() as connection:\n connection.execute(\n table.update().where(primary_key_column == id_value).values(record_data)\n )\n return get_record(table, engine, id_value)\n\n\ndef delete_record(table, engine, id_value):\n 
primary_key_column = _get_primary_key_column(table)\n query = delete(table).where(primary_key_column == id_value)\n with engine.begin() as conn:\n return conn.execute(query)\n", "path": "db/records.py"}], "after_files": [{"content": "from sqlalchemy import delete, select\nfrom sqlalchemy.inspection import inspect\n\n\ndef _get_primary_key_column(table):\n primary_key_list = list(inspect(table).primary_key)\n # We do not support getting by composite primary keys\n assert len(primary_key_list) == 1\n return primary_key_list[0]\n\n\ndef get_record(table, engine, id_value):\n primary_key_column = _get_primary_key_column(table)\n query = select(table).where(primary_key_column == id_value)\n with engine.begin() as conn:\n result = conn.execute(query).fetchall()\n assert len(result) <= 1\n return result[0] if result else None\n\n\ndef get_records(table, engine, limit=None, offset=None, order_by=[]):\n \"\"\"\n Returns records from a table.\n\n Args:\n table: SQLAlchemy table object\n engine: SQLAlchemy engine object\n limit: int, gives number of rows to return\n offset: int, gives number of rows to skip\n order_by: list of SQLAlchemy ColumnElements to order by. Should\n usually be either a list of string column names, or a\n list of columns from the given table.\n \"\"\"\n query = select(table).order_by(*order_by).limit(limit).offset(offset)\n with engine.begin() as conn:\n return conn.execute(query).fetchall()\n\n\ndef create_record_or_records(table, engine, record_data):\n \"\"\"\n record_data can be a dictionary, tuple, or list of dictionaries or tuples.\n if record_data is a list, it creates multiple records.\n \"\"\"\n id_value = None\n with engine.begin() as connection:\n result = connection.execute(table.insert(), record_data)\n # If there was only a single record created, return the record.\n if result.rowcount == 1:\n # We need to manually commit insertion so that we can retrieve the record.\n connection.commit()\n id_value = result.inserted_primary_key[0]\n if id_value is not None:\n return get_record(table, engine, id_value)\n # Do not return any records if multiple rows were added.\n return None\n\n\ndef update_record(table, engine, id_value, record_data):\n primary_key_column = _get_primary_key_column(table)\n with engine.begin() as connection:\n connection.execute(\n table.update().where(primary_key_column == id_value).values(record_data)\n )\n return get_record(table, engine, id_value)\n\n\ndef delete_record(table, engine, id_value):\n primary_key_column = _get_primary_key_column(table)\n query = delete(table).where(primary_key_column == id_value)\n with engine.begin() as conn:\n return conn.execute(query)\n", "path": "db/records.py"}]} | 994 | 240 |
gh_patches_debug_4832 | rasdani/github-patches | git_diff | PlasmaPy__PlasmaPy-664 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bring back parallel testing in tox.ini
I forgot a flag in there while debugging.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plasmapy/classes/sources/openpmd_hdf5.py`
Content:
```
1 import numpy as np
2 import astropy.units as u
3 try:
4 import h5py
5 except (ImportError, ModuleNotFoundError) as e:
6 from plasmapy.optional_deps import h5py_import_error
7 raise ImportError(h5py_import_error) from e
8
9 from plasmapy.classes import GenericPlasma
10 from plasmapy.classes.exceptions import DataStandardError
11
12 import os
13 from distutils.version import StrictVersion
14
15
16 _OUTDATED_VERSION = "1.1.0"
17 _NEWER_VERSION = "2.0.0"
18
19 # This is the order what OpenPMD uses to store unit
20 # dimensions for a record.
21 _UNITS = (u.meter,
22 u.kilogram,
23 u.second,
24 u.ampere,
25 u.Kelvin,
26 u.mol,
27 u.candela)
28
29
30 def _fetch_units(openPMD_dims):
31 """
32 Converts a collection of OpenPMD dimensions to astropy.units.
33 """
34
35 units = u.dimensionless_unscaled
36 for factor, unit in zip(openPMD_dims, _UNITS):
37 units *= (unit ** factor)
38 units, *_ = units.compose()
39 return units
40
41
42 def _valid_version(openPMD_version,
43 outdated=_OUTDATED_VERSION, newer=_NEWER_VERSION):
44 """
45 Checks if the passed version is supported or not.
46 """
47
48 parsed_version = StrictVersion(openPMD_version)
49 outdated_version = StrictVersion(outdated)
50 newer_version = StrictVersion(newer)
51 return outdated_version <= parsed_version < newer_version
52
53
54 class HDF5Reader(GenericPlasma):
55 def __init__(self, hdf5, **kwargs):
56 """
57 Core class for accessing various attributes on HDF5 files that
58 are based on OpenPMD standards.
59
60 Attributes
61 ----------
62 electric_field : `astropy.units.Quantity`
63 An (x, y, z) array containing electric field data.
64 charge_density : `astropy.units.Quantity`
65 An array containing charge density data.
66
67 Parameters
68 ----------
69 hdf5 : `str`
70 Path to HDF5 file.
71
72 References
73 ----------
74 .. [1] http://openpmd.org/
75 """
76
77 if not os.path.isfile(hdf5):
78 raise FileNotFoundError(f"Could not find file: '{hdf5}'")
79
80 h5 = h5py.File(hdf5)
81 self.h5 = h5
82
83 self._check_valid_openpmd_version()
84
85 self.subname = tuple(self.h5['data'])[0]
86
87 def _check_valid_openpmd_version(self):
88 try:
89 openPMD_version = self.h5.attrs["openPMD"].decode('utf-8')
90 if _valid_version(openPMD_version):
91 return True
92 else:
93 raise DataStandardError(f"We currently only support HDF5 versions"
94 f"starting from v{_OUTDATED_VERSION} and "
95 f"lower than v{_NEWER_VERSION}. You can "
96 f"however convert your HDF5 to a supported "
97 f"version. For more information; see "
98 f"https://github.com/openPMD/openPMD-updater")
99 except KeyError:
100 raise DataStandardError("Input HDF5 file does not go on with "
101 "standards defined by OpenPMD")
102
103 @property
104 def electric_field(self):
105 path = f"data/{self.subname}/fields/E"
106 if path in self.h5:
107 units = _fetch_units(self.h5[path].attrs["unitDimension"])
108 axes = [self.h5[path][axis]
109 for axis in self.h5[path]]
110 return np.array(axes) * units
111 else:
112 raise AttributeError("No electric field data available "
113 "in HDF5 file")
114
115 @property
116 def charge_density(self):
117 path = f"data/{self.subname}/fields/rho"
118 if path in self.h5:
119 units = _fetch_units(self.h5[path].attrs["unitDimension"])
120 return np.array(self.h5[path]) * units
121 else:
122 raise AttributeError("No charge density data available "
123 "in HDF5 file")
124
125 @property
126 def magnetic_field(self):
127 path = f"data/{self.subname}/fields/B"
128 if path in self.h5:
129 units = _fetch_units(self.h5[path].attrs["unitDimension"])
130 axes = [self.h5[path][axis]
131 for axis in self.h5[path]]
132 return np.array(axes) * units
133 else:
134 raise AttributeError("No magnetic field data available "
135 "in HDF5 file")
136
137 @property
138 def electric_current(self):
139 path = f"data/{self.subname}/fields/J"
140 if path in self.h5:
141 units = _fetch_units(self.h5[path].attrs["unitDimension"])
142 axes = [self.h5[path][axis]
143 for axis in self.h5[path]]
144 return np.array(axes) * units
145 else:
146 raise AttributeError("No electric current data available "
147 "in HDF5 file")
148
149 @classmethod
150 def is_datasource_for(cls, **kwargs):
151 if "hdf5" not in kwargs:
152 return False
153
154 hdf5 = kwargs.get("hdf5")
155 openPMD = kwargs.get("openPMD")
156
157 isfile = os.path.isfile(hdf5)
158 if not isfile:
159 raise FileNotFoundError(f"Could not find file: '{hdf5}'")
160
161 if "openPMD" not in kwargs:
162 h5 = h5py.File(hdf5)
163 try:
164 openPMD = h5.attrs["openPMD"]
165 except KeyError:
166 openPMD = False
167
168 return openPMD
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plasmapy/classes/sources/openpmd_hdf5.py b/plasmapy/classes/sources/openpmd_hdf5.py
--- a/plasmapy/classes/sources/openpmd_hdf5.py
+++ b/plasmapy/classes/sources/openpmd_hdf5.py
@@ -84,6 +84,15 @@
self.subname = tuple(self.h5['data'])[0]
+ def __enter__(self):
+ return self.h5
+
+ def close(self):
+ self.h5.close()
+
+ def __exit__(self):
+ self.h5.close()
+
def _check_valid_openpmd_version(self):
try:
openPMD_version = self.h5.attrs["openPMD"].decode('utf-8')
| {"golden_diff": "diff --git a/plasmapy/classes/sources/openpmd_hdf5.py b/plasmapy/classes/sources/openpmd_hdf5.py\n--- a/plasmapy/classes/sources/openpmd_hdf5.py\n+++ b/plasmapy/classes/sources/openpmd_hdf5.py\n@@ -84,6 +84,15 @@\n \n self.subname = tuple(self.h5['data'])[0]\n \n+ def __enter__(self):\n+ return self.h5\n+\n+ def close(self):\n+ self.h5.close()\n+\n+ def __exit__(self):\n+ self.h5.close()\n+\n def _check_valid_openpmd_version(self):\n try:\n openPMD_version = self.h5.attrs[\"openPMD\"].decode('utf-8')\n", "issue": "Bring back parallel testing in tox.ini\nI forgot a flag in there while debugging.\n", "before_files": [{"content": "import numpy as np\nimport astropy.units as u\ntry:\n import h5py\nexcept (ImportError, ModuleNotFoundError) as e:\n from plasmapy.optional_deps import h5py_import_error\n raise ImportError(h5py_import_error) from e\n\nfrom plasmapy.classes import GenericPlasma\nfrom plasmapy.classes.exceptions import DataStandardError\n\nimport os\nfrom distutils.version import StrictVersion\n\n\n_OUTDATED_VERSION = \"1.1.0\"\n_NEWER_VERSION = \"2.0.0\"\n\n# This is the order what OpenPMD uses to store unit\n# dimensions for a record.\n_UNITS = (u.meter,\n u.kilogram,\n u.second,\n u.ampere,\n u.Kelvin,\n u.mol,\n u.candela)\n\n\ndef _fetch_units(openPMD_dims):\n \"\"\"\n Converts a collection of OpenPMD dimensions to astropy.units.\n \"\"\"\n\n units = u.dimensionless_unscaled\n for factor, unit in zip(openPMD_dims, _UNITS):\n units *= (unit ** factor)\n units, *_ = units.compose()\n return units\n\n\ndef _valid_version(openPMD_version,\n outdated=_OUTDATED_VERSION, newer=_NEWER_VERSION):\n \"\"\"\n Checks if the passed version is supported or not.\n \"\"\"\n\n parsed_version = StrictVersion(openPMD_version)\n outdated_version = StrictVersion(outdated)\n newer_version = StrictVersion(newer)\n return outdated_version <= parsed_version < newer_version\n\n\nclass HDF5Reader(GenericPlasma):\n def __init__(self, hdf5, **kwargs):\n \"\"\"\n Core class for accessing various attributes on HDF5 files that\n are based on OpenPMD standards.\n\n Attributes\n ----------\n electric_field : `astropy.units.Quantity`\n An (x, y, z) array containing electric field data.\n charge_density : `astropy.units.Quantity`\n An array containing charge density data.\n\n Parameters\n ----------\n hdf5 : `str`\n Path to HDF5 file.\n\n References\n ----------\n .. [1] http://openpmd.org/\n \"\"\"\n\n if not os.path.isfile(hdf5):\n raise FileNotFoundError(f\"Could not find file: '{hdf5}'\")\n\n h5 = h5py.File(hdf5)\n self.h5 = h5\n\n self._check_valid_openpmd_version()\n\n self.subname = tuple(self.h5['data'])[0]\n\n def _check_valid_openpmd_version(self):\n try:\n openPMD_version = self.h5.attrs[\"openPMD\"].decode('utf-8')\n if _valid_version(openPMD_version):\n return True\n else:\n raise DataStandardError(f\"We currently only support HDF5 versions\"\n f\"starting from v{_OUTDATED_VERSION} and \"\n f\"lower than v{_NEWER_VERSION}. You can \"\n f\"however convert your HDF5 to a supported \"\n f\"version. 
For more information; see \"\n f\"https://github.com/openPMD/openPMD-updater\")\n except KeyError:\n raise DataStandardError(\"Input HDF5 file does not go on with \"\n \"standards defined by OpenPMD\")\n\n @property\n def electric_field(self):\n path = f\"data/{self.subname}/fields/E\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n axes = [self.h5[path][axis]\n for axis in self.h5[path]]\n return np.array(axes) * units\n else:\n raise AttributeError(\"No electric field data available \"\n \"in HDF5 file\")\n\n @property\n def charge_density(self):\n path = f\"data/{self.subname}/fields/rho\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n return np.array(self.h5[path]) * units\n else:\n raise AttributeError(\"No charge density data available \"\n \"in HDF5 file\")\n\n @property\n def magnetic_field(self):\n path = f\"data/{self.subname}/fields/B\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n axes = [self.h5[path][axis]\n for axis in self.h5[path]]\n return np.array(axes) * units\n else:\n raise AttributeError(\"No magnetic field data available \"\n \"in HDF5 file\")\n\n @property\n def electric_current(self):\n path = f\"data/{self.subname}/fields/J\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n axes = [self.h5[path][axis]\n for axis in self.h5[path]]\n return np.array(axes) * units\n else:\n raise AttributeError(\"No electric current data available \"\n \"in HDF5 file\")\n\n @classmethod\n def is_datasource_for(cls, **kwargs):\n if \"hdf5\" not in kwargs:\n return False\n\n hdf5 = kwargs.get(\"hdf5\")\n openPMD = kwargs.get(\"openPMD\")\n\n isfile = os.path.isfile(hdf5)\n if not isfile:\n raise FileNotFoundError(f\"Could not find file: '{hdf5}'\")\n\n if \"openPMD\" not in kwargs:\n h5 = h5py.File(hdf5)\n try:\n openPMD = h5.attrs[\"openPMD\"]\n except KeyError:\n openPMD = False\n\n return openPMD\n", "path": "plasmapy/classes/sources/openpmd_hdf5.py"}], "after_files": [{"content": "import numpy as np\nimport astropy.units as u\ntry:\n import h5py\nexcept (ImportError, ModuleNotFoundError) as e:\n from plasmapy.optional_deps import h5py_import_error\n raise ImportError(h5py_import_error) from e\n\nfrom plasmapy.classes import GenericPlasma\nfrom plasmapy.classes.exceptions import DataStandardError\n\nimport os\nfrom distutils.version import StrictVersion\n\n\n_OUTDATED_VERSION = \"1.1.0\"\n_NEWER_VERSION = \"2.0.0\"\n\n# This is the order what OpenPMD uses to store unit\n# dimensions for a record.\n_UNITS = (u.meter,\n u.kilogram,\n u.second,\n u.ampere,\n u.Kelvin,\n u.mol,\n u.candela)\n\n\ndef _fetch_units(openPMD_dims):\n \"\"\"\n Converts a collection of OpenPMD dimensions to astropy.units.\n \"\"\"\n\n units = u.dimensionless_unscaled\n for factor, unit in zip(openPMD_dims, _UNITS):\n units *= (unit ** factor)\n units, *_ = units.compose()\n return units\n\n\ndef _valid_version(openPMD_version,\n outdated=_OUTDATED_VERSION, newer=_NEWER_VERSION):\n \"\"\"\n Checks if the passed version is supported or not.\n \"\"\"\n\n parsed_version = StrictVersion(openPMD_version)\n outdated_version = StrictVersion(outdated)\n newer_version = StrictVersion(newer)\n return outdated_version <= parsed_version < newer_version\n\n\nclass HDF5Reader(GenericPlasma):\n def __init__(self, hdf5, **kwargs):\n \"\"\"\n Core class for accessing various attributes on HDF5 files that\n are based on OpenPMD standards.\n\n Attributes\n ----------\n 
electric_field : `astropy.units.Quantity`\n An (x, y, z) array containing electric field data.\n charge_density : `astropy.units.Quantity`\n An array containing charge density data.\n\n Parameters\n ----------\n hdf5 : `str`\n Path to HDF5 file.\n\n References\n ----------\n .. [1] http://openpmd.org/\n \"\"\"\n\n if not os.path.isfile(hdf5):\n raise FileNotFoundError(f\"Could not find file: '{hdf5}'\")\n\n h5 = h5py.File(hdf5)\n self.h5 = h5\n\n self._check_valid_openpmd_version()\n\n self.subname = tuple(self.h5['data'])[0]\n\n def __enter__(self):\n return self.h5\n\n def close(self):\n self.h5.close()\n\n def __exit__(self):\n self.h5.close()\n\n def _check_valid_openpmd_version(self):\n try:\n openPMD_version = self.h5.attrs[\"openPMD\"].decode('utf-8')\n if _valid_version(openPMD_version):\n return True\n else:\n raise DataStandardError(f\"We currently only support HDF5 versions\"\n f\"starting from v{_OUTDATED_VERSION} and \"\n f\"lower than v{_NEWER_VERSION}. You can \"\n f\"however convert your HDF5 to a supported \"\n f\"version. For more information; see \"\n f\"https://github.com/openPMD/openPMD-updater\")\n except KeyError:\n raise DataStandardError(\"Input HDF5 file does not go on with \"\n \"standards defined by OpenPMD\")\n\n @property\n def electric_field(self):\n path = f\"data/{self.subname}/fields/E\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n axes = [self.h5[path][axis]\n for axis in self.h5[path]]\n return np.array(axes) * units\n else:\n raise AttributeError(\"No electric field data available \"\n \"in HDF5 file\")\n\n @property\n def charge_density(self):\n path = f\"data/{self.subname}/fields/rho\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n return np.array(self.h5[path]) * units\n else:\n raise AttributeError(\"No charge density data available \"\n \"in HDF5 file\")\n\n @property\n def magnetic_field(self):\n path = f\"data/{self.subname}/fields/B\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n axes = [self.h5[path][axis]\n for axis in self.h5[path]]\n return np.array(axes) * units\n else:\n raise AttributeError(\"No magnetic field data available \"\n \"in HDF5 file\")\n\n @property\n def electric_current(self):\n path = f\"data/{self.subname}/fields/J\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n axes = [self.h5[path][axis]\n for axis in self.h5[path]]\n return np.array(axes) * units\n else:\n raise AttributeError(\"No electric current data available \"\n \"in HDF5 file\")\n\n @classmethod\n def is_datasource_for(cls, **kwargs):\n if \"hdf5\" not in kwargs:\n return False\n\n hdf5 = kwargs.get(\"hdf5\")\n openPMD = kwargs.get(\"openPMD\")\n\n isfile = os.path.isfile(hdf5)\n if not isfile:\n raise FileNotFoundError(f\"Could not find file: '{hdf5}'\")\n\n if \"openPMD\" not in kwargs:\n h5 = h5py.File(hdf5)\n try:\n openPMD = h5.attrs[\"openPMD\"]\n except KeyError:\n openPMD = False\n\n return openPMD\n", "path": "plasmapy/classes/sources/openpmd_hdf5.py"}]} | 1,916 | 174 |
gh_patches_debug_37987 | rasdani/github-patches | git_diff | scikit-image__scikit-image-4348 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
regionprops_table is not mentioned in the doc
We should use it in an existing example, or add a new example.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/examples/segmentation/plot_regionprops.py`
Content:
```
1 """
2 =========================
3 Measure region properties
4 =========================
5
6 This example shows how to measure properties of labelled image regions.
7
8 """
9 import math
10 import matplotlib.pyplot as plt
11 import numpy as np
12
13 from skimage.draw import ellipse
14 from skimage.measure import label, regionprops
15 from skimage.transform import rotate
16
17
18 image = np.zeros((600, 600))
19
20 rr, cc = ellipse(300, 350, 100, 220)
21 image[rr, cc] = 1
22
23 image = rotate(image, angle=15, order=0)
24
25 label_img = label(image)
26 regions = regionprops(label_img)
27
28 fig, ax = plt.subplots()
29 ax.imshow(image, cmap=plt.cm.gray)
30
31 for props in regions:
32 y0, x0 = props.centroid
33 orientation = props.orientation
34 x1 = x0 + math.cos(orientation) * 0.5 * props.major_axis_length
35 y1 = y0 - math.sin(orientation) * 0.5 * props.major_axis_length
36 x2 = x0 - math.sin(orientation) * 0.5 * props.minor_axis_length
37 y2 = y0 - math.cos(orientation) * 0.5 * props.minor_axis_length
38
39 ax.plot((x0, x1), (y0, y1), '-r', linewidth=2.5)
40 ax.plot((x0, x2), (y0, y2), '-r', linewidth=2.5)
41 ax.plot(x0, y0, '.g', markersize=15)
42
43 minr, minc, maxr, maxc = props.bbox
44 bx = (minc, maxc, maxc, minc, minc)
45 by = (minr, minr, maxr, maxr, minr)
46 ax.plot(bx, by, '-b', linewidth=2.5)
47
48 ax.axis((0, 600, 600, 0))
49 plt.show()
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/doc/examples/segmentation/plot_regionprops.py b/doc/examples/segmentation/plot_regionprops.py
--- a/doc/examples/segmentation/plot_regionprops.py
+++ b/doc/examples/segmentation/plot_regionprops.py
@@ -3,15 +3,17 @@
Measure region properties
=========================
-This example shows how to measure properties of labelled image regions.
+This example shows how to measure properties of labelled image regions. We
+analyze an image with two ellipses.
"""
import math
import matplotlib.pyplot as plt
import numpy as np
+import pandas as pd
from skimage.draw import ellipse
-from skimage.measure import label, regionprops
+from skimage.measure import label, regionprops, regionprops_table
from skimage.transform import rotate
@@ -22,19 +24,27 @@
image = rotate(image, angle=15, order=0)
+rr, cc = ellipse(100, 100, 60, 50)
+image[rr, cc] = 1
+
label_img = label(image)
regions = regionprops(label_img)
+#####################################################################
+# We use the :py:func:`skimage.measure.regionprops` result to draw certain
+# properties on each region. For example, in red, we plot the major and minor
+# axes of each ellipse.
+
fig, ax = plt.subplots()
ax.imshow(image, cmap=plt.cm.gray)
for props in regions:
y0, x0 = props.centroid
orientation = props.orientation
- x1 = x0 + math.cos(orientation) * 0.5 * props.major_axis_length
- y1 = y0 - math.sin(orientation) * 0.5 * props.major_axis_length
- x2 = x0 - math.sin(orientation) * 0.5 * props.minor_axis_length
- y2 = y0 - math.cos(orientation) * 0.5 * props.minor_axis_length
+ x1 = x0 + math.cos(orientation) * 0.5 * props.minor_axis_length
+ y1 = y0 - math.sin(orientation) * 0.5 * props.minor_axis_length
+ x2 = x0 - math.sin(orientation) * 0.5 * props.major_axis_length
+ y2 = y0 - math.cos(orientation) * 0.5 * props.major_axis_length
ax.plot((x0, x1), (y0, y1), '-r', linewidth=2.5)
ax.plot((x0, x2), (y0, y2), '-r', linewidth=2.5)
@@ -47,3 +57,22 @@
ax.axis((0, 600, 600, 0))
plt.show()
+
+#####################################################################
+# We use the :py:func:`skimage.measure.regionprops_table` to compute
+# (selected) properties for each region. Note that
+# ``skimage.measure.regionprops_table`` actually computes the properties,
+# whereas ``skimage.measure.regionprops`` computes them when they come in use
+# (lazy evaluation).
+
+props = regionprops_table(label_img, properties=('centroid',
+ 'orientation',
+ 'major_axis_length',
+ 'minor_axis_length'))
+
+#####################################################################
+# We now display a table of these selected properties (one region per row),
+# the ``skimage.measure.regionprops_table`` result being a pandas-compatible
+# dict.
+
+pd.DataFrame(props)
| {"golden_diff": "diff --git a/doc/examples/segmentation/plot_regionprops.py b/doc/examples/segmentation/plot_regionprops.py\n--- a/doc/examples/segmentation/plot_regionprops.py\n+++ b/doc/examples/segmentation/plot_regionprops.py\n@@ -3,15 +3,17 @@\n Measure region properties\n =========================\n \n-This example shows how to measure properties of labelled image regions.\n+This example shows how to measure properties of labelled image regions. We\n+analyze an image with two ellipses.\n \n \"\"\"\n import math\n import matplotlib.pyplot as plt\n import numpy as np\n+import pandas as pd\n \n from skimage.draw import ellipse\n-from skimage.measure import label, regionprops\n+from skimage.measure import label, regionprops, regionprops_table\n from skimage.transform import rotate\n \n \n@@ -22,19 +24,27 @@\n \n image = rotate(image, angle=15, order=0)\n \n+rr, cc = ellipse(100, 100, 60, 50)\n+image[rr, cc] = 1\n+\n label_img = label(image)\n regions = regionprops(label_img)\n \n+#####################################################################\n+# We use the :py:func:`skimage.measure.regionprops` result to draw certain\n+# properties on each region. For example, in red, we plot the major and minor\n+# axes of each ellipse.\n+\n fig, ax = plt.subplots()\n ax.imshow(image, cmap=plt.cm.gray)\n \n for props in regions:\n y0, x0 = props.centroid\n orientation = props.orientation\n- x1 = x0 + math.cos(orientation) * 0.5 * props.major_axis_length\n- y1 = y0 - math.sin(orientation) * 0.5 * props.major_axis_length\n- x2 = x0 - math.sin(orientation) * 0.5 * props.minor_axis_length\n- y2 = y0 - math.cos(orientation) * 0.5 * props.minor_axis_length\n+ x1 = x0 + math.cos(orientation) * 0.5 * props.minor_axis_length\n+ y1 = y0 - math.sin(orientation) * 0.5 * props.minor_axis_length\n+ x2 = x0 - math.sin(orientation) * 0.5 * props.major_axis_length\n+ y2 = y0 - math.cos(orientation) * 0.5 * props.major_axis_length\n \n ax.plot((x0, x1), (y0, y1), '-r', linewidth=2.5)\n ax.plot((x0, x2), (y0, y2), '-r', linewidth=2.5)\n@@ -47,3 +57,22 @@\n \n ax.axis((0, 600, 600, 0))\n plt.show()\n+\n+#####################################################################\n+# We use the :py:func:`skimage.measure.regionprops_table` to compute\n+# (selected) properties for each region. 
Note that\n+# ``skimage.measure.regionprops_table`` actually computes the properties,\n+# whereas ``skimage.measure.regionprops`` computes them when they come in use\n+# (lazy evaluation).\n+\n+props = regionprops_table(label_img, properties=('centroid',\n+ 'orientation',\n+ 'major_axis_length',\n+ 'minor_axis_length'))\n+\n+#####################################################################\n+# We now display a table of these selected properties (one region per row),\n+# the ``skimage.measure.regionprops_table`` result being a pandas-compatible\n+# dict.\n+\n+pd.DataFrame(props)\n", "issue": "regionsprops_table is not mentioned in the doc\nWe should use it in an existing example, or add a new example.\n", "before_files": [{"content": "\"\"\"\n=========================\nMeasure region properties\n=========================\n\nThis example shows how to measure properties of labelled image regions.\n\n\"\"\"\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom skimage.draw import ellipse\nfrom skimage.measure import label, regionprops\nfrom skimage.transform import rotate\n\n\nimage = np.zeros((600, 600))\n\nrr, cc = ellipse(300, 350, 100, 220)\nimage[rr, cc] = 1\n\nimage = rotate(image, angle=15, order=0)\n\nlabel_img = label(image)\nregions = regionprops(label_img)\n\nfig, ax = plt.subplots()\nax.imshow(image, cmap=plt.cm.gray)\n\nfor props in regions:\n y0, x0 = props.centroid\n orientation = props.orientation\n x1 = x0 + math.cos(orientation) * 0.5 * props.major_axis_length\n y1 = y0 - math.sin(orientation) * 0.5 * props.major_axis_length\n x2 = x0 - math.sin(orientation) * 0.5 * props.minor_axis_length\n y2 = y0 - math.cos(orientation) * 0.5 * props.minor_axis_length\n\n ax.plot((x0, x1), (y0, y1), '-r', linewidth=2.5)\n ax.plot((x0, x2), (y0, y2), '-r', linewidth=2.5)\n ax.plot(x0, y0, '.g', markersize=15)\n\n minr, minc, maxr, maxc = props.bbox\n bx = (minc, maxc, maxc, minc, minc)\n by = (minr, minr, maxr, maxr, minr)\n ax.plot(bx, by, '-b', linewidth=2.5)\n\nax.axis((0, 600, 600, 0))\nplt.show()\n", "path": "doc/examples/segmentation/plot_regionprops.py"}], "after_files": [{"content": "\"\"\"\n=========================\nMeasure region properties\n=========================\n\nThis example shows how to measure properties of labelled image regions. We\nanalyze an image with two ellipses.\n\n\"\"\"\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom skimage.draw import ellipse\nfrom skimage.measure import label, regionprops, regionprops_table\nfrom skimage.transform import rotate\n\n\nimage = np.zeros((600, 600))\n\nrr, cc = ellipse(300, 350, 100, 220)\nimage[rr, cc] = 1\n\nimage = rotate(image, angle=15, order=0)\n\nrr, cc = ellipse(100, 100, 60, 50)\nimage[rr, cc] = 1\n\nlabel_img = label(image)\nregions = regionprops(label_img)\n\n#####################################################################\n# We use the :py:func:`skimage.measure.regionprops` result to draw certain\n# properties on each region. 
For example, in red, we plot the major and minor\n# axes of each ellipse.\n\nfig, ax = plt.subplots()\nax.imshow(image, cmap=plt.cm.gray)\n\nfor props in regions:\n y0, x0 = props.centroid\n orientation = props.orientation\n x1 = x0 + math.cos(orientation) * 0.5 * props.minor_axis_length\n y1 = y0 - math.sin(orientation) * 0.5 * props.minor_axis_length\n x2 = x0 - math.sin(orientation) * 0.5 * props.major_axis_length\n y2 = y0 - math.cos(orientation) * 0.5 * props.major_axis_length\n\n ax.plot((x0, x1), (y0, y1), '-r', linewidth=2.5)\n ax.plot((x0, x2), (y0, y2), '-r', linewidth=2.5)\n ax.plot(x0, y0, '.g', markersize=15)\n\n minr, minc, maxr, maxc = props.bbox\n bx = (minc, maxc, maxc, minc, minc)\n by = (minr, minr, maxr, maxr, minr)\n ax.plot(bx, by, '-b', linewidth=2.5)\n\nax.axis((0, 600, 600, 0))\nplt.show()\n\n#####################################################################\n# We use the :py:func:`skimage.measure.regionprops_table` to compute\n# (selected) properties for each region. Note that\n# ``skimage.measure.regionprops_table`` actually computes the properties,\n# whereas ``skimage.measure.regionprops`` computes them when they come in use\n# (lazy evaluation).\n\nprops = regionprops_table(label_img, properties=('centroid',\n 'orientation',\n 'major_axis_length',\n 'minor_axis_length'))\n\n#####################################################################\n# We now display a table of these selected properties (one region per row),\n# the ``skimage.measure.regionprops_table`` result being a pandas-compatible\n# dict.\n\npd.DataFrame(props)\n", "path": "doc/examples/segmentation/plot_regionprops.py"}]} | 821 | 767 |
gh_patches_debug_49618 | rasdani/github-patches | git_diff | bridgecrewio__checkov-5936 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_GCP_79 SQL Server latest version is 2022 instead of 2019
**Describe the issue**
The `CKV_GCP_79` check for SQL Server is pinned at 2019, but 2022 is the latest version:
https://learn.microsoft.com/en-us/troubleshoot/sql/releases/download-and-install-latest-updates
**Examples**
Related to these files:
https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py
https://github.com/bridgecrewio/checkov/blob/d07fdc994015772a9fa0dc1a12d1391b5765916c/tests/terraform/checks/resource/gcp/example_CloudSqlMajorVersion/main.tf#L213
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py`
Content:
```
1 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
2 from checkov.common.models.enums import CheckCategories
3
4
5 class CloudSqlMajorVersion(BaseResourceValueCheck):
6 def __init__(self):
7 name = "Ensure SQL database is using latest Major version"
8 id = "CKV_GCP_79"
9 supported_resources = ['google_sql_database_instance']
10 categories = [CheckCategories.GENERAL_SECURITY]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def get_inspected_key(self):
14 return 'database_version'
15
16 def get_expected_values(self):
17 return ["POSTGRES_15", "MYSQL_8_0", "SQLSERVER_2019_STANDARD", "SQLSERVER_2019_WEB",
18 "SQLSERVER_2019_ENTERPRISE", "SQLSERVER_2019_EXPRESS"]
19
20
21 check = CloudSqlMajorVersion()
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py b/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py
--- a/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py
+++ b/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py
@@ -14,8 +14,8 @@
return 'database_version'
def get_expected_values(self):
- return ["POSTGRES_15", "MYSQL_8_0", "SQLSERVER_2019_STANDARD", "SQLSERVER_2019_WEB",
- "SQLSERVER_2019_ENTERPRISE", "SQLSERVER_2019_EXPRESS"]
+ return ["POSTGRES_15", "MYSQL_8_0", "SQLSERVER_2022_STANDARD", "SQLSERVER_2022_WEB",
+ "SQLSERVER_2022_ENTERPRISE", "SQLSERVER_2022_EXPRESS"]
check = CloudSqlMajorVersion()
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py b/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py\n--- a/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py\n+++ b/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py\n@@ -14,8 +14,8 @@\n return 'database_version'\n \n def get_expected_values(self):\n- return [\"POSTGRES_15\", \"MYSQL_8_0\", \"SQLSERVER_2019_STANDARD\", \"SQLSERVER_2019_WEB\",\n- \"SQLSERVER_2019_ENTERPRISE\", \"SQLSERVER_2019_EXPRESS\"]\n+ return [\"POSTGRES_15\", \"MYSQL_8_0\", \"SQLSERVER_2022_STANDARD\", \"SQLSERVER_2022_WEB\",\n+ \"SQLSERVER_2022_ENTERPRISE\", \"SQLSERVER_2022_EXPRESS\"]\n \n \n check = CloudSqlMajorVersion()\n", "issue": "CKV_GCP_79 SQL Server latest version is 2022 instead of 2019\n**Describe the issue**\r\nThe `CKV_GCP_79` about SQL server is pinned at 2019 but 2022 is the latest version : \r\nhttps://learn.microsoft.com/en-us/troubleshoot/sql/releases/download-and-install-latest-updates\r\n\r\n**Examples**\r\nRelated to this files : \r\n\r\nhttps://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py\r\n\r\nhttps://github.com/bridgecrewio/checkov/blob/d07fdc994015772a9fa0dc1a12d1391b5765916c/tests/terraform/checks/resource/gcp/example_CloudSqlMajorVersion/main.tf#L213\n", "before_files": [{"content": "from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\n\n\nclass CloudSqlMajorVersion(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure SQL database is using latest Major version\"\n id = \"CKV_GCP_79\"\n supported_resources = ['google_sql_database_instance']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'database_version'\n\n def get_expected_values(self):\n return [\"POSTGRES_15\", \"MYSQL_8_0\", \"SQLSERVER_2019_STANDARD\", \"SQLSERVER_2019_WEB\",\n \"SQLSERVER_2019_ENTERPRISE\", \"SQLSERVER_2019_EXPRESS\"]\n\n\ncheck = CloudSqlMajorVersion()\n", "path": "checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py"}], "after_files": [{"content": "from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\n\n\nclass CloudSqlMajorVersion(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure SQL database is using latest Major version\"\n id = \"CKV_GCP_79\"\n supported_resources = ['google_sql_database_instance']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'database_version'\n\n def get_expected_values(self):\n return [\"POSTGRES_15\", \"MYSQL_8_0\", \"SQLSERVER_2022_STANDARD\", \"SQLSERVER_2022_WEB\",\n \"SQLSERVER_2022_ENTERPRISE\", \"SQLSERVER_2022_EXPRESS\"]\n\n\ncheck = CloudSqlMajorVersion()\n", "path": "checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py"}]} | 699 | 230 |
gh_patches_debug_20647 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1352 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug in pairwise_euclidean_distance
https://github.com/Lightning-AI/metrics/blob/e1c3fda24f90367803c2b04315ad7c8bced719db/torchmetrics/functional/pairwise/euclidean.py#L34
This line can become negative, resulting in a failure with the sqrt function and thus returning "nan"
you can test this easily by checking this code:
`pairwise_euclidean_distance(torch.tensor([[772., 112.], [772.20001, 112.], [772.20001, 112.], [772., 112.00000], [772.2, 112.00000], [772.0, 112.00000], [772.01, 112.00000], [772.00000000000001, 112.00000], [772.000001, 112.00000], [772.00001, 112.00000], [772.0001, 112.00000], [772.001, 112.00000], [772.01, 112.00000], [772.99, 112.00000]], dtype=torch.float32))`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/torchmetrics/functional/pairwise/euclidean.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Optional
15
16 from torch import Tensor
17 from typing_extensions import Literal
18
19 from torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix
20
21
22 def _pairwise_euclidean_distance_update(
23 x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
24 ) -> Tensor:
25 """Calculates the pairwise euclidean distance matrix.
26
27 Args:
28 x: tensor of shape ``[N,d]``
29 y: tensor of shape ``[M,d]``
30 zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
31 """
32 x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
33 x_norm = x.norm(dim=1, keepdim=True)
34 y_norm = y.norm(dim=1).T
35 distance = x_norm * x_norm + y_norm * y_norm - 2 * x.mm(y.T)
36 if zero_diagonal:
37 distance.fill_diagonal_(0)
38 return distance.sqrt()
39
40
41 def pairwise_euclidean_distance(
42 x: Tensor,
43 y: Optional[Tensor] = None,
44 reduction: Literal["mean", "sum", "none", None] = None,
45 zero_diagonal: Optional[bool] = None,
46 ) -> Tensor:
47 r"""Calculates pairwise euclidean distances:
48
49 .. math::
50 d_{euc}(x,y) = ||x - y||_2 = \sqrt{\sum_{d=1}^D (x_d - y_d)^2}
51
52 If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise between
53 the rows of :math:`x` and :math:`y`.
54 If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.
55
56 Args:
57 x: Tensor with shape ``[N, d]``
58 y: Tensor with shape ``[M, d]``, optional
59 reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`
60 (applied along column dimension) or `'none'`, `None` for no reduction
61 zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only `x` is given
62 this defaults to `True` else if `y` is also given it defaults to `False`
63
64 Returns:
65 A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix
66
67 Example:
68 >>> import torch
69 >>> from torchmetrics.functional import pairwise_euclidean_distance
70 >>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)
71 >>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)
72 >>> pairwise_euclidean_distance(x, y)
73 tensor([[3.1623, 2.0000],
74 [5.3852, 4.1231],
75 [8.9443, 7.6158]])
76 >>> pairwise_euclidean_distance(x)
77 tensor([[0.0000, 2.2361, 5.8310],
78 [2.2361, 0.0000, 3.6056],
79 [5.8310, 3.6056, 0.0000]])
80 """
81 distance = _pairwise_euclidean_distance_update(x, y, zero_diagonal)
82 return _reduce_distance_matrix(distance, reduction)
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/torchmetrics/functional/pairwise/euclidean.py b/src/torchmetrics/functional/pairwise/euclidean.py
--- a/src/torchmetrics/functional/pairwise/euclidean.py
+++ b/src/torchmetrics/functional/pairwise/euclidean.py
@@ -13,6 +13,7 @@
# limitations under the License.
from typing import Optional
+import torch
from torch import Tensor
from typing_extensions import Literal
@@ -30,9 +31,13 @@
zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
"""
x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
- x_norm = x.norm(dim=1, keepdim=True)
- y_norm = y.norm(dim=1).T
- distance = x_norm * x_norm + y_norm * y_norm - 2 * x.mm(y.T)
+ # upcast to float64 to prevent precision issues
+ _orig_dtype = x.dtype
+ x = x.to(torch.float64)
+ y = y.to(torch.float64)
+ x_norm = (x * x).sum(dim=1, keepdim=True)
+ y_norm = (y * y).sum(dim=1)
+ distance = (x_norm + y_norm - 2 * x.mm(y.T)).to(_orig_dtype)
if zero_diagonal:
distance.fill_diagonal_(0)
return distance.sqrt()
| {"golden_diff": "diff --git a/src/torchmetrics/functional/pairwise/euclidean.py b/src/torchmetrics/functional/pairwise/euclidean.py\n--- a/src/torchmetrics/functional/pairwise/euclidean.py\n+++ b/src/torchmetrics/functional/pairwise/euclidean.py\n@@ -13,6 +13,7 @@\n # limitations under the License.\n from typing import Optional\n \n+import torch\n from torch import Tensor\n from typing_extensions import Literal\n \n@@ -30,9 +31,13 @@\n zero_diagonal: determines if the diagonal of the distance matrix should be set to zero\n \"\"\"\n x, y, zero_diagonal = _check_input(x, y, zero_diagonal)\n- x_norm = x.norm(dim=1, keepdim=True)\n- y_norm = y.norm(dim=1).T\n- distance = x_norm * x_norm + y_norm * y_norm - 2 * x.mm(y.T)\n+ # upcast to float64 to prevent precision issues\n+ _orig_dtype = x.dtype\n+ x = x.to(torch.float64)\n+ y = y.to(torch.float64)\n+ x_norm = (x * x).sum(dim=1, keepdim=True)\n+ y_norm = (y * y).sum(dim=1)\n+ distance = (x_norm + y_norm - 2 * x.mm(y.T)).to(_orig_dtype)\n if zero_diagonal:\n distance.fill_diagonal_(0)\n return distance.sqrt()\n", "issue": "bug in pairwise_euclidean_distance\nhttps://github.com/Lightning-AI/metrics/blob/e1c3fda24f90367803c2b04315ad7c8bced719db/torchmetrics/functional/pairwise/euclidean.py#L34\r\nthis line can become negative, resulting in a failure with the sqrt function and thus return \"nan\"\r\n\r\nyou can test this easily by checking this code:\r\n\r\n`pairwise_euclidean_distance(torch.tensor([[772., 112.], [772.20001, 112.], [772.20001, 112.], [772., 112.00000], [772.2, 112.00000], [772.0, 112.00000], [772.01, 112.00000], [772.00000000000001, 112.00000], [772.000001, 112.00000], [772.00001, 112.00000], [772.0001, 112.00000], [772.001, 112.00000], [772.01, 112.00000], [772.99, 112.00000]], dtype=torch.float32))`\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional\n\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix\n\n\ndef _pairwise_euclidean_distance_update(\n x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None\n) -> Tensor:\n \"\"\"Calculates the pairwise euclidean distance matrix.\n\n Args:\n x: tensor of shape ``[N,d]``\n y: tensor of shape ``[M,d]``\n zero_diagonal: determines if the diagonal of the distance matrix should be set to zero\n \"\"\"\n x, y, zero_diagonal = _check_input(x, y, zero_diagonal)\n x_norm = x.norm(dim=1, keepdim=True)\n y_norm = y.norm(dim=1).T\n distance = x_norm * x_norm + y_norm * y_norm - 2 * x.mm(y.T)\n if zero_diagonal:\n distance.fill_diagonal_(0)\n return distance.sqrt()\n\n\ndef pairwise_euclidean_distance(\n x: Tensor,\n y: Optional[Tensor] = None,\n reduction: Literal[\"mean\", \"sum\", \"none\", None] = None,\n zero_diagonal: Optional[bool] = None,\n) -> Tensor:\n r\"\"\"Calculates pairwise euclidean distances:\n\n .. 
math::\n d_{euc}(x,y) = ||x - y||_2 = \\sqrt{\\sum_{d=1}^D (x_d - y_d)^2}\n\n If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise between\n the rows of :math:`x` and :math:`y`.\n If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.\n\n Args:\n x: Tensor with shape ``[N, d]``\n y: Tensor with shape ``[M, d]``, optional\n reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`\n (applied along column dimension) or `'none'`, `None` for no reduction\n zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only `x` is given\n this defaults to `True` else if `y` is also given it defaults to `False`\n\n Returns:\n A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix\n\n Example:\n >>> import torch\n >>> from torchmetrics.functional import pairwise_euclidean_distance\n >>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)\n >>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)\n >>> pairwise_euclidean_distance(x, y)\n tensor([[3.1623, 2.0000],\n [5.3852, 4.1231],\n [8.9443, 7.6158]])\n >>> pairwise_euclidean_distance(x)\n tensor([[0.0000, 2.2361, 5.8310],\n [2.2361, 0.0000, 3.6056],\n [5.8310, 3.6056, 0.0000]])\n \"\"\"\n distance = _pairwise_euclidean_distance_update(x, y, zero_diagonal)\n return _reduce_distance_matrix(distance, reduction)\n", "path": "src/torchmetrics/functional/pairwise/euclidean.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional\n\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix\n\n\ndef _pairwise_euclidean_distance_update(\n x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None\n) -> Tensor:\n \"\"\"Calculates the pairwise euclidean distance matrix.\n\n Args:\n x: tensor of shape ``[N,d]``\n y: tensor of shape ``[M,d]``\n zero_diagonal: determines if the diagonal of the distance matrix should be set to zero\n \"\"\"\n x, y, zero_diagonal = _check_input(x, y, zero_diagonal)\n # upcast to float64 to prevent precision issues\n _orig_dtype = x.dtype\n x = x.to(torch.float64)\n y = y.to(torch.float64)\n x_norm = (x * x).sum(dim=1, keepdim=True)\n y_norm = (y * y).sum(dim=1)\n distance = (x_norm + y_norm - 2 * x.mm(y.T)).to(_orig_dtype)\n if zero_diagonal:\n distance.fill_diagonal_(0)\n return distance.sqrt()\n\n\ndef pairwise_euclidean_distance(\n x: Tensor,\n y: Optional[Tensor] = None,\n reduction: Literal[\"mean\", \"sum\", \"none\", None] = None,\n zero_diagonal: Optional[bool] = None,\n) -> Tensor:\n r\"\"\"Calculates pairwise euclidean distances:\n\n .. 
math::\n d_{euc}(x,y) = ||x - y||_2 = \\sqrt{\\sum_{d=1}^D (x_d - y_d)^2}\n\n If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise between\n the rows of :math:`x` and :math:`y`.\n If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.\n\n Args:\n x: Tensor with shape ``[N, d]``\n y: Tensor with shape ``[M, d]``, optional\n reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`\n (applied along column dimension) or `'none'`, `None` for no reduction\n zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only `x` is given\n this defaults to `True` else if `y` is also given it defaults to `False`\n\n Returns:\n A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix\n\n Example:\n >>> import torch\n >>> from torchmetrics.functional import pairwise_euclidean_distance\n >>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)\n >>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)\n >>> pairwise_euclidean_distance(x, y)\n tensor([[3.1623, 2.0000],\n [5.3852, 4.1231],\n [8.9443, 7.6158]])\n >>> pairwise_euclidean_distance(x)\n tensor([[0.0000, 2.2361, 5.8310],\n [2.2361, 0.0000, 3.6056],\n [5.8310, 3.6056, 0.0000]])\n \"\"\"\n distance = _pairwise_euclidean_distance_update(x, y, zero_diagonal)\n return _reduce_distance_matrix(distance, reduction)\n", "path": "src/torchmetrics/functional/pairwise/euclidean.py"}]} | 1,755 | 330 |
gh_patches_debug_50347 | rasdani/github-patches | git_diff | pypi__warehouse-7351 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix simple typo: thorugh -> through
# Issue Type
[x] Bug (Typo)
# Steps to Replicate
1. Examine warehouse/filters.py.
2. Search for `thorugh`.
# Expected Behaviour
1. Should read `through`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/filters.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import binascii
14 import collections
15 import enum
16 import hmac
17 import json
18 import re
19 import urllib.parse
20
21 import html5lib
22 import html5lib.serializer
23 import html5lib.treewalkers
24 import jinja2
25 import packaging.version
26 import pytz
27
28 from pyramid.threadlocal import get_current_request
29
30 from warehouse.utils.http import is_valid_uri
31
32
33 class PackageType(enum.Enum):
34 bdist_dmg = "OSX Disk Image"
35 bdist_dumb = "Dumb Binary"
36 bdist_egg = "Egg"
37 bdist_msi = "Windows MSI Installer"
38 bdist_rpm = "RPM"
39 bdist_wheel = "Wheel"
40 bdist_wininst = "Windows Installer"
41 sdist = "Source"
42
43
44 def format_package_type(value):
45 try:
46 return PackageType[value].value
47 except KeyError:
48 return value
49
50
51 def _camo_url(request, url):
52 camo_url = request.registry.settings["camo.url"].format(request=request)
53 camo_key = request.registry.settings["camo.key"].encode("utf8")
54 url = url.encode("utf8")
55
56 path = "/".join(
57 [
58 hmac.new(camo_key, url, digestmod="sha1").hexdigest(),
59 binascii.hexlify(url).decode("utf8"),
60 ]
61 )
62
63 return urllib.parse.urljoin(camo_url, path)
64
65
66 @jinja2.contextfilter
67 def camoify(ctx, value):
68 request = ctx.get("request") or get_current_request()
69
70 # Parse the rendered output and replace any inline images that don't point
71 # to HTTPS with camouflaged images.
72 tree_builder = html5lib.treebuilders.getTreeBuilder("dom")
73 parser = html5lib.html5parser.HTMLParser(tree=tree_builder)
74 dom = parser.parse(value)
75
76 for element in dom.getElementsByTagName("img"):
77 src = element.getAttribute("src")
78 if src:
79 element.setAttribute("src", request.camo_url(src))
80
81 tree_walker = html5lib.treewalkers.getTreeWalker("dom")
82 html_serializer = html5lib.serializer.HTMLSerializer()
83 camoed = "".join(html_serializer.serialize(tree_walker(dom)))
84
85 return camoed
86
87
88 _SI_SYMBOLS = ["k", "M", "G", "T", "P", "E", "Z", "Y"]
89
90
91 def shorten_number(value):
92 for i, symbol in enumerate(_SI_SYMBOLS):
93 magnitude = value / (1000 ** (i + 1))
94 if magnitude >= 1 and magnitude < 1000:
95 return "{:.3g}{}".format(magnitude, symbol)
96
97 return str(value)
98
99
100 def tojson(value):
101 return json.dumps(value, sort_keys=True, separators=(",", ":"))
102
103
104 def urlparse(value):
105 return urllib.parse.urlparse(value)
106
107
108 def format_tags(tags):
109 # split tags
110 if re.search(r",", tags):
111 split_tags = re.split(r"\s*,\s*", tags)
112 elif re.search(r";", tags):
113 split_tags = re.split(r"\s*;\s*", tags)
114 else:
115 split_tags = re.split(r"\s+", tags)
116
117 # strip whitespace, quotes, double quotes
118 stripped_tags = [re.sub(r'^["\'\s]+|["\'\s]+$', "", t) for t in split_tags]
119
120 # remove any empty tags
121 formatted_tags = [t for t in stripped_tags if t]
122
123 return formatted_tags
124
125
126 def format_classifiers(classifiers):
127 structured = collections.defaultdict(list)
128
129 # Split up our classifiers into our data structure
130 for classifier in classifiers:
131 key, *value = classifier.split(" :: ", 1)
132 if value:
133 structured[key].append(value[0])
134
135 # Go thorugh and ensure that all of the lists in our classifiers are in
136 # sorted order.
137 structured = {k: sorted(v) for k, v in structured.items()}
138
139 # Now, we'll ensure that our keys themselves are in sorted order, using an
140 # OrderedDict to preserve this ordering when we pass this data back up to
141 # our caller.
142 structured = collections.OrderedDict(sorted(structured.items()))
143
144 return structured
145
146
147 def classifier_id(classifier):
148 return classifier.replace(" ", "_").replace("::", ".")
149
150
151 def contains_valid_uris(items):
152 """Returns boolean representing whether the input list contains any valid
153 URIs
154 """
155 return any(is_valid_uri(i) for i in items)
156
157
158 def parse_version(version_str):
159 return packaging.version.parse(version_str)
160
161
162 def localize_datetime(timestamp):
163 return pytz.utc.localize(timestamp)
164
165
166 def includeme(config):
167 config.add_request_method(_camo_url, name="camo_url")
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/filters.py b/warehouse/filters.py
--- a/warehouse/filters.py
+++ b/warehouse/filters.py
@@ -132,7 +132,7 @@
if value:
structured[key].append(value[0])
- # Go thorugh and ensure that all of the lists in our classifiers are in
+ # Go through and ensure that all of the lists in our classifiers are in
# sorted order.
structured = {k: sorted(v) for k, v in structured.items()}
| {"golden_diff": "diff --git a/warehouse/filters.py b/warehouse/filters.py\n--- a/warehouse/filters.py\n+++ b/warehouse/filters.py\n@@ -132,7 +132,7 @@\n if value:\n structured[key].append(value[0])\n \n- # Go thorugh and ensure that all of the lists in our classifiers are in\n+ # Go through and ensure that all of the lists in our classifiers are in\n # sorted order.\n structured = {k: sorted(v) for k, v in structured.items()}\n", "issue": "Fix simple typo: thorugh -> through\n# Issue Type\n\n[x] Bug (Typo)\n\n# Steps to Replicate\n\n1. Examine warehouse/filters.py.\n2. Search for `thorugh`.\n\n# Expected Behaviour\n\n1. Should read `through`.\n\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport binascii\nimport collections\nimport enum\nimport hmac\nimport json\nimport re\nimport urllib.parse\n\nimport html5lib\nimport html5lib.serializer\nimport html5lib.treewalkers\nimport jinja2\nimport packaging.version\nimport pytz\n\nfrom pyramid.threadlocal import get_current_request\n\nfrom warehouse.utils.http import is_valid_uri\n\n\nclass PackageType(enum.Enum):\n bdist_dmg = \"OSX Disk Image\"\n bdist_dumb = \"Dumb Binary\"\n bdist_egg = \"Egg\"\n bdist_msi = \"Windows MSI Installer\"\n bdist_rpm = \"RPM\"\n bdist_wheel = \"Wheel\"\n bdist_wininst = \"Windows Installer\"\n sdist = \"Source\"\n\n\ndef format_package_type(value):\n try:\n return PackageType[value].value\n except KeyError:\n return value\n\n\ndef _camo_url(request, url):\n camo_url = request.registry.settings[\"camo.url\"].format(request=request)\n camo_key = request.registry.settings[\"camo.key\"].encode(\"utf8\")\n url = url.encode(\"utf8\")\n\n path = \"/\".join(\n [\n hmac.new(camo_key, url, digestmod=\"sha1\").hexdigest(),\n binascii.hexlify(url).decode(\"utf8\"),\n ]\n )\n\n return urllib.parse.urljoin(camo_url, path)\n\n\[email protected]\ndef camoify(ctx, value):\n request = ctx.get(\"request\") or get_current_request()\n\n # Parse the rendered output and replace any inline images that don't point\n # to HTTPS with camouflaged images.\n tree_builder = html5lib.treebuilders.getTreeBuilder(\"dom\")\n parser = html5lib.html5parser.HTMLParser(tree=tree_builder)\n dom = parser.parse(value)\n\n for element in dom.getElementsByTagName(\"img\"):\n src = element.getAttribute(\"src\")\n if src:\n element.setAttribute(\"src\", request.camo_url(src))\n\n tree_walker = html5lib.treewalkers.getTreeWalker(\"dom\")\n html_serializer = html5lib.serializer.HTMLSerializer()\n camoed = \"\".join(html_serializer.serialize(tree_walker(dom)))\n\n return camoed\n\n\n_SI_SYMBOLS = [\"k\", \"M\", \"G\", \"T\", \"P\", \"E\", \"Z\", \"Y\"]\n\n\ndef shorten_number(value):\n for i, symbol in enumerate(_SI_SYMBOLS):\n magnitude = value / (1000 ** (i + 1))\n if magnitude >= 1 and magnitude < 1000:\n return \"{:.3g}{}\".format(magnitude, symbol)\n\n return str(value)\n\n\ndef tojson(value):\n return json.dumps(value, sort_keys=True, separators=(\",\", \":\"))\n\n\ndef urlparse(value):\n return 
urllib.parse.urlparse(value)\n\n\ndef format_tags(tags):\n # split tags\n if re.search(r\",\", tags):\n split_tags = re.split(r\"\\s*,\\s*\", tags)\n elif re.search(r\";\", tags):\n split_tags = re.split(r\"\\s*;\\s*\", tags)\n else:\n split_tags = re.split(r\"\\s+\", tags)\n\n # strip whitespace, quotes, double quotes\n stripped_tags = [re.sub(r'^[\"\\'\\s]+|[\"\\'\\s]+$', \"\", t) for t in split_tags]\n\n # remove any empty tags\n formatted_tags = [t for t in stripped_tags if t]\n\n return formatted_tags\n\n\ndef format_classifiers(classifiers):\n structured = collections.defaultdict(list)\n\n # Split up our classifiers into our data structure\n for classifier in classifiers:\n key, *value = classifier.split(\" :: \", 1)\n if value:\n structured[key].append(value[0])\n\n # Go thorugh and ensure that all of the lists in our classifiers are in\n # sorted order.\n structured = {k: sorted(v) for k, v in structured.items()}\n\n # Now, we'll ensure that our keys themselves are in sorted order, using an\n # OrderedDict to preserve this ordering when we pass this data back up to\n # our caller.\n structured = collections.OrderedDict(sorted(structured.items()))\n\n return structured\n\n\ndef classifier_id(classifier):\n return classifier.replace(\" \", \"_\").replace(\"::\", \".\")\n\n\ndef contains_valid_uris(items):\n \"\"\"Returns boolean representing whether the input list contains any valid\n URIs\n \"\"\"\n return any(is_valid_uri(i) for i in items)\n\n\ndef parse_version(version_str):\n return packaging.version.parse(version_str)\n\n\ndef localize_datetime(timestamp):\n return pytz.utc.localize(timestamp)\n\n\ndef includeme(config):\n config.add_request_method(_camo_url, name=\"camo_url\")\n", "path": "warehouse/filters.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport binascii\nimport collections\nimport enum\nimport hmac\nimport json\nimport re\nimport urllib.parse\n\nimport html5lib\nimport html5lib.serializer\nimport html5lib.treewalkers\nimport jinja2\nimport packaging.version\nimport pytz\n\nfrom pyramid.threadlocal import get_current_request\n\nfrom warehouse.utils.http import is_valid_uri\n\n\nclass PackageType(enum.Enum):\n bdist_dmg = \"OSX Disk Image\"\n bdist_dumb = \"Dumb Binary\"\n bdist_egg = \"Egg\"\n bdist_msi = \"Windows MSI Installer\"\n bdist_rpm = \"RPM\"\n bdist_wheel = \"Wheel\"\n bdist_wininst = \"Windows Installer\"\n sdist = \"Source\"\n\n\ndef format_package_type(value):\n try:\n return PackageType[value].value\n except KeyError:\n return value\n\n\ndef _camo_url(request, url):\n camo_url = request.registry.settings[\"camo.url\"].format(request=request)\n camo_key = request.registry.settings[\"camo.key\"].encode(\"utf8\")\n url = url.encode(\"utf8\")\n\n path = \"/\".join(\n [\n hmac.new(camo_key, url, digestmod=\"sha1\").hexdigest(),\n binascii.hexlify(url).decode(\"utf8\"),\n ]\n )\n\n return urllib.parse.urljoin(camo_url, path)\n\n\[email protected]\ndef camoify(ctx, value):\n request = ctx.get(\"request\") or 
get_current_request()\n\n # Parse the rendered output and replace any inline images that don't point\n # to HTTPS with camouflaged images.\n tree_builder = html5lib.treebuilders.getTreeBuilder(\"dom\")\n parser = html5lib.html5parser.HTMLParser(tree=tree_builder)\n dom = parser.parse(value)\n\n for element in dom.getElementsByTagName(\"img\"):\n src = element.getAttribute(\"src\")\n if src:\n element.setAttribute(\"src\", request.camo_url(src))\n\n tree_walker = html5lib.treewalkers.getTreeWalker(\"dom\")\n html_serializer = html5lib.serializer.HTMLSerializer()\n camoed = \"\".join(html_serializer.serialize(tree_walker(dom)))\n\n return camoed\n\n\n_SI_SYMBOLS = [\"k\", \"M\", \"G\", \"T\", \"P\", \"E\", \"Z\", \"Y\"]\n\n\ndef shorten_number(value):\n for i, symbol in enumerate(_SI_SYMBOLS):\n magnitude = value / (1000 ** (i + 1))\n if magnitude >= 1 and magnitude < 1000:\n return \"{:.3g}{}\".format(magnitude, symbol)\n\n return str(value)\n\n\ndef tojson(value):\n return json.dumps(value, sort_keys=True, separators=(\",\", \":\"))\n\n\ndef urlparse(value):\n return urllib.parse.urlparse(value)\n\n\ndef format_tags(tags):\n # split tags\n if re.search(r\",\", tags):\n split_tags = re.split(r\"\\s*,\\s*\", tags)\n elif re.search(r\";\", tags):\n split_tags = re.split(r\"\\s*;\\s*\", tags)\n else:\n split_tags = re.split(r\"\\s+\", tags)\n\n # strip whitespace, quotes, double quotes\n stripped_tags = [re.sub(r'^[\"\\'\\s]+|[\"\\'\\s]+$', \"\", t) for t in split_tags]\n\n # remove any empty tags\n formatted_tags = [t for t in stripped_tags if t]\n\n return formatted_tags\n\n\ndef format_classifiers(classifiers):\n structured = collections.defaultdict(list)\n\n # Split up our classifiers into our data structure\n for classifier in classifiers:\n key, *value = classifier.split(\" :: \", 1)\n if value:\n structured[key].append(value[0])\n\n # Go through and ensure that all of the lists in our classifiers are in\n # sorted order.\n structured = {k: sorted(v) for k, v in structured.items()}\n\n # Now, we'll ensure that our keys themselves are in sorted order, using an\n # OrderedDict to preserve this ordering when we pass this data back up to\n # our caller.\n structured = collections.OrderedDict(sorted(structured.items()))\n\n return structured\n\n\ndef classifier_id(classifier):\n return classifier.replace(\" \", \"_\").replace(\"::\", \".\")\n\n\ndef contains_valid_uris(items):\n \"\"\"Returns boolean representing whether the input list contains any valid\n URIs\n \"\"\"\n return any(is_valid_uri(i) for i in items)\n\n\ndef parse_version(version_str):\n return packaging.version.parse(version_str)\n\n\ndef localize_datetime(timestamp):\n return pytz.utc.localize(timestamp)\n\n\ndef includeme(config):\n config.add_request_method(_camo_url, name=\"camo_url\")\n", "path": "warehouse/filters.py"}]} | 1,875 | 119 |
gh_patches_debug_17552 | rasdani/github-patches | git_diff | modin-project__modin-6959 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove `DataFrame.to_pickle_distributed` in favour of `DataFrame.modin.to_pickle_distributed`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modin/experimental/pandas/__init__.py`
Content:
```
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 """
15 The main module through which interaction with the experimental API takes place.
16
17 See `Experimental API Reference` for details.
18
19 Notes
20 -----
21 * Some of experimental APIs deviate from pandas in order to provide improved
22 performance.
23
24 * Although the use of experimental storage formats and engines is available through the
25 `modin.pandas` module when defining environment variable `MODIN_EXPERIMENTAL=true`,
26 the use of experimental I/O functions is available only through the
27 `modin.experimental.pandas` module.
28
29 Examples
30 --------
31 >>> import modin.experimental.pandas as pd
32 >>> df = pd.read_csv_glob("data*.csv")
33 """
34
35 import functools
36 import warnings
37
38 from modin.pandas import * # noqa F401, F403
39
40 from .io import ( # noqa F401
41 read_csv_glob,
42 read_custom_text,
43 read_json_glob,
44 read_parquet_glob,
45 read_pickle_distributed,
46 read_sql,
47 read_xml_glob,
48 to_pickle_distributed,
49 )
50
51 old_to_pickle_distributed = to_pickle_distributed
52
53
54 @functools.wraps(to_pickle_distributed)
55 def to_pickle_distributed(*args, **kwargs):
56 warnings.warn(
57 "`DataFrame.to_pickle_distributed` is deprecated and will be removed in a future version. "
58 + "Please use `DataFrame.modin.to_pickle_distributed` instead.",
59 category=FutureWarning,
60 )
61 return old_to_pickle_distributed(*args, **kwargs)
62
63
64 setattr(DataFrame, "to_pickle_distributed", to_pickle_distributed) # noqa: F405
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modin/experimental/pandas/__init__.py b/modin/experimental/pandas/__init__.py
--- a/modin/experimental/pandas/__init__.py
+++ b/modin/experimental/pandas/__init__.py
@@ -32,9 +32,6 @@
>>> df = pd.read_csv_glob("data*.csv")
"""
-import functools
-import warnings
-
from modin.pandas import * # noqa F401, F403
from .io import ( # noqa F401
@@ -45,20 +42,4 @@
read_pickle_distributed,
read_sql,
read_xml_glob,
- to_pickle_distributed,
)
-
-old_to_pickle_distributed = to_pickle_distributed
-
-
-@functools.wraps(to_pickle_distributed)
-def to_pickle_distributed(*args, **kwargs):
- warnings.warn(
- "`DataFrame.to_pickle_distributed` is deprecated and will be removed in a future version. "
- + "Please use `DataFrame.modin.to_pickle_distributed` instead.",
- category=FutureWarning,
- )
- return old_to_pickle_distributed(*args, **kwargs)
-
-
-setattr(DataFrame, "to_pickle_distributed", to_pickle_distributed) # noqa: F405
| {"golden_diff": "diff --git a/modin/experimental/pandas/__init__.py b/modin/experimental/pandas/__init__.py\n--- a/modin/experimental/pandas/__init__.py\n+++ b/modin/experimental/pandas/__init__.py\n@@ -32,9 +32,6 @@\n >>> df = pd.read_csv_glob(\"data*.csv\")\n \"\"\"\n \n-import functools\n-import warnings\n-\n from modin.pandas import * # noqa F401, F403\n \n from .io import ( # noqa F401\n@@ -45,20 +42,4 @@\n read_pickle_distributed,\n read_sql,\n read_xml_glob,\n- to_pickle_distributed,\n )\n-\n-old_to_pickle_distributed = to_pickle_distributed\n-\n-\[email protected](to_pickle_distributed)\n-def to_pickle_distributed(*args, **kwargs):\n- warnings.warn(\n- \"`DataFrame.to_pickle_distributed` is deprecated and will be removed in a future version. \"\n- + \"Please use `DataFrame.modin.to_pickle_distributed` instead.\",\n- category=FutureWarning,\n- )\n- return old_to_pickle_distributed(*args, **kwargs)\n-\n-\n-setattr(DataFrame, \"to_pickle_distributed\", to_pickle_distributed) # noqa: F405\n", "issue": "Remove `DataFrame.to_pickle_distributed` in favour of `DataFrame.modin.to_pickle_distributed`\n\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"\nThe main module through which interaction with the experimental API takes place.\n\nSee `Experimental API Reference` for details.\n\nNotes\n-----\n* Some of experimental APIs deviate from pandas in order to provide improved\n performance.\n\n* Although the use of experimental storage formats and engines is available through the\n `modin.pandas` module when defining environment variable `MODIN_EXPERIMENTAL=true`,\n the use of experimental I/O functions is available only through the\n `modin.experimental.pandas` module.\n\nExamples\n--------\n>>> import modin.experimental.pandas as pd\n>>> df = pd.read_csv_glob(\"data*.csv\")\n\"\"\"\n\nimport functools\nimport warnings\n\nfrom modin.pandas import * # noqa F401, F403\n\nfrom .io import ( # noqa F401\n read_csv_glob,\n read_custom_text,\n read_json_glob,\n read_parquet_glob,\n read_pickle_distributed,\n read_sql,\n read_xml_glob,\n to_pickle_distributed,\n)\n\nold_to_pickle_distributed = to_pickle_distributed\n\n\[email protected](to_pickle_distributed)\ndef to_pickle_distributed(*args, **kwargs):\n warnings.warn(\n \"`DataFrame.to_pickle_distributed` is deprecated and will be removed in a future version. 
\"\n + \"Please use `DataFrame.modin.to_pickle_distributed` instead.\",\n category=FutureWarning,\n )\n return old_to_pickle_distributed(*args, **kwargs)\n\n\nsetattr(DataFrame, \"to_pickle_distributed\", to_pickle_distributed) # noqa: F405\n", "path": "modin/experimental/pandas/__init__.py"}], "after_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"\nThe main module through which interaction with the experimental API takes place.\n\nSee `Experimental API Reference` for details.\n\nNotes\n-----\n* Some of experimental APIs deviate from pandas in order to provide improved\n performance.\n\n* Although the use of experimental storage formats and engines is available through the\n `modin.pandas` module when defining environment variable `MODIN_EXPERIMENTAL=true`,\n the use of experimental I/O functions is available only through the\n `modin.experimental.pandas` module.\n\nExamples\n--------\n>>> import modin.experimental.pandas as pd\n>>> df = pd.read_csv_glob(\"data*.csv\")\n\"\"\"\n\nfrom modin.pandas import * # noqa F401, F403\n\nfrom .io import ( # noqa F401\n read_csv_glob,\n read_custom_text,\n read_json_glob,\n read_parquet_glob,\n read_pickle_distributed,\n read_sql,\n read_xml_glob,\n)\n", "path": "modin/experimental/pandas/__init__.py"}]} | 913 | 285 |
gh_patches_debug_16826 | rasdani/github-patches | git_diff | lightly-ai__lightly-164 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: train_embedding fails when pytorch-lightning version is a release candit
## Bug description, how to reproduce and reason
When running the line
https://github.com/lightly-ai/lightly/blob/6cee517ff73ab8fd3d91cbd9aa379df2ae4eada7/lightly/embedding/_base.py#L88-L92
with a pytorch-lightning version with a release candidate (e.g. "1.2.0rc1"), the line fails with
```
File "/Users/malteebnerlightly/Documents/GitHub/lightly/venv/lib/python3.8/site-packages/lightly/cli/train_cli.py", line 150, in train_cli
return _train_cli(cfg)
File "/Users/malteebnerlightly/Documents/GitHub/lightly/venv/lib/python3.8/site-packages/lightly/cli/train_cli.py", line 119, in _train_cli
encoder.train_embedding(**cfg['trainer'])
File "/Users/malteebnerlightly/Documents/GitHub/lightly/venv/lib/python3.8/site-packages/lightly/embedding/_base.py", line 89, in train_embedding
pl_version = [int(v) for v in pl.__version__.split('.')]
File "/Users/malteebnerlightly/Documents/GitHub/lightly/venv/lib/python3.8/site-packages/lightly/embedding/_base.py", line 89, in <listcomp>
pl_version = [int(v) for v in pl.__version__.split('.')]
ValueError: invalid literal for int() with base 10: '0rc1'
```
To reproduce, run the following python code (failing):
```python
version = "1.2.0rc1"
pl_version = [int(v) for v in version.split('.')]
```
## Proposed solutions
### Option A:
Change the version checker to account for "rcX" in the version string.
### Option B:
Increase the minimum requirement for pytorch lightning and drop the version check.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lightly/embedding/_base.py`
Content:
```
1 """ BaseEmbeddings """
2
3 # Copyright (c) 2020. Lightly AG and its affiliates.
4 # All Rights Reserved
5 import os
6 import copy
7
8 import pytorch_lightning as pl
9 import pytorch_lightning.core.lightning as lightning
10 import torch.nn as nn
11
12 from lightly.embedding._callback import CustomModelCheckpoint
13
14
15 class BaseEmbedding(lightning.LightningModule):
16 """All trainable embeddings must inherit from BaseEmbedding.
17
18 """
19
20 def __init__(self,
21 model,
22 criterion,
23 optimizer,
24 dataloader,
25 scheduler=None):
26 """ Constructor
27
28 Args:
29 model: (torch.nn.Module)
30 criterion: (torch.nn.Module)
31 optimizer: (torch.optim.Optimizer)
32 dataloader: (torch.utils.data.DataLoader)
33
34 """
35
36 super(BaseEmbedding, self).__init__()
37 self.model = model
38 self.criterion = criterion
39 self.optimizer = optimizer
40 self.dataloader = dataloader
41 self.scheduler = scheduler
42 self.checkpoint = None
43 self.cwd = os.getcwd()
44
45 self.checkpoint_callback = None
46 self.init_checkpoint_callback()
47
48 def forward(self, x0, x1):
49 return self.model(x0, x1)
50
51 def training_step(self, batch, batch_idx):
52
53 # get the two image transformations
54 (x0, x1), _, _ = batch
55 # forward pass of the transformations
56 y0, y1 = self(x0, x1)
57 # calculate loss
58 loss = self.criterion(y0, y1)
59 # log loss and return
60 self.log('loss', loss)
61 return loss
62
63 def configure_optimizers(self):
64 if self.scheduler is None:
65 return self.optimizer
66 else:
67 return [self.optimizer], [self.scheduler]
68
69 def train_dataloader(self):
70 return self.dataloader
71
72 def train_embedding(self, **kwargs):
73 """ Train the model on the provided dataset.
74
75 Args:
76 **kwargs: pylightning_trainer arguments, examples include:
77 min_epochs: (int) Minimum number of epochs to train
78 max_epochs: (int) Maximum number of epochs to train
79 gpus: (int) number of gpus to use
80
81 Returns:
82 A trained encoder, ready for embedding datasets.
83
84 """
85 # backwards compatability for old pytorch-lightning versions:
86 # they changed the way checkpoint callbacks are passed in v1.0.3
87 # -> do a simple version check
88 # TODO: remove when incrementing minimum requirement for pl
89 pl_version = [int(v) for v in pl.__version__.split('.')]
90 ok_version = [1, 0, 4]
91 deprecated_checkpoint_callback = \
92 all([pl_v >= ok_v for pl_v, ok_v in zip(pl_version, ok_version)])
93
94 if deprecated_checkpoint_callback:
95 trainer = pl.Trainer(**kwargs,
96 callbacks=[self.checkpoint_callback])
97 else:
98 trainer = pl.Trainer(**kwargs,
99 checkpoint_callback=self.checkpoint_callback)
100
101 trainer.fit(self)
102
103 self.checkpoint = self.checkpoint_callback.best_model_path
104 self.checkpoint = os.path.join(self.cwd, self.checkpoint)
105
106 def embed(self, *args, **kwargs):
107 """Must be implemented by classes which inherit from BaseEmbedding.
108
109 """
110 raise NotImplementedError()
111
112 def init_checkpoint_callback(self,
113 save_last=False,
114 save_top_k=0,
115 monitor='loss',
116 dirpath=None):
117 """Initializes the checkpoint callback.
118
119 Args:
120 save_last:
121 Whether or not to save the checkpoint of the last epoch.
122 save_top_k:
123 Save the top_k model checkpoints.
124 monitor:
125 Which quantity to monitor.
126 dirpath:
127 Where to save the checkpoint.
128
129 """
130 # initialize custom model checkpoint
131 self.checkpoint_callback = CustomModelCheckpoint()
132 self.checkpoint_callback.save_last = save_last
133 self.checkpoint_callback.save_top_k = save_top_k
134 self.checkpoint_callback.monitor = monitor
135
136 dirpath = self.cwd if dirpath is None else dirpath
137 self.checkpoint_callback.dirpath = dirpath
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lightly/embedding/_base.py b/lightly/embedding/_base.py
--- a/lightly/embedding/_base.py
+++ b/lightly/embedding/_base.py
@@ -82,21 +82,8 @@
A trained encoder, ready for embedding datasets.
"""
- # backwards compatability for old pytorch-lightning versions:
- # they changed the way checkpoint callbacks are passed in v1.0.3
- # -> do a simple version check
- # TODO: remove when incrementing minimum requirement for pl
- pl_version = [int(v) for v in pl.__version__.split('.')]
- ok_version = [1, 0, 4]
- deprecated_checkpoint_callback = \
- all([pl_v >= ok_v for pl_v, ok_v in zip(pl_version, ok_version)])
-
- if deprecated_checkpoint_callback:
- trainer = pl.Trainer(**kwargs,
- callbacks=[self.checkpoint_callback])
- else:
- trainer = pl.Trainer(**kwargs,
- checkpoint_callback=self.checkpoint_callback)
+
+ trainer = pl.Trainer(**kwargs, callbacks=[self.checkpoint_callback])
trainer.fit(self)
| {"golden_diff": "diff --git a/lightly/embedding/_base.py b/lightly/embedding/_base.py\n--- a/lightly/embedding/_base.py\n+++ b/lightly/embedding/_base.py\n@@ -82,21 +82,8 @@\n A trained encoder, ready for embedding datasets.\n \n \"\"\"\n- # backwards compatability for old pytorch-lightning versions:\n- # they changed the way checkpoint callbacks are passed in v1.0.3\n- # -> do a simple version check\n- # TODO: remove when incrementing minimum requirement for pl\n- pl_version = [int(v) for v in pl.__version__.split('.')]\n- ok_version = [1, 0, 4]\n- deprecated_checkpoint_callback = \\\n- all([pl_v >= ok_v for pl_v, ok_v in zip(pl_version, ok_version)])\n-\n- if deprecated_checkpoint_callback:\n- trainer = pl.Trainer(**kwargs,\n- callbacks=[self.checkpoint_callback])\n- else:\n- trainer = pl.Trainer(**kwargs,\n- checkpoint_callback=self.checkpoint_callback)\n+\n+ trainer = pl.Trainer(**kwargs, callbacks=[self.checkpoint_callback])\n \n trainer.fit(self)\n", "issue": "Bug: train_embedding fails when pytorch-lightning version is a release candit\n## Bug description, how to reproduce and reason\r\nWhen running the line\r\nhttps://github.com/lightly-ai/lightly/blob/6cee517ff73ab8fd3d91cbd9aa379df2ae4eada7/lightly/embedding/_base.py#L88-L92\r\nwith a pytorch-lightning version with a release candidate (e.g. \"1.2.0rc1\"), the line fails with\r\n\r\n```\r\n File \"/Users/malteebnerlightly/Documents/GitHub/lightly/venv/lib/python3.8/site-packages/lightly/cli/train_cli.py\", line 150, in train_cli\r\n return _train_cli(cfg)\r\n File \"/Users/malteebnerlightly/Documents/GitHub/lightly/venv/lib/python3.8/site-packages/lightly/cli/train_cli.py\", line 119, in _train_cli\r\n encoder.train_embedding(**cfg['trainer'])\r\n File \"/Users/malteebnerlightly/Documents/GitHub/lightly/venv/lib/python3.8/site-packages/lightly/embedding/_base.py\", line 89, in train_embedding\r\n pl_version = [int(v) for v in pl.__version__.split('.')]\r\n File \"/Users/malteebnerlightly/Documents/GitHub/lightly/venv/lib/python3.8/site-packages/lightly/embedding/_base.py\", line 89, in <listcomp>\r\n pl_version = [int(v) for v in pl.__version__.split('.')]\r\nValueError: invalid literal for int() with base 10: '0rc1'\r\n```\r\n\r\nTo reproduce, run the following python code (failing):\r\n```python\r\nversion = \"1.2.0rc1\"\r\npl_version = [int(v) for v in version.split('.')]\r\n```\r\n## Proposed solutions\r\n\r\n### Option A:\r\nChange the version checker to account for \"rcX\" in the version string.\r\n### Option B:\r\nIncrease the minimum requirement for pytorch lightning and drop the version check.\n", "before_files": [{"content": "\"\"\" BaseEmbeddings \"\"\"\n\n# Copyright (c) 2020. 
Lightly AG and its affiliates.\n# All Rights Reserved\nimport os\nimport copy\n\nimport pytorch_lightning as pl\nimport pytorch_lightning.core.lightning as lightning\nimport torch.nn as nn\n\nfrom lightly.embedding._callback import CustomModelCheckpoint\n\n\nclass BaseEmbedding(lightning.LightningModule):\n \"\"\"All trainable embeddings must inherit from BaseEmbedding.\n\n \"\"\"\n\n def __init__(self,\n model,\n criterion,\n optimizer,\n dataloader,\n scheduler=None):\n \"\"\" Constructor\n\n Args:\n model: (torch.nn.Module)\n criterion: (torch.nn.Module)\n optimizer: (torch.optim.Optimizer)\n dataloader: (torch.utils.data.DataLoader)\n\n \"\"\"\n\n super(BaseEmbedding, self).__init__()\n self.model = model\n self.criterion = criterion\n self.optimizer = optimizer\n self.dataloader = dataloader\n self.scheduler = scheduler\n self.checkpoint = None\n self.cwd = os.getcwd()\n\n self.checkpoint_callback = None\n self.init_checkpoint_callback()\n\n def forward(self, x0, x1):\n return self.model(x0, x1)\n\n def training_step(self, batch, batch_idx):\n\n # get the two image transformations\n (x0, x1), _, _ = batch\n # forward pass of the transformations\n y0, y1 = self(x0, x1)\n # calculate loss\n loss = self.criterion(y0, y1)\n # log loss and return\n self.log('loss', loss)\n return loss\n\n def configure_optimizers(self):\n if self.scheduler is None:\n return self.optimizer\n else:\n return [self.optimizer], [self.scheduler]\n\n def train_dataloader(self):\n return self.dataloader\n\n def train_embedding(self, **kwargs):\n \"\"\" Train the model on the provided dataset.\n\n Args:\n **kwargs: pylightning_trainer arguments, examples include:\n min_epochs: (int) Minimum number of epochs to train\n max_epochs: (int) Maximum number of epochs to train\n gpus: (int) number of gpus to use\n\n Returns:\n A trained encoder, ready for embedding datasets.\n\n \"\"\"\n # backwards compatability for old pytorch-lightning versions:\n # they changed the way checkpoint callbacks are passed in v1.0.3\n # -> do a simple version check\n # TODO: remove when incrementing minimum requirement for pl\n pl_version = [int(v) for v in pl.__version__.split('.')]\n ok_version = [1, 0, 4]\n deprecated_checkpoint_callback = \\\n all([pl_v >= ok_v for pl_v, ok_v in zip(pl_version, ok_version)])\n\n if deprecated_checkpoint_callback:\n trainer = pl.Trainer(**kwargs,\n callbacks=[self.checkpoint_callback])\n else:\n trainer = pl.Trainer(**kwargs,\n checkpoint_callback=self.checkpoint_callback)\n\n trainer.fit(self)\n\n self.checkpoint = self.checkpoint_callback.best_model_path\n self.checkpoint = os.path.join(self.cwd, self.checkpoint)\n\n def embed(self, *args, **kwargs):\n \"\"\"Must be implemented by classes which inherit from BaseEmbedding.\n\n \"\"\"\n raise NotImplementedError()\n\n def init_checkpoint_callback(self,\n save_last=False,\n save_top_k=0,\n monitor='loss',\n dirpath=None):\n \"\"\"Initializes the checkpoint callback.\n\n Args:\n save_last:\n Whether or not to save the checkpoint of the last epoch.\n save_top_k:\n Save the top_k model checkpoints.\n monitor:\n Which quantity to monitor.\n dirpath:\n Where to save the checkpoint.\n\n \"\"\"\n # initialize custom model checkpoint\n self.checkpoint_callback = CustomModelCheckpoint()\n self.checkpoint_callback.save_last = save_last\n self.checkpoint_callback.save_top_k = save_top_k\n self.checkpoint_callback.monitor = monitor\n\n dirpath = self.cwd if dirpath is None else dirpath\n self.checkpoint_callback.dirpath = dirpath\n", "path": 
"lightly/embedding/_base.py"}], "after_files": [{"content": "\"\"\" BaseEmbeddings \"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\nimport os\nimport copy\n\nimport pytorch_lightning as pl\nimport pytorch_lightning.core.lightning as lightning\nimport torch.nn as nn\n\nfrom lightly.embedding._callback import CustomModelCheckpoint\n\n\nclass BaseEmbedding(lightning.LightningModule):\n \"\"\"All trainable embeddings must inherit from BaseEmbedding.\n\n \"\"\"\n\n def __init__(self,\n model,\n criterion,\n optimizer,\n dataloader,\n scheduler=None):\n \"\"\" Constructor\n\n Args:\n model: (torch.nn.Module)\n criterion: (torch.nn.Module)\n optimizer: (torch.optim.Optimizer)\n dataloader: (torch.utils.data.DataLoader)\n\n \"\"\"\n\n super(BaseEmbedding, self).__init__()\n self.model = model\n self.criterion = criterion\n self.optimizer = optimizer\n self.dataloader = dataloader\n self.scheduler = scheduler\n self.checkpoint = None\n self.cwd = os.getcwd()\n\n self.checkpoint_callback = None\n self.init_checkpoint_callback()\n\n def forward(self, x0, x1):\n return self.model(x0, x1)\n\n def training_step(self, batch, batch_idx):\n\n # get the two image transformations\n (x0, x1), _, _ = batch\n # forward pass of the transformations\n y0, y1 = self(x0, x1)\n # calculate loss\n loss = self.criterion(y0, y1)\n # log loss and return\n self.log('loss', loss)\n return loss\n\n def configure_optimizers(self):\n if self.scheduler is None:\n return self.optimizer\n else:\n return [self.optimizer], [self.scheduler]\n\n def train_dataloader(self):\n return self.dataloader\n\n def train_embedding(self, **kwargs):\n \"\"\" Train the model on the provided dataset.\n\n Args:\n **kwargs: pylightning_trainer arguments, examples include:\n min_epochs: (int) Minimum number of epochs to train\n max_epochs: (int) Maximum number of epochs to train\n gpus: (int) number of gpus to use\n\n Returns:\n A trained encoder, ready for embedding datasets.\n\n \"\"\"\n\n trainer = pl.Trainer(**kwargs, callbacks=[self.checkpoint_callback])\n\n trainer.fit(self)\n\n self.checkpoint = self.checkpoint_callback.best_model_path\n self.checkpoint = os.path.join(self.cwd, self.checkpoint)\n\n def embed(self, *args, **kwargs):\n \"\"\"Must be implemented by classes which inherit from BaseEmbedding.\n\n \"\"\"\n raise NotImplementedError()\n\n def init_checkpoint_callback(self,\n save_last=False,\n save_top_k=0,\n monitor='loss',\n dirpath=None):\n \"\"\"Initializes the checkpoint callback.\n\n Args:\n save_last:\n Whether or not to save the checkpoint of the last epoch.\n save_top_k:\n Save the top_k model checkpoints.\n monitor:\n Which quantity to monitor.\n dirpath:\n Where to save the checkpoint.\n\n \"\"\"\n # initialize custom model checkpoint\n self.checkpoint_callback = CustomModelCheckpoint()\n self.checkpoint_callback.save_last = save_last\n self.checkpoint_callback.save_top_k = save_top_k\n self.checkpoint_callback.monitor = monitor\n\n dirpath = self.cwd if dirpath is None else dirpath\n self.checkpoint_callback.dirpath = dirpath\n", "path": "lightly/embedding/_base.py"}]} | 1,915 | 261 |
gh_patches_debug_44533 | rasdani/github-patches | git_diff | biolab__orange3-text-240 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OWCorpus: save Text Features in Settings
<!--
This is an issue template. Please fill in the relevant details in the
sections below.
-->
##### Text version
<!-- From menu _Options→Add-ons→Orange3-Text_ or code `orangecontrib.text.version.full_version` -->
0.2.3
##### Orange version
<!-- From menu _Help→About→Version_ or code `Orange.version.full_version` -->
3.5.dev
##### Expected behavior
Corpus widget remembers set Text Features.
##### Actual behavior
Saved workflow (i.e. Corpus) doesn't store Text Features the user has set.
##### Steps to reproduce the behavior
Corpus (load a data set with several string attributes, set the Text Feature to be one of them, but not default).
Save and reload workflow.
Text Feature is reloaded to default instead of the selected one.
##### Additional info (worksheets, data, screenshots, ...)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `orangecontrib/text/widgets/owloadcorpus.py`
Content:
```
1 import os
2
3 from Orange.data.io import FileFormat
4 from Orange.widgets import gui
5 from Orange.widgets.utils.itemmodels import VariableListModel
6 from Orange.widgets.data.owselectcolumns import VariablesListItemView
7 from Orange.widgets.settings import Setting
8 from Orange.widgets.widget import OWWidget, Msg
9 from orangecontrib.text.corpus import Corpus, get_sample_corpora_dir
10 from orangecontrib.text.widgets.utils import widgets
11
12
13 class Output:
14 CORPUS = "Corpus"
15
16
17 class OWLoadCorpus(OWWidget):
18 name = "Corpus"
19 description = "Load a corpus of text documents, (optionally) tagged with categories."
20 icon = "icons/TextFile.svg"
21 priority = 10
22
23 outputs = [(Output.CORPUS, Corpus)]
24 want_main_area = False
25 resizing_enabled = False
26
27 dlgFormats = (
28 "All readable files ({});;".format(
29 '*' + ' *'.join(FileFormat.readers.keys())) +
30 ";;".join("{} (*{})".format(f.DESCRIPTION, ' *'.join(f.EXTENSIONS))
31 for f in sorted(set(FileFormat.readers.values()),
32 key=list(FileFormat.readers.values()).index)))
33
34 recent_files = Setting([])
35
36 class Error(OWWidget.Error):
37 read_file = Msg("Can't read file {} ({})")
38
39 def __init__(self):
40 super().__init__()
41
42 self.corpus = None
43
44 # Browse file box
45 fbox = gui.widgetBox(self.controlArea, "Corpus file", orientation=0)
46 widget = widgets.FileWidget(recent_files=self.recent_files, icon_size=(16, 16), on_open=self.open_file,
47 directory_aliases={"Browse documentation corpora ...": get_sample_corpora_dir()},
48 dialog_format=self.dlgFormats, dialog_title='Open Orange Document Corpus',
49 allow_empty=False, reload_label='Reload', browse_label='Browse')
50 fbox.layout().addWidget(widget)
51
52 # Corpus info
53 ibox = gui.widgetBox(self.controlArea, "Corpus info", addSpace=True)
54 corp_info = "Corpus of 0 documents."
55 self.info_label = gui.label(ibox, self, corp_info)
56
57 # Used Text Features
58 fbox = gui.widgetBox(self.controlArea, orientation=0)
59 ubox = gui.widgetBox(fbox, "Used text features", addSpace=True)
60 self.used_attrs = VariableListModel(enable_dnd=True)
61 self.used_attrs_view = VariablesListItemView()
62 self.used_attrs_view.setModel(self.used_attrs)
63 ubox.layout().addWidget(self.used_attrs_view)
64
65 aa = self.used_attrs
66 aa.dataChanged.connect(self.update_feature_selection)
67 aa.rowsInserted.connect(self.update_feature_selection)
68 aa.rowsRemoved.connect(self.update_feature_selection)
69
70 # Ignored Text Features
71 ibox = gui.widgetBox(fbox, "Ignored text features", addSpace=True)
72 self.unused_attrs = VariableListModel(enable_dnd=True)
73 self.unused_attrs_view = VariablesListItemView()
74 self.unused_attrs_view.setModel(self.unused_attrs)
75 ibox.layout().addWidget(self.unused_attrs_view)
76
77 # load first file
78 widget.select(0)
79
80 def open_file(self, path):
81 self.Error.read_file.clear()
82 self.used_attrs[:] = []
83 self.unused_attrs[:] = []
84 if path:
85 try:
86 self.corpus = Corpus.from_file(path)
87 self.corpus.name = os.path.splitext(os.path.basename(path))[0]
88 self.info_label.setText("Corpus of {} documents.".format(len(self.corpus)))
89 self.used_attrs.extend(self.corpus.text_features)
90 self.unused_attrs.extend([f for f in self.corpus.domain.metas
91 if f.is_string and f not in self.corpus.text_features])
92 except BaseException as err:
93 self.Error.read_file(path, str(err))
94
95 def update_feature_selection(self):
96 # TODO fix VariablesListItemView so it does not emit
97 # duplicated data when reordering inside a single window
98 def remove_duplicates(l):
99 unique = []
100 for i in l:
101 if i not in unique:
102 unique.append(i)
103 return unique
104
105 if self.corpus is not None:
106 self.corpus.set_text_features(remove_duplicates(self.used_attrs))
107 self.send(Output.CORPUS, self.corpus)
108
109
110 if __name__ == '__main__':
111 from AnyQt.QtWidgets import QApplication
112 app = QApplication([])
113 widget = OWLoadCorpus()
114 widget.show()
115 app.exec()
116 widget.saveSettings()
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/orangecontrib/text/widgets/owloadcorpus.py b/orangecontrib/text/widgets/owloadcorpus.py
--- a/orangecontrib/text/widgets/owloadcorpus.py
+++ b/orangecontrib/text/widgets/owloadcorpus.py
@@ -4,7 +4,7 @@
from Orange.widgets import gui
from Orange.widgets.utils.itemmodels import VariableListModel
from Orange.widgets.data.owselectcolumns import VariablesListItemView
-from Orange.widgets.settings import Setting
+from Orange.widgets.settings import Setting, ContextSetting, PerfectDomainContextHandler
from Orange.widgets.widget import OWWidget, Msg
from orangecontrib.text.corpus import Corpus, get_sample_corpora_dir
from orangecontrib.text.widgets.utils import widgets
@@ -31,7 +31,12 @@
for f in sorted(set(FileFormat.readers.values()),
key=list(FileFormat.readers.values()).index)))
+ settingsHandler = PerfectDomainContextHandler(
+ match_values=PerfectDomainContextHandler.MATCH_VALUES_ALL
+ )
+
recent_files = Setting([])
+ used_attrs = ContextSetting([])
class Error(OWWidget.Error):
read_file = Msg("Can't read file {} ({})")
@@ -57,38 +62,41 @@
# Used Text Features
fbox = gui.widgetBox(self.controlArea, orientation=0)
ubox = gui.widgetBox(fbox, "Used text features", addSpace=True)
- self.used_attrs = VariableListModel(enable_dnd=True)
+ self.used_attrs_model = VariableListModel(enable_dnd=True)
self.used_attrs_view = VariablesListItemView()
- self.used_attrs_view.setModel(self.used_attrs)
+ self.used_attrs_view.setModel(self.used_attrs_model)
ubox.layout().addWidget(self.used_attrs_view)
- aa = self.used_attrs
+ aa = self.used_attrs_model
aa.dataChanged.connect(self.update_feature_selection)
aa.rowsInserted.connect(self.update_feature_selection)
aa.rowsRemoved.connect(self.update_feature_selection)
# Ignored Text Features
ibox = gui.widgetBox(fbox, "Ignored text features", addSpace=True)
- self.unused_attrs = VariableListModel(enable_dnd=True)
+ self.unused_attrs_model = VariableListModel(enable_dnd=True)
self.unused_attrs_view = VariablesListItemView()
- self.unused_attrs_view.setModel(self.unused_attrs)
+ self.unused_attrs_view.setModel(self.unused_attrs_model)
ibox.layout().addWidget(self.unused_attrs_view)
# load first file
widget.select(0)
def open_file(self, path):
+ self.closeContext()
self.Error.read_file.clear()
- self.used_attrs[:] = []
- self.unused_attrs[:] = []
+ self.used_attrs_model[:] = []
+ self.unused_attrs_model[:] = []
if path:
try:
self.corpus = Corpus.from_file(path)
self.corpus.name = os.path.splitext(os.path.basename(path))[0]
self.info_label.setText("Corpus of {} documents.".format(len(self.corpus)))
- self.used_attrs.extend(self.corpus.text_features)
- self.unused_attrs.extend([f for f in self.corpus.domain.metas
- if f.is_string and f not in self.corpus.text_features])
+ self.used_attrs = list(self.corpus.text_features)
+ self.openContext(self.corpus)
+ self.used_attrs_model.extend(self.used_attrs)
+ self.unused_attrs_model.extend([f for f in self.corpus.domain.metas
+ if f.is_string and f not in self.used_attrs_model])
except BaseException as err:
self.Error.read_file(path, str(err))
@@ -103,8 +111,9 @@
return unique
if self.corpus is not None:
- self.corpus.set_text_features(remove_duplicates(self.used_attrs))
+ self.corpus.set_text_features(remove_duplicates(self.used_attrs_model))
self.send(Output.CORPUS, self.corpus)
+ self.used_attrs = list(self.used_attrs_model)
if __name__ == '__main__':
| {"golden_diff": "diff --git a/orangecontrib/text/widgets/owloadcorpus.py b/orangecontrib/text/widgets/owloadcorpus.py\n--- a/orangecontrib/text/widgets/owloadcorpus.py\n+++ b/orangecontrib/text/widgets/owloadcorpus.py\n@@ -4,7 +4,7 @@\n from Orange.widgets import gui\n from Orange.widgets.utils.itemmodels import VariableListModel\n from Orange.widgets.data.owselectcolumns import VariablesListItemView\n-from Orange.widgets.settings import Setting\n+from Orange.widgets.settings import Setting, ContextSetting, PerfectDomainContextHandler\n from Orange.widgets.widget import OWWidget, Msg\n from orangecontrib.text.corpus import Corpus, get_sample_corpora_dir\n from orangecontrib.text.widgets.utils import widgets\n@@ -31,7 +31,12 @@\n for f in sorted(set(FileFormat.readers.values()),\n key=list(FileFormat.readers.values()).index)))\n \n+ settingsHandler = PerfectDomainContextHandler(\n+ match_values=PerfectDomainContextHandler.MATCH_VALUES_ALL\n+ )\n+\n recent_files = Setting([])\n+ used_attrs = ContextSetting([])\n \n class Error(OWWidget.Error):\n read_file = Msg(\"Can't read file {} ({})\")\n@@ -57,38 +62,41 @@\n # Used Text Features\n fbox = gui.widgetBox(self.controlArea, orientation=0)\n ubox = gui.widgetBox(fbox, \"Used text features\", addSpace=True)\n- self.used_attrs = VariableListModel(enable_dnd=True)\n+ self.used_attrs_model = VariableListModel(enable_dnd=True)\n self.used_attrs_view = VariablesListItemView()\n- self.used_attrs_view.setModel(self.used_attrs)\n+ self.used_attrs_view.setModel(self.used_attrs_model)\n ubox.layout().addWidget(self.used_attrs_view)\n \n- aa = self.used_attrs\n+ aa = self.used_attrs_model\n aa.dataChanged.connect(self.update_feature_selection)\n aa.rowsInserted.connect(self.update_feature_selection)\n aa.rowsRemoved.connect(self.update_feature_selection)\n \n # Ignored Text Features\n ibox = gui.widgetBox(fbox, \"Ignored text features\", addSpace=True)\n- self.unused_attrs = VariableListModel(enable_dnd=True)\n+ self.unused_attrs_model = VariableListModel(enable_dnd=True)\n self.unused_attrs_view = VariablesListItemView()\n- self.unused_attrs_view.setModel(self.unused_attrs)\n+ self.unused_attrs_view.setModel(self.unused_attrs_model)\n ibox.layout().addWidget(self.unused_attrs_view)\n \n # load first file\n widget.select(0)\n \n def open_file(self, path):\n+ self.closeContext()\n self.Error.read_file.clear()\n- self.used_attrs[:] = []\n- self.unused_attrs[:] = []\n+ self.used_attrs_model[:] = []\n+ self.unused_attrs_model[:] = []\n if path:\n try:\n self.corpus = Corpus.from_file(path)\n self.corpus.name = os.path.splitext(os.path.basename(path))[0]\n self.info_label.setText(\"Corpus of {} documents.\".format(len(self.corpus)))\n- self.used_attrs.extend(self.corpus.text_features)\n- self.unused_attrs.extend([f for f in self.corpus.domain.metas\n- if f.is_string and f not in self.corpus.text_features])\n+ self.used_attrs = list(self.corpus.text_features)\n+ self.openContext(self.corpus)\n+ self.used_attrs_model.extend(self.used_attrs)\n+ self.unused_attrs_model.extend([f for f in self.corpus.domain.metas\n+ if f.is_string and f not in self.used_attrs_model])\n except BaseException as err:\n self.Error.read_file(path, str(err))\n \n@@ -103,8 +111,9 @@\n return unique\n \n if self.corpus is not None:\n- self.corpus.set_text_features(remove_duplicates(self.used_attrs))\n+ self.corpus.set_text_features(remove_duplicates(self.used_attrs_model))\n self.send(Output.CORPUS, self.corpus)\n+ self.used_attrs = list(self.used_attrs_model)\n \n \n if __name__ == 
[remainder of the preceding OWCorpus row's verification_info JSON (escaped before/after contents of orangecontrib/text/widgets/owloadcorpus.py) omitted] | 1,675 | 878
gh_patches_debug_15637 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-2216 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
A user can send a private message to themselves
Tested on pre-production.
Scenario:
- I go to the private-message interface.
- In the recipients field, I enter the following string (content between quotes): " , ".
- The PM is sent to me alone.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/mp/forms.py`
Content:
```
1 # coding: utf-8
2
3 from crispy_forms.helper import FormHelper
4 from crispy_forms.layout import Layout, Field, Hidden
5 from django import forms
6 from django.contrib.auth.models import User
7 from django.core.urlresolvers import reverse
8
9 from zds.mp.models import PrivateTopic
10 from zds.utils.forms import CommonLayoutEditor
11 from django.utils.translation import ugettext_lazy as _
12
13
14 class PrivateTopicForm(forms.Form):
15 participants = forms.CharField(
16 label=_('Participants'),
17 widget=forms.TextInput(
18 attrs={
19 'placeholder': _(u'Les participants doivent '
20 u'être séparés par une virgule.'),
21 'required': 'required',
22 'data-autocomplete': '{ "type": "multiple" }'}))
23
24 title = forms.CharField(
25 label=_('Titre'),
26 max_length=PrivateTopic._meta.get_field('title').max_length,
27 widget=forms.TextInput(
28 attrs={
29 'required': 'required'
30 }
31 )
32 )
33
34 subtitle = forms.CharField(
35 label=_('Sous-titre'),
36 max_length=PrivateTopic._meta.get_field('subtitle').max_length,
37 required=False
38 )
39
40 text = forms.CharField(
41 label='Texte',
42 required=False,
43 widget=forms.Textarea(
44 attrs={
45 'placeholder': _('Votre message au format Markdown.'),
46 'required': 'required'
47 }
48 )
49 )
50
51 def __init__(self, username, *args, **kwargs):
52 super(PrivateTopicForm, self).__init__(*args, **kwargs)
53 self.helper = FormHelper()
54 self.helper.form_class = 'content-wrapper'
55 self.helper.form_method = 'post'
56 self.username = username
57
58 self.helper.layout = Layout(
59 Field('participants', autocomplete='off'),
60 Field('title', autocomplete='off'),
61 Field('subtitle', autocomplete='off'),
62 CommonLayoutEditor(),
63 )
64
65 def clean(self):
66 cleaned_data = super(PrivateTopicForm, self).clean()
67
68 participants = cleaned_data.get('participants')
69 title = cleaned_data.get('title')
70 text = cleaned_data.get('text')
71
72 if participants is not None and participants.strip() == '':
73 self._errors['participants'] = self.error_class(
74 [_(u'Le champ participants ne peut être vide')])
75
76 if participants is not None and participants.strip() != '':
77 receivers = participants.strip().split(',')
78 for receiver in receivers:
79 if User.objects.filter(username__exact=receiver.strip()).count() == 0 and receiver.strip() != '':
80 self._errors['participants'] = self.error_class(
81 [_(u'Un des participants saisi est introuvable')])
82 elif receiver.strip().lower() == self.username.lower():
83 self._errors['participants'] = self.error_class(
84 [_(u'Vous ne pouvez pas vous écrire à vous-même !')])
85
86 if title is not None and title.strip() == '':
87 self._errors['title'] = self.error_class(
88 [_(u'Le champ titre ne peut être vide')])
89
90 if text is not None and text.strip() == '':
91 self._errors['text'] = self.error_class(
92 [_(u'Le champ text ne peut être vide')])
93
94 return cleaned_data
95
96
97 class PrivatePostForm(forms.Form):
98 text = forms.CharField(
99 label='',
100 widget=forms.Textarea(
101 attrs={
102 'placeholder': _('Votre message au format Markdown.'),
103 'required': 'required'
104 }
105 )
106 )
107
108 def __init__(self, topic, user, *args, **kwargs):
109 super(PrivatePostForm, self).__init__(*args, **kwargs)
110 self.helper = FormHelper()
111 self.helper.form_action = reverse(
112 'zds.mp.views.answer') + '?sujet=' + str(topic.pk)
113 self.helper.form_method = 'post'
114
115 self.helper.layout = Layout(
116 CommonLayoutEditor(),
117 Hidden('last_post', '{{ last_post_pk }}'),
118 )
119
120 if topic.alone():
121 self.helper['text'].wrap(
122 Field,
123 placeholder=_(u'Vous êtes seul dans cette conversation, '
124 u'vous ne pouvez plus y écrire.'),
125 disabled=True)
126
127 def clean(self):
128 cleaned_data = super(PrivatePostForm, self).clean()
129
130 text = cleaned_data.get('text')
131
132 if text is not None and text.strip() == '':
133 self._errors['text'] = self.error_class(
134 [_(u'Le champ text ne peut être vide')])
135
136 return cleaned_data
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/mp/forms.py b/zds/mp/forms.py
--- a/zds/mp/forms.py
+++ b/zds/mp/forms.py
@@ -76,7 +76,8 @@
if participants is not None and participants.strip() != '':
receivers = participants.strip().split(',')
for receiver in receivers:
- if User.objects.filter(username__exact=receiver.strip()).count() == 0 and receiver.strip() != '':
+ if User.objects.filter(username__exact=receiver.strip()).count() == 0 and receiver.strip() != '' \
+ or receiver.strip() == '':
self._errors['participants'] = self.error_class(
[_(u'Un des participants saisi est introuvable')])
elif receiver.strip().lower() == self.username.lower():
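For context on why this one-line change is enough: the original condition only raises an error when a recipient name is non-empty and the user lookup fails, so the blank entries produced by splitting an input such as `" , "` pass validation silently. A minimal plain-Python sketch of that behaviour (no Django required; the user lookup is stubbed out):

```python
participants = " , "
receivers = participants.strip().split(",")
print(receivers)  # ['', ''] -- every entry strips down to the empty string

for receiver in receivers:
    user_lookup_failed = True  # stub: no User can match an empty username
    # original check: error only if the lookup fails AND the name is non-empty
    if user_lookup_failed and receiver.strip() != "":
        print("error: participant not found")
# Nothing is printed, so clean() succeeds with zero real recipients and the
# topic is created with the author as its only participant. The patched
# condition adds `or receiver.strip() == ''`, so blank entries now trigger
# the "participant not found" error instead.
```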
| [verification_info JSON omitted: it repeats the issue text, the golden diff above, and the before/after contents of zds/mp/forms.py in escaped form] | 1,590 | 166 |
gh_patches_debug_1822 | rasdani/github-patches | git_diff | zigpy__zha-device-handlers-342 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update zigpy version to use the new (old) module name for zigpy?
@dmulcahey Ready to update the zigpy version to use the new (old) module name without the -homeassistant suffix?
@Adminiuga in the PR https://github.com/zigpy/zigpy/pull/363 changed the zigpy module name back to just "zigpy" (from "zigpy-homeassistant")
https://github.com/zigpy/zigpy/pull/363/commits/6c9e0e9412a322d4b9558977decf50ca4dfb5ffd
From https://pypi.org/project/zigpy-homeassistant/ back to https://pypi.org/project/zigpy/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """Setup module for ZHAQuirks."""
2
3 from setuptools import find_packages, setup
4
5 VERSION = "0.0.38"
6
7
8 def readme():
9 """Print long description."""
10 with open("README.md") as f:
11 return f.read()
12
13
14 setup(
15 name="zha-quirks",
16 version=VERSION,
17 description="Library implementing Zigpy quirks for ZHA in Home Assistant",
18 long_description=readme(),
19 long_description_content_type="text/markdown",
20 url="https://github.com/dmulcahey/zha-device-handlers",
21 author="David F. Mulcahey",
22 author_email="[email protected]",
23 license="Apache License Version 2.0",
24 keywords="zha quirks homeassistant hass",
25 packages=find_packages(exclude=["*.tests"]),
26 python_requires=">=3",
27 install_requires=["zigpy-homeassistant>=0.18.1"],
28 tests_require=["pytest"],
29 )
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -24,6 +24,6 @@
keywords="zha quirks homeassistant hass",
packages=find_packages(exclude=["*.tests"]),
python_requires=">=3",
- install_requires=["zigpy-homeassistant>=0.18.1"],
+ install_requires=["zigpy>=0.20.0"],
tests_require=["pytest"],
)
| [verification_info JSON omitted: it repeats the issue text, the golden diff above, and the before/after contents of setup.py in escaped form] | 683 | 99 |
gh_patches_debug_10358 | rasdani/github-patches | git_diff | crytic__slither-971 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Broken link in Slither recommendation due to typo in Wiki
Hi, there is a typo (oarameters instead of parameters) in the first-level header "Unindexed ERC20 event oarameters" of the wiki:
https://github.com/crytic/slither/wiki/Detector-Documentation#unindexed-erc20-event-oarameters
On [L. 19](https://github.com/crytic/slither/blob/3bc22a9b143828edec956f170bdef7234d6707d6/slither/detectors/erc/unindexed_event_parameters.py#L19) of the detector there is also the same typo on `WIKI_TITLE`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/detectors/erc/unindexed_event_parameters.py`
Content:
```
1 """
2 Detect mistakenly un-indexed ERC20 event parameters
3 """
4 from slither.detectors.abstract_detector import AbstractDetector, DetectorClassification
5
6
7 class UnindexedERC20EventParameters(AbstractDetector):
8 """
9 Un-indexed ERC20 event parameters
10 """
11
12 ARGUMENT = "erc20-indexed"
13 HELP = "Un-indexed ERC20 event parameters"
14 IMPACT = DetectorClassification.INFORMATIONAL
15 CONFIDENCE = DetectorClassification.HIGH
16
17 WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#unindexed-erc20-event-parameters"
18
19 WIKI_TITLE = "Unindexed ERC20 event oarameters"
20 WIKI_DESCRIPTION = "Detects whether events defined by the `ERC20` specification that should have some parameters as `indexed` are missing the `indexed` keyword."
21
22 # region wiki_exploit_scenario
23 WIKI_EXPLOIT_SCENARIO = """
24 ```solidity
25 contract ERC20Bad {
26 // ...
27 event Transfer(address from, address to, uint value);
28 event Approval(address owner, address spender, uint value);
29
30 // ...
31 }
32 ```
33 `Transfer` and `Approval` events should have the 'indexed' keyword on their two first parameters, as defined by the `ERC20` specification.
34 Failure to include these keywords will exclude the parameter data in the transaction/block's bloom filter, so external tooling searching for these parameters may overlook them and fail to index logs from this token contract."""
35 # endregion wiki_exploit_scenario
36
37 WIKI_RECOMMENDATION = "Add the `indexed` keyword to event parameters that should include it, according to the `ERC20` specification."
38
39 STANDARD_JSON = False
40
41 @staticmethod
42 def detect_erc20_unindexed_event_params(contract):
43 """
44 Detect un-indexed ERC20 event parameters in a given contract.
45 :param contract: The contract to check ERC20 events for un-indexed parameters in.
46 :return: A list of tuple(event, parameter) of parameters which should be indexed.
47 """
48 # Create our result array
49 results = []
50
51 # If this contract isn't an ERC20 token, we return our empty results.
52 if not contract.is_erc20():
53 return results
54
55 # Loop through all events to look for poor form.
56 for event in contract.events_declared:
57
58 # If this is transfer/approval events, expect the first two parameters to be indexed.
59 if event.full_name in [
60 "Transfer(address,address,uint256)",
61 "Approval(address,address,uint256)",
62 ]:
63 if not event.elems[0].indexed:
64 results.append((event, event.elems[0]))
65 if not event.elems[1].indexed:
66 results.append((event, event.elems[1]))
67
68 # Return the results.
69 return results
70
71 def _detect(self):
72 """
73 Detect un-indexed ERC20 event parameters in all contracts.
74 """
75 results = []
76 for c in self.contracts:
77 unindexed_params = self.detect_erc20_unindexed_event_params(c)
78 if unindexed_params:
79 # Add each problematic event definition to our result list
80 for (event, parameter) in unindexed_params:
81
82 info = [
83 "ERC20 event ",
84 event,
85 f"does not index parameter {parameter}\n",
86 ]
87
88 # Add the events to the JSON (note: we do not add the params/vars as they have no source mapping).
89 res = self.generate_result(info)
90
91 res.add(event, {"parameter_name": parameter.name})
92 results.append(res)
93
94 return results
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/slither/detectors/erc/unindexed_event_parameters.py b/slither/detectors/erc/unindexed_event_parameters.py
--- a/slither/detectors/erc/unindexed_event_parameters.py
+++ b/slither/detectors/erc/unindexed_event_parameters.py
@@ -16,7 +16,7 @@
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#unindexed-erc20-event-parameters"
- WIKI_TITLE = "Unindexed ERC20 event oarameters"
+ WIKI_TITLE = "Unindexed ERC20 event parameters"
WIKI_DESCRIPTION = "Detects whether events defined by the `ERC20` specification that should have some parameters as `indexed` are missing the `indexed` keyword."
# region wiki_exploit_scenario
| [verification_info JSON omitted: it repeats the issue text, the golden diff above, and the before/after contents of slither/detectors/erc/unindexed_event_parameters.py in escaped form] | 1,403 | 186 |
gh_patches_debug_27309 | rasdani/github-patches | git_diff | getpelican__pelican-1778 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No tests in bdist
Discussion from IRC:
```
18:38 <ionelmc> can you stop including the tests in the installed package?
18:39 <+justmay> Fine with me. Care to submit a PR that addresses that?
18:39 <ionelmc> sure
18:47 <ionelmc> justmay: is `.mailmap` a stray file?
18:47 <ionelmc> look like your sdist is incomplete
18:48 <ionelmc> doesn't include all the docs and test conf
18:50 <ionelmc> justmay: oh snap. i have to move out the tests to fix this :-)
18:51 <ionelmc> because include_package_data is used any included data file overlaying the package is going into the bdist
18:51 <+justmay> ionelmc: .mailmap is there by design. See "Mapping Authors": https://www.kernel.org/pub/software/scm/git/docs/git-shortlog.html
18:52 <ionelmc> mkay
18:52 <ionelmc> justmay: you're not going to have a problem with the tests dir at the same level as pelican package right?
18:53 → e-Flex joined ([email protected])
18:54 <+justmay> There's no other way to prevent inclusion of tests in the installed package?
18:55 <ionelmc> justmay: there are two horrible ways
18:55 <ionelmc> don't include it in the sdist (highly undesirable)
18:55 <ionelmc> or
18:55 <ionelmc> manually specify package_data
18:55 <ionelmc> which i can do it correctly for you know, but it will be error prone to maintain
18:56 <ionelmc> s/know/now/
18:56 <ionelmc> i think that's also not desirable
18:56 <ionelmc> that's why i think moving them out is ok
18:57 <ionelmc> i'll fix the test configuration to work that way
18:57 <ionelmc> justmay: agree? :-)
18:59 <+justmay> ionelmc: Quite honestly, I don't have the bandwidth this morning to dig deeply enough into this topic. Would you submit an issue so we (i.e., the community) can discuss this and come to a consensus?
19:00 <ionelmc> justmay: there's already https://github.com/getpelican/pelican/issues/1409 - i seriously doubt a new issue will help in any way
19:01 <winlu> ionelmc: justs prune tests and be done with it
19:01 <ionelmc> justmay: it's either the relocation or manual package_data, make a choice :-)
19:01 <ionelmc> winlu: pruning the tests will remove them from sdist
```
Closes #1609. Closes #1545. Closes #1409.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 from setuptools import setup
3
4 requires = ['feedgenerator >= 1.6', 'jinja2 >= 2.7', 'pygments', 'docutils',
5 'pytz >= 0a', 'blinker', 'unidecode', 'six >= 1.4',
6 'python-dateutil']
7
8 entry_points = {
9 'console_scripts': [
10 'pelican = pelican:main',
11 'pelican-import = pelican.tools.pelican_import:main',
12 'pelican-quickstart = pelican.tools.pelican_quickstart:main',
13 'pelican-themes = pelican.tools.pelican_themes:main'
14 ]
15 }
16
17
18 README = open('README.rst').read()
19 CHANGELOG = open('docs/changelog.rst').read()
20
21
22 setup(
23 name="pelican",
24 version="3.6.1.dev",
25 url='http://getpelican.com/',
26 author='Alexis Metaireau',
27 author_email='[email protected]',
28 description="A tool to generate a static blog from reStructuredText or "
29 "Markdown input files.",
30 long_description=README + '\n' + CHANGELOG,
31 packages=['pelican', 'pelican.tools'],
32 include_package_data=True,
33 install_requires=requires,
34 entry_points=entry_points,
35 classifiers=[
36 'Development Status :: 5 - Production/Stable',
37 'Environment :: Console',
38 'License :: OSI Approved :: GNU Affero General Public License v3',
39 'Operating System :: OS Independent',
40 'Programming Language :: Python :: 2',
41 'Programming Language :: Python :: 2.7',
42 'Programming Language :: Python :: 3',
43 'Programming Language :: Python :: 3.3',
44 'Programming Language :: Python :: 3.4',
45 'Topic :: Internet :: WWW/HTTP',
46 'Topic :: Software Development :: Libraries :: Python Modules',
47 ],
48 test_suite='pelican.tests',
49 )
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,7 @@
#!/usr/bin/env python
+from os import walk
+from os.path import join, relpath, dirname
+
from setuptools import setup
requires = ['feedgenerator >= 1.6', 'jinja2 >= 2.7', 'pygments', 'docutils',
@@ -14,11 +17,9 @@
]
}
-
README = open('README.rst').read()
CHANGELOG = open('docs/changelog.rst').read()
-
setup(
name="pelican",
version="3.6.1.dev",
@@ -29,7 +30,19 @@
"Markdown input files.",
long_description=README + '\n' + CHANGELOG,
packages=['pelican', 'pelican.tools'],
- include_package_data=True,
+ package_data={
+ # we manually collect the package data, as opposed to using include_package_data=True
+ # because we don't want the tests to be included automatically as package data
+ # (MANIFEST.in is too greedy)
+ 'pelican': [
+ relpath(join(root, name), 'pelican')
+ for root, _, names in walk(join('pelican', 'themes')) for name in names
+ ],
+ 'pelican.tools': [
+ relpath(join(root, name), join('pelican', 'tools'))
+ for root, _, names in walk(join('pelican', 'tools', 'templates')) for name in names
+ ],
+ },
install_requires=requires,
entry_points=entry_points,
classifiers=[
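The essence of the patch is replacing `include_package_data=True` with an explicit, manually collected `package_data` mapping, so nothing under `pelican/tests` can leak into the bdist. A self-contained sketch of the same collection step (restructured as a helper for readability; run from the project root, where missing directories simply yield empty lists):

```python
from os import walk
from os.path import join, relpath

def collect_package_data(package_dir, data_dir):
    """List data files under data_dir as paths relative to package_dir."""
    return [
        relpath(join(root, name), package_dir)
        for root, _, names in walk(data_dir)
        for name in names
    ]

# Only the explicitly walked directories contribute files, so every theme and
# template still ships while pelican/tests is never visited:
theme_files = collect_package_data("pelican", join("pelican", "themes"))
template_files = collect_package_data(join("pelican", "tools"),
                                      join("pelican", "tools", "templates"))
print(len(theme_files), len(template_files))
```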
| [verification_info JSON omitted: it repeats the issue text, the golden diff above, and the before/after contents of setup.py in escaped form] | 1,496 | 371 |
gh_patches_debug_4667 | rasdani/github-patches | git_diff | akvo__akvo-rsr-1910 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
REST API: project_extra with organisation filter gives duplicates
The `project_extra` REST API endpoint shows duplicate projects when it is filtered on organisation.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/project.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Akvo RSR is covered by the GNU Affero General Public License.
3
4 See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6 """
7
8 from akvo.rest.serializers.project import ProjectUpSerializer
9 from akvo.rsr.models import Project, PublishingStatus
10 from ..serializers import ProjectSerializer, ProjectExtraSerializer
11 from ..viewsets import BaseRSRViewSet
12
13
14 class ProjectViewSet(BaseRSRViewSet):
15
16 """
17 Viewset providing Project data.
18
19 Allowed parameters are:
20 __limit__ (default 30, max 100),
21 __title__ (exact or icontains),
22 __subtitle__ (exact or icontains),
23 __status__,
24 __language__,
25 __currency__,
26 __date_start_planned__ (exact, gt, gte, lt or lte),
27 __date_start_actual__ (exact, gt, gte, lt or lte),
28 __date_end_planned__ (exact, gt, gte, lt or lte),
29 __date_end_actual__ (exact, gt, gte, lt or lte),
30 __created_at__ (exact, gt, gte, lt or lte),
31 __last_modified_at__ (exact, gt, gte, lt or lte),
32 __sync_owner__,
33 __iati_activity_id__ (exact or icontains),
34 __hierarchy__,
35 __project_scope__,
36 __collaboration_type__,
37 __default_aid_type__,
38 __default_finance_type__,
39 __default_flow_type__,
40 __default_tied_status__,
41 __budget__ (exact, gt, gte, lt or lte),
42 __funds__ (exact, gt, gte, lt or lte),
43 __funds_needed__ (exact, gt, gte, lt or lte),
44 __categories__ (exact, in),
45 __partners__ (exact, in),
46 __keywords__ (exact, in), and
47 __publishingstatus\__status__.
48 """
49 queryset = Project.objects.select_related(
50 'publishingstatus'
51 ).prefetch_related(
52 'categories',
53 'keywords',
54 'partners')
55
56 serializer_class = ProjectSerializer
57 filter_fields = {
58 'title': ['exact', 'icontains'],
59 'subtitle': ['exact', 'icontains'],
60 'status': ['exact', ],
61 'language': ['exact', ],
62 'currency': ['exact', ],
63 'date_start_planned': ['exact', 'gt', 'gte', 'lt', 'lte', ],
64 'date_start_actual': ['exact', 'gt', 'gte', 'lt', 'lte', ],
65 'date_end_planned': ['exact', 'gt', 'gte', 'lt', 'lte', ],
66 'date_end_actual': ['exact', 'gt', 'gte', 'lt', 'lte', ],
67 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],
68 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],
69 'iati_activity_id': ['exact', 'icontains', ],
70 'hierarchy': ['exact', ],
71 'project_scope': ['exact', ],
72 'collaboration_type': ['exact', ],
73 'default_aid_type': ['exact', ],
74 'default_finance_type': ['exact', ],
75 'default_flow_type': ['exact', ],
76 'default_tied_status': ['exact', ],
77 'budget': ['exact', 'gt', 'gte', 'lt', 'lte', ],
78 'funds': ['exact', 'gt', 'gte', 'lt', 'lte', ],
79 'funds_needed': ['exact', 'gt', 'gte', 'lt', 'lte', ],
80 'categories': ['exact', 'in', ],
81 'partners': ['exact', 'in', ],
82 'keywords': ['exact', 'in', ],
83 'publishingstatus__status': ['exact', ],
84 }
85
86 def get_queryset(self):
87 """
88 Allow custom filter for sync_owner, since this field has been replaced by the
89 reporting org partnership.
90 """
91 queryset = self.queryset
92 sync_owner = self.request.QUERY_PARAMS.get('sync_owner', None)
93 if sync_owner:
94 queryset = queryset.filter(partnerships__iati_organisation_role=101,
95 partnerships__organisation__pk=sync_owner)
96 return queryset
97
98
99 class ProjectExtraViewSet(ProjectViewSet):
100
101 """
102 Viewset providing extra Project data.
103
104 Allowed parameters are:
105 __limit__ (default 30, max 100),
106 __partnerships\__organisation__ (filter on organisation ID), and
107 __publishingstatus\__status__ (filter on publishing status)
108 """
109
110 queryset = Project.objects.select_related(
111 'publishing_status').prefetch_related(
112 'sectors', 'partnerships')
113 serializer_class = ProjectExtraSerializer
114 paginate_by_param = 'limit'
115 filter_fields = ('partnerships__organisation', 'publishingstatus__status')
116
117
118 class ProjectUpViewSet(ProjectViewSet):
119
120 """
121 Viewset providing extra data and limited filtering for Up in one go.
122
123 Allowed parameters are:
124 __limit__ (default 30, max 100),
125 __partnerships\__organisation__ (filter on organisation ID), and
126 __publishingstatus\__status__ (filter on publishing status)
127 """
128
129 queryset = Project.objects.select_related(
130 'primary_location',
131 'updates',
132 'publishingstatus'
133 ).prefetch_related(
134 'categories',
135 'keywords',
136 'partners')
137 serializer_class = ProjectUpSerializer
138 paginate_by_param = 'limit'
139 max_paginate_by = 100
140 filter_fields = ('partnerships__organisation', 'publishingstatus__status')
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/views/project.py b/akvo/rest/views/project.py
--- a/akvo/rest/views/project.py
+++ b/akvo/rest/views/project.py
@@ -93,7 +93,7 @@
if sync_owner:
queryset = queryset.filter(partnerships__iati_organisation_role=101,
partnerships__organisation__pk=sync_owner)
- return queryset
+ return queryset.distinct()
class ProjectExtraViewSet(ProjectViewSet):
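Why `.distinct()` is the right fix: filtering across the to-many `partnerships` relation makes the underlying SQL join return one row per matching partnership, so a project partnered more than once with the same organisation shows up more than once. A small standalone illustration of that join behaviour using `sqlite3` (hypothetical tables, not the real RSR schema):

```python
import sqlite3

con = sqlite3.connect(":memory:")
con.executescript("""
    CREATE TABLE project (id INTEGER PRIMARY KEY, title TEXT);
    CREATE TABLE partnership (project_id INTEGER, organisation_id INTEGER, role INTEGER);
    INSERT INTO project VALUES (1, 'Water for all');
    -- the same organisation holds two partnerships on the same project
    INSERT INTO partnership VALUES (1, 42, 101), (1, 42, 2);
""")

base = ("FROM project p JOIN partnership ps ON ps.project_id = p.id "
        "WHERE ps.organisation_id = 42")
print(con.execute("SELECT p.id " + base).fetchall())           # [(1,), (1,)] -- duplicated
print(con.execute("SELECT DISTINCT p.id " + base).fetchall())  # [(1,)] -- what .distinct() produces
```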
| [verification_info JSON omitted: it repeats the issue text, the golden diff above, and the before/after contents of akvo/rest/views/project.py in escaped form] | 1,883 | 106 |
gh_patches_debug_25926 | rasdani/github-patches | git_diff | PennyLaneAI__pennylane-2601 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
int to qml.BasisEmbedding
### Feature details
Currently, to use `BasisEmbedding` we have to pass a list with the bits [0,1,0,0] for example to put a PauliX gate in the second qubit. It would be nice to be able to pass an integer number like:
```python
qml.BasisEmbedding(4, wires = range(4))
```
This result would be the same that:
```python
qml.BasisEmbedding([0,1,0,0], wires = range(4))
```
because 0100 is the number 4 in binary (note that PennyLane uses the convention $|q_0,q_1,\dots,q_{N-1}\rangle$ where $q_0$ is the most significant bit).
This feature has a lot of applications, for example for qRAMs where we could do things like this one:
```python
for i in range(16):
qml.BasisEmbedding(i, wires = range(4))
### some stuff here
```
### Implementation
I'm using this function to translate the integer to the list:
```python
def list_d(num, n_wires):
mods = []
while num != 0 or len(mods) != n_wires:
mods.insert(0, num % 2)
num //= 2
return mods
```
```
list_d(4,5)
>>> [0,0,1,0,0]
```
Probably incorporating this routine will be enough. However, some checks should be added because if num > 2^n_wires, the code will fail.
### How important would you say this feature is?
1: Not important. Would be nice to have.
### Additional information
Make sure that the order of the list is the same as Pennylane.
There are places where it is written from top to bottom and vice versa.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pennylane/templates/embeddings/basis.py`
Content:
```
1 # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 r"""
15 Contains the BasisEmbedding template.
16 """
17 # pylint: disable-msg=too-many-branches,too-many-arguments,protected-access
18 import pennylane as qml
19 from pennylane.operation import Operation, AnyWires
20 from pennylane.wires import Wires
21
22
23 class BasisEmbedding(Operation):
24 r"""Encodes :math:`n` binary features into a basis state of :math:`n` qubits.
25
26 For example, for ``features=np.array([0, 1, 0])``, the quantum system will be
27 prepared in state :math:`|010 \rangle`.
28
29 .. warning::
30
31 ``BasisEmbedding`` calls a circuit whose architecture depends on the binary features.
32 The ``features`` argument is therefore not differentiable when using the template, and
33 gradients with respect to the argument cannot be computed by PennyLane.
34
35 Args:
36 features (tensor_like): binary input of shape ``(len(wires), )``
37 wires (Any or Iterable[Any]): wires that the template acts on
38
39 Example:
40
41 Basis embedding encodes the binary feature vector into a basis state.
42
43 .. code-block:: python
44
45 dev = qml.device('default.qubit', wires=3)
46
47 @qml.qnode(dev)
48 def circuit(feature_vector):
49 qml.BasisEmbedding(features=feature_vector, wires=range(3))
50 return qml.state()
51
52 X = [1,1,1]
53
54 The resulting circuit is:
55
56 >>> print(qml.draw(circuit, expansion_strategy="device")(X))
57 0: ──X─┤ State
58 1: ──X─┤ State
59 2: ──X─┤ State
60
61 And, the output state is:
62
63 >>> print(circuit(X))
64 [0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 1.+0.j]
65
66 Thus, ``[1,1,1]`` is mapped to :math:`|111 \rangle`.
67
68 """
69
70 num_wires = AnyWires
71 grad_method = None
72
73 def __init__(self, features, wires, do_queue=True, id=None):
74
75 wires = Wires(wires)
76 shape = qml.math.shape(features)
77
78 if len(shape) != 1:
79 raise ValueError(f"Features must be one-dimensional; got shape {shape}.")
80
81 n_features = shape[0]
82 if n_features != len(wires):
83 raise ValueError(f"Features must be of length {len(wires)}; got length {n_features}.")
84
85 features = list(qml.math.toarray(features))
86
87 if not set(features).issubset({0, 1}):
88 raise ValueError(f"Basis state must only consist of 0s and 1s; got {features}")
89
90 self._hyperparameters = {"basis_state": features}
91
92 super().__init__(wires=wires, do_queue=do_queue, id=id)
93
94 @property
95 def num_params(self):
96 return 0
97
98 @staticmethod
99 def compute_decomposition(wires, basis_state): # pylint: disable=arguments-differ
100 r"""Representation of the operator as a product of other operators.
101
102 .. math:: O = O_1 O_2 \dots O_n.
103
104
105
106 .. seealso:: :meth:`~.BasisEmbedding.decomposition`.
107
108 Args:
109 features (tensor-like): binary input of shape ``(len(wires), )``
110 wires (Any or Iterable[Any]): wires that the operator acts on
111
112 Returns:
113 list[.Operator]: decomposition of the operator
114
115 **Example**
116
117 >>> features = torch.tensor([1, 0, 1])
118 >>> qml.BasisEmbedding.compute_decomposition(features, wires=["a", "b", "c"])
119 [PauliX(wires=['a']),
120 PauliX(wires=['c'])]
121 """
122 ops_list = []
123 for wire, bit in zip(wires, basis_state):
124 if bit == 1:
125 ops_list.append(qml.PauliX(wire))
126
127 return ops_list
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pennylane/templates/embeddings/basis.py b/pennylane/templates/embeddings/basis.py
--- a/pennylane/templates/embeddings/basis.py
+++ b/pennylane/templates/embeddings/basis.py
@@ -23,8 +23,8 @@
class BasisEmbedding(Operation):
r"""Encodes :math:`n` binary features into a basis state of :math:`n` qubits.
- For example, for ``features=np.array([0, 1, 0])``, the quantum system will be
- prepared in state :math:`|010 \rangle`.
+ For example, for ``features=np.array([0, 1, 0])`` or ``features=2`` (binary 10), the
+ quantum system will be prepared in state :math:`|010 \rangle`.
.. warning::
@@ -72,6 +72,10 @@
def __init__(self, features, wires, do_queue=True, id=None):
+ if isinstance(features, int):
+ bin_string = f"{features:b}".zfill(len(wires))
+ features = [1 if d == "1" else 0 for d in bin_string]
+
wires = Wires(wires)
shape = qml.math.shape(features)
@@ -80,7 +84,9 @@
n_features = shape[0]
if n_features != len(wires):
- raise ValueError(f"Features must be of length {len(wires)}; got length {n_features}.")
+ raise ValueError(
+ f"Features must be of length {len(wires)}; got length {n_features} (features={features})."
+ )
features = list(qml.math.toarray(features))
| {"golden_diff": "diff --git a/pennylane/templates/embeddings/basis.py b/pennylane/templates/embeddings/basis.py\n--- a/pennylane/templates/embeddings/basis.py\n+++ b/pennylane/templates/embeddings/basis.py\n@@ -23,8 +23,8 @@\n class BasisEmbedding(Operation):\n r\"\"\"Encodes :math:`n` binary features into a basis state of :math:`n` qubits.\n \n- For example, for ``features=np.array([0, 1, 0])``, the quantum system will be\n- prepared in state :math:`|010 \\rangle`.\n+ For example, for ``features=np.array([0, 1, 0])`` or ``features=2`` (binary 10), the\n+ quantum system will be prepared in state :math:`|010 \\rangle`.\n \n .. warning::\n \n@@ -72,6 +72,10 @@\n \n def __init__(self, features, wires, do_queue=True, id=None):\n \n+ if isinstance(features, int):\n+ bin_string = f\"{features:b}\".zfill(len(wires))\n+ features = [1 if d == \"1\" else 0 for d in bin_string]\n+\n wires = Wires(wires)\n shape = qml.math.shape(features)\n \n@@ -80,7 +84,9 @@\n \n n_features = shape[0]\n if n_features != len(wires):\n- raise ValueError(f\"Features must be of length {len(wires)}; got length {n_features}.\")\n+ raise ValueError(\n+ f\"Features must be of length {len(wires)}; got length {n_features} (features={features}).\"\n+ )\n \n features = list(qml.math.toarray(features))\n", "issue": "int to qml.BasisEmbedding\n### Feature details\r\n\r\nCurrently, to use `BasisEmbedding` we have to pass a list with the bits [0,1,0,0] for example to put a PauliX gate in the second qubit. It would be nice to be able to pass an integer number like:\r\n\r\n```python\r\nqml.BasisEmbedding(4, wires = range(4))\r\n```\r\nThis result would be the same that:\r\n\r\n```python\r\nqml.BasisEmbedding([0,1,0,0], wires = range(4))\r\n```\r\nbecause 0100 is the number 4 in binary (note that PennyLane uses the convention $|q_0,q_1,\\dots,q_{N-1}\\rangle$ where $q_0$ is the most significant bit).\r\n\r\nThis feature have a lot of applications, for example for qRAMs where we could do things like this one:\r\n\r\n```python\r\nfor i in range(16):\r\n qml.BasisEmbedding(i, wires = range(4))\r\n ### some stuff here\r\n```\r\n\r\n### Implementation\r\n\r\nI'm using this functions to translate the integer to the list:\r\n\r\n```python\r\ndef list_d(num, n_wires):\r\n\r\n mods = []\r\n while num != 0 or len(mods) != n_wires:\r\n mods.insert(0, num % 2)\r\n num //= 2\r\n\r\n return mods\r\n```\r\n\r\n```\r\nlist_d(4,5)\r\n>>> [0,0,1,0,0]\r\n```\r\n\r\nProbably incorporating this routine will be enough. However, some checks should be added because if num > 2^n_wires, the code will fail.\r\n\r\n\r\n### How important would you say this feature is?\r\n\r\n1: Not important. Would be nice to have.\r\n\r\n### Additional information\r\n\r\nMake sure that the order of the list is the same as Pennylane. 
\r\nThere are places where it is written from top to bottom and vice versa.\n", "before_files": [{"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nContains the BasisEmbedding template.\n\"\"\"\n# pylint: disable-msg=too-many-branches,too-many-arguments,protected-access\nimport pennylane as qml\nfrom pennylane.operation import Operation, AnyWires\nfrom pennylane.wires import Wires\n\n\nclass BasisEmbedding(Operation):\n r\"\"\"Encodes :math:`n` binary features into a basis state of :math:`n` qubits.\n\n For example, for ``features=np.array([0, 1, 0])``, the quantum system will be\n prepared in state :math:`|010 \\rangle`.\n\n .. warning::\n\n ``BasisEmbedding`` calls a circuit whose architecture depends on the binary features.\n The ``features`` argument is therefore not differentiable when using the template, and\n gradients with respect to the argument cannot be computed by PennyLane.\n\n Args:\n features (tensor_like): binary input of shape ``(len(wires), )``\n wires (Any or Iterable[Any]): wires that the template acts on\n\n Example:\n\n Basis embedding encodes the binary feature vector into a basis state.\n\n .. code-block:: python\n\n dev = qml.device('default.qubit', wires=3)\n\n @qml.qnode(dev)\n def circuit(feature_vector):\n qml.BasisEmbedding(features=feature_vector, wires=range(3))\n return qml.state()\n\n X = [1,1,1]\n\n The resulting circuit is:\n\n >>> print(qml.draw(circuit, expansion_strategy=\"device\")(X))\n 0: \u2500\u2500X\u2500\u2524 State\n 1: \u2500\u2500X\u2500\u2524 State\n 2: \u2500\u2500X\u2500\u2524 State\n\n And, the output state is:\n\n >>> print(circuit(X))\n [0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 1.+0.j]\n\n Thus, ``[1,1,1]`` is mapped to :math:`|111 \\rangle`.\n\n \"\"\"\n\n num_wires = AnyWires\n grad_method = None\n\n def __init__(self, features, wires, do_queue=True, id=None):\n\n wires = Wires(wires)\n shape = qml.math.shape(features)\n\n if len(shape) != 1:\n raise ValueError(f\"Features must be one-dimensional; got shape {shape}.\")\n\n n_features = shape[0]\n if n_features != len(wires):\n raise ValueError(f\"Features must be of length {len(wires)}; got length {n_features}.\")\n\n features = list(qml.math.toarray(features))\n\n if not set(features).issubset({0, 1}):\n raise ValueError(f\"Basis state must only consist of 0s and 1s; got {features}\")\n\n self._hyperparameters = {\"basis_state\": features}\n\n super().__init__(wires=wires, do_queue=do_queue, id=id)\n\n @property\n def num_params(self):\n return 0\n\n @staticmethod\n def compute_decomposition(wires, basis_state): # pylint: disable=arguments-differ\n r\"\"\"Representation of the operator as a product of other operators.\n\n .. math:: O = O_1 O_2 \\dots O_n.\n\n\n\n .. 
seealso:: :meth:`~.BasisEmbedding.decomposition`.\n\n Args:\n features (tensor-like): binary input of shape ``(len(wires), )``\n wires (Any or Iterable[Any]): wires that the operator acts on\n\n Returns:\n list[.Operator]: decomposition of the operator\n\n **Example**\n\n >>> features = torch.tensor([1, 0, 1])\n >>> qml.BasisEmbedding.compute_decomposition(features, wires=[\"a\", \"b\", \"c\"])\n [PauliX(wires=['a']),\n PauliX(wires=['c'])]\n \"\"\"\n ops_list = []\n for wire, bit in zip(wires, basis_state):\n if bit == 1:\n ops_list.append(qml.PauliX(wire))\n\n return ops_list\n", "path": "pennylane/templates/embeddings/basis.py"}], "after_files": [{"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nContains the BasisEmbedding template.\n\"\"\"\n# pylint: disable-msg=too-many-branches,too-many-arguments,protected-access\nimport pennylane as qml\nfrom pennylane.operation import Operation, AnyWires\nfrom pennylane.wires import Wires\n\n\nclass BasisEmbedding(Operation):\n r\"\"\"Encodes :math:`n` binary features into a basis state of :math:`n` qubits.\n\n For example, for ``features=np.array([0, 1, 0])`` or ``features=2`` (binary 10), the\n quantum system will be prepared in state :math:`|010 \\rangle`.\n\n .. warning::\n\n ``BasisEmbedding`` calls a circuit whose architecture depends on the binary features.\n The ``features`` argument is therefore not differentiable when using the template, and\n gradients with respect to the argument cannot be computed by PennyLane.\n\n Args:\n features (tensor_like): binary input of shape ``(len(wires), )``\n wires (Any or Iterable[Any]): wires that the template acts on\n\n Example:\n\n Basis embedding encodes the binary feature vector into a basis state.\n\n .. 
code-block:: python\n\n dev = qml.device('default.qubit', wires=3)\n\n @qml.qnode(dev)\n def circuit(feature_vector):\n qml.BasisEmbedding(features=feature_vector, wires=range(3))\n return qml.state()\n\n X = [1,1,1]\n\n The resulting circuit is:\n\n >>> print(qml.draw(circuit, expansion_strategy=\"device\")(X))\n 0: \u2500\u2500X\u2500\u2524 State\n 1: \u2500\u2500X\u2500\u2524 State\n 2: \u2500\u2500X\u2500\u2524 State\n\n And, the output state is:\n\n >>> print(circuit(X))\n [0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 1.+0.j]\n\n Thus, ``[1,1,1]`` is mapped to :math:`|111 \\rangle`.\n\n \"\"\"\n\n num_wires = AnyWires\n grad_method = None\n\n def __init__(self, features, wires, do_queue=True, id=None):\n\n if isinstance(features, int):\n bin_string = f\"{features:b}\".zfill(len(wires))\n features = [1 if d == \"1\" else 0 for d in bin_string]\n\n wires = Wires(wires)\n shape = qml.math.shape(features)\n\n if len(shape) != 1:\n raise ValueError(f\"Features must be one-dimensional; got shape {shape}.\")\n\n n_features = shape[0]\n if n_features != len(wires):\n raise ValueError(\n f\"Features must be of length {len(wires)}; got length {n_features} (features={features}).\"\n )\n\n features = list(qml.math.toarray(features))\n\n if not set(features).issubset({0, 1}):\n raise ValueError(f\"Basis state must only consist of 0s and 1s; got {features}\")\n\n self._hyperparameters = {\"basis_state\": features}\n\n super().__init__(wires=wires, do_queue=do_queue, id=id)\n\n @property\n def num_params(self):\n return 0\n\n @staticmethod\n def compute_decomposition(wires, basis_state): # pylint: disable=arguments-differ\n r\"\"\"Representation of the operator as a product of other operators.\n\n .. math:: O = O_1 O_2 \\dots O_n.\n\n\n\n .. seealso:: :meth:`~.BasisEmbedding.decomposition`.\n\n Args:\n features (tensor-like): binary input of shape ``(len(wires), )``\n wires (Any or Iterable[Any]): wires that the operator acts on\n\n Returns:\n list[.Operator]: decomposition of the operator\n\n **Example**\n\n >>> features = torch.tensor([1, 0, 1])\n >>> qml.BasisEmbedding.compute_decomposition(features, wires=[\"a\", \"b\", \"c\"])\n [PauliX(wires=['a']),\n PauliX(wires=['c'])]\n \"\"\"\n ops_list = []\n for wire, bit in zip(wires, basis_state):\n if bit == 1:\n ops_list.append(qml.PauliX(wire))\n\n return ops_list\n", "path": "pennylane/templates/embeddings/basis.py"}]} | 2,021 | 394 |
gh_patches_debug_17293 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1637 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Feature]: Add support for Christchurch (NZ) special date overrides
### I propose a feature for:
Sources
### Describe your wanted feature
The API for collection dates for Christchurch City Council does not automatically apply any special date overrides (for example, when your collection day falls near a public holiday and is moved).
A separate URL provides a list of these potential overrides; it needs to be called separately and the results merged.
The URL is [https://ccc.govt.nz/api/kerbsidedateoverrides](https://ccc.govt.nz/api/kerbsidedateoverrides)
It responds to HTTP GET with no authentication requirements and will return an array of overrides dates in this format:
```
{
ID: 32,
Title: "New Year Friday 2024",
OriginalDate: "2024-01-05",
NewDate: "2024-01-06",
Expired: 0
}
```
If your collection date falls on `OriginalDate` it needs to be moved to `NewDate`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/ccc_govt_nz.py`
Content:
```
1 import datetime
2
3 import requests
4 from waste_collection_schedule import Collection
5
6 # Include work around for SSL UNSAFE_LEGACY_RENEGOTIATION_DISABLED error
7 from waste_collection_schedule.service.SSLError import get_legacy_session
8
9
10 TITLE = "Christchurch City Council"
11 DESCRIPTION = "Source for Christchurch City Council."
12 URL = "https://ccc.govt.nz"
13 TEST_CASES = {"53 Hereford Street": {"address": "53 Hereford Street"}}
14
15
16 class Source:
17 def __init__(self, address):
18 self._address = address
19
20 def fetch(self):
21
22 s = get_legacy_session()
23
24 entries = []
25
26 # Find the Rating Unit ID by the physical address
27 # While a property may have more than one address, bins are allocated by each Rating Unit
28 addressQuery = {
29 "q": self._address,
30 "status": "current",
31 "crs": "epsg:4326",
32 "limit": 1,
33 }
34
35 r = s.get("https://opendata.ccc.govt.nz/CCCSearch/rest/address/suggest",
36 params=addressQuery,
37 # verify=False,
38 )
39 address = r.json()
40
41 # Find the Bin service by Rating Unit ID
42 binsHeaders = {
43 "client_id": "69f433c880c74c349b0128e9fa1b6a93",
44 "client_secret": "139F3D2A83E34AdF98c80566f2eb7212"
45 }
46
47 # Updated request using SSL code snippet
48 r = s.get("https://ccc-data-citizen-api-v1-prod.au-s1.cloudhub.io/api/v1/properties/" + str(address[0]["RatingUnitID"]),
49 headers=binsHeaders
50 # verify=False,
51 )
52 bins = r.json()
53
54 # Deduplicate the Bins in case the Rating Unit has more than one of the same Bin type
55 bins = {each["material"]: each for each in bins["bins"]["collections"]}.values()
56
57 # Process each Bin
58 for bin in bins:
59 entries.append(
60 Collection(
61 datetime.datetime.strptime(
62 bin["next_planned_date_app"], "%Y-%m-%d"
63 ).date(),
64 bin["material"],
65 )
66 )
67
68 return entries
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ccc_govt_nz.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ccc_govt_nz.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ccc_govt_nz.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ccc_govt_nz.py
@@ -54,6 +54,16 @@
# Deduplicate the Bins in case the Rating Unit has more than one of the same Bin type
bins = {each["material"]: each for each in bins["bins"]["collections"]}.values()
+ # Get the list of Overrides for any special dates
+ # It will be an array of these: { ID: 32, Title: "New Year Friday 2024", OriginalDate: "2024-01-05", NewDate: "2024-01-06", Expired: 0 }
+ overrides = requests.get("https://ccc.govt.nz/api/kerbsidedateoverrides").json()
+
+ # Process each Override
+ for bin in bins:
+ for override in overrides:
+ if override["OriginalDate"] == bin["next_planned_date_app"]:
+ bin["next_planned_date_app"] = override["NewDate"]
+
# Process each Bin
for bin in bins:
entries.append(
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ccc_govt_nz.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ccc_govt_nz.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ccc_govt_nz.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ccc_govt_nz.py\n@@ -54,6 +54,16 @@\n # Deduplicate the Bins in case the Rating Unit has more than one of the same Bin type\n bins = {each[\"material\"]: each for each in bins[\"bins\"][\"collections\"]}.values()\n \n+ # Get the list of Overrides for any special dates\n+ # It will be an array of these: { ID: 32, Title: \"New Year Friday 2024\", OriginalDate: \"2024-01-05\", NewDate: \"2024-01-06\", Expired: 0 }\n+ overrides = requests.get(\"https://ccc.govt.nz/api/kerbsidedateoverrides\").json()\n+\n+ # Process each Override\n+ for bin in bins:\n+ for override in overrides:\n+ if override[\"OriginalDate\"] == bin[\"next_planned_date_app\"]:\n+ bin[\"next_planned_date_app\"] = override[\"NewDate\"]\n+\n # Process each Bin\n for bin in bins:\n entries.append(\n", "issue": "[Feature]: Add support for Christchurch (NZ) special date overrides\n### I propose a feature for:\n\nSources\n\n### Describe your wanted feature\n\nThe API for collection dates for Christchurch City Council does not automatically apply any special date overrides (for example, when your collection day falls near a public holiday and is moved).\r\nA separate URL provides a list of these potential overrides, it needs to be called separately and the results merged.\r\n\r\nThe URL is [https://ccc.govt.nz/api/kerbsidedateoverrides](https://ccc.govt.nz/api/kerbsidedateoverrides)\r\nIt responds to HTTP GET with no authentication requirements and will return an array of overrides dates in this format:\r\n```\r\n{\r\n ID: 32,\r\n Title: \"New Year Friday 2024\",\r\n OriginalDate: \"2024-01-05\",\r\n NewDate: \"2024-01-06\",\r\n Expired: 0\r\n}\r\n```\r\nIf your collection date falls on `OriginalDate` it needs to be moved to `NewDate`.\n", "before_files": [{"content": "import datetime\n\nimport requests\nfrom waste_collection_schedule import Collection\n\n# Include work around for SSL UNSAFE_LEGACY_RENEGOTIATION_DISABLED error\nfrom waste_collection_schedule.service.SSLError import get_legacy_session\n\n\nTITLE = \"Christchurch City Council\"\nDESCRIPTION = \"Source for Christchurch City Council.\"\nURL = \"https://ccc.govt.nz\"\nTEST_CASES = {\"53 Hereford Street\": {\"address\": \"53 Hereford Street\"}}\n\n\nclass Source:\n def __init__(self, address):\n self._address = address\n\n def fetch(self):\n\n s = get_legacy_session()\n\n entries = []\n\n # Find the Rating Unit ID by the physical address\n # While a property may have more than one address, bins are allocated by each Rating Unit\n addressQuery = {\n \"q\": self._address,\n \"status\": \"current\",\n \"crs\": \"epsg:4326\",\n \"limit\": 1,\n }\n\n r = s.get(\"https://opendata.ccc.govt.nz/CCCSearch/rest/address/suggest\",\n params=addressQuery,\n # verify=False,\n )\n address = r.json()\n\n # Find the Bin service by Rating Unit ID\n binsHeaders = {\n \"client_id\": \"69f433c880c74c349b0128e9fa1b6a93\",\n \"client_secret\": \"139F3D2A83E34AdF98c80566f2eb7212\"\n }\n\n # Updated request using SSL code snippet\n r = s.get(\"https://ccc-data-citizen-api-v1-prod.au-s1.cloudhub.io/api/v1/properties/\" + str(address[0][\"RatingUnitID\"]),\n headers=binsHeaders\n # verify=False,\n )\n bins = 
r.json()\n \n # Deduplicate the Bins in case the Rating Unit has more than one of the same Bin type\n bins = {each[\"material\"]: each for each in bins[\"bins\"][\"collections\"]}.values()\n\n # Process each Bin\n for bin in bins:\n entries.append(\n Collection(\n datetime.datetime.strptime(\n bin[\"next_planned_date_app\"], \"%Y-%m-%d\"\n ).date(),\n bin[\"material\"],\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/ccc_govt_nz.py"}], "after_files": [{"content": "import datetime\n\nimport requests\nfrom waste_collection_schedule import Collection\n\n# Include work around for SSL UNSAFE_LEGACY_RENEGOTIATION_DISABLED error\nfrom waste_collection_schedule.service.SSLError import get_legacy_session\n\n\nTITLE = \"Christchurch City Council\"\nDESCRIPTION = \"Source for Christchurch City Council.\"\nURL = \"https://ccc.govt.nz\"\nTEST_CASES = {\"53 Hereford Street\": {\"address\": \"53 Hereford Street\"}}\n\n\nclass Source:\n def __init__(self, address):\n self._address = address\n\n def fetch(self):\n\n s = get_legacy_session()\n\n entries = []\n\n # Find the Rating Unit ID by the physical address\n # While a property may have more than one address, bins are allocated by each Rating Unit\n addressQuery = {\n \"q\": self._address,\n \"status\": \"current\",\n \"crs\": \"epsg:4326\",\n \"limit\": 1,\n }\n\n r = s.get(\"https://opendata.ccc.govt.nz/CCCSearch/rest/address/suggest\",\n params=addressQuery,\n # verify=False,\n )\n address = r.json()\n\n # Find the Bin service by Rating Unit ID\n binsHeaders = {\n \"client_id\": \"69f433c880c74c349b0128e9fa1b6a93\",\n \"client_secret\": \"139F3D2A83E34AdF98c80566f2eb7212\"\n }\n\n # Updated request using SSL code snippet\n r = s.get(\"https://ccc-data-citizen-api-v1-prod.au-s1.cloudhub.io/api/v1/properties/\" + str(address[0][\"RatingUnitID\"]),\n headers=binsHeaders\n # verify=False,\n )\n bins = r.json()\n \n # Deduplicate the Bins in case the Rating Unit has more than one of the same Bin type\n bins = {each[\"material\"]: each for each in bins[\"bins\"][\"collections\"]}.values()\n\n # Get the list of Overrides for any special dates\n # It will be an array of these: { ID: 32, Title: \"New Year Friday 2024\", OriginalDate: \"2024-01-05\", NewDate: \"2024-01-06\", Expired: 0 }\n overrides = requests.get(\"https://ccc.govt.nz/api/kerbsidedateoverrides\").json()\n\n # Process each Override\n for bin in bins:\n for override in overrides:\n if override[\"OriginalDate\"] == bin[\"next_planned_date_app\"]:\n bin[\"next_planned_date_app\"] = override[\"NewDate\"]\n\n # Process each Bin\n for bin in bins:\n entries.append(\n Collection(\n datetime.datetime.strptime(\n bin[\"next_planned_date_app\"], \"%Y-%m-%d\"\n ).date(),\n bin[\"material\"],\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/ccc_govt_nz.py"}]} | 1,149 | 319 |
gh_patches_debug_35436 | rasdani/github-patches | git_diff | cocotb__cocotb-2200 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cocotb.hook does not do anything unless in a module present in COCOTB_HOOKS
Marking a function with `@cocotb.hook` is insufficient for it to do anything - you also have to set the `COCOTB_HOOKS` environment variable to contain the module name where the decorated function resides.
Either we should document this, or we should just remove `cocotb.hook`. None of our tests test `cocotb.hook`. Is anyone using it?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cocotb/xunit_reporter.py`
Content:
```
1 # Copyright (c) 2013 Potential Ventures Ltd
2 # Copyright (c) 2013 SolarFlare Communications Inc
3 # All rights reserved.
4 #
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are met:
7 # * Redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer.
9 # * Redistributions in binary form must reproduce the above copyright
10 # notice, this list of conditions and the following disclaimer in the
11 # documentation and/or other materials provided with the distribution.
12 # * Neither the name of Potential Ventures Ltd,
13 # SolarFlare Communications Inc nor the
14 # names of its contributors may be used to endorse or promote products
15 # derived from this software without specific prior written permission.
16 #
17 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
21 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 from xml.etree.ElementTree import Element, SubElement
29 import xml.etree.ElementTree as ET
30
31 import mmap
32 from io import StringIO
33
34 TRUNCATE_LINES = 100
35
36
37 # file from http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail
38 class File(StringIO):
39
40 def countlines(self):
41 buf = mmap.mmap(self.fileno(), 0)
42 lines = 0
43 while buf.readline():
44 lines += 1
45 return lines
46
47 def head(self, lines_2find=1):
48 self.seek(0) # Rewind file
49 return [self.next() for x in range(lines_2find)]
50
51 def tail(self, lines_2find=1):
52 self.seek(0, 2) # go to end of file
53 bytes_in_file = self.tell()
54 lines_found, total_bytes_scanned = 0, 0
55 while (lines_2find+1 > lines_found and
56 bytes_in_file > total_bytes_scanned):
57 byte_block = min(1024, bytes_in_file-total_bytes_scanned)
58 self.seek(-(byte_block+total_bytes_scanned), 2)
59 total_bytes_scanned += byte_block
60 lines_found += self.read(1024).count('\n')
61 self.seek(-total_bytes_scanned, 2)
62 line_list = list(self.readlines())
63 return line_list[-lines_2find:]
64
65
66 class XUnitReporter:
67
68 def __init__(self, filename="results.xml"):
69 self.results = Element("testsuites", name="results")
70 self.filename = filename
71
72 def add_testsuite(self, **kwargs):
73 self.last_testsuite = SubElement(self.results, "testsuite", **kwargs)
74 return self.last_testsuite
75
76 def add_testcase(self, testsuite=None, **kwargs):
77 if testsuite is None:
78 testsuite = self.last_testsuite
79 self.last_testcase = SubElement(testsuite, "testcase", **kwargs)
80 return self.last_testcase
81
82 def add_property(self, testsuite=None, **kwargs):
83 if testsuite is None:
84 testsuite = self.last_testsuite
85 self.last_property = SubElement(testsuite, "property", **kwargs)
86 return self.last_property
87
88 def update_testsuite(self, testsuite=None, **kwargs):
89 if testsuite is None:
90 testsuite = self.last_testsuite
91 for k in kwargs:
92 testsuite.set(k, str(kwargs[k]))
93
94 def update_testsuites(self, **kwargs):
95 for k in kwargs:
96 self.results.set(k, str(kwargs[k]))
97
98 def add_log(self, logfile, testcase=None):
99 if testcase is None:
100 testcase = self.last_testcase
101 log = SubElement(testcase, "system-out")
102 f = File(logfile, 'r+')
103 lines = f.countlines()
104 if lines > (TRUNCATE_LINES * 2):
105 head = f.head(TRUNCATE_LINES)
106 tail = f.tail(TRUNCATE_LINES)
107 log.text = "".join(head + list("[...truncated %d lines...]\n" %
108 ((lines - (TRUNCATE_LINES*2)))) + tail)
109 else:
110 log.text = "".join(f.readlines())
111
112 def add_failure(self, testcase=None, **kwargs):
113 if testcase is None:
114 testcase = self.last_testcase
115 SubElement(testcase, "failure", **kwargs)
116
117 def add_skipped(self, testcase=None, **kwargs):
118 if testcase is None:
119 testcase = self.last_testcase
120 SubElement(testcase, "skipped", **kwargs)
121
122 def indent(self, elem, level=0):
123 i = "\n" + level*" "
124 if len(elem):
125 if not elem.text or not elem.text.strip():
126 elem.text = i + " "
127 if not elem.tail or not elem.tail.strip():
128 elem.tail = i
129 for elem in elem:
130 self.indent(elem, level+1)
131 if not elem.tail or not elem.tail.strip():
132 elem.tail = i
133 else:
134 if level and (not elem.tail or not elem.tail.strip()):
135 elem.tail = i
136
137 def write(self):
138 self.indent(self.results)
139 ET.ElementTree(self.results).write(self.filename, encoding="UTF-8")
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cocotb/xunit_reporter.py b/cocotb/xunit_reporter.py
--- a/cocotb/xunit_reporter.py
+++ b/cocotb/xunit_reporter.py
@@ -28,40 +28,6 @@
from xml.etree.ElementTree import Element, SubElement
import xml.etree.ElementTree as ET
-import mmap
-from io import StringIO
-
-TRUNCATE_LINES = 100
-
-
-# file from http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail
-class File(StringIO):
-
- def countlines(self):
- buf = mmap.mmap(self.fileno(), 0)
- lines = 0
- while buf.readline():
- lines += 1
- return lines
-
- def head(self, lines_2find=1):
- self.seek(0) # Rewind file
- return [self.next() for x in range(lines_2find)]
-
- def tail(self, lines_2find=1):
- self.seek(0, 2) # go to end of file
- bytes_in_file = self.tell()
- lines_found, total_bytes_scanned = 0, 0
- while (lines_2find+1 > lines_found and
- bytes_in_file > total_bytes_scanned):
- byte_block = min(1024, bytes_in_file-total_bytes_scanned)
- self.seek(-(byte_block+total_bytes_scanned), 2)
- total_bytes_scanned += byte_block
- lines_found += self.read(1024).count('\n')
- self.seek(-total_bytes_scanned, 2)
- line_list = list(self.readlines())
- return line_list[-lines_2find:]
-
class XUnitReporter:
@@ -85,30 +51,6 @@
self.last_property = SubElement(testsuite, "property", **kwargs)
return self.last_property
- def update_testsuite(self, testsuite=None, **kwargs):
- if testsuite is None:
- testsuite = self.last_testsuite
- for k in kwargs:
- testsuite.set(k, str(kwargs[k]))
-
- def update_testsuites(self, **kwargs):
- for k in kwargs:
- self.results.set(k, str(kwargs[k]))
-
- def add_log(self, logfile, testcase=None):
- if testcase is None:
- testcase = self.last_testcase
- log = SubElement(testcase, "system-out")
- f = File(logfile, 'r+')
- lines = f.countlines()
- if lines > (TRUNCATE_LINES * 2):
- head = f.head(TRUNCATE_LINES)
- tail = f.tail(TRUNCATE_LINES)
- log.text = "".join(head + list("[...truncated %d lines...]\n" %
- ((lines - (TRUNCATE_LINES*2)))) + tail)
- else:
- log.text = "".join(f.readlines())
-
def add_failure(self, testcase=None, **kwargs):
if testcase is None:
testcase = self.last_testcase
| {"golden_diff": "diff --git a/cocotb/xunit_reporter.py b/cocotb/xunit_reporter.py\n--- a/cocotb/xunit_reporter.py\n+++ b/cocotb/xunit_reporter.py\n@@ -28,40 +28,6 @@\n from xml.etree.ElementTree import Element, SubElement\n import xml.etree.ElementTree as ET\n \n-import mmap\n-from io import StringIO\n-\n-TRUNCATE_LINES = 100\n-\n-\n-# file from http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail\n-class File(StringIO):\n-\n- def countlines(self):\n- buf = mmap.mmap(self.fileno(), 0)\n- lines = 0\n- while buf.readline():\n- lines += 1\n- return lines\n-\n- def head(self, lines_2find=1):\n- self.seek(0) # Rewind file\n- return [self.next() for x in range(lines_2find)]\n-\n- def tail(self, lines_2find=1):\n- self.seek(0, 2) # go to end of file\n- bytes_in_file = self.tell()\n- lines_found, total_bytes_scanned = 0, 0\n- while (lines_2find+1 > lines_found and\n- bytes_in_file > total_bytes_scanned):\n- byte_block = min(1024, bytes_in_file-total_bytes_scanned)\n- self.seek(-(byte_block+total_bytes_scanned), 2)\n- total_bytes_scanned += byte_block\n- lines_found += self.read(1024).count('\\n')\n- self.seek(-total_bytes_scanned, 2)\n- line_list = list(self.readlines())\n- return line_list[-lines_2find:]\n-\n \n class XUnitReporter:\n \n@@ -85,30 +51,6 @@\n self.last_property = SubElement(testsuite, \"property\", **kwargs)\n return self.last_property\n \n- def update_testsuite(self, testsuite=None, **kwargs):\n- if testsuite is None:\n- testsuite = self.last_testsuite\n- for k in kwargs:\n- testsuite.set(k, str(kwargs[k]))\n-\n- def update_testsuites(self, **kwargs):\n- for k in kwargs:\n- self.results.set(k, str(kwargs[k]))\n-\n- def add_log(self, logfile, testcase=None):\n- if testcase is None:\n- testcase = self.last_testcase\n- log = SubElement(testcase, \"system-out\")\n- f = File(logfile, 'r+')\n- lines = f.countlines()\n- if lines > (TRUNCATE_LINES * 2):\n- head = f.head(TRUNCATE_LINES)\n- tail = f.tail(TRUNCATE_LINES)\n- log.text = \"\".join(head + list(\"[...truncated %d lines...]\\n\" %\n- ((lines - (TRUNCATE_LINES*2)))) + tail)\n- else:\n- log.text = \"\".join(f.readlines())\n-\n def add_failure(self, testcase=None, **kwargs):\n if testcase is None:\n testcase = self.last_testcase\n", "issue": "cocotb.hook does not do anything unless in a module present in COCOTB_HOOKS\nMarking a function with `@cocotb.hook` is insufficient for it to do anything - you also have to set the `COCOTB_HOOKS` environment variable to contain the module name where the decorated function resides.\r\n\r\nEither we should document this, or we should just remove `cocotb.hook`. None of our tests test `cocotb.hook`. 
Is anyone using it?\n", "before_files": [{"content": "# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom xml.etree.ElementTree import Element, SubElement\nimport xml.etree.ElementTree as ET\n\nimport mmap\nfrom io import StringIO\n\nTRUNCATE_LINES = 100\n\n\n# file from http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail\nclass File(StringIO):\n\n def countlines(self):\n buf = mmap.mmap(self.fileno(), 0)\n lines = 0\n while buf.readline():\n lines += 1\n return lines\n\n def head(self, lines_2find=1):\n self.seek(0) # Rewind file\n return [self.next() for x in range(lines_2find)]\n\n def tail(self, lines_2find=1):\n self.seek(0, 2) # go to end of file\n bytes_in_file = self.tell()\n lines_found, total_bytes_scanned = 0, 0\n while (lines_2find+1 > lines_found and\n bytes_in_file > total_bytes_scanned):\n byte_block = min(1024, bytes_in_file-total_bytes_scanned)\n self.seek(-(byte_block+total_bytes_scanned), 2)\n total_bytes_scanned += byte_block\n lines_found += self.read(1024).count('\\n')\n self.seek(-total_bytes_scanned, 2)\n line_list = list(self.readlines())\n return line_list[-lines_2find:]\n\n\nclass XUnitReporter:\n\n def __init__(self, filename=\"results.xml\"):\n self.results = Element(\"testsuites\", name=\"results\")\n self.filename = filename\n\n def add_testsuite(self, **kwargs):\n self.last_testsuite = SubElement(self.results, \"testsuite\", **kwargs)\n return self.last_testsuite\n\n def add_testcase(self, testsuite=None, **kwargs):\n if testsuite is None:\n testsuite = self.last_testsuite\n self.last_testcase = SubElement(testsuite, \"testcase\", **kwargs)\n return self.last_testcase\n\n def add_property(self, testsuite=None, **kwargs):\n if testsuite is None:\n testsuite = self.last_testsuite\n self.last_property = SubElement(testsuite, \"property\", **kwargs)\n return self.last_property\n\n def update_testsuite(self, testsuite=None, **kwargs):\n if testsuite is None:\n 
testsuite = self.last_testsuite\n for k in kwargs:\n testsuite.set(k, str(kwargs[k]))\n\n def update_testsuites(self, **kwargs):\n for k in kwargs:\n self.results.set(k, str(kwargs[k]))\n\n def add_log(self, logfile, testcase=None):\n if testcase is None:\n testcase = self.last_testcase\n log = SubElement(testcase, \"system-out\")\n f = File(logfile, 'r+')\n lines = f.countlines()\n if lines > (TRUNCATE_LINES * 2):\n head = f.head(TRUNCATE_LINES)\n tail = f.tail(TRUNCATE_LINES)\n log.text = \"\".join(head + list(\"[...truncated %d lines...]\\n\" %\n ((lines - (TRUNCATE_LINES*2)))) + tail)\n else:\n log.text = \"\".join(f.readlines())\n\n def add_failure(self, testcase=None, **kwargs):\n if testcase is None:\n testcase = self.last_testcase\n SubElement(testcase, \"failure\", **kwargs)\n\n def add_skipped(self, testcase=None, **kwargs):\n if testcase is None:\n testcase = self.last_testcase\n SubElement(testcase, \"skipped\", **kwargs)\n\n def indent(self, elem, level=0):\n i = \"\\n\" + level*\" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n self.indent(elem, level+1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i\n\n def write(self):\n self.indent(self.results)\n ET.ElementTree(self.results).write(self.filename, encoding=\"UTF-8\")\n", "path": "cocotb/xunit_reporter.py"}], "after_files": [{"content": "# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom xml.etree.ElementTree import Element, SubElement\nimport xml.etree.ElementTree as ET\n\n\nclass XUnitReporter:\n\n def __init__(self, filename=\"results.xml\"):\n self.results = Element(\"testsuites\", name=\"results\")\n self.filename = filename\n\n def add_testsuite(self, **kwargs):\n self.last_testsuite = SubElement(self.results, \"testsuite\", **kwargs)\n return self.last_testsuite\n\n def add_testcase(self, testsuite=None, **kwargs):\n if testsuite is None:\n testsuite = self.last_testsuite\n self.last_testcase = SubElement(testsuite, \"testcase\", **kwargs)\n return self.last_testcase\n\n def add_property(self, testsuite=None, **kwargs):\n if testsuite is None:\n testsuite = self.last_testsuite\n self.last_property = SubElement(testsuite, \"property\", **kwargs)\n return self.last_property\n\n def add_failure(self, testcase=None, **kwargs):\n if testcase is None:\n testcase = self.last_testcase\n SubElement(testcase, \"failure\", **kwargs)\n\n def add_skipped(self, testcase=None, **kwargs):\n if testcase is None:\n testcase = self.last_testcase\n SubElement(testcase, \"skipped\", **kwargs)\n\n def indent(self, elem, level=0):\n i = \"\\n\" + level*\" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n self.indent(elem, level+1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i\n\n def write(self):\n self.indent(self.results)\n ET.ElementTree(self.results).write(self.filename, encoding=\"UTF-8\")\n", "path": "cocotb/xunit_reporter.py"}]} | 1,970 | 712 |
gh_patches_debug_25338 | rasdani/github-patches | git_diff | streamlit__streamlit-2482 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Streamlit fails to start without Git executable
# Summary
Streamlit version `0.69.1` fails to start when run inside a Docker container that doesn't have Git installed.
# Steps to reproduce
1. Create a `Dockerfile` with the following contents:
```dockerfile
FROM python:3.8-slim
RUN pip install streamlit
CMD ["streamlit", "hello"]
```
2. Build the image:
```bash
docker build -t demo .
```
3. Run the app:
```bash
docker run -it --rm demo
```
## Expected behavior:
Streamlit starts without issues.
## Actual behavior:
Streamlit fails to start and displays the following error message:
```bash
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/git/__init__.py", line 83, in <module>
refresh()
File "/usr/local/lib/python3.8/site-packages/git/__init__.py", line 73, in refresh
if not Git.refresh(path=path):
File "/usr/local/lib/python3.8/site-packages/git/cmd.py", line 278, in refresh
raise ImportError(err)
ImportError: Bad git executable.
The git executable must be specified in one of the following ways:
- be included in your $PATH
- be set via $GIT_PYTHON_GIT_EXECUTABLE
- explicitly set via git.refresh()
All git commands will error until this is rectified.
This initial warning can be silenced or aggravated in the future by setting the
$GIT_PYTHON_REFRESH environment variable. Use one of the following values:
- quiet|q|silence|s|none|n|0: for no warning or exception
- warn|w|warning|1: for a printed warning
- error|e|raise|r|2: for a raised exception
Example:
export GIT_PYTHON_REFRESH=quiet
```
## Is this a regression?
**yes** (worked up until at least version `0.67.1`)
# Debug info
- Streamlit version: `0.69.1`
- Python version: `3.8.6`
- Using Conda? PipEnv? PyEnv? Pex? **NO**
- OS version: `4.19.76-linuxkit`
# Additional information
This bug can be worked around by setting `GIT_PYTHON_REFRESH=quiet` environment variable inside the Docker image.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/streamlit/git_util.py`
Content:
```
1 import os
2 import re
3 from typing import Optional, Tuple
4
5 import git # type: ignore[import]
6
7 # Github has two URLs, one that is https and one that is ssh
8 GITHUB_HTTP_URL = r"^https://(www\.)?github.com/(.+)/(.+).git$"
9 GITHUB_SSH_URL = r"^[email protected]:(.+)/(.+).git$"
10
11 # We don't support git < 2.7, because we can't get repo info without
12 # talking to the remote server, which results in the user being prompted
13 # for credentials.
14 MIN_GIT_VERSION = (2, 7, 0)
15
16
17 class GitRepo:
18 def __init__(self, path):
19 # If we have a valid repo, git_version will be a tuple of 3+ ints:
20 # (major, minor, patch, possible_additional_patch_number)
21 self.git_version = None # type: Optional[Tuple[int, ...]]
22
23 try:
24 self.repo = git.Repo(path, search_parent_directories=True)
25 self.git_version = self.repo.git.version_info
26 if self.git_version >= MIN_GIT_VERSION:
27 git_root = self.repo.git.rev_parse("--show-toplevel")
28 self.module = os.path.relpath(path, git_root)
29
30 except:
31 # The git repo must be invalid for the following reasons:
32 # * No .git folder
33 # * Corrupted .git folder
34 # * Path is invalid
35 self.repo = None
36
37 def is_valid(self) -> bool:
38 """True if there's a git repo here, and git.version >= MIN_GIT_VERSION."""
39 return (
40 self.repo is not None
41 and self.git_version is not None
42 and self.git_version >= MIN_GIT_VERSION
43 )
44
45 @property
46 def tracking_branch(self):
47 if not self.is_valid():
48 return None
49 return self.repo.active_branch.tracking_branch()
50
51 def get_tracking_branch_remote(self):
52 if not self.is_valid():
53 return None
54
55 tracking_branch = self.tracking_branch
56 if tracking_branch is None:
57 return None
58
59 remote_name, *branch = tracking_branch.name.split("/")
60 branch_name = "/".join(branch)
61
62 return self.repo.remote(remote_name), branch_name
63
64 def is_github_repo(self):
65 if not self.is_valid():
66 return False
67
68 remote_info = self.get_tracking_branch_remote()
69 if remote_info is None:
70 return False
71
72 remote, _branch = remote_info
73
74 for url in remote.urls:
75 if (
76 re.match(GITHUB_HTTP_URL, url) is not None
77 or re.match(GITHUB_SSH_URL, url) is not None
78 ):
79 return True
80
81 return False
82
83 def get_repo_info(self):
84 if not self.is_valid():
85 return None
86
87 remote_info = self.get_tracking_branch_remote()
88 if remote_info is None:
89 return None
90
91 remote, branch = remote_info
92
93 repo = None
94 for url in remote.urls:
95 https_matches = re.match(GITHUB_HTTP_URL, url)
96 ssh_matches = re.match(GITHUB_SSH_URL, url)
97 if https_matches is not None:
98 repo = f"{https_matches.group(2)}/{https_matches.group(3)}"
99 break
100
101 if ssh_matches is not None:
102 repo = f"{ssh_matches.group(1)}/{ssh_matches.group(2)}"
103 break
104
105 if repo is None:
106 return None
107
108 return repo, branch, self.module
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/streamlit/git_util.py b/lib/streamlit/git_util.py
--- a/lib/streamlit/git_util.py
+++ b/lib/streamlit/git_util.py
@@ -2,8 +2,6 @@
import re
from typing import Optional, Tuple
-import git # type: ignore[import]
-
# Github has two URLs, one that is https and one that is ssh
GITHUB_HTTP_URL = r"^https://(www\.)?github.com/(.+)/(.+).git$"
GITHUB_SSH_URL = r"^[email protected]:(.+)/(.+).git$"
@@ -21,6 +19,8 @@
self.git_version = None # type: Optional[Tuple[int, ...]]
try:
+ import git # type: ignore[import]
+
self.repo = git.Repo(path, search_parent_directories=True)
self.git_version = self.repo.git.version_info
if self.git_version >= MIN_GIT_VERSION:
@@ -29,6 +29,7 @@
except:
# The git repo must be invalid for the following reasons:
+ # * git binary or GitPython not installed
# * No .git folder
# * Corrupted .git folder
# * Path is invalid
| {"golden_diff": "diff --git a/lib/streamlit/git_util.py b/lib/streamlit/git_util.py\n--- a/lib/streamlit/git_util.py\n+++ b/lib/streamlit/git_util.py\n@@ -2,8 +2,6 @@\n import re\n from typing import Optional, Tuple\n \n-import git # type: ignore[import]\n-\n # Github has two URLs, one that is https and one that is ssh\n GITHUB_HTTP_URL = r\"^https://(www\\.)?github.com/(.+)/(.+).git$\"\n GITHUB_SSH_URL = r\"^[email protected]:(.+)/(.+).git$\"\n@@ -21,6 +19,8 @@\n self.git_version = None # type: Optional[Tuple[int, ...]]\n \n try:\n+ import git # type: ignore[import]\n+\n self.repo = git.Repo(path, search_parent_directories=True)\n self.git_version = self.repo.git.version_info\n if self.git_version >= MIN_GIT_VERSION:\n@@ -29,6 +29,7 @@\n \n except:\n # The git repo must be invalid for the following reasons:\n+ # * git binary or GitPython not installed\n # * No .git folder\n # * Corrupted .git folder\n # * Path is invalid\n", "issue": "Streamlit fails to start without Git executable\n# Summary\r\n\r\nStreamlit version `0.69.1` fails to start when run inside a Docker container that doesn't have Git installed.\r\n\r\n# Steps to reproduce\r\n\r\n1. Create a `Dockerfile` with the following contents:\r\n```dockerfile\r\nFROM python:3.8-slim\r\nRUN pip install streamlit\r\nCMD [\"streamlit\", \"hello\"]\r\n```\r\n2. Build the image:\r\n```bash\r\ndocker build -t demo .\r\n```\r\n3. Run the app:\r\n```bash\r\ndocker run -it --rm demo\r\n```\r\n\r\n## Expected behavior:\r\n\r\nStreamlit starts without issues.\r\n\r\n## Actual behavior:\r\n\r\nStreamlit fails to start and displays the following error message:\r\n\r\n```bash\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/site-packages/git/__init__.py\", line 83, in <module>\r\n refresh()\r\n File \"/usr/local/lib/python3.8/site-packages/git/__init__.py\", line 73, in refresh\r\n if not Git.refresh(path=path):\r\n File \"/usr/local/lib/python3.8/site-packages/git/cmd.py\", line 278, in refresh\r\n raise ImportError(err)\r\nImportError: Bad git executable.\r\nThe git executable must be specified in one of the following ways:\r\n - be included in your $PATH\r\n - be set via $GIT_PYTHON_GIT_EXECUTABLE\r\n - explicitly set via git.refresh()\r\n\r\nAll git commands will error until this is rectified.\r\n\r\nThis initial warning can be silenced or aggravated in the future by setting the\r\n$GIT_PYTHON_REFRESH environment variable. Use one of the following values:\r\n - quiet|q|silence|s|none|n|0: for no warning or exception\r\n - warn|w|warning|1: for a printed warning\r\n - error|e|raise|r|2: for a raised exception\r\n\r\nExample:\r\n export GIT_PYTHON_REFRESH=quiet\r\n```\r\n\r\n## Is this a regression?\r\n\r\n**yes** (worked up until at least version `0.67.1`)\r\n\r\n# Debug info\r\n\r\n- Streamlit version: `0.69.1`\r\n- Python version: `3.8.6`\r\n- Using Conda? PipEnv? PyEnv? Pex? 
**NO**\r\n- OS version: `4.19.76-linuxkit`\r\n\r\n# Additional information\r\n\r\nThis bug can be worked around by setting `GIT_PYTHON_REFRESH=quiet` environment variable inside the Docker image.\r\n\n", "before_files": [{"content": "import os\nimport re\nfrom typing import Optional, Tuple\n\nimport git # type: ignore[import]\n\n# Github has two URLs, one that is https and one that is ssh\nGITHUB_HTTP_URL = r\"^https://(www\\.)?github.com/(.+)/(.+).git$\"\nGITHUB_SSH_URL = r\"^[email protected]:(.+)/(.+).git$\"\n\n# We don't support git < 2.7, because we can't get repo info without\n# talking to the remote server, which results in the user being prompted\n# for credentials.\nMIN_GIT_VERSION = (2, 7, 0)\n\n\nclass GitRepo:\n def __init__(self, path):\n # If we have a valid repo, git_version will be a tuple of 3+ ints:\n # (major, minor, patch, possible_additional_patch_number)\n self.git_version = None # type: Optional[Tuple[int, ...]]\n\n try:\n self.repo = git.Repo(path, search_parent_directories=True)\n self.git_version = self.repo.git.version_info\n if self.git_version >= MIN_GIT_VERSION:\n git_root = self.repo.git.rev_parse(\"--show-toplevel\")\n self.module = os.path.relpath(path, git_root)\n\n except:\n # The git repo must be invalid for the following reasons:\n # * No .git folder\n # * Corrupted .git folder\n # * Path is invalid\n self.repo = None\n\n def is_valid(self) -> bool:\n \"\"\"True if there's a git repo here, and git.version >= MIN_GIT_VERSION.\"\"\"\n return (\n self.repo is not None\n and self.git_version is not None\n and self.git_version >= MIN_GIT_VERSION\n )\n\n @property\n def tracking_branch(self):\n if not self.is_valid():\n return None\n return self.repo.active_branch.tracking_branch()\n\n def get_tracking_branch_remote(self):\n if not self.is_valid():\n return None\n\n tracking_branch = self.tracking_branch\n if tracking_branch is None:\n return None\n\n remote_name, *branch = tracking_branch.name.split(\"/\")\n branch_name = \"/\".join(branch)\n\n return self.repo.remote(remote_name), branch_name\n\n def is_github_repo(self):\n if not self.is_valid():\n return False\n\n remote_info = self.get_tracking_branch_remote()\n if remote_info is None:\n return False\n\n remote, _branch = remote_info\n\n for url in remote.urls:\n if (\n re.match(GITHUB_HTTP_URL, url) is not None\n or re.match(GITHUB_SSH_URL, url) is not None\n ):\n return True\n\n return False\n\n def get_repo_info(self):\n if not self.is_valid():\n return None\n\n remote_info = self.get_tracking_branch_remote()\n if remote_info is None:\n return None\n\n remote, branch = remote_info\n\n repo = None\n for url in remote.urls:\n https_matches = re.match(GITHUB_HTTP_URL, url)\n ssh_matches = re.match(GITHUB_SSH_URL, url)\n if https_matches is not None:\n repo = f\"{https_matches.group(2)}/{https_matches.group(3)}\"\n break\n\n if ssh_matches is not None:\n repo = f\"{ssh_matches.group(1)}/{ssh_matches.group(2)}\"\n break\n\n if repo is None:\n return None\n\n return repo, branch, self.module\n", "path": "lib/streamlit/git_util.py"}], "after_files": [{"content": "import os\nimport re\nfrom typing import Optional, Tuple\n\n# Github has two URLs, one that is https and one that is ssh\nGITHUB_HTTP_URL = r\"^https://(www\\.)?github.com/(.+)/(.+).git$\"\nGITHUB_SSH_URL = r\"^[email protected]:(.+)/(.+).git$\"\n\n# We don't support git < 2.7, because we can't get repo info without\n# talking to the remote server, which results in the user being prompted\n# for credentials.\nMIN_GIT_VERSION = (2, 7, 0)\n\n\nclass 
GitRepo:\n def __init__(self, path):\n # If we have a valid repo, git_version will be a tuple of 3+ ints:\n # (major, minor, patch, possible_additional_patch_number)\n self.git_version = None # type: Optional[Tuple[int, ...]]\n\n try:\n import git # type: ignore[import]\n\n self.repo = git.Repo(path, search_parent_directories=True)\n self.git_version = self.repo.git.version_info\n if self.git_version >= MIN_GIT_VERSION:\n git_root = self.repo.git.rev_parse(\"--show-toplevel\")\n self.module = os.path.relpath(path, git_root)\n\n except:\n # The git repo must be invalid for the following reasons:\n # * git binary or GitPython not installed\n # * No .git folder\n # * Corrupted .git folder\n # * Path is invalid\n self.repo = None\n\n def is_valid(self) -> bool:\n \"\"\"True if there's a git repo here, and git.version >= MIN_GIT_VERSION.\"\"\"\n return (\n self.repo is not None\n and self.git_version is not None\n and self.git_version >= MIN_GIT_VERSION\n )\n\n @property\n def tracking_branch(self):\n if not self.is_valid():\n return None\n return self.repo.active_branch.tracking_branch()\n\n def get_tracking_branch_remote(self):\n if not self.is_valid():\n return None\n\n tracking_branch = self.tracking_branch\n if tracking_branch is None:\n return None\n\n remote_name, *branch = tracking_branch.name.split(\"/\")\n branch_name = \"/\".join(branch)\n\n return self.repo.remote(remote_name), branch_name\n\n def is_github_repo(self):\n if not self.is_valid():\n return False\n\n remote_info = self.get_tracking_branch_remote()\n if remote_info is None:\n return False\n\n remote, _branch = remote_info\n\n for url in remote.urls:\n if (\n re.match(GITHUB_HTTP_URL, url) is not None\n or re.match(GITHUB_SSH_URL, url) is not None\n ):\n return True\n\n return False\n\n def get_repo_info(self):\n if not self.is_valid():\n return None\n\n remote_info = self.get_tracking_branch_remote()\n if remote_info is None:\n return None\n\n remote, branch = remote_info\n\n repo = None\n for url in remote.urls:\n https_matches = re.match(GITHUB_HTTP_URL, url)\n ssh_matches = re.match(GITHUB_SSH_URL, url)\n if https_matches is not None:\n repo = f\"{https_matches.group(2)}/{https_matches.group(3)}\"\n break\n\n if ssh_matches is not None:\n repo = f\"{ssh_matches.group(1)}/{ssh_matches.group(2)}\"\n break\n\n if repo is None:\n return None\n\n return repo, branch, self.module\n", "path": "lib/streamlit/git_util.py"}]} | 1,779 | 281 |
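The fix above amounts to a lazy import: `import git` moves inside the constructor's `try` block, so a missing git binary or missing GitPython is treated like any other invalid-repo condition instead of crashing Streamlit at module import time. A stripped-down sketch of the pattern (exception handling simplified; not the full Streamlit class):

```python
class GitRepo:
    def __init__(self, path):
        self.git_version = None
        self.repo = None
        try:
            # Importing here means GitPython's "Bad git executable"
            # ImportError is caught together with the other
            # invalid-repo cases below.
            import git  # type: ignore[import]

            self.repo = git.Repo(path, search_parent_directories=True)
            self.git_version = self.repo.git.version_info
        except Exception:
            # git binary or GitPython not installed, no .git folder,
            # corrupted .git folder, or invalid path.
            self.repo = None
```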
gh_patches_debug_1623 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-671 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use extra for asyncio dependencies
Hello! The latest release for this library pulls in aiohttp and its dependencies unconditionally, which adds non-trivial burden to projects that don’t need it. Would you consider using a packaging extra so that people can opt-in?
TODO: undo pin of 'aiohttp' once 'aioresponses' releases a fix
Environment details
- OS: $ sw_vers
ProductName: Mac OS X
ProductVersion: 10.14.6
BuildVersion: 18G6020
- Python version: 3.6, 3.7, 3.8
- pip version: pip 20.2.4
- `google-auth` version: 5906c8583ca351b5385a079a30521a9a8a0c7c59
#### Steps to reproduce
1. nox -s unit
There are 9 tests that fail, all with the same error:
`TypeError: __init__() missing 1 required positional argument: 'limit'`
```
====================================================== short test summary info =======================================================
FAILED tests_async/transport/test_aiohttp_requests.py::TestCombinedResponse::test_content_compressed - TypeError: __init__() missin...
FAILED tests_async/transport/test_aiohttp_requests.py::TestResponse::test_headers_prop - TypeError: __init__() missing 1 required p...
FAILED tests_async/transport/test_aiohttp_requests.py::TestResponse::test_status_prop - TypeError: __init__() missing 1 required po...
FAILED tests_async/transport/test_aiohttp_requests.py::TestAuthorizedSession::test_request - TypeError: __init__() missing 1 requir...
FAILED tests_async/transport/test_aiohttp_requests.py::TestAuthorizedSession::test_ctx - TypeError: __init__() missing 1 required p...
FAILED tests_async/transport/test_aiohttp_requests.py::TestAuthorizedSession::test_http_headers - TypeError: __init__() missing 1 r...
FAILED tests_async/transport/test_aiohttp_requests.py::TestAuthorizedSession::test_regexp_example - TypeError: __init__() missing 1...
FAILED tests_async/transport/test_aiohttp_requests.py::TestAuthorizedSession::test_request_no_refresh - TypeError: __init__() missi...
FAILED tests_async/transport/test_aiohttp_requests.py::TestAuthorizedSession::test_request_refresh - TypeError: __init__() missing ...
============================================ 9 failed, 609 passed, 12 warnings in 33.41s =============================================
```
Here is the traceback for one of the failing tests:
```
____________________________________________ TestCombinedResponse.test_content_compressed ____________________________________________
self = <tests_async.transport.test_aiohttp_requests.TestCombinedResponse object at 0x108803160>
urllib3_mock = <function decompress at 0x10880a820>
@mock.patch(
"google.auth.transport._aiohttp_requests.urllib3.response.MultiDecoder.decompress",
return_value="decompressed",
autospec=True,
)
@pytest.mark.asyncio
async def test_content_compressed(self, urllib3_mock):
rm = core.RequestMatch(
"url", headers={"Content-Encoding": "gzip"}, payload="compressed"
)
> response = await rm.build_response(core.URL("url"))
tests_async/transport/test_aiohttp_requests.py:72:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../../../.virtualenv/google-auth-library-python/lib/python3.8/site-packages/aioresponses/core.py:192: in build_response
resp = self._build_response(
../../../.virtualenv/google-auth-library-python/lib/python3.8/site-packages/aioresponses/core.py:173: in _build_response
resp.content = stream_reader_factory(loop)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
loop = <Mock id='4437587472'>
def stream_reader_factory( # noqa
loop: 'Optional[asyncio.AbstractEventLoop]' = None
):
protocol = ResponseHandler(loop=loop)
> return StreamReader(protocol, loop=loop)
E TypeError: __init__() missing 1 required positional argument: 'limit'
../../../.virtualenv/google-auth-library-python/lib/python3.8/site-packages/aioresponses/compat.py:48: TypeError
========================================================== warnings summary ==========================================================
```
The root cause is a change in aiohttp version 3.7.0 which was released a few hours ago. The signature for StreamReader has changed, making the optional argument `limit` a required argument.
https://github.com/aio-libs/aiohttp/blob/56e78836aa7c67292ace9e256711699d51d57285/aiohttp/streams.py#L106
This change breaks aioresponses:
https://github.com/pnuckowski/aioresponses/blob/e61977f42a0164e0c572031dfb18ae95ba198df0/aioresponses/compat.py#L44
Add support for Python 3.9
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `synth.py`
Content:
```
1 import synthtool as s
2 from synthtool import gcp
3
4 common = gcp.CommonTemplates()
5
6 # ----------------------------------------------------------------------------
7 # Add templated files
8 # ----------------------------------------------------------------------------
9 templated_files = common.py_library(unit_cov_level=100, cov_level=100)
10 s.move(
11 templated_files / ".kokoro",
12 excludes=[
13 ".kokoro/continuous/common.cfg",
14 ".kokoro/presubmit/common.cfg",
15 ".kokoro/build.sh",
16 ],
17 ) # just move kokoro configs
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/synth.py b/synth.py
--- a/synth.py
+++ b/synth.py
@@ -10,8 +10,8 @@
s.move(
templated_files / ".kokoro",
excludes=[
- ".kokoro/continuous/common.cfg",
- ".kokoro/presubmit/common.cfg",
- ".kokoro/build.sh",
+ "continuous/common.cfg",
+ "presubmit/common.cfg",
+ "build.sh",
],
) # just move kokoro configs
| {"golden_diff": "diff --git a/synth.py b/synth.py\n--- a/synth.py\n+++ b/synth.py\n@@ -10,8 +10,8 @@\n s.move(\n templated_files / \".kokoro\",\n excludes=[\n- \".kokoro/continuous/common.cfg\",\n- \".kokoro/presubmit/common.cfg\",\n- \".kokoro/build.sh\",\n+ \"continuous/common.cfg\",\n+ \"presubmit/common.cfg\",\n+ \"build.sh\",\n ],\n ) # just move kokoro configs\n", "issue": "Use extra for asyncio dependencies\nHello! The latest release for this library pulls in aiohttp and its dependencies unconditionally, which adds non-trivial burden to projects that don\u2019t need it. Would you consider using a packaging extra so that people can opt-in?\nTODO: undo pin of 'aiohttp' once 'aioresponses' releases a fix\nEnvironment details\r\n\r\n - OS: $ sw_vers\r\nProductName: Mac OS X\r\nProductVersion: 10.14.6\r\nBuildVersion: 18G6020\r\n\r\n - Python version: 3.6, 3.7, 3.8\r\n - pip version: pip 20.2.4\r\n - `google-auth` version: 5906c8583ca351b5385a079a30521a9a8a0c7c59\r\n\r\n#### Steps to reproduce\r\n\r\n 1. nox -s unit\r\n\r\n\r\nThere are 9 tests that fail, all with the same error:\r\n\r\n`TypeError: __init__() missing 1 required positional argument: 'limit'`\r\n\r\n\r\n```\r\n====================================================== short test summary info =======================================================\r\nFAILED tests_async/transport/test_aiohttp_requests.py::TestCombinedResponse::test_content_compressed - TypeError: __init__() missin...\r\nFAILED tests_async/transport/test_aiohttp_requests.py::TestResponse::test_headers_prop - TypeError: __init__() missing 1 required p...\r\nFAILED tests_async/transport/test_aiohttp_requests.py::TestResponse::test_status_prop - TypeError: __init__() missing 1 required po...\r\nFAILED tests_async/transport/test_aiohttp_requests.py::TestAuthorizedSession::test_request - TypeError: __init__() missing 1 requir...\r\nFAILED tests_async/transport/test_aiohttp_requests.py::TestAuthorizedSession::test_ctx - TypeError: __init__() missing 1 required p...\r\nFAILED tests_async/transport/test_aiohttp_requests.py::TestAuthorizedSession::test_http_headers - TypeError: __init__() missing 1 r...\r\nFAILED tests_async/transport/test_aiohttp_requests.py::TestAuthorizedSession::test_regexp_example - TypeError: __init__() missing 1...\r\nFAILED tests_async/transport/test_aiohttp_requests.py::TestAuthorizedSession::test_request_no_refresh - TypeError: __init__() missi...\r\nFAILED tests_async/transport/test_aiohttp_requests.py::TestAuthorizedSession::test_request_refresh - TypeError: __init__() missing ...\r\n============================================ 9 failed, 609 passed, 12 warnings in 33.41s =============================================\r\n```\r\n\r\nHere is the traceback for one of the failing tests:\r\n\r\n\r\n```\r\n____________________________________________ TestCombinedResponse.test_content_compressed ____________________________________________\r\n\r\nself = <tests_async.transport.test_aiohttp_requests.TestCombinedResponse object at 0x108803160>\r\nurllib3_mock = <function decompress at 0x10880a820>\r\n\r\n @mock.patch(\r\n \"google.auth.transport._aiohttp_requests.urllib3.response.MultiDecoder.decompress\",\r\n return_value=\"decompressed\",\r\n autospec=True,\r\n )\r\n @pytest.mark.asyncio\r\n async def test_content_compressed(self, urllib3_mock):\r\n rm = core.RequestMatch(\r\n \"url\", headers={\"Content-Encoding\": \"gzip\"}, payload=\"compressed\"\r\n )\r\n> response = await 
rm.build_response(core.URL(\"url\"))\r\n\r\ntests_async/transport/test_aiohttp_requests.py:72: \r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n../../../.virtualenv/google-auth-library-python/lib/python3.8/site-packages/aioresponses/core.py:192: in build_response\r\n resp = self._build_response(\r\n../../../.virtualenv/google-auth-library-python/lib/python3.8/site-packages/aioresponses/core.py:173: in _build_response\r\n resp.content = stream_reader_factory(loop)\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n\r\nloop = <Mock id='4437587472'>\r\n\r\n def stream_reader_factory( # noqa\r\n loop: 'Optional[asyncio.AbstractEventLoop]' = None\r\n ):\r\n protocol = ResponseHandler(loop=loop)\r\n> return StreamReader(protocol, loop=loop)\r\nE TypeError: __init__() missing 1 required positional argument: 'limit'\r\n\r\n../../../.virtualenv/google-auth-library-python/lib/python3.8/site-packages/aioresponses/compat.py:48: TypeError\r\n========================================================== warnings summary ==========================================================\r\n```\r\n\r\nThe root cause is a change in aiohttp version 3.7.0 which was released a few hours ago. The signature for StreamReader has changed, making the optional argument `limit` a required argument.\r\n\r\nhttps://github.com/aio-libs/aiohttp/blob/56e78836aa7c67292ace9e256711699d51d57285/aiohttp/streams.py#L106\r\n\r\nThis change breaks aioresponses:\r\n\r\nhttps://github.com/pnuckowski/aioresponses/blob/e61977f42a0164e0c572031dfb18ae95ba198df0/aioresponses/compat.py#L44\r\n\r\n\nAdd support for Python 3.9\n\n", "before_files": [{"content": "import synthtool as s\nfrom synthtool import gcp\n\ncommon = gcp.CommonTemplates()\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library(unit_cov_level=100, cov_level=100)\ns.move(\n templated_files / \".kokoro\",\n excludes=[\n \".kokoro/continuous/common.cfg\",\n \".kokoro/presubmit/common.cfg\",\n \".kokoro/build.sh\",\n ],\n) # just move kokoro configs\n", "path": "synth.py"}], "after_files": [{"content": "import synthtool as s\nfrom synthtool import gcp\n\ncommon = gcp.CommonTemplates()\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library(unit_cov_level=100, cov_level=100)\ns.move(\n templated_files / \".kokoro\",\n excludes=[\n \"continuous/common.cfg\",\n \"presubmit/common.cfg\",\n \"build.sh\",\n ],\n) # just move kokoro configs\n", "path": "synth.py"}]} | 1,645 | 114 |
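The first ask in the issue, making the asyncio stack opt-in, is a packaging change rather than a `synth.py` change. A hypothetical `setup.py` fragment showing what such an extra could look like (the extra name and version bound are assumptions for illustration, not the library's published metadata):

```python
from setuptools import setup

setup(
    name="google-auth",
    # ...core metadata and unconditional install_requires unchanged...
    extras_require={
        # Pulled in only via `pip install google-auth[aiohttp]`, so a
        # plain install no longer drags in aiohttp and its dependencies.
        "aiohttp": [
            "aiohttp >= 3.6.2, < 4.0.0",  # assumed bound
        ],
    },
)
```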
gh_patches_debug_26588 | rasdani/github-patches | git_diff | ckan__ckan-5723 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"Top Rated Datasets" needs to be removed from stats
**CKAN version**
2.9.1
**Describe the bug**
"Top Rated Datasets" is still in /stats even though ratings are being deprecated #5558
**Steps to reproduce**
With stats enabled, go to /stats page.
**Expected behavior**
"Top Rated Datasets" is removed from stats.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckanext/stats/blueprint.py`
Content:
```
1 # encoding: utf-8
2
3 from flask import Blueprint
4
5 from ckan.plugins.toolkit import c, render
6 import ckanext.stats.stats as stats_lib
7 import ckan.lib.helpers as h
8
9 stats = Blueprint(u'stats', __name__)
10
11
12 @stats.route(u'/stats')
13 def index():
14 stats = stats_lib.Stats()
15 extra_vars = {
16 u'top_rated_packages': stats.top_rated_packages(),
17 u'largest_groups': stats.largest_groups(),
18 u'top_tags': stats.top_tags(),
19 u'top_package_creators': stats.top_package_creators(),
20 }
21 return render(u'ckanext/stats/index.html', extra_vars)
22
```
Path: `ckanext/stats/stats.py`
Content:
```
1 # encoding: utf-8
2
3 import datetime
4 import logging
5 from ckan.common import config
6 from six import text_type
7 from sqlalchemy import Table, select, join, func, and_
8
9 import ckan.plugins as p
10 import ckan.model as model
11
12 log = logging.getLogger(__name__)
13 cache_enabled = p.toolkit.asbool(
14 config.get('ckanext.stats.cache_enabled', False)
15 )
16
17 if cache_enabled:
18 log.warn(
19 'ckanext.stats does not support caching in current implementations'
20 )
21
22 DATE_FORMAT = '%Y-%m-%d'
23
24
25 def table(name):
26 return Table(name, model.meta.metadata, autoload=True)
27
28
29 def datetime2date(datetime_):
30 return datetime.date(datetime_.year, datetime_.month, datetime_.day)
31
32
33 class Stats(object):
34
35 @classmethod
36 def top_rated_packages(cls, limit=10):
37 # NB Not using sqlalchemy as sqla 0.4 doesn't work using both group_by
38 # and apply_avg
39 package = table('package')
40 rating = table('rating')
41 sql = select(
42 [
43 package.c.id,
44 func.avg(rating.c.rating),
45 func.count(rating.c.rating)
46 ],
47 from_obj=[package.join(rating)]
48 ).where(and_(package.c.private == False, package.c.state == 'active')
49 ).group_by(package.c.id).order_by(
50 func.avg(rating.c.rating).desc(),
51 func.count(rating.c.rating).desc()
52 ).limit(limit)
53 res_ids = model.Session.execute(sql).fetchall()
54 res_pkgs = [(
55 model.Session.query(model.Package).get(text_type(pkg_id)), avg, num
56 ) for pkg_id, avg, num in res_ids]
57 return res_pkgs
58
59 @classmethod
60 def largest_groups(cls, limit=10):
61 member = table('member')
62 package = table('package')
63
64 j = join(member, package, member.c.table_id == package.c.id)
65
66 s = select(
67 [member.c.group_id,
68 func.count(member.c.table_id)]
69 ).select_from(j).group_by(member.c.group_id).where(
70 and_(
71 member.c.group_id != None, member.c.table_name == 'package',
72 package.c.private == False, package.c.state == 'active'
73 )
74 ).order_by(func.count(member.c.table_id).desc()).limit(limit)
75
76 res_ids = model.Session.execute(s).fetchall()
77 res_groups = [
78 (model.Session.query(model.Group).get(text_type(group_id)), val)
79 for group_id, val in res_ids
80 ]
81 return res_groups
82
83 @classmethod
84 def top_tags(cls, limit=10, returned_tag_info='object'): # by package
85 assert returned_tag_info in ('name', 'id', 'object')
86 tag = table('tag')
87 package_tag = table('package_tag')
88 package = table('package')
89 if returned_tag_info == 'name':
90 from_obj = [package_tag.join(tag)]
91 tag_column = tag.c.name
92 else:
93 from_obj = None
94 tag_column = package_tag.c.tag_id
95 j = join(
96 package_tag, package, package_tag.c.package_id == package.c.id
97 )
98 s = select([tag_column,
99 func.count(package_tag.c.package_id)],
100 from_obj=from_obj).select_from(j).where(
101 and_(
102 package_tag.c.state == 'active',
103 package.c.private == False,
104 package.c.state == 'active'
105 )
106 )
107 s = s.group_by(tag_column).order_by(
108 func.count(package_tag.c.package_id).desc()
109 ).limit(limit)
110 res_col = model.Session.execute(s).fetchall()
111 if returned_tag_info in ('id', 'name'):
112 return res_col
113 elif returned_tag_info == 'object':
114 res_tags = [
115 (model.Session.query(model.Tag).get(text_type(tag_id)), val)
116 for tag_id, val in res_col
117 ]
118 return res_tags
119
120 @classmethod
121 def top_package_creators(cls, limit=10):
122 userid_count = model.Session.query(
123 model.Package.creator_user_id,
124 func.count(model.Package.creator_user_id)
125 ).filter(model.Package.state == 'active'
126 ).filter(model.Package.private == False).group_by(
127 model.Package.creator_user_id
128 ).order_by(func.count(model.Package.creator_user_id).desc()
129 ).limit(limit).all()
130 user_count = [
131 (model.Session.query(model.User).get(text_type(user_id)), count)
132 for user_id, count in userid_count
133 if user_id
134 ]
135 return user_count
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckanext/stats/blueprint.py b/ckanext/stats/blueprint.py
--- a/ckanext/stats/blueprint.py
+++ b/ckanext/stats/blueprint.py
@@ -13,7 +13,6 @@
def index():
stats = stats_lib.Stats()
extra_vars = {
- u'top_rated_packages': stats.top_rated_packages(),
u'largest_groups': stats.largest_groups(),
u'top_tags': stats.top_tags(),
u'top_package_creators': stats.top_package_creators(),
diff --git a/ckanext/stats/stats.py b/ckanext/stats/stats.py
--- a/ckanext/stats/stats.py
+++ b/ckanext/stats/stats.py
@@ -32,30 +32,6 @@
class Stats(object):
- @classmethod
- def top_rated_packages(cls, limit=10):
- # NB Not using sqlalchemy as sqla 0.4 doesn't work using both group_by
- # and apply_avg
- package = table('package')
- rating = table('rating')
- sql = select(
- [
- package.c.id,
- func.avg(rating.c.rating),
- func.count(rating.c.rating)
- ],
- from_obj=[package.join(rating)]
- ).where(and_(package.c.private == False, package.c.state == 'active')
- ).group_by(package.c.id).order_by(
- func.avg(rating.c.rating).desc(),
- func.count(rating.c.rating).desc()
- ).limit(limit)
- res_ids = model.Session.execute(sql).fetchall()
- res_pkgs = [(
- model.Session.query(model.Package).get(text_type(pkg_id)), avg, num
- ) for pkg_id, avg, num in res_ids]
- return res_pkgs
-
@classmethod
def largest_groups(cls, limit=10):
member = table('member')
| {"golden_diff": "diff --git a/ckanext/stats/blueprint.py b/ckanext/stats/blueprint.py\n--- a/ckanext/stats/blueprint.py\n+++ b/ckanext/stats/blueprint.py\n@@ -13,7 +13,6 @@\n def index():\n stats = stats_lib.Stats()\n extra_vars = {\n- u'top_rated_packages': stats.top_rated_packages(),\n u'largest_groups': stats.largest_groups(),\n u'top_tags': stats.top_tags(),\n u'top_package_creators': stats.top_package_creators(),\ndiff --git a/ckanext/stats/stats.py b/ckanext/stats/stats.py\n--- a/ckanext/stats/stats.py\n+++ b/ckanext/stats/stats.py\n@@ -32,30 +32,6 @@\n \n class Stats(object):\n \n- @classmethod\n- def top_rated_packages(cls, limit=10):\n- # NB Not using sqlalchemy as sqla 0.4 doesn't work using both group_by\n- # and apply_avg\n- package = table('package')\n- rating = table('rating')\n- sql = select(\n- [\n- package.c.id,\n- func.avg(rating.c.rating),\n- func.count(rating.c.rating)\n- ],\n- from_obj=[package.join(rating)]\n- ).where(and_(package.c.private == False, package.c.state == 'active')\n- ).group_by(package.c.id).order_by(\n- func.avg(rating.c.rating).desc(),\n- func.count(rating.c.rating).desc()\n- ).limit(limit)\n- res_ids = model.Session.execute(sql).fetchall()\n- res_pkgs = [(\n- model.Session.query(model.Package).get(text_type(pkg_id)), avg, num\n- ) for pkg_id, avg, num in res_ids]\n- return res_pkgs\n-\n @classmethod\n def largest_groups(cls, limit=10):\n member = table('member')\n", "issue": "\"Top Rated Datasets\" needs to be removed from stats\n**CKAN version**\r\n2.9.1\r\n\r\n**Describe the bug**\r\n\"Top Rated Datasets\" is still in /stats even though ratings are being deprecated #5558\r\n\r\n**Steps to reproduce**\r\nWith stats enabled, go to /stats page.\r\n\r\n**Expected behavior**\r\n\"Top Rated Datasets\" is removed from stats.\r\n\n", "before_files": [{"content": "# encoding: utf-8\n\nfrom flask import Blueprint\n\nfrom ckan.plugins.toolkit import c, render\nimport ckanext.stats.stats as stats_lib\nimport ckan.lib.helpers as h\n\nstats = Blueprint(u'stats', __name__)\n\n\[email protected](u'/stats')\ndef index():\n stats = stats_lib.Stats()\n extra_vars = {\n u'top_rated_packages': stats.top_rated_packages(),\n u'largest_groups': stats.largest_groups(),\n u'top_tags': stats.top_tags(),\n u'top_package_creators': stats.top_package_creators(),\n }\n return render(u'ckanext/stats/index.html', extra_vars)\n", "path": "ckanext/stats/blueprint.py"}, {"content": "# encoding: utf-8\n\nimport datetime\nimport logging\nfrom ckan.common import config\nfrom six import text_type\nfrom sqlalchemy import Table, select, join, func, and_\n\nimport ckan.plugins as p\nimport ckan.model as model\n\nlog = logging.getLogger(__name__)\ncache_enabled = p.toolkit.asbool(\n config.get('ckanext.stats.cache_enabled', False)\n)\n\nif cache_enabled:\n log.warn(\n 'ckanext.stats does not support caching in current implementations'\n )\n\nDATE_FORMAT = '%Y-%m-%d'\n\n\ndef table(name):\n return Table(name, model.meta.metadata, autoload=True)\n\n\ndef datetime2date(datetime_):\n return datetime.date(datetime_.year, datetime_.month, datetime_.day)\n\n\nclass Stats(object):\n\n @classmethod\n def top_rated_packages(cls, limit=10):\n # NB Not using sqlalchemy as sqla 0.4 doesn't work using both group_by\n # and apply_avg\n package = table('package')\n rating = table('rating')\n sql = select(\n [\n package.c.id,\n func.avg(rating.c.rating),\n func.count(rating.c.rating)\n ],\n from_obj=[package.join(rating)]\n ).where(and_(package.c.private == False, package.c.state == 'active')\n 
).group_by(package.c.id).order_by(\n func.avg(rating.c.rating).desc(),\n func.count(rating.c.rating).desc()\n ).limit(limit)\n res_ids = model.Session.execute(sql).fetchall()\n res_pkgs = [(\n model.Session.query(model.Package).get(text_type(pkg_id)), avg, num\n ) for pkg_id, avg, num in res_ids]\n return res_pkgs\n\n @classmethod\n def largest_groups(cls, limit=10):\n member = table('member')\n package = table('package')\n\n j = join(member, package, member.c.table_id == package.c.id)\n\n s = select(\n [member.c.group_id,\n func.count(member.c.table_id)]\n ).select_from(j).group_by(member.c.group_id).where(\n and_(\n member.c.group_id != None, member.c.table_name == 'package',\n package.c.private == False, package.c.state == 'active'\n )\n ).order_by(func.count(member.c.table_id).desc()).limit(limit)\n\n res_ids = model.Session.execute(s).fetchall()\n res_groups = [\n (model.Session.query(model.Group).get(text_type(group_id)), val)\n for group_id, val in res_ids\n ]\n return res_groups\n\n @classmethod\n def top_tags(cls, limit=10, returned_tag_info='object'): # by package\n assert returned_tag_info in ('name', 'id', 'object')\n tag = table('tag')\n package_tag = table('package_tag')\n package = table('package')\n if returned_tag_info == 'name':\n from_obj = [package_tag.join(tag)]\n tag_column = tag.c.name\n else:\n from_obj = None\n tag_column = package_tag.c.tag_id\n j = join(\n package_tag, package, package_tag.c.package_id == package.c.id\n )\n s = select([tag_column,\n func.count(package_tag.c.package_id)],\n from_obj=from_obj).select_from(j).where(\n and_(\n package_tag.c.state == 'active',\n package.c.private == False,\n package.c.state == 'active'\n )\n )\n s = s.group_by(tag_column).order_by(\n func.count(package_tag.c.package_id).desc()\n ).limit(limit)\n res_col = model.Session.execute(s).fetchall()\n if returned_tag_info in ('id', 'name'):\n return res_col\n elif returned_tag_info == 'object':\n res_tags = [\n (model.Session.query(model.Tag).get(text_type(tag_id)), val)\n for tag_id, val in res_col\n ]\n return res_tags\n\n @classmethod\n def top_package_creators(cls, limit=10):\n userid_count = model.Session.query(\n model.Package.creator_user_id,\n func.count(model.Package.creator_user_id)\n ).filter(model.Package.state == 'active'\n ).filter(model.Package.private == False).group_by(\n model.Package.creator_user_id\n ).order_by(func.count(model.Package.creator_user_id).desc()\n ).limit(limit).all()\n user_count = [\n (model.Session.query(model.User).get(text_type(user_id)), count)\n for user_id, count in userid_count\n if user_id\n ]\n return user_count\n", "path": "ckanext/stats/stats.py"}], "after_files": [{"content": "# encoding: utf-8\n\nfrom flask import Blueprint\n\nfrom ckan.plugins.toolkit import c, render\nimport ckanext.stats.stats as stats_lib\nimport ckan.lib.helpers as h\n\nstats = Blueprint(u'stats', __name__)\n\n\[email protected](u'/stats')\ndef index():\n stats = stats_lib.Stats()\n extra_vars = {\n u'largest_groups': stats.largest_groups(),\n u'top_tags': stats.top_tags(),\n u'top_package_creators': stats.top_package_creators(),\n }\n return render(u'ckanext/stats/index.html', extra_vars)\n", "path": "ckanext/stats/blueprint.py"}, {"content": "# encoding: utf-8\n\nimport datetime\nimport logging\nfrom ckan.common import config\nfrom six import text_type\nfrom sqlalchemy import Table, select, join, func, and_\n\nimport ckan.plugins as p\nimport ckan.model as model\n\nlog = logging.getLogger(__name__)\ncache_enabled = p.toolkit.asbool(\n 
config.get('ckanext.stats.cache_enabled', False)\n)\n\nif cache_enabled:\n log.warn(\n 'ckanext.stats does not support caching in current implementations'\n )\n\nDATE_FORMAT = '%Y-%m-%d'\n\n\ndef table(name):\n return Table(name, model.meta.metadata, autoload=True)\n\n\ndef datetime2date(datetime_):\n return datetime.date(datetime_.year, datetime_.month, datetime_.day)\n\n\nclass Stats(object):\n\n @classmethod\n def largest_groups(cls, limit=10):\n member = table('member')\n package = table('package')\n\n j = join(member, package, member.c.table_id == package.c.id)\n\n s = select(\n [member.c.group_id,\n func.count(member.c.table_id)]\n ).select_from(j).group_by(member.c.group_id).where(\n and_(\n member.c.group_id != None, member.c.table_name == 'package',\n package.c.private == False, package.c.state == 'active'\n )\n ).order_by(func.count(member.c.table_id).desc()).limit(limit)\n\n res_ids = model.Session.execute(s).fetchall()\n res_groups = [\n (model.Session.query(model.Group).get(text_type(group_id)), val)\n for group_id, val in res_ids\n ]\n return res_groups\n\n @classmethod\n def top_tags(cls, limit=10, returned_tag_info='object'): # by package\n assert returned_tag_info in ('name', 'id', 'object')\n tag = table('tag')\n package_tag = table('package_tag')\n package = table('package')\n if returned_tag_info == 'name':\n from_obj = [package_tag.join(tag)]\n tag_column = tag.c.name\n else:\n from_obj = None\n tag_column = package_tag.c.tag_id\n j = join(\n package_tag, package, package_tag.c.package_id == package.c.id\n )\n s = select([tag_column,\n func.count(package_tag.c.package_id)],\n from_obj=from_obj).select_from(j).where(\n and_(\n package_tag.c.state == 'active',\n package.c.private == False,\n package.c.state == 'active'\n )\n )\n s = s.group_by(tag_column).order_by(\n func.count(package_tag.c.package_id).desc()\n ).limit(limit)\n res_col = model.Session.execute(s).fetchall()\n if returned_tag_info in ('id', 'name'):\n return res_col\n elif returned_tag_info == 'object':\n res_tags = [\n (model.Session.query(model.Tag).get(text_type(tag_id)), val)\n for tag_id, val in res_col\n ]\n return res_tags\n\n @classmethod\n def top_package_creators(cls, limit=10):\n userid_count = model.Session.query(\n model.Package.creator_user_id,\n func.count(model.Package.creator_user_id)\n ).filter(model.Package.state == 'active'\n ).filter(model.Package.private == False).group_by(\n model.Package.creator_user_id\n ).order_by(func.count(model.Package.creator_user_id).desc()\n ).limit(limit).all()\n user_count = [\n (model.Session.query(model.User).get(text_type(user_id)), count)\n for user_id, count in userid_count\n if user_id\n ]\n return user_count\n", "path": "ckanext/stats/stats.py"}]} | 1,844 | 434 |
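A quick way to confirm the removal in the record above took effect is to check that the `Stats` class no longer exposes the deleted method; a small hypothetical smoke check, not part of the patch:

```python
import ckanext.stats.stats as stats_lib

# After the patch only the surviving aggregations should remain.
assert not hasattr(stats_lib.Stats, "top_rated_packages")
assert hasattr(stats_lib.Stats, "largest_groups")
assert hasattr(stats_lib.Stats, "top_tags")
assert hasattr(stats_lib.Stats, "top_package_creators")
```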
gh_patches_debug_582 | rasdani/github-patches | git_diff | pex-tool__pex-777 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 1.6.12
On the docket:
+ [x] PythonInterpreter: support python binary names with single letter suffixes #769
+ [x] Pex should support some form of verifiably reproducible resolve. #772
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '1.6.11'
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '1.6.11'
+__version__ = '1.6.12'
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '1.6.11'\n+__version__ = '1.6.12'\n", "issue": "Release 1.6.12\nOn the docket:\r\n+ [x] PythonInterpreter: support python binary names with single letter suffixes #769\r\n+ [x] Pex should support some form of verifiably reproducible resolve. #772\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.6.11'\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.6.12'\n", "path": "pex/version.py"}]} | 365 | 96 |
gh_patches_debug_23322 | rasdani/github-patches | git_diff | fossasia__open-event-server-9034 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
allow multiple stations for the same location
<!--
(Thanks for sending a pull request! Please make sure you click the link above to view the contribution guidelines, then fill out the blanks below.)
-->
<!-- Add the issue number that is fixed by this PR (In the form Fixes #123) -->
Fixes #8958
#### Short description of what this resolves:
- fix issue to allow multiple stations for the same location
#### Changes proposed in this pull request:
- allow multiple stations for the same location
#### Checklist
- [x] I have read the [Contribution & Best practices Guide](https://blog.fossasia.org/open-source-developer-guide-and-best-practices-at-fossasia) and my PR follows them.
- [x] My branch is up-to-date with the Upstream `development` branch.
- [ ] The unit tests pass locally with my changes <!-- use `nosetests tests/` to run all the tests -->
- [ ] I have added tests that prove my fix is effective or that my feature works
- [ ] I have added necessary documentation (if appropriate)
<!-- If an existing function does not have a docstring, please add one -->
- [ ] All the functions created/modified in this PR contain relevant docstrings.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/station.py`
Content:
```
1 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
2 from flask_rest_jsonapi.exceptions import ObjectNotFound
3
4 from app.api.helpers.db import safe_query_kwargs
5 from app.api.helpers.permission_manager import has_access
6 from app.api.helpers.permissions import jwt_required
7 from app.api.helpers.utilities import require_relationship
8 from app.api.schema.station import StationSchema
9 from app.models import db
10 from app.models.event import Event
11 from app.models.microlocation import Microlocation
12 from app.models.station import Station
13
14
15 class StationList(ResourceList):
16 """Create and List Station"""
17
18 def query(self, view_kwargs):
19 """
20 query method for different view_kwargs
21 :param view_kwargs:
22 :return:
23 """
24 query_ = self.session.query(Station)
25 if view_kwargs.get('event_id'):
26 event = safe_query_kwargs(Event, view_kwargs, 'event_id')
27 query_ = query_.filter_by(event_id=event.id)
28
29 elif view_kwargs.get('microlocation_id'):
30 event = safe_query_kwargs(Microlocation, view_kwargs, 'microlocation_id')
31 query_ = query_.filter_by(microlocation_id=event.id)
32
33 return query_
34
35 view_kwargs = True
36 schema = StationSchema
37 data_layer = {
38 'session': db.session,
39 'model': Station,
40 'methods': {'query': query},
41 }
42
43
44 class StationDetail(ResourceDetail):
45 """Station detail by id"""
46
47 @staticmethod
48 def before_patch(args, kwargs, data):
49 """
50 before patch method
51 :param args:
52 :param kwargs:
53 :param data:
54 :return:
55 """
56 require_relationship(['event'], data)
57 if not has_access('is_coorganizer', event_id=data['event']):
58 raise ObjectNotFound(
59 {'parameter': 'event'},
60 f"Event: {data['event']} not found {args} {kwargs}",
61 )
62
63 if data.get('microlocation'):
64 require_relationship(['microlocation'], data)
65 else:
66 if data['station_type'] in ('check in', 'check out', 'daily'):
67 raise ObjectNotFound(
68 {'parameter': 'microlocation'},
69 "Microlocation: microlocation_id is missing from your request.",
70 )
71
72 schema = StationSchema
73 data_layer = {
74 'session': db.session,
75 'model': Station,
76 }
77
78
79 class StationRelationship(ResourceRelationship):
80 """Station Relationship (Required)"""
81
82 decorators = (jwt_required,)
83 methods = ['GET', 'PATCH']
84 schema = StationSchema
85 data_layer = {'session': db.session, 'model': Station}
86
87
88 class StationListPost(ResourceList):
89 """Create and List Station"""
90
91 @staticmethod
92 def before_post(args, kwargs, data):
93 """
94 method to check for required relationship with event and microlocation
95 :param data:
96 :param args:
97 :param kwargs:
98 :return:
99 """
100 require_relationship(['event'], data)
101 if not has_access('is_coorganizer', event_id=data['event']):
102 raise ObjectNotFound(
103 {'parameter': 'event'},
104 f"Event: {data['event']} not found {args} {kwargs}",
105 )
106
107 if data.get('microlocation'):
108 require_relationship(['microlocation'], data)
109 else:
110 if data['station_type'] in ('check in', 'check out', 'daily'):
111 raise ObjectNotFound(
112 {'parameter': 'microlocation'},
113 "Microlocation: missing from your request.",
114 )
115
116 schema = StationSchema
117 methods = [
118 'POST',
119 ]
120 data_layer = {
121 'session': db.session,
122 'model': Station,
123 }
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/station.py b/app/api/station.py
--- a/app/api/station.py
+++ b/app/api/station.py
@@ -63,7 +63,7 @@
if data.get('microlocation'):
require_relationship(['microlocation'], data)
else:
- if data['station_type'] in ('check in', 'check out', 'daily'):
+ if data['station_type'] in ('check in', 'check out'):
raise ObjectNotFound(
{'parameter': 'microlocation'},
"Microlocation: microlocation_id is missing from your request.",
@@ -107,7 +107,7 @@
if data.get('microlocation'):
require_relationship(['microlocation'], data)
else:
- if data['station_type'] in ('check in', 'check out', 'daily'):
+ if data['station_type'] in ('check in', 'check out'):
raise ObjectNotFound(
{'parameter': 'microlocation'},
"Microlocation: missing from your request.",
| {"golden_diff": "diff --git a/app/api/station.py b/app/api/station.py\n--- a/app/api/station.py\n+++ b/app/api/station.py\n@@ -63,7 +63,7 @@\n if data.get('microlocation'):\n require_relationship(['microlocation'], data)\n else:\n- if data['station_type'] in ('check in', 'check out', 'daily'):\n+ if data['station_type'] in ('check in', 'check out'):\n raise ObjectNotFound(\n {'parameter': 'microlocation'},\n \"Microlocation: microlocation_id is missing from your request.\",\n@@ -107,7 +107,7 @@\n if data.get('microlocation'):\n require_relationship(['microlocation'], data)\n else:\n- if data['station_type'] in ('check in', 'check out', 'daily'):\n+ if data['station_type'] in ('check in', 'check out'):\n raise ObjectNotFound(\n {'parameter': 'microlocation'},\n \"Microlocation: missing from your request.\",\n", "issue": "allow mutiple station for same location\n<!--\r\n(Thanks for sending a pull request! Please make sure you click the link above to view the contribution guidelines, then fill out the blanks below.)\r\n-->\r\n<!-- Add the issue number that is fixed by this PR (In the form Fixes #123) -->\r\n\r\nFixes #8958 \r\n\r\n#### Short description of what this resolves:\r\n- fix issue to allow mutiple station for same location\r\n\r\n#### Changes proposed in this pull request:\r\n\r\n- allow mutiple station for same location\r\n\r\n#### Checklist\r\n\r\n- [x] I have read the [Contribution & Best practices Guide](https://blog.fossasia.org/open-source-developer-guide-and-best-practices-at-fossasia) and my PR follows them.\r\n- [x] My branch is up-to-date with the Upstream `development` branch.\r\n- [ ] The unit tests pass locally with my changes <!-- use `nosetests tests/` to run all the tests -->\r\n- [ ] I have added tests that prove my fix is effective or that my feature works\r\n- [ ] I have added necessary documentation (if appropriate)\r\n<!-- If an existing function does not have a docstring, please add one -->\r\n- [ ] All the functions created/modified in this PR contain relevant docstrings.\r\n\n", "before_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\n\nfrom app.api.helpers.db import safe_query_kwargs\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.station import StationSchema\nfrom app.models import db\nfrom app.models.event import Event\nfrom app.models.microlocation import Microlocation\nfrom app.models.station import Station\n\n\nclass StationList(ResourceList):\n \"\"\"Create and List Station\"\"\"\n\n def query(self, view_kwargs):\n \"\"\"\n query method for different view_kwargs\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(Station)\n if view_kwargs.get('event_id'):\n event = safe_query_kwargs(Event, view_kwargs, 'event_id')\n query_ = query_.filter_by(event_id=event.id)\n\n elif view_kwargs.get('microlocation_id'):\n event = safe_query_kwargs(Microlocation, view_kwargs, 'microlocation_id')\n query_ = query_.filter_by(microlocation_id=event.id)\n\n return query_\n\n view_kwargs = True\n schema = StationSchema\n data_layer = {\n 'session': db.session,\n 'model': Station,\n 'methods': {'query': query},\n }\n\n\nclass StationDetail(ResourceDetail):\n \"\"\"Station detail by id\"\"\"\n\n @staticmethod\n def before_patch(args, kwargs, data):\n \"\"\"\n before patch method\n :param args:\n 
:param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ObjectNotFound(\n {'parameter': 'event'},\n f\"Event: {data['event']} not found {args} {kwargs}\",\n )\n\n if data.get('microlocation'):\n require_relationship(['microlocation'], data)\n else:\n if data['station_type'] in ('check in', 'check out', 'daily'):\n raise ObjectNotFound(\n {'parameter': 'microlocation'},\n \"Microlocation: microlocation_id is missing from your request.\",\n )\n\n schema = StationSchema\n data_layer = {\n 'session': db.session,\n 'model': Station,\n }\n\n\nclass StationRelationship(ResourceRelationship):\n \"\"\"Station Relationship (Required)\"\"\"\n\n decorators = (jwt_required,)\n methods = ['GET', 'PATCH']\n schema = StationSchema\n data_layer = {'session': db.session, 'model': Station}\n\n\nclass StationListPost(ResourceList):\n \"\"\"Create and List Station\"\"\"\n\n @staticmethod\n def before_post(args, kwargs, data):\n \"\"\"\n method to check for required relationship with event and microlocation\n :param data:\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n require_relationship(['event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ObjectNotFound(\n {'parameter': 'event'},\n f\"Event: {data['event']} not found {args} {kwargs}\",\n )\n\n if data.get('microlocation'):\n require_relationship(['microlocation'], data)\n else:\n if data['station_type'] in ('check in', 'check out', 'daily'):\n raise ObjectNotFound(\n {'parameter': 'microlocation'},\n \"Microlocation: missing from your request.\",\n )\n\n schema = StationSchema\n methods = [\n 'POST',\n ]\n data_layer = {\n 'session': db.session,\n 'model': Station,\n }\n", "path": "app/api/station.py"}], "after_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\n\nfrom app.api.helpers.db import safe_query_kwargs\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.station import StationSchema\nfrom app.models import db\nfrom app.models.event import Event\nfrom app.models.microlocation import Microlocation\nfrom app.models.station import Station\n\n\nclass StationList(ResourceList):\n \"\"\"Create and List Station\"\"\"\n\n def query(self, view_kwargs):\n \"\"\"\n query method for different view_kwargs\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(Station)\n if view_kwargs.get('event_id'):\n event = safe_query_kwargs(Event, view_kwargs, 'event_id')\n query_ = query_.filter_by(event_id=event.id)\n\n elif view_kwargs.get('microlocation_id'):\n event = safe_query_kwargs(Microlocation, view_kwargs, 'microlocation_id')\n query_ = query_.filter_by(microlocation_id=event.id)\n\n return query_\n\n view_kwargs = True\n schema = StationSchema\n data_layer = {\n 'session': db.session,\n 'model': Station,\n 'methods': {'query': query},\n }\n\n\nclass StationDetail(ResourceDetail):\n \"\"\"Station detail by id\"\"\"\n\n @staticmethod\n def before_patch(args, kwargs, data):\n \"\"\"\n before patch method\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ObjectNotFound(\n {'parameter': 'event'},\n f\"Event: {data['event']} not found 
{args} {kwargs}\",\n )\n\n if data.get('microlocation'):\n require_relationship(['microlocation'], data)\n else:\n if data['station_type'] in ('check in', 'check out'):\n raise ObjectNotFound(\n {'parameter': 'microlocation'},\n \"Microlocation: microlocation_id is missing from your request.\",\n )\n\n schema = StationSchema\n data_layer = {\n 'session': db.session,\n 'model': Station,\n }\n\n\nclass StationRelationship(ResourceRelationship):\n \"\"\"Station Relationship (Required)\"\"\"\n\n decorators = (jwt_required,)\n methods = ['GET', 'PATCH']\n schema = StationSchema\n data_layer = {'session': db.session, 'model': Station}\n\n\nclass StationListPost(ResourceList):\n \"\"\"Create and List Station\"\"\"\n\n @staticmethod\n def before_post(args, kwargs, data):\n \"\"\"\n method to check for required relationship with event and microlocation\n :param data:\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n require_relationship(['event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ObjectNotFound(\n {'parameter': 'event'},\n f\"Event: {data['event']} not found {args} {kwargs}\",\n )\n\n if data.get('microlocation'):\n require_relationship(['microlocation'], data)\n else:\n if data['station_type'] in ('check in', 'check out'):\n raise ObjectNotFound(\n {'parameter': 'microlocation'},\n \"Microlocation: missing from your request.\",\n )\n\n schema = StationSchema\n methods = [\n 'POST',\n ]\n data_layer = {\n 'session': db.session,\n 'model': Station,\n }\n", "path": "app/api/station.py"}]} | 1,565 | 222 |
gh_patches_debug_41468 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-323 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use sentinel value for input parameters that aren't sent by the clients
When using input types with optional fields we cannot differentiate between fields that have been sent as null and fields that haven't been sent at all.
So I think we should use a sentinel value that indicates the field is unset and also behaves as falsy:
```python
from typing import Any

class _Unset:
    def __bool__(self): return False

UNSET = _Unset()

# this utility might be useful, so we don't have to use an internal representation
def is_unset(value: Any):
    return value is UNSET
```
then we can use this class when instantiating the input types for a resolver:)
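For illustration, a resolver receiving such an input could then tell the three cases apart roughly like this (a sketch only; `apply_user_update`, `user` and the `name` field are hypothetical names, not part of the library):

```python
# Sketch: consuming the sentinel on the resolver side.
# Assumes UNSET / is_unset from the snippet above and an input object
# whose unsent fields default to UNSET.
def apply_user_update(user, user_input):
    if is_unset(user_input.name):
        return user.name        # field never sent: keep the stored value
    if user_input.name is None:
        return None             # field explicitly sent as null: clear it
    return user_input.name      # concrete value sent: overwrite
```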
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/utils/arguments.py`
Content:
```
1 import enum
2 from dataclasses import is_dataclass
3 from datetime import date, datetime, time
4
5 from ..exceptions import UnsupportedTypeError
6 from .str_converters import to_camel_case, to_snake_case
7 from .typing import get_list_annotation, get_optional_annotation, is_list, is_optional
8
9
10 SCALAR_TYPES = [int, str, float, bytes, bool, datetime, date, time]
11
12
13 def _to_type(value, annotation):
14 if value is None:
15 return None
16
17 if is_optional(annotation):
18 annotation = get_optional_annotation(annotation)
19
20 # TODO: change this to be a is_scalar util and make sure it works with any scalar
21 if getattr(annotation, "__supertype__", annotation) in SCALAR_TYPES:
22 return value
23
24 # Convert Enum fields to instances using the value. This is safe
25 # because graphql-core has already validated the input.
26 if isinstance(annotation, enum.EnumMeta):
27 return annotation(value)
28
29 if is_list(annotation):
30 annotation = get_list_annotation(annotation)
31
32 return [_to_type(x, annotation) for x in value]
33
34 if is_dataclass(annotation):
35 fields = annotation.__dataclass_fields__
36
37 kwargs = {}
38
39 for name, field in fields.items():
40 dict_name = name
41
42 if hasattr(field, "field_name") and field.field_name:
43 dict_name = field.field_name
44 else:
45 dict_name = to_camel_case(name)
46
47 kwargs[name] = _to_type(value.get(dict_name), field.type)
48
49 return annotation(**kwargs)
50
51 raise UnsupportedTypeError(annotation)
52
53
54 def convert_args(args, annotations):
55 """Converts a nested dictionary to a dictionary of strawberry input types."""
56
57 converted_args = {}
58
59 for key, value in args.items():
60 key = to_snake_case(key)
61 annotation = annotations[key]
62
63 converted_args[key] = _to_type(value, annotation)
64
65 return converted_args
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/utils/arguments.py b/strawberry/utils/arguments.py
--- a/strawberry/utils/arguments.py
+++ b/strawberry/utils/arguments.py
@@ -1,19 +1,49 @@
import enum
+import typing
from dataclasses import is_dataclass
from datetime import date, datetime, time
from ..exceptions import UnsupportedTypeError
-from .str_converters import to_camel_case, to_snake_case
+from .str_converters import to_camel_case
from .typing import get_list_annotation, get_optional_annotation, is_list, is_optional
SCALAR_TYPES = [int, str, float, bytes, bool, datetime, date, time]
-def _to_type(value, annotation):
+class _Unset:
+ def __str__(self):
+ return ""
+
+ def __bool__(self):
+ return False
+
+
+UNSET = _Unset()
+
+
+def is_unset(value: typing.Any) -> bool:
+ return value is UNSET
+
+
+def convert_args(
+ value: typing.Union[typing.Dict[str, typing.Any], typing.Any],
+ annotation: typing.Union[typing.Dict[str, typing.Type], typing.Type],
+):
+ """Converts a nested dictionary to a dictionary of actual types.
+
+ It deals with conversion of input types to proper dataclasses and
+ also uses a sentinel value for unset values."""
+
+ if annotation == {}:
+ return value
+
if value is None:
return None
+ if is_unset(value):
+ return value
+
if is_optional(annotation):
annotation = get_optional_annotation(annotation)
@@ -24,19 +54,27 @@
# Convert Enum fields to instances using the value. This is safe
# because graphql-core has already validated the input.
if isinstance(annotation, enum.EnumMeta):
- return annotation(value)
+ return annotation(value) # type: ignore
if is_list(annotation):
annotation = get_list_annotation(annotation)
- return [_to_type(x, annotation) for x in value]
+ return [convert_args(x, annotation) for x in value]
+
+ fields = None
- if is_dataclass(annotation):
- fields = annotation.__dataclass_fields__
+ # we receive dicts when converting resolvers arguments to
+ # actual types
+ if isinstance(annotation, dict):
+ fields = annotation.items()
+ elif is_dataclass(annotation):
+ fields = annotation.__dataclass_fields__.items()
+
+ if fields:
kwargs = {}
- for name, field in fields.items():
+ for name, field in fields:
dict_name = name
if hasattr(field, "field_name") and field.field_name:
@@ -44,22 +82,19 @@
else:
dict_name = to_camel_case(name)
- kwargs[name] = _to_type(value.get(dict_name), field.type)
-
- return annotation(**kwargs)
-
- raise UnsupportedTypeError(annotation)
-
-
-def convert_args(args, annotations):
- """Converts a nested dictionary to a dictionary of strawberry input types."""
+ # dataclasses field have a .type attribute
+ if hasattr(field, "type"):
+ field_type = field.type
+ # meanwhile when using dicts the value of the field is
+ # the actual type, for example in: { 'name': str }
+ else:
+ field_type = field
- converted_args = {}
+ kwargs[name] = convert_args(value.get(dict_name, UNSET), field_type)
- for key, value in args.items():
- key = to_snake_case(key)
- annotation = annotations[key]
+ if is_dataclass(annotation):
+ return annotation(**kwargs) # type: ignore
- converted_args[key] = _to_type(value, annotation)
+ return kwargs
- return converted_args
+ raise UnsupportedTypeError(annotation)
| {"golden_diff": "diff --git a/strawberry/utils/arguments.py b/strawberry/utils/arguments.py\n--- a/strawberry/utils/arguments.py\n+++ b/strawberry/utils/arguments.py\n@@ -1,19 +1,49 @@\n import enum\n+import typing\n from dataclasses import is_dataclass\n from datetime import date, datetime, time\n \n from ..exceptions import UnsupportedTypeError\n-from .str_converters import to_camel_case, to_snake_case\n+from .str_converters import to_camel_case\n from .typing import get_list_annotation, get_optional_annotation, is_list, is_optional\n \n \n SCALAR_TYPES = [int, str, float, bytes, bool, datetime, date, time]\n \n \n-def _to_type(value, annotation):\n+class _Unset:\n+ def __str__(self):\n+ return \"\"\n+\n+ def __bool__(self):\n+ return False\n+\n+\n+UNSET = _Unset()\n+\n+\n+def is_unset(value: typing.Any) -> bool:\n+ return value is UNSET\n+\n+\n+def convert_args(\n+ value: typing.Union[typing.Dict[str, typing.Any], typing.Any],\n+ annotation: typing.Union[typing.Dict[str, typing.Type], typing.Type],\n+):\n+ \"\"\"Converts a nested dictionary to a dictionary of actual types.\n+\n+ It deals with conversion of input types to proper dataclasses and\n+ also uses a sentinel value for unset values.\"\"\"\n+\n+ if annotation == {}:\n+ return value\n+\n if value is None:\n return None\n \n+ if is_unset(value):\n+ return value\n+\n if is_optional(annotation):\n annotation = get_optional_annotation(annotation)\n \n@@ -24,19 +54,27 @@\n # Convert Enum fields to instances using the value. This is safe\n # because graphql-core has already validated the input.\n if isinstance(annotation, enum.EnumMeta):\n- return annotation(value)\n+ return annotation(value) # type: ignore\n \n if is_list(annotation):\n annotation = get_list_annotation(annotation)\n \n- return [_to_type(x, annotation) for x in value]\n+ return [convert_args(x, annotation) for x in value]\n+\n+ fields = None\n \n- if is_dataclass(annotation):\n- fields = annotation.__dataclass_fields__\n+ # we receive dicts when converting resolvers arguments to\n+ # actual types\n+ if isinstance(annotation, dict):\n+ fields = annotation.items()\n \n+ elif is_dataclass(annotation):\n+ fields = annotation.__dataclass_fields__.items()\n+\n+ if fields:\n kwargs = {}\n \n- for name, field in fields.items():\n+ for name, field in fields:\n dict_name = name\n \n if hasattr(field, \"field_name\") and field.field_name:\n@@ -44,22 +82,19 @@\n else:\n dict_name = to_camel_case(name)\n \n- kwargs[name] = _to_type(value.get(dict_name), field.type)\n-\n- return annotation(**kwargs)\n-\n- raise UnsupportedTypeError(annotation)\n-\n-\n-def convert_args(args, annotations):\n- \"\"\"Converts a nested dictionary to a dictionary of strawberry input types.\"\"\"\n+ # dataclasses field have a .type attribute\n+ if hasattr(field, \"type\"):\n+ field_type = field.type\n+ # meanwhile when using dicts the value of the field is\n+ # the actual type, for example in: { 'name': str }\n+ else:\n+ field_type = field\n \n- converted_args = {}\n+ kwargs[name] = convert_args(value.get(dict_name, UNSET), field_type)\n \n- for key, value in args.items():\n- key = to_snake_case(key)\n- annotation = annotations[key]\n+ if is_dataclass(annotation):\n+ return annotation(**kwargs) # type: ignore\n \n- converted_args[key] = _to_type(value, annotation)\n+ return kwargs\n \n- return converted_args\n+ raise UnsupportedTypeError(annotation)\n", "issue": "Use sentinel value for input parameters that aren't sent by the clients\nWhen using input types with optional fields we cannot differentiate by 
fields that have sent as null and fields that haven't been sent at all.\r\n\r\nSo I think we should use a sentinel value that tells the field is unset, and also behaves as falsy:\r\n\r\n```python\r\nclass _Unset:\r\n def __bool__(self): return False\r\n\r\nUNSET = _Unset()\r\n\r\n# this utility might be useful, so we don't have to use an internal representation\r\ndef is_unset(value: Any):\r\n return value is UNSET\r\n```\r\n\r\nthen we can use this class when instantiating the input types for a resolver:)\n", "before_files": [{"content": "import enum\nfrom dataclasses import is_dataclass\nfrom datetime import date, datetime, time\n\nfrom ..exceptions import UnsupportedTypeError\nfrom .str_converters import to_camel_case, to_snake_case\nfrom .typing import get_list_annotation, get_optional_annotation, is_list, is_optional\n\n\nSCALAR_TYPES = [int, str, float, bytes, bool, datetime, date, time]\n\n\ndef _to_type(value, annotation):\n if value is None:\n return None\n\n if is_optional(annotation):\n annotation = get_optional_annotation(annotation)\n\n # TODO: change this to be a is_scalar util and make sure it works with any scalar\n if getattr(annotation, \"__supertype__\", annotation) in SCALAR_TYPES:\n return value\n\n # Convert Enum fields to instances using the value. This is safe\n # because graphql-core has already validated the input.\n if isinstance(annotation, enum.EnumMeta):\n return annotation(value)\n\n if is_list(annotation):\n annotation = get_list_annotation(annotation)\n\n return [_to_type(x, annotation) for x in value]\n\n if is_dataclass(annotation):\n fields = annotation.__dataclass_fields__\n\n kwargs = {}\n\n for name, field in fields.items():\n dict_name = name\n\n if hasattr(field, \"field_name\") and field.field_name:\n dict_name = field.field_name\n else:\n dict_name = to_camel_case(name)\n\n kwargs[name] = _to_type(value.get(dict_name), field.type)\n\n return annotation(**kwargs)\n\n raise UnsupportedTypeError(annotation)\n\n\ndef convert_args(args, annotations):\n \"\"\"Converts a nested dictionary to a dictionary of strawberry input types.\"\"\"\n\n converted_args = {}\n\n for key, value in args.items():\n key = to_snake_case(key)\n annotation = annotations[key]\n\n converted_args[key] = _to_type(value, annotation)\n\n return converted_args\n", "path": "strawberry/utils/arguments.py"}], "after_files": [{"content": "import enum\nimport typing\nfrom dataclasses import is_dataclass\nfrom datetime import date, datetime, time\n\nfrom ..exceptions import UnsupportedTypeError\nfrom .str_converters import to_camel_case\nfrom .typing import get_list_annotation, get_optional_annotation, is_list, is_optional\n\n\nSCALAR_TYPES = [int, str, float, bytes, bool, datetime, date, time]\n\n\nclass _Unset:\n def __str__(self):\n return \"\"\n\n def __bool__(self):\n return False\n\n\nUNSET = _Unset()\n\n\ndef is_unset(value: typing.Any) -> bool:\n return value is UNSET\n\n\ndef convert_args(\n value: typing.Union[typing.Dict[str, typing.Any], typing.Any],\n annotation: typing.Union[typing.Dict[str, typing.Type], typing.Type],\n):\n \"\"\"Converts a nested dictionary to a dictionary of actual types.\n\n It deals with conversion of input types to proper dataclasses and\n also uses a sentinel value for unset values.\"\"\"\n\n if annotation == {}:\n return value\n\n if value is None:\n return None\n\n if is_unset(value):\n return value\n\n if is_optional(annotation):\n annotation = get_optional_annotation(annotation)\n\n # TODO: change this to be a is_scalar util and make sure it works 
with any scalar\n if getattr(annotation, \"__supertype__\", annotation) in SCALAR_TYPES:\n return value\n\n # Convert Enum fields to instances using the value. This is safe\n # because graphql-core has already validated the input.\n if isinstance(annotation, enum.EnumMeta):\n return annotation(value) # type: ignore\n\n if is_list(annotation):\n annotation = get_list_annotation(annotation)\n\n return [convert_args(x, annotation) for x in value]\n\n fields = None\n\n # we receive dicts when converting resolvers arguments to\n # actual types\n if isinstance(annotation, dict):\n fields = annotation.items()\n\n elif is_dataclass(annotation):\n fields = annotation.__dataclass_fields__.items()\n\n if fields:\n kwargs = {}\n\n for name, field in fields:\n dict_name = name\n\n if hasattr(field, \"field_name\") and field.field_name:\n dict_name = field.field_name\n else:\n dict_name = to_camel_case(name)\n\n # dataclasses field have a .type attribute\n if hasattr(field, \"type\"):\n field_type = field.type\n # meanwhile when using dicts the value of the field is\n # the actual type, for example in: { 'name': str }\n else:\n field_type = field\n\n kwargs[name] = convert_args(value.get(dict_name, UNSET), field_type)\n\n if is_dataclass(annotation):\n return annotation(**kwargs) # type: ignore\n\n return kwargs\n\n raise UnsupportedTypeError(annotation)\n", "path": "strawberry/utils/arguments.py"}]} | 938 | 872 |
gh_patches_debug_34000 | rasdani/github-patches | git_diff | AlexsLemonade__refinebio-2280 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Engagement bot thinks every user is a returning user
### Context
https://alexslemonade.slack.com/archives/CRK42AL1Y/p1587988808265500
### Problem or idea
@dvenprasad says 6 of those are new users. There must be a bug in the queries it uses or something.
### Solution or next step
Fix the engagement bot so it reports new users as new users.
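The classification likely hinges on whether a user has any dataset annotations from before the reporting window; a minimal sketch of the intended check (assuming Django ORM semantics, variable names follow the management command shown below):

```python
# A user is "returning" only if they have downloads recorded before the window.
had_earlier_downloads = DatasetAnnotation.objects.filter(
    created_at__lt=start_time, dataset__email_address=email
).exists()

if had_earlier_downloads:
    returning_users += text
else:
    new_users += text
```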
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `api/data_refinery_api/management/commands/post_downloads_summary.py`
Content:
```
1 import datetime
2
3 from django.conf import settings
4 from django.core.management.base import BaseCommand
5 from django.utils import timezone
6
7 import requests
8
9 from data_refinery_common.models import DatasetAnnotation
10
11
12 class Command(BaseCommand):
13 help = "Post downloads summary to slack"
14
15 def add_arguments(self, parser):
16 parser.add_argument(
17 "--days",
18 type=int,
19 default=7, # default to a week
20 help=("Number of days in the past for which to build the stats"),
21 )
22 parser.add_argument(
23 "--channel",
24 type=str,
25 default="ccdl-general",
26 help=("Optional parameter to choose the channel where the message will be posted."),
27 )
28
29 def handle(self, *args, **options):
30 days = options["days"]
31 start_time = timezone.now() - datetime.timedelta(days=days)
32
33 annotation_queryset = DatasetAnnotation.objects.filter(
34 created_at__gt=start_time
35 ).prefetch_related("dataset")
36 annotations = [
37 annotation
38 for annotation in annotation_queryset
39 if annotation.data["start"] and should_display_email(annotation.dataset.email_address)
40 ]
41
42 unique_users = list(set(annotation.dataset.email_address for annotation in annotations))
43 unique_ips = list(set(annotation.data["ip"] for annotation in annotations))
44
45 if unique_users:
46 fallback_text = "In the last {0} days, {1} users downloaded datasets from {2} locations.".format(
47 days, len(unique_users), len(unique_ips)
48 )
49 else:
50 fallback_text = "There were no downloads in the last {0} days.".format(days)
51
52 new_users = ""
53 returning_users = ""
54 for email in unique_users:
55 user_annotations = annotation_queryset.filter(dataset__email_address=email)
56 total_downloads = user_annotations.count()
57 unique_locations = list(set(annotation.data["ip"] for annotation in user_annotations))
58 locations = ", ".join(get_ip_location(ip) for ip in unique_locations)
59 is_new_user = DatasetAnnotation.objects.filter(
60 created_at__lt=start_time, dataset__email_address=email
61 )
62 text = "{0} | {1} downloads from {2}\n".format(email, total_downloads, locations)
63 if is_new_user:
64 new_users += text
65 else:
66 returning_users += text
67
68 blocks = [
69 {
70 "type": "section",
71 "text": {"type": "plain_text", "emoji": True, "text": fallback_text},
72 }
73 ]
74 if new_users:
75 blocks.append(
76 {
77 "type": "section",
78 "text": {"type": "mrkdwn", "text": "*New users* \n" + new_users,},
79 }
80 )
81 if returning_users:
82 blocks.append(
83 {
84 "type": "section",
85 "text": {"type": "mrkdwn", "text": "*Returning users* \n" + returning_users,},
86 }
87 )
88
89 # Post to slack
90 requests.post(
91 settings.ENGAGEMENTBOT_WEBHOOK,
92 json={
93 "username": "EngagementBot",
94 "icon_emoji": ":halal:",
95 "channel": "#" + options["channel"],
96 "text": fallback_text,
97 "blocks": blocks,
98 },
99 headers={"Content-Type": "application/json"},
100 timeout=10,
101 )
102
103
104 def should_display_email(email: str) -> bool:
105 """ Returns true if the given email is not associated with the CCDL suers """
106 if not email:
107 return False
108 return not (
109 email.startswith("cansav09")
110 or email.startswith("arielsvn")
111 or email.startswith("jaclyn.n.taroni")
112 or email.startswith("kurt.wheeler")
113 or email.startswith("greenescientist")
114 or email.startswith("miserlou")
115 or email.startswith("d.prasad")
116 or email.endswith("@alexslemonade.org")
117 or email is ("[email protected]")
118 or email is ("[email protected]")
119 )
120
121
122 def get_ip_location(remote_ip):
123 try:
124 data = requests.get("https://ipapi.co/" + remote_ip + "/json/", timeout=10).json()
125 return "{0}, {1}".format(data["city"], data["country_name"])
126 except Exception:
127 return remote_ip
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/api/data_refinery_api/management/commands/post_downloads_summary.py b/api/data_refinery_api/management/commands/post_downloads_summary.py
--- a/api/data_refinery_api/management/commands/post_downloads_summary.py
+++ b/api/data_refinery_api/management/commands/post_downloads_summary.py
@@ -42,28 +42,30 @@
unique_users = list(set(annotation.dataset.email_address for annotation in annotations))
unique_ips = list(set(annotation.data["ip"] for annotation in annotations))
- if unique_users:
- fallback_text = "In the last {0} days, {1} users downloaded datasets from {2} locations.".format(
- days, len(unique_users), len(unique_ips)
- )
- else:
- fallback_text = "There were no downloads in the last {0} days.".format(days)
-
new_users = ""
returning_users = ""
+ total_downloads = 0
for email in unique_users:
user_annotations = annotation_queryset.filter(dataset__email_address=email)
- total_downloads = user_annotations.count()
+ downloads = user_annotations.count()
+ total_downloads += downloads
unique_locations = list(set(annotation.data["ip"] for annotation in user_annotations))
locations = ", ".join(get_ip_location(ip) for ip in unique_locations)
- is_new_user = DatasetAnnotation.objects.filter(
+ is_returning_user = DatasetAnnotation.objects.filter(
created_at__lt=start_time, dataset__email_address=email
)
- text = "{0} | {1} downloads from {2}\n".format(email, total_downloads, locations)
- if is_new_user:
- new_users += text
- else:
+ text = "{0} | {1} downloads from {2}\n".format(email, downloads, locations)
+ if is_returning_user:
returning_users += text
+ else:
+ new_users += text
+
+ if total_downloads > 0:
+ fallback_text = "In the last {0} days, {1} users downloaded {2} datasets from {3} locations.".format(
+ days, len(unique_users), total_downloads, len(unique_ips)
+ )
+ else:
+ fallback_text = "There were no downloads in the last {0} days.".format(days)
blocks = [
{
| {"golden_diff": "diff --git a/api/data_refinery_api/management/commands/post_downloads_summary.py b/api/data_refinery_api/management/commands/post_downloads_summary.py\n--- a/api/data_refinery_api/management/commands/post_downloads_summary.py\n+++ b/api/data_refinery_api/management/commands/post_downloads_summary.py\n@@ -42,28 +42,30 @@\n unique_users = list(set(annotation.dataset.email_address for annotation in annotations))\n unique_ips = list(set(annotation.data[\"ip\"] for annotation in annotations))\n \n- if unique_users:\n- fallback_text = \"In the last {0} days, {1} users downloaded datasets from {2} locations.\".format(\n- days, len(unique_users), len(unique_ips)\n- )\n- else:\n- fallback_text = \"There were no downloads in the last {0} days.\".format(days)\n-\n new_users = \"\"\n returning_users = \"\"\n+ total_downloads = 0\n for email in unique_users:\n user_annotations = annotation_queryset.filter(dataset__email_address=email)\n- total_downloads = user_annotations.count()\n+ downloads = user_annotations.count()\n+ total_downloads += downloads\n unique_locations = list(set(annotation.data[\"ip\"] for annotation in user_annotations))\n locations = \", \".join(get_ip_location(ip) for ip in unique_locations)\n- is_new_user = DatasetAnnotation.objects.filter(\n+ is_returning_user = DatasetAnnotation.objects.filter(\n created_at__lt=start_time, dataset__email_address=email\n )\n- text = \"{0} | {1} downloads from {2}\\n\".format(email, total_downloads, locations)\n- if is_new_user:\n- new_users += text\n- else:\n+ text = \"{0} | {1} downloads from {2}\\n\".format(email, downloads, locations)\n+ if is_returning_user:\n returning_users += text\n+ else:\n+ new_users += text\n+\n+ if total_downloads > 0:\n+ fallback_text = \"In the last {0} days, {1} users downloaded {2} datasets from {3} locations.\".format(\n+ days, len(unique_users), total_downloads, len(unique_ips)\n+ )\n+ else:\n+ fallback_text = \"There were no downloads in the last {0} days.\".format(days)\n \n blocks = [\n {\n", "issue": "Engagement bot thinks every user is a returning user\n### Context\r\n\r\nhttps://alexslemonade.slack.com/archives/CRK42AL1Y/p1587988808265500\r\n\r\n### Problem or idea\r\n\r\n@dvenprasad says 6 of those are new users. 
There must be a bug in the queries it uses or something.\r\n\r\n### Solution or next step\r\n\r\nFix the engagement bot so it reports new users as new users.\n", "before_files": [{"content": "import datetime\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.utils import timezone\n\nimport requests\n\nfrom data_refinery_common.models import DatasetAnnotation\n\n\nclass Command(BaseCommand):\n help = \"Post downloads summary to slack\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--days\",\n type=int,\n default=7, # default to a week\n help=(\"Number of days in the past for which to build the stats\"),\n )\n parser.add_argument(\n \"--channel\",\n type=str,\n default=\"ccdl-general\",\n help=(\"Optional parameter to choose the channel where the message will be posted.\"),\n )\n\n def handle(self, *args, **options):\n days = options[\"days\"]\n start_time = timezone.now() - datetime.timedelta(days=days)\n\n annotation_queryset = DatasetAnnotation.objects.filter(\n created_at__gt=start_time\n ).prefetch_related(\"dataset\")\n annotations = [\n annotation\n for annotation in annotation_queryset\n if annotation.data[\"start\"] and should_display_email(annotation.dataset.email_address)\n ]\n\n unique_users = list(set(annotation.dataset.email_address for annotation in annotations))\n unique_ips = list(set(annotation.data[\"ip\"] for annotation in annotations))\n\n if unique_users:\n fallback_text = \"In the last {0} days, {1} users downloaded datasets from {2} locations.\".format(\n days, len(unique_users), len(unique_ips)\n )\n else:\n fallback_text = \"There were no downloads in the last {0} days.\".format(days)\n\n new_users = \"\"\n returning_users = \"\"\n for email in unique_users:\n user_annotations = annotation_queryset.filter(dataset__email_address=email)\n total_downloads = user_annotations.count()\n unique_locations = list(set(annotation.data[\"ip\"] for annotation in user_annotations))\n locations = \", \".join(get_ip_location(ip) for ip in unique_locations)\n is_new_user = DatasetAnnotation.objects.filter(\n created_at__lt=start_time, dataset__email_address=email\n )\n text = \"{0} | {1} downloads from {2}\\n\".format(email, total_downloads, locations)\n if is_new_user:\n new_users += text\n else:\n returning_users += text\n\n blocks = [\n {\n \"type\": \"section\",\n \"text\": {\"type\": \"plain_text\", \"emoji\": True, \"text\": fallback_text},\n }\n ]\n if new_users:\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\"type\": \"mrkdwn\", \"text\": \"*New users* \\n\" + new_users,},\n }\n )\n if returning_users:\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\"type\": \"mrkdwn\", \"text\": \"*Returning users* \\n\" + returning_users,},\n }\n )\n\n # Post to slack\n requests.post(\n settings.ENGAGEMENTBOT_WEBHOOK,\n json={\n \"username\": \"EngagementBot\",\n \"icon_emoji\": \":halal:\",\n \"channel\": \"#\" + options[\"channel\"],\n \"text\": fallback_text,\n \"blocks\": blocks,\n },\n headers={\"Content-Type\": \"application/json\"},\n timeout=10,\n )\n\n\ndef should_display_email(email: str) -> bool:\n \"\"\" Returns true if the given email is not associated with the CCDL suers \"\"\"\n if not email:\n return False\n return not (\n email.startswith(\"cansav09\")\n or email.startswith(\"arielsvn\")\n or email.startswith(\"jaclyn.n.taroni\")\n or email.startswith(\"kurt.wheeler\")\n or email.startswith(\"greenescientist\")\n or email.startswith(\"miserlou\")\n or 
email.startswith(\"d.prasad\")\n or email.endswith(\"@alexslemonade.org\")\n or email is (\"[email protected]\")\n or email is (\"[email protected]\")\n )\n\n\ndef get_ip_location(remote_ip):\n try:\n data = requests.get(\"https://ipapi.co/\" + remote_ip + \"/json/\", timeout=10).json()\n return \"{0}, {1}\".format(data[\"city\"], data[\"country_name\"])\n except Exception:\n return remote_ip\n", "path": "api/data_refinery_api/management/commands/post_downloads_summary.py"}], "after_files": [{"content": "import datetime\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.utils import timezone\n\nimport requests\n\nfrom data_refinery_common.models import DatasetAnnotation\n\n\nclass Command(BaseCommand):\n help = \"Post downloads summary to slack\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--days\",\n type=int,\n default=7, # default to a week\n help=(\"Number of days in the past for which to build the stats\"),\n )\n parser.add_argument(\n \"--channel\",\n type=str,\n default=\"ccdl-general\",\n help=(\"Optional parameter to choose the channel where the message will be posted.\"),\n )\n\n def handle(self, *args, **options):\n days = options[\"days\"]\n start_time = timezone.now() - datetime.timedelta(days=days)\n\n annotation_queryset = DatasetAnnotation.objects.filter(\n created_at__gt=start_time\n ).prefetch_related(\"dataset\")\n annotations = [\n annotation\n for annotation in annotation_queryset\n if annotation.data[\"start\"] and should_display_email(annotation.dataset.email_address)\n ]\n\n unique_users = list(set(annotation.dataset.email_address for annotation in annotations))\n unique_ips = list(set(annotation.data[\"ip\"] for annotation in annotations))\n\n new_users = \"\"\n returning_users = \"\"\n total_downloads = 0\n for email in unique_users:\n user_annotations = annotation_queryset.filter(dataset__email_address=email)\n downloads = user_annotations.count()\n total_downloads += downloads\n unique_locations = list(set(annotation.data[\"ip\"] for annotation in user_annotations))\n locations = \", \".join(get_ip_location(ip) for ip in unique_locations)\n is_returning_user = DatasetAnnotation.objects.filter(\n created_at__lt=start_time, dataset__email_address=email\n )\n text = \"{0} | {1} downloads from {2}\\n\".format(email, downloads, locations)\n if is_returning_user:\n returning_users += text\n else:\n new_users += text\n\n if total_downloads > 0:\n fallback_text = \"In the last {0} days, {1} users downloaded {2} datasets from {3} locations.\".format(\n days, len(unique_users), total_downloads, len(unique_ips)\n )\n else:\n fallback_text = \"There were no downloads in the last {0} days.\".format(days)\n\n blocks = [\n {\n \"type\": \"section\",\n \"text\": {\"type\": \"plain_text\", \"emoji\": True, \"text\": fallback_text},\n }\n ]\n if new_users:\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\"type\": \"mrkdwn\", \"text\": \"*New users* \\n\" + new_users,},\n }\n )\n if returning_users:\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\"type\": \"mrkdwn\", \"text\": \"*Returning users* \\n\" + returning_users,},\n }\n )\n\n # Post to slack\n requests.post(\n settings.ENGAGEMENTBOT_WEBHOOK,\n json={\n \"username\": \"EngagementBot\",\n \"icon_emoji\": \":halal:\",\n \"channel\": \"#\" + options[\"channel\"],\n \"text\": fallback_text,\n \"blocks\": blocks,\n },\n headers={\"Content-Type\": \"application/json\"},\n timeout=10,\n )\n\n\ndef should_display_email(email: str) -> 
bool:\n \"\"\" Returns true if the given email is not associated with the CCDL suers \"\"\"\n if not email:\n return False\n return not (\n email.startswith(\"cansav09\")\n or email.startswith(\"arielsvn\")\n or email.startswith(\"jaclyn.n.taroni\")\n or email.startswith(\"kurt.wheeler\")\n or email.startswith(\"greenescientist\")\n or email.startswith(\"miserlou\")\n or email.startswith(\"d.prasad\")\n or email.endswith(\"@alexslemonade.org\")\n or email is (\"[email protected]\")\n or email is (\"[email protected]\")\n )\n\n\ndef get_ip_location(remote_ip):\n try:\n data = requests.get(\"https://ipapi.co/\" + remote_ip + \"/json/\", timeout=10).json()\n return \"{0}, {1}\".format(data[\"city\"], data[\"country_name\"])\n except Exception:\n return remote_ip\n", "path": "api/data_refinery_api/management/commands/post_downloads_summary.py"}]} | 1,574 | 515 |
gh_patches_debug_26690 | rasdani/github-patches | git_diff | getsentry__sentry-python-2755 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make EventScrubber recursive
### Problem Statement
We have a custom `before_send` implementation that scrubs data recursively. I was hoping to replace the custom implementation with the built-in EventScrubber, but I found out that it doesn't scrub `vars` recursively.
As far as I can tell this was a consistency/performance trade-off, but it would be nice to have a built-in option to make it recursive.
Thank you!
### Solution Brainstorm
`EventScrubber(recursive=True)`
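A rough sketch of the behaviour such a flag would enable (illustrative only; the `recursive` argument does not exist at this point):

```python
scrubber = EventScrubber(recursive=True)  # proposed flag, not yet implemented

event = {
    "extra": {
        "outer": {
            "password": "hunter2",          # nested one level down
            "payload": [{"token": "abc"}],  # nested inside a list
        }
    }
}

scrubber.scrub_event(event)
# expectation: the nested "password" and "token" values are replaced with the
# usual substituted/[Filtered] annotation instead of being left untouched
```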
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/scrubber.py`
Content:
```
1 from sentry_sdk.utils import (
2 capture_internal_exceptions,
3 AnnotatedValue,
4 iter_event_frames,
5 )
6 from sentry_sdk._compat import string_types
7 from sentry_sdk._types import TYPE_CHECKING
8
9 if TYPE_CHECKING:
10 from sentry_sdk._types import Event
11 from typing import Any
12 from typing import Dict
13 from typing import List
14 from typing import Optional
15
16
17 DEFAULT_DENYLIST = [
18 # stolen from relay
19 "password",
20 "passwd",
21 "secret",
22 "api_key",
23 "apikey",
24 "auth",
25 "credentials",
26 "mysql_pwd",
27 "privatekey",
28 "private_key",
29 "token",
30 "ip_address",
31 "session",
32 # django
33 "csrftoken",
34 "sessionid",
35 # wsgi
36 "remote_addr",
37 "x_csrftoken",
38 "x_forwarded_for",
39 "set_cookie",
40 "cookie",
41 "authorization",
42 "x_api_key",
43 "x_forwarded_for",
44 "x_real_ip",
45 # other common names used in the wild
46 "aiohttp_session", # aiohttp
47 "connect.sid", # Express
48 "csrf_token", # Pyramid
49 "csrf", # (this is a cookie name used in accepted answers on stack overflow)
50 "_csrf", # Express
51 "_csrf_token", # Bottle
52 "PHPSESSID", # PHP
53 "_session", # Sanic
54 "symfony", # Symfony
55 "user_session", # Vue
56 "_xsrf", # Tornado
57 "XSRF-TOKEN", # Angular, Laravel
58 ]
59
60
61 class EventScrubber(object):
62 def __init__(self, denylist=None):
63 # type: (Optional[List[str]]) -> None
64 self.denylist = DEFAULT_DENYLIST if denylist is None else denylist
65 self.denylist = [x.lower() for x in self.denylist]
66
67 def scrub_dict(self, d):
68 # type: (Dict[str, Any]) -> None
69 if not isinstance(d, dict):
70 return
71
72 for k in d.keys():
73 if isinstance(k, string_types) and k.lower() in self.denylist:
74 d[k] = AnnotatedValue.substituted_because_contains_sensitive_data()
75
76 def scrub_request(self, event):
77 # type: (Event) -> None
78 with capture_internal_exceptions():
79 if "request" in event:
80 if "headers" in event["request"]:
81 self.scrub_dict(event["request"]["headers"])
82 if "cookies" in event["request"]:
83 self.scrub_dict(event["request"]["cookies"])
84 if "data" in event["request"]:
85 self.scrub_dict(event["request"]["data"])
86
87 def scrub_extra(self, event):
88 # type: (Event) -> None
89 with capture_internal_exceptions():
90 if "extra" in event:
91 self.scrub_dict(event["extra"])
92
93 def scrub_user(self, event):
94 # type: (Event) -> None
95 with capture_internal_exceptions():
96 if "user" in event:
97 self.scrub_dict(event["user"])
98
99 def scrub_breadcrumbs(self, event):
100 # type: (Event) -> None
101 with capture_internal_exceptions():
102 if "breadcrumbs" in event:
103 if "values" in event["breadcrumbs"]:
104 for value in event["breadcrumbs"]["values"]:
105 if "data" in value:
106 self.scrub_dict(value["data"])
107
108 def scrub_frames(self, event):
109 # type: (Event) -> None
110 with capture_internal_exceptions():
111 for frame in iter_event_frames(event):
112 if "vars" in frame:
113 self.scrub_dict(frame["vars"])
114
115 def scrub_spans(self, event):
116 # type: (Event) -> None
117 with capture_internal_exceptions():
118 if "spans" in event:
119 for span in event["spans"]:
120 if "data" in span:
121 self.scrub_dict(span["data"])
122
123 def scrub_event(self, event):
124 # type: (Event) -> None
125 self.scrub_request(event)
126 self.scrub_extra(event)
127 self.scrub_user(event)
128 self.scrub_breadcrumbs(event)
129 self.scrub_frames(event)
130 self.scrub_spans(event)
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sentry_sdk/scrubber.py b/sentry_sdk/scrubber.py
--- a/sentry_sdk/scrubber.py
+++ b/sentry_sdk/scrubber.py
@@ -59,19 +59,36 @@
class EventScrubber(object):
- def __init__(self, denylist=None):
- # type: (Optional[List[str]]) -> None
+ def __init__(self, denylist=None, recursive=False):
+ # type: (Optional[List[str]], bool) -> None
self.denylist = DEFAULT_DENYLIST if denylist is None else denylist
self.denylist = [x.lower() for x in self.denylist]
+ self.recursive = recursive
+
+ def scrub_list(self, lst):
+ # type: (List[Any]) -> None
+ if not isinstance(lst, list):
+ return
+
+ for v in lst:
+ if isinstance(v, dict):
+ self.scrub_dict(v)
+ elif isinstance(v, list):
+ self.scrub_list(v)
def scrub_dict(self, d):
# type: (Dict[str, Any]) -> None
if not isinstance(d, dict):
return
- for k in d.keys():
+ for k, v in d.items():
if isinstance(k, string_types) and k.lower() in self.denylist:
d[k] = AnnotatedValue.substituted_because_contains_sensitive_data()
+ elif self.recursive:
+ if isinstance(v, dict):
+ self.scrub_dict(v)
+ elif isinstance(v, list):
+ self.scrub_list(v)
def scrub_request(self, event):
# type: (Event) -> None
| {"golden_diff": "diff --git a/sentry_sdk/scrubber.py b/sentry_sdk/scrubber.py\n--- a/sentry_sdk/scrubber.py\n+++ b/sentry_sdk/scrubber.py\n@@ -59,19 +59,36 @@\n \n \n class EventScrubber(object):\n- def __init__(self, denylist=None):\n- # type: (Optional[List[str]]) -> None\n+ def __init__(self, denylist=None, recursive=False):\n+ # type: (Optional[List[str]], bool) -> None\n self.denylist = DEFAULT_DENYLIST if denylist is None else denylist\n self.denylist = [x.lower() for x in self.denylist]\n+ self.recursive = recursive\n+\n+ def scrub_list(self, lst):\n+ # type: (List[Any]) -> None\n+ if not isinstance(lst, list):\n+ return\n+\n+ for v in lst:\n+ if isinstance(v, dict):\n+ self.scrub_dict(v)\n+ elif isinstance(v, list):\n+ self.scrub_list(v)\n \n def scrub_dict(self, d):\n # type: (Dict[str, Any]) -> None\n if not isinstance(d, dict):\n return\n \n- for k in d.keys():\n+ for k, v in d.items():\n if isinstance(k, string_types) and k.lower() in self.denylist:\n d[k] = AnnotatedValue.substituted_because_contains_sensitive_data()\n+ elif self.recursive:\n+ if isinstance(v, dict):\n+ self.scrub_dict(v)\n+ elif isinstance(v, list):\n+ self.scrub_list(v)\n \n def scrub_request(self, event):\n # type: (Event) -> None\n", "issue": "Make EventScrubber recursive\n### Problem Statement\r\n\r\nWe have a custom `before_send` implementation that scrubs data recursively. I was hopping to replace the custom implementation with the built-in EventScrubber but I found out that it doesn't scrub `vars` recursively.\r\n\r\nAs far as I can tell this was a consistency, perf trade-off thing but it would be nice to have a built-in option to make it recursive.\r\n\r\nThank you!\r\n\r\n### Solution Brainstorm\r\n\r\n`EventScrubber(recursive=True)`\n", "before_files": [{"content": "from sentry_sdk.utils import (\n capture_internal_exceptions,\n AnnotatedValue,\n iter_event_frames,\n)\nfrom sentry_sdk._compat import string_types\nfrom sentry_sdk._types import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from sentry_sdk._types import Event\n from typing import Any\n from typing import Dict\n from typing import List\n from typing import Optional\n\n\nDEFAULT_DENYLIST = [\n # stolen from relay\n \"password\",\n \"passwd\",\n \"secret\",\n \"api_key\",\n \"apikey\",\n \"auth\",\n \"credentials\",\n \"mysql_pwd\",\n \"privatekey\",\n \"private_key\",\n \"token\",\n \"ip_address\",\n \"session\",\n # django\n \"csrftoken\",\n \"sessionid\",\n # wsgi\n \"remote_addr\",\n \"x_csrftoken\",\n \"x_forwarded_for\",\n \"set_cookie\",\n \"cookie\",\n \"authorization\",\n \"x_api_key\",\n \"x_forwarded_for\",\n \"x_real_ip\",\n # other common names used in the wild\n \"aiohttp_session\", # aiohttp\n \"connect.sid\", # Express\n \"csrf_token\", # Pyramid\n \"csrf\", # (this is a cookie name used in accepted answers on stack overflow)\n \"_csrf\", # Express\n \"_csrf_token\", # Bottle\n \"PHPSESSID\", # PHP\n \"_session\", # Sanic\n \"symfony\", # Symfony\n \"user_session\", # Vue\n \"_xsrf\", # Tornado\n \"XSRF-TOKEN\", # Angular, Laravel\n]\n\n\nclass EventScrubber(object):\n def __init__(self, denylist=None):\n # type: (Optional[List[str]]) -> None\n self.denylist = DEFAULT_DENYLIST if denylist is None else denylist\n self.denylist = [x.lower() for x in self.denylist]\n\n def scrub_dict(self, d):\n # type: (Dict[str, Any]) -> None\n if not isinstance(d, dict):\n return\n\n for k in d.keys():\n if isinstance(k, string_types) and k.lower() in self.denylist:\n d[k] = AnnotatedValue.substituted_because_contains_sensitive_data()\n\n def 
scrub_request(self, event):\n # type: (Event) -> None\n with capture_internal_exceptions():\n if \"request\" in event:\n if \"headers\" in event[\"request\"]:\n self.scrub_dict(event[\"request\"][\"headers\"])\n if \"cookies\" in event[\"request\"]:\n self.scrub_dict(event[\"request\"][\"cookies\"])\n if \"data\" in event[\"request\"]:\n self.scrub_dict(event[\"request\"][\"data\"])\n\n def scrub_extra(self, event):\n # type: (Event) -> None\n with capture_internal_exceptions():\n if \"extra\" in event:\n self.scrub_dict(event[\"extra\"])\n\n def scrub_user(self, event):\n # type: (Event) -> None\n with capture_internal_exceptions():\n if \"user\" in event:\n self.scrub_dict(event[\"user\"])\n\n def scrub_breadcrumbs(self, event):\n # type: (Event) -> None\n with capture_internal_exceptions():\n if \"breadcrumbs\" in event:\n if \"values\" in event[\"breadcrumbs\"]:\n for value in event[\"breadcrumbs\"][\"values\"]:\n if \"data\" in value:\n self.scrub_dict(value[\"data\"])\n\n def scrub_frames(self, event):\n # type: (Event) -> None\n with capture_internal_exceptions():\n for frame in iter_event_frames(event):\n if \"vars\" in frame:\n self.scrub_dict(frame[\"vars\"])\n\n def scrub_spans(self, event):\n # type: (Event) -> None\n with capture_internal_exceptions():\n if \"spans\" in event:\n for span in event[\"spans\"]:\n if \"data\" in span:\n self.scrub_dict(span[\"data\"])\n\n def scrub_event(self, event):\n # type: (Event) -> None\n self.scrub_request(event)\n self.scrub_extra(event)\n self.scrub_user(event)\n self.scrub_breadcrumbs(event)\n self.scrub_frames(event)\n self.scrub_spans(event)\n", "path": "sentry_sdk/scrubber.py"}], "after_files": [{"content": "from sentry_sdk.utils import (\n capture_internal_exceptions,\n AnnotatedValue,\n iter_event_frames,\n)\nfrom sentry_sdk._compat import string_types\nfrom sentry_sdk._types import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from sentry_sdk._types import Event\n from typing import Any\n from typing import Dict\n from typing import List\n from typing import Optional\n\n\nDEFAULT_DENYLIST = [\n # stolen from relay\n \"password\",\n \"passwd\",\n \"secret\",\n \"api_key\",\n \"apikey\",\n \"auth\",\n \"credentials\",\n \"mysql_pwd\",\n \"privatekey\",\n \"private_key\",\n \"token\",\n \"ip_address\",\n \"session\",\n # django\n \"csrftoken\",\n \"sessionid\",\n # wsgi\n \"remote_addr\",\n \"x_csrftoken\",\n \"x_forwarded_for\",\n \"set_cookie\",\n \"cookie\",\n \"authorization\",\n \"x_api_key\",\n \"x_forwarded_for\",\n \"x_real_ip\",\n # other common names used in the wild\n \"aiohttp_session\", # aiohttp\n \"connect.sid\", # Express\n \"csrf_token\", # Pyramid\n \"csrf\", # (this is a cookie name used in accepted answers on stack overflow)\n \"_csrf\", # Express\n \"_csrf_token\", # Bottle\n \"PHPSESSID\", # PHP\n \"_session\", # Sanic\n \"symfony\", # Symfony\n \"user_session\", # Vue\n \"_xsrf\", # Tornado\n \"XSRF-TOKEN\", # Angular, Laravel\n]\n\n\nclass EventScrubber(object):\n def __init__(self, denylist=None, recursive=False):\n # type: (Optional[List[str]], bool) -> None\n self.denylist = DEFAULT_DENYLIST if denylist is None else denylist\n self.denylist = [x.lower() for x in self.denylist]\n self.recursive = recursive\n\n def scrub_list(self, lst):\n # type: (List[Any]) -> None\n if not isinstance(lst, list):\n return\n\n for v in lst:\n if isinstance(v, dict):\n self.scrub_dict(v)\n elif isinstance(v, list):\n self.scrub_list(v)\n\n def scrub_dict(self, d):\n # type: (Dict[str, Any]) -> None\n if not isinstance(d, dict):\n 
return\n\n for k, v in d.items():\n if isinstance(k, string_types) and k.lower() in self.denylist:\n d[k] = AnnotatedValue.substituted_because_contains_sensitive_data()\n elif self.recursive:\n if isinstance(v, dict):\n self.scrub_dict(v)\n elif isinstance(v, list):\n self.scrub_list(v)\n\n def scrub_request(self, event):\n # type: (Event) -> None\n with capture_internal_exceptions():\n if \"request\" in event:\n if \"headers\" in event[\"request\"]:\n self.scrub_dict(event[\"request\"][\"headers\"])\n if \"cookies\" in event[\"request\"]:\n self.scrub_dict(event[\"request\"][\"cookies\"])\n if \"data\" in event[\"request\"]:\n self.scrub_dict(event[\"request\"][\"data\"])\n\n def scrub_extra(self, event):\n # type: (Event) -> None\n with capture_internal_exceptions():\n if \"extra\" in event:\n self.scrub_dict(event[\"extra\"])\n\n def scrub_user(self, event):\n # type: (Event) -> None\n with capture_internal_exceptions():\n if \"user\" in event:\n self.scrub_dict(event[\"user\"])\n\n def scrub_breadcrumbs(self, event):\n # type: (Event) -> None\n with capture_internal_exceptions():\n if \"breadcrumbs\" in event:\n if \"values\" in event[\"breadcrumbs\"]:\n for value in event[\"breadcrumbs\"][\"values\"]:\n if \"data\" in value:\n self.scrub_dict(value[\"data\"])\n\n def scrub_frames(self, event):\n # type: (Event) -> None\n with capture_internal_exceptions():\n for frame in iter_event_frames(event):\n if \"vars\" in frame:\n self.scrub_dict(frame[\"vars\"])\n\n def scrub_spans(self, event):\n # type: (Event) -> None\n with capture_internal_exceptions():\n if \"spans\" in event:\n for span in event[\"spans\"]:\n if \"data\" in span:\n self.scrub_dict(span[\"data\"])\n\n def scrub_event(self, event):\n # type: (Event) -> None\n self.scrub_request(event)\n self.scrub_extra(event)\n self.scrub_user(event)\n self.scrub_breadcrumbs(event)\n self.scrub_frames(event)\n self.scrub_spans(event)\n", "path": "sentry_sdk/scrubber.py"}]} | 1,603 | 385 |
gh_patches_debug_6031 | rasdani/github-patches | git_diff | pytorch__audio-579 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
_audio_backends platform independent
## 🐛 Bug
The constant [_audio_backends](https://github.com/pytorch/audio/blob/c29598d54185d73b4ed04103330573e190bbdb69/torchaudio/_backend.py#L12) is referenced by [BACKENDS](https://github.com/pytorch/audio/blob/0e5581cb2a9616205a00cbabf4c9a30613a1037f/test/common_utils.py#L10) in the common utilities of the test folder. [test_batch_mfcc](https://github.com/pytorch/audio/blob/0e5581cb2a9616205a00cbabf4c9a30613a1037f/test/test_batch_consistency.py#L181) is skipped if the 'sox' key is not present in that constant, but it always is. That means this test will be executed in environments where the package may not exist.
```
(base) PS C:\Users\chris\dev\audio> python .\test\test_batch_consistency.py TestTransforms.test_batch_mfcc
E
======================================================================
ERROR: test_batch_mfcc (__main__.TestTransforms)
----------------------------------------------------------------------
Traceback (most recent call last):
File "C:\tools\Anaconda3\lib\contextlib.py", line 74, in inner
return func(*args, **kwds)
File ".\test\test_batch_consistency.py", line 185, in test_batch_mfcc
waveform, _ = torchaudio.load(test_filepath)
File "c:\users\chris\dev\audio\torchaudio\__init__.py", line 87, in load
filetype=filetype,
File "c:\users\chris\dev\audio\torchaudio\_sox_backend.py", line 38, in load
import _torch_sox
ModuleNotFoundError: No module named '_torch_sox'
----------------------------------------------------------------------
Ran 1 test in 0.001s
FAILED (errors=1)
```
## To Reproduce
Steps to reproduce the behavior:
1. Remove sox from your environment
2. Run ```python test/test_batch_consistency.py TestTransforms.test_batch_mfcc```
I can provide more detailed information if required.
## Expected behavior
The test should be skipped if sox is not available.
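In other words, the set of available backends should reflect the platform, roughly along these lines (a sketch only, using the module names from the file below; the exact shape of the fix may differ):

```python
import platform

if platform.system() == "Windows":
    # sox is not built on Windows, so don't advertise it
    _audio_backends = {"soundfile": _soundfile_backend}
else:
    _audio_backends = {"sox": _sox_backend, "soundfile": _soundfile_backend}

# with that, a test-side guard such as
#   @unittest.skipIf("sox" not in BACKENDS, "sox not available")
# skips the test in environments without the sox extension
```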
## Environment
```
(base) PS C:\Users\chris\dev\audio> python .\collect_env_1.py
Collecting environment information...
PyTorch version: 1.6.0a0+8a60d8b
Is debug build: No
CUDA used to build PyTorch: None
OS: Microsoft Windows 10 Home
GCC version: Could not collect
CMake version: version 3.14.0
Python version: 3.7
Is CUDA available: No
CUDA runtime version: No CUDA
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
Versions of relevant libraries:
[pip] numpy==1.18.1
[pip] numpydoc==0.9.2
[pip] torch==1.6.0a0+8a60d8b
[pip] torchaudio==0.5.0a0+5a75b63
[conda] blas 1.0 mkl
[conda] mkl 2020.0 166
[conda] mkl-include 2020.0 166
[conda] mkl-service 2.3.0 py37hb782905_0
[conda] mkl_fft 1.0.15 py37h14836fe_0
[conda] mkl_random 1.1.0 py37h675688f_0
[conda] numpy 1.18.1 py37h93ca92e_0
[conda] numpy-base 1.18.1 py37hc3f5095_1
[conda] numpydoc 0.9.2 py_0
[conda] torch 1.6.0a0+8a60d8b dev_0 <develop>
[conda] torchaudio 0.5.0a0+5a75b63 dev_0 <develop>
```
## Additional context
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchaudio/_backend.py`
Content:
```
1 from functools import wraps
2 from typing import Any, List, Union
3
4 import platform
5 import torch
6 from torch import Tensor
7
8 from . import _soundfile_backend, _sox_backend
9
10
11 _audio_backend = "soundfile" if platform.system() == "Windows" else "sox"
12 _audio_backends = {"sox": _sox_backend, "soundfile": _soundfile_backend}
13
14
15 def set_audio_backend(backend: str) -> None:
16 """
17 Specifies the package used to load.
18 Args:
19 backend (str): Name of the backend. One of {}.
20 """.format(_audio_backends.keys())
21 global _audio_backend
22 if backend not in _audio_backends:
23 raise ValueError(
24 "Invalid backend '{}'. Options are {}.".format(backend, _audio_backends.keys())
25 )
26 _audio_backend = backend
27
28
29 def get_audio_backend() -> str:
30 """
31 Gets the name of the package used to load.
32 """
33 return _audio_backend
34
35
36 def _get_audio_backend_module() -> Any:
37 """
38 Gets the module backend to load.
39 """
40 backend = get_audio_backend()
41 return _audio_backends[backend]
42
43
44 def _audio_backend_guard(backends: Union[str, List[str]]) -> Any:
45
46 if isinstance(backends, str):
47 backends = [backends]
48
49 def decorator(func):
50 @wraps(func)
51 def wrapper(*args, **kwargs):
52 if get_audio_backend() not in backends:
53 raise RuntimeError("Function {} requires backend to be one of {}.".format(func.__name__, backends))
54 return func(*args, **kwargs)
55 return wrapper
56
57 return decorator
58
59
60 def check_input(src: Tensor) -> None:
61 if not torch.is_tensor(src):
62 raise TypeError('Expected a tensor, got %s' % type(src))
63 if src.is_cuda:
64 raise TypeError('Expected a CPU based tensor, got %s' % type(src))
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchaudio/_backend.py b/torchaudio/_backend.py
--- a/torchaudio/_backend.py
+++ b/torchaudio/_backend.py
@@ -8,8 +8,12 @@
from . import _soundfile_backend, _sox_backend
-_audio_backend = "soundfile" if platform.system() == "Windows" else "sox"
-_audio_backends = {"sox": _sox_backend, "soundfile": _soundfile_backend}
+if platform.system() == "Windows":
+ _audio_backend = "soundfile"
+ _audio_backends = {"soundfile": _soundfile_backend}
+else:
+ _audio_backend = "sox"
+ _audio_backends = {"sox": _sox_backend, "soundfile": _soundfile_backend}
def set_audio_backend(backend: str) -> None:
| {"golden_diff": "diff --git a/torchaudio/_backend.py b/torchaudio/_backend.py\n--- a/torchaudio/_backend.py\n+++ b/torchaudio/_backend.py\n@@ -8,8 +8,12 @@\n from . import _soundfile_backend, _sox_backend\n \n \n-_audio_backend = \"soundfile\" if platform.system() == \"Windows\" else \"sox\"\n-_audio_backends = {\"sox\": _sox_backend, \"soundfile\": _soundfile_backend}\n+if platform.system() == \"Windows\":\n+ _audio_backend = \"soundfile\"\n+ _audio_backends = {\"soundfile\": _soundfile_backend}\n+else:\n+ _audio_backend = \"sox\"\n+ _audio_backends = {\"sox\": _sox_backend, \"soundfile\": _soundfile_backend}\n \n \n def set_audio_backend(backend: str) -> None:\n", "issue": "_audio_backends platform independent\n## \ud83d\udc1b Bug\r\n\r\nThe constant [_audio_backends](https://github.com/pytorch/audio/blob/c29598d54185d73b4ed04103330573e190bbdb69/torchaudio/_backend.py#L12) is referenced by [BACKENDS](https://github.com/pytorch/audio/blob/0e5581cb2a9616205a00cbabf4c9a30613a1037f/test/common_utils.py#L10) in the common utilities of the test folder. [test_batch_mfcc](https://github.com/pytorch/audio/blob/0e5581cb2a9616205a00cbabf4c9a30613a1037f/test/test_batch_consistency.py#L181) is skipped if the 'sox' key is not present in that constant, but it always is. That means this test will be executed in environments where the package may not exist.\r\n\r\n```\r\n(base) PS C:\\Users\\chris\\dev\\audio> python .\\test\\test_batch_consistency.py TestTransforms.test_batch_mfcc\r\nE\r\n======================================================================\r\nERROR: test_batch_mfcc (__main__.TestTransforms)\r\n----------------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"C:\\tools\\Anaconda3\\lib\\contextlib.py\", line 74, in inner\r\n return func(*args, **kwds)\r\n File \".\\test\\test_batch_consistency.py\", line 185, in test_batch_mfcc\r\n waveform, _ = torchaudio.load(test_filepath)\r\n File \"c:\\users\\chris\\dev\\audio\\torchaudio\\__init__.py\", line 87, in load\r\n filetype=filetype,\r\n File \"c:\\users\\chris\\dev\\audio\\torchaudio\\_sox_backend.py\", line 38, in load\r\n import _torch_sox\r\nModuleNotFoundError: No module named '_torch_sox'\r\n\r\n----------------------------------------------------------------------\r\nRan 1 test in 0.001s\r\n\r\nFAILED (errors=1)\r\n```\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Remove sox from your environment\r\n2. 
Run ```python test/test_batch_consistency.py TestTransform.test_batch_mfcc```\r\n\r\nI can provide more detailed information if required.\r\n\r\n## Expected behavior\r\n\r\nThe test should be skipped if sox is not available.\r\n\r\n## Environment\r\n\r\n```\r\n(base) PS C:\\Users\\chris\\dev\\audio> python .\\collect_env_1.py\r\nCollecting environment information...\r\nPyTorch version: 1.6.0a0+8a60d8b\r\nIs debug build: No\r\nCUDA used to build PyTorch: None\r\n\r\nOS: Microsoft Windows 10 Home\r\nGCC version: Could not collect\r\nCMake version: version 3.14.0\r\n\r\nPython version: 3.7\r\nIs CUDA available: No\r\nCUDA runtime version: No CUDA\r\nGPU models and configuration: No CUDA\r\nNvidia driver version: No CUDA\r\ncuDNN version: No CUDA\r\n\r\nVersions of relevant libraries:\r\n[pip] numpy==1.18.1\r\n[pip] numpydoc==0.9.2\r\n[pip] torch==1.6.0a0+8a60d8b\r\n[pip] torchaudio==0.5.0a0+5a75b63\r\n[conda] blas 1.0 mkl\r\n[conda] mkl 2020.0 166\r\n[conda] mkl-include 2020.0 166\r\n[conda] mkl-service 2.3.0 py37hb782905_0\r\n[conda] mkl_fft 1.0.15 py37h14836fe_0\r\n[conda] mkl_random 1.1.0 py37h675688f_0\r\n[conda] numpy 1.18.1 py37h93ca92e_0\r\n[conda] numpy-base 1.18.1 py37hc3f5095_1\r\n[conda] numpydoc 0.9.2 py_0\r\n[conda] torch 1.6.0a0+8a60d8b dev_0 <develop>\r\n[conda] torchaudio 0.5.0a0+5a75b63 dev_0 <develop>\r\n```\r\n## Additional context\r\n\r\n\n", "before_files": [{"content": "from functools import wraps\nfrom typing import Any, List, Union\n\nimport platform\nimport torch\nfrom torch import Tensor\n\nfrom . import _soundfile_backend, _sox_backend\n\n\n_audio_backend = \"soundfile\" if platform.system() == \"Windows\" else \"sox\"\n_audio_backends = {\"sox\": _sox_backend, \"soundfile\": _soundfile_backend}\n\n\ndef set_audio_backend(backend: str) -> None:\n \"\"\"\n Specifies the package used to load.\n Args:\n backend (str): Name of the backend. One of {}.\n \"\"\".format(_audio_backends.keys())\n global _audio_backend\n if backend not in _audio_backends:\n raise ValueError(\n \"Invalid backend '{}'. Options are {}.\".format(backend, _audio_backends.keys())\n )\n _audio_backend = backend\n\n\ndef get_audio_backend() -> str:\n \"\"\"\n Gets the name of the package used to load.\n \"\"\"\n return _audio_backend\n\n\ndef _get_audio_backend_module() -> Any:\n \"\"\"\n Gets the module backend to load.\n \"\"\"\n backend = get_audio_backend()\n return _audio_backends[backend]\n\n\ndef _audio_backend_guard(backends: Union[str, List[str]]) -> Any:\n\n if isinstance(backends, str):\n backends = [backends]\n\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if get_audio_backend() not in backends:\n raise RuntimeError(\"Function {} requires backend to be one of {}.\".format(func.__name__, backends))\n return func(*args, **kwargs)\n return wrapper\n\n return decorator\n\n\ndef check_input(src: Tensor) -> None:\n if not torch.is_tensor(src):\n raise TypeError('Expected a tensor, got %s' % type(src))\n if src.is_cuda:\n raise TypeError('Expected a CPU based tensor, got %s' % type(src))\n", "path": "torchaudio/_backend.py"}], "after_files": [{"content": "from functools import wraps\nfrom typing import Any, List, Union\n\nimport platform\nimport torch\nfrom torch import Tensor\n\nfrom . 
import _soundfile_backend, _sox_backend\n\n\nif platform.system() == \"Windows\":\n _audio_backend = \"soundfile\"\n _audio_backends = {\"soundfile\": _soundfile_backend}\nelse:\n _audio_backend = \"sox\"\n _audio_backends = {\"sox\": _sox_backend, \"soundfile\": _soundfile_backend}\n\n\ndef set_audio_backend(backend: str) -> None:\n \"\"\"\n Specifies the package used to load.\n Args:\n backend (str): Name of the backend. One of {}.\n \"\"\".format(_audio_backends.keys())\n global _audio_backend\n if backend not in _audio_backends:\n raise ValueError(\n \"Invalid backend '{}'. Options are {}.\".format(backend, _audio_backends.keys())\n )\n _audio_backend = backend\n\n\ndef get_audio_backend() -> str:\n \"\"\"\n Gets the name of the package used to load.\n \"\"\"\n return _audio_backend\n\n\ndef _get_audio_backend_module() -> Any:\n \"\"\"\n Gets the module backend to load.\n \"\"\"\n backend = get_audio_backend()\n return _audio_backends[backend]\n\n\ndef _audio_backend_guard(backends: Union[str, List[str]]) -> Any:\n\n if isinstance(backends, str):\n backends = [backends]\n\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if get_audio_backend() not in backends:\n raise RuntimeError(\"Function {} requires backend to be one of {}.\".format(func.__name__, backends))\n return func(*args, **kwargs)\n return wrapper\n\n return decorator\n\n\ndef check_input(src: Tensor) -> None:\n if not torch.is_tensor(src):\n raise TypeError('Expected a tensor, got %s' % type(src))\n if src.is_cuda:\n raise TypeError('Expected a CPU based tensor, got %s' % type(src))\n", "path": "torchaudio/_backend.py"}]} | 1,852 | 195 |
gh_patches_debug_19340 | rasdani/github-patches | git_diff | pyca__cryptography-7382 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Include Rust version in DEBUG ASSISTENCE message?
I'm not sure what the best way to do this is but it seems like it would be helpful to include the output of `rustc -V` in the DEBUG ASSISTENCE.
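One hedged sketch of how that output could be collected without breaking the fallback path (names are illustrative, not the project's actual code; `shutil.which` plus a short `subprocess.run` timeout keeps a missing or hanging `rustc` from turning the help text itself into an error):

```python
# Illustrative only: gather the rustc version defensively for the debug message.
import shutil
import subprocess

rustc_version = "n/a"
if shutil.which("rustc") is not None:
    try:
        output = subprocess.run(
            ["rustc", "--version"],
            capture_output=True,
            encoding="utf8",
            timeout=0.5,
            check=True,
        ).stdout
        rustc_version = output.strip()
    except subprocess.SubprocessError:
        pass
print(f"    rustc: {rustc_version}")
```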
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 # This file is dual licensed under the terms of the Apache License, Version
4 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
5 # for complete details.
6
7 import os
8 import platform
9 import sys
10
11 from setuptools import setup
12
13 try:
14 from setuptools_rust import RustExtension
15 except ImportError:
16 print(
17 """
18 =============================DEBUG ASSISTANCE==========================
19 If you are seeing an error here please try the following to
20 successfully install cryptography:
21
22 Upgrade to the latest pip and try again. This will fix errors for most
23 users. See: https://pip.pypa.io/en/stable/installing/#upgrading-pip
24 =============================DEBUG ASSISTANCE==========================
25 """
26 )
27 raise
28
29
30 base_dir = os.path.dirname(__file__)
31 src_dir = os.path.join(base_dir, "src")
32
33 # When executing the setup.py, we need to be able to import ourselves, this
34 # means that we need to add the src/ directory to the sys.path.
35 sys.path.insert(0, src_dir)
36
37 try:
38 # See setup.cfg for most of the config metadata.
39 setup(
40 cffi_modules=[
41 "src/_cffi_src/build_openssl.py:ffi",
42 ],
43 rust_extensions=[
44 RustExtension(
45 "_rust",
46 "src/rust/Cargo.toml",
47 py_limited_api=True,
48 # Enable abi3 mode if we're not using PyPy.
49 features=(
50 []
51 if platform.python_implementation() == "PyPy"
52 else ["pyo3/abi3-py36"]
53 ),
54 rust_version=">=1.48.0",
55 )
56 ],
57 )
58 except: # noqa: E722
59 # Note: This is a bare exception that re-raises so that we don't interfere
60 # with anything the installation machinery might want to do. Because we
61 # print this for any exception this msg can appear (e.g. in verbose logs)
62 # even if there's no failure. For example, SetupRequirementsError is raised
63 # during PEP517 building and prints this text. setuptools raises SystemExit
64 # when compilation fails right now, but it's possible this isn't stable
65 # or a public API commitment so we'll remain ultra conservative.
66
67 import pkg_resources
68
69 print(
70 """
71 =============================DEBUG ASSISTANCE=============================
72 If you are seeing a compilation error please try the following steps to
73 successfully install cryptography:
74 1) Upgrade to the latest pip and try again. This will fix errors for most
75 users. See: https://pip.pypa.io/en/stable/installing/#upgrading-pip
76 2) Read https://cryptography.io/en/latest/installation/ for specific
77 instructions for your platform.
78 3) Check our frequently asked questions for more information:
79 https://cryptography.io/en/latest/faq/
80 4) Ensure you have a recent Rust toolchain installed:
81 https://cryptography.io/en/latest/installation/#rust
82 """
83 )
84 print(f" Python: {'.'.join(str(v) for v in sys.version_info[:3])}")
85 print(f" platform: {platform.platform()}")
86 for dist in ["pip", "setuptools", "setuptools_rust"]:
87 try:
88 version = pkg_resources.get_distribution(dist).version
89 except pkg_resources.DistributionNotFound:
90 version = "n/a"
91 print(f" {dist}: {version}")
92 print(
93 """\
94 =============================DEBUG ASSISTANCE=============================
95 """
96 )
97 raise
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,6 +6,9 @@
import os
import platform
+import re
+import shutil
+import subprocess
import sys
from setuptools import setup
@@ -89,6 +92,22 @@
except pkg_resources.DistributionNotFound:
version = "n/a"
print(f" {dist}: {version}")
+ version = "n/a"
+ if shutil.which("rustc") is not None:
+ try:
+ # If for any reason `rustc --version` fails, silently ignore it
+ rustc_output = subprocess.run(
+ ["rustc", "--version"],
+ capture_output=True,
+ timeout=0.5,
+ encoding="utf8",
+ check=True,
+ ).stdout
+ version = re.sub("^rustc ", "", rustc_output.strip())
+ except subprocess.SubprocessError:
+ pass
+ print(f" rustc: {version}")
+
print(
"""\
=============================DEBUG ASSISTANCE=============================
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -6,6 +6,9 @@\n \n import os\n import platform\n+import re\n+import shutil\n+import subprocess\n import sys\n \n from setuptools import setup\n@@ -89,6 +92,22 @@\n except pkg_resources.DistributionNotFound:\n version = \"n/a\"\n print(f\" {dist}: {version}\")\n+ version = \"n/a\"\n+ if shutil.which(\"rustc\") is not None:\n+ try:\n+ # If for any reason `rustc --version` fails, silently ignore it\n+ rustc_output = subprocess.run(\n+ [\"rustc\", \"--version\"],\n+ capture_output=True,\n+ timeout=0.5,\n+ encoding=\"utf8\",\n+ check=True,\n+ ).stdout\n+ version = re.sub(\"^rustc \", \"\", rustc_output.strip())\n+ except subprocess.SubprocessError:\n+ pass\n+ print(f\" rustc: {version}\")\n+\n print(\n \"\"\"\\\n =============================DEBUG ASSISTANCE=============================\n", "issue": "Include Rust version in DEBUG ASSISTENCE message?\nI'm not sure what the best way to do this is but it seems like it would be helpful to include the output of `rustc -V` in the DEBUG ASSISTENCE.\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nimport os\nimport platform\nimport sys\n\nfrom setuptools import setup\n\ntry:\n from setuptools_rust import RustExtension\nexcept ImportError:\n print(\n \"\"\"\n =============================DEBUG ASSISTANCE==========================\n If you are seeing an error here please try the following to\n successfully install cryptography:\n\n Upgrade to the latest pip and try again. This will fix errors for most\n users. See: https://pip.pypa.io/en/stable/installing/#upgrading-pip\n =============================DEBUG ASSISTANCE==========================\n \"\"\"\n )\n raise\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\ntry:\n # See setup.cfg for most of the config metadata.\n setup(\n cffi_modules=[\n \"src/_cffi_src/build_openssl.py:ffi\",\n ],\n rust_extensions=[\n RustExtension(\n \"_rust\",\n \"src/rust/Cargo.toml\",\n py_limited_api=True,\n # Enable abi3 mode if we're not using PyPy.\n features=(\n []\n if platform.python_implementation() == \"PyPy\"\n else [\"pyo3/abi3-py36\"]\n ),\n rust_version=\">=1.48.0\",\n )\n ],\n )\nexcept: # noqa: E722\n # Note: This is a bare exception that re-raises so that we don't interfere\n # with anything the installation machinery might want to do. Because we\n # print this for any exception this msg can appear (e.g. in verbose logs)\n # even if there's no failure. For example, SetupRequirementsError is raised\n # during PEP517 building and prints this text. setuptools raises SystemExit\n # when compilation fails right now, but it's possible this isn't stable\n # or a public API commitment so we'll remain ultra conservative.\n\n import pkg_resources\n\n print(\n \"\"\"\n =============================DEBUG ASSISTANCE=============================\n If you are seeing a compilation error please try the following steps to\n successfully install cryptography:\n 1) Upgrade to the latest pip and try again. This will fix errors for most\n users. 
See: https://pip.pypa.io/en/stable/installing/#upgrading-pip\n 2) Read https://cryptography.io/en/latest/installation/ for specific\n instructions for your platform.\n 3) Check our frequently asked questions for more information:\n https://cryptography.io/en/latest/faq/\n 4) Ensure you have a recent Rust toolchain installed:\n https://cryptography.io/en/latest/installation/#rust\n \"\"\"\n )\n print(f\" Python: {'.'.join(str(v) for v in sys.version_info[:3])}\")\n print(f\" platform: {platform.platform()}\")\n for dist in [\"pip\", \"setuptools\", \"setuptools_rust\"]:\n try:\n version = pkg_resources.get_distribution(dist).version\n except pkg_resources.DistributionNotFound:\n version = \"n/a\"\n print(f\" {dist}: {version}\")\n print(\n \"\"\"\\\n =============================DEBUG ASSISTANCE=============================\n \"\"\"\n )\n raise\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nimport os\nimport platform\nimport re\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup\n\ntry:\n from setuptools_rust import RustExtension\nexcept ImportError:\n print(\n \"\"\"\n =============================DEBUG ASSISTANCE==========================\n If you are seeing an error here please try the following to\n successfully install cryptography:\n\n Upgrade to the latest pip and try again. This will fix errors for most\n users. See: https://pip.pypa.io/en/stable/installing/#upgrading-pip\n =============================DEBUG ASSISTANCE==========================\n \"\"\"\n )\n raise\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\ntry:\n # See setup.cfg for most of the config metadata.\n setup(\n cffi_modules=[\n \"src/_cffi_src/build_openssl.py:ffi\",\n ],\n rust_extensions=[\n RustExtension(\n \"_rust\",\n \"src/rust/Cargo.toml\",\n py_limited_api=True,\n # Enable abi3 mode if we're not using PyPy.\n features=(\n []\n if platform.python_implementation() == \"PyPy\"\n else [\"pyo3/abi3-py36\"]\n ),\n rust_version=\">=1.48.0\",\n )\n ],\n )\nexcept: # noqa: E722\n # Note: This is a bare exception that re-raises so that we don't interfere\n # with anything the installation machinery might want to do. Because we\n # print this for any exception this msg can appear (e.g. in verbose logs)\n # even if there's no failure. For example, SetupRequirementsError is raised\n # during PEP517 building and prints this text. setuptools raises SystemExit\n # when compilation fails right now, but it's possible this isn't stable\n # or a public API commitment so we'll remain ultra conservative.\n\n import pkg_resources\n\n print(\n \"\"\"\n =============================DEBUG ASSISTANCE=============================\n If you are seeing a compilation error please try the following steps to\n successfully install cryptography:\n 1) Upgrade to the latest pip and try again. This will fix errors for most\n users. 
See: https://pip.pypa.io/en/stable/installing/#upgrading-pip\n 2) Read https://cryptography.io/en/latest/installation/ for specific\n instructions for your platform.\n 3) Check our frequently asked questions for more information:\n https://cryptography.io/en/latest/faq/\n 4) Ensure you have a recent Rust toolchain installed:\n https://cryptography.io/en/latest/installation/#rust\n \"\"\"\n )\n print(f\" Python: {'.'.join(str(v) for v in sys.version_info[:3])}\")\n print(f\" platform: {platform.platform()}\")\n for dist in [\"pip\", \"setuptools\", \"setuptools_rust\"]:\n try:\n version = pkg_resources.get_distribution(dist).version\n except pkg_resources.DistributionNotFound:\n version = \"n/a\"\n print(f\" {dist}: {version}\")\n version = \"n/a\"\n if shutil.which(\"rustc\") is not None:\n try:\n # If for any reason `rustc --version` fails, silently ignore it\n rustc_output = subprocess.run(\n [\"rustc\", \"--version\"],\n capture_output=True,\n timeout=0.5,\n encoding=\"utf8\",\n check=True,\n ).stdout\n version = re.sub(\"^rustc \", \"\", rustc_output.strip())\n except subprocess.SubprocessError:\n pass\n print(f\" rustc: {version}\")\n\n print(\n \"\"\"\\\n =============================DEBUG ASSISTANCE=============================\n \"\"\"\n )\n raise\n", "path": "setup.py"}]} | 1,274 | 245 |
gh_patches_debug_42599 | rasdani/github-patches | git_diff | StackStorm__st2-5467 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix multiple file support in linux.file_watch.line + black + fstring
When multiple file_watch rules are defined, the last defined file reference is used for all files being watched. This causes trigger-instances to fail rule enforcement.
Adding the reference to the logging shows `test1.log` has the reference ending with `8c505`
```
2021-11-30 18:50:40,434 140243179888112 INFO file_watch_sensor [-] Added file "/var/log/test1.log" with reference linux.7e55ad75-b10c-44db-b53e-95164a18c505
2021-11-30 18:50:41,459 140243179888112 INFO file_watch_sensor [-] Added file "/var/log/test2.log" with reference linux.590de8c1-c578-4125-9082-2cee03b030a9
```
When the file contents are updated a trigger is emitted by the sensor using the reference of `test2.log` ending in `b030a9`
```
root@u1804:~# st2 trigger-instance get 61a6649f164625c2d94dccb8 -y
id: 61a6649f164625c2d94dccb8
occurrence_time: '2021-11-30T17:51:27.294000Z'
payload:
file_name: test1.log
file_path: /var/log/test1.log
line: Tue Nov 30 18:51:27 CET 2021 dhcp
status: processed
trigger: linux.590de8c1-c578-4125-9082-2cee03b030a9
```
This PR adds a dictionary that tracks each `path_name`/`reference` pair and looks up the reference for the file that was altered when creating the trigger.
The code is formatted with black and updated to use fstrings since all instances will be using Python 3.6+
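A standalone sketch of that bookkeeping (simplified names, not the sensor's real class): each watched path stores the trigger reference it was registered with, and the line handler looks the reference up instead of reusing whichever trigger was added last.

```python
# Simplified model of the fix: map file paths to their trigger references.
class FileWatchSketch:
    def __init__(self, dispatch):
        self.file_ref = {}        # file_path -> trigger reference
        self.dispatch = dispatch  # stand-in for sensor_service.dispatch

    def add_trigger(self, trigger):
        path = trigger["parameters"]["file_path"]
        self.file_ref[path] = trigger["ref"]

    def remove_trigger(self, trigger):
        self.file_ref.pop(trigger["parameters"]["file_path"], None)

    def handle_line(self, path, line):
        ref = self.file_ref.get(path)
        if ref is None:
            return  # unknown file: never emit with another file's reference
        self.dispatch(trigger=ref, payload={"file_path": path, "line": line})


watcher = FileWatchSketch(dispatch=lambda **kw: print(kw))
watcher.add_trigger({"ref": "linux.7e55ad75", "parameters": {"file_path": "/var/log/test1.log"}})
watcher.add_trigger({"ref": "linux.590de8c1", "parameters": {"file_path": "/var/log/test2.log"}})
watcher.handle_line("/var/log/test1.log", "dhcp")  # dispatches with linux.7e55ad75
```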
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `contrib/linux/sensors/file_watch_sensor.py`
Content:
```
1 # Copyright 2020 The StackStorm Authors.
2 # Copyright 2019 Extreme Networks, Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import os
17
18 import eventlet
19
20 from logshipper.tail import Tail
21
22 from st2reactor.sensor.base import Sensor
23
24
25 class FileWatchSensor(Sensor):
26 def __init__(self, sensor_service, config=None):
27 super(FileWatchSensor, self).__init__(
28 sensor_service=sensor_service, config=config
29 )
30 self._trigger = None
31 self._logger = self._sensor_service.get_logger(__name__)
32 self._tail = None
33
34 def setup(self):
35 self._tail = Tail(filenames=[])
36 self._tail.handler = self._handle_line
37 self._tail.should_run = True
38
39 def run(self):
40 self._tail.run()
41
42 def cleanup(self):
43 if self._tail:
44 self._tail.should_run = False
45
46 try:
47 self._tail.notifier.stop()
48 except Exception:
49 self._logger.exception("Unable to stop the tail notifier")
50
51 def add_trigger(self, trigger):
52 file_path = trigger["parameters"].get("file_path", None)
53
54 if not file_path:
55 self._logger.error('Received trigger type without "file_path" field.')
56 return
57
58 self._trigger = trigger.get("ref", None)
59
60 if not self._trigger:
61 raise Exception("Trigger %s did not contain a ref." % trigger)
62
63 # Wait a bit to avoid initialization race in logshipper library
64 eventlet.sleep(1.0)
65
66 self._tail.add_file(filename=file_path)
67 self._logger.info('Added file "%s"' % (file_path))
68
69 def update_trigger(self, trigger):
70 pass
71
72 def remove_trigger(self, trigger):
73 file_path = trigger["parameters"].get("file_path", None)
74
75 if not file_path:
76 self._logger.error('Received trigger type without "file_path" field.')
77 return
78
79 self._tail.remove_file(filename=file_path)
80 self._trigger = None
81
82 self._logger.info('Removed file "%s"' % (file_path))
83
84 def _handle_line(self, file_path, line):
85 trigger = self._trigger
86 payload = {
87 "file_path": file_path,
88 "file_name": os.path.basename(file_path),
89 "line": line,
90 }
91 self._logger.debug(
92 "Sending payload %s for trigger %s to sensor_service.", payload, trigger
93 )
94 self.sensor_service.dispatch(trigger=trigger, payload=payload)
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/contrib/linux/sensors/file_watch_sensor.py b/contrib/linux/sensors/file_watch_sensor.py
--- a/contrib/linux/sensors/file_watch_sensor.py
+++ b/contrib/linux/sensors/file_watch_sensor.py
@@ -14,7 +14,6 @@
# limitations under the License.
import os
-
import eventlet
from logshipper.tail import Tail
@@ -27,44 +26,46 @@
super(FileWatchSensor, self).__init__(
sensor_service=sensor_service, config=config
)
- self._trigger = None
- self._logger = self._sensor_service.get_logger(__name__)
- self._tail = None
+ self.log = self._sensor_service.get_logger(__name__)
+ self.tail = None
+ self.file_ref = {}
def setup(self):
- self._tail = Tail(filenames=[])
- self._tail.handler = self._handle_line
- self._tail.should_run = True
+ self.tail = Tail(filenames=[])
+ self.tail.handler = self._handle_line
+ self.tail.should_run = True
def run(self):
- self._tail.run()
+ self.tail.run()
def cleanup(self):
- if self._tail:
- self._tail.should_run = False
+ if self.tail:
+ self.tail.should_run = False
try:
- self._tail.notifier.stop()
+ self.tail.notifier.stop()
except Exception:
- self._logger.exception("Unable to stop the tail notifier")
+ self.log.exception("Unable to stop the tail notifier")
def add_trigger(self, trigger):
file_path = trigger["parameters"].get("file_path", None)
if not file_path:
- self._logger.error('Received trigger type without "file_path" field.')
+ self.log.error('Received trigger type without "file_path" field.')
return
- self._trigger = trigger.get("ref", None)
+ trigger = trigger.get("ref", None)
- if not self._trigger:
- raise Exception("Trigger %s did not contain a ref." % trigger)
+ if not trigger:
+ raise Exception(f"Trigger {trigger} did not contain a ref.")
# Wait a bit to avoid initialization race in logshipper library
eventlet.sleep(1.0)
- self._tail.add_file(filename=file_path)
- self._logger.info('Added file "%s"' % (file_path))
+ self.tail.add_file(filename=file_path)
+ self.file_ref[file_path] = trigger
+
+ self.log.info(f"Added file '{file_path}' ({trigger}) to watch list.")
def update_trigger(self, trigger):
pass
@@ -73,22 +74,28 @@
file_path = trigger["parameters"].get("file_path", None)
if not file_path:
- self._logger.error('Received trigger type without "file_path" field.')
+ self.log.error("Received trigger type without 'file_path' field.")
return
- self._tail.remove_file(filename=file_path)
- self._trigger = None
+ self.tail.remove_file(filename=file_path)
+ self.file_ref.pop(file_path)
- self._logger.info('Removed file "%s"' % (file_path))
+ self.log.info(f"Removed file '{file_path}' ({trigger}) from watch list.")
def _handle_line(self, file_path, line):
- trigger = self._trigger
+ if file_path not in self.file_ref:
+ self.log.error(
+ f"No reference found for {file_path}, unable to emit trigger!"
+ )
+ return
+
+ trigger = self.file_ref[file_path]
payload = {
"file_path": file_path,
"file_name": os.path.basename(file_path),
"line": line,
}
- self._logger.debug(
- "Sending payload %s for trigger %s to sensor_service.", payload, trigger
+ self.log.debug(
+ f"Sending payload {payload} for trigger {trigger} to sensor_service."
)
self.sensor_service.dispatch(trigger=trigger, payload=payload)
| {"golden_diff": "diff --git a/contrib/linux/sensors/file_watch_sensor.py b/contrib/linux/sensors/file_watch_sensor.py\n--- a/contrib/linux/sensors/file_watch_sensor.py\n+++ b/contrib/linux/sensors/file_watch_sensor.py\n@@ -14,7 +14,6 @@\n # limitations under the License.\n \n import os\n-\n import eventlet\n \n from logshipper.tail import Tail\n@@ -27,44 +26,46 @@\n super(FileWatchSensor, self).__init__(\n sensor_service=sensor_service, config=config\n )\n- self._trigger = None\n- self._logger = self._sensor_service.get_logger(__name__)\n- self._tail = None\n+ self.log = self._sensor_service.get_logger(__name__)\n+ self.tail = None\n+ self.file_ref = {}\n \n def setup(self):\n- self._tail = Tail(filenames=[])\n- self._tail.handler = self._handle_line\n- self._tail.should_run = True\n+ self.tail = Tail(filenames=[])\n+ self.tail.handler = self._handle_line\n+ self.tail.should_run = True\n \n def run(self):\n- self._tail.run()\n+ self.tail.run()\n \n def cleanup(self):\n- if self._tail:\n- self._tail.should_run = False\n+ if self.tail:\n+ self.tail.should_run = False\n \n try:\n- self._tail.notifier.stop()\n+ self.tail.notifier.stop()\n except Exception:\n- self._logger.exception(\"Unable to stop the tail notifier\")\n+ self.log.exception(\"Unable to stop the tail notifier\")\n \n def add_trigger(self, trigger):\n file_path = trigger[\"parameters\"].get(\"file_path\", None)\n \n if not file_path:\n- self._logger.error('Received trigger type without \"file_path\" field.')\n+ self.log.error('Received trigger type without \"file_path\" field.')\n return\n \n- self._trigger = trigger.get(\"ref\", None)\n+ trigger = trigger.get(\"ref\", None)\n \n- if not self._trigger:\n- raise Exception(\"Trigger %s did not contain a ref.\" % trigger)\n+ if not trigger:\n+ raise Exception(f\"Trigger {trigger} did not contain a ref.\")\n \n # Wait a bit to avoid initialization race in logshipper library\n eventlet.sleep(1.0)\n \n- self._tail.add_file(filename=file_path)\n- self._logger.info('Added file \"%s\"' % (file_path))\n+ self.tail.add_file(filename=file_path)\n+ self.file_ref[file_path] = trigger\n+\n+ self.log.info(f\"Added file '{file_path}' ({trigger}) to watch list.\")\n \n def update_trigger(self, trigger):\n pass\n@@ -73,22 +74,28 @@\n file_path = trigger[\"parameters\"].get(\"file_path\", None)\n \n if not file_path:\n- self._logger.error('Received trigger type without \"file_path\" field.')\n+ self.log.error(\"Received trigger type without 'file_path' field.\")\n return\n \n- self._tail.remove_file(filename=file_path)\n- self._trigger = None\n+ self.tail.remove_file(filename=file_path)\n+ self.file_ref.pop(file_path)\n \n- self._logger.info('Removed file \"%s\"' % (file_path))\n+ self.log.info(f\"Removed file '{file_path}' ({trigger}) from watch list.\")\n \n def _handle_line(self, file_path, line):\n- trigger = self._trigger\n+ if file_path not in self.file_ref:\n+ self.log.error(\n+ f\"No reference found for {file_path}, unable to emit trigger!\"\n+ )\n+ return\n+\n+ trigger = self.file_ref[file_path]\n payload = {\n \"file_path\": file_path,\n \"file_name\": os.path.basename(file_path),\n \"line\": line,\n }\n- self._logger.debug(\n- \"Sending payload %s for trigger %s to sensor_service.\", payload, trigger\n+ self.log.debug(\n+ f\"Sending payload {payload} for trigger {trigger} to sensor_service.\"\n )\n self.sensor_service.dispatch(trigger=trigger, payload=payload)\n", "issue": "Fix multiple file support in linux.file_watch.line + black + fstring\nWhen multiple file_watch rules are defined, 
the last defined file reference is used for all files being watched. This causes trigger-instances to fail rule enforcement.\r\n\r\nAdding the reference to the logging shows `test1.log` has the reference ending with `8c505`\r\n```\r\n2021-11-30 18:50:40,434 140243179888112 INFO file_watch_sensor [-] Added file \"/var/log/test1.log\" with reference linux.7e55ad75-b10c-44db-b53e-95164a18c505\r\n2021-11-30 18:50:41,459 140243179888112 INFO file_watch_sensor [-] Added file \"/var/log/test2.log\" with reference linux.590de8c1-c578-4125-9082-2cee03b030a9\r\n```\r\n\r\nWhen the file contents are updated a trigger is emitted by the sensor using the reference of `test2.log` ending in `b030a9`\r\n```\r\nroot@u1804:~# st2 trigger-instance get 61a6649f164625c2d94dccb8 -y\r\nid: 61a6649f164625c2d94dccb8\r\noccurrence_time: '2021-11-30T17:51:27.294000Z'\r\npayload:\r\n file_name: test1.log\r\n file_path: /var/log/test1.log\r\n line: Tue Nov 30 18:51:27 CET 2021 dhcp\r\nstatus: processed\r\ntrigger: linux.590de8c1-c578-4125-9082-2cee03b030a9\r\n```\r\n\r\nThis PR consists of adding a dictionary that is used to track the `path_name` and `reference` pair and looks up the reference for the file that was altered when creating the trigger.\r\n\r\nThe code is formatted with black and updated to use fstrings since all instances will be using Python 3.6+\n", "before_files": [{"content": "# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nimport eventlet\n\nfrom logshipper.tail import Tail\n\nfrom st2reactor.sensor.base import Sensor\n\n\nclass FileWatchSensor(Sensor):\n def __init__(self, sensor_service, config=None):\n super(FileWatchSensor, self).__init__(\n sensor_service=sensor_service, config=config\n )\n self._trigger = None\n self._logger = self._sensor_service.get_logger(__name__)\n self._tail = None\n\n def setup(self):\n self._tail = Tail(filenames=[])\n self._tail.handler = self._handle_line\n self._tail.should_run = True\n\n def run(self):\n self._tail.run()\n\n def cleanup(self):\n if self._tail:\n self._tail.should_run = False\n\n try:\n self._tail.notifier.stop()\n except Exception:\n self._logger.exception(\"Unable to stop the tail notifier\")\n\n def add_trigger(self, trigger):\n file_path = trigger[\"parameters\"].get(\"file_path\", None)\n\n if not file_path:\n self._logger.error('Received trigger type without \"file_path\" field.')\n return\n\n self._trigger = trigger.get(\"ref\", None)\n\n if not self._trigger:\n raise Exception(\"Trigger %s did not contain a ref.\" % trigger)\n\n # Wait a bit to avoid initialization race in logshipper library\n eventlet.sleep(1.0)\n\n self._tail.add_file(filename=file_path)\n self._logger.info('Added file \"%s\"' % (file_path))\n\n def update_trigger(self, trigger):\n pass\n\n def remove_trigger(self, trigger):\n file_path = trigger[\"parameters\"].get(\"file_path\", None)\n\n if not file_path:\n self._logger.error('Received trigger type without \"file_path\" 
field.')\n return\n\n self._tail.remove_file(filename=file_path)\n self._trigger = None\n\n self._logger.info('Removed file \"%s\"' % (file_path))\n\n def _handle_line(self, file_path, line):\n trigger = self._trigger\n payload = {\n \"file_path\": file_path,\n \"file_name\": os.path.basename(file_path),\n \"line\": line,\n }\n self._logger.debug(\n \"Sending payload %s for trigger %s to sensor_service.\", payload, trigger\n )\n self.sensor_service.dispatch(trigger=trigger, payload=payload)\n", "path": "contrib/linux/sensors/file_watch_sensor.py"}], "after_files": [{"content": "# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport eventlet\n\nfrom logshipper.tail import Tail\n\nfrom st2reactor.sensor.base import Sensor\n\n\nclass FileWatchSensor(Sensor):\n def __init__(self, sensor_service, config=None):\n super(FileWatchSensor, self).__init__(\n sensor_service=sensor_service, config=config\n )\n self.log = self._sensor_service.get_logger(__name__)\n self.tail = None\n self.file_ref = {}\n\n def setup(self):\n self.tail = Tail(filenames=[])\n self.tail.handler = self._handle_line\n self.tail.should_run = True\n\n def run(self):\n self.tail.run()\n\n def cleanup(self):\n if self.tail:\n self.tail.should_run = False\n\n try:\n self.tail.notifier.stop()\n except Exception:\n self.log.exception(\"Unable to stop the tail notifier\")\n\n def add_trigger(self, trigger):\n file_path = trigger[\"parameters\"].get(\"file_path\", None)\n\n if not file_path:\n self.log.error('Received trigger type without \"file_path\" field.')\n return\n\n trigger = trigger.get(\"ref\", None)\n\n if not trigger:\n raise Exception(f\"Trigger {trigger} did not contain a ref.\")\n\n # Wait a bit to avoid initialization race in logshipper library\n eventlet.sleep(1.0)\n\n self.tail.add_file(filename=file_path)\n self.file_ref[file_path] = trigger\n\n self.log.info(f\"Added file '{file_path}' ({trigger}) to watch list.\")\n\n def update_trigger(self, trigger):\n pass\n\n def remove_trigger(self, trigger):\n file_path = trigger[\"parameters\"].get(\"file_path\", None)\n\n if not file_path:\n self.log.error(\"Received trigger type without 'file_path' field.\")\n return\n\n self.tail.remove_file(filename=file_path)\n self.file_ref.pop(file_path)\n\n self.log.info(f\"Removed file '{file_path}' ({trigger}) from watch list.\")\n\n def _handle_line(self, file_path, line):\n if file_path not in self.file_ref:\n self.log.error(\n f\"No reference found for {file_path}, unable to emit trigger!\"\n )\n return\n\n trigger = self.file_ref[file_path]\n payload = {\n \"file_path\": file_path,\n \"file_name\": os.path.basename(file_path),\n \"line\": line,\n }\n self.log.debug(\n f\"Sending payload {payload} for trigger {trigger} to sensor_service.\"\n )\n self.sensor_service.dispatch(trigger=trigger, payload=payload)\n", "path": "contrib/linux/sensors/file_watch_sensor.py"}]} | 1,654 | 917 |
gh_patches_debug_2998 | rasdani/github-patches | git_diff | archlinux__archinstall-763 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
In Xfce, the xarchiver package is needed so the file manager offers the "create archive" and "extract here" actions.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `profiles/xfce4.py`
Content:
```
1 # A desktop environment using "Xfce4"
2
3 import archinstall
4
5 is_top_level_profile = False
6
7 __packages__ = [
8 "xfce4",
9 "xfce4-goodies",
10 "pavucontrol",
11 "lightdm",
12 "lightdm-gtk-greeter",
13 "gvfs",
14 "network-manager-applet",
15 ]
16
17
18 def _prep_function(*args, **kwargs):
19 """
20 Magic function called by the importing installer
21 before continuing any further. It also avoids executing any
22 other code in this stage. So it's a safe way to ask the user
23 for more input before any other installer steps start.
24 """
25
26 # XFCE requires a functional xorg installation.
27 profile = archinstall.Profile(None, 'xorg')
28 with profile.load_instructions(namespace='xorg.py') as imported:
29 if hasattr(imported, '_prep_function'):
30 return imported._prep_function()
31 else:
32 print('Deprecated (??): xorg profile has no _prep_function() anymore')
33
34
35 # Ensures that this code only gets executed if executed
36 # through importlib.util.spec_from_file_location("xfce4", "/somewhere/xfce4.py")
37 # or through conventional import xfce4
38 if __name__ == 'xfce4':
39 # Install dependency profiles
40 archinstall.storage['installation_session'].install_profile('xorg')
41
42 # Install the XFCE4 packages
43 archinstall.storage['installation_session'].add_additional_packages(__packages__)
44
45 archinstall.storage['installation_session'].enable_service('lightdm') # Light Display Manager
46
```
Path: `profiles/kde.py`
Content:
```
1 # A desktop environment using "KDE".
2
3 import archinstall
4
5 is_top_level_profile = False
6
7 __packages__ = [
8 "plasma-meta",
9 "konsole",
10 "kate",
11 "dolphin",
12 "sddm",
13 "plasma-wayland-session",
14 "egl-wayland",
15 ]
16
17
18 # TODO: Remove hard dependency of bash (due to .bash_profile)
19
20
21 def _prep_function(*args, **kwargs):
22 """
23 Magic function called by the importing installer
24 before continuing any further. It also avoids executing any
25 other code in this stage. So it's a safe way to ask the user
26 for more input before any other installer steps start.
27 """
28
29 # KDE requires a functioning Xorg installation.
30 profile = archinstall.Profile(None, 'xorg')
31 with profile.load_instructions(namespace='xorg.py') as imported:
32 if hasattr(imported, '_prep_function'):
33 return imported._prep_function()
34 else:
35 print('Deprecated (??): xorg profile has no _prep_function() anymore')
36
37
38 """
39 def _post_install(*args, **kwargs):
40 if "nvidia" in _gfx_driver_packages:
41 print("Plasma Wayland has known compatibility issues with the proprietary Nvidia driver")
42 print("After booting, you can choose between Wayland and Xorg using the drop-down menu")
43 return True
44 """
45
46 # Ensures that this code only gets executed if executed
47 # through importlib.util.spec_from_file_location("kde", "/somewhere/kde.py")
48 # or through conventional import kde
49 if __name__ == 'kde':
50 # Install dependency profiles
51 archinstall.storage['installation_session'].install_profile('xorg')
52
53 # Install the KDE packages
54 archinstall.storage['installation_session'].add_additional_packages(__packages__)
55
56 # Enable autostart of KDE for all users
57 archinstall.storage['installation_session'].enable_service('sddm')
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/profiles/kde.py b/profiles/kde.py
--- a/profiles/kde.py
+++ b/profiles/kde.py
@@ -9,6 +9,7 @@
"konsole",
"kate",
"dolphin",
+ "ark",
"sddm",
"plasma-wayland-session",
"egl-wayland",
diff --git a/profiles/xfce4.py b/profiles/xfce4.py
--- a/profiles/xfce4.py
+++ b/profiles/xfce4.py
@@ -12,6 +12,7 @@
"lightdm-gtk-greeter",
"gvfs",
"network-manager-applet",
+ "xarchiver"
]
| {"golden_diff": "diff --git a/profiles/kde.py b/profiles/kde.py\n--- a/profiles/kde.py\n+++ b/profiles/kde.py\n@@ -9,6 +9,7 @@\n \t\"konsole\",\n \t\"kate\",\n \t\"dolphin\",\n+\t\"ark\",\n \t\"sddm\",\n \t\"plasma-wayland-session\",\n \t\"egl-wayland\",\ndiff --git a/profiles/xfce4.py b/profiles/xfce4.py\n--- a/profiles/xfce4.py\n+++ b/profiles/xfce4.py\n@@ -12,6 +12,7 @@\n \t\"lightdm-gtk-greeter\",\n \t\"gvfs\",\n \t\"network-manager-applet\",\n+\t\"xarchiver\"\n ]\n", "issue": "in xfce, it need xarchiver for create archive & extract here-to\nin xfce, it need xarchiver for create archive & extract here-to\nin xfce, it need xarchiver for create archive & extract here-to\nin xfce, it need xarchiver for create archive & extract here-to\n", "before_files": [{"content": "# A desktop environment using \"Xfce4\"\n\nimport archinstall\n\nis_top_level_profile = False\n\n__packages__ = [\n\t\"xfce4\",\n\t\"xfce4-goodies\",\n\t\"pavucontrol\",\n\t\"lightdm\",\n\t\"lightdm-gtk-greeter\",\n\t\"gvfs\",\n\t\"network-manager-applet\",\n]\n\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\t# XFCE requires a functional xorg installation.\n\tprofile = archinstall.Profile(None, 'xorg')\n\twith profile.load_instructions(namespace='xorg.py') as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint('Deprecated (??): xorg profile has no _prep_function() anymore')\n\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"xfce4\", \"/somewhere/xfce4.py\")\n# or through conventional import xfce4\nif __name__ == 'xfce4':\n\t# Install dependency profiles\n\tarchinstall.storage['installation_session'].install_profile('xorg')\n\n\t# Install the XFCE4 packages\n\tarchinstall.storage['installation_session'].add_additional_packages(__packages__)\n\n\tarchinstall.storage['installation_session'].enable_service('lightdm') # Light Display Manager\n", "path": "profiles/xfce4.py"}, {"content": "# A desktop environment using \"KDE\".\n\nimport archinstall\n\nis_top_level_profile = False\n\n__packages__ = [\n\t\"plasma-meta\",\n\t\"konsole\",\n\t\"kate\",\n\t\"dolphin\",\n\t\"sddm\",\n\t\"plasma-wayland-session\",\n\t\"egl-wayland\",\n]\n\n\n# TODO: Remove hard dependency of bash (due to .bash_profile)\n\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. 
So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\t# KDE requires a functioning Xorg installation.\n\tprofile = archinstall.Profile(None, 'xorg')\n\twith profile.load_instructions(namespace='xorg.py') as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint('Deprecated (??): xorg profile has no _prep_function() anymore')\n\n\n\"\"\"\ndef _post_install(*args, **kwargs):\n\tif \"nvidia\" in _gfx_driver_packages:\n\t\tprint(\"Plasma Wayland has known compatibility issues with the proprietary Nvidia driver\")\n\tprint(\"After booting, you can choose between Wayland and Xorg using the drop-down menu\")\n\treturn True\n\"\"\"\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"kde\", \"/somewhere/kde.py\")\n# or through conventional import kde\nif __name__ == 'kde':\n\t# Install dependency profiles\n\tarchinstall.storage['installation_session'].install_profile('xorg')\n\n\t# Install the KDE packages\n\tarchinstall.storage['installation_session'].add_additional_packages(__packages__)\n\n\t# Enable autostart of KDE for all users\n\tarchinstall.storage['installation_session'].enable_service('sddm')\n", "path": "profiles/kde.py"}], "after_files": [{"content": "# A desktop environment using \"Xfce4\"\n\nimport archinstall\n\nis_top_level_profile = False\n\n__packages__ = [\n\t\"xfce4\",\n\t\"xfce4-goodies\",\n\t\"pavucontrol\",\n\t\"lightdm\",\n\t\"lightdm-gtk-greeter\",\n\t\"gvfs\",\n\t\"network-manager-applet\",\n\t\"xarchiver\"\n]\n\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\t# XFCE requires a functional xorg installation.\n\tprofile = archinstall.Profile(None, 'xorg')\n\twith profile.load_instructions(namespace='xorg.py') as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint('Deprecated (??): xorg profile has no _prep_function() anymore')\n\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"xfce4\", \"/somewhere/xfce4.py\")\n# or through conventional import xfce4\nif __name__ == 'xfce4':\n\t# Install dependency profiles\n\tarchinstall.storage['installation_session'].install_profile('xorg')\n\n\t# Install the XFCE4 packages\n\tarchinstall.storage['installation_session'].add_additional_packages(__packages__)\n\n\tarchinstall.storage['installation_session'].enable_service('lightdm') # Light Display Manager\n", "path": "profiles/xfce4.py"}, {"content": "# A desktop environment using \"KDE\".\n\nimport archinstall\n\nis_top_level_profile = False\n\n__packages__ = [\n\t\"plasma-meta\",\n\t\"konsole\",\n\t\"kate\",\n\t\"dolphin\",\n\t\"ark\",\n\t\"sddm\",\n\t\"plasma-wayland-session\",\n\t\"egl-wayland\",\n]\n\n\n# TODO: Remove hard dependency of bash (due to .bash_profile)\n\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. 
So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\t# KDE requires a functioning Xorg installation.\n\tprofile = archinstall.Profile(None, 'xorg')\n\twith profile.load_instructions(namespace='xorg.py') as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint('Deprecated (??): xorg profile has no _prep_function() anymore')\n\n\n\"\"\"\ndef _post_install(*args, **kwargs):\n\tif \"nvidia\" in _gfx_driver_packages:\n\t\tprint(\"Plasma Wayland has known compatibility issues with the proprietary Nvidia driver\")\n\tprint(\"After booting, you can choose between Wayland and Xorg using the drop-down menu\")\n\treturn True\n\"\"\"\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"kde\", \"/somewhere/kde.py\")\n# or through conventional import kde\nif __name__ == 'kde':\n\t# Install dependency profiles\n\tarchinstall.storage['installation_session'].install_profile('xorg')\n\n\t# Install the KDE packages\n\tarchinstall.storage['installation_session'].add_additional_packages(__packages__)\n\n\t# Enable autostart of KDE for all users\n\tarchinstall.storage['installation_session'].enable_service('sddm')\n", "path": "profiles/kde.py"}]} | 1,326 | 170 |
gh_patches_debug_2479 | rasdani/github-patches | git_diff | boto__boto-2475 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
VPC Peering Connection "delete()" calls wrong method
The "delete()" method of VpcPeeringConnection calls "self.connection.delete_vpc(self.id)" instead of "self.connection.delete_vpc_peering_connection(self.id)"
**File:** boto/vpc/vpc_peering_connection.py
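A minimal sketch of the correction the report points at (the peering-specific call, `delete_vpc_peering_connection`, is named by the issue itself; the class below is a stand-in for illustration, not boto's full implementation):

```python
# Stand-in class illustrating the one-line fix.
class VpcPeeringConnectionSketch:
    def __init__(self, connection, peering_id):
        self.connection = connection
        self.id = peering_id

    def delete(self):
        # Wrong: self.connection.delete_vpc(self.id) deletes a VPC, not the peering link.
        return self.connection.delete_vpc_peering_connection(self.id)
```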
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `boto/vpc/vpc_peering_connection.py`
Content:
```
1 # Copyright (c) 2014 Skytap http://skytap.com/
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a
4 # copy of this software and associated documentation files (the
5 # "Software"), to deal in the Software without restriction, including
6 # without limitation the rights to use, copy, modify, merge, publish, dis-
7 # tribute, sublicense, and/or sell copies of the Software, and to permit
8 # persons to whom the Software is furnished to do so, subject to the fol-
9 # lowing conditions:
10 #
11 # The above copyright notice and this permission notice shall be included
12 # in all copies or substantial portions of the Software.
13 #
14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
16 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
17 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20 # IN THE SOFTWARE.
21
22 """
23 Represents a VPC Peering Connection.
24 """
25
26 from boto.ec2.ec2object import TaggedEC2Object
27
28 class VpcInfo(object):
29 def __init__(self):
30 """
31 Information on peer Vpc.
32
33 :ivar id: The unique ID of peer Vpc.
34 :ivar owner_id: Owner of peer Vpc.
35 :ivar cidr_block: CIDR Block of peer Vpc.
36 """
37
38 self.vpc_id = None
39 self.owner_id = None
40 self.cidr_block = None
41
42 def __repr__(self):
43 return 'VpcInfo:%s' % self.vpc_id
44
45 def startElement(self, name, attrs, connection):
46 pass
47
48 def endElement(self, name, value, connection):
49 if name == 'vpcId':
50 self.vpc_id = value
51 elif name == 'ownerId':
52 self.owner_id = value
53 elif name == 'cidrBlock':
54 self.cidr_block = value
55 else:
56 setattr(self, name, value)
57
58 class VpcPeeringConnectionStatus(object):
59 """
60 The status of VPC peering connection.
61
62 :ivar code: The status of the VPC peering connection. Valid values are:
63
64 * pending-acceptance
65 * failed
66 * expired
67 * provisioning
68 * active
69 * deleted
70 * rejected
71
72 :ivar message: A message that provides more information about the status of the VPC peering connection, if applicable.
73 """
74 def __init__(self, code=0, message=None):
75 self.code = code
76 self.message = message
77
78 def __repr__(self):
79 return '%s(%d)' % (self.code, self.message)
80
81 def startElement(self, name, attrs, connection):
82 pass
83
84 def endElement(self, name, value, connection):
85 if name == 'code':
86 self.code = value
87 elif name == 'message':
88 self.message = value
89 else:
90 setattr(self, name, value)
91
92
93
94 class VpcPeeringConnection(TaggedEC2Object):
95
96 def __init__(self, connection=None):
97 """
98 Represents a VPC peering connection.
99
100 :ivar id: The unique ID of the VPC peering connection.
101 :ivar accepter_vpc_info: Information on peer Vpc.
102 :ivar requester_vpc_info: Information on requester Vpc.
103 :ivar expiration_time: The expiration date and time for the VPC peering connection.
104 :ivar status_code: The status of the VPC peering connection.
105 :ivar status_message: A message that provides more information about the status of the VPC peering connection, if applicable.
106 """
107 super(VpcPeeringConnection, self).__init__(connection)
108 self.id = None
109 self.accepter_vpc_info = VpcInfo()
110 self.requester_vpc_info = VpcInfo()
111 self.expiration_time = None
112 self._status = VpcPeeringConnectionStatus()
113
114 @property
115 def status_code(self):
116 return self._status.code
117
118 @property
119 def status_message(self):
120 return self._status.message
121
122 def __repr__(self):
123 return 'VpcPeeringConnection:%s' % self.id
124
125 def startElement(self, name, attrs, connection):
126 retval = super(VpcPeeringConnection, self).startElement(name, attrs, connection)
127 if retval is not None:
128 return retval
129
130 if name == 'requesterVpcInfo':
131 return self.requester_vpc_info
132 elif name == 'accepterVpcInfo':
133 return self.accepter_vpc_info
134 elif name == 'status':
135 return self._status
136
137 return None
138
139 def endElement(self, name, value, connection):
140 if name == 'vpcPeeringConnectionId':
141 self.id = value
142 elif name == 'expirationTime':
143 self.expiration_time = value
144 else:
145 setattr(self, name, value)
146
147 def delete(self):
148 return self.connection.delete_vpc(self.id)
149
150 def _update(self, updated):
151 self.__dict__.update(updated.__dict__)
152
153 def update(self, validate=False, dry_run=False):
154 vpc_peering_connection_list = self.connection.get_all_vpc_peering_connections(
155 [self.id],
156 dry_run=dry_run
157 )
158 if len(vpc_peering_connection_list):
159 updated_vpc_peering_connection = vpc_peering_connection_list[0]
160 self._update(updated_vpc_peering_connection)
161 elif validate:
162 raise ValueError('%s is not a valid VpcPeeringConnection ID' % (self.id,))
163 return self.status_code
164
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/boto/vpc/vpc_peering_connection.py b/boto/vpc/vpc_peering_connection.py
--- a/boto/vpc/vpc_peering_connection.py
+++ b/boto/vpc/vpc_peering_connection.py
@@ -145,7 +145,7 @@
setattr(self, name, value)
def delete(self):
- return self.connection.delete_vpc(self.id)
+ return self.connection.delete_vpc_peering_connection(self.id)
def _update(self, updated):
self.__dict__.update(updated.__dict__)
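As a minimal sketch of the corrected dispatch (using `unittest.mock` and a simplified stand-in class rather than boto itself, with a made-up `pcx-...` id), the fix can be exercised like this:

```python
from unittest import mock

class VpcPeeringConnection:
    """Simplified stand-in mirroring only the patched delete() behaviour."""
    def __init__(self, connection, peering_id):
        self.connection = connection
        self.id = peering_id

    def delete(self):
        # Post-fix behaviour: route to the peering-specific EC2 call,
        # not the plain delete_vpc call the issue reports.
        return self.connection.delete_vpc_peering_connection(self.id)

conn = mock.Mock()
VpcPeeringConnection(conn, "pcx-12345678").delete()

conn.delete_vpc_peering_connection.assert_called_once_with("pcx-12345678")
conn.delete_vpc.assert_not_called()
```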
| {"golden_diff": "diff --git a/boto/vpc/vpc_peering_connection.py b/boto/vpc/vpc_peering_connection.py\n--- a/boto/vpc/vpc_peering_connection.py\n+++ b/boto/vpc/vpc_peering_connection.py\n@@ -145,7 +145,7 @@\n setattr(self, name, value)\n \n def delete(self):\n- return self.connection.delete_vpc(self.id)\n+ return self.connection.delete_vpc_peering_connection(self.id)\n \n def _update(self, updated):\n self.__dict__.update(updated.__dict__)\n", "issue": "VPC Peering Connection \"delete()\" calls wrong method\nThe \"delete()\" method of VpcPeeringConnection calls \"self.connection.delete_vpc(self.id)\" instead of \"self.connection.delete_vpc_peering_connection(self.id)\"\n\n**File:** boto/vpc/vpc_peering_connection.py\n\n", "before_files": [{"content": "# Copyright (c) 2014 Skytap http://skytap.com/\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\n\"\"\"\nRepresents a VPC Peering Connection.\n\"\"\"\n\nfrom boto.ec2.ec2object import TaggedEC2Object\n\nclass VpcInfo(object):\n def __init__(self):\n \"\"\"\n Information on peer Vpc.\n \n :ivar id: The unique ID of peer Vpc.\n :ivar owner_id: Owner of peer Vpc.\n :ivar cidr_block: CIDR Block of peer Vpc.\n \"\"\"\n\n self.vpc_id = None\n self.owner_id = None\n self.cidr_block = None\n\n def __repr__(self):\n return 'VpcInfo:%s' % self.vpc_id\n\n def startElement(self, name, attrs, connection):\n pass\n\n def endElement(self, name, value, connection):\n if name == 'vpcId':\n self.vpc_id = value\n elif name == 'ownerId':\n self.owner_id = value\n elif name == 'cidrBlock':\n self.cidr_block = value\n else:\n setattr(self, name, value)\n\nclass VpcPeeringConnectionStatus(object):\n \"\"\"\n The status of VPC peering connection.\n\n :ivar code: The status of the VPC peering connection. 
Valid values are:\n\n * pending-acceptance\n * failed\n * expired\n * provisioning\n * active\n * deleted\n * rejected\n\n :ivar message: A message that provides more information about the status of the VPC peering connection, if applicable.\n \"\"\"\n def __init__(self, code=0, message=None):\n self.code = code\n self.message = message\n\n def __repr__(self):\n return '%s(%d)' % (self.code, self.message)\n\n def startElement(self, name, attrs, connection):\n pass\n\n def endElement(self, name, value, connection):\n if name == 'code':\n self.code = value\n elif name == 'message':\n self.message = value\n else:\n setattr(self, name, value)\n\n \n\nclass VpcPeeringConnection(TaggedEC2Object):\n\n def __init__(self, connection=None):\n \"\"\"\n Represents a VPC peering connection.\n\n :ivar id: The unique ID of the VPC peering connection.\n :ivar accepter_vpc_info: Information on peer Vpc.\n :ivar requester_vpc_info: Information on requester Vpc.\n :ivar expiration_time: The expiration date and time for the VPC peering connection.\n :ivar status_code: The status of the VPC peering connection.\n :ivar status_message: A message that provides more information about the status of the VPC peering connection, if applicable.\n \"\"\"\n super(VpcPeeringConnection, self).__init__(connection)\n self.id = None\n self.accepter_vpc_info = VpcInfo()\n self.requester_vpc_info = VpcInfo()\n self.expiration_time = None\n self._status = VpcPeeringConnectionStatus()\n\n @property\n def status_code(self):\n return self._status.code\n\n @property\n def status_message(self):\n return self._status.message\n\n def __repr__(self):\n return 'VpcPeeringConnection:%s' % self.id\n\n def startElement(self, name, attrs, connection):\n retval = super(VpcPeeringConnection, self).startElement(name, attrs, connection)\n if retval is not None:\n return retval\n \n if name == 'requesterVpcInfo':\n return self.requester_vpc_info\n elif name == 'accepterVpcInfo':\n return self.accepter_vpc_info\n elif name == 'status':\n return self._status\n\n return None\n\n def endElement(self, name, value, connection):\n if name == 'vpcPeeringConnectionId':\n self.id = value\n elif name == 'expirationTime':\n self.expiration_time = value\n else:\n setattr(self, name, value)\n\n def delete(self):\n return self.connection.delete_vpc(self.id)\n\n def _update(self, updated):\n self.__dict__.update(updated.__dict__)\n\n def update(self, validate=False, dry_run=False):\n vpc_peering_connection_list = self.connection.get_all_vpc_peering_connections(\n [self.id],\n dry_run=dry_run\n )\n if len(vpc_peering_connection_list):\n updated_vpc_peering_connection = vpc_peering_connection_list[0]\n self._update(updated_vpc_peering_connection)\n elif validate:\n raise ValueError('%s is not a valid VpcPeeringConnection ID' % (self.id,))\n return self.status_code\n", "path": "boto/vpc/vpc_peering_connection.py"}], "after_files": [{"content": "# Copyright (c) 2014 Skytap http://skytap.com/\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of 
the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\n\"\"\"\nRepresents a VPC Peering Connection.\n\"\"\"\n\nfrom boto.ec2.ec2object import TaggedEC2Object\n\nclass VpcInfo(object):\n def __init__(self):\n \"\"\"\n Information on peer Vpc.\n \n :ivar id: The unique ID of peer Vpc.\n :ivar owner_id: Owner of peer Vpc.\n :ivar cidr_block: CIDR Block of peer Vpc.\n \"\"\"\n\n self.vpc_id = None\n self.owner_id = None\n self.cidr_block = None\n\n def __repr__(self):\n return 'VpcInfo:%s' % self.vpc_id\n\n def startElement(self, name, attrs, connection):\n pass\n\n def endElement(self, name, value, connection):\n if name == 'vpcId':\n self.vpc_id = value\n elif name == 'ownerId':\n self.owner_id = value\n elif name == 'cidrBlock':\n self.cidr_block = value\n else:\n setattr(self, name, value)\n\nclass VpcPeeringConnectionStatus(object):\n \"\"\"\n The status of VPC peering connection.\n\n :ivar code: The status of the VPC peering connection. Valid values are:\n\n * pending-acceptance\n * failed\n * expired\n * provisioning\n * active\n * deleted\n * rejected\n\n :ivar message: A message that provides more information about the status of the VPC peering connection, if applicable.\n \"\"\"\n def __init__(self, code=0, message=None):\n self.code = code\n self.message = message\n\n def __repr__(self):\n return '%s(%d)' % (self.code, self.message)\n\n def startElement(self, name, attrs, connection):\n pass\n\n def endElement(self, name, value, connection):\n if name == 'code':\n self.code = value\n elif name == 'message':\n self.message = value\n else:\n setattr(self, name, value)\n\n \n\nclass VpcPeeringConnection(TaggedEC2Object):\n\n def __init__(self, connection=None):\n \"\"\"\n Represents a VPC peering connection.\n\n :ivar id: The unique ID of the VPC peering connection.\n :ivar accepter_vpc_info: Information on peer Vpc.\n :ivar requester_vpc_info: Information on requester Vpc.\n :ivar expiration_time: The expiration date and time for the VPC peering connection.\n :ivar status_code: The status of the VPC peering connection.\n :ivar status_message: A message that provides more information about the status of the VPC peering connection, if applicable.\n \"\"\"\n super(VpcPeeringConnection, self).__init__(connection)\n self.id = None\n self.accepter_vpc_info = VpcInfo()\n self.requester_vpc_info = VpcInfo()\n self.expiration_time = None\n self._status = VpcPeeringConnectionStatus()\n\n @property\n def status_code(self):\n return self._status.code\n\n @property\n def status_message(self):\n return self._status.message\n\n def __repr__(self):\n return 'VpcPeeringConnection:%s' % self.id\n\n def startElement(self, name, attrs, connection):\n retval = super(VpcPeeringConnection, self).startElement(name, attrs, connection)\n if retval is not None:\n return retval\n \n if name == 'requesterVpcInfo':\n return self.requester_vpc_info\n elif name == 'accepterVpcInfo':\n return self.accepter_vpc_info\n elif name == 'status':\n return self._status\n\n return None\n\n def endElement(self, name, value, connection):\n if name == 'vpcPeeringConnectionId':\n self.id = value\n elif name 
== 'expirationTime':\n self.expiration_time = value\n else:\n setattr(self, name, value)\n\n def delete(self):\n return self.connection.delete_vpc_peering_connection(self.id)\n\n def _update(self, updated):\n self.__dict__.update(updated.__dict__)\n\n def update(self, validate=False, dry_run=False):\n vpc_peering_connection_list = self.connection.get_all_vpc_peering_connections(\n [self.id],\n dry_run=dry_run\n )\n if len(vpc_peering_connection_list):\n updated_vpc_peering_connection = vpc_peering_connection_list[0]\n self._update(updated_vpc_peering_connection)\n elif validate:\n raise ValueError('%s is not a valid VpcPeeringConnection ID' % (self.id,))\n return self.status_code\n", "path": "boto/vpc/vpc_peering_connection.py"}]} | 1,978 | 124 |
gh_patches_debug_37928 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3315 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider lenscrafters is broken
During the global build at 2021-08-25-14-42-15, spider **lenscrafters** failed with **0 features** and **1 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/logs/lenscrafters.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/output/lenscrafters.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/output/lenscrafters.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/lenscrafters.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import json
3 import re
4
5 import scrapy
6
7 from locations.items import GeojsonPointItem
8 from locations.hours import OpeningHours
9
10
11 class LensCraftersSpider(scrapy.Spider):
12 name = "lenscrafters"
13 item_attributes = { 'brand': "Lenscrafters" }
14 allowed_domains = ['local.lenscrafters.com']
15 start_urls = [
16 'https://local.lenscrafters.com/'
17 ]
18
19 def parse_hours(self, hours):
20 opening_hours = OpeningHours()
21 for group in hours:
22 if "Closed" in group:
23 pass
24 else:
25 days, open_time, close_time = re.search(r'([a-zA-Z,]+)\s([\d:]+)-([\d:]+)', group).groups()
26 days = days.split(',')
27 for day in days:
28 opening_hours.add_range(day=day, open_time=open_time, close_time=close_time, time_format='%H:%M')
29
30 return opening_hours.as_opening_hours()
31
32 def parse(self, response):
33 urls = response.xpath(
34 '//a[@class="c-directory-list-content-item-link" or @class="c-location-grid-item-link"]/@href').extract()
35 # If cannot find 'c-directory-list-content-item-link' or 'c-location-grid-item-link' then this is a store page
36 if len(urls) == 0:
37 properties = {
38 'name': response.xpath('//*[@class="location-name h1-normal"]/text()').extract_first(),
39 'addr_full': response.xpath('//*[@class="c-address-street-1"]/text()').extract_first(),
40 'city': response.xpath('//*[@class="c-address-city"]/text()').extract_first(),
41 'state': response.xpath('//*[@class="c-address-state"]/text()').extract_first(),
42 'postcode': response.xpath('//*[@class="c-address-postal-code"]/text()').extract_first(),
43 'phone': response.xpath('//*[@id="phone-main"]/text()').extract_first(),
44 'ref': "_".join(re.search(r".+/(.+?)/(.+?)/(.+?)/?(?:\.html|$)", response.url).groups()),
45 'website': response.url,
46 'lat': response.xpath('//*[@itemprop="latitude"]/@content').extract_first(),
47 'lon': response.xpath('//*[@itemprop="longitude"]/@content').extract_first(),
48 }
49
50 hours = self.parse_hours(response.xpath('//*[@itemprop="openingHours"]/@content').extract())
51 if hours:
52 properties["opening_hours"] = hours
53
54 yield GeojsonPointItem(**properties)
55 else:
56 for path in urls:
57 yield scrapy.Request(url=response.urljoin(path), callback=self.parse)
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/lenscrafters.py b/locations/spiders/lenscrafters.py
--- a/locations/spiders/lenscrafters.py
+++ b/locations/spiders/lenscrafters.py
@@ -10,7 +10,7 @@
class LensCraftersSpider(scrapy.Spider):
name = "lenscrafters"
- item_attributes = { 'brand': "Lenscrafters" }
+ item_attributes = {'brand': "Lenscrafters"}
allowed_domains = ['local.lenscrafters.com']
start_urls = [
'https://local.lenscrafters.com/'
@@ -30,21 +30,21 @@
return opening_hours.as_opening_hours()
def parse(self, response):
- urls = response.xpath(
- '//a[@class="c-directory-list-content-item-link" or @class="c-location-grid-item-link"]/@href').extract()
- # If cannot find 'c-directory-list-content-item-link' or 'c-location-grid-item-link' then this is a store page
+ urls = response.xpath('//a[@class="Directory-listLink Link--directory"]/@href').extract()
+
+ # If cannot find 'Directory-listLink Link--directory' then this is a store page
if len(urls) == 0:
properties = {
- 'name': response.xpath('//*[@class="location-name h1-normal"]/text()').extract_first(),
- 'addr_full': response.xpath('//*[@class="c-address-street-1"]/text()').extract_first(),
- 'city': response.xpath('//*[@class="c-address-city"]/text()').extract_first(),
- 'state': response.xpath('//*[@class="c-address-state"]/text()').extract_first(),
- 'postcode': response.xpath('//*[@class="c-address-postal-code"]/text()').extract_first(),
- 'phone': response.xpath('//*[@id="phone-main"]/text()').extract_first(),
- 'ref': "_".join(re.search(r".+/(.+?)/(.+?)/(.+?)/?(?:\.html|$)", response.url).groups()),
- 'website': response.url,
- 'lat': response.xpath('//*[@itemprop="latitude"]/@content').extract_first(),
- 'lon': response.xpath('//*[@itemprop="longitude"]/@content').extract_first(),
+ 'name': response.xpath('//h1[@id="location-name"]/text()').extract_first(),
+ 'addr_full': response.xpath('//span[@class="c-address-street-1"]/text()').extract_first(),
+ 'city': response.xpath('//span[@class="c-address-city"]/text()').extract_first(),
+ 'state': response.xpath('//abbr[@class="c-address-state"]/text()').extract_first(),
+ 'postcode': response.xpath('//span[@class="c-address-postal-code"]/text()').extract_first(),
+ 'phone': response.xpath('//div[@id="phone-main"]/text()').extract_first(),
+ 'ref': response.xpath('//link[@rel="canonical"]/@href').extract_first(),
+ 'website': response.xpath('//link[@rel="canonical"]/@href').extract_first(),
+ 'lat': response.xpath('//meta[@itemprop="latitude"]/@content').extract_first(),
+ 'lon': response.xpath('//meta[@itemprop="longitude"]/@content').extract_first(),
}
hours = self.parse_hours(response.xpath('//*[@itemprop="openingHours"]/@content').extract())
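To see why the old selectors yield zero stores, here is a minimal sketch against an invented directory-page snippet that uses the post-redesign class names targeted by the patch; the real local.lenscrafters.com markup may differ in other details:

```python
from scrapy import Selector

# Invented snippet for illustration only; it mimics the class names the
# patched spider expects on directory pages.
html = """
<ul>
  <li><a class="Directory-listLink Link--directory" href="/new-york.html">New York</a></li>
  <li><a class="Directory-listLink Link--directory" href="/california.html">California</a></li>
</ul>
"""

sel = Selector(text=html)
old_selector = '//a[@class="c-directory-list-content-item-link" or @class="c-location-grid-item-link"]/@href'
new_selector = '//a[@class="Directory-listLink Link--directory"]/@href'

print(sel.xpath(old_selector).extract())  # [] -- old selector matches nothing, hence 0 features
print(sel.xpath(new_selector).extract())  # ['/new-york.html', '/california.html']
```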
| {"golden_diff": "diff --git a/locations/spiders/lenscrafters.py b/locations/spiders/lenscrafters.py\n--- a/locations/spiders/lenscrafters.py\n+++ b/locations/spiders/lenscrafters.py\n@@ -10,7 +10,7 @@\n \n class LensCraftersSpider(scrapy.Spider):\n name = \"lenscrafters\"\n- item_attributes = { 'brand': \"Lenscrafters\" }\n+ item_attributes = {'brand': \"Lenscrafters\"}\n allowed_domains = ['local.lenscrafters.com']\n start_urls = [\n 'https://local.lenscrafters.com/'\n@@ -30,21 +30,21 @@\n return opening_hours.as_opening_hours()\n \n def parse(self, response):\n- urls = response.xpath(\n- '//a[@class=\"c-directory-list-content-item-link\" or @class=\"c-location-grid-item-link\"]/@href').extract()\n- # If cannot find 'c-directory-list-content-item-link' or 'c-location-grid-item-link' then this is a store page\n+ urls = response.xpath('//a[@class=\"Directory-listLink Link--directory\"]/@href').extract()\n+\n+ # If cannot find 'Directory-listLink Link--directory' then this is a store page\n if len(urls) == 0:\n properties = {\n- 'name': response.xpath('//*[@class=\"location-name h1-normal\"]/text()').extract_first(),\n- 'addr_full': response.xpath('//*[@class=\"c-address-street-1\"]/text()').extract_first(),\n- 'city': response.xpath('//*[@class=\"c-address-city\"]/text()').extract_first(),\n- 'state': response.xpath('//*[@class=\"c-address-state\"]/text()').extract_first(),\n- 'postcode': response.xpath('//*[@class=\"c-address-postal-code\"]/text()').extract_first(),\n- 'phone': response.xpath('//*[@id=\"phone-main\"]/text()').extract_first(),\n- 'ref': \"_\".join(re.search(r\".+/(.+?)/(.+?)/(.+?)/?(?:\\.html|$)\", response.url).groups()),\n- 'website': response.url,\n- 'lat': response.xpath('//*[@itemprop=\"latitude\"]/@content').extract_first(),\n- 'lon': response.xpath('//*[@itemprop=\"longitude\"]/@content').extract_first(),\n+ 'name': response.xpath('//h1[@id=\"location-name\"]/text()').extract_first(),\n+ 'addr_full': response.xpath('//span[@class=\"c-address-street-1\"]/text()').extract_first(),\n+ 'city': response.xpath('//span[@class=\"c-address-city\"]/text()').extract_first(),\n+ 'state': response.xpath('//abbr[@class=\"c-address-state\"]/text()').extract_first(),\n+ 'postcode': response.xpath('//span[@class=\"c-address-postal-code\"]/text()').extract_first(),\n+ 'phone': response.xpath('//div[@id=\"phone-main\"]/text()').extract_first(),\n+ 'ref': response.xpath('//link[@rel=\"canonical\"]/@href').extract_first(),\n+ 'website': response.xpath('//link[@rel=\"canonical\"]/@href').extract_first(),\n+ 'lat': response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first(),\n+ 'lon': response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first(),\n }\n \n hours = self.parse_hours(response.xpath('//*[@itemprop=\"openingHours\"]/@content').extract())\n", "issue": "Spider lenscrafters is broken\nDuring the global build at 2021-08-25-14-42-15, spider **lenscrafters** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/logs/lenscrafters.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/output/lenscrafters.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/output/lenscrafters.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass 
LensCraftersSpider(scrapy.Spider):\n name = \"lenscrafters\"\n item_attributes = { 'brand': \"Lenscrafters\" }\n allowed_domains = ['local.lenscrafters.com']\n start_urls = [\n 'https://local.lenscrafters.com/'\n ]\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n for group in hours:\n if \"Closed\" in group:\n pass\n else:\n days, open_time, close_time = re.search(r'([a-zA-Z,]+)\\s([\\d:]+)-([\\d:]+)', group).groups()\n days = days.split(',')\n for day in days:\n opening_hours.add_range(day=day, open_time=open_time, close_time=close_time, time_format='%H:%M')\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n urls = response.xpath(\n '//a[@class=\"c-directory-list-content-item-link\" or @class=\"c-location-grid-item-link\"]/@href').extract()\n # If cannot find 'c-directory-list-content-item-link' or 'c-location-grid-item-link' then this is a store page\n if len(urls) == 0:\n properties = {\n 'name': response.xpath('//*[@class=\"location-name h1-normal\"]/text()').extract_first(),\n 'addr_full': response.xpath('//*[@class=\"c-address-street-1\"]/text()').extract_first(),\n 'city': response.xpath('//*[@class=\"c-address-city\"]/text()').extract_first(),\n 'state': response.xpath('//*[@class=\"c-address-state\"]/text()').extract_first(),\n 'postcode': response.xpath('//*[@class=\"c-address-postal-code\"]/text()').extract_first(),\n 'phone': response.xpath('//*[@id=\"phone-main\"]/text()').extract_first(),\n 'ref': \"_\".join(re.search(r\".+/(.+?)/(.+?)/(.+?)/?(?:\\.html|$)\", response.url).groups()),\n 'website': response.url,\n 'lat': response.xpath('//*[@itemprop=\"latitude\"]/@content').extract_first(),\n 'lon': response.xpath('//*[@itemprop=\"longitude\"]/@content').extract_first(),\n }\n\n hours = self.parse_hours(response.xpath('//*[@itemprop=\"openingHours\"]/@content').extract())\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n else:\n for path in urls:\n yield scrapy.Request(url=response.urljoin(path), callback=self.parse)\n", "path": "locations/spiders/lenscrafters.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass LensCraftersSpider(scrapy.Spider):\n name = \"lenscrafters\"\n item_attributes = {'brand': \"Lenscrafters\"}\n allowed_domains = ['local.lenscrafters.com']\n start_urls = [\n 'https://local.lenscrafters.com/'\n ]\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n for group in hours:\n if \"Closed\" in group:\n pass\n else:\n days, open_time, close_time = re.search(r'([a-zA-Z,]+)\\s([\\d:]+)-([\\d:]+)', group).groups()\n days = days.split(',')\n for day in days:\n opening_hours.add_range(day=day, open_time=open_time, close_time=close_time, time_format='%H:%M')\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n urls = response.xpath('//a[@class=\"Directory-listLink Link--directory\"]/@href').extract()\n\n # If cannot find 'Directory-listLink Link--directory' then this is a store page\n if len(urls) == 0:\n properties = {\n 'name': response.xpath('//h1[@id=\"location-name\"]/text()').extract_first(),\n 'addr_full': response.xpath('//span[@class=\"c-address-street-1\"]/text()').extract_first(),\n 'city': response.xpath('//span[@class=\"c-address-city\"]/text()').extract_first(),\n 'state': response.xpath('//abbr[@class=\"c-address-state\"]/text()').extract_first(),\n 'postcode': 
response.xpath('//span[@class=\"c-address-postal-code\"]/text()').extract_first(),\n 'phone': response.xpath('//div[@id=\"phone-main\"]/text()').extract_first(),\n 'ref': response.xpath('//link[@rel=\"canonical\"]/@href').extract_first(),\n 'website': response.xpath('//link[@rel=\"canonical\"]/@href').extract_first(),\n 'lat': response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first(),\n 'lon': response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first(),\n }\n\n hours = self.parse_hours(response.xpath('//*[@itemprop=\"openingHours\"]/@content').extract())\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n else:\n for path in urls:\n yield scrapy.Request(url=response.urljoin(path), callback=self.parse)\n", "path": "locations/spiders/lenscrafters.py"}]} | 1,129 | 741 |
gh_patches_debug_19352 | rasdani/github-patches | git_diff | sublimelsp__LSP-339 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Scopes priorities while selecting configuration
## Bug:
When there are multiple language servers configured, all of which are for similar scopes (e.g. `source.json`, `source.json.sublime.settings`), the configuration with the most specific scope should be preferred; however, right now one or the other could "win", sometimes leading to erroneous configuration.
Example comes from configuring **vscode-json-languageserver** to work with both `json` and `jsonc` languageIds.
### Suggestion:
Give priority to the configuration with the most specific scope that matches.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/core/configurations.py`
Content:
```
1 import sublime
2
3 from .settings import ClientConfig, client_configs
4 from .logging import debug
5 from .workspace import get_project_config
6
7 assert ClientConfig
8
9 try:
10 from typing import Any, List, Dict, Tuple, Callable, Optional
11 assert Any and List and Dict and Tuple and Callable and Optional
12 except ImportError:
13 pass
14
15
16 window_client_configs = dict() # type: Dict[int, List[ClientConfig]]
17
18
19 def get_scope_client_config(view: 'sublime.View', configs: 'List[ClientConfig]') -> 'Optional[ClientConfig]':
20 for config in configs:
21 for scope in config.scopes:
22 if len(view.sel()) > 0:
23 if view.match_selector(view.sel()[0].begin(), scope):
24 return config
25
26 return None
27
28
29 def register_client_config(config: ClientConfig) -> None:
30 window_client_configs.clear()
31 client_configs.add_external_config(config)
32
33
34 def get_global_client_config(view: sublime.View) -> 'Optional[ClientConfig]':
35 return get_scope_client_config(view, client_configs.all)
36
37
38 def get_default_client_config(view: sublime.View) -> 'Optional[ClientConfig]':
39 return get_scope_client_config(view, client_configs.defaults)
40
41
42 def get_window_client_config(view: sublime.View) -> 'Optional[ClientConfig]':
43 window = view.window()
44 if window:
45 configs_for_window = window_client_configs.get(window.id(), [])
46 return get_scope_client_config(view, configs_for_window)
47 else:
48 return None
49
50
51 def config_for_scope(view: sublime.View) -> 'Optional[ClientConfig]':
52 # check window_client_config first
53 window_client_config = get_window_client_config(view)
54 if not window_client_config:
55 global_client_config = get_global_client_config(view)
56
57 if global_client_config:
58 window = view.window()
59 if window:
60 window_client_config = apply_window_settings(global_client_config, view)
61 add_window_client_config(window, window_client_config)
62 return window_client_config
63 else:
64 # always return a client config even if the view has no window anymore
65 return global_client_config
66
67 return window_client_config
68
69
70 def add_window_client_config(window: 'sublime.Window', config: 'ClientConfig'):
71 global window_client_configs
72 window_client_configs.setdefault(window.id(), []).append(config)
73
74
75 def clear_window_client_configs(window: 'sublime.Window'):
76 global window_client_configs
77 if window.id() in window_client_configs:
78 del window_client_configs[window.id()]
79
80
81 def apply_window_settings(client_config: 'ClientConfig', view: 'sublime.View') -> 'ClientConfig':
82 window = view.window()
83 if window:
84 window_config = get_project_config(window)
85
86 if client_config.name in window_config:
87 overrides = window_config[client_config.name]
88 debug('window has override for', client_config.name, overrides)
89 return ClientConfig(
90 client_config.name,
91 overrides.get("command", client_config.binary_args),
92 overrides.get("tcp_port", client_config.tcp_port),
93 overrides.get("scopes", client_config.scopes),
94 overrides.get("syntaxes", client_config.syntaxes),
95 overrides.get("languageId", client_config.languageId),
96 overrides.get("enabled", client_config.enabled),
97 overrides.get("initializationOptions", client_config.init_options),
98 overrides.get("settings", client_config.settings),
99 overrides.get("env", client_config.env)
100 )
101
102 return client_config
103
104
105 def is_supportable_syntax(syntax: str) -> bool:
106 # TODO: filter out configs disabled by the user.
107 for config in client_configs.defaults:
108 if syntax in config.syntaxes:
109 return True
110 return False
111
112
113 def is_supported_syntax(syntax: str) -> bool:
114 for config in client_configs.all:
115 if syntax in config.syntaxes:
116 return True
117 return False
118
119
120 def is_supported_view(view: sublime.View) -> bool:
121 # TODO: perhaps make this check for a client instead of a config
122 if config_for_scope(view):
123 return True
124 else:
125 return False
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugin/core/configurations.py b/plugin/core/configurations.py
--- a/plugin/core/configurations.py
+++ b/plugin/core/configurations.py
@@ -17,13 +17,21 @@
def get_scope_client_config(view: 'sublime.View', configs: 'List[ClientConfig]') -> 'Optional[ClientConfig]':
+ # When there are multiple server configurations, all of which are for
+ # similar scopes (e.g. 'source.json', 'source.json.sublime.settings') the
+ # configuration with the most specific scope (highest ranked selector)
+ # in the current position is preferred.
+ scope_score = 0
+ scope_client_config = None
for config in configs:
for scope in config.scopes:
- if len(view.sel()) > 0:
- if view.match_selector(view.sel()[0].begin(), scope):
- return config
-
- return None
+ sel = view.sel()
+ if len(sel) > 0:
+ score = view.score_selector(sel[0].begin(), scope)
+ if score > scope_score:
+ scope_score = score
+ scope_client_config = config
+ return scope_client_config
def register_client_config(config: ClientConfig) -> None:
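A minimal sketch of the selection policy the patch implements, using a simplified stand-in for `sublime.View.score_selector` (the real scoring is only available inside Sublime Text and is more nuanced than this prefix check):

```python
# Stand-in scorer: a selector that matches the scope at the caret scores
# higher the more specific (more dotted components) it is.
def score_selector(scope_at_caret: str, selector: str) -> int:
    return (selector.count('.') + 1) if scope_at_caret.startswith(selector) else 0

configs = {
    "json-ls":          ["source.json"],
    "sublime-settings": ["source.json.sublime.settings"],
}
scope_at_caret = "source.json.sublime.settings"

best, best_score = None, 0
for name, scopes in configs.items():
    for selector in scopes:
        score = score_selector(scope_at_caret, selector)
        if score > best_score:
            best, best_score = name, score

print(best)  # sublime-settings -- the most specific matching configuration wins
```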
| {"golden_diff": "diff --git a/plugin/core/configurations.py b/plugin/core/configurations.py\n--- a/plugin/core/configurations.py\n+++ b/plugin/core/configurations.py\n@@ -17,13 +17,21 @@\n \n \n def get_scope_client_config(view: 'sublime.View', configs: 'List[ClientConfig]') -> 'Optional[ClientConfig]':\n+ # When there are multiple server configurations, all of which are for\n+ # similar scopes (e.g. 'source.json', 'source.json.sublime.settings') the\n+ # configuration with the most specific scope (highest ranked selector)\n+ # in the current position is preferred.\n+ scope_score = 0\n+ scope_client_config = None\n for config in configs:\n for scope in config.scopes:\n- if len(view.sel()) > 0:\n- if view.match_selector(view.sel()[0].begin(), scope):\n- return config\n-\n- return None\n+ sel = view.sel()\n+ if len(sel) > 0:\n+ score = view.score_selector(sel[0].begin(), scope)\n+ if score > scope_score:\n+ scope_score = score\n+ scope_client_config = config\n+ return scope_client_config\n \n \n def register_client_config(config: ClientConfig) -> None:\n", "issue": "Scopes priorities while selecting configuration\n## Bug:\r\n\r\nWhen there are multiple language servers configured, all of which are for similar scopes (Ex. `source.json`, `source.json.sublime.settings`) the configuration with the most specific scope should be preferred; however right now one or the other could \"win\", some times leading to erroneous configuration.\r\n\r\nExample comes from configuring **vscode-json-languageserver** to work with both `json` and `jsonc` languageIds.\r\n\r\n### Suggestion:\r\n\r\nGive priority to the configuration with the most specific scope that matches.\r\n\n", "before_files": [{"content": "import sublime\n\nfrom .settings import ClientConfig, client_configs\nfrom .logging import debug\nfrom .workspace import get_project_config\n\nassert ClientConfig\n\ntry:\n from typing import Any, List, Dict, Tuple, Callable, Optional\n assert Any and List and Dict and Tuple and Callable and Optional\nexcept ImportError:\n pass\n\n\nwindow_client_configs = dict() # type: Dict[int, List[ClientConfig]]\n\n\ndef get_scope_client_config(view: 'sublime.View', configs: 'List[ClientConfig]') -> 'Optional[ClientConfig]':\n for config in configs:\n for scope in config.scopes:\n if len(view.sel()) > 0:\n if view.match_selector(view.sel()[0].begin(), scope):\n return config\n\n return None\n\n\ndef register_client_config(config: ClientConfig) -> None:\n window_client_configs.clear()\n client_configs.add_external_config(config)\n\n\ndef get_global_client_config(view: sublime.View) -> 'Optional[ClientConfig]':\n return get_scope_client_config(view, client_configs.all)\n\n\ndef get_default_client_config(view: sublime.View) -> 'Optional[ClientConfig]':\n return get_scope_client_config(view, client_configs.defaults)\n\n\ndef get_window_client_config(view: sublime.View) -> 'Optional[ClientConfig]':\n window = view.window()\n if window:\n configs_for_window = window_client_configs.get(window.id(), [])\n return get_scope_client_config(view, configs_for_window)\n else:\n return None\n\n\ndef config_for_scope(view: sublime.View) -> 'Optional[ClientConfig]':\n # check window_client_config first\n window_client_config = get_window_client_config(view)\n if not window_client_config:\n global_client_config = get_global_client_config(view)\n\n if global_client_config:\n window = view.window()\n if window:\n window_client_config = apply_window_settings(global_client_config, view)\n add_window_client_config(window, 
window_client_config)\n return window_client_config\n else:\n # always return a client config even if the view has no window anymore\n return global_client_config\n\n return window_client_config\n\n\ndef add_window_client_config(window: 'sublime.Window', config: 'ClientConfig'):\n global window_client_configs\n window_client_configs.setdefault(window.id(), []).append(config)\n\n\ndef clear_window_client_configs(window: 'sublime.Window'):\n global window_client_configs\n if window.id() in window_client_configs:\n del window_client_configs[window.id()]\n\n\ndef apply_window_settings(client_config: 'ClientConfig', view: 'sublime.View') -> 'ClientConfig':\n window = view.window()\n if window:\n window_config = get_project_config(window)\n\n if client_config.name in window_config:\n overrides = window_config[client_config.name]\n debug('window has override for', client_config.name, overrides)\n return ClientConfig(\n client_config.name,\n overrides.get(\"command\", client_config.binary_args),\n overrides.get(\"tcp_port\", client_config.tcp_port),\n overrides.get(\"scopes\", client_config.scopes),\n overrides.get(\"syntaxes\", client_config.syntaxes),\n overrides.get(\"languageId\", client_config.languageId),\n overrides.get(\"enabled\", client_config.enabled),\n overrides.get(\"initializationOptions\", client_config.init_options),\n overrides.get(\"settings\", client_config.settings),\n overrides.get(\"env\", client_config.env)\n )\n\n return client_config\n\n\ndef is_supportable_syntax(syntax: str) -> bool:\n # TODO: filter out configs disabled by the user.\n for config in client_configs.defaults:\n if syntax in config.syntaxes:\n return True\n return False\n\n\ndef is_supported_syntax(syntax: str) -> bool:\n for config in client_configs.all:\n if syntax in config.syntaxes:\n return True\n return False\n\n\ndef is_supported_view(view: sublime.View) -> bool:\n # TODO: perhaps make this check for a client instead of a config\n if config_for_scope(view):\n return True\n else:\n return False\n", "path": "plugin/core/configurations.py"}], "after_files": [{"content": "import sublime\n\nfrom .settings import ClientConfig, client_configs\nfrom .logging import debug\nfrom .workspace import get_project_config\n\nassert ClientConfig\n\ntry:\n from typing import Any, List, Dict, Tuple, Callable, Optional\n assert Any and List and Dict and Tuple and Callable and Optional\nexcept ImportError:\n pass\n\n\nwindow_client_configs = dict() # type: Dict[int, List[ClientConfig]]\n\n\ndef get_scope_client_config(view: 'sublime.View', configs: 'List[ClientConfig]') -> 'Optional[ClientConfig]':\n # When there are multiple server configurations, all of which are for\n # similar scopes (e.g. 
'source.json', 'source.json.sublime.settings') the\n # configuration with the most specific scope (highest ranked selector)\n # in the current position is preferred.\n scope_score = 0\n scope_client_config = None\n for config in configs:\n for scope in config.scopes:\n sel = view.sel()\n if len(sel) > 0:\n score = view.score_selector(sel[0].begin(), scope)\n if score > scope_score:\n scope_score = score\n scope_client_config = config\n return scope_client_config\n\n\ndef register_client_config(config: ClientConfig) -> None:\n window_client_configs.clear()\n client_configs.add_external_config(config)\n\n\ndef get_global_client_config(view: sublime.View) -> 'Optional[ClientConfig]':\n return get_scope_client_config(view, client_configs.all)\n\n\ndef get_default_client_config(view: sublime.View) -> 'Optional[ClientConfig]':\n return get_scope_client_config(view, client_configs.defaults)\n\n\ndef get_window_client_config(view: sublime.View) -> 'Optional[ClientConfig]':\n window = view.window()\n if window:\n configs_for_window = window_client_configs.get(window.id(), [])\n return get_scope_client_config(view, configs_for_window)\n else:\n return None\n\n\ndef config_for_scope(view: sublime.View) -> 'Optional[ClientConfig]':\n # check window_client_config first\n window_client_config = get_window_client_config(view)\n if not window_client_config:\n global_client_config = get_global_client_config(view)\n\n if global_client_config:\n window = view.window()\n if window:\n window_client_config = apply_window_settings(global_client_config, view)\n add_window_client_config(window, window_client_config)\n return window_client_config\n else:\n # always return a client config even if the view has no window anymore\n return global_client_config\n\n return window_client_config\n\n\ndef add_window_client_config(window: 'sublime.Window', config: 'ClientConfig'):\n global window_client_configs\n window_client_configs.setdefault(window.id(), []).append(config)\n\n\ndef clear_window_client_configs(window: 'sublime.Window'):\n global window_client_configs\n if window.id() in window_client_configs:\n del window_client_configs[window.id()]\n\n\ndef apply_window_settings(client_config: 'ClientConfig', view: 'sublime.View') -> 'ClientConfig':\n window = view.window()\n if window:\n window_config = get_project_config(window)\n\n if client_config.name in window_config:\n overrides = window_config[client_config.name]\n debug('window has override for', client_config.name, overrides)\n return ClientConfig(\n client_config.name,\n overrides.get(\"command\", client_config.binary_args),\n overrides.get(\"tcp_port\", client_config.tcp_port),\n overrides.get(\"scopes\", client_config.scopes),\n overrides.get(\"syntaxes\", client_config.syntaxes),\n overrides.get(\"languageId\", client_config.languageId),\n overrides.get(\"enabled\", client_config.enabled),\n overrides.get(\"initializationOptions\", client_config.init_options),\n overrides.get(\"settings\", client_config.settings),\n overrides.get(\"env\", client_config.env)\n )\n\n return client_config\n\n\ndef is_supportable_syntax(syntax: str) -> bool:\n # TODO: filter out configs disabled by the user.\n for config in client_configs.defaults:\n if syntax in config.syntaxes:\n return True\n return False\n\n\ndef is_supported_syntax(syntax: str) -> bool:\n for config in client_configs.all:\n if syntax in config.syntaxes:\n return True\n return False\n\n\ndef is_supported_view(view: sublime.View) -> bool:\n # TODO: perhaps make this check for a client instead of a config\n if 
config_for_scope(view):\n return True\n else:\n return False\n", "path": "plugin/core/configurations.py"}]} | 1,514 | 280 |
gh_patches_debug_12787 | rasdani/github-patches | git_diff | numba__numba-672 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong type coercion on input arguments
In the following snippet, it looks like first calling the function with int arguments coerces any further float arguments to int:
```
>>> @jit(nopython=True)
... def mpow(a, b):
... return math.pow(a, b)
...
>>>
>>> mpow(0, 1)
0.0
>>> mpow(0, 0.666)
1.0
>>> mpow(0, 1.666)
0.0
```
It doesn't happen if the function is called with float arguments first:
```
>>> @jit(nopython=True)
... def mpow2(a, b):
... return math.pow(a, b)
...
>>> mpow2(0, 0.666)
0.0
>>> mpow2(0, 1)
0.0
>>> mpow2(0, 0.666)
0.0
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numba/typeconv/typeconv.py`
Content:
```
1 from __future__ import print_function, absolute_import
2 from . import _typeconv
3
4
5 class TypeManager(object):
6 def __init__(self):
7 self._ptr = _typeconv.new_type_manager()
8
9 def select_overload(self, sig, overloads):
10 sig = [t._code for t in sig]
11 overloads = [[t._code for t in s] for s in overloads ]
12 return _typeconv.select_overload(self._ptr, sig, overloads)
13
14 def check_compatible(self, fromty, toty):
15 return _typeconv.check_compatible(self._ptr, fromty._code, toty._code)
16
17 def set_compatible(self, fromty, toty, by):
18 _typeconv.set_compatible(self._ptr, fromty._code, toty._code, by)
19
20 def set_promote(self, fromty, toty):
21 self.set_compatible(fromty, toty, ord("p"))
22
23 def set_unsafe_convert(self, fromty, toty):
24 self.set_compatible(fromty, toty, ord("u"))
25
26 def set_safe_convert(self, fromty, toty):
27 self.set_compatible(fromty, toty, ord("s"))
28
29 def get_pointer(self):
30 return _typeconv.get_pointer(self._ptr)
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/numba/typeconv/typeconv.py b/numba/typeconv/typeconv.py
--- a/numba/typeconv/typeconv.py
+++ b/numba/typeconv/typeconv.py
@@ -6,10 +6,10 @@
def __init__(self):
self._ptr = _typeconv.new_type_manager()
- def select_overload(self, sig, overloads):
+ def select_overload(self, sig, overloads, allow_unsafe):
sig = [t._code for t in sig]
overloads = [[t._code for t in s] for s in overloads ]
- return _typeconv.select_overload(self._ptr, sig, overloads)
+ return _typeconv.select_overload(self._ptr, sig, overloads, allow_unsafe)
def check_compatible(self, fromty, toty):
return _typeconv.check_compatible(self._ptr, fromty._code, toty._code)
| {"golden_diff": "diff --git a/numba/typeconv/typeconv.py b/numba/typeconv/typeconv.py\n--- a/numba/typeconv/typeconv.py\n+++ b/numba/typeconv/typeconv.py\n@@ -6,10 +6,10 @@\n def __init__(self):\n self._ptr = _typeconv.new_type_manager()\n \n- def select_overload(self, sig, overloads):\n+ def select_overload(self, sig, overloads, allow_unsafe):\n sig = [t._code for t in sig]\n overloads = [[t._code for t in s] for s in overloads ]\n- return _typeconv.select_overload(self._ptr, sig, overloads)\n+ return _typeconv.select_overload(self._ptr, sig, overloads, allow_unsafe)\n \n def check_compatible(self, fromty, toty):\n return _typeconv.check_compatible(self._ptr, fromty._code, toty._code)\n", "issue": "Wrong type coercion on input arguments\nIf the following snippet, it looks like first calling the function with int arguments then coerces any further float arguments to int:\n\n```\n>>> @jit(nopython=True)\n... def mpow(a, b):\n... return math.pow(a, b)\n... \n>>> \n>>> mpow(0, 1)\n0.0\n>>> mpow(0, 0.666)\n1.0\n>>> mpow(0, 1.666)\n0.0\n```\n\nIt doesn't happen if the function is called with float arguments first:\n\n```\n>>> @jit(nopython=True)\n... def mpow2(a, b):\n... return math.pow(a, b)\n... \n>>> mpow2(0, 0.666)\n0.0\n>>> mpow2(0, 1)\n0.0\n>>> mpow2(0, 0.666)\n0.0\n```\n\n", "before_files": [{"content": "from __future__ import print_function, absolute_import\nfrom . import _typeconv\n\n\nclass TypeManager(object):\n def __init__(self):\n self._ptr = _typeconv.new_type_manager()\n\n def select_overload(self, sig, overloads):\n sig = [t._code for t in sig]\n overloads = [[t._code for t in s] for s in overloads ]\n return _typeconv.select_overload(self._ptr, sig, overloads)\n\n def check_compatible(self, fromty, toty):\n return _typeconv.check_compatible(self._ptr, fromty._code, toty._code)\n\n def set_compatible(self, fromty, toty, by):\n _typeconv.set_compatible(self._ptr, fromty._code, toty._code, by)\n\n def set_promote(self, fromty, toty):\n self.set_compatible(fromty, toty, ord(\"p\"))\n\n def set_unsafe_convert(self, fromty, toty):\n self.set_compatible(fromty, toty, ord(\"u\"))\n\n def set_safe_convert(self, fromty, toty):\n self.set_compatible(fromty, toty, ord(\"s\"))\n\n def get_pointer(self):\n return _typeconv.get_pointer(self._ptr)\n", "path": "numba/typeconv/typeconv.py"}], "after_files": [{"content": "from __future__ import print_function, absolute_import\nfrom . import _typeconv\n\n\nclass TypeManager(object):\n def __init__(self):\n self._ptr = _typeconv.new_type_manager()\n\n def select_overload(self, sig, overloads, allow_unsafe):\n sig = [t._code for t in sig]\n overloads = [[t._code for t in s] for s in overloads ]\n return _typeconv.select_overload(self._ptr, sig, overloads, allow_unsafe)\n\n def check_compatible(self, fromty, toty):\n return _typeconv.check_compatible(self._ptr, fromty._code, toty._code)\n\n def set_compatible(self, fromty, toty, by):\n _typeconv.set_compatible(self._ptr, fromty._code, toty._code, by)\n\n def set_promote(self, fromty, toty):\n self.set_compatible(fromty, toty, ord(\"p\"))\n\n def set_unsafe_convert(self, fromty, toty):\n self.set_compatible(fromty, toty, ord(\"u\"))\n\n def set_safe_convert(self, fromty, toty):\n self.set_compatible(fromty, toty, ord(\"s\"))\n\n def get_pointer(self):\n return _typeconv.get_pointer(self._ptr)\n", "path": "numba/typeconv/typeconv.py"}]} | 808 | 213 |
gh_patches_debug_33843 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-731 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
E3031 CidrIp contains invalid characters fails when Fn::Sub is present
*cfn-lint version: (`cfn-lint --version`)*
`cfn-lint 0.16.0`
*Description of issue.*
When the `CidrIp` value is `!Sub`ed from `Parameters`, an E3031 lint error is raised. Sample template:
```yaml
AWSTemplateFormatVersion: 2010-09-09
Description: AMI Builder Stack
Parameters:
BuilderCidr:
Type: String
Resources:
SecurityGroup:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: Description
VpcId: vpc-id
SecurityGroupIngress:
- IpProtocol: tcp
FromPort: 1
ToPort: 65535
CidrIp: !Sub ${BuilderCidr}
```
Expected output: successful lint
Actual output:
```
E3031 CidrIp contains invalid characters (Pattern: x.x.x.x/y) at Resources/SecurityGroup/Properties/SecurityGroupIngress/0/CidrIp/Fn::Sub
```
> Cfn-lint uses the [CloudFormation Resource Specifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-resource-specification.html) as the base to do validation. These files are included as part of the application version. Please update to the latest version of `cfn-lint` or update the spec files manually (`cfn-lint -u`)
The problem still persists after running `cfn-lint -u`
--- END ISSUE ---
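A minimal reproduction of the stripping behaviour implemented by the rule's `check_sub` (shown in the files below); the CIDR regex used here is an illustrative stand-in, not the exact `AllowedPatternRegex` from the AWS spec files:

```python
import re

value = "${BuilderCidr}"
stripped = re.sub(r'\${.*}', '', value)   # same stripping as check_sub
print(repr(stripped))                      # '' -- the whole value was a placeholder

# Illustrative stand-in for the spec's CidrIp pattern (x.x.x.x/y).
cidr = re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$')
print(bool(cidr.match(stripped)))          # False -> E3031 fires even though the Sub is valid
```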
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/properties/AllowedPattern.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import re
18 import six
19 from cfnlint import CloudFormationLintRule
20 from cfnlint import RuleMatch
21
22 from cfnlint.helpers import RESOURCE_SPECS
23
24
25 class AllowedPattern(CloudFormationLintRule):
26 """Check if properties have a valid value"""
27 id = 'E3031'
28 shortdesc = 'Check if property values adhere to a specific pattern'
29 description = 'Check if properties have a valid value in case of a pattern (Regular Expression)'
30 source_url = 'https://github.com/awslabs/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#allowedpattern'
31 tags = ['resources', 'property', 'allowed pattern', 'regex']
32
33 def initialize(self, cfn):
34 """Initialize the rule"""
35 for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):
36 self.resource_property_types.append(resource_type_spec)
37 for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):
38 self.resource_sub_property_types.append(property_type_spec)
39
40 def check_sub(self, value, path, property_name, **kwargs):
41 """Check Value of a Sub"""
42 matches = []
43
44 if isinstance(value, list):
45 if isinstance(value[0], six.string_types):
46 # Remove the sub (${}) from the value
47 stripped_value = re.sub(r'\${.*}', '', value[0])
48 matches.extend(self.check_value(stripped_value, path[:] + [0], property_name, **kwargs))
49 else:
50 # Remove the sub (${}) from the value
51 stripped_value = re.sub(r'\${.*}', '', value)
52 matches.extend(self.check_value(stripped_value, path[:], property_name, **kwargs))
53 return matches
54
55 def check_value(self, value, path, property_name, **kwargs):
56 """Check Value"""
57 matches = []
58
59 # Get the Allowed Pattern Regex
60 value_pattern_regex = kwargs.get('value_specs', {}).get('AllowedPatternRegex', {})
61 # Get the "Human Readable" version for the error message. Optional, if not specified,
62 # the RegEx itself is used.
63 value_pattern = kwargs.get('value_specs', {}).get('AllowedPattern', value_pattern_regex)
64
65 if value_pattern_regex:
66 regex = re.compile(value_pattern_regex)
67 if not regex.match(value):
68 full_path = ('/'.join(str(x) for x in path))
69
70 message = '{} contains invalid characters (Pattern: {}) at {}'
71 matches.append(RuleMatch(path, message.format(property_name, value_pattern, full_path)))
72
73 return matches
74
75 def check(self, cfn, properties, value_specs, property_specs, path):
76 """Check itself"""
77 matches = list()
78 for p_value, p_path in properties.items_safe(path[:]):
79 for prop in p_value:
80 if prop in value_specs:
81 value = value_specs.get(prop).get('Value', {})
82 if value:
83 value_type = value.get('ValueType', '')
84 property_type = property_specs.get('Properties').get(prop).get('Type')
85 matches.extend(
86 cfn.check_value(
87 p_value, prop, p_path,
88 check_value=self.check_value,
89 check_sub=self.check_sub,
90 value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get('ValueTypes').get(value_type, {}),
91 cfn=cfn, property_type=property_type, property_name=prop
92 )
93 )
94 return matches
95
96 def match_resource_sub_properties(self, properties, property_type, path, cfn):
97 """Match for sub properties"""
98 matches = list()
99
100 specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type, {}).get('Properties', {})
101 property_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type)
102 matches.extend(self.check(cfn, properties, specs, property_specs, path))
103
104 return matches
105
106 def match_resource_properties(self, properties, resource_type, path, cfn):
107 """Check CloudFormation Properties"""
108 matches = list()
109
110 specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type, {}).get('Properties', {})
111 resource_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type)
112 matches.extend(self.check(cfn, properties, specs, resource_specs, path))
113
114 return matches
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/resources/properties/AllowedPattern.py b/src/cfnlint/rules/resources/properties/AllowedPattern.py
--- a/src/cfnlint/rules/resources/properties/AllowedPattern.py
+++ b/src/cfnlint/rules/resources/properties/AllowedPattern.py
@@ -15,7 +15,6 @@
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import re
-import six
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
@@ -37,21 +36,6 @@
for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):
self.resource_sub_property_types.append(property_type_spec)
- def check_sub(self, value, path, property_name, **kwargs):
- """Check Value of a Sub"""
- matches = []
-
- if isinstance(value, list):
- if isinstance(value[0], six.string_types):
- # Remove the sub (${}) from the value
- stripped_value = re.sub(r'\${.*}', '', value[0])
- matches.extend(self.check_value(stripped_value, path[:] + [0], property_name, **kwargs))
- else:
- # Remove the sub (${}) from the value
- stripped_value = re.sub(r'\${.*}', '', value)
- matches.extend(self.check_value(stripped_value, path[:], property_name, **kwargs))
- return matches
-
def check_value(self, value, path, property_name, **kwargs):
"""Check Value"""
matches = []
@@ -86,7 +70,6 @@
cfn.check_value(
p_value, prop, p_path,
check_value=self.check_value,
- check_sub=self.check_sub,
value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get('ValueTypes').get(value_type, {}),
cfn=cfn, property_type=property_type, property_name=prop
)
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/AllowedPattern.py b/src/cfnlint/rules/resources/properties/AllowedPattern.py\n--- a/src/cfnlint/rules/resources/properties/AllowedPattern.py\n+++ b/src/cfnlint/rules/resources/properties/AllowedPattern.py\n@@ -15,7 +15,6 @@\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n \"\"\"\n import re\n-import six\n from cfnlint import CloudFormationLintRule\n from cfnlint import RuleMatch\n \n@@ -37,21 +36,6 @@\n for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):\n self.resource_sub_property_types.append(property_type_spec)\n \n- def check_sub(self, value, path, property_name, **kwargs):\n- \"\"\"Check Value of a Sub\"\"\"\n- matches = []\n-\n- if isinstance(value, list):\n- if isinstance(value[0], six.string_types):\n- # Remove the sub (${}) from the value\n- stripped_value = re.sub(r'\\${.*}', '', value[0])\n- matches.extend(self.check_value(stripped_value, path[:] + [0], property_name, **kwargs))\n- else:\n- # Remove the sub (${}) from the value\n- stripped_value = re.sub(r'\\${.*}', '', value)\n- matches.extend(self.check_value(stripped_value, path[:], property_name, **kwargs))\n- return matches\n-\n def check_value(self, value, path, property_name, **kwargs):\n \"\"\"Check Value\"\"\"\n matches = []\n@@ -86,7 +70,6 @@\n cfn.check_value(\n p_value, prop, p_path,\n check_value=self.check_value,\n- check_sub=self.check_sub,\n value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get('ValueTypes').get(value_type, {}),\n cfn=cfn, property_type=property_type, property_name=prop\n )\n", "issue": "E3031 CidrIp contains invalid characters fails when Fn::Sub is present\n*cfn-lint version: (`cfn-lint --version`)*\r\n\r\n`cfn-lint 0.16.0`\r\n\r\n*Description of issue.*\r\n\r\nWhen `CidrIp` value is `!Sub`ed from `Parameters` - E3031 lint error is raised. Sample template:\r\n\r\n```lang=yaml\r\nAWSTemplateFormatVersion: 2010-09-09\r\n\r\nDescription: AMI Builder Stack\r\n\r\nParameters:\r\n\r\n BuilderCidr:\r\n Type: String\r\n\r\nResources:\r\n\r\n SecurityGroup:\r\n Type: AWS::EC2::SecurityGroup\r\n Properties:\r\n GroupDescription: Description\r\n VpcId: vpc-id\r\n SecurityGroupIngress:\r\n - IpProtocol: tcp\r\n FromPort: 1\r\n ToPort: 65535\r\n CidrIp: !Sub ${BuilderCidr}\r\n```\r\n\r\nExpected output: successful lint\r\nActual output:\r\n\r\n```\r\nE3031 CidrIp contains invalid characters (Pattern: x.x.x.x/y) at Resources/SecurityGroup/Properties/SecurityGroupIngress/0/CidrIp/Fn::Sub\r\n```\r\n\r\n> Cfn-lint uses the [CloudFormation Resource Specifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-resource-specification.html) as the base to do validation. These files are included as part of the application version. Please update to the latest version of `cfn-lint` or update the spec files manually (`cfn-lint -u`)\r\n\r\nThe problem still persists after running `cfn-lint -u`\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport re\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\nfrom cfnlint.helpers import RESOURCE_SPECS\n\n\nclass AllowedPattern(CloudFormationLintRule):\n \"\"\"Check if properties have a valid value\"\"\"\n id = 'E3031'\n shortdesc = 'Check if property values adhere to a specific pattern'\n description = 'Check if properties have a valid value in case of a pattern (Regular Expression)'\n source_url = 'https://github.com/awslabs/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#allowedpattern'\n tags = ['resources', 'property', 'allowed pattern', 'regex']\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):\n self.resource_sub_property_types.append(property_type_spec)\n\n def check_sub(self, value, path, property_name, **kwargs):\n \"\"\"Check Value of a Sub\"\"\"\n matches = []\n\n if isinstance(value, list):\n if isinstance(value[0], six.string_types):\n # Remove the sub (${}) from the value\n stripped_value = re.sub(r'\\${.*}', '', value[0])\n matches.extend(self.check_value(stripped_value, path[:] + [0], property_name, **kwargs))\n else:\n # Remove the sub (${}) from the value\n stripped_value = re.sub(r'\\${.*}', '', value)\n matches.extend(self.check_value(stripped_value, path[:], property_name, **kwargs))\n return matches\n\n def check_value(self, value, path, property_name, **kwargs):\n \"\"\"Check Value\"\"\"\n matches = []\n\n # Get the Allowed Pattern Regex\n value_pattern_regex = kwargs.get('value_specs', {}).get('AllowedPatternRegex', {})\n # Get the \"Human Readable\" version for the error message. 
Optional, if not specified,\n # the RegEx itself is used.\n value_pattern = kwargs.get('value_specs', {}).get('AllowedPattern', value_pattern_regex)\n\n if value_pattern_regex:\n regex = re.compile(value_pattern_regex)\n if not regex.match(value):\n full_path = ('/'.join(str(x) for x in path))\n\n message = '{} contains invalid characters (Pattern: {}) at {}'\n matches.append(RuleMatch(path, message.format(property_name, value_pattern, full_path)))\n\n return matches\n\n def check(self, cfn, properties, value_specs, property_specs, path):\n \"\"\"Check itself\"\"\"\n matches = list()\n for p_value, p_path in properties.items_safe(path[:]):\n for prop in p_value:\n if prop in value_specs:\n value = value_specs.get(prop).get('Value', {})\n if value:\n value_type = value.get('ValueType', '')\n property_type = property_specs.get('Properties').get(prop).get('Type')\n matches.extend(\n cfn.check_value(\n p_value, prop, p_path,\n check_value=self.check_value,\n check_sub=self.check_sub,\n value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get('ValueTypes').get(value_type, {}),\n cfn=cfn, property_type=property_type, property_name=prop\n )\n )\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type, {}).get('Properties', {})\n property_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type)\n matches.extend(self.check(cfn, properties, specs, property_specs, path))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type, {}).get('Properties', {})\n resource_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type)\n matches.extend(self.check(cfn, properties, specs, resource_specs, path))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/AllowedPattern.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport re\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\nfrom cfnlint.helpers import RESOURCE_SPECS\n\n\nclass AllowedPattern(CloudFormationLintRule):\n \"\"\"Check if properties have a valid value\"\"\"\n id = 'E3031'\n shortdesc = 'Check if property values adhere to a specific pattern'\n description = 'Check if properties have a valid value in case of a pattern (Regular Expression)'\n source_url = 'https://github.com/awslabs/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#allowedpattern'\n tags = ['resources', 'property', 'allowed pattern', 'regex']\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):\n self.resource_sub_property_types.append(property_type_spec)\n\n def check_value(self, value, path, property_name, **kwargs):\n \"\"\"Check Value\"\"\"\n matches = []\n\n # Get the Allowed Pattern Regex\n value_pattern_regex = kwargs.get('value_specs', {}).get('AllowedPatternRegex', {})\n # Get the \"Human Readable\" version for the error message. Optional, if not specified,\n # the RegEx itself is used.\n value_pattern = kwargs.get('value_specs', {}).get('AllowedPattern', value_pattern_regex)\n\n if value_pattern_regex:\n regex = re.compile(value_pattern_regex)\n if not regex.match(value):\n full_path = ('/'.join(str(x) for x in path))\n\n message = '{} contains invalid characters (Pattern: {}) at {}'\n matches.append(RuleMatch(path, message.format(property_name, value_pattern, full_path)))\n\n return matches\n\n def check(self, cfn, properties, value_specs, property_specs, path):\n \"\"\"Check itself\"\"\"\n matches = list()\n for p_value, p_path in properties.items_safe(path[:]):\n for prop in p_value:\n if prop in value_specs:\n value = value_specs.get(prop).get('Value', {})\n if value:\n value_type = value.get('ValueType', '')\n property_type = property_specs.get('Properties').get(prop).get('Type')\n matches.extend(\n cfn.check_value(\n p_value, prop, p_path,\n check_value=self.check_value,\n value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get('ValueTypes').get(value_type, {}),\n cfn=cfn, property_type=property_type, property_name=prop\n )\n )\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type, {}).get('Properties', {})\n property_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type)\n matches.extend(self.check(cfn, properties, specs, property_specs, path))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type, {}).get('Properties', {})\n resource_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type)\n matches.extend(self.check(cfn, properties, specs, resource_specs, path))\n\n return matches\n", 
"path": "src/cfnlint/rules/resources/properties/AllowedPattern.py"}]} | 2,029 | 430 |
gh_patches_debug_5376 | rasdani/github-patches | git_diff | great-expectations__great_expectations-4471 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/rule_based_profiler/types/__init__.py`
Content:
```
1 from .attributes import Attributes # isort:skip
2 from .builder import Builder # isort:skip
3
4 from .domain import ( # isort:skip
5 Domain,
6 SemanticDomainTypes,
7 InferredSemanticDomainType,
8 )
9 from .parameter_container import ( # isort:skip
10 DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
11 FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER,
12 PARAMETER_KEY,
13 VARIABLES_KEY,
14 VARIABLES_PREFIX,
15 ParameterNode,
16 ParameterContainer,
17 build_parameter_container,
18 build_parameter_container_for_variables,
19 is_fully_qualified_parameter_name_literal_string_format,
20 get_parameter_value_by_fully_qualified_parameter_name,
21 get_parameter_values_for_fully_qualified_parameter_names,
22 get_fully_qualified_parameter_names,
23 )
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/great_expectations/rule_based_profiler/types/__init__.py b/great_expectations/rule_based_profiler/types/__init__.py
--- a/great_expectations/rule_based_profiler/types/__init__.py
+++ b/great_expectations/rule_based_profiler/types/__init__.py
@@ -9,6 +9,8 @@
from .parameter_container import ( # isort:skip
DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER,
+ FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY,
+ FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY,
PARAMETER_KEY,
VARIABLES_KEY,
VARIABLES_PREFIX,
| {"golden_diff": "diff --git a/great_expectations/rule_based_profiler/types/__init__.py b/great_expectations/rule_based_profiler/types/__init__.py\n--- a/great_expectations/rule_based_profiler/types/__init__.py\n+++ b/great_expectations/rule_based_profiler/types/__init__.py\n@@ -9,6 +9,8 @@\n from .parameter_container import ( # isort:skip\n DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,\n FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER,\n+ FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY,\n+ FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY,\n PARAMETER_KEY,\n VARIABLES_KEY,\n VARIABLES_PREFIX,\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "from .attributes import Attributes # isort:skip\nfrom .builder import Builder # isort:skip\n\nfrom .domain import ( # isort:skip\n Domain,\n SemanticDomainTypes,\n InferredSemanticDomainType,\n)\nfrom .parameter_container import ( # isort:skip\n DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,\n FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER,\n PARAMETER_KEY,\n VARIABLES_KEY,\n VARIABLES_PREFIX,\n ParameterNode,\n ParameterContainer,\n build_parameter_container,\n build_parameter_container_for_variables,\n is_fully_qualified_parameter_name_literal_string_format,\n get_parameter_value_by_fully_qualified_parameter_name,\n get_parameter_values_for_fully_qualified_parameter_names,\n get_fully_qualified_parameter_names,\n)\n", "path": "great_expectations/rule_based_profiler/types/__init__.py"}], "after_files": [{"content": "from .attributes import Attributes # isort:skip\nfrom .builder import Builder # isort:skip\n\nfrom .domain import ( # isort:skip\n Domain,\n SemanticDomainTypes,\n InferredSemanticDomainType,\n)\nfrom .parameter_container import ( # isort:skip\n DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,\n FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER,\n FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY,\n FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY,\n PARAMETER_KEY,\n VARIABLES_KEY,\n VARIABLES_PREFIX,\n ParameterNode,\n ParameterContainer,\n build_parameter_container,\n build_parameter_container_for_variables,\n is_fully_qualified_parameter_name_literal_string_format,\n get_parameter_value_by_fully_qualified_parameter_name,\n get_parameter_values_for_fully_qualified_parameter_names,\n get_fully_qualified_parameter_names,\n)\n", "path": "great_expectations/rule_based_profiler/types/__init__.py"}]} | 504 | 148 |
gh_patches_debug_8097 | rasdani/github-patches | git_diff | uccser__cs-unplugged-652 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Modify docker configuration to work on OSX
Docker for Mac does not properly support the `network_mode: host` option for containers. In order to run the system on OSX, it will be necessary to network the containers using a bridged network:
> By default Compose sets up a single network for your app. Each container for a service joins the default network and is both reachable by other containers on that network, and discoverable by them at a hostname identical to the container name.
Rather than accessing other containers via a port on localhost, containers will access each other using the instance name as the hostname. Port 80 will then be exposed from the nginx container to the host.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `csunplugged/config/settings/database_proxy.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Django settings for connecting via Google Cloud SQL Proxy."""
3
4 from .base import * # noqa: F403
5
6
7 # DATABASE CONFIGURATION
8 # ----------------------------------------------------------------------------
9 # See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
10 DATABASES = {
11 "default": {
12 "ENGINE": "django.db.backends.postgresql",
13 "HOST": "localhost",
14 "PORT": "5433",
15 "NAME": "csunplugged",
16 "USER": env("GOOGLE_CLOUD_SQL_DATABASE_USERNAME"), # noqa: F405
17 "PASSWORD": env("GOOGLE_CLOUD_SQL_DATABASE_PASSWORD"), # noqa: F405
18 "ATOMIC_REQUESTS": True,
19 }
20 }
21
22 SECRET_KEY = env("DJANGO_SECRET_KEY") # noqa: F405
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/csunplugged/config/settings/database_proxy.py b/csunplugged/config/settings/database_proxy.py
--- a/csunplugged/config/settings/database_proxy.py
+++ b/csunplugged/config/settings/database_proxy.py
@@ -10,8 +10,8 @@
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
- "HOST": "localhost",
- "PORT": "5433",
+ "HOST": "cloud_sql_proxy",
+ "PORT": "5432",
"NAME": "csunplugged",
"USER": env("GOOGLE_CLOUD_SQL_DATABASE_USERNAME"), # noqa: F405
"PASSWORD": env("GOOGLE_CLOUD_SQL_DATABASE_PASSWORD"), # noqa: F405
| {"golden_diff": "diff --git a/csunplugged/config/settings/database_proxy.py b/csunplugged/config/settings/database_proxy.py\n--- a/csunplugged/config/settings/database_proxy.py\n+++ b/csunplugged/config/settings/database_proxy.py\n@@ -10,8 +10,8 @@\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n- \"HOST\": \"localhost\",\n- \"PORT\": \"5433\",\n+ \"HOST\": \"cloud_sql_proxy\",\n+ \"PORT\": \"5432\",\n \"NAME\": \"csunplugged\",\n \"USER\": env(\"GOOGLE_CLOUD_SQL_DATABASE_USERNAME\"), # noqa: F405\n \"PASSWORD\": env(\"GOOGLE_CLOUD_SQL_DATABASE_PASSWORD\"), # noqa: F405\n", "issue": "Modify docker configuration to work on OSX\nDocker for Mac does not properly support the `network_mode: host` option for containers. In order to run the system on OSX, it will be necessary to network the containers using a bridged network:\r\n\r\n> By default Compose sets up a single network for your app. Each container for a service joins the default network and is both reachable by other containers on that network, and discoverable by them at a hostname identical to the container name.\"\r\n\r\nRather than accessing other containers via a port on localhost, containers will access each other using the instance name as the hostname. Port 80 will then be exposed from the nginx container to the host.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Django settings for connecting via Google Cloud SQL Proxy.\"\"\"\n\nfrom .base import * # noqa: F403\n\n\n# DATABASE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"HOST\": \"localhost\",\n \"PORT\": \"5433\",\n \"NAME\": \"csunplugged\",\n \"USER\": env(\"GOOGLE_CLOUD_SQL_DATABASE_USERNAME\"), # noqa: F405\n \"PASSWORD\": env(\"GOOGLE_CLOUD_SQL_DATABASE_PASSWORD\"), # noqa: F405\n \"ATOMIC_REQUESTS\": True,\n }\n}\n\nSECRET_KEY = env(\"DJANGO_SECRET_KEY\") # noqa: F405\n", "path": "csunplugged/config/settings/database_proxy.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Django settings for connecting via Google Cloud SQL Proxy.\"\"\"\n\nfrom .base import * # noqa: F403\n\n\n# DATABASE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"HOST\": \"cloud_sql_proxy\",\n \"PORT\": \"5432\",\n \"NAME\": \"csunplugged\",\n \"USER\": env(\"GOOGLE_CLOUD_SQL_DATABASE_USERNAME\"), # noqa: F405\n \"PASSWORD\": env(\"GOOGLE_CLOUD_SQL_DATABASE_PASSWORD\"), # noqa: F405\n \"ATOMIC_REQUESTS\": True,\n }\n}\n\nSECRET_KEY = env(\"DJANGO_SECRET_KEY\") # noqa: F405\n", "path": "csunplugged/config/settings/database_proxy.py"}]} | 625 | 176 |
gh_patches_debug_2946 | rasdani/github-patches | git_diff | beetbox__beets-3703 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Minor documentation correction: correct id3.org url
https://github.com/beetbox/beets/blob/master/docs/faq.rst#L303
refers to:
http://www.id3.org/id3v2.4.0-structure
as a reference url for a copy of the ID3v2.4 standard documentation, but this returns a "Not found" error. I've found 2 possibilities for the replacement:
https://id3.org/id3v2.4.0-structure
(with adverts) or
https://github.com/id3/ID3v2.4/raw/master/id3v2.40-structure.txt
(without adverts)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 from __future__ import division, absolute_import, print_function
4
5 AUTHOR = u'Adrian Sampson'
6
7 # General configuration
8
9 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.extlinks']
10
11 exclude_patterns = ['_build']
12 source_suffix = '.rst'
13 master_doc = 'index'
14
15 project = u'beets'
16 copyright = u'2016, Adrian Sampson'
17
18 version = '1.5'
19 release = '1.5.0'
20
21 pygments_style = 'sphinx'
22
23 # External links to the bug tracker and other sites.
24 extlinks = {
25 'bug': ('https://github.com/beetbox/beets/issues/%s', '#'),
26 'user': ('https://github.com/%s', ''),
27 'pypi': ('https://pypi.org/project/%s/', ''),
28 'stdlib': ('https://docs.python.org/3/library/%s.html', ''),
29 }
30
31 # Options for HTML output
32 htmlhelp_basename = 'beetsdoc'
33
34 # Options for LaTeX output
35 latex_documents = [
36 ('index', 'beets.tex', u'beets Documentation',
37 AUTHOR, 'manual'),
38 ]
39
40 # Options for manual page output
41 man_pages = [
42 ('reference/cli', 'beet', u'music tagger and library organizer',
43 [AUTHOR], 1),
44 ('reference/config', 'beetsconfig', u'beets configuration file',
45 [AUTHOR], 5),
46 ]
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -28,6 +28,13 @@
'stdlib': ('https://docs.python.org/3/library/%s.html', ''),
}
+linkcheck_ignore = [
+ r'https://github.com/beetbox/beets/issues/',
+ r'https://github.com/\w+$', # ignore user pages
+ r'.*localhost.*',
+ r'https://www.musixmatch.com/', # blocks requests
+]
+
# Options for HTML output
htmlhelp_basename = 'beetsdoc'
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -28,6 +28,13 @@\n 'stdlib': ('https://docs.python.org/3/library/%s.html', ''),\n }\n \n+linkcheck_ignore = [\n+ r'https://github.com/beetbox/beets/issues/',\n+ r'https://github.com/\\w+$', # ignore user pages\n+ r'.*localhost.*',\n+ r'https://www.musixmatch.com/', # blocks requests\n+]\n+\n # Options for HTML output\n htmlhelp_basename = 'beetsdoc'\n", "issue": "Minor documentation correction: correct id3.org url\nhttps://github.com/beetbox/beets/blob/master/docs/faq.rst#L303\r\nrefers to:\r\nhttp://www.id3.org/id3v2.4.0-structure\r\nas a reference url for a copy of the ID3v2.4 standard documentation, but this returns a \"Not found\" error. I've found 2 possibilities for the replacement:\r\nhttps://id3.org/id3v2.4.0-structure\r\n(with adverts) or\r\nhttps://github.com/id3/ID3v2.4/raw/master/id3v2.40-structure.txt\r\n(without adverts)\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import division, absolute_import, print_function\n\nAUTHOR = u'Adrian Sampson'\n\n# General configuration\n\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.extlinks']\n\nexclude_patterns = ['_build']\nsource_suffix = '.rst'\nmaster_doc = 'index'\n\nproject = u'beets'\ncopyright = u'2016, Adrian Sampson'\n\nversion = '1.5'\nrelease = '1.5.0'\n\npygments_style = 'sphinx'\n\n# External links to the bug tracker and other sites.\nextlinks = {\n 'bug': ('https://github.com/beetbox/beets/issues/%s', '#'),\n 'user': ('https://github.com/%s', ''),\n 'pypi': ('https://pypi.org/project/%s/', ''),\n 'stdlib': ('https://docs.python.org/3/library/%s.html', ''),\n}\n\n# Options for HTML output\nhtmlhelp_basename = 'beetsdoc'\n\n# Options for LaTeX output\nlatex_documents = [\n ('index', 'beets.tex', u'beets Documentation',\n AUTHOR, 'manual'),\n]\n\n# Options for manual page output\nman_pages = [\n ('reference/cli', 'beet', u'music tagger and library organizer',\n [AUTHOR], 1),\n ('reference/config', 'beetsconfig', u'beets configuration file',\n [AUTHOR], 5),\n]\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import division, absolute_import, print_function\n\nAUTHOR = u'Adrian Sampson'\n\n# General configuration\n\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.extlinks']\n\nexclude_patterns = ['_build']\nsource_suffix = '.rst'\nmaster_doc = 'index'\n\nproject = u'beets'\ncopyright = u'2016, Adrian Sampson'\n\nversion = '1.5'\nrelease = '1.5.0'\n\npygments_style = 'sphinx'\n\n# External links to the bug tracker and other sites.\nextlinks = {\n 'bug': ('https://github.com/beetbox/beets/issues/%s', '#'),\n 'user': ('https://github.com/%s', ''),\n 'pypi': ('https://pypi.org/project/%s/', ''),\n 'stdlib': ('https://docs.python.org/3/library/%s.html', ''),\n}\n\nlinkcheck_ignore = [\n r'https://github.com/beetbox/beets/issues/',\n r'https://github.com/\\w+$', # ignore user pages\n r'.*localhost.*',\n r'https://www.musixmatch.com/', # blocks requests\n]\n\n# Options for HTML output\nhtmlhelp_basename = 'beetsdoc'\n\n# Options for LaTeX output\nlatex_documents = [\n ('index', 'beets.tex', u'beets Documentation',\n AUTHOR, 'manual'),\n]\n\n# Options for manual page output\nman_pages = [\n ('reference/cli', 'beet', u'music tagger and library organizer',\n [AUTHOR], 1),\n ('reference/config', 'beetsconfig', u'beets configuration file',\n [AUTHOR], 5),\n]\n", "path": "docs/conf.py"}]} | 806 | 137 |
gh_patches_debug_27634 | rasdani/github-patches | git_diff | adap__flower-465 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve docstring for `start_server`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/py/flwr/server/app.py`
Content:
```
1 # Copyright 2020 Adap GmbH. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Flower server app."""
16
17
18 from logging import INFO
19 from typing import Dict, Optional
20
21 from flwr.common import GRPC_MAX_MESSAGE_LENGTH
22 from flwr.common.logger import log
23 from flwr.server.client_manager import SimpleClientManager
24 from flwr.server.grpc_server.grpc_server import start_insecure_grpc_server
25 from flwr.server.server import Server
26 from flwr.server.strategy import FedAvg, Strategy
27
28 DEFAULT_SERVER_ADDRESS = "[::]:8080"
29
30
31 def start_server(
32 server_address: str = DEFAULT_SERVER_ADDRESS,
33 server: Optional[Server] = None,
34 config: Optional[Dict[str, int]] = None,
35 strategy: Optional[Strategy] = None,
36 grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH,
37 ) -> None:
38 """Start a Flower server using the gRPC transport layer."""
39
40 # Create server instance if none was given
41 if server is None:
42 client_manager = SimpleClientManager()
43 if strategy is None:
44 strategy = FedAvg()
45 server = Server(client_manager=client_manager, strategy=strategy)
46
47 # Set default config values
48 if config is None:
49 config = {}
50 if "num_rounds" not in config:
51 config["num_rounds"] = 1
52
53 # Start gRPC server
54 grpc_server = start_insecure_grpc_server(
55 client_manager=server.client_manager(),
56 server_address=server_address,
57 max_message_length=grpc_max_message_length,
58 )
59 log(INFO, "Flower server running (insecure, %s rounds)", config["num_rounds"])
60
61 # Fit model
62 hist = server.fit(num_rounds=config["num_rounds"])
63 log(INFO, "app_fit: losses_distributed %s", str(hist.losses_distributed))
64 log(INFO, "app_fit: accuracies_distributed %s", str(hist.accuracies_distributed))
65 log(INFO, "app_fit: losses_centralized %s", str(hist.losses_centralized))
66 log(INFO, "app_fit: accuracies_centralized %s", str(hist.accuracies_centralized))
67
68 # Temporary workaround to force distributed evaluation
69 server.strategy.eval_fn = None # type: ignore
70
71 # Evaluate the final trained model
72 res = server.evaluate(rnd=-1)
73 if res is not None:
74 loss, (results, failures) = res
75 log(INFO, "app_evaluate: federated loss: %s", str(loss))
76 log(
77 INFO,
78 "app_evaluate: results %s",
79 str([(res[0].cid, res[1]) for res in results]),
80 )
81 log(INFO, "app_evaluate: failures %s", str(failures))
82 else:
83 log(INFO, "app_evaluate: no evaluation result")
84
85 # Stop the gRPC server
86 grpc_server.stop(1)
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/py/flwr/server/app.py b/src/py/flwr/server/app.py
--- a/src/py/flwr/server/app.py
+++ b/src/py/flwr/server/app.py
@@ -35,7 +35,33 @@
strategy: Optional[Strategy] = None,
grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH,
) -> None:
- """Start a Flower server using the gRPC transport layer."""
+ """Start a Flower server using the gRPC transport layer.
+
+ Arguments:
+ server_address: Optional[str] (default: `"[::]:8080"`). The IPv6
+ address of the server.
+ server: Optional[flwr.server.Server] (default: None). An implementation
+ of the abstract base class `flwr.server.Server`. If no instance is
+ provided, then `start_server` will create one.
+ config: Optional[Dict[str, int]] (default: None). The only currently
+ supported values is `num_rounds`, so a full configuration object
+ instructing the server to perform three rounds of federated
+ learning looks like the following: `{"num_rounds": 3}`.
+ strategy: Optional[flwr.server.Strategy] (default: None). An
+ implementation of the abstract base class `flwr.server.Strategy`.
+ If no strategy is provided, then `start_server` will use
+ `flwr.server.strategy.FedAvg`.
+ grpc_max_message_length: int (default: 536_870_912, this equals 512MB).
+ The maximum length of gRPC messages that can be exchanged with the
+ Flower clients. The default should be sufficient for most models.
+ Users who train very large models might need to increase this
+ value. Note that the Flower clients needs to started with the same
+ value (see `flwr.client.start_client`), otherwise clients will not
+ know about the increased limit and block larger messages.
+
+ Returns:
+ None.
+ """
# Create server instance if none was given
if server is None:
| {"golden_diff": "diff --git a/src/py/flwr/server/app.py b/src/py/flwr/server/app.py\n--- a/src/py/flwr/server/app.py\n+++ b/src/py/flwr/server/app.py\n@@ -35,7 +35,33 @@\n strategy: Optional[Strategy] = None,\n grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH,\n ) -> None:\n- \"\"\"Start a Flower server using the gRPC transport layer.\"\"\"\n+ \"\"\"Start a Flower server using the gRPC transport layer.\n+\n+ Arguments:\n+ server_address: Optional[str] (default: `\"[::]:8080\"`). The IPv6\n+ address of the server.\n+ server: Optional[flwr.server.Server] (default: None). An implementation\n+ of the abstract base class `flwr.server.Server`. If no instance is\n+ provided, then `start_server` will create one.\n+ config: Optional[Dict[str, int]] (default: None). The only currently\n+ supported values is `num_rounds`, so a full configuration object\n+ instructing the server to perform three rounds of federated\n+ learning looks like the following: `{\"num_rounds\": 3}`.\n+ strategy: Optional[flwr.server.Strategy] (default: None). An\n+ implementation of the abstract base class `flwr.server.Strategy`.\n+ If no strategy is provided, then `start_server` will use\n+ `flwr.server.strategy.FedAvg`.\n+ grpc_max_message_length: int (default: 536_870_912, this equals 512MB).\n+ The maximum length of gRPC messages that can be exchanged with the\n+ Flower clients. The default should be sufficient for most models.\n+ Users who train very large models might need to increase this\n+ value. Note that the Flower clients needs to started with the same\n+ value (see `flwr.client.start_client`), otherwise clients will not\n+ know about the increased limit and block larger messages.\n+\n+ Returns:\n+ None.\n+ \"\"\"\n \n # Create server instance if none was given\n if server is None:\n", "issue": "Improve docstring for `start_server`\n\n", "before_files": [{"content": "# Copyright 2020 Adap GmbH. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Flower server app.\"\"\"\n\n\nfrom logging import INFO\nfrom typing import Dict, Optional\n\nfrom flwr.common import GRPC_MAX_MESSAGE_LENGTH\nfrom flwr.common.logger import log\nfrom flwr.server.client_manager import SimpleClientManager\nfrom flwr.server.grpc_server.grpc_server import start_insecure_grpc_server\nfrom flwr.server.server import Server\nfrom flwr.server.strategy import FedAvg, Strategy\n\nDEFAULT_SERVER_ADDRESS = \"[::]:8080\"\n\n\ndef start_server(\n server_address: str = DEFAULT_SERVER_ADDRESS,\n server: Optional[Server] = None,\n config: Optional[Dict[str, int]] = None,\n strategy: Optional[Strategy] = None,\n grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH,\n) -> None:\n \"\"\"Start a Flower server using the gRPC transport layer.\"\"\"\n\n # Create server instance if none was given\n if server is None:\n client_manager = SimpleClientManager()\n if strategy is None:\n strategy = FedAvg()\n server = Server(client_manager=client_manager, strategy=strategy)\n\n # Set default config values\n if config is None:\n config = {}\n if \"num_rounds\" not in config:\n config[\"num_rounds\"] = 1\n\n # Start gRPC server\n grpc_server = start_insecure_grpc_server(\n client_manager=server.client_manager(),\n server_address=server_address,\n max_message_length=grpc_max_message_length,\n )\n log(INFO, \"Flower server running (insecure, %s rounds)\", config[\"num_rounds\"])\n\n # Fit model\n hist = server.fit(num_rounds=config[\"num_rounds\"])\n log(INFO, \"app_fit: losses_distributed %s\", str(hist.losses_distributed))\n log(INFO, \"app_fit: accuracies_distributed %s\", str(hist.accuracies_distributed))\n log(INFO, \"app_fit: losses_centralized %s\", str(hist.losses_centralized))\n log(INFO, \"app_fit: accuracies_centralized %s\", str(hist.accuracies_centralized))\n\n # Temporary workaround to force distributed evaluation\n server.strategy.eval_fn = None # type: ignore\n\n # Evaluate the final trained model\n res = server.evaluate(rnd=-1)\n if res is not None:\n loss, (results, failures) = res\n log(INFO, \"app_evaluate: federated loss: %s\", str(loss))\n log(\n INFO,\n \"app_evaluate: results %s\",\n str([(res[0].cid, res[1]) for res in results]),\n )\n log(INFO, \"app_evaluate: failures %s\", str(failures))\n else:\n log(INFO, \"app_evaluate: no evaluation result\")\n\n # Stop the gRPC server\n grpc_server.stop(1)\n", "path": "src/py/flwr/server/app.py"}], "after_files": [{"content": "# Copyright 2020 Adap GmbH. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Flower server app.\"\"\"\n\n\nfrom logging import INFO\nfrom typing import Dict, Optional\n\nfrom flwr.common import GRPC_MAX_MESSAGE_LENGTH\nfrom flwr.common.logger import log\nfrom flwr.server.client_manager import SimpleClientManager\nfrom flwr.server.grpc_server.grpc_server import start_insecure_grpc_server\nfrom flwr.server.server import Server\nfrom flwr.server.strategy import FedAvg, Strategy\n\nDEFAULT_SERVER_ADDRESS = \"[::]:8080\"\n\n\ndef start_server(\n server_address: str = DEFAULT_SERVER_ADDRESS,\n server: Optional[Server] = None,\n config: Optional[Dict[str, int]] = None,\n strategy: Optional[Strategy] = None,\n grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH,\n) -> None:\n \"\"\"Start a Flower server using the gRPC transport layer.\n\n Arguments:\n server_address: Optional[str] (default: `\"[::]:8080\"`). The IPv6\n address of the server.\n server: Optional[flwr.server.Server] (default: None). An implementation\n of the abstract base class `flwr.server.Server`. If no instance is\n provided, then `start_server` will create one.\n config: Optional[Dict[str, int]] (default: None). The only currently\n supported values is `num_rounds`, so a full configuration object\n instructing the server to perform three rounds of federated\n learning looks like the following: `{\"num_rounds\": 3}`.\n strategy: Optional[flwr.server.Strategy] (default: None). An\n implementation of the abstract base class `flwr.server.Strategy`.\n If no strategy is provided, then `start_server` will use\n `flwr.server.strategy.FedAvg`.\n grpc_max_message_length: int (default: 536_870_912, this equals 512MB).\n The maximum length of gRPC messages that can be exchanged with the\n Flower clients. The default should be sufficient for most models.\n Users who train very large models might need to increase this\n value. 
Note that the Flower clients needs to started with the same\n value (see `flwr.client.start_client`), otherwise clients will not\n know about the increased limit and block larger messages.\n\n Returns:\n None.\n \"\"\"\n\n # Create server instance if none was given\n if server is None:\n client_manager = SimpleClientManager()\n if strategy is None:\n strategy = FedAvg()\n server = Server(client_manager=client_manager, strategy=strategy)\n\n # Set default config values\n if config is None:\n config = {}\n if \"num_rounds\" not in config:\n config[\"num_rounds\"] = 1\n\n # Start gRPC server\n grpc_server = start_insecure_grpc_server(\n client_manager=server.client_manager(),\n server_address=server_address,\n max_message_length=grpc_max_message_length,\n )\n log(INFO, \"Flower server running (insecure, %s rounds)\", config[\"num_rounds\"])\n\n # Fit model\n hist = server.fit(num_rounds=config[\"num_rounds\"])\n log(INFO, \"app_fit: losses_distributed %s\", str(hist.losses_distributed))\n log(INFO, \"app_fit: accuracies_distributed %s\", str(hist.accuracies_distributed))\n log(INFO, \"app_fit: losses_centralized %s\", str(hist.losses_centralized))\n log(INFO, \"app_fit: accuracies_centralized %s\", str(hist.accuracies_centralized))\n\n # Temporary workaround to force distributed evaluation\n server.strategy.eval_fn = None # type: ignore\n\n # Evaluate the final trained model\n res = server.evaluate(rnd=-1)\n if res is not None:\n loss, (results, failures) = res\n log(INFO, \"app_evaluate: federated loss: %s\", str(loss))\n log(\n INFO,\n \"app_evaluate: results %s\",\n str([(res[0].cid, res[1]) for res in results]),\n )\n log(INFO, \"app_evaluate: failures %s\", str(failures))\n else:\n log(INFO, \"app_evaluate: no evaluation result\")\n\n # Stop the gRPC server\n grpc_server.stop(1)\n", "path": "src/py/flwr/server/app.py"}]} | 1,190 | 476 |
gh_patches_debug_2933 | rasdani/github-patches | git_diff | conda__conda-5009 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
When lacking permissions to write, clone message should quote prefix.
When trying to install a new package into a location where the user lacks write permissions (a read-only root), conda helpfully suggests cloning the environment into a new location:
```
CondaIOError: IO error: Missing write permissions in: C:\Program Files\Anaconda
#
# You don't appear to have the necessary permissions to install packages
# into the install area 'C:\Program Files\Anaconda'.
# However you can clone this environment into your home directory and
# then make changes to it.
# This may be done using the command:
#
# $ conda create -n my_deathstar --clone=C:\Program Files\Anaconda\envs\deathstar
```
As shown in the example above, this clone path may include spaces. This will be particularly common on Windows, where a global install results in files under Program Files, which a non-administrator user cannot write to and which contains a space. Because the command presents a prefix, it should be quoted to guard against this case.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/cli/help.py`
Content:
```
1 from __future__ import absolute_import, division, print_function, unicode_literals
2
3 from os.path import join
4
5 from .common import name_prefix
6 from ..base.context import context
7 from ..exceptions import CondaIOError
8
9
10 def read_message(fn):
11 res = []
12 for envs_dir in context.envs_dirs:
13 path = join(envs_dir, '.conda-help', fn)
14 try:
15 with open(path) as fi:
16 s = fi.read().decode('utf-8')
17 s = s.replace('${envs_dir}', envs_dir)
18 res.append(s)
19 except IOError:
20 pass
21 return ''.join(res)
22
23
24 def root_read_only(command, prefix, json=False):
25 assert command in {'install', 'update', 'remove'}
26
27 msg = read_message('ro.txt')
28 if not msg:
29 msg = """\
30 Missing write permissions in: ${root_dir}
31 #
32 # You don't appear to have the necessary permissions to ${command} packages
33 # into the install area '${root_dir}'.
34 # However you can clone this environment into your home directory and
35 # then make changes to it.
36 # This may be done using the command:
37 #
38 # $ conda create -n my_${name} --clone=${prefix}
39 """
40 msg = msg.replace('${root_dir}', context.root_prefix)
41 msg = msg.replace('${prefix}', prefix)
42 msg = msg.replace('${name}', name_prefix(prefix))
43 msg = msg.replace('${command}', command)
44 raise CondaIOError(msg)
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda/cli/help.py b/conda/cli/help.py
--- a/conda/cli/help.py
+++ b/conda/cli/help.py
@@ -35,7 +35,7 @@
# then make changes to it.
# This may be done using the command:
#
-# $ conda create -n my_${name} --clone=${prefix}
+# $ conda create -n my_${name} --clone="${prefix}"
"""
msg = msg.replace('${root_dir}', context.root_prefix)
msg = msg.replace('${prefix}', prefix)
| {"golden_diff": "diff --git a/conda/cli/help.py b/conda/cli/help.py\n--- a/conda/cli/help.py\n+++ b/conda/cli/help.py\n@@ -35,7 +35,7 @@\n # then make changes to it.\n # This may be done using the command:\n #\n-# $ conda create -n my_${name} --clone=${prefix}\n+# $ conda create -n my_${name} --clone=\"${prefix}\"\n \"\"\"\n msg = msg.replace('${root_dir}', context.root_prefix)\n msg = msg.replace('${prefix}', prefix)\n", "issue": "When lacking permissions to write, clone message should quote prefix.\nWhen trying to install a new package into a location that the user lacks write permissions (read-only root), conda helpfully suggests cloning the environment into a new location:\r\n\r\n```\r\nCondaIOError: IO error: Missing write permissions in: C:\\Program Files\\Anaconda\r\n#\r\n# You don't appear to have the necessary permissions to install packages\r\n# into the install area 'C:\\Program Files\\Anaconda'.\r\n# However you can clone this environment into your home directory and\r\n# then make changes to it.\r\n# This may be done using the command:\r\n#\r\n# $ conda create -n my_deathstar --clone=C:\\Program Files\\Anaconda\\envs\\deathstar\r\n```\r\nAs shown in the example above, this clone path may include spaces. This will be particularly common on Windows, where a global install will result in files written to Program Files, which a non-administrator user will not be able to write to, and contains spaces. Because the command presents a prefix, it should be quoted to guard against this case.\r\n\r\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom os.path import join\n\nfrom .common import name_prefix\nfrom ..base.context import context\nfrom ..exceptions import CondaIOError\n\n\ndef read_message(fn):\n res = []\n for envs_dir in context.envs_dirs:\n path = join(envs_dir, '.conda-help', fn)\n try:\n with open(path) as fi:\n s = fi.read().decode('utf-8')\n s = s.replace('${envs_dir}', envs_dir)\n res.append(s)\n except IOError:\n pass\n return ''.join(res)\n\n\ndef root_read_only(command, prefix, json=False):\n assert command in {'install', 'update', 'remove'}\n\n msg = read_message('ro.txt')\n if not msg:\n msg = \"\"\"\\\nMissing write permissions in: ${root_dir}\n#\n# You don't appear to have the necessary permissions to ${command} packages\n# into the install area '${root_dir}'.\n# However you can clone this environment into your home directory and\n# then make changes to it.\n# This may be done using the command:\n#\n# $ conda create -n my_${name} --clone=${prefix}\n\"\"\"\n msg = msg.replace('${root_dir}', context.root_prefix)\n msg = msg.replace('${prefix}', prefix)\n msg = msg.replace('${name}', name_prefix(prefix))\n msg = msg.replace('${command}', command)\n raise CondaIOError(msg)\n", "path": "conda/cli/help.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom os.path import join\n\nfrom .common import name_prefix\nfrom ..base.context import context\nfrom ..exceptions import CondaIOError\n\n\ndef read_message(fn):\n res = []\n for envs_dir in context.envs_dirs:\n path = join(envs_dir, '.conda-help', fn)\n try:\n with open(path) as fi:\n s = fi.read().decode('utf-8')\n s = s.replace('${envs_dir}', envs_dir)\n res.append(s)\n except IOError:\n pass\n return ''.join(res)\n\n\ndef root_read_only(command, prefix, json=False):\n assert command in {'install', 'update', 'remove'}\n\n msg = read_message('ro.txt')\n if not msg:\n msg = 
\"\"\"\\\nMissing write permissions in: ${root_dir}\n#\n# You don't appear to have the necessary permissions to ${command} packages\n# into the install area '${root_dir}'.\n# However you can clone this environment into your home directory and\n# then make changes to it.\n# This may be done using the command:\n#\n# $ conda create -n my_${name} --clone=\"${prefix}\"\n\"\"\"\n msg = msg.replace('${root_dir}', context.root_prefix)\n msg = msg.replace('${prefix}', prefix)\n msg = msg.replace('${name}', name_prefix(prefix))\n msg = msg.replace('${command}', command)\n raise CondaIOError(msg)\n", "path": "conda/cli/help.py"}]} | 888 | 118 |
gh_patches_debug_610 | rasdani/github-patches | git_diff | ivy-llc__ivy-23142 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ifft
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/jax/numpy/fft.py`
Content:
```
1 # local
2 import ivy
3 from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back
4 from ivy.func_wrapper import with_unsupported_dtypes
5
6
7 @to_ivy_arrays_and_back
8 def fft(a, n=None, axis=-1, norm=None):
9 if norm is None:
10 norm = "backward"
11 return ivy.fft(a, axis, norm=norm, n=n)
12
13
14 @to_ivy_arrays_and_back
15 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
16 def fftshift(x, axes=None, name=None):
17 shape = x.shape
18
19 if axes is None:
20 axes = tuple(range(x.ndim))
21 shifts = [(dim // 2) for dim in shape]
22 elif isinstance(axes, int):
23 shifts = shape[axes] // 2
24 else:
25 shifts = [shape[ax] // 2 for ax in axes]
26
27 roll = ivy.roll(x, shifts, axis=axes)
28
29 return roll
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/jax/numpy/fft.py b/ivy/functional/frontends/jax/numpy/fft.py
--- a/ivy/functional/frontends/jax/numpy/fft.py
+++ b/ivy/functional/frontends/jax/numpy/fft.py
@@ -27,3 +27,10 @@
roll = ivy.roll(x, shifts, axis=axes)
return roll
+
+
+@to_ivy_arrays_and_back
+def ifft(a, n=None, axis=-1, norm=None):
+ if norm is None:
+ norm = "backward"
+ return ivy.ifft(a, axis, norm=norm, n=n)
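For context, a minimal usage sketch of the new frontend function follows. It assumes a NumPy backend is installed and uses a made-up signal; the import path simply mirrors the `ivy/functional/frontends/jax/numpy/fft.py` module shown above, and the round-trip check is only an illustration, not part of the patch.

```python
import ivy
from ivy.functional.frontends.jax.numpy import fft as jax_fft

ivy.set_backend("numpy")  # assumption: the NumPy backend is available

signal = ivy.array([0.0 + 0.0j, 1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j])
spectrum = jax_fft.fft(signal)        # existing forward transform
recovered = jax_fft.ifft(spectrum)    # inverse transform added by the patch
print(recovered)  # expected to be close to `signal`, up to floating-point error
```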
| {"golden_diff": "diff --git a/ivy/functional/frontends/jax/numpy/fft.py b/ivy/functional/frontends/jax/numpy/fft.py\n--- a/ivy/functional/frontends/jax/numpy/fft.py\n+++ b/ivy/functional/frontends/jax/numpy/fft.py\n@@ -27,3 +27,10 @@\n roll = ivy.roll(x, shifts, axis=axes)\n \n return roll\n+\n+\n+@to_ivy_arrays_and_back\n+def ifft(a, n=None, axis=-1, norm=None):\n+ if norm is None:\n+ norm = \"backward\"\n+ return ivy.ifft(a, axis, norm=norm, n=n)\n", "issue": "ifft\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.func_wrapper import with_unsupported_dtypes\n\n\n@to_ivy_arrays_and_back\ndef fft(a, n=None, axis=-1, norm=None):\n if norm is None:\n norm = \"backward\"\n return ivy.fft(a, axis, norm=norm, n=n)\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\ndef fftshift(x, axes=None, name=None):\n shape = x.shape\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shifts = [(dim // 2) for dim in shape]\n elif isinstance(axes, int):\n shifts = shape[axes] // 2\n else:\n shifts = [shape[ax] // 2 for ax in axes]\n\n roll = ivy.roll(x, shifts, axis=axes)\n\n return roll\n", "path": "ivy/functional/frontends/jax/numpy/fft.py"}], "after_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.func_wrapper import with_unsupported_dtypes\n\n\n@to_ivy_arrays_and_back\ndef fft(a, n=None, axis=-1, norm=None):\n if norm is None:\n norm = \"backward\"\n return ivy.fft(a, axis, norm=norm, n=n)\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\ndef fftshift(x, axes=None, name=None):\n shape = x.shape\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shifts = [(dim // 2) for dim in shape]\n elif isinstance(axes, int):\n shifts = shape[axes] // 2\n else:\n shifts = [shape[ax] // 2 for ax in axes]\n\n roll = ivy.roll(x, shifts, axis=axes)\n\n return roll\n\n\n@to_ivy_arrays_and_back\ndef ifft(a, n=None, axis=-1, norm=None):\n if norm is None:\n norm = \"backward\"\n return ivy.ifft(a, axis, norm=norm, n=n)\n", "path": "ivy/functional/frontends/jax/numpy/fft.py"}]} | 554 | 155 |
gh_patches_debug_28326 | rasdani/github-patches | git_diff | pyca__cryptography-5803 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Circular import error (again) in 3.4.3
Opening a new issue as the old one (https://github.com/pyca/cryptography/issues/5756) wasn't re-opened after my comment:
@reaperhulk
Hi,
still got some circular import problems here with cryptography 3.4.3 :
`AttributeError: partially initialized module 'cryptography.hazmat.primitives.asymmetric.dh' has no attribute 'DHParameters' (most likely due to a circular import)`
reproducer used (gencrypto.py):
```
pip3 install -U cryptography
Collecting cryptography
Downloading cryptography-3.4.3-cp36-abi3-manylinux2014_x86_64.whl (3.2 MB)
|████████████████████████████████| 3.2 MB 5.3 MB/s
python3 gencrypto.py
Traceback (most recent call last):
File "gencrypto.py", line 2, in <module>
from cryptography.hazmat.primitives.asymmetric import dh
File "/home/xxxxx/.local/lib/python3.8/site-packages/cryptography/hazmat/primitives/asymmetric/dh.py", line 11, in <module>
from cryptography.hazmat.primitives import serialization
File "/home/xxxxx/.local/lib/python3.8/site-packages/cryptography/hazmat/primitives/serialization/__init__.py", line 15, in <module>
from cryptography.hazmat.primitives.serialization.base import (
File "/home/xxxxx/.local/lib/python3.8/site-packages/cryptography/hazmat/primitives/serialization/base.py", line 28, in <module>
def load_pem_parameters(data: bytes, backend=None) -> dh.DHParameters:
AttributeError: partially initialized module 'cryptography.hazmat.primitives.asymmetric.dh' has no attribute 'DHParameters' (most likely due to a circular import)
```
**gencrypto.py**
```
import time
from cryptography.hazmat.primitives.asymmetric import dh
for i in [2048,3072,4096]:
begin=time.time()
params = dh.generate_parameters(
generator=2,
key_size=i,
)
end=time.time()
print('took {}s for {} keysize'.format(int(end-begin), i))
```
Create py.typed
the changelog lists:
> cryptography now has PEP 484 type hints on nearly all of its public APIs. Users can begin using them to type check their code with mypy.
but I'm still falling back to typeshed because this file is missing
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/hazmat/primitives/serialization/base.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5
6 import typing
7
8 from cryptography.hazmat._types import (
9 _PRIVATE_KEY_TYPES,
10 _PUBLIC_KEY_TYPES,
11 )
12 from cryptography.hazmat.backends import _get_backend
13 from cryptography.hazmat.primitives.asymmetric import dh
14
15
16 def load_pem_private_key(
17 data: bytes, password: typing.Optional[bytes], backend=None
18 ) -> _PRIVATE_KEY_TYPES:
19 backend = _get_backend(backend)
20 return backend.load_pem_private_key(data, password)
21
22
23 def load_pem_public_key(data: bytes, backend=None) -> _PUBLIC_KEY_TYPES:
24 backend = _get_backend(backend)
25 return backend.load_pem_public_key(data)
26
27
28 def load_pem_parameters(data: bytes, backend=None) -> dh.DHParameters:
29 backend = _get_backend(backend)
30 return backend.load_pem_parameters(data)
31
32
33 def load_der_private_key(
34 data: bytes, password: typing.Optional[bytes], backend=None
35 ) -> _PRIVATE_KEY_TYPES:
36 backend = _get_backend(backend)
37 return backend.load_der_private_key(data, password)
38
39
40 def load_der_public_key(data: bytes, backend=None) -> _PUBLIC_KEY_TYPES:
41 backend = _get_backend(backend)
42 return backend.load_der_public_key(data)
43
44
45 def load_der_parameters(data: bytes, backend=None) -> dh.DHParameters:
46 backend = _get_backend(backend)
47 return backend.load_der_parameters(data)
48
```
Path: `src/cryptography/__about__.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5
6 __all__ = [
7 "__title__",
8 "__summary__",
9 "__uri__",
10 "__version__",
11 "__author__",
12 "__email__",
13 "__license__",
14 "__copyright__",
15 ]
16
17 __title__ = "cryptography"
18 __summary__ = (
19 "cryptography is a package which provides cryptographic recipes"
20 " and primitives to Python developers."
21 )
22 __uri__ = "https://github.com/pyca/cryptography"
23
24 __version__ = "3.4.3"
25
26 __author__ = "The Python Cryptographic Authority and individual contributors"
27 __email__ = "[email protected]"
28
29 __license__ = "BSD or Apache License, Version 2.0"
30 __copyright__ = "Copyright 2013-2021 {}".format(__author__)
31
```
Path: `vectors/cryptography_vectors/__about__.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 __all__ = [
6 "__title__",
7 "__summary__",
8 "__uri__",
9 "__version__",
10 "__author__",
11 "__email__",
12 "__license__",
13 "__copyright__",
14 ]
15
16 __title__ = "cryptography_vectors"
17 __summary__ = "Test vectors for the cryptography package."
18
19 __uri__ = "https://github.com/pyca/cryptography"
20
21 __version__ = "3.4.3"
22
23 __author__ = "The Python Cryptographic Authority and individual contributors"
24 __email__ = "[email protected]"
25
26 __license__ = "BSD or Apache License, Version 2.0"
27 __copyright__ = "Copyright 2013-2021 %s" % __author__
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cryptography/__about__.py b/src/cryptography/__about__.py
--- a/src/cryptography/__about__.py
+++ b/src/cryptography/__about__.py
@@ -21,7 +21,7 @@
)
__uri__ = "https://github.com/pyca/cryptography"
-__version__ = "3.4.3"
+__version__ = "3.4.4"
__author__ = "The Python Cryptographic Authority and individual contributors"
__email__ = "[email protected]"
diff --git a/src/cryptography/hazmat/primitives/serialization/base.py b/src/cryptography/hazmat/primitives/serialization/base.py
--- a/src/cryptography/hazmat/primitives/serialization/base.py
+++ b/src/cryptography/hazmat/primitives/serialization/base.py
@@ -25,7 +25,7 @@
return backend.load_pem_public_key(data)
-def load_pem_parameters(data: bytes, backend=None) -> dh.DHParameters:
+def load_pem_parameters(data: bytes, backend=None) -> "dh.DHParameters":
backend = _get_backend(backend)
return backend.load_pem_parameters(data)
@@ -42,6 +42,6 @@
return backend.load_der_public_key(data)
-def load_der_parameters(data: bytes, backend=None) -> dh.DHParameters:
+def load_der_parameters(data: bytes, backend=None) -> "dh.DHParameters":
backend = _get_backend(backend)
return backend.load_der_parameters(data)
diff --git a/vectors/cryptography_vectors/__about__.py b/vectors/cryptography_vectors/__about__.py
--- a/vectors/cryptography_vectors/__about__.py
+++ b/vectors/cryptography_vectors/__about__.py
@@ -18,7 +18,7 @@
__uri__ = "https://github.com/pyca/cryptography"
-__version__ = "3.4.3"
+__version__ = "3.4.4"
__author__ = "The Python Cryptographic Authority and individual contributors"
__email__ = "[email protected]"
| {"golden_diff": "diff --git a/src/cryptography/__about__.py b/src/cryptography/__about__.py\n--- a/src/cryptography/__about__.py\n+++ b/src/cryptography/__about__.py\n@@ -21,7 +21,7 @@\n )\n __uri__ = \"https://github.com/pyca/cryptography\"\n \n-__version__ = \"3.4.3\"\n+__version__ = \"3.4.4\"\n \n __author__ = \"The Python Cryptographic Authority and individual contributors\"\n __email__ = \"[email protected]\"\ndiff --git a/src/cryptography/hazmat/primitives/serialization/base.py b/src/cryptography/hazmat/primitives/serialization/base.py\n--- a/src/cryptography/hazmat/primitives/serialization/base.py\n+++ b/src/cryptography/hazmat/primitives/serialization/base.py\n@@ -25,7 +25,7 @@\n return backend.load_pem_public_key(data)\n \n \n-def load_pem_parameters(data: bytes, backend=None) -> dh.DHParameters:\n+def load_pem_parameters(data: bytes, backend=None) -> \"dh.DHParameters\":\n backend = _get_backend(backend)\n return backend.load_pem_parameters(data)\n \n@@ -42,6 +42,6 @@\n return backend.load_der_public_key(data)\n \n \n-def load_der_parameters(data: bytes, backend=None) -> dh.DHParameters:\n+def load_der_parameters(data: bytes, backend=None) -> \"dh.DHParameters\":\n backend = _get_backend(backend)\n return backend.load_der_parameters(data)\ndiff --git a/vectors/cryptography_vectors/__about__.py b/vectors/cryptography_vectors/__about__.py\n--- a/vectors/cryptography_vectors/__about__.py\n+++ b/vectors/cryptography_vectors/__about__.py\n@@ -18,7 +18,7 @@\n \n __uri__ = \"https://github.com/pyca/cryptography\"\n \n-__version__ = \"3.4.3\"\n+__version__ = \"3.4.4\"\n \n __author__ = \"The Python Cryptographic Authority and individual contributors\"\n __email__ = \"[email protected]\"\n", "issue": "Circular import error (again) in 3.4.3\nOpening a new issue as the old one (https://github.com/pyca/cryptography/issues/5756) wasn't re-opened after my comment:\r\n\r\n\r\n@reaperhulk\r\n\r\nHi,\r\n\r\nstill got some circular import problems here with cryptography 3.4.3 :\r\n\r\n`AttributeError: partially initialized module 'cryptography.hazmat.primitives.asymmetric.dh' has no attribute 'DHParameters' (most likely due to a circular import)`\r\n\r\nreproducer used (gencrypto.py):\r\n\r\n```\r\npip3 install -U cryptography\r\nCollecting cryptography\r\n Downloading cryptography-3.4.3-cp36-abi3-manylinux2014_x86_64.whl (3.2 MB)\r\n |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 3.2 MB 5.3 MB/s \r\n\r\n\r\npython3 gencrypto.py\r\n\r\nTraceback (most recent call last):\r\n File \"gencrypto.py\", line 2, in <module>\r\n from cryptography.hazmat.primitives.asymmetric import dh\r\n File \"/home/xxxxx/.local/lib/python3.8/site-packages/cryptography/hazmat/primitives/asymmetric/dh.py\", line 11, in <module>\r\n from cryptography.hazmat.primitives import serialization\r\n File \"/home/xxxxx/.local/lib/python3.8/site-packages/cryptography/hazmat/primitives/serialization/__init__.py\", line 15, in <module>\r\n from cryptography.hazmat.primitives.serialization.base import (\r\n File \"/home/xxxxx/.local/lib/python3.8/site-packages/cryptography/hazmat/primitives/serialization/base.py\", line 28, in <module>\r\n def load_pem_parameters(data: bytes, backend=None) -> dh.DHParameters:\r\nAttributeError: partially initialized module 'cryptography.hazmat.primitives.asymmetric.dh' has no attribute 'DHParameters' (most likely due to a circular 
import)\r\n```\r\n\r\n**gencrypto.py**\r\n\r\n```\r\nimport time\r\nfrom cryptography.hazmat.primitives.asymmetric import dh\r\n\r\n\r\nfor i in [2048,3072,4096]:\r\n begin=time.time()\r\n params = dh.generate_parameters(\r\n generator=2,\r\n key_size=i,\r\n )\r\n end=time.time()\r\n print('took {}s for {} keysize'.format(int(end-begin), i))\r\n```\nCreate py.typed\nthe changelog lists:\r\n\r\n> cryptography now has PEP 484 type hints on nearly all of of its public APIs. Users can begin using them to type check their code with mypy.\r\n\r\nbut I'm still falling back to typeshed because this file is missing\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport typing\n\nfrom cryptography.hazmat._types import (\n _PRIVATE_KEY_TYPES,\n _PUBLIC_KEY_TYPES,\n)\nfrom cryptography.hazmat.backends import _get_backend\nfrom cryptography.hazmat.primitives.asymmetric import dh\n\n\ndef load_pem_private_key(\n data: bytes, password: typing.Optional[bytes], backend=None\n) -> _PRIVATE_KEY_TYPES:\n backend = _get_backend(backend)\n return backend.load_pem_private_key(data, password)\n\n\ndef load_pem_public_key(data: bytes, backend=None) -> _PUBLIC_KEY_TYPES:\n backend = _get_backend(backend)\n return backend.load_pem_public_key(data)\n\n\ndef load_pem_parameters(data: bytes, backend=None) -> dh.DHParameters:\n backend = _get_backend(backend)\n return backend.load_pem_parameters(data)\n\n\ndef load_der_private_key(\n data: bytes, password: typing.Optional[bytes], backend=None\n) -> _PRIVATE_KEY_TYPES:\n backend = _get_backend(backend)\n return backend.load_der_private_key(data, password)\n\n\ndef load_der_public_key(data: bytes, backend=None) -> _PUBLIC_KEY_TYPES:\n backend = _get_backend(backend)\n return backend.load_der_public_key(data)\n\n\ndef load_der_parameters(data: bytes, backend=None) -> dh.DHParameters:\n backend = _get_backend(backend)\n return backend.load_der_parameters(data)\n", "path": "src/cryptography/hazmat/primitives/serialization/base.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\n__all__ = [\n \"__title__\",\n \"__summary__\",\n \"__uri__\",\n \"__version__\",\n \"__author__\",\n \"__email__\",\n \"__license__\",\n \"__copyright__\",\n]\n\n__title__ = \"cryptography\"\n__summary__ = (\n \"cryptography is a package which provides cryptographic recipes\"\n \" and primitives to Python developers.\"\n)\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"3.4.3\"\n\n__author__ = \"The Python Cryptographic Authority and individual contributors\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2021 {}\".format(__author__)\n", "path": "src/cryptography/__about__.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\n__all__ = [\n \"__title__\",\n \"__summary__\",\n \"__uri__\",\n \"__version__\",\n \"__author__\",\n \"__email__\",\n \"__license__\",\n \"__copyright__\",\n]\n\n__title__ = \"cryptography_vectors\"\n__summary__ = \"Test vectors for the cryptography package.\"\n\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"3.4.3\"\n\n__author__ = \"The Python Cryptographic Authority and individual contributors\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2021 %s\" % __author__\n", "path": "vectors/cryptography_vectors/__about__.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport typing\n\nfrom cryptography.hazmat._types import (\n _PRIVATE_KEY_TYPES,\n _PUBLIC_KEY_TYPES,\n)\nfrom cryptography.hazmat.backends import _get_backend\nfrom cryptography.hazmat.primitives.asymmetric import dh\n\n\ndef load_pem_private_key(\n data: bytes, password: typing.Optional[bytes], backend=None\n) -> _PRIVATE_KEY_TYPES:\n backend = _get_backend(backend)\n return backend.load_pem_private_key(data, password)\n\n\ndef load_pem_public_key(data: bytes, backend=None) -> _PUBLIC_KEY_TYPES:\n backend = _get_backend(backend)\n return backend.load_pem_public_key(data)\n\n\ndef load_pem_parameters(data: bytes, backend=None) -> \"dh.DHParameters\":\n backend = _get_backend(backend)\n return backend.load_pem_parameters(data)\n\n\ndef load_der_private_key(\n data: bytes, password: typing.Optional[bytes], backend=None\n) -> _PRIVATE_KEY_TYPES:\n backend = _get_backend(backend)\n return backend.load_der_private_key(data, password)\n\n\ndef load_der_public_key(data: bytes, backend=None) -> _PUBLIC_KEY_TYPES:\n backend = _get_backend(backend)\n return backend.load_der_public_key(data)\n\n\ndef load_der_parameters(data: bytes, backend=None) -> \"dh.DHParameters\":\n backend = _get_backend(backend)\n return backend.load_der_parameters(data)\n", "path": "src/cryptography/hazmat/primitives/serialization/base.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\n__all__ = [\n \"__title__\",\n \"__summary__\",\n \"__uri__\",\n \"__version__\",\n \"__author__\",\n \"__email__\",\n \"__license__\",\n \"__copyright__\",\n]\n\n__title__ = \"cryptography\"\n__summary__ = (\n \"cryptography is a package which provides cryptographic recipes\"\n \" and primitives to Python developers.\"\n)\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"3.4.4\"\n\n__author__ = \"The Python Cryptographic Authority and individual contributors\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2021 {}\".format(__author__)\n", "path": "src/cryptography/__about__.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\n__all__ = [\n \"__title__\",\n \"__summary__\",\n \"__uri__\",\n \"__version__\",\n \"__author__\",\n \"__email__\",\n \"__license__\",\n \"__copyright__\",\n]\n\n__title__ = \"cryptography_vectors\"\n__summary__ = \"Test vectors for the cryptography package.\"\n\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"3.4.4\"\n\n__author__ = \"The Python Cryptographic Authority and individual contributors\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2021 %s\" % __author__\n", "path": "vectors/cryptography_vectors/__about__.py"}]} | 1,844 | 461 |
gh_patches_debug_50232 | rasdani/github-patches | git_diff | pex-tool__pex-1720 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.79
On the docket:
+ [x] The --lock resolver only includes extras from the 1st encounter of a required project in its graph walk. #1717
+ [x] Support canonicalizing absolute paths in locks. (#1716)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.78"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.78"
+__version__ = "2.1.79"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.78\"\n+__version__ = \"2.1.79\"\n", "issue": "Release 2.1.79\nOn the docket:\r\n+ [x] The --lock resolver only includes extras from the 1st encounter of a required project in its graph walk. #1717 \r\n+ [x] Support canonicalizing absolute paths in locks. (#1716)\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.78\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.79\"\n", "path": "pex/version.py"}]} | 371 | 96 |
gh_patches_debug_21195 | rasdani/github-patches | git_diff | open-mmlab__mmocr-1587 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] Dataset preprocessing does not specially handle text that contains the separator ","
### Prerequisite
- [X] I have searched [Issues](https://github.com/open-mmlab/mmocr/issues) and [Discussions](https://github.com/open-mmlab/mmocr/discussions) but cannot get the expected help.
- [X] The bug has not been fixed in the [latest version (0.x)](https://github.com/open-mmlab/mmocr) or [latest version (1.x)](https://github.com/open-mmlab/mmocr/tree/dev-1.x).
### Task
I'm using the official example scripts/configs for the officially supported tasks/models/datasets.
### Branch
1.x branch https://github.com/open-mmlab/mmocr/tree/dev-1.x
### Environment
This bug is unrelated to the environment.
torch 1.7.0
torchvision 0.8.1
mmcv 2.0.0rc3
mmcv-full 1.7.0
mmdet 3.0.0rc0
mmengine 0.1.0
mmocr 1.0.0rc3
### Reproduces the problem - code sample
The relevant code is in mmocr/datasets/preparers/parsers/icdar_txt_parser.py
### Reproduces the problem - command or script
python tools/dataset_converters/prepare_dataset.py icdar2015 --task textspotting
### Reproduces the problem - error message
No error message; inspecting the output shows that the preprocessed text ground truth (gt) of some images is incorrect.
### Additional information
1. Used mmocr 1.x to preprocess the icdar2015 dataset into the unified-format json files.
2. Found that text parsing does not handle the case where the transcription contains the separator: for example, the text 100,000 in the original dataset becomes 100 in the unified format, i.e. the separator splits the text and only the first piece is kept.
3. A concrete example is mmocr/data/icdar2015/textspotting_train.json: in the gt for the image data/icdar2015/textdet_imgs/train/img_39.jpg, the original annotation is 402,85,460,86,457,106,399,105,100,000, where 402,85,460,86,457,106,399,105 is the polygon annotation and 100,000 is the text annotation. In the preprocessed OCR dataset, however, the text annotation is 100.
(Because the polygon annotation and the text annotation are distinguished by the separator ",", the text gets truncated.)
--- END ISSUE ---
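The truncation happens because the annotation loader zips its format keys with a plain `line.split(separator)`, so a transcription such as `100,000` loses everything after its first comma. A small, self-contained sketch of a split that folds the surplus separators back into the final field is shown below; the format string mirrors the ICDAR-style line quoted in the issue, and the helper name is invented for this illustration.

```python
def split_keeping_last_field(line, keys, separator=","):
    """Split an annotation line, re-joining any extra separators into the
    last field so a transcription like '100,000' stays intact."""
    values = line.split(separator)
    values = values[: len(keys) - 1] + [separator.join(values[len(keys) - 1:])]
    return dict(zip(keys, values))


keys = "x1,y1,x2,y2,x3,y3,x4,y4,trans".split(",")
line = "402,85,460,86,457,106,399,105,100,000"
print(split_keeping_last_field(line, keys)["trans"])  # -> '100,000'
```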
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmocr/datasets/preparers/parsers/base.py`
Content:
```
1 # Copyright (c) OpenMMLab. All rights reserved.
2 from abc import abstractmethod
3 from functools import partial
4 from typing import Dict, List, Optional, Tuple, Union
5
6 from mmengine import track_parallel_progress
7
8
9 class BaseParser:
10 """Base class for parsing annotations.
11
12 Args:
13 data_root (str, optional): Path to the data root. Defaults to None.
14 nproc (int, optional): Number of processes. Defaults to 1.
15 """
16
17 def __init__(self,
18 data_root: Optional[str] = None,
19 nproc: int = 1) -> None:
20 self.data_root = data_root
21 self.nproc = nproc
22
23 def __call__(self, files: List[Tuple], split: str) -> List:
24 """Parse annotations.
25
26 Args:
27 files (List[Tuple]): A list of a tuple of
28 (image_path, annotation_path).
29 split (str): The split of the dataset.
30
31 Returns:
32 List: A list of a tuple of (image_path, instances)
33 """
34 samples = self.parse_files(files, split)
35 return samples
36
37 def parse_files(self, files: List[Tuple], split: str) -> List[Tuple]:
38 """Convert annotations to MMOCR format.
39
40 Args:
41 files (Tuple): A list of tuple of path to image and annotation.
42
43 Returns:
44 List[Tuple]: A list of a tuple of (image_path, instances)
45 """
46 func = partial(self.parse_file, split=split)
47 samples = track_parallel_progress(func, files, nproc=self.nproc)
48 return samples
49
50 @abstractmethod
51 def parse_file(self, file: Tuple, split: str) -> Tuple:
52 """Convert annotation for a single image.
53
54 Args:
55 file (Tuple): A tuple of path to image and annotation
56 split (str): Current split.
57
58 Returns:
59 Tuple: A tuple of (img_path, instance). Instance is a list of dict
60 containing parsed annotations, which should contain the
61 following keys:
62 - 'poly' or 'box' (textdet or textspotting)
63 - 'text' (textspotting or textrecog)
64 - 'ignore' (all task)
65
66 Examples:
67 An example of returned values:
68 >>> ('imgs/train/xxx.jpg',
69 >>> dict(
70 >>> poly=[[[0, 1], [1, 1], [1, 0], [0, 0]]],
71 >>> text='hello',
72 >>> ignore=False)
73 >>> )
74 """
75 raise NotImplementedError
76
77 def loader(self,
78 file_path: str,
79 separator: str = ',',
80 format: str = 'x1,y1,x2,y2,x3,y3,x4,y4,trans',
81 encoding='utf-8') -> Union[Dict, str]:
82 """A basic loader designed for .txt format annotation.
83
84 Args:
85 file_path (str): Path to the txt file.
86 separator (str, optional): Separator of data. Defaults to ','.
87 format (str, optional): Annotation format.
88 Defaults to 'x1,y1,x2,y2,x3,y3,x4,y4,trans'.
89 encoding (str, optional): Encoding format. Defaults to 'utf-8'.
90
91 Yields:
92 Iterator[Union[Dict, str]]: Original text line or a dict containing
93 the information of the text line.
94 """
95 keys = format.split(separator)
96 with open(file_path, 'r', encoding=encoding) as f:
97 for line in f.readlines():
98 line = line.strip()
99 if line:
100 yield dict(zip(keys, line.split(separator)))
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmocr/datasets/preparers/parsers/base.py b/mmocr/datasets/preparers/parsers/base.py
--- a/mmocr/datasets/preparers/parsers/base.py
+++ b/mmocr/datasets/preparers/parsers/base.py
@@ -79,7 +79,8 @@
separator: str = ',',
format: str = 'x1,y1,x2,y2,x3,y3,x4,y4,trans',
encoding='utf-8') -> Union[Dict, str]:
- """A basic loader designed for .txt format annotation.
+ """A basic loader designed for .txt format annotation. It greedily
+ extracts information separated by separators.
Args:
file_path (str): Path to the txt file.
@@ -96,5 +97,8 @@
with open(file_path, 'r', encoding=encoding) as f:
for line in f.readlines():
line = line.strip()
+ values = line.split(separator)
+ values = values[:len(keys) -
+ 1] + [separator.join(values[len(keys) - 1:])]
if line:
- yield dict(zip(keys, line.split(separator)))
+ yield dict(zip(keys, values))
| {"golden_diff": "diff --git a/mmocr/datasets/preparers/parsers/base.py b/mmocr/datasets/preparers/parsers/base.py\n--- a/mmocr/datasets/preparers/parsers/base.py\n+++ b/mmocr/datasets/preparers/parsers/base.py\n@@ -79,7 +79,8 @@\n separator: str = ',',\n format: str = 'x1,y1,x2,y2,x3,y3,x4,y4,trans',\n encoding='utf-8') -> Union[Dict, str]:\n- \"\"\"A basic loader designed for .txt format annotation.\n+ \"\"\"A basic loader designed for .txt format annotation. It greedily\n+ extracts information separated by separators.\n \n Args:\n file_path (str): Path to the txt file.\n@@ -96,5 +97,8 @@\n with open(file_path, 'r', encoding=encoding) as f:\n for line in f.readlines():\n line = line.strip()\n+ values = line.split(separator)\n+ values = values[:len(keys) -\n+ 1] + [separator.join(values[len(keys) - 1:])]\n if line:\n- yield dict(zip(keys, line.split(separator)))\n+ yield dict(zip(keys, values))\n", "issue": "[Bug] \u6570\u636e\u96c6\u9884\u5904\u7406\u4e2d\uff0c\u5bf9\u4e8e\u542b\",\"\u7684\u7279\u6b8a\u6587\u672c\u672a\u7279\u6b8a\u5904\u7406\n### Prerequisite\n\n- [X] I have searched [Issues](https://github.com/open-mmlab/mmocr/issues) and [Discussions](https://github.com/open-mmlab/mmocr/discussions) but cannot get the expected help.\n- [X] The bug has not been fixed in the [latest version (0.x)](https://github.com/open-mmlab/mmocr) or [latest version (1.x)](https://github.com/open-mmlab/mmocr/tree/dev-1.x).\n\n### Task\n\nI'm using the official example scripts/configs for the officially supported tasks/models/datasets.\n\n### Branch\n\n1.x branch https://github.com/open-mmlab/mmocr/tree/dev-1.x\n\n### Environment\n\n\u672c\u6b21bug\u548c\u73af\u5883\u65e0\u5173\r\ntorch 1.7.0\r\ntorchvision 0.8.1\r\nmmcv 2.0.0rc3\r\nmmcv-full 1.7.0\r\nmmdet 3.0.0rc0\r\nmmengine 0.1.0\r\nmmocr 1.0.0rc3 \r\n\n\n### Reproduces the problem - code sample\n\n\u76f8\u5173\u4ee3\u7801\u5728mmocr/datasets/preparers/parsers/icdar_txt_parser.py\n\n### Reproduces the problem - command or script\n\npython tools/dataset_converters/prepare_dataset.py icdar2015 --task textspotting\n\n### Reproduces the problem - error message\n\nno error message\uff0c\u68c0\u67e5\u53d1\u73b0\u90e8\u5206\u56fe\u7247\u9884\u5904\u7406\u540e\u7684\u6587\u672cgt\u4e0d\u5bf9\n\n### Additional information\n\n1. \u4f7f\u7528mmocr 1.x\u7248\u672c\u8fdb\u884c\u9884\u5904\u7406icdar2015\u6570\u636e\u96c6\u4e3a\u7edf\u4e00\u683c\u5f0f\u7684json\u6587\u4ef6\r\n2. \u53d1\u73b0\u5bf9\u6587\u672c\u5904\u7406\u65f6\uff0c\u672a\u5904\u7406\u542b\u5206\u9694\u7b26\u7684\u60c5\u51b5\uff0c\u5982\u539f\u59cb\u6570\u636e\u96c6\u7684100,000\u6587\u672c\uff0c\u5728\u7edf\u4e00\u683c\u5f0f\u4e2d\u4e3a100\uff0c\u5373\u5206\u9694\u7b26\u5c06\u6587\u672c\u5206\u5f00\u540e\u4ec5\u53d6\u4e86\u7b2c\u4e00\u4e2a\u6587\u672c\r\n3. 
\u5177\u4f53\u4f8b\u5b50\u5982mmocr/data/icdar2015/textspotting_train.json\u4e2d\uff0c\u5728\u56fe\u7247data/icdar2015/textdet_imgs/train/img_39.jpg\u7684gt\u4e2d\uff0c\u539f\u59cb\u6570\u636e\u96c6\u6587\u672c\u6807\u6ce8\u4e3a402,85,460,86,457,106,399,105,100,000\uff0c\u5176\u4e2d402,85,460,86,457,106,399,105\u4e3apolygon\u6807\u6ce8\uff0c100,000\u4e3a\u6587\u672c\u6807\u6ce8\u3002\u4f46\u662f\u9884\u5904\u7406\u540e\u7684ocr\u6570\u636e\u96c6\u4e2d\uff0c\u6587\u672c\u6807\u6ce8\u4e3a100\u3002\r\n\uff08\u56e0\u4e3a\u5728\u533a\u5206polygon\u6807\u6ce8\u548ctext\u6807\u6ce8\u65f6\uff0c\u901a\u8fc7\u5206\u9694\u7b26\",\"\u6765\u533a\u5206\uff0c\u5bfc\u81f4text\u6587\u672c\u88ab\u622a\u65ad\uff09\r\n\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import abstractmethod\nfrom functools import partial\nfrom typing import Dict, List, Optional, Tuple, Union\n\nfrom mmengine import track_parallel_progress\n\n\nclass BaseParser:\n \"\"\"Base class for parsing annotations.\n\n Args:\n data_root (str, optional): Path to the data root. Defaults to None.\n nproc (int, optional): Number of processes. Defaults to 1.\n \"\"\"\n\n def __init__(self,\n data_root: Optional[str] = None,\n nproc: int = 1) -> None:\n self.data_root = data_root\n self.nproc = nproc\n\n def __call__(self, files: List[Tuple], split: str) -> List:\n \"\"\"Parse annotations.\n\n Args:\n files (List[Tuple]): A list of a tuple of\n (image_path, annotation_path).\n split (str): The split of the dataset.\n\n Returns:\n List: A list of a tuple of (image_path, instances)\n \"\"\"\n samples = self.parse_files(files, split)\n return samples\n\n def parse_files(self, files: List[Tuple], split: str) -> List[Tuple]:\n \"\"\"Convert annotations to MMOCR format.\n\n Args:\n files (Tuple): A list of tuple of path to image and annotation.\n\n Returns:\n List[Tuple]: A list of a tuple of (image_path, instances)\n \"\"\"\n func = partial(self.parse_file, split=split)\n samples = track_parallel_progress(func, files, nproc=self.nproc)\n return samples\n\n @abstractmethod\n def parse_file(self, file: Tuple, split: str) -> Tuple:\n \"\"\"Convert annotation for a single image.\n\n Args:\n file (Tuple): A tuple of path to image and annotation\n split (str): Current split.\n\n Returns:\n Tuple: A tuple of (img_path, instance). Instance is a list of dict\n containing parsed annotations, which should contain the\n following keys:\n - 'poly' or 'box' (textdet or textspotting)\n - 'text' (textspotting or textrecog)\n - 'ignore' (all task)\n\n Examples:\n An example of returned values:\n >>> ('imgs/train/xxx.jpg',\n >>> dict(\n >>> poly=[[[0, 1], [1, 1], [1, 0], [0, 0]]],\n >>> text='hello',\n >>> ignore=False)\n >>> )\n \"\"\"\n raise NotImplementedError\n\n def loader(self,\n file_path: str,\n separator: str = ',',\n format: str = 'x1,y1,x2,y2,x3,y3,x4,y4,trans',\n encoding='utf-8') -> Union[Dict, str]:\n \"\"\"A basic loader designed for .txt format annotation.\n\n Args:\n file_path (str): Path to the txt file.\n separator (str, optional): Separator of data. Defaults to ','.\n format (str, optional): Annotation format.\n Defaults to 'x1,y1,x2,y2,x3,y3,x4,y4,trans'.\n encoding (str, optional): Encoding format. 
Defaults to 'utf-8'.\n\n Yields:\n Iterator[Union[Dict, str]]: Original text line or a dict containing\n the information of the text line.\n \"\"\"\n keys = format.split(separator)\n with open(file_path, 'r', encoding=encoding) as f:\n for line in f.readlines():\n line = line.strip()\n if line:\n yield dict(zip(keys, line.split(separator)))\n", "path": "mmocr/datasets/preparers/parsers/base.py"}], "after_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import abstractmethod\nfrom functools import partial\nfrom typing import Dict, List, Optional, Tuple, Union\n\nfrom mmengine import track_parallel_progress\n\n\nclass BaseParser:\n \"\"\"Base class for parsing annotations.\n\n Args:\n data_root (str, optional): Path to the data root. Defaults to None.\n nproc (int, optional): Number of processes. Defaults to 1.\n \"\"\"\n\n def __init__(self,\n data_root: Optional[str] = None,\n nproc: int = 1) -> None:\n self.data_root = data_root\n self.nproc = nproc\n\n def __call__(self, files: List[Tuple], split: str) -> List:\n \"\"\"Parse annotations.\n\n Args:\n files (List[Tuple]): A list of a tuple of\n (image_path, annotation_path).\n split (str): The split of the dataset.\n\n Returns:\n List: A list of a tuple of (image_path, instances)\n \"\"\"\n samples = self.parse_files(files, split)\n return samples\n\n def parse_files(self, files: List[Tuple], split: str) -> List[Tuple]:\n \"\"\"Convert annotations to MMOCR format.\n\n Args:\n files (Tuple): A list of tuple of path to image and annotation.\n\n Returns:\n List[Tuple]: A list of a tuple of (image_path, instances)\n \"\"\"\n func = partial(self.parse_file, split=split)\n samples = track_parallel_progress(func, files, nproc=self.nproc)\n return samples\n\n @abstractmethod\n def parse_file(self, file: Tuple, split: str) -> Tuple:\n \"\"\"Convert annotation for a single image.\n\n Args:\n file (Tuple): A tuple of path to image and annotation\n split (str): Current split.\n\n Returns:\n Tuple: A tuple of (img_path, instance). Instance is a list of dict\n containing parsed annotations, which should contain the\n following keys:\n - 'poly' or 'box' (textdet or textspotting)\n - 'text' (textspotting or textrecog)\n - 'ignore' (all task)\n\n Examples:\n An example of returned values:\n >>> ('imgs/train/xxx.jpg',\n >>> dict(\n >>> poly=[[[0, 1], [1, 1], [1, 0], [0, 0]]],\n >>> text='hello',\n >>> ignore=False)\n >>> )\n \"\"\"\n raise NotImplementedError\n\n def loader(self,\n file_path: str,\n separator: str = ',',\n format: str = 'x1,y1,x2,y2,x3,y3,x4,y4,trans',\n encoding='utf-8') -> Union[Dict, str]:\n \"\"\"A basic loader designed for .txt format annotation. It greedily\n extracts information separated by separators.\n\n Args:\n file_path (str): Path to the txt file.\n separator (str, optional): Separator of data. Defaults to ','.\n format (str, optional): Annotation format.\n Defaults to 'x1,y1,x2,y2,x3,y3,x4,y4,trans'.\n encoding (str, optional): Encoding format. Defaults to 'utf-8'.\n\n Yields:\n Iterator[Union[Dict, str]]: Original text line or a dict containing\n the information of the text line.\n \"\"\"\n keys = format.split(separator)\n with open(file_path, 'r', encoding=encoding) as f:\n for line in f.readlines():\n line = line.strip()\n values = line.split(separator)\n values = values[:len(keys) -\n 1] + [separator.join(values[len(keys) - 1:])]\n if line:\n yield dict(zip(keys, values))\n", "path": "mmocr/datasets/preparers/parsers/base.py"}]} | 1,858 | 269 |
gh_patches_debug_7801 | rasdani/github-patches | git_diff | python-pillow__Pillow-7357 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug in QOI decoder leading to corrupted images
### What did you do?
Loaded dice.qoi from the official test image set (https://qoiformat.org/qoi_test_images.zip).
### What did you expect to happen?
Image is not corrupted.
### What actually happened?
Image is corrupted.
### What are your OS, Python and Pillow versions?
* OS: doesn't matter
* Python: doesn't matter
* Pillow: all versions with QOI support
The problem is the faulty implementation of the QOI_OP_RGB operation here https://github.com/python-pillow/Pillow/blob/24606216e1e5931a8fe6f41acde9e7e67489905d/src/PIL/QoiImagePlugin.py#L58C10-L58C10
The implementation sets the alpha channel to 255; however, the QOI specification says it should use the previous alpha value.
Replacing that line with something like `value = self.fd.read(3) + o8(self._previous_pixel[3])` fixes the problem.
--- END ISSUE ---
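To make the expected behaviour concrete: a QOI_OP_RGB chunk carries only new R, G and B bytes, and the decoded pixel's alpha must be copied from the previously emitted pixel rather than hard-coded to 255. The toy function below illustrates just that carry-over; the function name and byte values are made up, and it is not the plugin's actual decode loop.

```python
def decode_op_rgb(rgb_bytes, previous_pixel):
    """Build the RGBA value for a QOI_OP_RGB chunk (illustration only).

    `rgb_bytes` are the three bytes following the 0xFE tag and
    `previous_pixel` is the last emitted 4-byte RGBA pixel.
    """
    return rgb_bytes[:3] + previous_pixel[3:4]  # keep the previous alpha


prev = bytes([10, 20, 30, 128])                    # previous pixel, alpha 128
print(list(decode_op_rgb(b"\x01\x02\x03", prev)))  # -> [1, 2, 3, 128]
```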
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/PIL/QoiImagePlugin.py`
Content:
```
1 #
2 # The Python Imaging Library.
3 #
4 # QOI support for PIL
5 #
6 # See the README file for information on usage and redistribution.
7 #
8
9 import os
10
11 from . import Image, ImageFile
12 from ._binary import i32be as i32
13 from ._binary import o8
14
15
16 def _accept(prefix):
17 return prefix[:4] == b"qoif"
18
19
20 class QoiImageFile(ImageFile.ImageFile):
21 format = "QOI"
22 format_description = "Quite OK Image"
23
24 def _open(self):
25 if not _accept(self.fp.read(4)):
26 msg = "not a QOI file"
27 raise SyntaxError(msg)
28
29 self._size = tuple(i32(self.fp.read(4)) for i in range(2))
30
31 channels = self.fp.read(1)[0]
32 self._mode = "RGB" if channels == 3 else "RGBA"
33
34 self.fp.seek(1, os.SEEK_CUR) # colorspace
35 self.tile = [("qoi", (0, 0) + self._size, self.fp.tell(), None)]
36
37
38 class QoiDecoder(ImageFile.PyDecoder):
39 _pulls_fd = True
40
41 def _add_to_previous_pixels(self, value):
42 self._previous_pixel = value
43
44 r, g, b, a = value
45 hash_value = (r * 3 + g * 5 + b * 7 + a * 11) % 64
46 self._previously_seen_pixels[hash_value] = value
47
48 def decode(self, buffer):
49 self._previously_seen_pixels = {}
50 self._previous_pixel = None
51 self._add_to_previous_pixels(b"".join(o8(i) for i in (0, 0, 0, 255)))
52
53 data = bytearray()
54 bands = Image.getmodebands(self.mode)
55 while len(data) < self.state.xsize * self.state.ysize * bands:
56 byte = self.fd.read(1)[0]
57 if byte == 0b11111110: # QOI_OP_RGB
58 value = self.fd.read(3) + o8(255)
59 elif byte == 0b11111111: # QOI_OP_RGBA
60 value = self.fd.read(4)
61 else:
62 op = byte >> 6
63 if op == 0: # QOI_OP_INDEX
64 op_index = byte & 0b00111111
65 value = self._previously_seen_pixels.get(op_index, (0, 0, 0, 0))
66 elif op == 1: # QOI_OP_DIFF
67 value = (
68 (self._previous_pixel[0] + ((byte & 0b00110000) >> 4) - 2)
69 % 256,
70 (self._previous_pixel[1] + ((byte & 0b00001100) >> 2) - 2)
71 % 256,
72 (self._previous_pixel[2] + (byte & 0b00000011) - 2) % 256,
73 )
74 value += (self._previous_pixel[3],)
75 elif op == 2: # QOI_OP_LUMA
76 second_byte = self.fd.read(1)[0]
77 diff_green = (byte & 0b00111111) - 32
78 diff_red = ((second_byte & 0b11110000) >> 4) - 8
79 diff_blue = (second_byte & 0b00001111) - 8
80
81 value = tuple(
82 (self._previous_pixel[i] + diff_green + diff) % 256
83 for i, diff in enumerate((diff_red, 0, diff_blue))
84 )
85 value += (self._previous_pixel[3],)
86 elif op == 3: # QOI_OP_RUN
87 run_length = (byte & 0b00111111) + 1
88 value = self._previous_pixel
89 if bands == 3:
90 value = value[:3]
91 data += value * run_length
92 continue
93 value = b"".join(o8(i) for i in value)
94 self._add_to_previous_pixels(value)
95
96 if bands == 3:
97 value = value[:3]
98 data += value
99 self.set_as_raw(bytes(data))
100 return -1, 0
101
102
103 Image.register_open(QoiImageFile.format, QoiImageFile, _accept)
104 Image.register_decoder("qoi", QoiDecoder)
105 Image.register_extension(QoiImageFile.format, ".qoi")
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/PIL/QoiImagePlugin.py b/src/PIL/QoiImagePlugin.py
--- a/src/PIL/QoiImagePlugin.py
+++ b/src/PIL/QoiImagePlugin.py
@@ -55,7 +55,7 @@
while len(data) < self.state.xsize * self.state.ysize * bands:
byte = self.fd.read(1)[0]
if byte == 0b11111110: # QOI_OP_RGB
- value = self.fd.read(3) + o8(255)
+ value = self.fd.read(3) + self._previous_pixel[3:]
elif byte == 0b11111111: # QOI_OP_RGBA
value = self.fd.read(4)
else:
| {"golden_diff": "diff --git a/src/PIL/QoiImagePlugin.py b/src/PIL/QoiImagePlugin.py\n--- a/src/PIL/QoiImagePlugin.py\n+++ b/src/PIL/QoiImagePlugin.py\n@@ -55,7 +55,7 @@\n while len(data) < self.state.xsize * self.state.ysize * bands:\n byte = self.fd.read(1)[0]\n if byte == 0b11111110: # QOI_OP_RGB\n- value = self.fd.read(3) + o8(255)\n+ value = self.fd.read(3) + self._previous_pixel[3:]\n elif byte == 0b11111111: # QOI_OP_RGBA\n value = self.fd.read(4)\n else:\n", "issue": "Bug in QOI decoder leading to corrupted images\n### What did you do?\r\n\r\nLoaded dice.qoi from the official test image set (https://qoiformat.org/qoi_test_images.zip).\r\n\r\n### What did you expect to happen?\r\n\r\nImage is not corrupted.\r\n\r\n### What actually happened?\r\n\r\nImage is corrupted.\r\n\r\n### What are your OS, Python and Pillow versions?\r\n\r\n* OS: doesn't matter\r\n* Python: doesn't matter\r\n* Pillow: all versions with QOI support\r\n\r\nThe problem is the faulty implementation of the QOI_OP_RGB operation here https://github.com/python-pillow/Pillow/blob/24606216e1e5931a8fe6f41acde9e7e67489905d/src/PIL/QoiImagePlugin.py#L58C10-L58C10\r\nThe implementation sets the alpha channel to 255, however, the QOI specification says it should use the previous alpha value.\r\nReplacing that line with something like `value = self.fd.read(3) + o8(self._previous_pixel[3])` fixes the problem.\n", "before_files": [{"content": "#\n# The Python Imaging Library.\n#\n# QOI support for PIL\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport os\n\nfrom . import Image, ImageFile\nfrom ._binary import i32be as i32\nfrom ._binary import o8\n\n\ndef _accept(prefix):\n return prefix[:4] == b\"qoif\"\n\n\nclass QoiImageFile(ImageFile.ImageFile):\n format = \"QOI\"\n format_description = \"Quite OK Image\"\n\n def _open(self):\n if not _accept(self.fp.read(4)):\n msg = \"not a QOI file\"\n raise SyntaxError(msg)\n\n self._size = tuple(i32(self.fp.read(4)) for i in range(2))\n\n channels = self.fp.read(1)[0]\n self._mode = \"RGB\" if channels == 3 else \"RGBA\"\n\n self.fp.seek(1, os.SEEK_CUR) # colorspace\n self.tile = [(\"qoi\", (0, 0) + self._size, self.fp.tell(), None)]\n\n\nclass QoiDecoder(ImageFile.PyDecoder):\n _pulls_fd = True\n\n def _add_to_previous_pixels(self, value):\n self._previous_pixel = value\n\n r, g, b, a = value\n hash_value = (r * 3 + g * 5 + b * 7 + a * 11) % 64\n self._previously_seen_pixels[hash_value] = value\n\n def decode(self, buffer):\n self._previously_seen_pixels = {}\n self._previous_pixel = None\n self._add_to_previous_pixels(b\"\".join(o8(i) for i in (0, 0, 0, 255)))\n\n data = bytearray()\n bands = Image.getmodebands(self.mode)\n while len(data) < self.state.xsize * self.state.ysize * bands:\n byte = self.fd.read(1)[0]\n if byte == 0b11111110: # QOI_OP_RGB\n value = self.fd.read(3) + o8(255)\n elif byte == 0b11111111: # QOI_OP_RGBA\n value = self.fd.read(4)\n else:\n op = byte >> 6\n if op == 0: # QOI_OP_INDEX\n op_index = byte & 0b00111111\n value = self._previously_seen_pixels.get(op_index, (0, 0, 0, 0))\n elif op == 1: # QOI_OP_DIFF\n value = (\n (self._previous_pixel[0] + ((byte & 0b00110000) >> 4) - 2)\n % 256,\n (self._previous_pixel[1] + ((byte & 0b00001100) >> 2) - 2)\n % 256,\n (self._previous_pixel[2] + (byte & 0b00000011) - 2) % 256,\n )\n value += (self._previous_pixel[3],)\n elif op == 2: # QOI_OP_LUMA\n second_byte = self.fd.read(1)[0]\n diff_green = (byte & 0b00111111) - 32\n diff_red = ((second_byte & 0b11110000) >> 4) - 8\n diff_blue = 
(second_byte & 0b00001111) - 8\n\n value = tuple(\n (self._previous_pixel[i] + diff_green + diff) % 256\n for i, diff in enumerate((diff_red, 0, diff_blue))\n )\n value += (self._previous_pixel[3],)\n elif op == 3: # QOI_OP_RUN\n run_length = (byte & 0b00111111) + 1\n value = self._previous_pixel\n if bands == 3:\n value = value[:3]\n data += value * run_length\n continue\n value = b\"\".join(o8(i) for i in value)\n self._add_to_previous_pixels(value)\n\n if bands == 3:\n value = value[:3]\n data += value\n self.set_as_raw(bytes(data))\n return -1, 0\n\n\nImage.register_open(QoiImageFile.format, QoiImageFile, _accept)\nImage.register_decoder(\"qoi\", QoiDecoder)\nImage.register_extension(QoiImageFile.format, \".qoi\")\n", "path": "src/PIL/QoiImagePlugin.py"}], "after_files": [{"content": "#\n# The Python Imaging Library.\n#\n# QOI support for PIL\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport os\n\nfrom . import Image, ImageFile\nfrom ._binary import i32be as i32\nfrom ._binary import o8\n\n\ndef _accept(prefix):\n return prefix[:4] == b\"qoif\"\n\n\nclass QoiImageFile(ImageFile.ImageFile):\n format = \"QOI\"\n format_description = \"Quite OK Image\"\n\n def _open(self):\n if not _accept(self.fp.read(4)):\n msg = \"not a QOI file\"\n raise SyntaxError(msg)\n\n self._size = tuple(i32(self.fp.read(4)) for i in range(2))\n\n channels = self.fp.read(1)[0]\n self._mode = \"RGB\" if channels == 3 else \"RGBA\"\n\n self.fp.seek(1, os.SEEK_CUR) # colorspace\n self.tile = [(\"qoi\", (0, 0) + self._size, self.fp.tell(), None)]\n\n\nclass QoiDecoder(ImageFile.PyDecoder):\n _pulls_fd = True\n\n def _add_to_previous_pixels(self, value):\n self._previous_pixel = value\n\n r, g, b, a = value\n hash_value = (r * 3 + g * 5 + b * 7 + a * 11) % 64\n self._previously_seen_pixels[hash_value] = value\n\n def decode(self, buffer):\n self._previously_seen_pixels = {}\n self._previous_pixel = None\n self._add_to_previous_pixels(b\"\".join(o8(i) for i in (0, 0, 0, 255)))\n\n data = bytearray()\n bands = Image.getmodebands(self.mode)\n while len(data) < self.state.xsize * self.state.ysize * bands:\n byte = self.fd.read(1)[0]\n if byte == 0b11111110: # QOI_OP_RGB\n value = self.fd.read(3) + self._previous_pixel[3:]\n elif byte == 0b11111111: # QOI_OP_RGBA\n value = self.fd.read(4)\n else:\n op = byte >> 6\n if op == 0: # QOI_OP_INDEX\n op_index = byte & 0b00111111\n value = self._previously_seen_pixels.get(op_index, (0, 0, 0, 0))\n elif op == 1: # QOI_OP_DIFF\n value = (\n (self._previous_pixel[0] + ((byte & 0b00110000) >> 4) - 2)\n % 256,\n (self._previous_pixel[1] + ((byte & 0b00001100) >> 2) - 2)\n % 256,\n (self._previous_pixel[2] + (byte & 0b00000011) - 2) % 256,\n )\n value += (self._previous_pixel[3],)\n elif op == 2: # QOI_OP_LUMA\n second_byte = self.fd.read(1)[0]\n diff_green = (byte & 0b00111111) - 32\n diff_red = ((second_byte & 0b11110000) >> 4) - 8\n diff_blue = (second_byte & 0b00001111) - 8\n\n value = tuple(\n (self._previous_pixel[i] + diff_green + diff) % 256\n for i, diff in enumerate((diff_red, 0, diff_blue))\n )\n value += (self._previous_pixel[3],)\n elif op == 3: # QOI_OP_RUN\n run_length = (byte & 0b00111111) + 1\n value = self._previous_pixel\n if bands == 3:\n value = value[:3]\n data += value * run_length\n continue\n value = b\"\".join(o8(i) for i in value)\n self._add_to_previous_pixels(value)\n\n if bands == 3:\n value = value[:3]\n data += value\n self.set_as_raw(bytes(data))\n return -1, 0\n\n\nImage.register_open(QoiImageFile.format, QoiImageFile, 
_accept)\nImage.register_decoder(\"qoi\", QoiDecoder)\nImage.register_extension(QoiImageFile.format, \".qoi\")\n", "path": "src/PIL/QoiImagePlugin.py"}]} | 1,772 | 179 |
gh_patches_debug_16247 | rasdani/github-patches | git_diff | pyca__cryptography-1397 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dsa_private_key.pem vector has p and q whose lengths we don't normally allow
We currently enforce that `p` and `q` have lengths which are one of:
- `(1024, 160)`
- `(2048, 256)`
- `(3072, 256)`
However, this vector has `(p, q)` with lengths of `(2048, 160)`. Do we need to be less restrictive, or use a different vector?
This was discovered in the process of writing a pure python PEM loader.
--- END ISSUE ---
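If the answer is to relax the check, one illustrative direction — not necessarily what the project should adopt — is to validate the bit lengths of `p` and `q` independently rather than as fixed pairs, which would let the vector's 2048-bit `p` with a 160-bit `q` pass. A sketch, assuming the module's existing `utils.bit_length` helper:

```python
def _check_dsa_parameters(parameters):
    # Illustrative relaxation only: accept any pairing of these sizes
    # instead of the three hard-coded (p, q) pairs.
    if utils.bit_length(parameters.p) not in (1024, 2048, 3072):
        raise ValueError("p must be exactly 1024, 2048, or 3072 bits long")
    if utils.bit_length(parameters.q) not in (160, 256):
        raise ValueError("q must be exactly 160 or 256 bits long")
    if not (1 < parameters.g < parameters.p):
        raise ValueError("g, p don't satisfy 1 < g < p.")
```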
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cryptography/hazmat/primitives/asymmetric/dsa.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
10 # implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from __future__ import absolute_import, division, print_function
15
16 import six
17
18 from cryptography import utils
19
20
21 def generate_parameters(key_size, backend):
22 return backend.generate_dsa_parameters(key_size)
23
24
25 def generate_private_key(key_size, backend):
26 return backend.generate_dsa_private_key_and_parameters(key_size)
27
28
29 def _check_dsa_parameters(parameters):
30 if (utils.bit_length(parameters.p),
31 utils.bit_length(parameters.q)) not in (
32 (1024, 160),
33 (2048, 256),
34 (3072, 256)):
35 raise ValueError(
36 "p and q's bit-lengths must be one of these pairs (1024, 160), "
37 "(2048, 256), or (3072, 256). Not ({0:d}, {1:d})".format(
38 utils.bit_length(parameters.p), utils.bit_length(parameters.q)
39 )
40 )
41
42 if not (1 < parameters.g < parameters.p):
43 raise ValueError("g, p don't satisfy 1 < g < p.")
44
45
46 def _check_dsa_private_numbers(numbers):
47 parameters = numbers.public_numbers.parameter_numbers
48 _check_dsa_parameters(parameters)
49 if numbers.x <= 0 or numbers.x >= parameters.q:
50 raise ValueError("x must be > 0 and < q.")
51
52 if numbers.public_numbers.y != pow(parameters.g, numbers.x, parameters.p):
53 raise ValueError("y must be equal to (g ** x % p).")
54
55
56 class DSAParameterNumbers(object):
57 def __init__(self, p, q, g):
58 if (
59 not isinstance(p, six.integer_types) or
60 not isinstance(q, six.integer_types) or
61 not isinstance(g, six.integer_types)
62 ):
63 raise TypeError(
64 "DSAParameterNumbers p, q, and g arguments must be integers."
65 )
66
67 self._p = p
68 self._q = q
69 self._g = g
70
71 @property
72 def p(self):
73 return self._p
74
75 @property
76 def q(self):
77 return self._q
78
79 @property
80 def g(self):
81 return self._g
82
83 def parameters(self, backend):
84 return backend.load_dsa_parameter_numbers(self)
85
86
87 class DSAPublicNumbers(object):
88 def __init__(self, y, parameter_numbers):
89 if not isinstance(y, six.integer_types):
90 raise TypeError("DSAPublicNumbers y argument must be an integer.")
91
92 if not isinstance(parameter_numbers, DSAParameterNumbers):
93 raise TypeError(
94 "parameter_numbers must be a DSAParameterNumbers instance."
95 )
96
97 self._y = y
98 self._parameter_numbers = parameter_numbers
99
100 @property
101 def y(self):
102 return self._y
103
104 @property
105 def parameter_numbers(self):
106 return self._parameter_numbers
107
108 def public_key(self, backend):
109 return backend.load_dsa_public_numbers(self)
110
111
112 class DSAPrivateNumbers(object):
113 def __init__(self, x, public_numbers):
114 if not isinstance(x, six.integer_types):
115 raise TypeError("DSAPrivateNumbers x argument must be an integer.")
116
117 if not isinstance(public_numbers, DSAPublicNumbers):
118 raise TypeError(
119 "public_numbers must be a DSAPublicNumbers instance."
120 )
121 self._public_numbers = public_numbers
122 self._x = x
123
124 @property
125 def x(self):
126 return self._x
127
128 @property
129 def public_numbers(self):
130 return self._public_numbers
131
132 def private_key(self, backend):
133 return backend.load_dsa_private_numbers(self)
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cryptography/hazmat/primitives/asymmetric/dsa.py b/cryptography/hazmat/primitives/asymmetric/dsa.py
--- a/cryptography/hazmat/primitives/asymmetric/dsa.py
+++ b/cryptography/hazmat/primitives/asymmetric/dsa.py
@@ -27,17 +27,10 @@
def _check_dsa_parameters(parameters):
- if (utils.bit_length(parameters.p),
- utils.bit_length(parameters.q)) not in (
- (1024, 160),
- (2048, 256),
- (3072, 256)):
- raise ValueError(
- "p and q's bit-lengths must be one of these pairs (1024, 160), "
- "(2048, 256), or (3072, 256). Not ({0:d}, {1:d})".format(
- utils.bit_length(parameters.p), utils.bit_length(parameters.q)
- )
- )
+ if utils.bit_length(parameters.p) not in [1024, 2048, 3072]:
+ raise ValueError("p must be exactly 1024, 2048, or 3072 bits long")
+ if utils.bit_length(parameters.q) not in [160, 256]:
+ raise ValueError("q must be exactly 160 or 256 bits long")
if not (1 < parameters.g < parameters.p):
raise ValueError("g, p don't satisfy 1 < g < p.")
| {"golden_diff": "diff --git a/cryptography/hazmat/primitives/asymmetric/dsa.py b/cryptography/hazmat/primitives/asymmetric/dsa.py\n--- a/cryptography/hazmat/primitives/asymmetric/dsa.py\n+++ b/cryptography/hazmat/primitives/asymmetric/dsa.py\n@@ -27,17 +27,10 @@\n \n \n def _check_dsa_parameters(parameters):\n- if (utils.bit_length(parameters.p),\n- utils.bit_length(parameters.q)) not in (\n- (1024, 160),\n- (2048, 256),\n- (3072, 256)):\n- raise ValueError(\n- \"p and q's bit-lengths must be one of these pairs (1024, 160), \"\n- \"(2048, 256), or (3072, 256). Not ({0:d}, {1:d})\".format(\n- utils.bit_length(parameters.p), utils.bit_length(parameters.q)\n- )\n- )\n+ if utils.bit_length(parameters.p) not in [1024, 2048, 3072]:\n+ raise ValueError(\"p must be exactly 1024, 2048, or 3072 bits long\")\n+ if utils.bit_length(parameters.q) not in [160, 256]:\n+ raise ValueError(\"q must be exactly 160 or 256 bits long\")\n \n if not (1 < parameters.g < parameters.p):\n raise ValueError(\"g, p don't satisfy 1 < g < p.\")\n", "issue": "dsa_private_key.pem vector has p and q whose lengths we don't normally allow\nWe currently enforce that `p` and `q` have lengths which are one of:\n- `(1024, 160)`\n- `(2048, 256)`\n- `(3072, 256)`\n\nHowever, this vector has `(p, q)` with lengths of `(2048, 160)`. Do we need to be less restrictive, use a different vector?\n\nThis was discovered in the process of writing a pure python PEM loader.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nfrom cryptography import utils\n\n\ndef generate_parameters(key_size, backend):\n return backend.generate_dsa_parameters(key_size)\n\n\ndef generate_private_key(key_size, backend):\n return backend.generate_dsa_private_key_and_parameters(key_size)\n\n\ndef _check_dsa_parameters(parameters):\n if (utils.bit_length(parameters.p),\n utils.bit_length(parameters.q)) not in (\n (1024, 160),\n (2048, 256),\n (3072, 256)):\n raise ValueError(\n \"p and q's bit-lengths must be one of these pairs (1024, 160), \"\n \"(2048, 256), or (3072, 256). 
Not ({0:d}, {1:d})\".format(\n utils.bit_length(parameters.p), utils.bit_length(parameters.q)\n )\n )\n\n if not (1 < parameters.g < parameters.p):\n raise ValueError(\"g, p don't satisfy 1 < g < p.\")\n\n\ndef _check_dsa_private_numbers(numbers):\n parameters = numbers.public_numbers.parameter_numbers\n _check_dsa_parameters(parameters)\n if numbers.x <= 0 or numbers.x >= parameters.q:\n raise ValueError(\"x must be > 0 and < q.\")\n\n if numbers.public_numbers.y != pow(parameters.g, numbers.x, parameters.p):\n raise ValueError(\"y must be equal to (g ** x % p).\")\n\n\nclass DSAParameterNumbers(object):\n def __init__(self, p, q, g):\n if (\n not isinstance(p, six.integer_types) or\n not isinstance(q, six.integer_types) or\n not isinstance(g, six.integer_types)\n ):\n raise TypeError(\n \"DSAParameterNumbers p, q, and g arguments must be integers.\"\n )\n\n self._p = p\n self._q = q\n self._g = g\n\n @property\n def p(self):\n return self._p\n\n @property\n def q(self):\n return self._q\n\n @property\n def g(self):\n return self._g\n\n def parameters(self, backend):\n return backend.load_dsa_parameter_numbers(self)\n\n\nclass DSAPublicNumbers(object):\n def __init__(self, y, parameter_numbers):\n if not isinstance(y, six.integer_types):\n raise TypeError(\"DSAPublicNumbers y argument must be an integer.\")\n\n if not isinstance(parameter_numbers, DSAParameterNumbers):\n raise TypeError(\n \"parameter_numbers must be a DSAParameterNumbers instance.\"\n )\n\n self._y = y\n self._parameter_numbers = parameter_numbers\n\n @property\n def y(self):\n return self._y\n\n @property\n def parameter_numbers(self):\n return self._parameter_numbers\n\n def public_key(self, backend):\n return backend.load_dsa_public_numbers(self)\n\n\nclass DSAPrivateNumbers(object):\n def __init__(self, x, public_numbers):\n if not isinstance(x, six.integer_types):\n raise TypeError(\"DSAPrivateNumbers x argument must be an integer.\")\n\n if not isinstance(public_numbers, DSAPublicNumbers):\n raise TypeError(\n \"public_numbers must be a DSAPublicNumbers instance.\"\n )\n self._public_numbers = public_numbers\n self._x = x\n\n @property\n def x(self):\n return self._x\n\n @property\n def public_numbers(self):\n return self._public_numbers\n\n def private_key(self, backend):\n return backend.load_dsa_private_numbers(self)\n", "path": "cryptography/hazmat/primitives/asymmetric/dsa.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nfrom cryptography import utils\n\n\ndef generate_parameters(key_size, backend):\n return backend.generate_dsa_parameters(key_size)\n\n\ndef generate_private_key(key_size, backend):\n return backend.generate_dsa_private_key_and_parameters(key_size)\n\n\ndef _check_dsa_parameters(parameters):\n if utils.bit_length(parameters.p) not in [1024, 2048, 3072]:\n raise ValueError(\"p must be exactly 1024, 2048, or 3072 bits long\")\n if utils.bit_length(parameters.q) not in [160, 
256]:\n raise ValueError(\"q must be exactly 160 or 256 bits long\")\n\n if not (1 < parameters.g < parameters.p):\n raise ValueError(\"g, p don't satisfy 1 < g < p.\")\n\n\ndef _check_dsa_private_numbers(numbers):\n parameters = numbers.public_numbers.parameter_numbers\n _check_dsa_parameters(parameters)\n if numbers.x <= 0 or numbers.x >= parameters.q:\n raise ValueError(\"x must be > 0 and < q.\")\n\n if numbers.public_numbers.y != pow(parameters.g, numbers.x, parameters.p):\n raise ValueError(\"y must be equal to (g ** x % p).\")\n\n\nclass DSAParameterNumbers(object):\n def __init__(self, p, q, g):\n if (\n not isinstance(p, six.integer_types) or\n not isinstance(q, six.integer_types) or\n not isinstance(g, six.integer_types)\n ):\n raise TypeError(\n \"DSAParameterNumbers p, q, and g arguments must be integers.\"\n )\n\n self._p = p\n self._q = q\n self._g = g\n\n @property\n def p(self):\n return self._p\n\n @property\n def q(self):\n return self._q\n\n @property\n def g(self):\n return self._g\n\n def parameters(self, backend):\n return backend.load_dsa_parameter_numbers(self)\n\n\nclass DSAPublicNumbers(object):\n def __init__(self, y, parameter_numbers):\n if not isinstance(y, six.integer_types):\n raise TypeError(\"DSAPublicNumbers y argument must be an integer.\")\n\n if not isinstance(parameter_numbers, DSAParameterNumbers):\n raise TypeError(\n \"parameter_numbers must be a DSAParameterNumbers instance.\"\n )\n\n self._y = y\n self._parameter_numbers = parameter_numbers\n\n @property\n def y(self):\n return self._y\n\n @property\n def parameter_numbers(self):\n return self._parameter_numbers\n\n def public_key(self, backend):\n return backend.load_dsa_public_numbers(self)\n\n\nclass DSAPrivateNumbers(object):\n def __init__(self, x, public_numbers):\n if not isinstance(x, six.integer_types):\n raise TypeError(\"DSAPrivateNumbers x argument must be an integer.\")\n\n if not isinstance(public_numbers, DSAPublicNumbers):\n raise TypeError(\n \"public_numbers must be a DSAPublicNumbers instance.\"\n )\n self._public_numbers = public_numbers\n self._x = x\n\n @property\n def x(self):\n return self._x\n\n @property\n def public_numbers(self):\n return self._public_numbers\n\n def private_key(self, backend):\n return backend.load_dsa_private_numbers(self)\n", "path": "cryptography/hazmat/primitives/asymmetric/dsa.py"}]} | 1,608 | 364 |
gh_patches_debug_16759 | rasdani/github-patches | git_diff | bridgecrewio__checkov-1479 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_AWS_78 misreported
**Describe the bug**
Checkov is returning as vulnerability CKV_AWS_78, but the solution breaks Terraform validation.
According to Checkov, if `encryption_disabled = false` is not set in the main block it can be considered a vulnerability
```
resource "aws_codebuild_project" "project-with-cache" {
name = "test-project-cache"
description = "test_codebuild_project_cache"
build_timeout = "5"
queued_timeout = "5"
+ encryption_disabled = false
}
```
as described here: https://docs.bridgecrew.io/docs/bc_aws_general_30
Unfortunately in Terraform v1.0.3 `encryption_disabled` is not available in that location but only in blocks `artifacts`, `secondary_artifacts` and `logs_config: s3_logs` as you can see here: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/codebuild_project
So if not set it gives vulnerability, if set terraform fails during the validation.
**To Reproduce**
Steps to reproduce the behavior:
1. Set in **aws_codebuild_project** **encryption_disabled = false**
```
resource "aws_codebuild_project" "project-with-cache" {
name = "test-project-cache"
description = "test_codebuild_project_cache"
build_timeout = "5"
queued_timeout = "5"
+ encryption_disabled = false
}
```
2. Run `terraform validate`
3. See error
**Expected behavior**
No vulnerability or vulnerability if not set the attribute in all the 3 blocks
**Desktop (please complete the following information):**
- terraform --version: Terraform v1.0.3 on linux_amd64
- checkov --version: 2.0.326
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py`
Content:
```
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3
4
5 class CodeBuildProjectEncryption(BaseResourceCheck):
6
7 def __init__(self):
8 name = "Ensure that CodeBuild Project encryption is not disabled"
9 id = "CKV_AWS_78"
10 supported_resources = ['aws_codebuild_project']
11 categories = [CheckCategories.ENCRYPTION]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def scan_resource_conf(self, conf):
15 if 'artifacts' not in conf:
16 return CheckResult.UNKNOWN
17 artifact = conf['artifacts'][0]
18 if isinstance(artifact, dict):
19 if artifact['type'] == "NO_ARTIFACTS":
20 self.evaluated_keys = 'artifacts/[0]/type'
21 elif 'encryption_disabled' in artifact and artifact['encryption_disabled']:
22 self.evaluated_keys = 'artifacts/[0]/encryption_disabled'
23 return CheckResult.FAILED
24 return CheckResult.PASSED
25
26
27 check = CodeBuildProjectEncryption()
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py b/checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py
--- a/checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py
+++ b/checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py
@@ -16,11 +16,13 @@
return CheckResult.UNKNOWN
artifact = conf['artifacts'][0]
if isinstance(artifact, dict):
- if artifact['type'] == "NO_ARTIFACTS":
+ if artifact['type'] == ["NO_ARTIFACTS"]:
self.evaluated_keys = 'artifacts/[0]/type'
- elif 'encryption_disabled' in artifact and artifact['encryption_disabled']:
- self.evaluated_keys = 'artifacts/[0]/encryption_disabled'
- return CheckResult.FAILED
+ return CheckResult.UNKNOWN
+ if 'encryption_disabled' in artifact:
+ if artifact['encryption_disabled'] == [True]:
+ self.evaluated_keys = 'artifacts/[0]/encryption_disabled'
+ return CheckResult.FAILED
return CheckResult.PASSED
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py b/checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py\n--- a/checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py\n+++ b/checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py\n@@ -16,11 +16,13 @@\n return CheckResult.UNKNOWN\n artifact = conf['artifacts'][0]\n if isinstance(artifact, dict):\n- if artifact['type'] == \"NO_ARTIFACTS\":\n+ if artifact['type'] == [\"NO_ARTIFACTS\"]:\n self.evaluated_keys = 'artifacts/[0]/type'\n- elif 'encryption_disabled' in artifact and artifact['encryption_disabled']:\n- self.evaluated_keys = 'artifacts/[0]/encryption_disabled'\n- return CheckResult.FAILED\n+ return CheckResult.UNKNOWN\n+ if 'encryption_disabled' in artifact: \n+ if artifact['encryption_disabled'] == [True]:\n+ self.evaluated_keys = 'artifacts/[0]/encryption_disabled'\n+ return CheckResult.FAILED\n return CheckResult.PASSED\n", "issue": "CKV_AWS_78 misreported\n**Describe the bug**\r\nCheckov is returning as vulnerability CKV_AWS_78, but the solution breaks Terraform validation.\r\n\r\nAccordigly to Checkov if `encryption_disabled = false` is not set in the main block it can be considered a vulnerability\r\n\r\n```\r\nresource \"aws_codebuild_project\" \"project-with-cache\" {\r\n name = \"test-project-cache\"\r\n description = \"test_codebuild_project_cache\"\r\n build_timeout = \"5\"\r\n queued_timeout = \"5\"\r\n+ encryption_disabled = false\r\n} \r\n```\r\nas described here: https://docs.bridgecrew.io/docs/bc_aws_general_30\r\n\r\nUnfortunately in Terraform v1.0.3 `encryption_disabled` is not available in that location but only in blocks `artifacts`, `secondary_artifacts` and `logs_config: s3_logs` as you can see here: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/codebuild_project\r\n\r\nSo if not set it gives vulnerability, if set terraform fails during the validation.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Set in **aws_codebuild_project** **encryption_disabled = false**\r\n```\r\nresource \"aws_codebuild_project\" \"project-with-cache\" {\r\n name = \"test-project-cache\"\r\n description = \"test_codebuild_project_cache\"\r\n build_timeout = \"5\"\r\n queued_timeout = \"5\"\r\n+ encryption_disabled = false\r\n} \r\n```\r\n2. Run `terraform validate`\r\n3. 
See error\r\n\r\n**Expected behavior**\r\nNo vulnerability or vulnerability if not set the attribute in all the 3 blocks\r\n\r\n**Desktop (please complete the following information):**\r\n - terraform --version: Terraform v1.0.3 on linux_amd64\r\n - checkov --version: 2.0.326\r\n\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass CodeBuildProjectEncryption(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure that CodeBuild Project encryption is not disabled\"\n id = \"CKV_AWS_78\"\n supported_resources = ['aws_codebuild_project']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'artifacts' not in conf:\n return CheckResult.UNKNOWN\n artifact = conf['artifacts'][0]\n if isinstance(artifact, dict):\n if artifact['type'] == \"NO_ARTIFACTS\":\n self.evaluated_keys = 'artifacts/[0]/type'\n elif 'encryption_disabled' in artifact and artifact['encryption_disabled']:\n self.evaluated_keys = 'artifacts/[0]/encryption_disabled'\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = CodeBuildProjectEncryption()\n", "path": "checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py"}], "after_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass CodeBuildProjectEncryption(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure that CodeBuild Project encryption is not disabled\"\n id = \"CKV_AWS_78\"\n supported_resources = ['aws_codebuild_project']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'artifacts' not in conf:\n return CheckResult.UNKNOWN\n artifact = conf['artifacts'][0]\n if isinstance(artifact, dict):\n if artifact['type'] == [\"NO_ARTIFACTS\"]:\n self.evaluated_keys = 'artifacts/[0]/type'\n return CheckResult.UNKNOWN\n if 'encryption_disabled' in artifact: \n if artifact['encryption_disabled'] == [True]:\n self.evaluated_keys = 'artifacts/[0]/encryption_disabled'\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = CodeBuildProjectEncryption()\n", "path": "checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py"}]} | 952 | 253 |
gh_patches_debug_2490 | rasdani/github-patches | git_diff | dotkom__onlineweb4-165 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adding 'Offline Informasjonstekster' causes error
Not really sure what this does, but it raises an error saying:
Exception Type: IntegrityError
Exception Value: column key is not unique
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/offline/admin.py`
Content:
```
1 from apps.offline.models import ProxyChunk, Issue
2 from chunks.models import Chunk
3 from django.contrib import admin
4 from django.db.models import Q
5
6
7 class ProxyChunkAdmin(admin.ModelAdmin):
8
9 readonly_fields = ['key']
10
11 def queryset(self, request):
12 offline = Chunk.objects.filter(Q(key='offline_ingress') | Q(key='offline_brodtekst'))
13 return offline
14
15 admin.site.register(ProxyChunk, ProxyChunkAdmin)
16 admin.site.register(Issue)
17
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/offline/admin.py b/apps/offline/admin.py
--- a/apps/offline/admin.py
+++ b/apps/offline/admin.py
@@ -8,6 +8,9 @@
readonly_fields = ['key']
+ def has_add_permission(self, request):
+ return False
+
def queryset(self, request):
offline = Chunk.objects.filter(Q(key='offline_ingress') | Q(key='offline_brodtekst'))
return offline
| {"golden_diff": "diff --git a/apps/offline/admin.py b/apps/offline/admin.py\n--- a/apps/offline/admin.py\n+++ b/apps/offline/admin.py\n@@ -8,6 +8,9 @@\n \n readonly_fields = ['key']\n \n+ def has_add_permission(self, request):\n+ return False\n+\n def queryset(self, request):\n offline = Chunk.objects.filter(Q(key='offline_ingress') | Q(key='offline_brodtekst'))\n return offline\n", "issue": "Adding 'Offline Informasjonstekster' causes error\nNot really sure what this does but it casts an error saying:\n\nException Type: IntegrityError\nException Value: column key is not unique\n\n", "before_files": [{"content": "from apps.offline.models import ProxyChunk, Issue\nfrom chunks.models import Chunk\nfrom django.contrib import admin\nfrom django.db.models import Q\n\n\nclass ProxyChunkAdmin(admin.ModelAdmin):\n\n readonly_fields = ['key']\n\n def queryset(self, request):\n offline = Chunk.objects.filter(Q(key='offline_ingress') | Q(key='offline_brodtekst'))\n return offline\n\nadmin.site.register(ProxyChunk, ProxyChunkAdmin)\nadmin.site.register(Issue)\n", "path": "apps/offline/admin.py"}], "after_files": [{"content": "from apps.offline.models import ProxyChunk, Issue\nfrom chunks.models import Chunk\nfrom django.contrib import admin\nfrom django.db.models import Q\n\n\nclass ProxyChunkAdmin(admin.ModelAdmin):\n\n readonly_fields = ['key']\n\n def has_add_permission(self, request):\n return False\n\n def queryset(self, request):\n offline = Chunk.objects.filter(Q(key='offline_ingress') | Q(key='offline_brodtekst'))\n return offline\n\nadmin.site.register(ProxyChunk, ProxyChunkAdmin)\nadmin.site.register(Issue)\n", "path": "apps/offline/admin.py"}]} | 426 | 101 |
gh_patches_debug_34711 | rasdani/github-patches | git_diff | scikit-image__scikit-image-6035 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Segfault in peak_local_max with large number of segments
## Description
scikit-image crashes with an (absolutely uncatchable and untrackable) segfault in peak_local_max.
## Way to reproduce
```python
import numpy as np
from scipy.ndimage import distance_transform_edt
from skimage.feature import peak_local_max
def segment(binary_image):
distance = distance_transform_edt(binary_image)
peak_local_max(
distance, min_distance=100, footprint=np.ones((3, 3)), labels=binary_image,
)
for p in [0.05, 0.95, 0.001, 0.999]:
print(p)
segment(np.random.random([2048, 2048]) < p)
```
## Version information
```python
# Paste the output of the following python commands
from __future__ import print_function
import sys; print(sys.version)
import platform; print(platform.platform())
import skimage; print(f'scikit-image version: {skimage.__version__}')
import numpy; print(f'numpy version: {numpy.__version__}')
```
```python
3.8.10 (default, Sep 28 2021, 16:10:42)
[GCC 9.3.0]
Linux-5.10.47-linuxkit-x86_64-with-glibc2.29
scikit-image version: 0.18.3
numpy version: 1.21.4
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/_shared/coord.py`
Content:
```
1 import numpy as np
2 from scipy.spatial import cKDTree, distance
3
4
5 def _ensure_spacing(coord, spacing, p_norm, max_out):
6 """Returns a subset of coord where a minimum spacing is guaranteed.
7
8 Parameters
9 ----------
10 coord : ndarray
11 The coordinates of the considered points.
12 spacing : float
13 the maximum allowed spacing between the points.
14 p_norm : float
15 Which Minkowski p-norm to use. Should be in the range [1, inf].
16 A finite large p may cause a ValueError if overflow can occur.
17 ``inf`` corresponds to the Chebyshev distance and 2 to the
18 Euclidean distance.
19 max_out: int
20 If not None, at most the first ``max_out`` candidates are
21 returned.
22
23 Returns
24 -------
25 output : ndarray
26 A subset of coord where a minimum spacing is guaranteed.
27
28 """
29
30 # Use KDtree to find the peaks that are too close to each other
31 tree = cKDTree(coord)
32
33 indices = tree.query_ball_point(coord, r=spacing, p=p_norm)
34 rejected_peaks_indices = set()
35 naccepted = 0
36 for idx, candidates in enumerate(indices):
37 if idx not in rejected_peaks_indices:
38 # keep current point and the points at exactly spacing from it
39 candidates.remove(idx)
40 dist = distance.cdist([coord[idx]],
41 coord[candidates],
42 distance.minkowski,
43 p=p_norm).reshape(-1)
44 candidates = [c for c, d in zip(candidates, dist)
45 if d < spacing]
46
47 # candidates.remove(keep)
48 rejected_peaks_indices.update(candidates)
49 naccepted += 1
50 if max_out is not None and naccepted >= max_out:
51 break
52
53 # Remove the peaks that are too close to each other
54 output = np.delete(coord, tuple(rejected_peaks_indices), axis=0)
55 if max_out is not None:
56 output = output[:max_out]
57
58 return output
59
60
61 def ensure_spacing(coords, spacing=1, p_norm=np.inf, min_split_size=50,
62 max_out=None):
63 """Returns a subset of coord where a minimum spacing is guaranteed.
64
65 Parameters
66 ----------
67 coords : array_like
68 The coordinates of the considered points.
69 spacing : float
70 the maximum allowed spacing between the points.
71 p_norm : float
72 Which Minkowski p-norm to use. Should be in the range [1, inf].
73 A finite large p may cause a ValueError if overflow can occur.
74 ``inf`` corresponds to the Chebyshev distance and 2 to the
75 Euclidean distance.
76 min_split_size : int
77 Minimum split size used to process ``coord`` by batch to save
78 memory. If None, the memory saving strategy is not applied.
79 max_out : int
80 If not None, only the first ``max_out`` candidates are returned.
81
82 Returns
83 -------
84 output : array_like
85 A subset of coord where a minimum spacing is guaranteed.
86
87 """
88
89 output = coords
90 if len(coords):
91
92 coords = np.atleast_2d(coords)
93 if min_split_size is None:
94 batch_list = [coords]
95 else:
96 coord_count = len(coords)
97 split_count = int(np.log2(coord_count / min_split_size)) + 1
98 split_idx = np.cumsum(
99 [coord_count // (2 ** i) for i in range(1, split_count)])
100 batch_list = np.array_split(coords, split_idx)
101
102 output = np.zeros((0, coords.shape[1]), dtype=coords.dtype)
103 for batch in batch_list:
104 output = _ensure_spacing(np.vstack([output, batch]),
105 spacing, p_norm, max_out)
106 if max_out is not None and len(output) >= max_out:
107 break
108
109 return output
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skimage/_shared/coord.py b/skimage/_shared/coord.py
--- a/skimage/_shared/coord.py
+++ b/skimage/_shared/coord.py
@@ -59,7 +59,7 @@
def ensure_spacing(coords, spacing=1, p_norm=np.inf, min_split_size=50,
- max_out=None):
+ max_out=None, *, max_split_size=2000):
"""Returns a subset of coord where a minimum spacing is guaranteed.
Parameters
@@ -74,10 +74,19 @@
``inf`` corresponds to the Chebyshev distance and 2 to the
Euclidean distance.
min_split_size : int
- Minimum split size used to process ``coord`` by batch to save
+ Minimum split size used to process ``coords`` by batch to save
memory. If None, the memory saving strategy is not applied.
max_out : int
If not None, only the first ``max_out`` candidates are returned.
+ max_split_size : int
+ Maximum split size used to process ``coords`` by batch to save
+ memory. This number was decided by profiling with a large number
+ of points. Too small a number results in too much looping in
+ Python instead of C, slowing down the process, while too large
+ a number results in large memory allocations, slowdowns, and,
+ potentially, in the process being killed -- see gh-6010. See
+ benchmark results `here
+ <https://github.com/scikit-image/scikit-image/pull/6035#discussion_r751518691>`_.
Returns
-------
@@ -94,9 +103,12 @@
batch_list = [coords]
else:
coord_count = len(coords)
- split_count = int(np.log2(coord_count / min_split_size)) + 1
- split_idx = np.cumsum(
- [coord_count // (2 ** i) for i in range(1, split_count)])
+ split_idx = [min_split_size]
+ split_size = min_split_size
+ while coord_count - split_idx[-1] > max_split_size:
+ split_size *= 2
+ split_idx.append(split_idx[-1] + min(split_size,
+ max_split_size))
batch_list = np.array_split(coords, split_idx)
output = np.zeros((0, coords.shape[1]), dtype=coords.dtype)
| {"golden_diff": "diff --git a/skimage/_shared/coord.py b/skimage/_shared/coord.py\n--- a/skimage/_shared/coord.py\n+++ b/skimage/_shared/coord.py\n@@ -59,7 +59,7 @@\n \n \n def ensure_spacing(coords, spacing=1, p_norm=np.inf, min_split_size=50,\n- max_out=None):\n+ max_out=None, *, max_split_size=2000):\n \"\"\"Returns a subset of coord where a minimum spacing is guaranteed.\n \n Parameters\n@@ -74,10 +74,19 @@\n ``inf`` corresponds to the Chebyshev distance and 2 to the\n Euclidean distance.\n min_split_size : int\n- Minimum split size used to process ``coord`` by batch to save\n+ Minimum split size used to process ``coords`` by batch to save\n memory. If None, the memory saving strategy is not applied.\n max_out : int\n If not None, only the first ``max_out`` candidates are returned.\n+ max_split_size : int\n+ Maximum split size used to process ``coords`` by batch to save\n+ memory. This number was decided by profiling with a large number\n+ of points. Too small a number results in too much looping in\n+ Python instead of C, slowing down the process, while too large\n+ a number results in large memory allocations, slowdowns, and,\n+ potentially, in the process being killed -- see gh-6010. See\n+ benchmark results `here\n+ <https://github.com/scikit-image/scikit-image/pull/6035#discussion_r751518691>`_.\n \n Returns\n -------\n@@ -94,9 +103,12 @@\n batch_list = [coords]\n else:\n coord_count = len(coords)\n- split_count = int(np.log2(coord_count / min_split_size)) + 1\n- split_idx = np.cumsum(\n- [coord_count // (2 ** i) for i in range(1, split_count)])\n+ split_idx = [min_split_size]\n+ split_size = min_split_size\n+ while coord_count - split_idx[-1] > max_split_size:\n+ split_size *= 2\n+ split_idx.append(split_idx[-1] + min(split_size,\n+ max_split_size))\n batch_list = np.array_split(coords, split_idx)\n \n output = np.zeros((0, coords.shape[1]), dtype=coords.dtype)\n", "issue": "Segfault in peak_local_max with large numbed of segments\n## Description\r\n\r\nscikit-image dives to (absolutely uncatchable and untrackable) segfault in peak_local_max.\r\n\r\n## Way to reproduce\r\n```python\r\nimport numpy as np\r\nfrom scipy.ndimage import distance_transform_edt\r\nfrom skimage.feature import peak_local_max\r\n\r\n\r\ndef segment(binary_image):\r\n distance = distance_transform_edt(binary_image)\r\n peak_local_max(\r\n distance, min_distance=100, footprint=np.ones((3, 3)), labels=binary_image,\r\n )\r\n\r\nfor p in [0.05, 0.95, 0.001, 0.999]:\r\n print(p)\r\n segment(np.random.random([2048, 2048]) < p)\r\n\r\n\r\n```\r\n\r\n\r\n## Version information\r\n```python\r\n# Paste the output of the following python commands\r\nfrom __future__ import print_function\r\nimport sys; print(sys.version)\r\nimport platform; print(platform.platform())\r\nimport skimage; print(f'scikit-image version: {skimage.__version__}')\r\nimport numpy; print(f'numpy version: {numpy.__version__}')\r\n```\r\n\r\n```python\r\n3.8.10 (default, Sep 28 2021, 16:10:42) \r\n[GCC 9.3.0]\r\nLinux-5.10.47-linuxkit-x86_64-with-glibc2.29\r\nscikit-image version: 0.18.3\r\nnumpy version: 1.21.4\r\n```\r\n\n", "before_files": [{"content": "import numpy as np\nfrom scipy.spatial import cKDTree, distance\n\n\ndef _ensure_spacing(coord, spacing, p_norm, max_out):\n \"\"\"Returns a subset of coord where a minimum spacing is guaranteed.\n\n Parameters\n ----------\n coord : ndarray\n The coordinates of the considered points.\n spacing : float\n the maximum allowed spacing between the points.\n p_norm : float\n Which Minkowski p-norm 
to use. Should be in the range [1, inf].\n A finite large p may cause a ValueError if overflow can occur.\n ``inf`` corresponds to the Chebyshev distance and 2 to the\n Euclidean distance.\n max_out: int\n If not None, at most the first ``max_out`` candidates are\n returned.\n\n Returns\n -------\n output : ndarray\n A subset of coord where a minimum spacing is guaranteed.\n\n \"\"\"\n\n # Use KDtree to find the peaks that are too close to each other\n tree = cKDTree(coord)\n\n indices = tree.query_ball_point(coord, r=spacing, p=p_norm)\n rejected_peaks_indices = set()\n naccepted = 0\n for idx, candidates in enumerate(indices):\n if idx not in rejected_peaks_indices:\n # keep current point and the points at exactly spacing from it\n candidates.remove(idx)\n dist = distance.cdist([coord[idx]],\n coord[candidates],\n distance.minkowski,\n p=p_norm).reshape(-1)\n candidates = [c for c, d in zip(candidates, dist)\n if d < spacing]\n\n # candidates.remove(keep)\n rejected_peaks_indices.update(candidates)\n naccepted += 1\n if max_out is not None and naccepted >= max_out:\n break\n\n # Remove the peaks that are too close to each other\n output = np.delete(coord, tuple(rejected_peaks_indices), axis=0)\n if max_out is not None:\n output = output[:max_out]\n\n return output\n\n\ndef ensure_spacing(coords, spacing=1, p_norm=np.inf, min_split_size=50,\n max_out=None):\n \"\"\"Returns a subset of coord where a minimum spacing is guaranteed.\n\n Parameters\n ----------\n coords : array_like\n The coordinates of the considered points.\n spacing : float\n the maximum allowed spacing between the points.\n p_norm : float\n Which Minkowski p-norm to use. Should be in the range [1, inf].\n A finite large p may cause a ValueError if overflow can occur.\n ``inf`` corresponds to the Chebyshev distance and 2 to the\n Euclidean distance.\n min_split_size : int\n Minimum split size used to process ``coord`` by batch to save\n memory. If None, the memory saving strategy is not applied.\n max_out : int\n If not None, only the first ``max_out`` candidates are returned.\n\n Returns\n -------\n output : array_like\n A subset of coord where a minimum spacing is guaranteed.\n\n \"\"\"\n\n output = coords\n if len(coords):\n\n coords = np.atleast_2d(coords)\n if min_split_size is None:\n batch_list = [coords]\n else:\n coord_count = len(coords)\n split_count = int(np.log2(coord_count / min_split_size)) + 1\n split_idx = np.cumsum(\n [coord_count // (2 ** i) for i in range(1, split_count)])\n batch_list = np.array_split(coords, split_idx)\n\n output = np.zeros((0, coords.shape[1]), dtype=coords.dtype)\n for batch in batch_list:\n output = _ensure_spacing(np.vstack([output, batch]),\n spacing, p_norm, max_out)\n if max_out is not None and len(output) >= max_out:\n break\n\n return output\n", "path": "skimage/_shared/coord.py"}], "after_files": [{"content": "import numpy as np\nfrom scipy.spatial import cKDTree, distance\n\n\ndef _ensure_spacing(coord, spacing, p_norm, max_out):\n \"\"\"Returns a subset of coord where a minimum spacing is guaranteed.\n\n Parameters\n ----------\n coord : ndarray\n The coordinates of the considered points.\n spacing : float\n the maximum allowed spacing between the points.\n p_norm : float\n Which Minkowski p-norm to use. 
Should be in the range [1, inf].\n A finite large p may cause a ValueError if overflow can occur.\n ``inf`` corresponds to the Chebyshev distance and 2 to the\n Euclidean distance.\n max_out: int\n If not None, at most the first ``max_out`` candidates are\n returned.\n\n Returns\n -------\n output : ndarray\n A subset of coord where a minimum spacing is guaranteed.\n\n \"\"\"\n\n # Use KDtree to find the peaks that are too close to each other\n tree = cKDTree(coord)\n\n indices = tree.query_ball_point(coord, r=spacing, p=p_norm)\n rejected_peaks_indices = set()\n naccepted = 0\n for idx, candidates in enumerate(indices):\n if idx not in rejected_peaks_indices:\n # keep current point and the points at exactly spacing from it\n candidates.remove(idx)\n dist = distance.cdist([coord[idx]],\n coord[candidates],\n distance.minkowski,\n p=p_norm).reshape(-1)\n candidates = [c for c, d in zip(candidates, dist)\n if d < spacing]\n\n # candidates.remove(keep)\n rejected_peaks_indices.update(candidates)\n naccepted += 1\n if max_out is not None and naccepted >= max_out:\n break\n\n # Remove the peaks that are too close to each other\n output = np.delete(coord, tuple(rejected_peaks_indices), axis=0)\n if max_out is not None:\n output = output[:max_out]\n\n return output\n\n\ndef ensure_spacing(coords, spacing=1, p_norm=np.inf, min_split_size=50,\n max_out=None, *, max_split_size=2000):\n \"\"\"Returns a subset of coord where a minimum spacing is guaranteed.\n\n Parameters\n ----------\n coords : array_like\n The coordinates of the considered points.\n spacing : float\n the maximum allowed spacing between the points.\n p_norm : float\n Which Minkowski p-norm to use. Should be in the range [1, inf].\n A finite large p may cause a ValueError if overflow can occur.\n ``inf`` corresponds to the Chebyshev distance and 2 to the\n Euclidean distance.\n min_split_size : int\n Minimum split size used to process ``coords`` by batch to save\n memory. If None, the memory saving strategy is not applied.\n max_out : int\n If not None, only the first ``max_out`` candidates are returned.\n max_split_size : int\n Maximum split size used to process ``coords`` by batch to save\n memory. This number was decided by profiling with a large number\n of points. Too small a number results in too much looping in\n Python instead of C, slowing down the process, while too large\n a number results in large memory allocations, slowdowns, and,\n potentially, in the process being killed -- see gh-6010. See\n benchmark results `here\n <https://github.com/scikit-image/scikit-image/pull/6035#discussion_r751518691>`_.\n\n Returns\n -------\n output : array_like\n A subset of coord where a minimum spacing is guaranteed.\n\n \"\"\"\n\n output = coords\n if len(coords):\n\n coords = np.atleast_2d(coords)\n if min_split_size is None:\n batch_list = [coords]\n else:\n coord_count = len(coords)\n split_idx = [min_split_size]\n split_size = min_split_size\n while coord_count - split_idx[-1] > max_split_size:\n split_size *= 2\n split_idx.append(split_idx[-1] + min(split_size,\n max_split_size))\n batch_list = np.array_split(coords, split_idx)\n\n output = np.zeros((0, coords.shape[1]), dtype=coords.dtype)\n for batch in batch_list:\n output = _ensure_spacing(np.vstack([output, batch]),\n spacing, p_norm, max_out)\n if max_out is not None and len(output) >= max_out:\n break\n\n return output\n", "path": "skimage/_shared/coord.py"}]} | 1,663 | 560 |
gh_patches_debug_37441 | rasdani/github-patches | git_diff | scikit-hep__awkward-1841 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ak.type does not understand `numpy.<type>` style dtypes
### Version of Awkward Array
2.0.0rc1
### Description and code to reproduce
numpy = 1.23.4
```python3
>>> import awkward as ak
>>> import numpy as np
>>> x = np.random.normal(size=100)
>>> ak.type(x)
```
results in:
```
Traceback (most recent call last):
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.8/site-packages/awkward/operations/ak_type.py", line 99, in _impl
out = ak.types.numpytype._dtype_to_primitive_dict[array.dtype.type]
KeyError: <class 'numpy.float64'>
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.8/site-packages/awkward/operations/ak_type.py", line 60, in type
return _impl(array)
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.8/site-packages/awkward/operations/ak_type.py", line 101, in _impl
raise ak._errors.wrap_error(
TypeError: while calling
ak.type(
array = numpy.ndarray([ 0.27824033 -1.483569 -0.61108357 ...
)
Error details: numpy array type is unrecognized by awkward: <class 'numpy.float64'>
```
`np.float64` (or `np.<type>` in general) is a fairly common way for folks to denote typing, we should probably support it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/awkward/operations/ak_type.py`
Content:
```
1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
2
3 import numbers
4
5 import awkward as ak
6
7 np = ak.nplikes.NumpyMetadata.instance()
8
9
10 def type(array):
11 """
12 The high-level type of an `array` (many types supported, including all
13 Awkward Arrays and Records) as #ak.types.Type objects.
14
15 The high-level type ignores #layout differences like
16 #ak.contents.ListArray versus #ak.contents.ListOffsetArray, but
17 not differences like "regular-sized lists" (i.e.
18 #ak.contents.RegularArray) versus "variable-sized lists" (i.e.
19 #ak.contents.ListArray and similar).
20
21 Types are rendered as [Datashape](https://datashape.readthedocs.io/)
22 strings, which makes the same distinctions.
23
24 For example,
25
26 ak.Array([[{"x": 1.1, "y": [1]}, {"x": 2.2, "y": [2, 2]}],
27 [],
28 [{"x": 3.3, "y": [3, 3, 3]}]])
29
30 has type
31
32 3 * var * {"x": float64, "y": var * int64}
33
34 but
35
36 ak.Array(np.arange(2*3*5).reshape(2, 3, 5))
37
38 has type
39
40 2 * 3 * 5 * int64
41
42 Some cases, like heterogeneous data, require [extensions beyond the
43 Datashape specification](https://github.com/blaze/datashape/issues/237).
44 For example,
45
46 ak.Array([1, "two", [3, 3, 3]])
47
48 has type
49
50 3 * union[int64, string, var * int64]
51
52 but "union" is not a Datashape type-constructor. (Its syntax is
53 similar to existing type-constructors, so it's a plausible addition
54 to the language.)
55 """
56 with ak._errors.OperationErrorContext(
57 "ak.type",
58 dict(array=array),
59 ):
60 return _impl(array)
61
62
63 def _impl(array):
64 if array is None:
65 return ak.types.UnknownType()
66
67 elif isinstance(
68 array,
69 tuple(x.type for x in ak.types.numpytype._dtype_to_primitive_dict),
70 ):
71 return ak.types.NumpyType(
72 ak.types.numpytype._dtype_to_primitive_dict[array.dtype]
73 )
74
75 elif isinstance(array, (bool, np.bool_)):
76 return ak.types.NumpyType("bool")
77
78 elif isinstance(array, numbers.Integral):
79 return ak.types.NumpyType("int64")
80
81 elif isinstance(array, numbers.Real):
82 return ak.types.NumpyType("float64")
83
84 elif isinstance(
85 array,
86 (
87 ak.highlevel.Array,
88 ak.highlevel.Record,
89 ak.highlevel.ArrayBuilder,
90 ),
91 ):
92 return array.type
93
94 elif isinstance(array, np.ndarray):
95 if len(array.shape) == 0:
96 return _impl(array.reshape((1,))[0])
97 else:
98 try:
99 out = ak.types.numpytype._dtype_to_primitive_dict[array.dtype.type]
100 except KeyError as err:
101 raise ak._errors.wrap_error(
102 TypeError(
103 "numpy array type is unrecognized by awkward: %r"
104 % array.dtype.type
105 )
106 ) from err
107 out = ak.types.NumpyType(out)
108 for x in array.shape[-1:0:-1]:
109 out = ak.types.RegularType(out, x)
110 return ak.types.ArrayType(out, array.shape[0])
111
112 elif isinstance(array, ak._ext.ArrayBuilder):
113 form = ak.forms.from_json(array.form())
114 return ak.types.ArrayType(form.type_from_behavior(None), len(array))
115
116 elif isinstance(array, ak.record.Record):
117 return array.array.form.type
118
119 elif isinstance(array, ak.contents.Content):
120 return array.form.type
121
122 else:
123 raise ak._errors.wrap_error(TypeError(f"unrecognized array type: {array!r}"))
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/awkward/operations/ak_type.py b/src/awkward/operations/ak_type.py
--- a/src/awkward/operations/ak_type.py
+++ b/src/awkward/operations/ak_type.py
@@ -1,6 +1,8 @@
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
+import builtins
import numbers
+from datetime import datetime, timedelta
import awkward as ak
@@ -64,15 +66,18 @@
if array is None:
return ak.types.UnknownType()
- elif isinstance(
- array,
- tuple(x.type for x in ak.types.numpytype._dtype_to_primitive_dict),
+ elif isinstance(array, np.dtype):
+ return ak.types.NumpyType(ak.types.numpytype.dtype_to_primitive(array))
+
+ elif (
+ isinstance(array, np.generic)
+ or isinstance(array, builtins.type)
+ and issubclass(array, np.generic)
):
- return ak.types.NumpyType(
- ak.types.numpytype._dtype_to_primitive_dict[array.dtype]
- )
+ primitive = ak.types.numpytype.dtype_to_primitive(np.dtype(array))
+ return ak.types.NumpyType(primitive)
- elif isinstance(array, (bool, np.bool_)):
+ elif isinstance(array, bool): # np.bool_ in np.generic (above)
return ak.types.NumpyType("bool")
elif isinstance(array, numbers.Integral):
@@ -81,6 +86,15 @@
elif isinstance(array, numbers.Real):
return ak.types.NumpyType("float64")
+ elif isinstance(array, numbers.Complex):
+ return ak.types.NumpyType("complex128")
+
+ elif isinstance(array, datetime): # np.datetime64 in np.generic (above)
+ return ak.types.NumpyType("datetime64")
+
+ elif isinstance(array, timedelta): # np.timedelta64 in np.generic (above)
+ return ak.types.NumpyType("timedelta")
+
elif isinstance(
array,
(
@@ -95,16 +109,8 @@
if len(array.shape) == 0:
return _impl(array.reshape((1,))[0])
else:
- try:
- out = ak.types.numpytype._dtype_to_primitive_dict[array.dtype.type]
- except KeyError as err:
- raise ak._errors.wrap_error(
- TypeError(
- "numpy array type is unrecognized by awkward: %r"
- % array.dtype.type
- )
- ) from err
- out = ak.types.NumpyType(out)
+ primitive = ak.types.numpytype.dtype_to_primitive(array.dtype)
+ out = ak.types.NumpyType(primitive)
for x in array.shape[-1:0:-1]:
out = ak.types.RegularType(out, x)
return ak.types.ArrayType(out, array.shape[0])
| {"golden_diff": "diff --git a/src/awkward/operations/ak_type.py b/src/awkward/operations/ak_type.py\n--- a/src/awkward/operations/ak_type.py\n+++ b/src/awkward/operations/ak_type.py\n@@ -1,6 +1,8 @@\n # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n \n+import builtins\n import numbers\n+from datetime import datetime, timedelta\n \n import awkward as ak\n \n@@ -64,15 +66,18 @@\n if array is None:\n return ak.types.UnknownType()\n \n- elif isinstance(\n- array,\n- tuple(x.type for x in ak.types.numpytype._dtype_to_primitive_dict),\n+ elif isinstance(array, np.dtype):\n+ return ak.types.NumpyType(ak.types.numpytype.dtype_to_primitive(array))\n+\n+ elif (\n+ isinstance(array, np.generic)\n+ or isinstance(array, builtins.type)\n+ and issubclass(array, np.generic)\n ):\n- return ak.types.NumpyType(\n- ak.types.numpytype._dtype_to_primitive_dict[array.dtype]\n- )\n+ primitive = ak.types.numpytype.dtype_to_primitive(np.dtype(array))\n+ return ak.types.NumpyType(primitive)\n \n- elif isinstance(array, (bool, np.bool_)):\n+ elif isinstance(array, bool): # np.bool_ in np.generic (above)\n return ak.types.NumpyType(\"bool\")\n \n elif isinstance(array, numbers.Integral):\n@@ -81,6 +86,15 @@\n elif isinstance(array, numbers.Real):\n return ak.types.NumpyType(\"float64\")\n \n+ elif isinstance(array, numbers.Complex):\n+ return ak.types.NumpyType(\"complex128\")\n+\n+ elif isinstance(array, datetime): # np.datetime64 in np.generic (above)\n+ return ak.types.NumpyType(\"datetime64\")\n+\n+ elif isinstance(array, timedelta): # np.timedelta64 in np.generic (above)\n+ return ak.types.NumpyType(\"timedelta\")\n+\n elif isinstance(\n array,\n (\n@@ -95,16 +109,8 @@\n if len(array.shape) == 0:\n return _impl(array.reshape((1,))[0])\n else:\n- try:\n- out = ak.types.numpytype._dtype_to_primitive_dict[array.dtype.type]\n- except KeyError as err:\n- raise ak._errors.wrap_error(\n- TypeError(\n- \"numpy array type is unrecognized by awkward: %r\"\n- % array.dtype.type\n- )\n- ) from err\n- out = ak.types.NumpyType(out)\n+ primitive = ak.types.numpytype.dtype_to_primitive(array.dtype)\n+ out = ak.types.NumpyType(primitive)\n for x in array.shape[-1:0:-1]:\n out = ak.types.RegularType(out, x)\n return ak.types.ArrayType(out, array.shape[0])\n", "issue": "ak.type does not understand `numpy.<type>` style dtypes\n### Version of Awkward Array\n\n2.0.0rc1\n\n### Description and code to reproduce\n\nnumpy = 1.23.4\r\n\r\n```python3\r\n>>> import awkward as ak\r\n>>> import numpy as np\r\n>>> x = np.random.normal(size=100)\r\n>>> ak.type(x)\r\n```\r\nresults in:\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.8/site-packages/awkward/operations/ak_type.py\", line 99, in _impl\r\n out = ak.types.numpytype._dtype_to_primitive_dict[array.dtype.type]\r\nKeyError: <class 'numpy.float64'>\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.8/site-packages/awkward/operations/ak_type.py\", line 60, in type\r\n return _impl(array)\r\n File \"/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.8/site-packages/awkward/operations/ak_type.py\", line 101, in _impl\r\n raise ak._errors.wrap_error(\r\nTypeError: while calling\r\n\r\n ak.type(\r\n array = numpy.ndarray([ 0.27824033 -1.483569 -0.61108357 ...\r\n )\r\n\r\nError details: numpy array type is unrecognized by 
awkward: <class 'numpy.float64'>\r\n```\r\n\r\n`np.float64` (or `np.<type>` in general) is a fairly common way for folks to denote typing, we should probably support it.\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport numbers\n\nimport awkward as ak\n\nnp = ak.nplikes.NumpyMetadata.instance()\n\n\ndef type(array):\n \"\"\"\n The high-level type of an `array` (many types supported, including all\n Awkward Arrays and Records) as #ak.types.Type objects.\n\n The high-level type ignores #layout differences like\n #ak.contents.ListArray versus #ak.contents.ListOffsetArray, but\n not differences like \"regular-sized lists\" (i.e.\n #ak.contents.RegularArray) versus \"variable-sized lists\" (i.e.\n #ak.contents.ListArray and similar).\n\n Types are rendered as [Datashape](https://datashape.readthedocs.io/)\n strings, which makes the same distinctions.\n\n For example,\n\n ak.Array([[{\"x\": 1.1, \"y\": [1]}, {\"x\": 2.2, \"y\": [2, 2]}],\n [],\n [{\"x\": 3.3, \"y\": [3, 3, 3]}]])\n\n has type\n\n 3 * var * {\"x\": float64, \"y\": var * int64}\n\n but\n\n ak.Array(np.arange(2*3*5).reshape(2, 3, 5))\n\n has type\n\n 2 * 3 * 5 * int64\n\n Some cases, like heterogeneous data, require [extensions beyond the\n Datashape specification](https://github.com/blaze/datashape/issues/237).\n For example,\n\n ak.Array([1, \"two\", [3, 3, 3]])\n\n has type\n\n 3 * union[int64, string, var * int64]\n\n but \"union\" is not a Datashape type-constructor. (Its syntax is\n similar to existing type-constructors, so it's a plausible addition\n to the language.)\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.type\",\n dict(array=array),\n ):\n return _impl(array)\n\n\ndef _impl(array):\n if array is None:\n return ak.types.UnknownType()\n\n elif isinstance(\n array,\n tuple(x.type for x in ak.types.numpytype._dtype_to_primitive_dict),\n ):\n return ak.types.NumpyType(\n ak.types.numpytype._dtype_to_primitive_dict[array.dtype]\n )\n\n elif isinstance(array, (bool, np.bool_)):\n return ak.types.NumpyType(\"bool\")\n\n elif isinstance(array, numbers.Integral):\n return ak.types.NumpyType(\"int64\")\n\n elif isinstance(array, numbers.Real):\n return ak.types.NumpyType(\"float64\")\n\n elif isinstance(\n array,\n (\n ak.highlevel.Array,\n ak.highlevel.Record,\n ak.highlevel.ArrayBuilder,\n ),\n ):\n return array.type\n\n elif isinstance(array, np.ndarray):\n if len(array.shape) == 0:\n return _impl(array.reshape((1,))[0])\n else:\n try:\n out = ak.types.numpytype._dtype_to_primitive_dict[array.dtype.type]\n except KeyError as err:\n raise ak._errors.wrap_error(\n TypeError(\n \"numpy array type is unrecognized by awkward: %r\"\n % array.dtype.type\n )\n ) from err\n out = ak.types.NumpyType(out)\n for x in array.shape[-1:0:-1]:\n out = ak.types.RegularType(out, x)\n return ak.types.ArrayType(out, array.shape[0])\n\n elif isinstance(array, ak._ext.ArrayBuilder):\n form = ak.forms.from_json(array.form())\n return ak.types.ArrayType(form.type_from_behavior(None), len(array))\n\n elif isinstance(array, ak.record.Record):\n return array.array.form.type\n\n elif isinstance(array, ak.contents.Content):\n return array.form.type\n\n else:\n raise ak._errors.wrap_error(TypeError(f\"unrecognized array type: {array!r}\"))\n", "path": "src/awkward/operations/ak_type.py"}], "after_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport builtins\nimport numbers\nfrom datetime import 
datetime, timedelta\n\nimport awkward as ak\n\nnp = ak.nplikes.NumpyMetadata.instance()\n\n\ndef type(array):\n \"\"\"\n The high-level type of an `array` (many types supported, including all\n Awkward Arrays and Records) as #ak.types.Type objects.\n\n The high-level type ignores #layout differences like\n #ak.contents.ListArray versus #ak.contents.ListOffsetArray, but\n not differences like \"regular-sized lists\" (i.e.\n #ak.contents.RegularArray) versus \"variable-sized lists\" (i.e.\n #ak.contents.ListArray and similar).\n\n Types are rendered as [Datashape](https://datashape.readthedocs.io/)\n strings, which makes the same distinctions.\n\n For example,\n\n ak.Array([[{\"x\": 1.1, \"y\": [1]}, {\"x\": 2.2, \"y\": [2, 2]}],\n [],\n [{\"x\": 3.3, \"y\": [3, 3, 3]}]])\n\n has type\n\n 3 * var * {\"x\": float64, \"y\": var * int64}\n\n but\n\n ak.Array(np.arange(2*3*5).reshape(2, 3, 5))\n\n has type\n\n 2 * 3 * 5 * int64\n\n Some cases, like heterogeneous data, require [extensions beyond the\n Datashape specification](https://github.com/blaze/datashape/issues/237).\n For example,\n\n ak.Array([1, \"two\", [3, 3, 3]])\n\n has type\n\n 3 * union[int64, string, var * int64]\n\n but \"union\" is not a Datashape type-constructor. (Its syntax is\n similar to existing type-constructors, so it's a plausible addition\n to the language.)\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.type\",\n dict(array=array),\n ):\n return _impl(array)\n\n\ndef _impl(array):\n if array is None:\n return ak.types.UnknownType()\n\n elif isinstance(array, np.dtype):\n return ak.types.NumpyType(ak.types.numpytype.dtype_to_primitive(array))\n\n elif (\n isinstance(array, np.generic)\n or isinstance(array, builtins.type)\n and issubclass(array, np.generic)\n ):\n primitive = ak.types.numpytype.dtype_to_primitive(np.dtype(array))\n return ak.types.NumpyType(primitive)\n\n elif isinstance(array, bool): # np.bool_ in np.generic (above)\n return ak.types.NumpyType(\"bool\")\n\n elif isinstance(array, numbers.Integral):\n return ak.types.NumpyType(\"int64\")\n\n elif isinstance(array, numbers.Real):\n return ak.types.NumpyType(\"float64\")\n\n elif isinstance(array, numbers.Complex):\n return ak.types.NumpyType(\"complex128\")\n\n elif isinstance(array, datetime): # np.datetime64 in np.generic (above)\n return ak.types.NumpyType(\"datetime64\")\n\n elif isinstance(array, timedelta): # np.timedelta64 in np.generic (above)\n return ak.types.NumpyType(\"timedelta\")\n\n elif isinstance(\n array,\n (\n ak.highlevel.Array,\n ak.highlevel.Record,\n ak.highlevel.ArrayBuilder,\n ),\n ):\n return array.type\n\n elif isinstance(array, np.ndarray):\n if len(array.shape) == 0:\n return _impl(array.reshape((1,))[0])\n else:\n primitive = ak.types.numpytype.dtype_to_primitive(array.dtype)\n out = ak.types.NumpyType(primitive)\n for x in array.shape[-1:0:-1]:\n out = ak.types.RegularType(out, x)\n return ak.types.ArrayType(out, array.shape[0])\n\n elif isinstance(array, ak._ext.ArrayBuilder):\n form = ak.forms.from_json(array.form())\n return ak.types.ArrayType(form.type_from_behavior(None), len(array))\n\n elif isinstance(array, ak.record.Record):\n return array.array.form.type\n\n elif isinstance(array, ak.contents.Content):\n return array.form.type\n\n else:\n raise ak._errors.wrap_error(TypeError(f\"unrecognized array type: {array!r}\"))\n", "path": "src/awkward/operations/ak_type.py"}]} | 1,821 | 651 |
gh_patches_debug_38340 | rasdani/github-patches | git_diff | python-poetry__poetry-2602 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Poetry 1.1.0a2 includes python code in the version string
To reproduce:
1. `poetry init` with default choices; no dependencies.
1. `poetry add pytest-cov`
1. `poetry install`
The last command prints:
```
Installing dependencies from lock file
Package operations: 0 installs, 1 update, 0 removals
- Updating pytest-cov (2.10.0 import os, sys;exec('if \'COV_CORE_SOURCE\' in os.environ:\n try:\n from pytest_cov.embed import init\n init()\n except Exception as exc:\n sys.stderr.write(\n "pytest-cov: Failed to setup subprocess coverage. "\n "Environ: {0!r} "\n "Exception: {1!r}\\n".format(\n dict((k, v) for k, v in os.environ.items() if k.startswith(\'COV_CORE\')),\n exc\n )\n )\n') -> 2.10.0)
```
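For context, the stray Python in the version string is the body of pytest-cov's `pytest-cov.pth` file: the repository-scanning code shown below reads the first line of a matching `<package>.pth` file in site-packages as a source directory, so an executable `.pth` file gets swallowed whole. A minimal sketch of the kind of filtering that avoids this (illustrative names, not the project's actual API):

```python
from pathlib import Path

def pth_paths(pth_file: Path):
    # Keep only plain path lines; skip comments and executable "import ..." lines.
    for line in pth_file.read_text().splitlines():
        line = line.strip()
        if line and not line.startswith(("#", "import ", "import\t")):
            yield Path(line)
```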
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `poetry/repositories/installed_repository.py`
Content:
```
1 from poetry.core.packages import Package
2 from poetry.utils._compat import Path
3 from poetry.utils._compat import metadata
4 from poetry.utils.env import Env
5
6 from .repository import Repository
7
8
9 _VENDORS = Path(__file__).parent.parent.joinpath("_vendor")
10
11
12 class InstalledRepository(Repository):
13 @classmethod
14 def load(cls, env): # type: (Env) -> InstalledRepository
15 """
16 Load installed packages.
17 """
18 repo = cls()
19 seen = set()
20
21 for entry in reversed(env.sys_path):
22 for distribution in sorted(
23 metadata.distributions(path=[entry]), key=lambda d: str(d._path),
24 ):
25 name = distribution.metadata["name"]
26 path = Path(str(distribution._path))
27 version = distribution.metadata["version"]
28 package = Package(name, version, version)
29 package.description = distribution.metadata.get("summary", "")
30
31 if package.name in seen:
32 continue
33
34 try:
35 path.relative_to(_VENDORS)
36 except ValueError:
37 pass
38 else:
39 continue
40
41 seen.add(package.name)
42
43 repo.add_package(package)
44
45 is_standard_package = True
46 try:
47 path.relative_to(env.site_packages)
48 except ValueError:
49 is_standard_package = False
50
51 if is_standard_package:
52 if (
53 path.name.endswith(".dist-info")
54 and env.site_packages.joinpath(
55 "{}.pth".format(package.pretty_name)
56 ).exists()
57 ):
58 with env.site_packages.joinpath(
59 "{}.pth".format(package.pretty_name)
60 ).open() as f:
61 directory = Path(f.readline().strip())
62 package.source_type = "directory"
63 package.source_url = directory.as_posix()
64
65 continue
66
67 src_path = env.path / "src"
68
69 # A VCS dependency should have been installed
70 # in the src directory. If not, it's a path dependency
71 try:
72 path.relative_to(src_path)
73
74 from poetry.core.vcs.git import Git
75
76 git = Git()
77 revision = git.rev_parse("HEAD", src_path / package.name).strip()
78 url = git.remote_url(src_path / package.name)
79
80 package.source_type = "git"
81 package.source_url = url
82 package.source_reference = revision
83 except ValueError:
84 package.source_type = "directory"
85 package.source_url = str(path.parent)
86
87 return repo
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/poetry/repositories/installed_repository.py b/poetry/repositories/installed_repository.py
--- a/poetry/repositories/installed_repository.py
+++ b/poetry/repositories/installed_repository.py
@@ -1,3 +1,5 @@
+from typing import Set
+
from poetry.core.packages import Package
from poetry.utils._compat import Path
from poetry.utils._compat import metadata
@@ -10,6 +12,37 @@
class InstalledRepository(Repository):
+ @classmethod
+ def get_package_paths(cls, sitedir, name): # type: (Path, str) -> Set[Path]
+ """
+ Process a .pth file within the site-packages directory, and return any valid
+ paths. We skip executable .pth files as there is no reliable means to do this
+ without side-effects to current run-time. Mo check is made that the item refers
+ to a directory rather than a file, however, in order to maintain backwards
+ compatibility, we allow non-existing paths to be discovered. The latter
+ behaviour is different to how Python's site-specific hook configuration works.
+
+ Reference: https://docs.python.org/3.8/library/site.html
+
+ :param sitedir: The site-packages directory to search for .pth file.
+ :param name: The name of the package to search .pth file for.
+ :return: A `Set` of valid `Path` objects.
+ """
+ paths = set()
+
+ pth_file = sitedir.joinpath("{}.pth".format(name))
+ if pth_file.exists():
+ with pth_file.open() as f:
+ for line in f:
+ line = line.strip()
+ if line and not line.startswith(("#", "import ", "import\t")):
+ path = Path(line)
+ if not path.is_absolute():
+ path = sitedir.joinpath(path)
+ paths.add(path)
+
+ return paths
+
@classmethod
def load(cls, env): # type: (Env) -> InstalledRepository
"""
@@ -49,19 +82,14 @@
is_standard_package = False
if is_standard_package:
- if (
- path.name.endswith(".dist-info")
- and env.site_packages.joinpath(
- "{}.pth".format(package.pretty_name)
- ).exists()
- ):
- with env.site_packages.joinpath(
- "{}.pth".format(package.pretty_name)
- ).open() as f:
- directory = Path(f.readline().strip())
+ if path.name.endswith(".dist-info"):
+ paths = cls.get_package_paths(
+ sitedir=env.site_packages, name=package.pretty_name
+ )
+ if paths:
+ # TODO: handle multiple source directories?
package.source_type = "directory"
- package.source_url = directory.as_posix()
-
+ package.source_url = paths.pop().as_posix()
continue
src_path = env.path / "src"
| {"golden_diff": "diff --git a/poetry/repositories/installed_repository.py b/poetry/repositories/installed_repository.py\n--- a/poetry/repositories/installed_repository.py\n+++ b/poetry/repositories/installed_repository.py\n@@ -1,3 +1,5 @@\n+from typing import Set\n+\n from poetry.core.packages import Package\n from poetry.utils._compat import Path\n from poetry.utils._compat import metadata\n@@ -10,6 +12,37 @@\n \n \n class InstalledRepository(Repository):\n+ @classmethod\n+ def get_package_paths(cls, sitedir, name): # type: (Path, str) -> Set[Path]\n+ \"\"\"\n+ Process a .pth file within the site-packages directory, and return any valid\n+ paths. We skip executable .pth files as there is no reliable means to do this\n+ without side-effects to current run-time. Mo check is made that the item refers\n+ to a directory rather than a file, however, in order to maintain backwards\n+ compatibility, we allow non-existing paths to be discovered. The latter\n+ behaviour is different to how Python's site-specific hook configuration works.\n+\n+ Reference: https://docs.python.org/3.8/library/site.html\n+\n+ :param sitedir: The site-packages directory to search for .pth file.\n+ :param name: The name of the package to search .pth file for.\n+ :return: A `Set` of valid `Path` objects.\n+ \"\"\"\n+ paths = set()\n+\n+ pth_file = sitedir.joinpath(\"{}.pth\".format(name))\n+ if pth_file.exists():\n+ with pth_file.open() as f:\n+ for line in f:\n+ line = line.strip()\n+ if line and not line.startswith((\"#\", \"import \", \"import\\t\")):\n+ path = Path(line)\n+ if not path.is_absolute():\n+ path = sitedir.joinpath(path)\n+ paths.add(path)\n+\n+ return paths\n+\n @classmethod\n def load(cls, env): # type: (Env) -> InstalledRepository\n \"\"\"\n@@ -49,19 +82,14 @@\n is_standard_package = False\n \n if is_standard_package:\n- if (\n- path.name.endswith(\".dist-info\")\n- and env.site_packages.joinpath(\n- \"{}.pth\".format(package.pretty_name)\n- ).exists()\n- ):\n- with env.site_packages.joinpath(\n- \"{}.pth\".format(package.pretty_name)\n- ).open() as f:\n- directory = Path(f.readline().strip())\n+ if path.name.endswith(\".dist-info\"):\n+ paths = cls.get_package_paths(\n+ sitedir=env.site_packages, name=package.pretty_name\n+ )\n+ if paths:\n+ # TODO: handle multiple source directories?\n package.source_type = \"directory\"\n- package.source_url = directory.as_posix()\n-\n+ package.source_url = paths.pop().as_posix()\n continue\n \n src_path = env.path / \"src\"\n", "issue": "Poetry 1.1.0a2 includes python code in the version string\nTo reproduce:\r\n\r\n1. `poetry init` with default choices; no dependencies.\r\n1. `poetry add pytest-cov`\r\n1. `poetry install`\r\n\r\nThe last command prints:\r\n```\r\nInstalling dependencies from lock file\r\n\r\nPackage operations: 0 installs, 1 update, 0 removals\r\n\r\n- Updating pytest-cov (2.10.0 import os, sys;exec('if \\'COV_CORE_SOURCE\\' in os.environ:\\n try:\\n from pytest_cov.embed import init\\n init()\\n except Exception as exc:\\n sys.stderr.write(\\n \"pytest-cov: Failed to setup subprocess coverage. 
\"\\n \"Environ: {0!r} \"\\n \"Exception: {1!r}\\\\n\".format(\\n dict((k, v) for k, v in os.environ.items() if k.startswith(\\'COV_CORE\\')),\\n exc\\n )\\n )\\n') -> 2.10.0) \r\n```\n", "before_files": [{"content": "from poetry.core.packages import Package\nfrom poetry.utils._compat import Path\nfrom poetry.utils._compat import metadata\nfrom poetry.utils.env import Env\n\nfrom .repository import Repository\n\n\n_VENDORS = Path(__file__).parent.parent.joinpath(\"_vendor\")\n\n\nclass InstalledRepository(Repository):\n @classmethod\n def load(cls, env): # type: (Env) -> InstalledRepository\n \"\"\"\n Load installed packages.\n \"\"\"\n repo = cls()\n seen = set()\n\n for entry in reversed(env.sys_path):\n for distribution in sorted(\n metadata.distributions(path=[entry]), key=lambda d: str(d._path),\n ):\n name = distribution.metadata[\"name\"]\n path = Path(str(distribution._path))\n version = distribution.metadata[\"version\"]\n package = Package(name, version, version)\n package.description = distribution.metadata.get(\"summary\", \"\")\n\n if package.name in seen:\n continue\n\n try:\n path.relative_to(_VENDORS)\n except ValueError:\n pass\n else:\n continue\n\n seen.add(package.name)\n\n repo.add_package(package)\n\n is_standard_package = True\n try:\n path.relative_to(env.site_packages)\n except ValueError:\n is_standard_package = False\n\n if is_standard_package:\n if (\n path.name.endswith(\".dist-info\")\n and env.site_packages.joinpath(\n \"{}.pth\".format(package.pretty_name)\n ).exists()\n ):\n with env.site_packages.joinpath(\n \"{}.pth\".format(package.pretty_name)\n ).open() as f:\n directory = Path(f.readline().strip())\n package.source_type = \"directory\"\n package.source_url = directory.as_posix()\n\n continue\n\n src_path = env.path / \"src\"\n\n # A VCS dependency should have been installed\n # in the src directory. If not, it's a path dependency\n try:\n path.relative_to(src_path)\n\n from poetry.core.vcs.git import Git\n\n git = Git()\n revision = git.rev_parse(\"HEAD\", src_path / package.name).strip()\n url = git.remote_url(src_path / package.name)\n\n package.source_type = \"git\"\n package.source_url = url\n package.source_reference = revision\n except ValueError:\n package.source_type = \"directory\"\n package.source_url = str(path.parent)\n\n return repo\n", "path": "poetry/repositories/installed_repository.py"}], "after_files": [{"content": "from typing import Set\n\nfrom poetry.core.packages import Package\nfrom poetry.utils._compat import Path\nfrom poetry.utils._compat import metadata\nfrom poetry.utils.env import Env\n\nfrom .repository import Repository\n\n\n_VENDORS = Path(__file__).parent.parent.joinpath(\"_vendor\")\n\n\nclass InstalledRepository(Repository):\n @classmethod\n def get_package_paths(cls, sitedir, name): # type: (Path, str) -> Set[Path]\n \"\"\"\n Process a .pth file within the site-packages directory, and return any valid\n paths. We skip executable .pth files as there is no reliable means to do this\n without side-effects to current run-time. Mo check is made that the item refers\n to a directory rather than a file, however, in order to maintain backwards\n compatibility, we allow non-existing paths to be discovered. 
The latter\n behaviour is different to how Python's site-specific hook configuration works.\n\n Reference: https://docs.python.org/3.8/library/site.html\n\n :param sitedir: The site-packages directory to search for .pth file.\n :param name: The name of the package to search .pth file for.\n :return: A `Set` of valid `Path` objects.\n \"\"\"\n paths = set()\n\n pth_file = sitedir.joinpath(\"{}.pth\".format(name))\n if pth_file.exists():\n with pth_file.open() as f:\n for line in f:\n line = line.strip()\n if line and not line.startswith((\"#\", \"import \", \"import\\t\")):\n path = Path(line)\n if not path.is_absolute():\n path = sitedir.joinpath(path)\n paths.add(path)\n\n return paths\n\n @classmethod\n def load(cls, env): # type: (Env) -> InstalledRepository\n \"\"\"\n Load installed packages.\n \"\"\"\n repo = cls()\n seen = set()\n\n for entry in reversed(env.sys_path):\n for distribution in sorted(\n metadata.distributions(path=[entry]), key=lambda d: str(d._path),\n ):\n name = distribution.metadata[\"name\"]\n path = Path(str(distribution._path))\n version = distribution.metadata[\"version\"]\n package = Package(name, version, version)\n package.description = distribution.metadata.get(\"summary\", \"\")\n\n if package.name in seen:\n continue\n\n try:\n path.relative_to(_VENDORS)\n except ValueError:\n pass\n else:\n continue\n\n seen.add(package.name)\n\n repo.add_package(package)\n\n is_standard_package = True\n try:\n path.relative_to(env.site_packages)\n except ValueError:\n is_standard_package = False\n\n if is_standard_package:\n if path.name.endswith(\".dist-info\"):\n paths = cls.get_package_paths(\n sitedir=env.site_packages, name=package.pretty_name\n )\n if paths:\n # TODO: handle multiple source directories?\n package.source_type = \"directory\"\n package.source_url = paths.pop().as_posix()\n continue\n\n src_path = env.path / \"src\"\n\n # A VCS dependency should have been installed\n # in the src directory. If not, it's a path dependency\n try:\n path.relative_to(src_path)\n\n from poetry.core.vcs.git import Git\n\n git = Git()\n revision = git.rev_parse(\"HEAD\", src_path / package.name).strip()\n url = git.remote_url(src_path / package.name)\n\n package.source_type = \"git\"\n package.source_url = url\n package.source_reference = revision\n except ValueError:\n package.source_type = \"directory\"\n package.source_url = str(path.parent)\n\n return repo\n", "path": "poetry/repositories/installed_repository.py"}]} | 1,170 | 671 |
gh_patches_debug_4899 | rasdani/github-patches | git_diff | ivy-llc__ivy-18924 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dropout3d
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/nn/functional/common.py`
Content:
```
1 # local
2 import ivy
3 from ivy.func_wrapper import with_supported_dtypes
4 from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
5
6
7 @to_ivy_arrays_and_back
8 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
9 def cosine_similarity(x1, x2, *, axis=1, eps=1e-08):
10 if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:
11 numerator = ivy.sum(x1 * x2, axis=axis)
12 x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)
13 x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)
14 else:
15 numerator = ivy.sum(x1 * x2)
16 x1_squared_norm = ivy.sum(ivy.square(x1))
17 x2_squared_norm = ivy.sum(ivy.square(x2))
18
19 x1_norm = ivy.sqrt(x1_squared_norm)
20 x2_norm = ivy.sqrt(x2_squared_norm)
21 norm_mm = x1_norm * x2_norm
22 denominator = ivy.maximum(norm_mm, eps)
23
24 cosine = numerator / denominator
25 return cosine
26
27
28 @to_ivy_arrays_and_back
29 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
30 def dropout2d(x, *, p=0.5, training=True, data_format="NCHW", name=None):
31 return ivy.dropout2d(x, p=p, training=training, data_format=data_format)
32
33
34 def get_mask(shape, device, prob, seed=None):
35 mask = ivy.where(
36 ivy.random_uniform(shape=shape, device=device, seed=seed) < prob,
37 0.0,
38 1.0,
39 )
40 return mask
41
42
43 @to_ivy_arrays_and_back
44 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
45 def dropout(x, p=0.5, axis=None, training=True, mode="upscale_in_train", name=None):
46 if axis > 1:
47 raise ValueError("Axis value can only be 0 or 1 or None.")
48 elif axis is None or (isinstance(axis, list) and len(axis) == 2):
49 mask = get_mask(shape=x.shape, device=ivy.dev(x), prob=p, seed=None)
50 elif axis == 0:
51 mask = get_mask(shape=(x.shape[0], 1), device=ivy.dev(x), prob=p)
52 mask = ivy.broadcast_to(mask, x.shape)
53 elif axis == 1:
54 mask = get_mask(shape=(1, x.shape[1]), device=ivy.dev(x), prob=p)
55 mask = ivy.broadcast_to(mask, x.shape)
56 if mode == "upscale_in_train":
57 if training:
58 out = ivy.multiply(x, mask)
59 ret = ivy.multiply(out, 1.0 / (1.0 - p))
60 else:
61 ret = x
62 else:
63 if training:
64 ret = ivy.multiply(x, mask)
65 else:
66 ret = ivy.multiply(x, (1.0 - p))
67 return ret
68
69
70 @to_ivy_arrays_and_back
71 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
72 def zeropad2d(x, padding, data_format="NCHW", name=None):
73 if ivy.is_array(padding):
74 padding = padding.to_list()
75 if isinstance(padding, int):
76 padding = [padding, padding, padding, padding]
77 if len(padding) != 4:
78 raise ValueError("Padding length should be 4.")
79 if x.ndim != 4:
80 raise ValueError("Input x must be 4-dimensional.")
81 if data_format == "NCHW":
82 padding = ((0, 0), (0, 0), (padding[2], padding[3]), (padding[0], padding[1]))
83 elif data_format == "NHWC":
84 padding = ((0, 0), (padding[2], padding[3]), (padding[0], padding[1]), (0, 0))
85 else:
86 raise ValueError("Unknown data_format: {}".format(data_format))
87 return ivy.pad(x, padding, mode="constant", constant_values=0.0)
88
89
90 @to_ivy_arrays_and_back
91 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
92 def interpolate(
93 x,
94 size=None,
95 scale_factor=None,
96 mode="nearest",
97 align_corners=False,
98 align_mode=0,
99 data_format="NCHW",
100 name=None,
101 ):
102 return ivy.interpolate(
103 x, size, mode=mode, scale_factor=scale_factor, align_corners=align_corners
104 )
105
106
107 @to_ivy_arrays_and_back
108 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
109 def linear(x, weight, bias=None, name=None):
110 weight = ivy.swapaxes(weight, -1, -2)
111 return ivy.linear(x, weight, bias=bias)
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/paddle/nn/functional/common.py b/ivy/functional/frontends/paddle/nn/functional/common.py
--- a/ivy/functional/frontends/paddle/nn/functional/common.py
+++ b/ivy/functional/frontends/paddle/nn/functional/common.py
@@ -109,3 +109,9 @@
def linear(x, weight, bias=None, name=None):
weight = ivy.swapaxes(weight, -1, -2)
return ivy.linear(x, weight, bias=bias)
+
+
+@to_ivy_arrays_and_back
+@with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
+def dropout3d(x, p=0.5, training=True, data_format="NCDHW", name=None):
+ return ivy.dropout3d(x, p, training=training, data_format=data_format)
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/nn/functional/common.py b/ivy/functional/frontends/paddle/nn/functional/common.py\n--- a/ivy/functional/frontends/paddle/nn/functional/common.py\n+++ b/ivy/functional/frontends/paddle/nn/functional/common.py\n@@ -109,3 +109,9 @@\n def linear(x, weight, bias=None, name=None):\n weight = ivy.swapaxes(weight, -1, -2)\n return ivy.linear(x, weight, bias=bias)\n+\n+\n+@to_ivy_arrays_and_back\n+@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n+def dropout3d(x, p=0.5, training=True, data_format=\"NCDHW\", name=None):\n+ return ivy.dropout3d(x, p, training=training, data_format=data_format)\n", "issue": "dropout3d\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef cosine_similarity(x1, x2, *, axis=1, eps=1e-08):\n if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:\n numerator = ivy.sum(x1 * x2, axis=axis)\n x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)\n x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)\n else:\n numerator = ivy.sum(x1 * x2)\n x1_squared_norm = ivy.sum(ivy.square(x1))\n x2_squared_norm = ivy.sum(ivy.square(x2))\n\n x1_norm = ivy.sqrt(x1_squared_norm)\n x2_norm = ivy.sqrt(x2_squared_norm)\n norm_mm = x1_norm * x2_norm\n denominator = ivy.maximum(norm_mm, eps)\n\n cosine = numerator / denominator\n return cosine\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef dropout2d(x, *, p=0.5, training=True, data_format=\"NCHW\", name=None):\n return ivy.dropout2d(x, p=p, training=training, data_format=data_format)\n\n\ndef get_mask(shape, device, prob, seed=None):\n mask = ivy.where(\n ivy.random_uniform(shape=shape, device=device, seed=seed) < prob,\n 0.0,\n 1.0,\n )\n return mask\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef dropout(x, p=0.5, axis=None, training=True, mode=\"upscale_in_train\", name=None):\n if axis > 1:\n raise ValueError(\"Axis value can only be 0 or 1 or None.\")\n elif axis is None or (isinstance(axis, list) and len(axis) == 2):\n mask = get_mask(shape=x.shape, device=ivy.dev(x), prob=p, seed=None)\n elif axis == 0:\n mask = get_mask(shape=(x.shape[0], 1), device=ivy.dev(x), prob=p)\n mask = ivy.broadcast_to(mask, x.shape)\n elif axis == 1:\n mask = get_mask(shape=(1, x.shape[1]), device=ivy.dev(x), prob=p)\n mask = ivy.broadcast_to(mask, x.shape)\n if mode == \"upscale_in_train\":\n if training:\n out = ivy.multiply(x, mask)\n ret = ivy.multiply(out, 1.0 / (1.0 - p))\n else:\n ret = x\n else:\n if training:\n ret = ivy.multiply(x, mask)\n else:\n ret = ivy.multiply(x, (1.0 - p))\n return ret\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef zeropad2d(x, padding, data_format=\"NCHW\", name=None):\n if ivy.is_array(padding):\n padding = padding.to_list()\n if isinstance(padding, int):\n padding = [padding, padding, padding, padding]\n if len(padding) != 4:\n raise ValueError(\"Padding length should be 4.\")\n if x.ndim != 4:\n raise ValueError(\"Input x must be 4-dimensional.\")\n if data_format == \"NCHW\":\n padding = ((0, 0), (0, 0), (padding[2], padding[3]), (padding[0], 
padding[1]))\n elif data_format == \"NHWC\":\n padding = ((0, 0), (padding[2], padding[3]), (padding[0], padding[1]), (0, 0))\n else:\n raise ValueError(\"Unknown data_format: {}\".format(data_format))\n return ivy.pad(x, padding, mode=\"constant\", constant_values=0.0)\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef interpolate(\n x,\n size=None,\n scale_factor=None,\n mode=\"nearest\",\n align_corners=False,\n align_mode=0,\n data_format=\"NCHW\",\n name=None,\n):\n return ivy.interpolate(\n x, size, mode=mode, scale_factor=scale_factor, align_corners=align_corners\n )\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef linear(x, weight, bias=None, name=None):\n weight = ivy.swapaxes(weight, -1, -2)\n return ivy.linear(x, weight, bias=bias)\n", "path": "ivy/functional/frontends/paddle/nn/functional/common.py"}], "after_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef cosine_similarity(x1, x2, *, axis=1, eps=1e-08):\n if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:\n numerator = ivy.sum(x1 * x2, axis=axis)\n x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)\n x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)\n else:\n numerator = ivy.sum(x1 * x2)\n x1_squared_norm = ivy.sum(ivy.square(x1))\n x2_squared_norm = ivy.sum(ivy.square(x2))\n\n x1_norm = ivy.sqrt(x1_squared_norm)\n x2_norm = ivy.sqrt(x2_squared_norm)\n norm_mm = x1_norm * x2_norm\n denominator = ivy.maximum(norm_mm, eps)\n\n cosine = numerator / denominator\n return cosine\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef dropout2d(x, *, p=0.5, training=True, data_format=\"NCHW\", name=None):\n return ivy.dropout2d(x, p=p, training=training, data_format=data_format)\n\n\ndef get_mask(shape, device, prob, seed=None):\n mask = ivy.where(\n ivy.random_uniform(shape=shape, device=device, seed=seed) < prob,\n 0.0,\n 1.0,\n )\n return mask\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef dropout(x, p=0.5, axis=None, training=True, mode=\"upscale_in_train\", name=None):\n if axis > 1:\n raise ValueError(\"Axis value can only be 0 or 1 or None.\")\n elif axis is None or (isinstance(axis, list) and len(axis) == 2):\n mask = get_mask(shape=x.shape, device=ivy.dev(x), prob=p, seed=None)\n elif axis == 0:\n mask = get_mask(shape=(x.shape[0], 1), device=ivy.dev(x), prob=p)\n mask = ivy.broadcast_to(mask, x.shape)\n elif axis == 1:\n mask = get_mask(shape=(1, x.shape[1]), device=ivy.dev(x), prob=p)\n mask = ivy.broadcast_to(mask, x.shape)\n if mode == \"upscale_in_train\":\n if training:\n out = ivy.multiply(x, mask)\n ret = ivy.multiply(out, 1.0 / (1.0 - p))\n else:\n ret = x\n else:\n if training:\n ret = ivy.multiply(x, mask)\n else:\n ret = ivy.multiply(x, (1.0 - p))\n return ret\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef zeropad2d(x, padding, data_format=\"NCHW\", name=None):\n if ivy.is_array(padding):\n padding = padding.to_list()\n if isinstance(padding, int):\n padding = [padding, padding, padding, padding]\n if 
len(padding) != 4:\n raise ValueError(\"Padding length should be 4.\")\n if x.ndim != 4:\n raise ValueError(\"Input x must be 4-dimensional.\")\n if data_format == \"NCHW\":\n padding = ((0, 0), (0, 0), (padding[2], padding[3]), (padding[0], padding[1]))\n elif data_format == \"NHWC\":\n padding = ((0, 0), (padding[2], padding[3]), (padding[0], padding[1]), (0, 0))\n else:\n raise ValueError(\"Unknown data_format: {}\".format(data_format))\n return ivy.pad(x, padding, mode=\"constant\", constant_values=0.0)\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef interpolate(\n x,\n size=None,\n scale_factor=None,\n mode=\"nearest\",\n align_corners=False,\n align_mode=0,\n data_format=\"NCHW\",\n name=None,\n):\n return ivy.interpolate(\n x, size, mode=mode, scale_factor=scale_factor, align_corners=align_corners\n )\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef linear(x, weight, bias=None, name=None):\n weight = ivy.swapaxes(weight, -1, -2)\n return ivy.linear(x, weight, bias=bias)\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef dropout3d(x, p=0.5, training=True, data_format=\"NCDHW\", name=None):\n return ivy.dropout3d(x, p, training=training, data_format=data_format)\n", "path": "ivy/functional/frontends/paddle/nn/functional/common.py"}]} | 1,676 | 210 |
gh_patches_debug_18102 | rasdani/github-patches | git_diff | iterative__dvc-417 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Running DVC outside of Git dir
We should handle all the cases like this.
```
cd /
$ dvc repro
No handlers could be found for logger "dvc"
```
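For context, this is the standard Python 2 `logging` complaint that appears when a logger emits a record before any handler has been attached to it. A minimal, illustrative guard (not the project's actual code) that prevents it:

```python
import logging
import sys

logger = logging.getLogger("dvc")
if not logger.handlers:
    # Attach a default handler so messages are visible even when no config was loaded.
    logger.addHandler(logging.StreamHandler(sys.stdout))
```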
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/logger.py`
Content:
```
1 import sys
2 import logging
3
4 import colorama
5
6
7 colorama.init()
8
9
10 class Logger(object):
11 FMT = '%(message)s'
12 DEFAULT_LEVEL = logging.INFO
13
14 LEVEL_MAP = {
15 'debug': logging.DEBUG,
16 'info': logging.INFO,
17 'warn': logging.WARNING,
18 'error': logging.ERROR
19 }
20
21 COLOR_MAP = {
22 'debug': colorama.Fore.BLUE,
23 'warn': colorama.Fore.YELLOW,
24 'error': colorama.Fore.RED
25 }
26
27 def __init__(self, config=None):
28 sh = logging.StreamHandler(sys.stdout)
29 sh.setFormatter(logging.Formatter(self.FMT))
30 sh.setLevel(logging.DEBUG)
31
32 self.logger().addHandler(sh)
33 level = None
34 if config:
35 level = config['Global'].get('LogLevel', None)
36 self.set_level(level)
37
38 @staticmethod
39 def logger():
40 return logging.getLogger('dvc')
41
42 @staticmethod
43 def set_level(level=None):
44 if not level:
45 lvl = Logger.DEFAULT_LEVEL
46 else:
47 lvl = Logger.LEVEL_MAP.get(level.lower(), Logger.DEFAULT_LEVEL)
48 Logger.logger().setLevel(lvl)
49
50 @staticmethod
51 def be_quiet():
52 Logger.logger().setLevel(logging.CRITICAL)
53
54 @staticmethod
55 def be_verbose():
56 Logger.logger().setLevel(logging.DEBUG)
57
58 @staticmethod
59 def colorize(msg, typ):
60 header = ''
61 footer = ''
62
63 if sys.stdout.isatty():
64 header = Logger.COLOR_MAP.get(typ.lower(), '')
65 footer = colorama.Style.RESET_ALL
66
67 return u'{}{}{}'.format(header, msg, footer)
68
69 @staticmethod
70 def error(msg, **kwargs):
71 exc_info = Logger.logger().getEffectiveLevel() == logging.DEBUG
72 return Logger.logger().error(Logger.colorize(msg, 'error'), exc_info=exc_info, **kwargs)
73
74 @staticmethod
75 def warn(msg, **kwargs):
76 return Logger.logger().warn(Logger.colorize(msg, 'warn'), **kwargs)
77
78 @staticmethod
79 def debug(msg, **kwargs):
80 return Logger.logger().debug(Logger.colorize(msg, 'debug'), **kwargs)
81
82 @staticmethod
83 def info(msg, **kwargs):
84 return Logger.logger().info(Logger.colorize(msg, 'info'), **kwargs)
85
```
Path: `dvc/main.py`
Content:
```
1 from dvc.logger import Logger
2 from dvc.cli import parse_args
3
4 def main(argv=None):
5 args = parse_args(argv)
6
7 try:
8 cmd = args.func(args)
9 except Exception as ex:
10 Logger.error('Initialization error: {}'.format(str(ex)))
11 return 255
12
13 return cmd.run_cmd()
14
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/logger.py b/dvc/logger.py
--- a/dvc/logger.py
+++ b/dvc/logger.py
@@ -25,15 +25,19 @@
}
def __init__(self, config=None):
+ self.init(config)
+
+ @staticmethod
+ def init(config=None):
sh = logging.StreamHandler(sys.stdout)
- sh.setFormatter(logging.Formatter(self.FMT))
+ sh.setFormatter(logging.Formatter(Logger.FMT))
sh.setLevel(logging.DEBUG)
- self.logger().addHandler(sh)
+ Logger.logger().addHandler(sh)
level = None
if config:
level = config['Global'].get('LogLevel', None)
- self.set_level(level)
+ Logger.set_level(level)
@staticmethod
def logger():
diff --git a/dvc/main.py b/dvc/main.py
--- a/dvc/main.py
+++ b/dvc/main.py
@@ -2,6 +2,8 @@
from dvc.cli import parse_args
def main(argv=None):
+ Logger.init()
+
args = parse_args(argv)
try:
| {"golden_diff": "diff --git a/dvc/logger.py b/dvc/logger.py\n--- a/dvc/logger.py\n+++ b/dvc/logger.py\n@@ -25,15 +25,19 @@\n }\n \n def __init__(self, config=None):\n+ self.init(config)\n+\n+ @staticmethod\n+ def init(config=None):\n sh = logging.StreamHandler(sys.stdout)\n- sh.setFormatter(logging.Formatter(self.FMT))\n+ sh.setFormatter(logging.Formatter(Logger.FMT))\n sh.setLevel(logging.DEBUG)\n \n- self.logger().addHandler(sh)\n+ Logger.logger().addHandler(sh)\n level = None\n if config:\n level = config['Global'].get('LogLevel', None)\n- self.set_level(level)\n+ Logger.set_level(level)\n \n @staticmethod\n def logger():\ndiff --git a/dvc/main.py b/dvc/main.py\n--- a/dvc/main.py\n+++ b/dvc/main.py\n@@ -2,6 +2,8 @@\n from dvc.cli import parse_args\n \n def main(argv=None):\n+ Logger.init()\n+\n args = parse_args(argv)\n \n try:\n", "issue": "Running DVC outside of Git dir\nWe should handle all the cases like this.\r\n\r\n```\r\ncd /\r\n$ dvc repro\r\nNo handlers could be found for logger \"dvc\"\r\n```\n", "before_files": [{"content": "import sys\nimport logging\n\nimport colorama\n\n\ncolorama.init()\n\n\nclass Logger(object):\n FMT = '%(message)s'\n DEFAULT_LEVEL = logging.INFO\n\n LEVEL_MAP = {\n 'debug': logging.DEBUG,\n 'info': logging.INFO,\n 'warn': logging.WARNING,\n 'error': logging.ERROR\n }\n\n COLOR_MAP = {\n 'debug': colorama.Fore.BLUE,\n 'warn': colorama.Fore.YELLOW,\n 'error': colorama.Fore.RED\n }\n\n def __init__(self, config=None):\n sh = logging.StreamHandler(sys.stdout)\n sh.setFormatter(logging.Formatter(self.FMT))\n sh.setLevel(logging.DEBUG)\n\n self.logger().addHandler(sh)\n level = None\n if config:\n level = config['Global'].get('LogLevel', None)\n self.set_level(level)\n\n @staticmethod\n def logger():\n return logging.getLogger('dvc')\n\n @staticmethod\n def set_level(level=None):\n if not level:\n lvl = Logger.DEFAULT_LEVEL\n else:\n lvl = Logger.LEVEL_MAP.get(level.lower(), Logger.DEFAULT_LEVEL)\n Logger.logger().setLevel(lvl)\n\n @staticmethod\n def be_quiet():\n Logger.logger().setLevel(logging.CRITICAL)\n\n @staticmethod\n def be_verbose():\n Logger.logger().setLevel(logging.DEBUG)\n\n @staticmethod\n def colorize(msg, typ):\n header = ''\n footer = ''\n\n if sys.stdout.isatty():\n header = Logger.COLOR_MAP.get(typ.lower(), '')\n footer = colorama.Style.RESET_ALL\n\n return u'{}{}{}'.format(header, msg, footer)\n\n @staticmethod\n def error(msg, **kwargs):\n exc_info = Logger.logger().getEffectiveLevel() == logging.DEBUG\n return Logger.logger().error(Logger.colorize(msg, 'error'), exc_info=exc_info, **kwargs)\n\n @staticmethod\n def warn(msg, **kwargs):\n return Logger.logger().warn(Logger.colorize(msg, 'warn'), **kwargs)\n\n @staticmethod\n def debug(msg, **kwargs):\n return Logger.logger().debug(Logger.colorize(msg, 'debug'), **kwargs)\n\n @staticmethod\n def info(msg, **kwargs):\n return Logger.logger().info(Logger.colorize(msg, 'info'), **kwargs)\n", "path": "dvc/logger.py"}, {"content": "from dvc.logger import Logger\nfrom dvc.cli import parse_args\n\ndef main(argv=None):\n args = parse_args(argv)\n\n try:\n cmd = args.func(args)\n except Exception as ex:\n Logger.error('Initialization error: {}'.format(str(ex)))\n return 255\n\n return cmd.run_cmd()\n", "path": "dvc/main.py"}], "after_files": [{"content": "import sys\nimport logging\n\nimport colorama\n\n\ncolorama.init()\n\n\nclass Logger(object):\n FMT = '%(message)s'\n DEFAULT_LEVEL = logging.INFO\n\n LEVEL_MAP = {\n 'debug': logging.DEBUG,\n 'info': logging.INFO,\n 'warn': logging.WARNING,\n 
'error': logging.ERROR\n }\n\n COLOR_MAP = {\n 'debug': colorama.Fore.BLUE,\n 'warn': colorama.Fore.YELLOW,\n 'error': colorama.Fore.RED\n }\n\n def __init__(self, config=None):\n self.init(config)\n\n @staticmethod\n def init(config=None):\n sh = logging.StreamHandler(sys.stdout)\n sh.setFormatter(logging.Formatter(Logger.FMT))\n sh.setLevel(logging.DEBUG)\n\n Logger.logger().addHandler(sh)\n level = None\n if config:\n level = config['Global'].get('LogLevel', None)\n Logger.set_level(level)\n\n @staticmethod\n def logger():\n return logging.getLogger('dvc')\n\n @staticmethod\n def set_level(level=None):\n if not level:\n lvl = Logger.DEFAULT_LEVEL\n else:\n lvl = Logger.LEVEL_MAP.get(level.lower(), Logger.DEFAULT_LEVEL)\n Logger.logger().setLevel(lvl)\n\n @staticmethod\n def be_quiet():\n Logger.logger().setLevel(logging.CRITICAL)\n\n @staticmethod\n def be_verbose():\n Logger.logger().setLevel(logging.DEBUG)\n\n @staticmethod\n def colorize(msg, typ):\n header = ''\n footer = ''\n\n if sys.stdout.isatty():\n header = Logger.COLOR_MAP.get(typ.lower(), '')\n footer = colorama.Style.RESET_ALL\n\n return u'{}{}{}'.format(header, msg, footer)\n\n @staticmethod\n def error(msg, **kwargs):\n exc_info = Logger.logger().getEffectiveLevel() == logging.DEBUG\n return Logger.logger().error(Logger.colorize(msg, 'error'), exc_info=exc_info, **kwargs)\n\n @staticmethod\n def warn(msg, **kwargs):\n return Logger.logger().warn(Logger.colorize(msg, 'warn'), **kwargs)\n\n @staticmethod\n def debug(msg, **kwargs):\n return Logger.logger().debug(Logger.colorize(msg, 'debug'), **kwargs)\n\n @staticmethod\n def info(msg, **kwargs):\n return Logger.logger().info(Logger.colorize(msg, 'info'), **kwargs)\n", "path": "dvc/logger.py"}, {"content": "from dvc.logger import Logger\nfrom dvc.cli import parse_args\n\ndef main(argv=None):\n Logger.init()\n\n args = parse_args(argv)\n\n try:\n cmd = args.func(args)\n except Exception as ex:\n Logger.error('Initialization error: {}'.format(str(ex)))\n return 255\n\n return cmd.run_cmd()\n", "path": "dvc/main.py"}]} | 1,060 | 243 |
gh_patches_debug_22662 | rasdani/github-patches | git_diff | psf__black-2839 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve documentation for configuration options
Currently, our config options are documented only in a collapsed-by-default text block in https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html#command-line-options. This is not very discoverable and makes it hard to give more detailed documentation, such as examples.
Instead, we should have a docs page with a separate section for each option. We can start with the existing descriptions, and extend them as needed for options with more complicated behavior.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/check_version_in_basics_example.py`
Content:
```
1 """
2 Check that the rev value in the example from ``the_basics.md`` matches
3 the latest version of Black. This saves us from forgetting to update that
4 during the release process.
5 """
6
7 import os
8 import sys
9
10 import commonmark
11 from bs4 import BeautifulSoup
12
13
14 def main(changes: str, the_basics: str) -> None:
15 changes_html = commonmark.commonmark(changes)
16 changes_soup = BeautifulSoup(changes_html, "html.parser")
17 headers = changes_soup.find_all("h2")
18 tags = [header.string for header in headers if header.string != "Unreleased"]
19 latest_tag = tags[0]
20
21 the_basics_html = commonmark.commonmark(the_basics)
22 the_basics_soup = BeautifulSoup(the_basics_html, "html.parser")
23 (version_example,) = [
24 code_block.string
25 for code_block in the_basics_soup.find_all(class_="language-console")
26 if "$ black --version" in code_block.string
27 ]
28
29 for tag in tags:
30 if tag in version_example and tag != latest_tag:
31 print(
32 "Please set the version in the ``black --version`` "
33 "example from ``the_basics.md`` to be the latest one.\n"
34 f"Expected {latest_tag}, got {tag}.\n"
35 )
36 sys.exit(1)
37
38
39 if __name__ == "__main__":
40 with open("CHANGES.md", encoding="utf-8") as fd:
41 changes = fd.read()
42 with open(
43 os.path.join("docs", "usage_and_configuration", "the_basics.md"),
44 encoding="utf-8",
45 ) as fd:
46 the_basics = fd.read()
47 main(changes, the_basics)
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/check_version_in_basics_example.py b/scripts/check_version_in_basics_example.py
--- a/scripts/check_version_in_basics_example.py
+++ b/scripts/check_version_in_basics_example.py
@@ -20,20 +20,21 @@
the_basics_html = commonmark.commonmark(the_basics)
the_basics_soup = BeautifulSoup(the_basics_html, "html.parser")
- (version_example,) = [
+ version_examples = [
code_block.string
for code_block in the_basics_soup.find_all(class_="language-console")
if "$ black --version" in code_block.string
]
for tag in tags:
- if tag in version_example and tag != latest_tag:
- print(
- "Please set the version in the ``black --version`` "
- "example from ``the_basics.md`` to be the latest one.\n"
- f"Expected {latest_tag}, got {tag}.\n"
- )
- sys.exit(1)
+ for version_example in version_examples:
+ if tag in version_example and tag != latest_tag:
+ print(
+ "Please set the version in the ``black --version`` "
+ "examples from ``the_basics.md`` to be the latest one.\n"
+ f"Expected {latest_tag}, got {tag}.\n"
+ )
+ sys.exit(1)
if __name__ == "__main__":
| {"golden_diff": "diff --git a/scripts/check_version_in_basics_example.py b/scripts/check_version_in_basics_example.py\n--- a/scripts/check_version_in_basics_example.py\n+++ b/scripts/check_version_in_basics_example.py\n@@ -20,20 +20,21 @@\n \n the_basics_html = commonmark.commonmark(the_basics)\n the_basics_soup = BeautifulSoup(the_basics_html, \"html.parser\")\n- (version_example,) = [\n+ version_examples = [\n code_block.string\n for code_block in the_basics_soup.find_all(class_=\"language-console\")\n if \"$ black --version\" in code_block.string\n ]\n \n for tag in tags:\n- if tag in version_example and tag != latest_tag:\n- print(\n- \"Please set the version in the ``black --version`` \"\n- \"example from ``the_basics.md`` to be the latest one.\\n\"\n- f\"Expected {latest_tag}, got {tag}.\\n\"\n- )\n- sys.exit(1)\n+ for version_example in version_examples:\n+ if tag in version_example and tag != latest_tag:\n+ print(\n+ \"Please set the version in the ``black --version`` \"\n+ \"examples from ``the_basics.md`` to be the latest one.\\n\"\n+ f\"Expected {latest_tag}, got {tag}.\\n\"\n+ )\n+ sys.exit(1)\n \n \n if __name__ == \"__main__\":\n", "issue": "Improve documentation for configuration options\nCurrently, our config options are documented only in a collapsed-by-default text block in https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html#command-line-options. This is not very discoverable and makes it hard to give more detailed documentation, such as examples.\r\n\r\nInstead, we should have a docs page with a separate section for each option. We can start with the existing descriptions, and extend them as needed for options with more complicated behavior.\n", "before_files": [{"content": "\"\"\"\nCheck that the rev value in the example from ``the_basics.md`` matches\nthe latest version of Black. This saves us from forgetting to update that\nduring the release process.\n\"\"\"\n\nimport os\nimport sys\n\nimport commonmark\nfrom bs4 import BeautifulSoup\n\n\ndef main(changes: str, the_basics: str) -> None:\n changes_html = commonmark.commonmark(changes)\n changes_soup = BeautifulSoup(changes_html, \"html.parser\")\n headers = changes_soup.find_all(\"h2\")\n tags = [header.string for header in headers if header.string != \"Unreleased\"]\n latest_tag = tags[0]\n\n the_basics_html = commonmark.commonmark(the_basics)\n the_basics_soup = BeautifulSoup(the_basics_html, \"html.parser\")\n (version_example,) = [\n code_block.string\n for code_block in the_basics_soup.find_all(class_=\"language-console\")\n if \"$ black --version\" in code_block.string\n ]\n\n for tag in tags:\n if tag in version_example and tag != latest_tag:\n print(\n \"Please set the version in the ``black --version`` \"\n \"example from ``the_basics.md`` to be the latest one.\\n\"\n f\"Expected {latest_tag}, got {tag}.\\n\"\n )\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n with open(\"CHANGES.md\", encoding=\"utf-8\") as fd:\n changes = fd.read()\n with open(\n os.path.join(\"docs\", \"usage_and_configuration\", \"the_basics.md\"),\n encoding=\"utf-8\",\n ) as fd:\n the_basics = fd.read()\n main(changes, the_basics)\n", "path": "scripts/check_version_in_basics_example.py"}], "after_files": [{"content": "\"\"\"\nCheck that the rev value in the example from ``the_basics.md`` matches\nthe latest version of Black. 
This saves us from forgetting to update that\nduring the release process.\n\"\"\"\n\nimport os\nimport sys\n\nimport commonmark\nfrom bs4 import BeautifulSoup\n\n\ndef main(changes: str, the_basics: str) -> None:\n changes_html = commonmark.commonmark(changes)\n changes_soup = BeautifulSoup(changes_html, \"html.parser\")\n headers = changes_soup.find_all(\"h2\")\n tags = [header.string for header in headers if header.string != \"Unreleased\"]\n latest_tag = tags[0]\n\n the_basics_html = commonmark.commonmark(the_basics)\n the_basics_soup = BeautifulSoup(the_basics_html, \"html.parser\")\n version_examples = [\n code_block.string\n for code_block in the_basics_soup.find_all(class_=\"language-console\")\n if \"$ black --version\" in code_block.string\n ]\n\n for tag in tags:\n for version_example in version_examples:\n if tag in version_example and tag != latest_tag:\n print(\n \"Please set the version in the ``black --version`` \"\n \"examples from ``the_basics.md`` to be the latest one.\\n\"\n f\"Expected {latest_tag}, got {tag}.\\n\"\n )\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n with open(\"CHANGES.md\", encoding=\"utf-8\") as fd:\n changes = fd.read()\n with open(\n os.path.join(\"docs\", \"usage_and_configuration\", \"the_basics.md\"),\n encoding=\"utf-8\",\n ) as fd:\n the_basics = fd.read()\n main(changes, the_basics)\n", "path": "scripts/check_version_in_basics_example.py"}]} | 818 | 318 |
gh_patches_debug_738 | rasdani/github-patches | git_diff | certbot__certbot-7766 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Required pyparsing version
I've been experimenting with writing tests using the oldest allowed versions of our Python dependencies. `setup.py` for `letsencrypt-nginx` says it requires `pyparsing>=1.5.5` but when I pin version 1.5.5, I encounter problems. You can see Travis logs of the issue [here](https://travis-ci.org/letsencrypt/letsencrypt/jobs/100739657) and [here](https://travis-ci.org/letsencrypt/letsencrypt/jobs/100739658).
We should determine what version we require and update `setup.py` accordingly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `certbot-nginx/setup.py`
Content:
```
1 import sys
2
3 from setuptools import find_packages
4 from setuptools import setup
5 from setuptools.command.test import test as TestCommand
6
7 version = '1.3.0.dev0'
8
9 # Remember to update local-oldest-requirements.txt when changing the minimum
10 # acme/certbot version.
11 install_requires = [
12 'acme>=1.0.0',
13 'certbot>=1.1.0',
14 'mock',
15 'PyOpenSSL',
16 'pyparsing>=1.5.5', # Python3 support; perhaps unnecessary?
17 'setuptools',
18 'zope.interface',
19 ]
20
21
22 class PyTest(TestCommand):
23 user_options = []
24
25 def initialize_options(self):
26 TestCommand.initialize_options(self)
27 self.pytest_args = ''
28
29 def run_tests(self):
30 import shlex
31 # import here, cause outside the eggs aren't loaded
32 import pytest
33 errno = pytest.main(shlex.split(self.pytest_args))
34 sys.exit(errno)
35
36
37 setup(
38 name='certbot-nginx',
39 version=version,
40 description="Nginx plugin for Certbot",
41 url='https://github.com/letsencrypt/letsencrypt',
42 author="Certbot Project",
43 author_email='[email protected]',
44 license='Apache License 2.0',
45 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
46 classifiers=[
47 'Development Status :: 5 - Production/Stable',
48 'Environment :: Plugins',
49 'Intended Audience :: System Administrators',
50 'License :: OSI Approved :: Apache Software License',
51 'Operating System :: POSIX :: Linux',
52 'Programming Language :: Python',
53 'Programming Language :: Python :: 2',
54 'Programming Language :: Python :: 2.7',
55 'Programming Language :: Python :: 3',
56 'Programming Language :: Python :: 3.5',
57 'Programming Language :: Python :: 3.6',
58 'Programming Language :: Python :: 3.7',
59 'Programming Language :: Python :: 3.8',
60 'Topic :: Internet :: WWW/HTTP',
61 'Topic :: Security',
62 'Topic :: System :: Installation/Setup',
63 'Topic :: System :: Networking',
64 'Topic :: System :: Systems Administration',
65 'Topic :: Utilities',
66 ],
67
68 packages=find_packages(),
69 include_package_data=True,
70 install_requires=install_requires,
71 entry_points={
72 'certbot.plugins': [
73 'nginx = certbot_nginx._internal.configurator:NginxConfigurator',
74 ],
75 },
76 test_suite='certbot_nginx',
77 tests_require=["pytest"],
78 cmdclass={"test": PyTest},
79 )
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/certbot-nginx/setup.py b/certbot-nginx/setup.py
--- a/certbot-nginx/setup.py
+++ b/certbot-nginx/setup.py
@@ -13,7 +13,7 @@
'certbot>=1.1.0',
'mock',
'PyOpenSSL',
- 'pyparsing>=1.5.5', # Python3 support; perhaps unnecessary?
+ 'pyparsing>=1.5.5', # Python3 support
'setuptools',
'zope.interface',
]
| {"golden_diff": "diff --git a/certbot-nginx/setup.py b/certbot-nginx/setup.py\n--- a/certbot-nginx/setup.py\n+++ b/certbot-nginx/setup.py\n@@ -13,7 +13,7 @@\n 'certbot>=1.1.0',\n 'mock',\n 'PyOpenSSL',\n- 'pyparsing>=1.5.5', # Python3 support; perhaps unnecessary?\n+ 'pyparsing>=1.5.5', # Python3 support\n 'setuptools',\n 'zope.interface',\n ]\n", "issue": "Required pyparsing version\nI've been experimenting with writing tests using the oldest allowed versions of our Python dependencies. `setup.py` for `letsencrypt-nginx` says it requires `pyparsing>=1.5.5` but when I pin version 1.5.5, I encounter problems. You can see Travis logs of the issue [here](https://travis-ci.org/letsencrypt/letsencrypt/jobs/100739657) and [here](https://travis-ci.org/letsencrypt/letsencrypt/jobs/100739658).\n\nWe should determine what version we require and update `setup.py` accordingly.\n\n", "before_files": [{"content": "import sys\n\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command.test import test as TestCommand\n\nversion = '1.3.0.dev0'\n\n# Remember to update local-oldest-requirements.txt when changing the minimum\n# acme/certbot version.\ninstall_requires = [\n 'acme>=1.0.0',\n 'certbot>=1.1.0',\n 'mock',\n 'PyOpenSSL',\n 'pyparsing>=1.5.5', # Python3 support; perhaps unnecessary?\n 'setuptools',\n 'zope.interface',\n]\n\n\nclass PyTest(TestCommand):\n user_options = []\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = ''\n\n def run_tests(self):\n import shlex\n # import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(shlex.split(self.pytest_args))\n sys.exit(errno)\n\n\nsetup(\n name='certbot-nginx',\n version=version,\n description=\"Nginx plugin for Certbot\",\n url='https://github.com/letsencrypt/letsencrypt',\n author=\"Certbot Project\",\n author_email='[email protected]',\n license='Apache License 2.0',\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Plugins',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Security',\n 'Topic :: System :: Installation/Setup',\n 'Topic :: System :: Networking',\n 'Topic :: System :: Systems Administration',\n 'Topic :: Utilities',\n ],\n\n packages=find_packages(),\n include_package_data=True,\n install_requires=install_requires,\n entry_points={\n 'certbot.plugins': [\n 'nginx = certbot_nginx._internal.configurator:NginxConfigurator',\n ],\n },\n test_suite='certbot_nginx',\n tests_require=[\"pytest\"],\n cmdclass={\"test\": PyTest},\n)\n", "path": "certbot-nginx/setup.py"}], "after_files": [{"content": "import sys\n\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command.test import test as TestCommand\n\nversion = '1.3.0.dev0'\n\n# Remember to update local-oldest-requirements.txt when changing the minimum\n# acme/certbot version.\ninstall_requires = [\n 'acme>=1.0.0',\n 'certbot>=1.1.0',\n 'mock',\n 'PyOpenSSL',\n 'pyparsing>=1.5.5', 
# Python3 support\n 'setuptools',\n 'zope.interface',\n]\n\n\nclass PyTest(TestCommand):\n user_options = []\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = ''\n\n def run_tests(self):\n import shlex\n # import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(shlex.split(self.pytest_args))\n sys.exit(errno)\n\n\nsetup(\n name='certbot-nginx',\n version=version,\n description=\"Nginx plugin for Certbot\",\n url='https://github.com/letsencrypt/letsencrypt',\n author=\"Certbot Project\",\n author_email='[email protected]',\n license='Apache License 2.0',\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Plugins',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Security',\n 'Topic :: System :: Installation/Setup',\n 'Topic :: System :: Networking',\n 'Topic :: System :: Systems Administration',\n 'Topic :: Utilities',\n ],\n\n packages=find_packages(),\n include_package_data=True,\n install_requires=install_requires,\n entry_points={\n 'certbot.plugins': [\n 'nginx = certbot_nginx._internal.configurator:NginxConfigurator',\n ],\n },\n test_suite='certbot_nginx',\n tests_require=[\"pytest\"],\n cmdclass={\"test\": PyTest},\n)\n", "path": "certbot-nginx/setup.py"}]} | 1,138 | 126 |
gh_patches_debug_4844 | rasdani/github-patches | git_diff | twisted__twisted-11722 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
.hypothesis should be in .gitignore
**Describe the incorrect behavior you saw**
`git diff` shows me an untracked `.hypothesis` directory.
**Describe how to cause this behavior**
I ran the tests.
**Describe the correct behavior you'd like to see**
`.hypothesis` [shouldn't be checked in](https://hypothesis.readthedocs.io/en/latest/database.html#the-hypothesis-example-database), so it should be ignored by `git`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `.github/scripts/check-pr-text.py`
Content:
```
1 #
2 # This script is designed to be called by the GHA workflow.
3 #
4 # It is designed to check that the PR text complies to our dev standards.
5 #
6 # The input is received via the environmet variables:
7 # * PR_TITLE - title of the PR
8 # * PR_BODY - the description of the PR
9 #
10 # To test it run
11 #
12 # $ export PR_TITLE='#1234 Test Title'
13 # $ export PR_BODY='some lines
14 # > Fixes #12345
15 # > more lines'
16 # $ python3 .github/scripts/check-pr-text.py
17 #
18 import os
19 import re
20 import sys
21
22 pr_title = os.environ.get("PR_TITLE", "")
23 pr_body = os.environ.get("PR_BODY", "")
24
25 print("--- DEBUG ---")
26 print(f"Title: {pr_title}")
27 print(f"Body:\n {pr_body}")
28 print("-------------")
29
30
31 def fail(message):
32 print(message)
33 print("Fix the title and then trigger a new push.")
34 print("A re-run for this job will not work.")
35 sys.exit(1)
36
37
38 if not pr_title:
39 fail("Title for the PR not found. " "Maybe missing PR_TITLE env var.")
40
41 if not pr_body:
42 fail("Body for the PR not found. " "Maybe missing PR_BODY env var.")
43
44 title_search = re.search(r"^(#\d+) .+", pr_title)
45 if not title_search:
46 fail(
47 "Title of PR has no issue ID reference. It must look like “#1234 Foo bar baz”."
48 )
49 else:
50 print(f"PR title is complaint for {title_search[1]}. Good job.")
51
52
53 body_search = re.search(r".*Fixes (#\d+).+", pr_body)
54 if not body_search:
55 fail('Body of PR has no "Fixes #12345" issue ID reference.')
56 else:
57 print(f"PR description is complaint for {body_search[1]}. Good job.")
58
59
60 if title_search[1] != body_search[1]:
61 fail("PR title and description have different IDs.")
62
63 # All good.
64 sys.exit(0)
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/.github/scripts/check-pr-text.py b/.github/scripts/check-pr-text.py
--- a/.github/scripts/check-pr-text.py
+++ b/.github/scripts/check-pr-text.py
@@ -41,7 +41,7 @@
if not pr_body:
fail("Body for the PR not found. " "Maybe missing PR_BODY env var.")
-title_search = re.search(r"^(#\d+) .+", pr_title)
+title_search = re.search(r"^(#\d+):? .+", pr_title)
if not title_search:
fail(
"Title of PR has no issue ID reference. It must look like “#1234 Foo bar baz”."
| {"golden_diff": "diff --git a/.github/scripts/check-pr-text.py b/.github/scripts/check-pr-text.py\n--- a/.github/scripts/check-pr-text.py\n+++ b/.github/scripts/check-pr-text.py\n@@ -41,7 +41,7 @@\n if not pr_body:\n fail(\"Body for the PR not found. \" \"Maybe missing PR_BODY env var.\")\n \n-title_search = re.search(r\"^(#\\d+) .+\", pr_title)\n+title_search = re.search(r\"^(#\\d+):? .+\", pr_title)\n if not title_search:\n fail(\n \"Title of PR has no issue ID reference. It must look like \u201c#1234 Foo bar baz\u201d.\"\n", "issue": ".hypothesis should be in .gitignore\n**Describe the incorrect behavior you saw**\r\n\r\n`git diff` shows me an untracked `.hypothesis` directory.\r\n\r\n**Describe how to cause this behavior**\r\n\r\nI ran the tests.\r\n\r\n**Describe the correct behavior you'd like to see**\r\n\r\n`.hypothesis` [shouldn't be checked in](https://hypothesis.readthedocs.io/en/latest/database.html#the-hypothesis-example-database), so it should be ignored by `git`.\n", "before_files": [{"content": "#\n# This script is designed to be called by the GHA workflow.\n#\n# It is designed to check that the PR text complies to our dev standards.\n#\n# The input is received via the environmet variables:\n# * PR_TITLE - title of the PR\n# * PR_BODY - the description of the PR\n#\n# To test it run\n#\n# $ export PR_TITLE='#1234 Test Title'\n# $ export PR_BODY='some lines\n# > Fixes #12345\n# > more lines'\n# $ python3 .github/scripts/check-pr-text.py\n#\nimport os\nimport re\nimport sys\n\npr_title = os.environ.get(\"PR_TITLE\", \"\")\npr_body = os.environ.get(\"PR_BODY\", \"\")\n\nprint(\"--- DEBUG ---\")\nprint(f\"Title: {pr_title}\")\nprint(f\"Body:\\n {pr_body}\")\nprint(\"-------------\")\n\n\ndef fail(message):\n print(message)\n print(\"Fix the title and then trigger a new push.\")\n print(\"A re-run for this job will not work.\")\n sys.exit(1)\n\n\nif not pr_title:\n fail(\"Title for the PR not found. \" \"Maybe missing PR_TITLE env var.\")\n\nif not pr_body:\n fail(\"Body for the PR not found. \" \"Maybe missing PR_BODY env var.\")\n\ntitle_search = re.search(r\"^(#\\d+) .+\", pr_title)\nif not title_search:\n fail(\n \"Title of PR has no issue ID reference. It must look like \u201c#1234 Foo bar baz\u201d.\"\n )\nelse:\n print(f\"PR title is complaint for {title_search[1]}. Good job.\")\n\n\nbody_search = re.search(r\".*Fixes (#\\d+).+\", pr_body)\nif not body_search:\n fail('Body of PR has no \"Fixes #12345\" issue ID reference.')\nelse:\n print(f\"PR description is complaint for {body_search[1]}. 
Good job.\")\n\n\nif title_search[1] != body_search[1]:\n fail(\"PR title and description have different IDs.\")\n\n# All good.\nsys.exit(0)\n", "path": ".github/scripts/check-pr-text.py"}], "after_files": [{"content": "#\n# This script is designed to be called by the GHA workflow.\n#\n# It is designed to check that the PR text complies to our dev standards.\n#\n# The input is received via the environmet variables:\n# * PR_TITLE - title of the PR\n# * PR_BODY - the description of the PR\n#\n# To test it run\n#\n# $ export PR_TITLE='#1234 Test Title'\n# $ export PR_BODY='some lines\n# > Fixes #12345\n# > more lines'\n# $ python3 .github/scripts/check-pr-text.py\n#\nimport os\nimport re\nimport sys\n\npr_title = os.environ.get(\"PR_TITLE\", \"\")\npr_body = os.environ.get(\"PR_BODY\", \"\")\n\nprint(\"--- DEBUG ---\")\nprint(f\"Title: {pr_title}\")\nprint(f\"Body:\\n {pr_body}\")\nprint(\"-------------\")\n\n\ndef fail(message):\n print(message)\n print(\"Fix the title and then trigger a new push.\")\n print(\"A re-run for this job will not work.\")\n sys.exit(1)\n\n\nif not pr_title:\n fail(\"Title for the PR not found. \" \"Maybe missing PR_TITLE env var.\")\n\nif not pr_body:\n fail(\"Body for the PR not found. \" \"Maybe missing PR_BODY env var.\")\n\ntitle_search = re.search(r\"^(#\\d+):? .+\", pr_title)\nif not title_search:\n fail(\n \"Title of PR has no issue ID reference. It must look like \u201c#1234 Foo bar baz\u201d.\"\n )\nelse:\n print(f\"PR title is complaint for {title_search[1]}. Good job.\")\n\n\nbody_search = re.search(r\".*Fixes (#\\d+).+\", pr_body)\nif not body_search:\n fail('Body of PR has no \"Fixes #12345\" issue ID reference.')\nelse:\n print(f\"PR description is complaint for {body_search[1]}. Good job.\")\n\n\nif title_search[1] != body_search[1]:\n fail(\"PR title and description have different IDs.\")\n\n# All good.\nsys.exit(0)\n", "path": ".github/scripts/check-pr-text.py"}]} | 942 | 146 |
gh_patches_debug_391 | rasdani/github-patches | git_diff | getmoto__moto-1992 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace pyaml dependency with PyYAML
There is a dependency on pyaml in setup.py:
https://github.com/spulec/moto/blob/master/setup.py#L18
I think that this is intended to be PyYAML (which pyaml depends on), and I do not see any usages of pyaml itself in this codebase.
pyaml uses WTFPL (https://github.com/mk-fg/pretty-yaml/blob/master/COPYING) which is not approved by the OSI (https://opensource.org/minutes20090304)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 from __future__ import unicode_literals
3 import setuptools
4 from setuptools import setup, find_packages
5 import sys
6
7
8 install_requires = [
9 "Jinja2>=2.7.3",
10 "boto>=2.36.0",
11 "boto3>=1.6.16",
12 "botocore>=1.12.13",
13 "cryptography>=2.3.0",
14 "requests>=2.5",
15 "xmltodict",
16 "six>1.9",
17 "werkzeug",
18 "pyaml",
19 "pytz",
20 "python-dateutil<3.0.0,>=2.1",
21 "python-jose<3.0.0",
22 "mock",
23 "docker>=2.5.1",
24 "jsondiff==1.1.1",
25 "aws-xray-sdk!=0.96,>=0.93",
26 "responses>=0.9.0",
27 ]
28
29 extras_require = {
30 'server': ['flask'],
31 }
32
33 # https://hynek.me/articles/conditional-python-dependencies/
34 if int(setuptools.__version__.split(".", 1)[0]) < 18:
35 if sys.version_info[0:2] < (3, 3):
36 install_requires.append("backports.tempfile")
37 else:
38 extras_require[":python_version<'3.3'"] = ["backports.tempfile"]
39
40
41 setup(
42 name='moto',
43 version='1.3.7',
44 description='A library that allows your python tests to easily'
45 ' mock out the boto library',
46 author='Steve Pulec',
47 author_email='[email protected]',
48 url='https://github.com/spulec/moto',
49 entry_points={
50 'console_scripts': [
51 'moto_server = moto.server:main',
52 ],
53 },
54 packages=find_packages(exclude=("tests", "tests.*")),
55 install_requires=install_requires,
56 extras_require=extras_require,
57 include_package_data=True,
58 license="Apache",
59 test_suite="tests",
60 classifiers=[
61 "Programming Language :: Python :: 2",
62 "Programming Language :: Python :: 2.7",
63 "Programming Language :: Python :: 3",
64 "Programming Language :: Python :: 3.3",
65 "Programming Language :: Python :: 3.4",
66 "Programming Language :: Python :: 3.5",
67 "Programming Language :: Python :: 3.6",
68 "License :: OSI Approved :: Apache Software License",
69 "Topic :: Software Development :: Testing",
70 ],
71 )
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,7 @@
"xmltodict",
"six>1.9",
"werkzeug",
- "pyaml",
+ "PyYAML",
"pytz",
"python-dateutil<3.0.0,>=2.1",
"python-jose<3.0.0",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -15,7 +15,7 @@\n \"xmltodict\",\n \"six>1.9\",\n \"werkzeug\",\n- \"pyaml\",\n+ \"PyYAML\",\n \"pytz\",\n \"python-dateutil<3.0.0,>=2.1\",\n \"python-jose<3.0.0\",\n", "issue": "Replace pyaml dependency with PyYAML\nThere is a dependency on pyaml in setup.py:\r\n\r\nhttps://github.com/spulec/moto/blob/master/setup.py#L18\r\n\r\nI think that this is intended to be PyYAML (which pyaml depends on), and I do not see any usages of pyaml itself in this codebase.\r\n\r\npyaml uses WTFPL (https://github.com/mk-fg/pretty-yaml/blob/master/COPYING) which is not approved by the OSI (https://opensource.org/minutes20090304)\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nimport setuptools\nfrom setuptools import setup, find_packages\nimport sys\n\n\ninstall_requires = [\n \"Jinja2>=2.7.3\",\n \"boto>=2.36.0\",\n \"boto3>=1.6.16\",\n \"botocore>=1.12.13\",\n \"cryptography>=2.3.0\",\n \"requests>=2.5\",\n \"xmltodict\",\n \"six>1.9\",\n \"werkzeug\",\n \"pyaml\",\n \"pytz\",\n \"python-dateutil<3.0.0,>=2.1\",\n \"python-jose<3.0.0\",\n \"mock\",\n \"docker>=2.5.1\",\n \"jsondiff==1.1.1\",\n \"aws-xray-sdk!=0.96,>=0.93\",\n \"responses>=0.9.0\",\n]\n\nextras_require = {\n 'server': ['flask'],\n}\n\n# https://hynek.me/articles/conditional-python-dependencies/\nif int(setuptools.__version__.split(\".\", 1)[0]) < 18:\n if sys.version_info[0:2] < (3, 3):\n install_requires.append(\"backports.tempfile\")\nelse:\n extras_require[\":python_version<'3.3'\"] = [\"backports.tempfile\"]\n\n\nsetup(\n name='moto',\n version='1.3.7',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='[email protected]',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n extras_require=extras_require,\n include_package_data=True,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nimport setuptools\nfrom setuptools import setup, find_packages\nimport sys\n\n\ninstall_requires = [\n \"Jinja2>=2.7.3\",\n \"boto>=2.36.0\",\n \"boto3>=1.6.16\",\n \"botocore>=1.12.13\",\n \"cryptography>=2.3.0\",\n \"requests>=2.5\",\n \"xmltodict\",\n \"six>1.9\",\n \"werkzeug\",\n \"PyYAML\",\n \"pytz\",\n \"python-dateutil<3.0.0,>=2.1\",\n \"python-jose<3.0.0\",\n \"mock\",\n \"docker>=2.5.1\",\n \"jsondiff==1.1.1\",\n \"aws-xray-sdk!=0.96,>=0.93\",\n \"responses>=0.9.0\",\n]\n\nextras_require = {\n 'server': ['flask'],\n}\n\n# https://hynek.me/articles/conditional-python-dependencies/\nif int(setuptools.__version__.split(\".\", 1)[0]) < 18:\n if sys.version_info[0:2] < (3, 3):\n install_requires.append(\"backports.tempfile\")\nelse:\n extras_require[\":python_version<'3.3'\"] = [\"backports.tempfile\"]\n\n\nsetup(\n 
name='moto',\n version='1.3.7',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='[email protected]',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n extras_require=extras_require,\n include_package_data=True,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n", "path": "setup.py"}]} | 1,070 | 98 |
gh_patches_debug_24229 | rasdani/github-patches | git_diff | streamlink__streamlink-2160 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Skai plugin broken
<!--
Thanks for reporting a plugin issue!
USE THE TEMPLATE. Otherwise your plugin issue may be rejected.
First, see the contribution guidelines:
https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink
Also check the list of open and closed plugin issues:
https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22
Please see the text preview to avoid unnecessary formatting errors.
-->
## Plugin Issue
- [x] This is a plugin issue and I have read the contribution guidelines.
### Description
Skai plugin is broken since yesterday, but actually it is no longer needed because they provide a lot more stable stream (they don't change stream three or so times a day). **Imho it can be removed.**
New live url as follows:
http://www.skaitv.gr/live
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/skai.py`
Content:
```
1 import re
2
3 from streamlink.plugin import Plugin
4 from streamlink.plugin.api import validate
5
6 YOUTUBE_URL = "https://www.youtube.com/watch?v={0}"
7 _url_re = re.compile(r'http(s)?://www\.skai.gr/.*')
8 _youtube_id = re.compile(r'<span\s+itemprop="contentUrl"\s+href="(.*)"></span>', re.MULTILINE)
9 _youtube_url_schema = validate.Schema(
10 validate.all(
11 validate.transform(_youtube_id.search),
12 validate.any(
13 None,
14 validate.all(
15 validate.get(1),
16 validate.text
17 )
18 )
19 )
20 )
21
22
23 class Skai(Plugin):
24 @classmethod
25 def can_handle_url(cls, url):
26 return _url_re.match(url)
27
28 def _get_streams(self):
29 channel_id = self.session.http.get(self.url, schema=_youtube_url_schema)
30 if channel_id:
31 return self.session.streams(YOUTUBE_URL.format(channel_id))
32
33
34 __plugin__ = Skai
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/skai.py b/src/streamlink/plugins/skai.py
--- a/src/streamlink/plugins/skai.py
+++ b/src/streamlink/plugins/skai.py
@@ -3,20 +3,15 @@
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
-YOUTUBE_URL = "https://www.youtube.com/watch?v={0}"
-_url_re = re.compile(r'http(s)?://www\.skai.gr/.*')
-_youtube_id = re.compile(r'<span\s+itemprop="contentUrl"\s+href="(.*)"></span>', re.MULTILINE)
-_youtube_url_schema = validate.Schema(
- validate.all(
- validate.transform(_youtube_id.search),
- validate.any(
- None,
- validate.all(
- validate.get(1),
- validate.text
- )
- )
- )
+
+_url_re = re.compile(r'http(s)?://www\.skai(?:tv)?.gr/.*')
+_api_url = "http://www.skaitv.gr/json/live.php"
+_api_res_schema = validate.Schema(validate.all(
+ validate.get("now"),
+ {
+ "livestream": validate.url()
+ },
+ validate.get("livestream"))
)
@@ -26,9 +21,10 @@
return _url_re.match(url)
def _get_streams(self):
- channel_id = self.session.http.get(self.url, schema=_youtube_url_schema)
- if channel_id:
- return self.session.streams(YOUTUBE_URL.format(channel_id))
+ api_res = self.session.http.get(_api_url)
+ yt_url = self.session.http.json(api_res, schema=_api_res_schema)
+ if yt_url:
+ return self.session.streams(yt_url)
__plugin__ = Skai
| {"golden_diff": "diff --git a/src/streamlink/plugins/skai.py b/src/streamlink/plugins/skai.py\n--- a/src/streamlink/plugins/skai.py\n+++ b/src/streamlink/plugins/skai.py\n@@ -3,20 +3,15 @@\n from streamlink.plugin import Plugin\n from streamlink.plugin.api import validate\n \n-YOUTUBE_URL = \"https://www.youtube.com/watch?v={0}\"\n-_url_re = re.compile(r'http(s)?://www\\.skai.gr/.*')\n-_youtube_id = re.compile(r'<span\\s+itemprop=\"contentUrl\"\\s+href=\"(.*)\"></span>', re.MULTILINE)\n-_youtube_url_schema = validate.Schema(\n- validate.all(\n- validate.transform(_youtube_id.search),\n- validate.any(\n- None,\n- validate.all(\n- validate.get(1),\n- validate.text\n- )\n- )\n- )\n+\n+_url_re = re.compile(r'http(s)?://www\\.skai(?:tv)?.gr/.*')\n+_api_url = \"http://www.skaitv.gr/json/live.php\"\n+_api_res_schema = validate.Schema(validate.all(\n+ validate.get(\"now\"),\n+ {\n+ \"livestream\": validate.url()\n+ },\n+ validate.get(\"livestream\"))\n )\n \n \n@@ -26,9 +21,10 @@\n return _url_re.match(url)\n \n def _get_streams(self):\n- channel_id = self.session.http.get(self.url, schema=_youtube_url_schema)\n- if channel_id:\n- return self.session.streams(YOUTUBE_URL.format(channel_id))\n+ api_res = self.session.http.get(_api_url)\n+ yt_url = self.session.http.json(api_res, schema=_api_res_schema)\n+ if yt_url:\n+ return self.session.streams(yt_url)\n \n \n __plugin__ = Skai\n", "issue": "Skai plugin broken\n<!--\r\nThanks for reporting a plugin issue!\r\nUSE THE TEMPLATE. Otherwise your plugin issue may be rejected.\r\n\r\nFirst, see the contribution guidelines:\r\nhttps://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink\r\n\r\nAlso check the list of open and closed plugin issues:\r\nhttps://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22\r\n\r\nPlease see the text preview to avoid unnecessary formatting errors.\r\n-->\r\n\r\n\r\n## Plugin Issue\r\n\r\n- [x] This is a plugin issue and I have read the contribution guidelines.\r\n\r\n\r\n### Description\r\n\r\nSkai plugin is broken since yesterday, but actually it is no longer needed because they provide a lot more stable stream (they don't change stream three or so times a day). 
**Imho it can be removed.**\r\n\r\nNew live url as follows:\r\n\r\nhttp://www.skaitv.gr/live\r\n\n", "before_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import validate\n\nYOUTUBE_URL = \"https://www.youtube.com/watch?v={0}\"\n_url_re = re.compile(r'http(s)?://www\\.skai.gr/.*')\n_youtube_id = re.compile(r'<span\\s+itemprop=\"contentUrl\"\\s+href=\"(.*)\"></span>', re.MULTILINE)\n_youtube_url_schema = validate.Schema(\n validate.all(\n validate.transform(_youtube_id.search),\n validate.any(\n None,\n validate.all(\n validate.get(1),\n validate.text\n )\n )\n )\n)\n\n\nclass Skai(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n channel_id = self.session.http.get(self.url, schema=_youtube_url_schema)\n if channel_id:\n return self.session.streams(YOUTUBE_URL.format(channel_id))\n\n\n__plugin__ = Skai\n", "path": "src/streamlink/plugins/skai.py"}], "after_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import validate\n\n\n_url_re = re.compile(r'http(s)?://www\\.skai(?:tv)?.gr/.*')\n_api_url = \"http://www.skaitv.gr/json/live.php\"\n_api_res_schema = validate.Schema(validate.all(\n validate.get(\"now\"),\n {\n \"livestream\": validate.url()\n },\n validate.get(\"livestream\"))\n)\n\n\nclass Skai(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n api_res = self.session.http.get(_api_url)\n yt_url = self.session.http.json(api_res, schema=_api_res_schema)\n if yt_url:\n return self.session.streams(yt_url)\n\n\n__plugin__ = Skai\n", "path": "src/streamlink/plugins/skai.py"}]} | 735 | 400 |
gh_patches_debug_48579 | rasdani/github-patches | git_diff | openai__gym-1730 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sampling Bug
Gym Version: 0.15.3
issue: Box samples numbers above the `high` parameter.
```
from gym.spaces import Box
observation_space = Box(low=-3, high=-1, shape=(9,), dtype='int')
print(observation_space.sample())
>> [ 0 -2 0 -2 0 -1 0 -2 0]
```
The current implementation samples float numbers from uniform distribution of [`low`, `high`] and then converts the resulting samples to desired `dtype`. This runs into the problem of sampling `low` parameter very rarely(and not uniformly) when `dtype` is `int`(as we are converting the floats back to int which results in ceil operation in case of negative numbers) i.e in the above example -3 is almost never sampled as most of the low sampled floats like -2.85, -2.9 get converted to -2.
https://github.com/openai/gym/blob/0cd9266d986d470ed9c0dd87a41cd680b65cfe1c/gym/spaces/box.py#L93-L97
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gym/spaces/box.py`
Content:
```
1 import numpy as np
2
3 from .space import Space
4
5
6 class Box(Space):
7 """
8 A (possibly unbounded) box in R^n. Specifically, a Box represents the
9 Cartesian product of n closed intervals. Each interval has the form of one
10 of [a, b], (-oo, b], [a, oo), or (-oo, oo).
11
12 There are two common use cases:
13
14 * Identical bound for each dimension::
15 >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
16 Box(3, 4)
17
18 * Independent bound for each dimension::
19 >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)
20 Box(2,)
21
22 """
23 def __init__(self, low, high, shape=None, dtype=np.float32):
24 assert dtype is not None, 'dtype must be explicitly provided. '
25 self.dtype = np.dtype(dtype)
26
27 if shape is None:
28 assert low.shape == high.shape, 'box dimension mismatch. '
29 self.shape = low.shape
30 self.low = low
31 self.high = high
32 else:
33 assert np.isscalar(low) and np.isscalar(high), 'box requires scalar bounds. '
34 self.shape = tuple(shape)
35 self.low = np.full(self.shape, low)
36 self.high = np.full(self.shape, high)
37
38 self.low = self.low.astype(self.dtype)
39 self.high = self.high.astype(self.dtype)
40
41 # Boolean arrays which indicate the interval type for each coordinate
42 self.bounded_below = -np.inf < self.low
43 self.bounded_above = np.inf > self.high
44
45 super(Box, self).__init__(self.shape, self.dtype)
46
47 def is_bounded(self, manner="both"):
48 below = np.all(self.bounded_below)
49 above = np.all(self.bounded_above)
50 if manner == "both":
51 return below and above
52 elif manner == "below":
53 return below
54 elif manner == "above":
55 return above
56 else:
57 raise ValueError("manner is not in {'below', 'above', 'both'}")
58
59 def sample(self):
60 """
61 Generates a single random sample inside of the Box.
62
63 In creating a sample of the box, each coordinate is sampled according to
64 the form of the interval:
65
66 * [a, b] : uniform distribution
67 * [a, oo) : shifted exponential distribution
68 * (-oo, b] : shifted negative exponential distribution
69 * (-oo, oo) : normal distribution
70 """
71 high = self.high if self.dtype.kind == 'f' \
72 else self.high.astype('int64') + 1
73 sample = np.empty(self.shape)
74
75 # Masking arrays which classify the coordinates according to interval
76 # type
77 unbounded = ~self.bounded_below & ~self.bounded_above
78 upp_bounded = ~self.bounded_below & self.bounded_above
79 low_bounded = self.bounded_below & ~self.bounded_above
80 bounded = self.bounded_below & self.bounded_above
81
82
83 # Vectorized sampling by interval type
84 sample[unbounded] = self.np_random.normal(
85 size=unbounded[unbounded].shape)
86
87 sample[low_bounded] = self.np_random.exponential(
88 size=low_bounded[low_bounded].shape) + self.low[low_bounded]
89
90 sample[upp_bounded] = -self.np_random.exponential(
91 size=upp_bounded[upp_bounded].shape) - self.high[upp_bounded]
92
93 sample[bounded] = self.np_random.uniform(low=self.low[bounded],
94 high=high[bounded],
95 size=bounded[bounded].shape)
96
97 return sample.astype(self.dtype)
98
99 def contains(self, x):
100 if isinstance(x, list):
101 x = np.array(x) # Promote list to array for contains check
102 return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)
103
104 def to_jsonable(self, sample_n):
105 return np.array(sample_n).tolist()
106
107 def from_jsonable(self, sample_n):
108 return [np.asarray(sample) for sample in sample_n]
109
110 def __repr__(self):
111 return "Box" + str(self.shape)
112
113 def __eq__(self, other):
114 return isinstance(other, Box) and (self.shape == other.shape) and np.allclose(self.low, other.low) and np.allclose(self.high, other.high)
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gym/spaces/box.py b/gym/spaces/box.py
--- a/gym/spaces/box.py
+++ b/gym/spaces/box.py
@@ -93,6 +93,8 @@
sample[bounded] = self.np_random.uniform(low=self.low[bounded],
high=high[bounded],
size=bounded[bounded].shape)
+ if self.dtype.kind == 'i':
+ sample = np.floor(sample)
return sample.astype(self.dtype)
| {"golden_diff": "diff --git a/gym/spaces/box.py b/gym/spaces/box.py\n--- a/gym/spaces/box.py\n+++ b/gym/spaces/box.py\n@@ -93,6 +93,8 @@\n sample[bounded] = self.np_random.uniform(low=self.low[bounded], \n high=high[bounded],\n size=bounded[bounded].shape)\n+ if self.dtype.kind == 'i':\n+ sample = np.floor(sample)\n \n return sample.astype(self.dtype)\n", "issue": "Sampling Bug\nGym Version: 0.15.3\r\nissue: Box samples numbers above the `high` parameter.\r\n\r\n```\r\nfrom gym.spaces import Box\r\nobservation_space = Box(low=-3, high=-1, shape=(9,), dtype='int')\r\nprint(observation_space.sample())\r\n>> [ 0 -2 0 -2 0 -1 0 -2 0]\r\n```\r\nThe current implementation samples float numbers from uniform distribution of [`low`, `high`] and then converts the resulting samples to desired `dtype`. This runs into the problem of sampling `low` parameter very rarely(and not uniformly) when `dtype` is `int`(as we are converting the floats back to int which results in ceil operation in case of negative numbers) i.e in the above example -3 is almost never sampled as most of the low sampled floats like -2.85, -2.9 get converted to -2.\r\nhttps://github.com/openai/gym/blob/0cd9266d986d470ed9c0dd87a41cd680b65cfe1c/gym/spaces/box.py#L93-L97\r\n\n", "before_files": [{"content": "import numpy as np\n\nfrom .space import Space\n\n\nclass Box(Space):\n \"\"\"\n A (possibly unbounded) box in R^n. Specifically, a Box represents the\n Cartesian product of n closed intervals. Each interval has the form of one\n of [a, b], (-oo, b], [a, oo), or (-oo, oo).\n \n There are two common use cases:\n \n * Identical bound for each dimension::\n >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)\n Box(3, 4)\n \n * Independent bound for each dimension::\n >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)\n Box(2,)\n\n \"\"\"\n def __init__(self, low, high, shape=None, dtype=np.float32):\n assert dtype is not None, 'dtype must be explicitly provided. '\n self.dtype = np.dtype(dtype)\n\n if shape is None:\n assert low.shape == high.shape, 'box dimension mismatch. '\n self.shape = low.shape\n self.low = low\n self.high = high\n else:\n assert np.isscalar(low) and np.isscalar(high), 'box requires scalar bounds. '\n self.shape = tuple(shape)\n self.low = np.full(self.shape, low)\n self.high = np.full(self.shape, high)\n\n self.low = self.low.astype(self.dtype)\n self.high = self.high.astype(self.dtype)\n\n # Boolean arrays which indicate the interval type for each coordinate\n self.bounded_below = -np.inf < self.low\n self.bounded_above = np.inf > self.high\n\n super(Box, self).__init__(self.shape, self.dtype)\n\n def is_bounded(self, manner=\"both\"):\n below = np.all(self.bounded_below)\n above = np.all(self.bounded_above)\n if manner == \"both\":\n return below and above\n elif manner == \"below\":\n return below\n elif manner == \"above\":\n return above\n else:\n raise ValueError(\"manner is not in {'below', 'above', 'both'}\")\n\n def sample(self):\n \"\"\"\n Generates a single random sample inside of the Box. 
\n\n In creating a sample of the box, each coordinate is sampled according to\n the form of the interval:\n \n * [a, b] : uniform distribution \n * [a, oo) : shifted exponential distribution\n * (-oo, b] : shifted negative exponential distribution\n * (-oo, oo) : normal distribution\n \"\"\"\n high = self.high if self.dtype.kind == 'f' \\\n else self.high.astype('int64') + 1\n sample = np.empty(self.shape)\n\n # Masking arrays which classify the coordinates according to interval\n # type\n unbounded = ~self.bounded_below & ~self.bounded_above\n upp_bounded = ~self.bounded_below & self.bounded_above\n low_bounded = self.bounded_below & ~self.bounded_above\n bounded = self.bounded_below & self.bounded_above\n \n\n # Vectorized sampling by interval type\n sample[unbounded] = self.np_random.normal(\n size=unbounded[unbounded].shape)\n\n sample[low_bounded] = self.np_random.exponential(\n size=low_bounded[low_bounded].shape) + self.low[low_bounded]\n \n sample[upp_bounded] = -self.np_random.exponential(\n size=upp_bounded[upp_bounded].shape) - self.high[upp_bounded]\n \n sample[bounded] = self.np_random.uniform(low=self.low[bounded], \n high=high[bounded],\n size=bounded[bounded].shape)\n\n return sample.astype(self.dtype)\n \n def contains(self, x):\n if isinstance(x, list):\n x = np.array(x) # Promote list to array for contains check\n return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)\n\n def to_jsonable(self, sample_n):\n return np.array(sample_n).tolist()\n\n def from_jsonable(self, sample_n):\n return [np.asarray(sample) for sample in sample_n]\n\n def __repr__(self):\n return \"Box\" + str(self.shape)\n\n def __eq__(self, other):\n return isinstance(other, Box) and (self.shape == other.shape) and np.allclose(self.low, other.low) and np.allclose(self.high, other.high)\n", "path": "gym/spaces/box.py"}], "after_files": [{"content": "import numpy as np\n\nfrom .space import Space\n\n\nclass Box(Space):\n \"\"\"\n A (possibly unbounded) box in R^n. Specifically, a Box represents the\n Cartesian product of n closed intervals. Each interval has the form of one\n of [a, b], (-oo, b], [a, oo), or (-oo, oo).\n \n There are two common use cases:\n \n * Identical bound for each dimension::\n >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)\n Box(3, 4)\n \n * Independent bound for each dimension::\n >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)\n Box(2,)\n\n \"\"\"\n def __init__(self, low, high, shape=None, dtype=np.float32):\n assert dtype is not None, 'dtype must be explicitly provided. '\n self.dtype = np.dtype(dtype)\n\n if shape is None:\n assert low.shape == high.shape, 'box dimension mismatch. '\n self.shape = low.shape\n self.low = low\n self.high = high\n else:\n assert np.isscalar(low) and np.isscalar(high), 'box requires scalar bounds. 
'\n self.shape = tuple(shape)\n self.low = np.full(self.shape, low)\n self.high = np.full(self.shape, high)\n\n self.low = self.low.astype(self.dtype)\n self.high = self.high.astype(self.dtype)\n\n # Boolean arrays which indicate the interval type for each coordinate\n self.bounded_below = -np.inf < self.low\n self.bounded_above = np.inf > self.high\n\n super(Box, self).__init__(self.shape, self.dtype)\n\n def is_bounded(self, manner=\"both\"):\n below = np.all(self.bounded_below)\n above = np.all(self.bounded_above)\n if manner == \"both\":\n return below and above\n elif manner == \"below\":\n return below\n elif manner == \"above\":\n return above\n else:\n raise ValueError(\"manner is not in {'below', 'above', 'both'}\")\n\n def sample(self):\n \"\"\"\n Generates a single random sample inside of the Box. \n\n In creating a sample of the box, each coordinate is sampled according to\n the form of the interval:\n \n * [a, b] : uniform distribution \n * [a, oo) : shifted exponential distribution\n * (-oo, b] : shifted negative exponential distribution\n * (-oo, oo) : normal distribution\n \"\"\"\n high = self.high if self.dtype.kind == 'f' \\\n else self.high.astype('int64') + 1\n sample = np.empty(self.shape)\n\n # Masking arrays which classify the coordinates according to interval\n # type\n unbounded = ~self.bounded_below & ~self.bounded_above\n upp_bounded = ~self.bounded_below & self.bounded_above\n low_bounded = self.bounded_below & ~self.bounded_above\n bounded = self.bounded_below & self.bounded_above\n \n\n # Vectorized sampling by interval type\n sample[unbounded] = self.np_random.normal(\n size=unbounded[unbounded].shape)\n\n sample[low_bounded] = self.np_random.exponential(\n size=low_bounded[low_bounded].shape) + self.low[low_bounded]\n \n sample[upp_bounded] = -self.np_random.exponential(\n size=upp_bounded[upp_bounded].shape) - self.high[upp_bounded]\n \n sample[bounded] = self.np_random.uniform(low=self.low[bounded], \n high=high[bounded],\n size=bounded[bounded].shape)\n if self.dtype.kind == 'i':\n sample = np.floor(sample)\n\n return sample.astype(self.dtype)\n \n def contains(self, x):\n if isinstance(x, list):\n x = np.array(x) # Promote list to array for contains check\n return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)\n\n def to_jsonable(self, sample_n):\n return np.array(sample_n).tolist()\n\n def from_jsonable(self, sample_n):\n return [np.asarray(sample) for sample in sample_n]\n\n def __repr__(self):\n return \"Box\" + str(self.shape)\n\n def __eq__(self, other):\n return isinstance(other, Box) and (self.shape == other.shape) and np.allclose(self.low, other.low) and np.allclose(self.high, other.high)\n", "path": "gym/spaces/box.py"}]} | 1,774 | 112 |
gh_patches_debug_26366 | rasdani/github-patches | git_diff | chainer__chainer-510 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
numerical_grad caution
I was implementing a differentiable Transpose function.
```
class Transpose(Function):
def forward(self, inputs):
x = inputs[0]
return x.transpose(),
def backward(self, inputs, grads):
return grads[0].transpose(),
```
While testing the gradient with numerical_grad,
```
def test_numerical_grad_cpu(self):
x = np.random.randn(1, 10)
x_var = Variable(x)
y_var = self.f(x_var)
y_var.grad = np.random.rand(10, 1)
y_var.backward()
cl = lambda: self.f.forward((x,))
gx, = gradient_check.numerical_grad(cl, (x,), (y_var.grad,))
gradient_check.assert_allclose(gx, x_var.grad)
```
(here `self.f = Transpose()`)
the numerical gradient `gx` keeps coming back as 0. After much frustration, I finally figured out that I was returning a view of `x` in the above code, and in `numerical_grad_cpu`,
```
flat_x[i] = orig + eps
ys1 = f()
flat_x[i] = orig - eps
ys2 = f()
flat_x[i] = orig
```
`ys1` and `ys2` end up being equal after the last line resetting `flat_x[i]` to the original value. I solved my problem by changing `cl = lambda: self.f.forward((x,))` to `cl = lambda: np.copy(self.f.forward((x,)))`.
I'm not sure how frequent this phenomenon could occur outside of transpose, but I just wanted to put this out here so that there could be a discussion. Perhaps a passing note in the documentation suffices here. Or doing `ys1 = np.copy(f())` instead might work as well.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/gradient_check.py`
Content:
```
1 import numpy
2 import six
3
4 from chainer import cuda
5 from chainer import utils
6
7
8 def numerical_grad_cpu(f, inputs, grad_outputs, eps=1e-3):
9 grads = tuple(numpy.zeros_like(x) for x in inputs)
10 for x, gx in zip(inputs, grads):
11 flat_x = x.ravel()
12 flat_gx = gx.ravel()
13 for i in six.moves.range(flat_x.size):
14 orig = flat_x[i]
15 flat_x[i] = orig + eps
16 ys1 = f()
17 flat_x[i] = orig - eps
18 ys2 = f()
19 flat_x[i] = orig
20
21 for y1, y2, gy in zip(ys1, ys2, grad_outputs):
22 if gy is not None:
23 dot = float(sum(((y1 - y2) * gy).ravel()))
24 flat_gx[i] += dot / (2 * eps)
25
26 return grads
27
28
29 def numerical_grad_gpu(f, inputs, grad_outputs, eps=1e-3):
30 grads = tuple(cuda.zeros_like(x) for x in inputs)
31 for x, gx in zip(inputs, grads):
32 x = x.ravel()
33 gx = gx.ravel()
34 x_cpu = x.get()
35 gx_cpu = gx.get()
36 for i in six.moves.range(x_cpu.size):
37 orig = x_cpu[i]
38 x_cpu[i] = orig + eps
39 x.set(x_cpu)
40 ys1 = f()
41 x_cpu[i] = orig - eps
42 x.set(x_cpu)
43 ys2 = f()
44 x_cpu[i] = orig
45 x.set(x_cpu)
46
47 for y1, y2, gy in zip(ys1, ys2, grad_outputs):
48 if gy is not None:
49 dot = sum(((y1 - y2) * gy).ravel()).get()
50 gx_cpu[i] += dot / (2 * eps)
51 gx.set(gx_cpu)
52
53 return grads
54
55
56 def numerical_grad(f, inputs, grad_outputs, eps=1e-3):
57 """Computes numerical gradient by finite differences.
58
59 This function is used to implement gradient check. For usage example, see
60 unit tests of :mod:`chainer.functions`.
61
62 Args:
63 f (function): Python function with no arguments that runs forward
64 computation and returns the result.
65 inputs (tuple of arrays): Tuple of arrays that should be treated as
66 inputs. Each element of them is slightly modified to realize
67 numerical gradient by finite differences.
68 grad_outputs (tuple of arrays): Tuple of arrays that are treated as
69 output gradients.
70 eps (float): Epsilon value of finite differences.
71
72 Returns:
73 tuple: Numerical gradient arrays corresponding to ``inputs``.
74
75 """
76 assert eps > 0
77 inputs = tuple(inputs)
78 grad_outputs = tuple(grad_outputs)
79 gpu = any(isinstance(x, cuda.ndarray) for x in inputs + grad_outputs)
80
81 cpu = any(isinstance(x, numpy.ndarray) for x in inputs + grad_outputs)
82
83 if gpu and cpu:
84 raise RuntimeError('Do not mix GPU and CPU arrays in `numerical_grad`')
85 elif gpu:
86 return numerical_grad_gpu(f, inputs, grad_outputs, eps)
87 else:
88 return numerical_grad_cpu(f, inputs, grad_outputs, eps)
89
90
91 def assert_allclose(x, y, atol=1e-5, rtol=1e-4, verbose=True):
92 """Asserts if some corresponding element of x and y differs too much.
93
94 This function can handle both CPU and GPU arrays simultaneously.
95
96 Args:
97 x: Left-hand-side array.
98 y: Right-hand-side array.
99 atol (float): Absolute tolerance.
100 rtol (float): Relative tolerance.
101 verbose (bool): If True, it outputs verbose messages on error.
102
103 """
104 x = cuda.to_cpu(utils.force_array(x))
105 y = cuda.to_cpu(utils.force_array(y))
106 try:
107 numpy.testing.assert_allclose(
108 x, y, atol=atol, rtol=rtol, verbose=verbose)
109 except Exception:
110 print('error:', numpy.abs(x - y).max())
111 raise
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/gradient_check.py b/chainer/gradient_check.py
--- a/chainer/gradient_check.py
+++ b/chainer/gradient_check.py
@@ -5,6 +5,11 @@
from chainer import utils
+def _copy_arrays(xs):
+ xp = cuda.get_array_module(*xs)
+ return tuple(xp.copy(x) for x in xs)
+
+
def numerical_grad_cpu(f, inputs, grad_outputs, eps=1e-3):
grads = tuple(numpy.zeros_like(x) for x in inputs)
for x, gx in zip(inputs, grads):
@@ -13,9 +18,9 @@
for i in six.moves.range(flat_x.size):
orig = flat_x[i]
flat_x[i] = orig + eps
- ys1 = f()
+ ys1 = _copy_arrays(f())
flat_x[i] = orig - eps
- ys2 = f()
+ ys2 = _copy_arrays(f())
flat_x[i] = orig
for y1, y2, gy in zip(ys1, ys2, grad_outputs):
@@ -37,10 +42,10 @@
orig = x_cpu[i]
x_cpu[i] = orig + eps
x.set(x_cpu)
- ys1 = f()
+ ys1 = _copy_arrays(f())
x_cpu[i] = orig - eps
x.set(x_cpu)
- ys2 = f()
+ ys2 = _copy_arrays(f())
x_cpu[i] = orig
x.set(x_cpu)
| {"golden_diff": "diff --git a/chainer/gradient_check.py b/chainer/gradient_check.py\n--- a/chainer/gradient_check.py\n+++ b/chainer/gradient_check.py\n@@ -5,6 +5,11 @@\n from chainer import utils\n \n \n+def _copy_arrays(xs):\n+ xp = cuda.get_array_module(*xs)\n+ return tuple(xp.copy(x) for x in xs)\n+\n+\n def numerical_grad_cpu(f, inputs, grad_outputs, eps=1e-3):\n grads = tuple(numpy.zeros_like(x) for x in inputs)\n for x, gx in zip(inputs, grads):\n@@ -13,9 +18,9 @@\n for i in six.moves.range(flat_x.size):\n orig = flat_x[i]\n flat_x[i] = orig + eps\n- ys1 = f()\n+ ys1 = _copy_arrays(f())\n flat_x[i] = orig - eps\n- ys2 = f()\n+ ys2 = _copy_arrays(f())\n flat_x[i] = orig\n \n for y1, y2, gy in zip(ys1, ys2, grad_outputs):\n@@ -37,10 +42,10 @@\n orig = x_cpu[i]\n x_cpu[i] = orig + eps\n x.set(x_cpu)\n- ys1 = f()\n+ ys1 = _copy_arrays(f())\n x_cpu[i] = orig - eps\n x.set(x_cpu)\n- ys2 = f()\n+ ys2 = _copy_arrays(f())\n x_cpu[i] = orig\n x.set(x_cpu)\n", "issue": "numerical_grad caution\nI was implementing a differentiable Transpose function. \n\n```\nclass Transpose(Function):\n def forward(self, inputs):\n x = inputs[0]\n return x.transpose(),\n def backward(self, inputs, grads):\n return grads[0].transpose(),\n```\n\nWhile testing the gradient with numerical_grad, \n\n```\n def test_numerical_grad_cpu(self):\n x = np.random.randn(1, 10)\n x_var = Variable(x)\n y_var = self.f(x_var)\n y_var.grad = np.random.rand(10, 1)\n y_var.backward()\n cl = lambda: self.f.forward((x,))\n gx, = gradient_check.numerical_grad(cl, (x,), (y_var.grad,))\n gradient_check.assert_allclose(gx, x_var.grad)\n```\n\n(here `self.f = Transpose()`)\nthe numerical gradient `gx` keeps coming back as 0. After much frustration, I finally figured out that I was returning a view of `x` in the above code, and in `numerical_grad_cpu`,\n\n```\n flat_x[i] = orig + eps\n ys1 = f()\n flat_x[i] = orig - eps\n ys2 = f()\n flat_x[i] = orig\n```\n\n`ys1` and `ys2` end up being equal after the last line resetting `flat_x[i]` to the original value. I solved my problem by changing `cl = lambda: self.f.forward((x,))` to `cl = lambda: np.copy(self.f.forward((x,)))`.\n\nI'm not sure how frequent this phenomenon could occur outside of transpose, but I just wanted to put this out here so that there could be a discussion. Perhaps a passing note in the documentation suffices here. 
Or doing `ys1 = np.copy(f())` instead might work as well.\n\n", "before_files": [{"content": "import numpy\nimport six\n\nfrom chainer import cuda\nfrom chainer import utils\n\n\ndef numerical_grad_cpu(f, inputs, grad_outputs, eps=1e-3):\n grads = tuple(numpy.zeros_like(x) for x in inputs)\n for x, gx in zip(inputs, grads):\n flat_x = x.ravel()\n flat_gx = gx.ravel()\n for i in six.moves.range(flat_x.size):\n orig = flat_x[i]\n flat_x[i] = orig + eps\n ys1 = f()\n flat_x[i] = orig - eps\n ys2 = f()\n flat_x[i] = orig\n\n for y1, y2, gy in zip(ys1, ys2, grad_outputs):\n if gy is not None:\n dot = float(sum(((y1 - y2) * gy).ravel()))\n flat_gx[i] += dot / (2 * eps)\n\n return grads\n\n\ndef numerical_grad_gpu(f, inputs, grad_outputs, eps=1e-3):\n grads = tuple(cuda.zeros_like(x) for x in inputs)\n for x, gx in zip(inputs, grads):\n x = x.ravel()\n gx = gx.ravel()\n x_cpu = x.get()\n gx_cpu = gx.get()\n for i in six.moves.range(x_cpu.size):\n orig = x_cpu[i]\n x_cpu[i] = orig + eps\n x.set(x_cpu)\n ys1 = f()\n x_cpu[i] = orig - eps\n x.set(x_cpu)\n ys2 = f()\n x_cpu[i] = orig\n x.set(x_cpu)\n\n for y1, y2, gy in zip(ys1, ys2, grad_outputs):\n if gy is not None:\n dot = sum(((y1 - y2) * gy).ravel()).get()\n gx_cpu[i] += dot / (2 * eps)\n gx.set(gx_cpu)\n\n return grads\n\n\ndef numerical_grad(f, inputs, grad_outputs, eps=1e-3):\n \"\"\"Computes numerical gradient by finite differences.\n\n This function is used to implement gradient check. For usage example, see\n unit tests of :mod:`chainer.functions`.\n\n Args:\n f (function): Python function with no arguments that runs forward\n computation and returns the result.\n inputs (tuple of arrays): Tuple of arrays that should be treated as\n inputs. Each element of them is slightly modified to realize\n numerical gradient by finite differences.\n grad_outputs (tuple of arrays): Tuple of arrays that are treated as\n output gradients.\n eps (float): Epsilon value of finite differences.\n\n Returns:\n tuple: Numerical gradient arrays corresponding to ``inputs``.\n\n \"\"\"\n assert eps > 0\n inputs = tuple(inputs)\n grad_outputs = tuple(grad_outputs)\n gpu = any(isinstance(x, cuda.ndarray) for x in inputs + grad_outputs)\n\n cpu = any(isinstance(x, numpy.ndarray) for x in inputs + grad_outputs)\n\n if gpu and cpu:\n raise RuntimeError('Do not mix GPU and CPU arrays in `numerical_grad`')\n elif gpu:\n return numerical_grad_gpu(f, inputs, grad_outputs, eps)\n else:\n return numerical_grad_cpu(f, inputs, grad_outputs, eps)\n\n\ndef assert_allclose(x, y, atol=1e-5, rtol=1e-4, verbose=True):\n \"\"\"Asserts if some corresponding element of x and y differs too much.\n\n This function can handle both CPU and GPU arrays simultaneously.\n\n Args:\n x: Left-hand-side array.\n y: Right-hand-side array.\n atol (float): Absolute tolerance.\n rtol (float): Relative tolerance.\n verbose (bool): If True, it outputs verbose messages on error.\n\n \"\"\"\n x = cuda.to_cpu(utils.force_array(x))\n y = cuda.to_cpu(utils.force_array(y))\n try:\n numpy.testing.assert_allclose(\n x, y, atol=atol, rtol=rtol, verbose=verbose)\n except Exception:\n print('error:', numpy.abs(x - y).max())\n raise\n", "path": "chainer/gradient_check.py"}], "after_files": [{"content": "import numpy\nimport six\n\nfrom chainer import cuda\nfrom chainer import utils\n\n\ndef _copy_arrays(xs):\n xp = cuda.get_array_module(*xs)\n return tuple(xp.copy(x) for x in xs)\n\n\ndef numerical_grad_cpu(f, inputs, grad_outputs, eps=1e-3):\n grads = tuple(numpy.zeros_like(x) for x in inputs)\n for x, gx in 
zip(inputs, grads):\n flat_x = x.ravel()\n flat_gx = gx.ravel()\n for i in six.moves.range(flat_x.size):\n orig = flat_x[i]\n flat_x[i] = orig + eps\n ys1 = _copy_arrays(f())\n flat_x[i] = orig - eps\n ys2 = _copy_arrays(f())\n flat_x[i] = orig\n\n for y1, y2, gy in zip(ys1, ys2, grad_outputs):\n if gy is not None:\n dot = float(sum(((y1 - y2) * gy).ravel()))\n flat_gx[i] += dot / (2 * eps)\n\n return grads\n\n\ndef numerical_grad_gpu(f, inputs, grad_outputs, eps=1e-3):\n grads = tuple(cuda.zeros_like(x) for x in inputs)\n for x, gx in zip(inputs, grads):\n x = x.ravel()\n gx = gx.ravel()\n x_cpu = x.get()\n gx_cpu = gx.get()\n for i in six.moves.range(x_cpu.size):\n orig = x_cpu[i]\n x_cpu[i] = orig + eps\n x.set(x_cpu)\n ys1 = _copy_arrays(f())\n x_cpu[i] = orig - eps\n x.set(x_cpu)\n ys2 = _copy_arrays(f())\n x_cpu[i] = orig\n x.set(x_cpu)\n\n for y1, y2, gy in zip(ys1, ys2, grad_outputs):\n if gy is not None:\n dot = sum(((y1 - y2) * gy).ravel()).get()\n gx_cpu[i] += dot / (2 * eps)\n gx.set(gx_cpu)\n\n return grads\n\n\ndef numerical_grad(f, inputs, grad_outputs, eps=1e-3):\n \"\"\"Computes numerical gradient by finite differences.\n\n This function is used to implement gradient check. For usage example, see\n unit tests of :mod:`chainer.functions`.\n\n Args:\n f (function): Python function with no arguments that runs forward\n computation and returns the result.\n inputs (tuple of arrays): Tuple of arrays that should be treated as\n inputs. Each element of them is slightly modified to realize\n numerical gradient by finite differences.\n grad_outputs (tuple of arrays): Tuple of arrays that are treated as\n output gradients.\n eps (float): Epsilon value of finite differences.\n\n Returns:\n tuple: Numerical gradient arrays corresponding to ``inputs``.\n\n \"\"\"\n assert eps > 0\n inputs = tuple(inputs)\n grad_outputs = tuple(grad_outputs)\n gpu = any(isinstance(x, cuda.ndarray) for x in inputs + grad_outputs)\n\n cpu = any(isinstance(x, numpy.ndarray) for x in inputs + grad_outputs)\n\n if gpu and cpu:\n raise RuntimeError('Do not mix GPU and CPU arrays in `numerical_grad`')\n elif gpu:\n return numerical_grad_gpu(f, inputs, grad_outputs, eps)\n else:\n return numerical_grad_cpu(f, inputs, grad_outputs, eps)\n\n\ndef assert_allclose(x, y, atol=1e-5, rtol=1e-4, verbose=True):\n \"\"\"Asserts if some corresponding element of x and y differs too much.\n\n This function can handle both CPU and GPU arrays simultaneously.\n\n Args:\n x: Left-hand-side array.\n y: Right-hand-side array.\n atol (float): Absolute tolerance.\n rtol (float): Relative tolerance.\n verbose (bool): If True, it outputs verbose messages on error.\n\n \"\"\"\n x = cuda.to_cpu(utils.force_array(x))\n y = cuda.to_cpu(utils.force_array(y))\n try:\n numpy.testing.assert_allclose(\n x, y, atol=atol, rtol=rtol, verbose=verbose)\n except Exception:\n print('error:', numpy.abs(x - y).max())\n raise\n", "path": "chainer/gradient_check.py"}]} | 1,782 | 346 |
gh_patches_debug_6369 | rasdani/github-patches | git_diff | ivy-llc__ivy-18211 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
selu
#14951
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/mindspore/ops/function/nn_func.py`
Content:
```
1 """Includes Mindspore Frontend functions listed in the TODO list
2 https://github.com/unifyai/ivy/issues/14951."""
3
4 # local
5 import ivy
6 from ivy.func_wrapper import with_supported_dtypes
7 from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
8
9
10 @with_supported_dtypes({"2.0 and below": ("float16", "float32")}, "mindspore")
11 @to_ivy_arrays_and_back
12 def softsign(x):
13 return ivy.divide(x, ivy.add(1, ivy.abs(x)))
14
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/mindspore/ops/function/nn_func.py b/ivy/functional/frontends/mindspore/ops/function/nn_func.py
--- a/ivy/functional/frontends/mindspore/ops/function/nn_func.py
+++ b/ivy/functional/frontends/mindspore/ops/function/nn_func.py
@@ -7,6 +7,12 @@
from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
+@with_supported_dtypes({"2.0.0 and below": ("float16", "float32")}, "mindspore")
+@to_ivy_arrays_and_back
+def selu(input_x):
+ return ivy.selu(input_x)
+
+
@with_supported_dtypes({"2.0 and below": ("float16", "float32")}, "mindspore")
@to_ivy_arrays_and_back
def softsign(x):
| {"golden_diff": "diff --git a/ivy/functional/frontends/mindspore/ops/function/nn_func.py b/ivy/functional/frontends/mindspore/ops/function/nn_func.py\n--- a/ivy/functional/frontends/mindspore/ops/function/nn_func.py\n+++ b/ivy/functional/frontends/mindspore/ops/function/nn_func.py\n@@ -7,6 +7,12 @@\n from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n \n \n+@with_supported_dtypes({\"2.0.0 and below\": (\"float16\", \"float32\")}, \"mindspore\")\n+@to_ivy_arrays_and_back\n+def selu(input_x):\n+ return ivy.selu(input_x)\n+\n+ \n @with_supported_dtypes({\"2.0 and below\": (\"float16\", \"float32\")}, \"mindspore\")\n @to_ivy_arrays_and_back\n def softsign(x):\n", "issue": "selu\n#14951 \n", "before_files": [{"content": "\"\"\"Includes Mindspore Frontend functions listed in the TODO list\nhttps://github.com/unifyai/ivy/issues/14951.\"\"\"\n\n# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@with_supported_dtypes({\"2.0 and below\": (\"float16\", \"float32\")}, \"mindspore\")\n@to_ivy_arrays_and_back\ndef softsign(x):\n return ivy.divide(x, ivy.add(1, ivy.abs(x)))\n", "path": "ivy/functional/frontends/mindspore/ops/function/nn_func.py"}], "after_files": [{"content": "\"\"\"Includes Mindspore Frontend functions listed in the TODO list\nhttps://github.com/unifyai/ivy/issues/14951.\"\"\"\n\n# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@with_supported_dtypes({\"2.0.0 and below\": (\"float16\", \"float32\")}, \"mindspore\")\n@to_ivy_arrays_and_back\ndef selu(input_x):\n return ivy.selu(input_x)\n\n \n@with_supported_dtypes({\"2.0 and below\": (\"float16\", \"float32\")}, \"mindspore\")\n@to_ivy_arrays_and_back\ndef softsign(x):\n return ivy.divide(x, ivy.add(1, ivy.abs(x)))\n", "path": "ivy/functional/frontends/mindspore/ops/function/nn_func.py"}]} | 428 | 211 |
gh_patches_debug_5837 | rasdani/github-patches | git_diff | googleapis__google-api-python-client-1639 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
docs: 404 error while accessing contribution guide
When trying to access the contribution guide mentioned in `CONTRIBUTING.rst`, I get a 404 error: https://googleapis.github.io/google-api-python-client/contributing.html


--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `owlbot.py`
Content:
```
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import synthtool as s
16 from synthtool import gcp
17
18 from synthtool.languages import python
19
20 common = gcp.CommonTemplates()
21
22 # ----------------------------------------------------------------------------
23 # Add templated files
24 # ----------------------------------------------------------------------------
25 templated_files = common.py_library()
26
27 # Copy kokoro configs.
28 # Docs are excluded as repo docs cannot currently be generated using sphinx.
29 s.move(templated_files / '.kokoro', excludes=['**/docs/*', 'publish-docs.sh'])
30 s.move(templated_files / '.trampolinerc') # config file for trampoline_v2
31
32 # Also move issue templates
33 s.move(templated_files / '.github', excludes=['CODEOWNERS'])
34
35 # Move scripts folder needed for samples CI
36 s.move(templated_files / 'scripts')
37
38 # ----------------------------------------------------------------------------
39 # Samples templates
40 # ----------------------------------------------------------------------------
41
42 python.py_samples(skip_readmes=True)
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/owlbot.py b/owlbot.py
--- a/owlbot.py
+++ b/owlbot.py
@@ -35,6 +35,9 @@
# Move scripts folder needed for samples CI
s.move(templated_files / 'scripts')
+# Copy CONTRIBUTING.rst
+s.move(templated_files / 'CONTRIBUTING.rst')
+
# ----------------------------------------------------------------------------
# Samples templates
# ----------------------------------------------------------------------------
| {"golden_diff": "diff --git a/owlbot.py b/owlbot.py\n--- a/owlbot.py\n+++ b/owlbot.py\n@@ -35,6 +35,9 @@\n # Move scripts folder needed for samples CI\n s.move(templated_files / 'scripts')\n \n+# Copy CONTRIBUTING.rst\n+s.move(templated_files / 'CONTRIBUTING.rst')\n+\n # ----------------------------------------------------------------------------\n # Samples templates\n # ----------------------------------------------------------------------------\n", "issue": "docs: 404 error while accessing contribution guide\nWhen I was trying to access the contribution guide mentioned in `CONTRIBUTING.rst`, I am getting 404 error - https://googleapis.github.io/google-api-python-client/contributing.html\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport synthtool as s\nfrom synthtool import gcp\n\nfrom synthtool.languages import python\n\ncommon = gcp.CommonTemplates()\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library()\n\n# Copy kokoro configs.\n# Docs are excluded as repo docs cannot currently be generated using sphinx.\ns.move(templated_files / '.kokoro', excludes=['**/docs/*', 'publish-docs.sh'])\ns.move(templated_files / '.trampolinerc') # config file for trampoline_v2\n\n# Also move issue templates\ns.move(templated_files / '.github', excludes=['CODEOWNERS'])\n\n# Move scripts folder needed for samples CI\ns.move(templated_files / 'scripts')\n\n# ----------------------------------------------------------------------------\n# Samples templates\n# ----------------------------------------------------------------------------\n\npython.py_samples(skip_readmes=True)\n", "path": "owlbot.py"}], "after_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport synthtool as s\nfrom synthtool import gcp\n\nfrom synthtool.languages import python\n\ncommon = gcp.CommonTemplates()\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library()\n\n# Copy kokoro configs.\n# Docs are excluded as repo docs cannot currently be generated using sphinx.\ns.move(templated_files / '.kokoro', excludes=['**/docs/*', 
'publish-docs.sh'])\ns.move(templated_files / '.trampolinerc') # config file for trampoline_v2\n\n# Also move issue templates\ns.move(templated_files / '.github', excludes=['CODEOWNERS'])\n\n# Move scripts folder needed for samples CI\ns.move(templated_files / 'scripts')\n\n# Copy CONTRIBUTING.rst\ns.move(templated_files / 'CONTRIBUTING.rst')\n\n# ----------------------------------------------------------------------------\n# Samples templates\n# ----------------------------------------------------------------------------\n\npython.py_samples(skip_readmes=True)\n", "path": "owlbot.py"}]} | 830 | 87 |
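Once `CONTRIBUTING.rst` is copied in by owlbot and the docs are republished, the 404 reported in the issue should disappear. A hypothetical one-off check of the published URL (not part of the repository, and it assumes the `requests` package is available):

```python
import requests  # assumed to be installed; this script is not part of the repo

url = "https://googleapis.github.io/google-api-python-client/contributing.html"
resp = requests.head(url, allow_redirects=True, timeout=10)
print(url, "->", resp.status_code)  # 200 is expected once the page is published
```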
gh_patches_debug_12483 | rasdani/github-patches | git_diff | freedomofpress__securedrop-2929 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bump Ansible to 2.4
## Description
The current version of Ansible in the admin workstation uses PyCrypto==2.6.1 as a dependency, which is causing CI safety failures when checking the admin pip requirements due to the fact that there is a CVE in PyCrypto 2.6.1. See upstream discussion in https://github.com/ansible/ansible/issues/23179.
We should bump to a more recent version of Ansible in the admin workstations that does not have PyCrypto as a dependency
## User Stories
As a SecureDrop administrator, I don't want to run software relying on unmaintained dependencies.
Temporarily disable safety check
## Description
We'll need to temporarily disable safety in order to merge until #2926 is resolved (and we'll need to cherry pick the disabling of safety into the 0.5.2 release branch).
## User Stories
As a SecureDrop maintainer, I don't want to merge with failing CI.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `install_files/ansible-base/callback_plugins/ansible_version_check.py`
Content:
```
1 # -*- encoding:utf-8 -*-
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import sys
5
6 import ansible
7
8 try:
9 # Version 2.0+
10 from ansible.plugins.callback import CallbackBase
11 except ImportError:
12 CallbackBase = object
13
14
15 def print_red_bold(text):
16 print('\x1b[31;1m' + text + '\x1b[0m')
17
18
19 class CallbackModule(CallbackBase):
20 def __init__(self):
21 # Can't use `on_X` because this isn't forwards compatible with Ansible 2.0+
22 required_version = '2.3.2' # Keep synchronized with group_vars/all/main.yml
23 if not ansible.__version__.startswith(required_version):
24 print_red_bold(
25 "SecureDrop restriction: only Ansible {version}.* is supported. "
26 .format(version=required_version)
27 )
28 sys.exit(1)
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py
--- a/install_files/ansible-base/callback_plugins/ansible_version_check.py
+++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py
@@ -19,7 +19,7 @@
class CallbackModule(CallbackBase):
def __init__(self):
# Can't use `on_X` because this isn't forwards compatible with Ansible 2.0+
- required_version = '2.3.2' # Keep synchronized with group_vars/all/main.yml
+ required_version = '2.4.2' # Keep synchronized with requirements files
if not ansible.__version__.startswith(required_version):
print_red_bold(
"SecureDrop restriction: only Ansible {version}.* is supported. "
| {"golden_diff": "diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n--- a/install_files/ansible-base/callback_plugins/ansible_version_check.py\n+++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n@@ -19,7 +19,7 @@\n class CallbackModule(CallbackBase):\n def __init__(self):\n # Can't use `on_X` because this isn't forwards compatible with Ansible 2.0+\n- required_version = '2.3.2' # Keep synchronized with group_vars/all/main.yml\n+ required_version = '2.4.2' # Keep synchronized with requirements files\n if not ansible.__version__.startswith(required_version):\n print_red_bold(\n \"SecureDrop restriction: only Ansible {version}.* is supported. \"\n", "issue": "Bump Ansible to 2.4\n## Description\r\n\r\nThe current version of Ansible in the admin workstation uses PyCrypto==2.6.1 as a dependency, which is causing CI safety failures when checking the admin pip requirements due to the fact that there is a CVE in PyCrypto 2.6.1. See upstream discussion in https://github.com/ansible/ansible/issues/23179. \r\n\r\nWe should bump to a more recent version of Ansible in the admin workstations that does not have PyCrypto as a dependency\r\n\r\n## User Stories\r\n\r\nAs a SecureDrop administrator, I don't want to run software relying on unmaintained dependencies.\nTemporarily disable safety check\n## Description\r\n\r\nWe'll need to temporarily disable safety in order to merge until #2926 is resolved (and we'll need to cherry pick the disabling of safety into the 0.5.2 release branch). \r\n\r\n## User Stories\r\n\r\nAs a SecureDrop maintainer, I don't want to merge with failing CI. \n", "before_files": [{"content": "# -*- encoding:utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport sys\n\nimport ansible\n\ntry:\n # Version 2.0+\n from ansible.plugins.callback import CallbackBase\nexcept ImportError:\n CallbackBase = object\n\n\ndef print_red_bold(text):\n print('\\x1b[31;1m' + text + '\\x1b[0m')\n\n\nclass CallbackModule(CallbackBase):\n def __init__(self):\n # Can't use `on_X` because this isn't forwards compatible with Ansible 2.0+\n required_version = '2.3.2' # Keep synchronized with group_vars/all/main.yml\n if not ansible.__version__.startswith(required_version):\n print_red_bold(\n \"SecureDrop restriction: only Ansible {version}.* is supported. \"\n .format(version=required_version)\n )\n sys.exit(1)\n", "path": "install_files/ansible-base/callback_plugins/ansible_version_check.py"}], "after_files": [{"content": "# -*- encoding:utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport sys\n\nimport ansible\n\ntry:\n # Version 2.0+\n from ansible.plugins.callback import CallbackBase\nexcept ImportError:\n CallbackBase = object\n\n\ndef print_red_bold(text):\n print('\\x1b[31;1m' + text + '\\x1b[0m')\n\n\nclass CallbackModule(CallbackBase):\n def __init__(self):\n # Can't use `on_X` because this isn't forwards compatible with Ansible 2.0+\n required_version = '2.4.2' # Keep synchronized with requirements files\n if not ansible.__version__.startswith(required_version):\n print_red_bold(\n \"SecureDrop restriction: only Ansible {version}.* is supported. \"\n .format(version=required_version)\n )\n sys.exit(1)\n", "path": "install_files/ansible-base/callback_plugins/ansible_version_check.py"}]} | 729 | 185 |
gh_patches_debug_6736 | rasdani/github-patches | git_diff | freqtrade__freqtrade-3490 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
I get the same profit / loss report in all time frames
<!--
Have you searched for similar issues before posting it? Yes
If you have discovered a bug in the bot, please [search our issue tracker](https://github.com/freqtrade/freqtrade/issues?q=is%3Aissue).
If it hasn't been reported, please create a new issue.
Please do not use bug reports to request new features.
-->
## Describe your environment
 * Operating system: Ubuntu 18.04.4 LTS
 * Python Version: Python 3.6.9
 * CCXT version: ccxt==1.29.5
 * Freqtrade Version: freqtrade develop-761407f7
Today, I updated with the method below.
cd freqtrade
git pull
python3 -m pip install -r requirements.txt --user
python3 -m pip install -r requirements-hyperopt.txt --user
python3 -m pip install -r requirements-plot.txt --user
and
freqtrade download-data --days 365 --timeframes 5m 15m 30m 1h 4h 1d
https://github.com/freqtrade/freqtrade/issues/3104 (I keep getting this error on 1 and 5 minute candles)
I use StaticPairList
I did backtest as below
freqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 1m
freqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 5m
freqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 15m
freqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 30m
freqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 4h
freqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 1d
The problem I encountered:
I get the same profit / loss report in all time frames
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `freqtrade/configuration/deprecated_settings.py`
Content:
```
1 """
2 Functions to handle deprecated settings
3 """
4
5 import logging
6 from typing import Any, Dict
7
8 from freqtrade.exceptions import OperationalException
9
10
11 logger = logging.getLogger(__name__)
12
13
14 def check_conflicting_settings(config: Dict[str, Any],
15 section1: str, name1: str,
16 section2: str, name2: str) -> None:
17 section1_config = config.get(section1, {})
18 section2_config = config.get(section2, {})
19 if name1 in section1_config and name2 in section2_config:
20 raise OperationalException(
21 f"Conflicting settings `{section1}.{name1}` and `{section2}.{name2}` "
22 "(DEPRECATED) detected in the configuration file. "
23 "This deprecated setting will be removed in the next versions of Freqtrade. "
24 f"Please delete it from your configuration and use the `{section1}.{name1}` "
25 "setting instead."
26 )
27
28
29 def process_deprecated_setting(config: Dict[str, Any],
30 section1: str, name1: str,
31 section2: str, name2: str) -> None:
32 section2_config = config.get(section2, {})
33
34 if name2 in section2_config:
35 logger.warning(
36 "DEPRECATED: "
37 f"The `{section2}.{name2}` setting is deprecated and "
38 "will be removed in the next versions of Freqtrade. "
39 f"Please use the `{section1}.{name1}` setting in your configuration instead."
40 )
41 section1_config = config.get(section1, {})
42 section1_config[name1] = section2_config[name2]
43
44
45 def process_temporary_deprecated_settings(config: Dict[str, Any]) -> None:
46
47 check_conflicting_settings(config, 'ask_strategy', 'use_sell_signal',
48 'experimental', 'use_sell_signal')
49 check_conflicting_settings(config, 'ask_strategy', 'sell_profit_only',
50 'experimental', 'sell_profit_only')
51 check_conflicting_settings(config, 'ask_strategy', 'ignore_roi_if_buy_signal',
52 'experimental', 'ignore_roi_if_buy_signal')
53
54 process_deprecated_setting(config, 'ask_strategy', 'use_sell_signal',
55 'experimental', 'use_sell_signal')
56 process_deprecated_setting(config, 'ask_strategy', 'sell_profit_only',
57 'experimental', 'sell_profit_only')
58 process_deprecated_setting(config, 'ask_strategy', 'ignore_roi_if_buy_signal',
59 'experimental', 'ignore_roi_if_buy_signal')
60
61 if (config.get('edge', {}).get('enabled', False)
62 and 'capital_available_percentage' in config.get('edge', {})):
63 raise OperationalException(
64 "DEPRECATED: "
65 "Using 'edge.capital_available_percentage' has been deprecated in favor of "
66 "'tradable_balance_ratio'. Please migrate your configuration to "
67 "'tradable_balance_ratio' and remove 'capital_available_percentage' "
68 "from the edge configuration."
69 )
70 if 'ticker_interval' in config:
71 logger.warning(
72 "DEPRECATED: "
73 "Please use 'timeframe' instead of 'ticker_interval."
74 )
75 config['timeframe'] = config['ticker_interval']
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/freqtrade/configuration/deprecated_settings.py b/freqtrade/configuration/deprecated_settings.py
--- a/freqtrade/configuration/deprecated_settings.py
+++ b/freqtrade/configuration/deprecated_settings.py
@@ -72,4 +72,9 @@
"DEPRECATED: "
"Please use 'timeframe' instead of 'ticker_interval."
)
+ if 'timeframe' in config:
+ raise OperationalException(
+ "Both 'timeframe' and 'ticker_interval' detected."
+ "Please remove 'ticker_interval' from your configuration to continue operating."
+ )
config['timeframe'] = config['ticker_interval']
| {"golden_diff": "diff --git a/freqtrade/configuration/deprecated_settings.py b/freqtrade/configuration/deprecated_settings.py\n--- a/freqtrade/configuration/deprecated_settings.py\n+++ b/freqtrade/configuration/deprecated_settings.py\n@@ -72,4 +72,9 @@\n \"DEPRECATED: \"\n \"Please use 'timeframe' instead of 'ticker_interval.\"\n )\n+ if 'timeframe' in config:\n+ raise OperationalException(\n+ \"Both 'timeframe' and 'ticker_interval' detected.\"\n+ \"Please remove 'ticker_interval' from your configuration to continue operating.\"\n+ )\n config['timeframe'] = config['ticker_interval']\n", "issue": "I get the same profit / loss report in all time frames\n<!-- \r\nHave you searched for similar issues before posting it? Yes\r\n\r\nIf you have discovered a bug in the bot, please [search our issue tracker](https://github.com/freqtrade/freqtrade/issues?q=is%3Aissue). \r\nIf it hasn't been reported, please create a new issue.\r\n\r\nPlease do not use bug reports to request new features.\r\n-->\r\n\r\n## Describe your environment\r\n\r\n * Operating system: ____Ubuntu 18.04.4 LTS\r\n * Python Version: _____Python 3.6.9\r\n * CCXT version: _____ ccxt==1.29.5\r\n * Freqtrade Version: ____ freqtrade develop-761407f7\r\n \r\nToday, I updated with the method below.\r\ncd freqtrade\r\ngit pull\r\npython3 -m pip install -r requirements.txt --user\r\npython3 -m pip install -r requirements-hyperopt.txt --user\r\npython3 -m pip install -r requirements-plot.txt --user\r\n\r\nand\r\nfreqtrade download-data --days 365 --timeframes 5m 15m 30m 1h 4h 1d\r\nhttps://github.com/freqtrade/freqtrade/issues/3104 (I keep getting this error on 1 and 5 minute candles)\r\n\r\nI use StaticPairList\r\n\r\nI did backtest as below\r\nfreqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 1m\r\nfreqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 5m\r\nfreqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 15m\r\nfreqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 30m\r\nfreqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 4h\r\nfreqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 1d\r\n\r\nThe problem I encountered:\r\nI get the same profit / loss report in all time frames\n", "before_files": [{"content": "\"\"\"\nFunctions to handle deprecated settings\n\"\"\"\n\nimport logging\nfrom typing import Any, Dict\n\nfrom freqtrade.exceptions import OperationalException\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef check_conflicting_settings(config: Dict[str, Any],\n section1: str, name1: str,\n section2: str, name2: str) -> None:\n section1_config = config.get(section1, {})\n section2_config = config.get(section2, {})\n if name1 in section1_config and name2 in section2_config:\n raise OperationalException(\n f\"Conflicting settings `{section1}.{name1}` and `{section2}.{name2}` \"\n \"(DEPRECATED) detected in the configuration file. \"\n \"This deprecated setting will be removed in the next versions of Freqtrade. 
\"\n f\"Please delete it from your configuration and use the `{section1}.{name1}` \"\n \"setting instead.\"\n )\n\n\ndef process_deprecated_setting(config: Dict[str, Any],\n section1: str, name1: str,\n section2: str, name2: str) -> None:\n section2_config = config.get(section2, {})\n\n if name2 in section2_config:\n logger.warning(\n \"DEPRECATED: \"\n f\"The `{section2}.{name2}` setting is deprecated and \"\n \"will be removed in the next versions of Freqtrade. \"\n f\"Please use the `{section1}.{name1}` setting in your configuration instead.\"\n )\n section1_config = config.get(section1, {})\n section1_config[name1] = section2_config[name2]\n\n\ndef process_temporary_deprecated_settings(config: Dict[str, Any]) -> None:\n\n check_conflicting_settings(config, 'ask_strategy', 'use_sell_signal',\n 'experimental', 'use_sell_signal')\n check_conflicting_settings(config, 'ask_strategy', 'sell_profit_only',\n 'experimental', 'sell_profit_only')\n check_conflicting_settings(config, 'ask_strategy', 'ignore_roi_if_buy_signal',\n 'experimental', 'ignore_roi_if_buy_signal')\n\n process_deprecated_setting(config, 'ask_strategy', 'use_sell_signal',\n 'experimental', 'use_sell_signal')\n process_deprecated_setting(config, 'ask_strategy', 'sell_profit_only',\n 'experimental', 'sell_profit_only')\n process_deprecated_setting(config, 'ask_strategy', 'ignore_roi_if_buy_signal',\n 'experimental', 'ignore_roi_if_buy_signal')\n\n if (config.get('edge', {}).get('enabled', False)\n and 'capital_available_percentage' in config.get('edge', {})):\n raise OperationalException(\n \"DEPRECATED: \"\n \"Using 'edge.capital_available_percentage' has been deprecated in favor of \"\n \"'tradable_balance_ratio'. Please migrate your configuration to \"\n \"'tradable_balance_ratio' and remove 'capital_available_percentage' \"\n \"from the edge configuration.\"\n )\n if 'ticker_interval' in config:\n logger.warning(\n \"DEPRECATED: \"\n \"Please use 'timeframe' instead of 'ticker_interval.\"\n )\n config['timeframe'] = config['ticker_interval']\n", "path": "freqtrade/configuration/deprecated_settings.py"}], "after_files": [{"content": "\"\"\"\nFunctions to handle deprecated settings\n\"\"\"\n\nimport logging\nfrom typing import Any, Dict\n\nfrom freqtrade.exceptions import OperationalException\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef check_conflicting_settings(config: Dict[str, Any],\n section1: str, name1: str,\n section2: str, name2: str) -> None:\n section1_config = config.get(section1, {})\n section2_config = config.get(section2, {})\n if name1 in section1_config and name2 in section2_config:\n raise OperationalException(\n f\"Conflicting settings `{section1}.{name1}` and `{section2}.{name2}` \"\n \"(DEPRECATED) detected in the configuration file. \"\n \"This deprecated setting will be removed in the next versions of Freqtrade. \"\n f\"Please delete it from your configuration and use the `{section1}.{name1}` \"\n \"setting instead.\"\n )\n\n\ndef process_deprecated_setting(config: Dict[str, Any],\n section1: str, name1: str,\n section2: str, name2: str) -> None:\n section2_config = config.get(section2, {})\n\n if name2 in section2_config:\n logger.warning(\n \"DEPRECATED: \"\n f\"The `{section2}.{name2}` setting is deprecated and \"\n \"will be removed in the next versions of Freqtrade. 
\"\n f\"Please use the `{section1}.{name1}` setting in your configuration instead.\"\n )\n section1_config = config.get(section1, {})\n section1_config[name1] = section2_config[name2]\n\n\ndef process_temporary_deprecated_settings(config: Dict[str, Any]) -> None:\n\n check_conflicting_settings(config, 'ask_strategy', 'use_sell_signal',\n 'experimental', 'use_sell_signal')\n check_conflicting_settings(config, 'ask_strategy', 'sell_profit_only',\n 'experimental', 'sell_profit_only')\n check_conflicting_settings(config, 'ask_strategy', 'ignore_roi_if_buy_signal',\n 'experimental', 'ignore_roi_if_buy_signal')\n\n process_deprecated_setting(config, 'ask_strategy', 'use_sell_signal',\n 'experimental', 'use_sell_signal')\n process_deprecated_setting(config, 'ask_strategy', 'sell_profit_only',\n 'experimental', 'sell_profit_only')\n process_deprecated_setting(config, 'ask_strategy', 'ignore_roi_if_buy_signal',\n 'experimental', 'ignore_roi_if_buy_signal')\n\n if (config.get('edge', {}).get('enabled', False)\n and 'capital_available_percentage' in config.get('edge', {})):\n raise OperationalException(\n \"DEPRECATED: \"\n \"Using 'edge.capital_available_percentage' has been deprecated in favor of \"\n \"'tradable_balance_ratio'. Please migrate your configuration to \"\n \"'tradable_balance_ratio' and remove 'capital_available_percentage' \"\n \"from the edge configuration.\"\n )\n if 'ticker_interval' in config:\n logger.warning(\n \"DEPRECATED: \"\n \"Please use 'timeframe' instead of 'ticker_interval.\"\n )\n if 'timeframe' in config:\n raise OperationalException(\n \"Both 'timeframe' and 'ticker_interval' detected.\"\n \"Please remove 'ticker_interval' from your configuration to continue operating.\"\n )\n config['timeframe'] = config['ticker_interval']\n", "path": "freqtrade/configuration/deprecated_settings.py"}]} | 1,575 | 140 |
gh_patches_debug_8822 | rasdani/github-patches | git_diff | learningequality__kolibri-4588 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
<Improvement> Not showing user is exist notification on UI.
### Observed behavior
We are able to create a user with the CREATE AN ACCOUNT option on the Kolibri login page. But when someone uses an existing username to create an account, no notification about the existing user is shown on the UI.
It is not possible to distinguish whether the account already exists or not.
### Expected behavior
It must show an existing-username notification on the UI if the user already exists.
### Steps to reproduce
1. Login with Admin and go to the facility.
2. Click on settings.
3. Select Allow learners to create accounts.
4. Logout and click on CREATE AN ACCOUNT button and use existing username to create account.
### Context
* Kolibri version : Kolibri 0.11.0
* Operating system : ubuntu 14.04
* Browser : Chrome
### Screenshots:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/core/auth/serializers.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import print_function
3 from __future__ import unicode_literals
4
5 from rest_framework import serializers
6 from rest_framework.validators import UniqueTogetherValidator
7
8 from .constants.collection_kinds import LEARNERGROUP
9 from .models import Classroom
10 from .models import Facility
11 from .models import FacilityDataset
12 from .models import FacilityUser
13 from .models import LearnerGroup
14 from .models import Membership
15 from .models import Role
16 from kolibri.core import error_constants
17
18
19 class RoleSerializer(serializers.ModelSerializer):
20 collection_parent = serializers.SerializerMethodField()
21
22 class Meta:
23 model = Role
24 fields = ('id', 'kind', 'collection', 'user', 'collection_parent',)
25
26 def get_collection_parent(self, instance):
27 if instance.collection.parent is not None:
28 return instance.collection.parent.id
29 else:
30 return None
31
32
33 class FacilityUserSerializer(serializers.ModelSerializer):
34 roles = RoleSerializer(many=True, read_only=True)
35
36 class Meta:
37 model = FacilityUser
38 extra_kwargs = {'password': {'write_only': True}}
39 fields = ('id', 'username', 'full_name', 'password', 'facility', 'roles', 'is_superuser')
40
41 def create(self, validated_data):
42 if FacilityUser.objects.filter(username__iexact=validated_data['username']).exists():
43 raise serializers.ValidationError(detail={'username': ['An account with that username already exists.']},
44 code=error_constants.USERNAME_ALREADY_EXISTS)
45 return super(FacilityUserSerializer, self).create(validated_data)
46
47 def update(self, instance, validated_data):
48 if validated_data.get('username') and FacilityUser.objects.exclude(id__exact=instance.id).filter(username__iexact=validated_data['username']).exists():
49 raise serializers.ValidationError(detail={'username': ['An account with that username already exists.']},
50 code=error_constants.USERNAME_ALREADY_EXISTS)
51 return super(FacilityUserSerializer, self).update(instance, validated_data)
52
53
54 class FacilityUserSignupSerializer(FacilityUserSerializer):
55
56 def validate_username(self, value):
57 if FacilityUser.objects.filter(username__iexact=value).exists():
58 raise serializers.ValidationError(detail={'username': ['An account with that username already exists.']},
59 code=error_constants.USERNAME_ALREADY_EXISTS)
60 return value
61
62
63 class FacilityUsernameSerializer(serializers.ModelSerializer):
64
65 class Meta:
66 model = FacilityUser
67 fields = ('username', )
68
69
70 class MembershipSerializer(serializers.ModelSerializer):
71
72 class Meta:
73 model = Membership
74 fields = ('id', 'collection', 'user')
75
76 def create(self, validated_data):
77 user = validated_data["user"]
78 collection = validated_data["collection"]
79 if collection.kind == LEARNERGROUP and user.memberships.filter(collection__parent=collection.parent).exists():
80 # We are trying to create a membership for a user in a group, but they already belong to a group
81 # in the same class as this group. We may want to allow this, but the frontend does not currently
82 # support this. Error!
83 raise serializers.ValidationError(detail={'classroom': 'This user is already in a group in this class'},
84 code=error_constants.USER_ALREADY_IN_GROUP_IN_CLASS)
85 return super(MembershipSerializer, self).create(validated_data)
86
87
88 class FacilityDatasetSerializer(serializers.ModelSerializer):
89
90 class Meta:
91 model = FacilityDataset
92 fields = ('id', 'learner_can_edit_username', 'learner_can_edit_name', 'learner_can_edit_password',
93 'learner_can_sign_up', 'learner_can_delete_account', 'learner_can_login_with_no_password',
94 'show_download_button_in_learn', 'description', 'location', 'allow_guest_access')
95
96
97 class FacilitySerializer(serializers.ModelSerializer):
98 dataset = FacilityDatasetSerializer(read_only=True)
99 default = serializers.SerializerMethodField()
100
101 class Meta:
102 model = Facility
103 extra_kwargs = {'id': {'read_only': True}, 'dataset': {'read_only': True}}
104 fields = ('id', 'name', 'dataset', 'default')
105
106 def get_default(self, instance):
107 return instance == Facility.get_default_facility()
108
109
110 class PublicFacilitySerializer(serializers.ModelSerializer):
111
112 class Meta:
113 model = Facility
114 fields = ('dataset', 'name')
115
116
117 class ClassroomSerializer(serializers.ModelSerializer):
118 learner_count = serializers.SerializerMethodField()
119 coaches = serializers.SerializerMethodField()
120
121 def get_learner_count(self, instance):
122 return instance.get_members().count()
123
124 def get_coaches(self, instance):
125 return FacilityUserSerializer(instance.get_coaches(), many=True).data
126
127 class Meta:
128 model = Classroom
129 fields = (
130 'id',
131 'name',
132 'parent',
133 'learner_count',
134 'coaches',
135 )
136
137 validators = [
138 UniqueTogetherValidator(
139 queryset=Classroom.objects.all(),
140 fields=('parent', 'name')
141 )
142 ]
143
144
145 class LearnerGroupSerializer(serializers.ModelSerializer):
146
147 user_ids = serializers.SerializerMethodField()
148
149 def get_user_ids(self, group):
150 return [str(user_id['id']) for user_id in group.get_members().values('id')]
151
152 class Meta:
153 model = LearnerGroup
154 fields = ('id', 'name', 'parent', 'user_ids')
155
156 validators = [
157 UniqueTogetherValidator(
158 queryset=Classroom.objects.all(),
159 fields=('parent', 'name')
160 )
161 ]
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/core/auth/serializers.py b/kolibri/core/auth/serializers.py
--- a/kolibri/core/auth/serializers.py
+++ b/kolibri/core/auth/serializers.py
@@ -55,8 +55,10 @@
def validate_username(self, value):
if FacilityUser.objects.filter(username__iexact=value).exists():
- raise serializers.ValidationError(detail={'username': ['An account with that username already exists.']},
- code=error_constants.USERNAME_ALREADY_EXISTS)
+ raise serializers.ValidationError(
+ detail='An account with that username already exists.',
+ code=error_constants.USERNAME_ALREADY_EXISTS
+ )
return value
| {"golden_diff": "diff --git a/kolibri/core/auth/serializers.py b/kolibri/core/auth/serializers.py\n--- a/kolibri/core/auth/serializers.py\n+++ b/kolibri/core/auth/serializers.py\n@@ -55,8 +55,10 @@\n \n def validate_username(self, value):\n if FacilityUser.objects.filter(username__iexact=value).exists():\n- raise serializers.ValidationError(detail={'username': ['An account with that username already exists.']},\n- code=error_constants.USERNAME_ALREADY_EXISTS)\n+ raise serializers.ValidationError(\n+ detail='An account with that username already exists.',\n+ code=error_constants.USERNAME_ALREADY_EXISTS\n+ )\n return value\n", "issue": "<Improvement> Not showing user is exist notification on UI.\n\r\n### Observed behavior\r\nWe are able to create user using CREATE AN ACCOUNT option which is on login page of kolibri. But when someone uses existing username to create account, it will not show any kind of existing user notification on UI.\r\nNot able to distinguish whether account is exist or not.\r\n### Expected behavior\r\nIt must show existing username notification on UI if user is exist.\r\n\r\n\r\n### Steps to reproduce\r\n1. Login with Admin and go to the facility.\r\n2. Click on settings.\r\n3. Select Allow learners to create accounts.\r\n4. Logout and click on CREATE AN ACCOUNT button and use existing username to create account.\r\n\r\n### Context\r\n\r\n * Kolibri version : Kolibri 0.11.0 \r\n * Operating system : ubuntu 14.04\r\n * Browser : Chrome\r\n\r\n### Screenshots:\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom rest_framework import serializers\nfrom rest_framework.validators import UniqueTogetherValidator\n\nfrom .constants.collection_kinds import LEARNERGROUP\nfrom .models import Classroom\nfrom .models import Facility\nfrom .models import FacilityDataset\nfrom .models import FacilityUser\nfrom .models import LearnerGroup\nfrom .models import Membership\nfrom .models import Role\nfrom kolibri.core import error_constants\n\n\nclass RoleSerializer(serializers.ModelSerializer):\n collection_parent = serializers.SerializerMethodField()\n\n class Meta:\n model = Role\n fields = ('id', 'kind', 'collection', 'user', 'collection_parent',)\n\n def get_collection_parent(self, instance):\n if instance.collection.parent is not None:\n return instance.collection.parent.id\n else:\n return None\n\n\nclass FacilityUserSerializer(serializers.ModelSerializer):\n roles = RoleSerializer(many=True, read_only=True)\n\n class Meta:\n model = FacilityUser\n extra_kwargs = {'password': {'write_only': True}}\n fields = ('id', 'username', 'full_name', 'password', 'facility', 'roles', 'is_superuser')\n\n def create(self, validated_data):\n if FacilityUser.objects.filter(username__iexact=validated_data['username']).exists():\n raise serializers.ValidationError(detail={'username': ['An account with that username already exists.']},\n code=error_constants.USERNAME_ALREADY_EXISTS)\n return super(FacilityUserSerializer, self).create(validated_data)\n\n def update(self, instance, validated_data):\n if validated_data.get('username') and FacilityUser.objects.exclude(id__exact=instance.id).filter(username__iexact=validated_data['username']).exists():\n raise serializers.ValidationError(detail={'username': ['An account with that username already exists.']},\n code=error_constants.USERNAME_ALREADY_EXISTS)\n return super(FacilityUserSerializer, self).update(instance, 
validated_data)\n\n\nclass FacilityUserSignupSerializer(FacilityUserSerializer):\n\n def validate_username(self, value):\n if FacilityUser.objects.filter(username__iexact=value).exists():\n raise serializers.ValidationError(detail={'username': ['An account with that username already exists.']},\n code=error_constants.USERNAME_ALREADY_EXISTS)\n return value\n\n\nclass FacilityUsernameSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = FacilityUser\n fields = ('username', )\n\n\nclass MembershipSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Membership\n fields = ('id', 'collection', 'user')\n\n def create(self, validated_data):\n user = validated_data[\"user\"]\n collection = validated_data[\"collection\"]\n if collection.kind == LEARNERGROUP and user.memberships.filter(collection__parent=collection.parent).exists():\n # We are trying to create a membership for a user in a group, but they already belong to a group\n # in the same class as this group. We may want to allow this, but the frontend does not currently\n # support this. Error!\n raise serializers.ValidationError(detail={'classroom': 'This user is already in a group in this class'},\n code=error_constants.USER_ALREADY_IN_GROUP_IN_CLASS)\n return super(MembershipSerializer, self).create(validated_data)\n\n\nclass FacilityDatasetSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = FacilityDataset\n fields = ('id', 'learner_can_edit_username', 'learner_can_edit_name', 'learner_can_edit_password',\n 'learner_can_sign_up', 'learner_can_delete_account', 'learner_can_login_with_no_password',\n 'show_download_button_in_learn', 'description', 'location', 'allow_guest_access')\n\n\nclass FacilitySerializer(serializers.ModelSerializer):\n dataset = FacilityDatasetSerializer(read_only=True)\n default = serializers.SerializerMethodField()\n\n class Meta:\n model = Facility\n extra_kwargs = {'id': {'read_only': True}, 'dataset': {'read_only': True}}\n fields = ('id', 'name', 'dataset', 'default')\n\n def get_default(self, instance):\n return instance == Facility.get_default_facility()\n\n\nclass PublicFacilitySerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Facility\n fields = ('dataset', 'name')\n\n\nclass ClassroomSerializer(serializers.ModelSerializer):\n learner_count = serializers.SerializerMethodField()\n coaches = serializers.SerializerMethodField()\n\n def get_learner_count(self, instance):\n return instance.get_members().count()\n\n def get_coaches(self, instance):\n return FacilityUserSerializer(instance.get_coaches(), many=True).data\n\n class Meta:\n model = Classroom\n fields = (\n 'id',\n 'name',\n 'parent',\n 'learner_count',\n 'coaches',\n )\n\n validators = [\n UniqueTogetherValidator(\n queryset=Classroom.objects.all(),\n fields=('parent', 'name')\n )\n ]\n\n\nclass LearnerGroupSerializer(serializers.ModelSerializer):\n\n user_ids = serializers.SerializerMethodField()\n\n def get_user_ids(self, group):\n return [str(user_id['id']) for user_id in group.get_members().values('id')]\n\n class Meta:\n model = LearnerGroup\n fields = ('id', 'name', 'parent', 'user_ids')\n\n validators = [\n UniqueTogetherValidator(\n queryset=Classroom.objects.all(),\n fields=('parent', 'name')\n )\n ]\n", "path": "kolibri/core/auth/serializers.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom rest_framework import serializers\nfrom rest_framework.validators import 
UniqueTogetherValidator\n\nfrom .constants.collection_kinds import LEARNERGROUP\nfrom .models import Classroom\nfrom .models import Facility\nfrom .models import FacilityDataset\nfrom .models import FacilityUser\nfrom .models import LearnerGroup\nfrom .models import Membership\nfrom .models import Role\nfrom kolibri.core import error_constants\n\n\nclass RoleSerializer(serializers.ModelSerializer):\n collection_parent = serializers.SerializerMethodField()\n\n class Meta:\n model = Role\n fields = ('id', 'kind', 'collection', 'user', 'collection_parent',)\n\n def get_collection_parent(self, instance):\n if instance.collection.parent is not None:\n return instance.collection.parent.id\n else:\n return None\n\n\nclass FacilityUserSerializer(serializers.ModelSerializer):\n roles = RoleSerializer(many=True, read_only=True)\n\n class Meta:\n model = FacilityUser\n extra_kwargs = {'password': {'write_only': True}}\n fields = ('id', 'username', 'full_name', 'password', 'facility', 'roles', 'is_superuser')\n\n def create(self, validated_data):\n if FacilityUser.objects.filter(username__iexact=validated_data['username']).exists():\n raise serializers.ValidationError(detail={'username': ['An account with that username already exists.']},\n code=error_constants.USERNAME_ALREADY_EXISTS)\n return super(FacilityUserSerializer, self).create(validated_data)\n\n def update(self, instance, validated_data):\n if validated_data.get('username') and FacilityUser.objects.exclude(id__exact=instance.id).filter(username__iexact=validated_data['username']).exists():\n raise serializers.ValidationError(detail={'username': ['An account with that username already exists.']},\n code=error_constants.USERNAME_ALREADY_EXISTS)\n return super(FacilityUserSerializer, self).update(instance, validated_data)\n\n\nclass FacilityUserSignupSerializer(FacilityUserSerializer):\n\n def validate_username(self, value):\n if FacilityUser.objects.filter(username__iexact=value).exists():\n raise serializers.ValidationError(\n detail='An account with that username already exists.',\n code=error_constants.USERNAME_ALREADY_EXISTS\n )\n return value\n\n\nclass FacilityUsernameSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = FacilityUser\n fields = ('username', )\n\n\nclass MembershipSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Membership\n fields = ('id', 'collection', 'user')\n\n def create(self, validated_data):\n user = validated_data[\"user\"]\n collection = validated_data[\"collection\"]\n if collection.kind == LEARNERGROUP and user.memberships.filter(collection__parent=collection.parent).exists():\n # We are trying to create a membership for a user in a group, but they already belong to a group\n # in the same class as this group. We may want to allow this, but the frontend does not currently\n # support this. 
Error!\n raise serializers.ValidationError(detail={'classroom': 'This user is already in a group in this class'},\n code=error_constants.USER_ALREADY_IN_GROUP_IN_CLASS)\n return super(MembershipSerializer, self).create(validated_data)\n\n\nclass FacilityDatasetSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = FacilityDataset\n fields = ('id', 'learner_can_edit_username', 'learner_can_edit_name', 'learner_can_edit_password',\n 'learner_can_sign_up', 'learner_can_delete_account', 'learner_can_login_with_no_password',\n 'show_download_button_in_learn', 'description', 'location', 'allow_guest_access')\n\n\nclass FacilitySerializer(serializers.ModelSerializer):\n dataset = FacilityDatasetSerializer(read_only=True)\n default = serializers.SerializerMethodField()\n\n class Meta:\n model = Facility\n extra_kwargs = {'id': {'read_only': True}, 'dataset': {'read_only': True}}\n fields = ('id', 'name', 'dataset', 'default')\n\n def get_default(self, instance):\n return instance == Facility.get_default_facility()\n\n\nclass PublicFacilitySerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Facility\n fields = ('dataset', 'name')\n\n\nclass ClassroomSerializer(serializers.ModelSerializer):\n learner_count = serializers.SerializerMethodField()\n coaches = serializers.SerializerMethodField()\n\n def get_learner_count(self, instance):\n return instance.get_members().count()\n\n def get_coaches(self, instance):\n return FacilityUserSerializer(instance.get_coaches(), many=True).data\n\n class Meta:\n model = Classroom\n fields = (\n 'id',\n 'name',\n 'parent',\n 'learner_count',\n 'coaches',\n )\n\n validators = [\n UniqueTogetherValidator(\n queryset=Classroom.objects.all(),\n fields=('parent', 'name')\n )\n ]\n\n\nclass LearnerGroupSerializer(serializers.ModelSerializer):\n\n user_ids = serializers.SerializerMethodField()\n\n def get_user_ids(self, group):\n return [str(user_id['id']) for user_id in group.get_members().values('id')]\n\n class Meta:\n model = LearnerGroup\n fields = ('id', 'name', 'parent', 'user_ids')\n\n validators = [\n UniqueTogetherValidator(\n queryset=Classroom.objects.all(),\n fields=('parent', 'name')\n )\n ]\n", "path": "kolibri/core/auth/serializers.py"}]} | 2,028 | 150 |
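The fix relies on a Django REST Framework convention: inside a field-level `validate_<field>()` hook the detail should be a plain message, because DRF already namespaces it under the field name when building the error payload, which is presumably what the Kolibri front end expects. A hedged sketch of that convention (assumes `djangorestframework` is installed; the uniqueness query is replaced by a stand-in check):

```python
import django
from django.conf import settings

if not settings.configured:          # minimal setup so this runs outside a project
    settings.configure()
    django.setup()

from rest_framework import serializers   # assumes djangorestframework is installed


class SignupSerializer(serializers.Serializer):
    username = serializers.CharField()

    def validate_username(self, value):
        if value.lower() == "admin":      # stand-in for the real uniqueness query
            raise serializers.ValidationError(
                detail="An account with that username already exists.",
                code="username_already_exists",
            )
        return value


ser = SignupSerializer(data={"username": "admin"})
print(ser.is_valid(), ser.errors)    # errors come back keyed by 'username'
```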
gh_patches_debug_14756 | rasdani/github-patches | git_diff | translate__pootle-4277 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`sync_stores` doesn't handle disabled projects
We addressed a similar issue for `update_stores` in #4198.
`sync_stores` should work for disabled projects as well; see https://github.com/translate/pootle/issues/4198#issuecomment-161717337.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_app/management/commands/sync_stores.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 import os
11 os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'
12 from optparse import make_option
13
14 from pootle_app.management.commands import PootleCommand
15
16
17 class Command(PootleCommand):
18 option_list = PootleCommand.option_list + (
19 make_option(
20 '--overwrite',
21 action='store_true',
22 dest='overwrite',
23 default=False,
24 help="Don't just save translations, but "
25 "overwrite files to reflect state in database",
26 ),
27 make_option(
28 '--skip-missing',
29 action='store_true',
30 dest='skip_missing',
31 default=False,
32 help="Ignore missing files on disk",
33 ),
34 make_option(
35 '--force',
36 action='store_true',
37 dest='force',
38 default=False,
39 help="Don't ignore stores synced after last change",
40 ),
41 )
42 help = "Save new translations to disk manually."
43
44 def handle_all_stores(self, translation_project, **options):
45 translation_project.sync(
46 conservative=not options['overwrite'],
47 skip_missing=options['skip_missing'],
48 only_newer=not options['force']
49 )
50
51 def handle_store(self, store, **options):
52 store.sync(
53 conservative=not options['overwrite'],
54 update_structure=options['overwrite'],
55 skip_missing=options['skip_missing'],
56 only_newer=not options['force']
57 )
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/apps/pootle_app/management/commands/sync_stores.py b/pootle/apps/pootle_app/management/commands/sync_stores.py
--- a/pootle/apps/pootle_app/management/commands/sync_stores.py
+++ b/pootle/apps/pootle_app/management/commands/sync_stores.py
@@ -40,13 +40,15 @@
),
)
help = "Save new translations to disk manually."
+ process_disabled_projects = True
def handle_all_stores(self, translation_project, **options):
- translation_project.sync(
- conservative=not options['overwrite'],
- skip_missing=options['skip_missing'],
- only_newer=not options['force']
- )
+ if translation_project.directory_exists_on_disk():
+ translation_project.sync(
+ conservative=not options['overwrite'],
+ skip_missing=options['skip_missing'],
+ only_newer=not options['force']
+ )
def handle_store(self, store, **options):
store.sync(
| {"golden_diff": "diff --git a/pootle/apps/pootle_app/management/commands/sync_stores.py b/pootle/apps/pootle_app/management/commands/sync_stores.py\n--- a/pootle/apps/pootle_app/management/commands/sync_stores.py\n+++ b/pootle/apps/pootle_app/management/commands/sync_stores.py\n@@ -40,13 +40,15 @@\n ),\n )\n help = \"Save new translations to disk manually.\"\n+ process_disabled_projects = True\n \n def handle_all_stores(self, translation_project, **options):\n- translation_project.sync(\n- conservative=not options['overwrite'],\n- skip_missing=options['skip_missing'],\n- only_newer=not options['force']\n- )\n+ if translation_project.directory_exists_on_disk():\n+ translation_project.sync(\n+ conservative=not options['overwrite'],\n+ skip_missing=options['skip_missing'],\n+ only_newer=not options['force']\n+ )\n \n def handle_store(self, store, **options):\n store.sync(\n", "issue": "`sync_stores` doesn't handle disabled projects\nWe addressed the similar issue for `update_stores` #4198.\n`sync_stores` should work for disabled projects as well https://github.com/translate/pootle/issues/4198#issuecomment-161717337.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport os\nos.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'\nfrom optparse import make_option\n\nfrom pootle_app.management.commands import PootleCommand\n\n\nclass Command(PootleCommand):\n option_list = PootleCommand.option_list + (\n make_option(\n '--overwrite',\n action='store_true',\n dest='overwrite',\n default=False,\n help=\"Don't just save translations, but \"\n \"overwrite files to reflect state in database\",\n ),\n make_option(\n '--skip-missing',\n action='store_true',\n dest='skip_missing',\n default=False,\n help=\"Ignore missing files on disk\",\n ),\n make_option(\n '--force',\n action='store_true',\n dest='force',\n default=False,\n help=\"Don't ignore stores synced after last change\",\n ),\n )\n help = \"Save new translations to disk manually.\"\n\n def handle_all_stores(self, translation_project, **options):\n translation_project.sync(\n conservative=not options['overwrite'],\n skip_missing=options['skip_missing'],\n only_newer=not options['force']\n )\n\n def handle_store(self, store, **options):\n store.sync(\n conservative=not options['overwrite'],\n update_structure=options['overwrite'],\n skip_missing=options['skip_missing'],\n only_newer=not options['force']\n )\n", "path": "pootle/apps/pootle_app/management/commands/sync_stores.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport os\nos.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'\nfrom optparse import make_option\n\nfrom pootle_app.management.commands import PootleCommand\n\n\nclass Command(PootleCommand):\n option_list = PootleCommand.option_list + (\n make_option(\n '--overwrite',\n action='store_true',\n dest='overwrite',\n default=False,\n help=\"Don't just save translations, but \"\n \"overwrite files to reflect state in database\",\n ),\n make_option(\n '--skip-missing',\n action='store_true',\n dest='skip_missing',\n default=False,\n help=\"Ignore missing files on disk\",\n ),\n make_option(\n '--force',\n action='store_true',\n dest='force',\n default=False,\n help=\"Don't ignore stores synced after last change\",\n ),\n )\n help = \"Save new translations to disk manually.\"\n process_disabled_projects = True\n\n def handle_all_stores(self, translation_project, **options):\n if translation_project.directory_exists_on_disk():\n translation_project.sync(\n conservative=not options['overwrite'],\n skip_missing=options['skip_missing'],\n only_newer=not options['force']\n )\n\n def handle_store(self, store, **options):\n store.sync(\n conservative=not options['overwrite'],\n update_structure=options['overwrite'],\n skip_missing=options['skip_missing'],\n only_newer=not options['force']\n )\n", "path": "pootle/apps/pootle_app/management/commands/sync_stores.py"}]} | 816 | 233 |
gh_patches_debug_16297 | rasdani/github-patches | git_diff | spyder-ide__spyder-7300 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Debugging history saves both python commands and pdb commands
<!--- **PLEASE READ:** When submitting here, please ensure you've completed the following checklist and checked the boxes to confirm. Issue reports without it may be closed. Thanks! --->
### Issue Report Checklist
* [x] Searched the [issues page](https://github.com/spyder-ide/spyder/issues?q=is%3Aissue) for similar reports
* [x] Read the relevant sections of the [Spyder Troubleshooting Guide](https://github.com/spyder-ide/spyder/wiki/Troubleshooting-Guide-and-FAQ) and followed its advice
* [x] Reproduced the issue after updating with ``conda update spyder`` (or ``pip``, if not using Anaconda)
* [x] Could not reproduce inside ``jupyter qtconsole`` (if console-related)
* [ ] Tried basic troubleshooting (if a bug/error)
* [ ] Restarted Spyder
* [ ] Reset preferences with ``spyder --reset``
* [ ] Reinstalled the latest version of [Anaconda](https://www.anaconda.com/download/)
* [ ] Tried the other applicable steps from the Troubleshooting Guide
* [x] Completed the **Problem Description**, **Steps to Reproduce** and **Version** sections below
## Problem Description
When debugging, I end up with many "s" and "n" entries in my debugging history, preventing me from finding what I am looking for. It would be nicer to save only Python commands, or at least to have an option to do so.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `spyder/widgets/ipythonconsole/debugging.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © Spyder Project Contributors
4 # Licensed under the terms of the MIT License
5 # (see spyder/__init__.py for details)
6
7 """
8 Widget that handles communications between a console in debugging
9 mode and Spyder
10 """
11
12 import ast
13 import pickle
14
15 from qtpy.QtCore import Qt
16 from qtconsole.rich_jupyter_widget import RichJupyterWidget
17
18 from spyder.config.base import PICKLE_PROTOCOL
19 from spyder.config.main import CONF
20 from spyder.py3compat import to_text_string
21
22
23 class DebuggingWidget(RichJupyterWidget):
24 """
25 Widget with the necessary attributes and methods to handle
26 communications between a console in debugging mode and
27 Spyder
28 """
29
30 # --- Public API --------------------------------------------------
31 def write_to_stdin(self, line):
32 """Send raw characters to the IPython kernel through stdin"""
33 self.kernel_client.input(line)
34
35 def set_spyder_breakpoints(self, force=False):
36 """Set Spyder breakpoints into a debugging session"""
37 if self._reading or force:
38 breakpoints_dict = CONF.get('run', 'breakpoints', {})
39
40 # We need to enclose pickled values in a list to be able to
41 # send them to the kernel in Python 2
42 serialiazed_breakpoints = [pickle.dumps(breakpoints_dict,
43 protocol=PICKLE_PROTOCOL)]
44 breakpoints = to_text_string(serialiazed_breakpoints)
45
46 cmd = u"!get_ipython().kernel._set_spyder_breakpoints({})"
47 self.kernel_client.input(cmd.format(breakpoints))
48
49 def dbg_exec_magic(self, magic, args=''):
50 """Run an IPython magic while debugging."""
51 code = "!get_ipython().kernel.shell.run_line_magic('{}', '{}')".format(
52 magic, args)
53 self.kernel_client.input(code)
54
55 def refresh_from_pdb(self, pdb_state):
56 """
57 Refresh Variable Explorer and Editor from a Pdb session,
58 after running any pdb command.
59
60 See publish_pdb_state and notify_spyder in spyder_kernels
61 """
62 if 'step' in pdb_state and 'fname' in pdb_state['step']:
63 fname = pdb_state['step']['fname']
64 lineno = pdb_state['step']['lineno']
65 self.sig_pdb_step.emit(fname, lineno)
66
67 if 'namespace_view' in pdb_state:
68 self.sig_namespace_view.emit(ast.literal_eval(
69 pdb_state['namespace_view']))
70
71 if 'var_properties' in pdb_state:
72 self.sig_var_properties.emit(ast.literal_eval(
73 pdb_state['var_properties']))
74
75 # ---- Private API (overrode by us) ----------------------------
76 def _handle_input_request(self, msg):
77 """Save history and add a %plot magic."""
78 if self._hidden:
79 raise RuntimeError('Request for raw input during hidden execution.')
80
81 # Make sure that all output from the SUB channel has been processed
82 # before entering readline mode.
83 self.kernel_client.iopub_channel.flush()
84
85 def callback(line):
86 # Save history to browse it later
87 if not (len(self._control.history) > 0
88 and self._control.history[-1] == line):
89 self._control.history.append(line)
90
91 # This is the Spyder addition: add a %plot magic to display
92 # plots while debugging
93 if line.startswith('%plot '):
94 line = line.split()[-1]
95 code = "__spy_code__ = get_ipython().run_cell('%s')" % line
96 self.kernel_client.input(code)
97 else:
98 self.kernel_client.input(line)
99 if self._reading:
100 self._reading = False
101 self._readline(msg['content']['prompt'], callback=callback,
102 password=msg['content']['password'])
103
104 def _event_filter_console_keypress(self, event):
105 """Handle Key_Up/Key_Down while debugging."""
106 key = event.key()
107 if self._reading:
108 self._control.current_prompt_pos = self._prompt_pos
109 if key == Qt.Key_Up:
110 self._control.browse_history(backward=True)
111 return True
112 elif key == Qt.Key_Down:
113 self._control.browse_history(backward=False)
114 return True
115 elif key in (Qt.Key_Return, Qt.Key_Enter):
116 self._control.reset_search_pos()
117 else:
118 self._control.hist_wholeline = False
119 return super(DebuggingWidget,
120 self)._event_filter_console_keypress(event)
121 else:
122 return super(DebuggingWidget,
123 self)._event_filter_console_keypress(event)
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/spyder/widgets/ipythonconsole/debugging.py b/spyder/widgets/ipythonconsole/debugging.py
--- a/spyder/widgets/ipythonconsole/debugging.py
+++ b/spyder/widgets/ipythonconsole/debugging.py
@@ -10,6 +10,7 @@
"""
import ast
+import pdb
import pickle
from qtpy.QtCore import Qt
@@ -86,7 +87,10 @@
# Save history to browse it later
if not (len(self._control.history) > 0
and self._control.history[-1] == line):
- self._control.history.append(line)
+ # do not save pdb commands
+ cmd = line.split(" ")[0]
+ if "do_" + cmd not in dir(pdb.Pdb):
+ self._control.history.append(line)
# This is the Spyder addition: add a %plot magic to display
# plots while debugging
| {"golden_diff": "diff --git a/spyder/widgets/ipythonconsole/debugging.py b/spyder/widgets/ipythonconsole/debugging.py\n--- a/spyder/widgets/ipythonconsole/debugging.py\n+++ b/spyder/widgets/ipythonconsole/debugging.py\n@@ -10,6 +10,7 @@\n \"\"\"\n \n import ast\n+import pdb\n import pickle\n \n from qtpy.QtCore import Qt\n@@ -86,7 +87,10 @@\n # Save history to browse it later\n if not (len(self._control.history) > 0\n and self._control.history[-1] == line):\n- self._control.history.append(line)\n+ # do not save pdb commands\n+ cmd = line.split(\" \")[0]\n+ if \"do_\" + cmd not in dir(pdb.Pdb):\n+ self._control.history.append(line)\n \n # This is the Spyder addition: add a %plot magic to display\n # plots while debugging\n", "issue": "Debugging history saves both python commands and pdb commands\n<!--- **PLEASE READ:** When submitting here, please ensure you've completed the following checklist and checked the boxes to confirm. Issue reports without it may be closed. Thanks! --->\r\n\r\n### Issue Report Checklist\r\n\r\n* [x] Searched the [issues page](https://github.com/spyder-ide/spyder/issues?q=is%3Aissue) for similar reports\r\n* [x] Read the relevant sections of the [Spyder Troubleshooting Guide](https://github.com/spyder-ide/spyder/wiki/Troubleshooting-Guide-and-FAQ) and followed its advice\r\n* [x] Reproduced the issue after updating with ``conda update spyder`` (or ``pip``, if not using Anaconda)\r\n* [x] Could not reproduce inside ``jupyter qtconsole`` (if console-related)\r\n* [ ] Tried basic troubleshooting (if a bug/error)\r\n * [ ] Restarted Spyder\r\n * [ ] Reset preferences with ``spyder --reset``\r\n * [ ] Reinstalled the latest version of [Anaconda](https://www.anaconda.com/download/)\r\n * [ ] Tried the other applicable steps from the Troubleshooting Guide\r\n* [x] Completed the **Problem Description**, **Steps to Reproduce** and **Version** sections below\r\n\r\n\r\n## Problem Description\r\n\r\nWhen debugging, I end up with many \"s\" and \"n\" in my debugging history, preventing me from finding what I am looking for. 
It would be nicer to only save python commands, or at least to have an option to do so.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"\nWidget that handles communications between a console in debugging\nmode and Spyder\n\"\"\"\n\nimport ast\nimport pickle\n\nfrom qtpy.QtCore import Qt\nfrom qtconsole.rich_jupyter_widget import RichJupyterWidget\n\nfrom spyder.config.base import PICKLE_PROTOCOL\nfrom spyder.config.main import CONF\nfrom spyder.py3compat import to_text_string\n\n\nclass DebuggingWidget(RichJupyterWidget):\n \"\"\"\n Widget with the necessary attributes and methods to handle\n communications between a console in debugging mode and\n Spyder\n \"\"\"\n\n # --- Public API --------------------------------------------------\n def write_to_stdin(self, line):\n \"\"\"Send raw characters to the IPython kernel through stdin\"\"\"\n self.kernel_client.input(line)\n\n def set_spyder_breakpoints(self, force=False):\n \"\"\"Set Spyder breakpoints into a debugging session\"\"\"\n if self._reading or force:\n breakpoints_dict = CONF.get('run', 'breakpoints', {})\n\n # We need to enclose pickled values in a list to be able to\n # send them to the kernel in Python 2\n serialiazed_breakpoints = [pickle.dumps(breakpoints_dict,\n protocol=PICKLE_PROTOCOL)]\n breakpoints = to_text_string(serialiazed_breakpoints)\n\n cmd = u\"!get_ipython().kernel._set_spyder_breakpoints({})\"\n self.kernel_client.input(cmd.format(breakpoints))\n\n def dbg_exec_magic(self, magic, args=''):\n \"\"\"Run an IPython magic while debugging.\"\"\"\n code = \"!get_ipython().kernel.shell.run_line_magic('{}', '{}')\".format(\n magic, args)\n self.kernel_client.input(code)\n\n def refresh_from_pdb(self, pdb_state):\n \"\"\"\n Refresh Variable Explorer and Editor from a Pdb session,\n after running any pdb command.\n\n See publish_pdb_state and notify_spyder in spyder_kernels\n \"\"\"\n if 'step' in pdb_state and 'fname' in pdb_state['step']:\n fname = pdb_state['step']['fname']\n lineno = pdb_state['step']['lineno']\n self.sig_pdb_step.emit(fname, lineno)\n\n if 'namespace_view' in pdb_state:\n self.sig_namespace_view.emit(ast.literal_eval(\n pdb_state['namespace_view']))\n\n if 'var_properties' in pdb_state:\n self.sig_var_properties.emit(ast.literal_eval(\n pdb_state['var_properties']))\n\n # ---- Private API (overrode by us) ----------------------------\n def _handle_input_request(self, msg):\n \"\"\"Save history and add a %plot magic.\"\"\"\n if self._hidden:\n raise RuntimeError('Request for raw input during hidden execution.')\n\n # Make sure that all output from the SUB channel has been processed\n # before entering readline mode.\n self.kernel_client.iopub_channel.flush()\n\n def callback(line):\n # Save history to browse it later\n if not (len(self._control.history) > 0\n and self._control.history[-1] == line):\n self._control.history.append(line)\n\n # This is the Spyder addition: add a %plot magic to display\n # plots while debugging\n if line.startswith('%plot '):\n line = line.split()[-1]\n code = \"__spy_code__ = get_ipython().run_cell('%s')\" % line\n self.kernel_client.input(code)\n else:\n self.kernel_client.input(line)\n if self._reading:\n self._reading = False\n self._readline(msg['content']['prompt'], callback=callback,\n password=msg['content']['password'])\n\n def _event_filter_console_keypress(self, event):\n \"\"\"Handle Key_Up/Key_Down while 
debugging.\"\"\"\n key = event.key()\n if self._reading:\n self._control.current_prompt_pos = self._prompt_pos\n if key == Qt.Key_Up:\n self._control.browse_history(backward=True)\n return True\n elif key == Qt.Key_Down:\n self._control.browse_history(backward=False)\n return True\n elif key in (Qt.Key_Return, Qt.Key_Enter):\n self._control.reset_search_pos()\n else:\n self._control.hist_wholeline = False\n return super(DebuggingWidget,\n self)._event_filter_console_keypress(event)\n else:\n return super(DebuggingWidget,\n self)._event_filter_console_keypress(event)\n", "path": "spyder/widgets/ipythonconsole/debugging.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"\nWidget that handles communications between a console in debugging\nmode and Spyder\n\"\"\"\n\nimport ast\nimport pdb\nimport pickle\n\nfrom qtpy.QtCore import Qt\nfrom qtconsole.rich_jupyter_widget import RichJupyterWidget\n\nfrom spyder.config.base import PICKLE_PROTOCOL\nfrom spyder.config.main import CONF\nfrom spyder.py3compat import to_text_string\n\n\nclass DebuggingWidget(RichJupyterWidget):\n \"\"\"\n Widget with the necessary attributes and methods to handle\n communications between a console in debugging mode and\n Spyder\n \"\"\"\n\n # --- Public API --------------------------------------------------\n def write_to_stdin(self, line):\n \"\"\"Send raw characters to the IPython kernel through stdin\"\"\"\n self.kernel_client.input(line)\n\n def set_spyder_breakpoints(self, force=False):\n \"\"\"Set Spyder breakpoints into a debugging session\"\"\"\n if self._reading or force:\n breakpoints_dict = CONF.get('run', 'breakpoints', {})\n\n # We need to enclose pickled values in a list to be able to\n # send them to the kernel in Python 2\n serialiazed_breakpoints = [pickle.dumps(breakpoints_dict,\n protocol=PICKLE_PROTOCOL)]\n breakpoints = to_text_string(serialiazed_breakpoints)\n\n cmd = u\"!get_ipython().kernel._set_spyder_breakpoints({})\"\n self.kernel_client.input(cmd.format(breakpoints))\n\n def dbg_exec_magic(self, magic, args=''):\n \"\"\"Run an IPython magic while debugging.\"\"\"\n code = \"!get_ipython().kernel.shell.run_line_magic('{}', '{}')\".format(\n magic, args)\n self.kernel_client.input(code)\n\n def refresh_from_pdb(self, pdb_state):\n \"\"\"\n Refresh Variable Explorer and Editor from a Pdb session,\n after running any pdb command.\n\n See publish_pdb_state and notify_spyder in spyder_kernels\n \"\"\"\n if 'step' in pdb_state and 'fname' in pdb_state['step']:\n fname = pdb_state['step']['fname']\n lineno = pdb_state['step']['lineno']\n self.sig_pdb_step.emit(fname, lineno)\n\n if 'namespace_view' in pdb_state:\n self.sig_namespace_view.emit(ast.literal_eval(\n pdb_state['namespace_view']))\n\n if 'var_properties' in pdb_state:\n self.sig_var_properties.emit(ast.literal_eval(\n pdb_state['var_properties']))\n\n # ---- Private API (overrode by us) ----------------------------\n def _handle_input_request(self, msg):\n \"\"\"Save history and add a %plot magic.\"\"\"\n if self._hidden:\n raise RuntimeError('Request for raw input during hidden execution.')\n\n # Make sure that all output from the SUB channel has been processed\n # before entering readline mode.\n self.kernel_client.iopub_channel.flush()\n\n def callback(line):\n # Save history to browse it later\n if not (len(self._control.history) > 0\n and self._control.history[-1] == line):\n # do 
not save pdb commands\n cmd = line.split(\" \")[0]\n if \"do_\" + cmd not in dir(pdb.Pdb):\n self._control.history.append(line)\n\n # This is the Spyder addition: add a %plot magic to display\n # plots while debugging\n if line.startswith('%plot '):\n line = line.split()[-1]\n code = \"__spy_code__ = get_ipython().run_cell('%s')\" % line\n self.kernel_client.input(code)\n else:\n self.kernel_client.input(line)\n if self._reading:\n self._reading = False\n self._readline(msg['content']['prompt'], callback=callback,\n password=msg['content']['password'])\n\n def _event_filter_console_keypress(self, event):\n \"\"\"Handle Key_Up/Key_Down while debugging.\"\"\"\n key = event.key()\n if self._reading:\n self._control.current_prompt_pos = self._prompt_pos\n if key == Qt.Key_Up:\n self._control.browse_history(backward=True)\n return True\n elif key == Qt.Key_Down:\n self._control.browse_history(backward=False)\n return True\n elif key in (Qt.Key_Return, Qt.Key_Enter):\n self._control.reset_search_pos()\n else:\n self._control.hist_wholeline = False\n return super(DebuggingWidget,\n self)._event_filter_console_keypress(event)\n else:\n return super(DebuggingWidget,\n self)._event_filter_console_keypress(event)\n", "path": "spyder/widgets/ipythonconsole/debugging.py"}]} | 1,835 | 214 |
gh_patches_debug_26800 | rasdani/github-patches | git_diff | gammapy__gammapy-4657 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Models are lost in FluxProfileEstimator
**Gammapy version**
gammapy 1.0.1
**Bug description**
All models attached to my datasets are discarded when I calculate a flux profile with `FluxProfileEstimator`.
**Expected behavior**
The predicted counts from the models should become part of the background when `to_spectrum_datasets` is called in `FluxProfileEstimator.run()`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gammapy/estimators/points/profile.py`
Content:
```
1 # Licensed under a 3-clause BSD style license - see LICENSE.rst
2 """Tools to create profiles (i.e. 1D "slices" from 2D images)."""
3 from astropy import units as u
4 from regions import CircleAnnulusSkyRegion
5 from gammapy.datasets import Datasets
6 from gammapy.maps import MapAxis
7 from gammapy.modeling.models import PowerLawSpectralModel, SkyModel
8 from .core import FluxPoints
9 from .sed import FluxPointsEstimator
10
11 __all__ = ["FluxProfileEstimator"]
12
13
14 class FluxProfileEstimator(FluxPointsEstimator):
15 """Estimate flux profiles
16
17 Parameters
18 ----------
19 regions : list of `~regions.SkyRegion`
20 regions to use
21 spectrum : `~gammapy.modeling.models.SpectralModel` (optional)
22 Spectral model to compute the fluxes or brightness.
23 Default is power-law with spectral index of 2.
24 **kwargs : dict
25 Keywords forwarded to the `FluxPointsEstimator` (see documentation
26 there for further description of valid keywords)
27
28 Examples
29 --------
30 This example shows how to compute a counts profile for the Fermi galactic
31 center region::
32
33 >>> from astropy import units as u
34 >>> from astropy.coordinates import SkyCoord
35 >>> from gammapy.data import GTI
36 >>> from gammapy.estimators import FluxProfileEstimator
37 >>> from gammapy.utils.regions import make_orthogonal_rectangle_sky_regions
38 >>> from gammapy.datasets import MapDataset
39 >>> from gammapy.maps import RegionGeom
40
41 >>> # load example data
42 >>> filename = "$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc.fits.gz"
43 >>> dataset = MapDataset.read(filename, name="fermi-dataset")
44
45 >>> # configuration
46 >>> dataset.gti = GTI.create("0s", "1e7s", "2010-01-01")
47
48 >>> # creation of the boxes and axis
49 >>> start_pos = SkyCoord("-1d", "0d", frame='galactic')
50 >>> end_pos = SkyCoord("1d", "0d", frame='galactic')
51
52 >>> regions = make_orthogonal_rectangle_sky_regions(
53 start_pos=start_pos,
54 end_pos=end_pos,
55 wcs=dataset.counts.geom.wcs,
56 height=2 * u.deg,
57 nbin=21
58 )
59
60 >>> # set up profile estimator and run
61 >>> prof_maker = FluxProfileEstimator(regions=regions, energy_edges=[10, 2000] * u.GeV)
62 >>> fermi_prof = prof_maker.run(dataset)
63 >>> print(fermi_prof)
64 FluxPoints
65 ----------
66 <BLANKLINE>
67 geom : RegionGeom
68 axes : ['lon', 'lat', 'energy', 'projected-distance']
69 shape : (1, 1, 1, 21)
70 quantities : ['norm', 'norm_err', 'ts', 'npred', 'npred_excess', 'stat', 'counts', 'success'] # noqa: E501
71 ref. model : pl
72 n_sigma : 1
73 n_sigma_ul : 2
74 sqrt_ts_threshold_ul : 2
75 sed type init : likelihood
76
77 """
78
79 tag = "FluxProfileEstimator"
80
81 def __init__(self, regions, spectrum=None, **kwargs):
82 if len(regions) <= 1:
83 raise ValueError(
84 "Please provide at least two regions for flux profile estimation."
85 )
86
87 self.regions = regions
88
89 if spectrum is None:
90 spectrum = PowerLawSpectralModel()
91
92 self.spectrum = spectrum
93 super().__init__(**kwargs)
94
95 @property
96 def projected_distance_axis(self):
97 """Get projected distance from the first region.
98
99 For normal region this is defined as the distance from the
100 center of the region. For annulus shaped regions it is the
101 mean between the inner and outer radius.
102
103 Returns
104 -------
105 axis : `MapAxis`
106 Projected distance axis
107 """
108 distances = []
109 center = self.regions[0].center
110
111 for idx, region in enumerate(self.regions):
112 if isinstance(region, CircleAnnulusSkyRegion):
113 distance = (region.inner_radius + region.outer_radius) / 2.0
114 else:
115 distance = center.separation(region.center)
116
117 distances.append(distance)
118
119 return MapAxis.from_nodes(
120 u.Quantity(distances, "deg"), name="projected-distance"
121 )
122
123 def run(self, datasets):
124 """Run flux profile estimation
125
126 Parameters
127 ----------
128 datasets : list of `~gammapy.datasets.MapDataset`
129 Map datasets.
130
131 Returns
132 -------
133 profile : `~gammapy.estimators.FluxPoints`
134 Profile flux points.
135 """
136 datasets = Datasets(datasets=datasets)
137
138 maps = []
139
140 for region in self.regions:
141 datasets_to_fit = datasets.to_spectrum_datasets(region=region)
142 datasets_to_fit.models = SkyModel(self.spectrum, name="test-source")
143 fp = super().run(datasets_to_fit)
144 maps.append(fp)
145
146 return FluxPoints.from_stack(
147 maps=maps,
148 axis=self.projected_distance_axis,
149 )
150
151 @property
152 def config_parameters(self):
153 """Config parameters"""
154 pars = self.__dict__.copy()
155 pars = {key.strip("_"): value for key, value in pars.items()}
156 pars.pop("regions")
157 return pars
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gammapy/estimators/points/profile.py b/gammapy/estimators/points/profile.py
--- a/gammapy/estimators/points/profile.py
+++ b/gammapy/estimators/points/profile.py
@@ -1,5 +1,6 @@
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tools to create profiles (i.e. 1D "slices" from 2D images)."""
+import numpy as np
from astropy import units as u
from regions import CircleAnnulusSkyRegion
from gammapy.datasets import Datasets
@@ -133,12 +134,20 @@
profile : `~gammapy.estimators.FluxPoints`
Profile flux points.
"""
+
datasets = Datasets(datasets=datasets)
maps = []
-
for region in self.regions:
datasets_to_fit = datasets.to_spectrum_datasets(region=region)
+ for dataset_spec, dataset_map in zip(datasets_to_fit, datasets):
+ dataset_spec.background.data = (
+ dataset_map.npred()
+ .to_region_nd_map(
+ region, func=np.sum, weights=dataset_map.mask_safe
+ )
+ .data
+ )
datasets_to_fit.models = SkyModel(self.spectrum, name="test-source")
fp = super().run(datasets_to_fit)
maps.append(fp)
| {"golden_diff": "diff --git a/gammapy/estimators/points/profile.py b/gammapy/estimators/points/profile.py\n--- a/gammapy/estimators/points/profile.py\n+++ b/gammapy/estimators/points/profile.py\n@@ -1,5 +1,6 @@\n # Licensed under a 3-clause BSD style license - see LICENSE.rst\n \"\"\"Tools to create profiles (i.e. 1D \"slices\" from 2D images).\"\"\"\n+import numpy as np\n from astropy import units as u\n from regions import CircleAnnulusSkyRegion\n from gammapy.datasets import Datasets\n@@ -133,12 +134,20 @@\n profile : `~gammapy.estimators.FluxPoints`\n Profile flux points.\n \"\"\"\n+\n datasets = Datasets(datasets=datasets)\n \n maps = []\n-\n for region in self.regions:\n datasets_to_fit = datasets.to_spectrum_datasets(region=region)\n+ for dataset_spec, dataset_map in zip(datasets_to_fit, datasets):\n+ dataset_spec.background.data = (\n+ dataset_map.npred()\n+ .to_region_nd_map(\n+ region, func=np.sum, weights=dataset_map.mask_safe\n+ )\n+ .data\n+ )\n datasets_to_fit.models = SkyModel(self.spectrum, name=\"test-source\")\n fp = super().run(datasets_to_fit)\n maps.append(fp)\n", "issue": "Models are lost in FluxProfileEstimator\n**Gammapy version**\r\ngammapy 1.0.1\r\n\r\n**Bug description**\r\nAll models attached to my datasets are discarded when I calculate a flux profile with `FluxProfileEstimator`.\r\n\r\n**Expected behavior**\r\nThe predicted counts from the models should become part of the background when `to_spectrum_datasets` is called in `FluxProfileEstimator.run()`\r\n\r\n\r\n\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Tools to create profiles (i.e. 1D \"slices\" from 2D images).\"\"\"\nfrom astropy import units as u\nfrom regions import CircleAnnulusSkyRegion\nfrom gammapy.datasets import Datasets\nfrom gammapy.maps import MapAxis\nfrom gammapy.modeling.models import PowerLawSpectralModel, SkyModel\nfrom .core import FluxPoints\nfrom .sed import FluxPointsEstimator\n\n__all__ = [\"FluxProfileEstimator\"]\n\n\nclass FluxProfileEstimator(FluxPointsEstimator):\n \"\"\"Estimate flux profiles\n\n Parameters\n ----------\n regions : list of `~regions.SkyRegion`\n regions to use\n spectrum : `~gammapy.modeling.models.SpectralModel` (optional)\n Spectral model to compute the fluxes or brightness.\n Default is power-law with spectral index of 2.\n **kwargs : dict\n Keywords forwarded to the `FluxPointsEstimator` (see documentation\n there for further description of valid keywords)\n\n Examples\n --------\n This example shows how to compute a counts profile for the Fermi galactic\n center region::\n\n >>> from astropy import units as u\n >>> from astropy.coordinates import SkyCoord\n >>> from gammapy.data import GTI\n >>> from gammapy.estimators import FluxProfileEstimator\n >>> from gammapy.utils.regions import make_orthogonal_rectangle_sky_regions\n >>> from gammapy.datasets import MapDataset\n >>> from gammapy.maps import RegionGeom\n\n >>> # load example data\n >>> filename = \"$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc.fits.gz\"\n >>> dataset = MapDataset.read(filename, name=\"fermi-dataset\")\n\n >>> # configuration\n >>> dataset.gti = GTI.create(\"0s\", \"1e7s\", \"2010-01-01\")\n\n >>> # creation of the boxes and axis\n >>> start_pos = SkyCoord(\"-1d\", \"0d\", frame='galactic')\n >>> end_pos = SkyCoord(\"1d\", \"0d\", frame='galactic')\n\n >>> regions = make_orthogonal_rectangle_sky_regions(\n start_pos=start_pos,\n end_pos=end_pos,\n wcs=dataset.counts.geom.wcs,\n height=2 * u.deg,\n nbin=21\n )\n\n >>> # set 
up profile estimator and run\n >>> prof_maker = FluxProfileEstimator(regions=regions, energy_edges=[10, 2000] * u.GeV)\n >>> fermi_prof = prof_maker.run(dataset)\n >>> print(fermi_prof)\n FluxPoints\n ----------\n <BLANKLINE>\n geom : RegionGeom\n axes : ['lon', 'lat', 'energy', 'projected-distance']\n shape : (1, 1, 1, 21)\n quantities : ['norm', 'norm_err', 'ts', 'npred', 'npred_excess', 'stat', 'counts', 'success'] # noqa: E501\n ref. model : pl\n n_sigma : 1\n n_sigma_ul : 2\n sqrt_ts_threshold_ul : 2\n sed type init : likelihood\n\n \"\"\"\n\n tag = \"FluxProfileEstimator\"\n\n def __init__(self, regions, spectrum=None, **kwargs):\n if len(regions) <= 1:\n raise ValueError(\n \"Please provide at least two regions for flux profile estimation.\"\n )\n\n self.regions = regions\n\n if spectrum is None:\n spectrum = PowerLawSpectralModel()\n\n self.spectrum = spectrum\n super().__init__(**kwargs)\n\n @property\n def projected_distance_axis(self):\n \"\"\"Get projected distance from the first region.\n\n For normal region this is defined as the distance from the\n center of the region. For annulus shaped regions it is the\n mean between the inner and outer radius.\n\n Returns\n -------\n axis : `MapAxis`\n Projected distance axis\n \"\"\"\n distances = []\n center = self.regions[0].center\n\n for idx, region in enumerate(self.regions):\n if isinstance(region, CircleAnnulusSkyRegion):\n distance = (region.inner_radius + region.outer_radius) / 2.0\n else:\n distance = center.separation(region.center)\n\n distances.append(distance)\n\n return MapAxis.from_nodes(\n u.Quantity(distances, \"deg\"), name=\"projected-distance\"\n )\n\n def run(self, datasets):\n \"\"\"Run flux profile estimation\n\n Parameters\n ----------\n datasets : list of `~gammapy.datasets.MapDataset`\n Map datasets.\n\n Returns\n -------\n profile : `~gammapy.estimators.FluxPoints`\n Profile flux points.\n \"\"\"\n datasets = Datasets(datasets=datasets)\n\n maps = []\n\n for region in self.regions:\n datasets_to_fit = datasets.to_spectrum_datasets(region=region)\n datasets_to_fit.models = SkyModel(self.spectrum, name=\"test-source\")\n fp = super().run(datasets_to_fit)\n maps.append(fp)\n\n return FluxPoints.from_stack(\n maps=maps,\n axis=self.projected_distance_axis,\n )\n\n @property\n def config_parameters(self):\n \"\"\"Config parameters\"\"\"\n pars = self.__dict__.copy()\n pars = {key.strip(\"_\"): value for key, value in pars.items()}\n pars.pop(\"regions\")\n return pars\n", "path": "gammapy/estimators/points/profile.py"}], "after_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Tools to create profiles (i.e. 
1D \"slices\" from 2D images).\"\"\"\nimport numpy as np\nfrom astropy import units as u\nfrom regions import CircleAnnulusSkyRegion\nfrom gammapy.datasets import Datasets\nfrom gammapy.maps import MapAxis\nfrom gammapy.modeling.models import PowerLawSpectralModel, SkyModel\nfrom .core import FluxPoints\nfrom .sed import FluxPointsEstimator\n\n__all__ = [\"FluxProfileEstimator\"]\n\n\nclass FluxProfileEstimator(FluxPointsEstimator):\n \"\"\"Estimate flux profiles\n\n Parameters\n ----------\n regions : list of `~regions.SkyRegion`\n regions to use\n spectrum : `~gammapy.modeling.models.SpectralModel` (optional)\n Spectral model to compute the fluxes or brightness.\n Default is power-law with spectral index of 2.\n **kwargs : dict\n Keywords forwarded to the `FluxPointsEstimator` (see documentation\n there for further description of valid keywords)\n\n Examples\n --------\n This example shows how to compute a counts profile for the Fermi galactic\n center region::\n\n >>> from astropy import units as u\n >>> from astropy.coordinates import SkyCoord\n >>> from gammapy.data import GTI\n >>> from gammapy.estimators import FluxProfileEstimator\n >>> from gammapy.utils.regions import make_orthogonal_rectangle_sky_regions\n >>> from gammapy.datasets import MapDataset\n >>> from gammapy.maps import RegionGeom\n\n >>> # load example data\n >>> filename = \"$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc.fits.gz\"\n >>> dataset = MapDataset.read(filename, name=\"fermi-dataset\")\n\n >>> # configuration\n >>> dataset.gti = GTI.create(\"0s\", \"1e7s\", \"2010-01-01\")\n\n >>> # creation of the boxes and axis\n >>> start_pos = SkyCoord(\"-1d\", \"0d\", frame='galactic')\n >>> end_pos = SkyCoord(\"1d\", \"0d\", frame='galactic')\n\n >>> regions = make_orthogonal_rectangle_sky_regions(\n start_pos=start_pos,\n end_pos=end_pos,\n wcs=dataset.counts.geom.wcs,\n height=2 * u.deg,\n nbin=21\n )\n\n >>> # set up profile estimator and run\n >>> prof_maker = FluxProfileEstimator(regions=regions, energy_edges=[10, 2000] * u.GeV)\n >>> fermi_prof = prof_maker.run(dataset)\n >>> print(fermi_prof)\n FluxPoints\n ----------\n <BLANKLINE>\n geom : RegionGeom\n axes : ['lon', 'lat', 'energy', 'projected-distance']\n shape : (1, 1, 1, 21)\n quantities : ['norm', 'norm_err', 'ts', 'npred', 'npred_excess', 'stat', 'counts', 'success'] # noqa: E501\n ref. model : pl\n n_sigma : 1\n n_sigma_ul : 2\n sqrt_ts_threshold_ul : 2\n sed type init : likelihood\n\n \"\"\"\n\n tag = \"FluxProfileEstimator\"\n\n def __init__(self, regions, spectrum=None, **kwargs):\n if len(regions) <= 1:\n raise ValueError(\n \"Please provide at least two regions for flux profile estimation.\"\n )\n\n self.regions = regions\n\n if spectrum is None:\n spectrum = PowerLawSpectralModel()\n\n self.spectrum = spectrum\n super().__init__(**kwargs)\n\n @property\n def projected_distance_axis(self):\n \"\"\"Get projected distance from the first region.\n\n For normal region this is defined as the distance from the\n center of the region. 
For annulus shaped regions it is the\n mean between the inner and outer radius.\n\n Returns\n -------\n axis : `MapAxis`\n Projected distance axis\n \"\"\"\n distances = []\n center = self.regions[0].center\n\n for idx, region in enumerate(self.regions):\n if isinstance(region, CircleAnnulusSkyRegion):\n distance = (region.inner_radius + region.outer_radius) / 2.0\n else:\n distance = center.separation(region.center)\n\n distances.append(distance)\n\n return MapAxis.from_nodes(\n u.Quantity(distances, \"deg\"), name=\"projected-distance\"\n )\n\n def run(self, datasets):\n \"\"\"Run flux profile estimation\n\n Parameters\n ----------\n datasets : list of `~gammapy.datasets.MapDataset`\n Map datasets.\n\n Returns\n -------\n profile : `~gammapy.estimators.FluxPoints`\n Profile flux points.\n \"\"\"\n\n datasets = Datasets(datasets=datasets)\n\n maps = []\n for region in self.regions:\n datasets_to_fit = datasets.to_spectrum_datasets(region=region)\n for dataset_spec, dataset_map in zip(datasets_to_fit, datasets):\n dataset_spec.background.data = (\n dataset_map.npred()\n .to_region_nd_map(\n region, func=np.sum, weights=dataset_map.mask_safe\n )\n .data\n )\n datasets_to_fit.models = SkyModel(self.spectrum, name=\"test-source\")\n fp = super().run(datasets_to_fit)\n maps.append(fp)\n\n return FluxPoints.from_stack(\n maps=maps,\n axis=self.projected_distance_axis,\n )\n\n @property\n def config_parameters(self):\n \"\"\"Config parameters\"\"\"\n pars = self.__dict__.copy()\n pars = {key.strip(\"_\"): value for key, value in pars.items()}\n pars.pop(\"regions\")\n return pars\n", "path": "gammapy/estimators/points/profile.py"}]} | 1,954 | 312 |
gh_patches_debug_3819 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2395 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
single module project without timeline is missing any description
my single module project [https://meinberlin-dev.liqd.net/projects/schillerkasse-ihre-kiezkasse-fur-den-schillerkiez/](url), which uses the Kiezkasse module, should have phase information. It is currently running.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/contrib/mixins.py`
Content:
```
1 from django import forms
2 from django.db.models import Max
3 from django.db.models import Min
4 from django.db.models import Q
5 from django.urls import resolve
6 from django.utils.translation import ugettext_lazy as _
7 from django.views import generic
8
9 from adhocracy4.modules.models import Module
10
11 RIGHT_OF_USE_LABEL = _('I hereby confirm that the copyrights for this '
12 'photo are with me or that I have received '
13 'rights of use from the author. I also confirm '
14 'that the privacy rights of depicted third persons '
15 'are not violated. ')
16
17
18 class DynamicChoicesMixin(object):
19 """Dynamic choices mixin.
20
21 Add callable functionality to filters that support the ``choices``
22 argument. If the ``choices`` is callable, then it **must** accept the
23 ``view`` object as a single argument.
24 The ``view`` object may be None if the parent FilterSet is not class based.
25
26 This is useful for dymanic ``choices`` determined properties on the
27 ``view`` object.
28 """
29
30 def __init__(self, *args, **kwargs):
31 self.choices = kwargs.pop('choices')
32 super().__init__(*args, **kwargs)
33
34 def get_choices(self, view):
35 choices = self.choices
36
37 if callable(choices):
38 return choices(view)
39 return choices
40
41 @property
42 def field(self):
43 choices = self.get_choices(getattr(self, 'view', None))
44
45 if choices is not None:
46 self.extra['choices'] = choices
47
48 return super(DynamicChoicesMixin, self).field
49
50
51 class ImageRightOfUseMixin(forms.ModelForm):
52 right_of_use = forms.BooleanField(required=False, label=RIGHT_OF_USE_LABEL)
53
54 def __init__(self, *args, **kwargs):
55 super().__init__(*args, **kwargs)
56 if self.instance.image:
57 self.initial['right_of_use'] = True
58
59 def clean(self):
60 cleaned_data = super().clean()
61 image = cleaned_data.get('image')
62 right_of_use = cleaned_data.get('right_of_use')
63 if image and not right_of_use:
64 self.add_error('right_of_use',
65 _("You want to upload an image. "
66 "Please check that you have the "
67 "right of use for the image."))
68
69
70 class ModuleClusterMixin:
71
72 def _get_module_dict(self, count, start_date, end_date):
73 return {
74 'count': count,
75 'date': start_date,
76 'end_date': end_date,
77 'modules': []
78 }
79
80 def get_module_clusters(self, modules):
81 modules = modules\
82 .exclude(Q(start_date=None) | Q(end_date=None))
83 clusters = []
84 try:
85 start_date = modules.first().start_date
86 end_date = modules.first().end_date
87 count = 1
88 first_cluster = self._get_module_dict(
89 count, start_date, end_date)
90 first_cluster['modules'].append(modules.first())
91 current_cluster = first_cluster
92 clusters.append(first_cluster)
93
94 for module in modules[1:]:
95 if module.start_date > end_date:
96 start_date = module.start_date
97 end_date = module.end_date
98 count += 1
99 next_cluster = self._get_module_dict(
100 count, start_date, end_date)
101 next_cluster['modules'].append(module)
102 current_cluster = next_cluster
103 clusters.append(next_cluster)
104 else:
105 current_cluster['modules'].append(module)
106 if module.end_date > end_date:
107 end_date = module.end_date
108 current_cluster['end_date'] = end_date
109 except AttributeError:
110 return clusters
111 if len(clusters) == 1:
112 clusters[0]['title'] = _('Online Participation')
113 return clusters
114
115
116 class DisplayProjectOrModuleMixin(generic.base.ContextMixin,
117 ModuleClusterMixin):
118
119 def module_clusters(self, modules):
120 return super().get_module_clusters(modules)
121
122 @property
123 def url_name(self):
124 return resolve(self.request.path_info).url_name
125
126 @property
127 def other_modules(self):
128 modules = Module.objects.filter(project=self.project)\
129 .annotate(start_date=Min('phase__start_date'))\
130 .annotate(end_date=Max('phase__end_date'))\
131 .order_by('start_date')
132
133 for cluster in self.module_clusters(modules):
134 if self.module in cluster['modules']:
135 idx = cluster['modules'].index(self.module)
136 modules = cluster['modules']
137 return modules, idx
138 return []
139
140 @property
141 def extends(self):
142 if self.url_name == 'module-detail':
143 return 'a4modules/module_detail.html'
144 return 'meinberlin_projects/project_detail.html'
145
146 def get_context_data(self, **kwargs):
147 context = super().get_context_data(**kwargs)
148 context['url_name'] = self.url_name
149 context['extends'] = self.extends
150 if self.url_name == 'module-detail':
151 cluster, idx = self.other_modules
152 next_module = None
153 previous_module = None
154 try:
155 next_module = cluster[idx + 1]
156 except IndexError:
157 pass
158 try:
159 previous_module = cluster[idx - 1]
160 except IndexError:
161 pass
162 context['other_modules'] = cluster
163 context['index'] = idx + 1
164 context['next'] = next_module
165 context['previous'] = previous_module
166 return context
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/contrib/mixins.py b/meinberlin/apps/contrib/mixins.py
--- a/meinberlin/apps/contrib/mixins.py
+++ b/meinberlin/apps/contrib/mixins.py
@@ -156,7 +156,8 @@
except IndexError:
pass
try:
- previous_module = cluster[idx - 1]
+ if idx > 0:
+ previous_module = cluster[idx - 1]
except IndexError:
pass
context['other_modules'] = cluster
| {"golden_diff": "diff --git a/meinberlin/apps/contrib/mixins.py b/meinberlin/apps/contrib/mixins.py\n--- a/meinberlin/apps/contrib/mixins.py\n+++ b/meinberlin/apps/contrib/mixins.py\n@@ -156,7 +156,8 @@\n except IndexError:\n pass\n try:\n- previous_module = cluster[idx - 1]\n+ if idx > 0:\n+ previous_module = cluster[idx - 1]\n except IndexError:\n pass\n context['other_modules'] = cluster\n", "issue": "single module project without timeline is missing any discription\nmy single module project [https://meinberlin-dev.liqd.net/projects/schillerkasse-ihre-kiezkasse-fur-den-schillerkiez/](url) which is module Kiezkasse should have a phase information. It is currently running.\r\n\r\n\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.db.models import Max\nfrom django.db.models import Min\nfrom django.db.models import Q\nfrom django.urls import resolve\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.modules.models import Module\n\nRIGHT_OF_USE_LABEL = _('I hereby confirm that the copyrights for this '\n 'photo are with me or that I have received '\n 'rights of use from the author. I also confirm '\n 'that the privacy rights of depicted third persons '\n 'are not violated. ')\n\n\nclass DynamicChoicesMixin(object):\n \"\"\"Dynamic choices mixin.\n\n Add callable functionality to filters that support the ``choices``\n argument. If the ``choices`` is callable, then it **must** accept the\n ``view`` object as a single argument.\n The ``view`` object may be None if the parent FilterSet is not class based.\n\n This is useful for dymanic ``choices`` determined properties on the\n ``view`` object.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.choices = kwargs.pop('choices')\n super().__init__(*args, **kwargs)\n\n def get_choices(self, view):\n choices = self.choices\n\n if callable(choices):\n return choices(view)\n return choices\n\n @property\n def field(self):\n choices = self.get_choices(getattr(self, 'view', None))\n\n if choices is not None:\n self.extra['choices'] = choices\n\n return super(DynamicChoicesMixin, self).field\n\n\nclass ImageRightOfUseMixin(forms.ModelForm):\n right_of_use = forms.BooleanField(required=False, label=RIGHT_OF_USE_LABEL)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.instance.image:\n self.initial['right_of_use'] = True\n\n def clean(self):\n cleaned_data = super().clean()\n image = cleaned_data.get('image')\n right_of_use = cleaned_data.get('right_of_use')\n if image and not right_of_use:\n self.add_error('right_of_use',\n _(\"You want to upload an image. 
\"\n \"Please check that you have the \"\n \"right of use for the image.\"))\n\n\nclass ModuleClusterMixin:\n\n def _get_module_dict(self, count, start_date, end_date):\n return {\n 'count': count,\n 'date': start_date,\n 'end_date': end_date,\n 'modules': []\n }\n\n def get_module_clusters(self, modules):\n modules = modules\\\n .exclude(Q(start_date=None) | Q(end_date=None))\n clusters = []\n try:\n start_date = modules.first().start_date\n end_date = modules.first().end_date\n count = 1\n first_cluster = self._get_module_dict(\n count, start_date, end_date)\n first_cluster['modules'].append(modules.first())\n current_cluster = first_cluster\n clusters.append(first_cluster)\n\n for module in modules[1:]:\n if module.start_date > end_date:\n start_date = module.start_date\n end_date = module.end_date\n count += 1\n next_cluster = self._get_module_dict(\n count, start_date, end_date)\n next_cluster['modules'].append(module)\n current_cluster = next_cluster\n clusters.append(next_cluster)\n else:\n current_cluster['modules'].append(module)\n if module.end_date > end_date:\n end_date = module.end_date\n current_cluster['end_date'] = end_date\n except AttributeError:\n return clusters\n if len(clusters) == 1:\n clusters[0]['title'] = _('Online Participation')\n return clusters\n\n\nclass DisplayProjectOrModuleMixin(generic.base.ContextMixin,\n ModuleClusterMixin):\n\n def module_clusters(self, modules):\n return super().get_module_clusters(modules)\n\n @property\n def url_name(self):\n return resolve(self.request.path_info).url_name\n\n @property\n def other_modules(self):\n modules = Module.objects.filter(project=self.project)\\\n .annotate(start_date=Min('phase__start_date'))\\\n .annotate(end_date=Max('phase__end_date'))\\\n .order_by('start_date')\n\n for cluster in self.module_clusters(modules):\n if self.module in cluster['modules']:\n idx = cluster['modules'].index(self.module)\n modules = cluster['modules']\n return modules, idx\n return []\n\n @property\n def extends(self):\n if self.url_name == 'module-detail':\n return 'a4modules/module_detail.html'\n return 'meinberlin_projects/project_detail.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['url_name'] = self.url_name\n context['extends'] = self.extends\n if self.url_name == 'module-detail':\n cluster, idx = self.other_modules\n next_module = None\n previous_module = None\n try:\n next_module = cluster[idx + 1]\n except IndexError:\n pass\n try:\n previous_module = cluster[idx - 1]\n except IndexError:\n pass\n context['other_modules'] = cluster\n context['index'] = idx + 1\n context['next'] = next_module\n context['previous'] = previous_module\n return context\n", "path": "meinberlin/apps/contrib/mixins.py"}], "after_files": [{"content": "from django import forms\nfrom django.db.models import Max\nfrom django.db.models import Min\nfrom django.db.models import Q\nfrom django.urls import resolve\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.modules.models import Module\n\nRIGHT_OF_USE_LABEL = _('I hereby confirm that the copyrights for this '\n 'photo are with me or that I have received '\n 'rights of use from the author. I also confirm '\n 'that the privacy rights of depicted third persons '\n 'are not violated. ')\n\n\nclass DynamicChoicesMixin(object):\n \"\"\"Dynamic choices mixin.\n\n Add callable functionality to filters that support the ``choices``\n argument. 
If the ``choices`` is callable, then it **must** accept the\n ``view`` object as a single argument.\n The ``view`` object may be None if the parent FilterSet is not class based.\n\n This is useful for dymanic ``choices`` determined properties on the\n ``view`` object.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.choices = kwargs.pop('choices')\n super().__init__(*args, **kwargs)\n\n def get_choices(self, view):\n choices = self.choices\n\n if callable(choices):\n return choices(view)\n return choices\n\n @property\n def field(self):\n choices = self.get_choices(getattr(self, 'view', None))\n\n if choices is not None:\n self.extra['choices'] = choices\n\n return super(DynamicChoicesMixin, self).field\n\n\nclass ImageRightOfUseMixin(forms.ModelForm):\n right_of_use = forms.BooleanField(required=False, label=RIGHT_OF_USE_LABEL)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.instance.image:\n self.initial['right_of_use'] = True\n\n def clean(self):\n cleaned_data = super().clean()\n image = cleaned_data.get('image')\n right_of_use = cleaned_data.get('right_of_use')\n if image and not right_of_use:\n self.add_error('right_of_use',\n _(\"You want to upload an image. \"\n \"Please check that you have the \"\n \"right of use for the image.\"))\n\n\nclass ModuleClusterMixin:\n\n def _get_module_dict(self, count, start_date, end_date):\n return {\n 'count': count,\n 'date': start_date,\n 'end_date': end_date,\n 'modules': []\n }\n\n def get_module_clusters(self, modules):\n modules = modules\\\n .exclude(Q(start_date=None) | Q(end_date=None))\n clusters = []\n try:\n start_date = modules.first().start_date\n end_date = modules.first().end_date\n count = 1\n first_cluster = self._get_module_dict(\n count, start_date, end_date)\n first_cluster['modules'].append(modules.first())\n current_cluster = first_cluster\n clusters.append(first_cluster)\n\n for module in modules[1:]:\n if module.start_date > end_date:\n start_date = module.start_date\n end_date = module.end_date\n count += 1\n next_cluster = self._get_module_dict(\n count, start_date, end_date)\n next_cluster['modules'].append(module)\n current_cluster = next_cluster\n clusters.append(next_cluster)\n else:\n current_cluster['modules'].append(module)\n if module.end_date > end_date:\n end_date = module.end_date\n current_cluster['end_date'] = end_date\n except AttributeError:\n return clusters\n if len(clusters) == 1:\n clusters[0]['title'] = _('Online Participation')\n return clusters\n\n\nclass DisplayProjectOrModuleMixin(generic.base.ContextMixin,\n ModuleClusterMixin):\n\n def module_clusters(self, modules):\n return super().get_module_clusters(modules)\n\n @property\n def url_name(self):\n return resolve(self.request.path_info).url_name\n\n @property\n def other_modules(self):\n modules = Module.objects.filter(project=self.project)\\\n .annotate(start_date=Min('phase__start_date'))\\\n .annotate(end_date=Max('phase__end_date'))\\\n .order_by('start_date')\n\n for cluster in self.module_clusters(modules):\n if self.module in cluster['modules']:\n idx = cluster['modules'].index(self.module)\n modules = cluster['modules']\n return modules, idx\n return []\n\n @property\n def extends(self):\n if self.url_name == 'module-detail':\n return 'a4modules/module_detail.html'\n return 'meinberlin_projects/project_detail.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['url_name'] = self.url_name\n context['extends'] = self.extends\n if 
self.url_name == 'module-detail':\n cluster, idx = self.other_modules\n next_module = None\n previous_module = None\n try:\n next_module = cluster[idx + 1]\n except IndexError:\n pass\n try:\n if idx > 0:\n previous_module = cluster[idx - 1]\n except IndexError:\n pass\n context['other_modules'] = cluster\n context['index'] = idx + 1\n context['next'] = next_module\n context['previous'] = previous_module\n return context\n", "path": "meinberlin/apps/contrib/mixins.py"}]} | 1,980 | 126 |
gh_patches_debug_55161 | rasdani/github-patches | git_diff | spack__spack-5031 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adios2 missing zeromq dependency
@ax3l : I did a normal build without specifying any variants
Call Stack (most recent call first):
..../cmake-3.9/Modules/FindPackageHandleStandardArgs.cmake:377 (_FPHSA_FAILURE_MESSAGE)
cmake/FindZeroMQ.cmake:44 (find_package_handle_standard_args)
source/dataman/CMakeLists.txt:51 (find_package)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `var/spack/repos/builtin/packages/adios2/package.py`
Content:
```
1 ##############################################################################
2 # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the NOTICE and LICENSE files for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25 from spack import *
26
27
28 class Adios2(CMakePackage):
29 """Next generation of ADIOS developed in the Exascale Computing Program"""
30
31 homepage = "https://www.olcf.ornl.gov/center-projects/adios/"
32 url = "https://github.com/ornladios/ADIOS2/archive/v2.0.0.tar.gz"
33
34 version('develop', branch='master',
35 git='https://github.com/ornladios/ADIOS2.git')
36
37 version('2.0.0', '019115e5c6ac28bd0f4201f590f5d994')
38
39 variant('shared', default=True,
40 description='Also build shared libraries')
41 variant('mpi', default=True,
42 description='Enable MPI')
43 # transforms (not yet implemented)
44 # variant('bzip2', default=True,
45 # description='Enable BZip2 compression')
46 # variant('zfp', default=True,
47 # description='Enable ZFP compression')
48 # transport engines
49 variant('dataman', default=True,
50 description='Enable the DataMan engine for WAN transports')
51 # currently required by DataMan, optional in the future
52 # variant('zeromq', default=False,
53 # description='Enable ZeroMQ for the DataMan engine')
54 variant('hdf5', default=False,
55 description='Enable the HDF5 engine')
56 variant('adios1', default=False,
57 description='Enable the ADIOS 1.x engine')
58 # language bindings
59 variant('python', default=True,
60 description='Enable the Python >= 2.7 bindings')
61
62 # requires mature C++11 implementations
63 conflicts('%gcc@:4.7')
64 conflicts('%intel@:15')
65 conflicts('%pgi@:14')
66
67 # DataMan needs dlopen
68 conflicts('+dataman', when='~shared')
69
70 depends_on('[email protected]:', type='build')
71
72 # contained in thirdparty/
73 # depends_on('googletest')
74 # depends_on('pugixml')
75 # depends_on('kwsys')
76 # depends_on('nlohmannjson')
77 # depends_on('[email protected]:', when='+python')
78
79 depends_on('mpi', when='+mpi')
80
81 depends_on('hdf5', when='+hdf5')
82 depends_on('hdf5+mpi', when='+hdf5+mpi')
83 depends_on('adios', when='+adios1')
84 depends_on('adios+mpi', when='+adios1+mpi')
85
86 depends_on('bzip2', when='+bzip2')
87 depends_on('zfp', when='+zfp')
88
89 extends('python', when='+python')
90 depends_on('[email protected]:', type=('build', 'run'), when='+python')
91 depends_on('[email protected]:', type=('build', 'run'), when='+python')
92 depends_on('[email protected]:', type=('build', 'run'), when='+mpi +python')
93
94 def cmake_args(self):
95 spec = self.spec
96
97 args = [
98 '-DADIOS2_BUILD_SHARED_LIBS:BOOL={0}'.format((
99 'ON' if '+shared' in spec else 'OFF')),
100 '-DADIOS2_BUILD_TESTING=OFF',
101 '-DADIOS2_USE_MPI={0}'.format((
102 'ON' if '+mpi' in spec else 'OFF')),
103 '-DADIOS2_USE_BZip2={0}'.format((
104 'ON' if '+bzip2' in spec else 'OFF')),
105 '-DADIOS2_USE_ZFP={0}'.format((
106 'ON' if '+zfp' in spec else 'OFF')),
107 '-DADIOS2_USE_DataMan={0}'.format((
108 'ON' if '+dataman' in spec else 'OFF')),
109 '-DADIOS2_USE_ZeroMQ={0}'.format((
110 'ON' if '+dataman' in spec else 'OFF')),
111 '-DADIOS2_USE_HDF5={0}'.format((
112 'ON' if '+hdf5' in spec else 'OFF')),
113 '-DADIOS2_USE_ADIOS1={0}'.format((
114 'ON' if '+adios1' in spec else 'OFF')),
115 '-DADIOS2_USE_Python={0}'.format((
116 'ON' if '+python' in spec else 'OFF'))
117 ]
118 return args
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/var/spack/repos/builtin/packages/adios2/package.py b/var/spack/repos/builtin/packages/adios2/package.py
--- a/var/spack/repos/builtin/packages/adios2/package.py
+++ b/var/spack/repos/builtin/packages/adios2/package.py
@@ -77,6 +77,7 @@
# depends_on('[email protected]:', when='+python')
depends_on('mpi', when='+mpi')
+ depends_on('zeromq', when='+dataman')
depends_on('hdf5', when='+hdf5')
depends_on('hdf5+mpi', when='+hdf5+mpi')
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/adios2/package.py b/var/spack/repos/builtin/packages/adios2/package.py\n--- a/var/spack/repos/builtin/packages/adios2/package.py\n+++ b/var/spack/repos/builtin/packages/adios2/package.py\n@@ -77,6 +77,7 @@\n # depends_on('[email protected]:', when='+python')\n \n depends_on('mpi', when='+mpi')\n+ depends_on('zeromq', when='+dataman')\n \n depends_on('hdf5', when='+hdf5')\n depends_on('hdf5+mpi', when='+hdf5+mpi')\n", "issue": "Adios2 missing zeromq dependency\n@ax3l : I did a normal build without specifying any variants\r\n\r\n Call Stack (most recent call first):\r\n ..../cmake-3.9/Modules/FindPackageHandleStandardArgs.cmake:377 (_FPHSA_FAILURE_MESSAGE)\r\n cmake/FindZeroMQ.cmake:44 (find_package_handle_standard_args)\r\n source/dataman/CMakeLists.txt:51 (find_package)\r\n\r\n\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Adios2(CMakePackage):\n \"\"\"Next generation of ADIOS developed in the Exascale Computing Program\"\"\"\n\n homepage = \"https://www.olcf.ornl.gov/center-projects/adios/\"\n url = \"https://github.com/ornladios/ADIOS2/archive/v2.0.0.tar.gz\"\n\n version('develop', branch='master',\n git='https://github.com/ornladios/ADIOS2.git')\n\n version('2.0.0', '019115e5c6ac28bd0f4201f590f5d994')\n\n variant('shared', default=True,\n description='Also build shared libraries')\n variant('mpi', default=True,\n description='Enable MPI')\n # transforms (not yet implemented)\n # variant('bzip2', default=True,\n # description='Enable BZip2 compression')\n # variant('zfp', default=True,\n # description='Enable ZFP compression')\n # transport engines\n variant('dataman', default=True,\n description='Enable the DataMan engine for WAN transports')\n # currently required by DataMan, optional in the future\n # variant('zeromq', default=False,\n # description='Enable ZeroMQ for the DataMan engine')\n variant('hdf5', default=False,\n description='Enable the HDF5 engine')\n variant('adios1', default=False,\n description='Enable the ADIOS 1.x engine')\n # language bindings\n variant('python', default=True,\n description='Enable the Python >= 2.7 bindings')\n\n # requires mature C++11 implementations\n conflicts('%gcc@:4.7')\n conflicts('%intel@:15')\n conflicts('%pgi@:14')\n\n # 
DataMan needs dlopen\n conflicts('+dataman', when='~shared')\n\n depends_on('[email protected]:', type='build')\n\n # contained in thirdparty/\n # depends_on('googletest')\n # depends_on('pugixml')\n # depends_on('kwsys')\n # depends_on('nlohmannjson')\n # depends_on('[email protected]:', when='+python')\n\n depends_on('mpi', when='+mpi')\n\n depends_on('hdf5', when='+hdf5')\n depends_on('hdf5+mpi', when='+hdf5+mpi')\n depends_on('adios', when='+adios1')\n depends_on('adios+mpi', when='+adios1+mpi')\n\n depends_on('bzip2', when='+bzip2')\n depends_on('zfp', when='+zfp')\n\n extends('python', when='+python')\n depends_on('[email protected]:', type=('build', 'run'), when='+python')\n depends_on('[email protected]:', type=('build', 'run'), when='+python')\n depends_on('[email protected]:', type=('build', 'run'), when='+mpi +python')\n\n def cmake_args(self):\n spec = self.spec\n\n args = [\n '-DADIOS2_BUILD_SHARED_LIBS:BOOL={0}'.format((\n 'ON' if '+shared' in spec else 'OFF')),\n '-DADIOS2_BUILD_TESTING=OFF',\n '-DADIOS2_USE_MPI={0}'.format((\n 'ON' if '+mpi' in spec else 'OFF')),\n '-DADIOS2_USE_BZip2={0}'.format((\n 'ON' if '+bzip2' in spec else 'OFF')),\n '-DADIOS2_USE_ZFP={0}'.format((\n 'ON' if '+zfp' in spec else 'OFF')),\n '-DADIOS2_USE_DataMan={0}'.format((\n 'ON' if '+dataman' in spec else 'OFF')),\n '-DADIOS2_USE_ZeroMQ={0}'.format((\n 'ON' if '+dataman' in spec else 'OFF')),\n '-DADIOS2_USE_HDF5={0}'.format((\n 'ON' if '+hdf5' in spec else 'OFF')),\n '-DADIOS2_USE_ADIOS1={0}'.format((\n 'ON' if '+adios1' in spec else 'OFF')),\n '-DADIOS2_USE_Python={0}'.format((\n 'ON' if '+python' in spec else 'OFF'))\n ]\n return args\n", "path": "var/spack/repos/builtin/packages/adios2/package.py"}], "after_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Adios2(CMakePackage):\n \"\"\"Next generation of ADIOS developed in the Exascale Computing Program\"\"\"\n\n homepage = \"https://www.olcf.ornl.gov/center-projects/adios/\"\n url = \"https://github.com/ornladios/ADIOS2/archive/v2.0.0.tar.gz\"\n\n version('develop', branch='master',\n git='https://github.com/ornladios/ADIOS2.git')\n\n version('2.0.0', '019115e5c6ac28bd0f4201f590f5d994')\n\n variant('shared', default=True,\n description='Also build shared libraries')\n variant('mpi', default=True,\n description='Enable MPI')\n # transforms (not yet implemented)\n # variant('bzip2', default=True,\n # description='Enable BZip2 compression')\n # variant('zfp', default=True,\n # description='Enable ZFP compression')\n # transport engines\n variant('dataman', default=True,\n description='Enable the DataMan engine for WAN transports')\n # currently required by DataMan, optional in the future\n # variant('zeromq', default=False,\n # description='Enable ZeroMQ for the DataMan engine')\n variant('hdf5', default=False,\n description='Enable the HDF5 engine')\n variant('adios1', default=False,\n description='Enable the ADIOS 1.x engine')\n # language bindings\n variant('python', default=True,\n description='Enable the Python >= 2.7 bindings')\n\n # requires mature C++11 implementations\n conflicts('%gcc@:4.7')\n conflicts('%intel@:15')\n conflicts('%pgi@:14')\n\n # DataMan needs dlopen\n conflicts('+dataman', when='~shared')\n\n depends_on('[email protected]:', type='build')\n\n # contained in thirdparty/\n # depends_on('googletest')\n # depends_on('pugixml')\n # depends_on('kwsys')\n # depends_on('nlohmannjson')\n # depends_on('[email protected]:', when='+python')\n\n depends_on('mpi', when='+mpi')\n depends_on('zeromq', when='+dataman')\n\n depends_on('hdf5', when='+hdf5')\n depends_on('hdf5+mpi', when='+hdf5+mpi')\n depends_on('adios', when='+adios1')\n depends_on('adios+mpi', when='+adios1+mpi')\n\n depends_on('bzip2', when='+bzip2')\n depends_on('zfp', when='+zfp')\n\n extends('python', when='+python')\n depends_on('[email protected]:', type=('build', 'run'), when='+python')\n depends_on('[email protected]:', type=('build', 'run'), when='+python')\n depends_on('[email protected]:', type=('build', 'run'), when='+mpi +python')\n\n def cmake_args(self):\n spec = self.spec\n\n args = [\n '-DADIOS2_BUILD_SHARED_LIBS:BOOL={0}'.format((\n 'ON' if '+shared' in spec else 'OFF')),\n '-DADIOS2_BUILD_TESTING=OFF',\n '-DADIOS2_USE_MPI={0}'.format((\n 'ON' if '+mpi' in spec else 'OFF')),\n '-DADIOS2_USE_BZip2={0}'.format((\n 'ON' if '+bzip2' in spec else 'OFF')),\n '-DADIOS2_USE_ZFP={0}'.format((\n 'ON' if '+zfp' in spec else 'OFF')),\n '-DADIOS2_USE_DataMan={0}'.format((\n 'ON' if '+dataman' in spec else 'OFF')),\n '-DADIOS2_USE_ZeroMQ={0}'.format((\n 'ON' if '+dataman' in spec else 'OFF')),\n '-DADIOS2_USE_HDF5={0}'.format((\n 'ON' if '+hdf5' in spec else 'OFF')),\n '-DADIOS2_USE_ADIOS1={0}'.format((\n 'ON' if '+adios1' in spec else 'OFF')),\n '-DADIOS2_USE_Python={0}'.format((\n 'ON' if '+python' in spec else 'OFF'))\n ]\n return args\n", "path": 
"var/spack/repos/builtin/packages/adios2/package.py"}]} | 1,895 | 154 |
gh_patches_debug_40664 | rasdani/github-patches | git_diff | medtagger__MedTagger-40 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add "radon" tool to the Backend and enable it in CI
## Expected Behavior
Python code in backend should be validated by "radon" tool in CI.
## Actual Behavior
MedTagger backend uses a few linters already, but we should add more validators to increase automation and code quality.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/scripts/migrate_hbase.py`
Content:
```
1 """Script that can migrate existing HBase schema or prepare empty database with given schema.
2
3 How to use it?
4 --------------
5 Run this script just by executing following line in the root directory of this project:
6
7 (venv) $ python3.6 scripts/migrate_hbase.py
8
9 """
10 import argparse
11 import logging
12 import logging.config
13
14 from medtagger.clients.hbase_client import HBaseClient
15 from utils import get_connection_to_hbase, user_agrees
16
17 logging.config.fileConfig('logging.conf')
18 logger = logging.getLogger(__name__)
19
20 parser = argparse.ArgumentParser(description='HBase migration.')
21 parser.add_argument('-y', '--yes', dest='yes', action='store_const', const=True)
22 args = parser.parse_args()
23
24
25 HBASE_SCHEMA = HBaseClient.HBASE_SCHEMA
26 connection = get_connection_to_hbase()
27 existing_tables = set(connection.tables())
28 schema_tables = set(HBASE_SCHEMA)
29 tables_to_drop = list(existing_tables - schema_tables)
30 for table_name in tables_to_drop:
31 if args.yes or user_agrees('Do you want to drop table "{}"?'.format(table_name)):
32 logger.info('Dropping table "%s".', table_name)
33 table = connection.table(table_name)
34 table.drop()
35
36 for table_name in HBASE_SCHEMA:
37 table = connection.table(table_name)
38 if not table.exists():
39 if args.yes or user_agrees('Do you want to create table "{}"?'.format(table_name)):
40 list_of_columns = HBASE_SCHEMA[table_name]
41 logger.info('Creating table "%s" with columns %s.', table_name, list_of_columns)
42 table.create(*list_of_columns)
43 table.enable_if_exists_checks()
44 else:
45 existing_column_families = set(table.columns())
46 schema_column_families = set(HBASE_SCHEMA[table_name])
47 columns_to_add = list(schema_column_families - existing_column_families)
48 columns_to_drop = list(existing_column_families - schema_column_families)
49
50 if columns_to_add:
51 if args.yes or user_agrees('Do you want to add columns {} to "{}"?'.format(columns_to_add, table_name)):
52 table.add_columns(*columns_to_add)
53
54 if columns_to_drop:
55 if args.yes or user_agrees('Do you want to drop columns {} from "{}"?'.format(columns_to_drop, table_name)):
56 table.drop_columns(*columns_to_drop)
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/scripts/migrate_hbase.py b/backend/scripts/migrate_hbase.py
--- a/backend/scripts/migrate_hbase.py
+++ b/backend/scripts/migrate_hbase.py
@@ -11,6 +11,8 @@
import logging
import logging.config
+from starbase import Table
+
from medtagger.clients.hbase_client import HBaseClient
from utils import get_connection_to_hbase, user_agrees
@@ -22,35 +24,59 @@
args = parser.parse_args()
-HBASE_SCHEMA = HBaseClient.HBASE_SCHEMA
-connection = get_connection_to_hbase()
-existing_tables = set(connection.tables())
-schema_tables = set(HBASE_SCHEMA)
-tables_to_drop = list(existing_tables - schema_tables)
-for table_name in tables_to_drop:
+def create_new_table(table: Table) -> None:
+ """Create new table once user agrees on that."""
+ table_name = table.name
+ if args.yes or user_agrees('Do you want to create table "{}"?'.format(table_name)):
+ list_of_columns = HBaseClient.HBASE_SCHEMA[table_name]
+ logger.info('Creating table "%s" with columns %s.', table_name, list_of_columns)
+ table.create(*list_of_columns)
+ table.enable_if_exists_checks()
+
+
+def update_table_schema(table: Table) -> None:
+ """Update table schema once user agrees on that."""
+ table_name = table.name
+ existing_column_families = set(table.columns())
+ schema_column_families = set(HBaseClient.HBASE_SCHEMA[table_name])
+ columns_to_add = list(schema_column_families - existing_column_families)
+ columns_to_drop = list(existing_column_families - schema_column_families)
+
+ if columns_to_add:
+ if args.yes or user_agrees('Do you want to add columns {} to "{}"?'.format(columns_to_add, table_name)):
+ table.add_columns(*columns_to_add)
+
+ if columns_to_drop:
+ if args.yes or user_agrees('Do you want to drop columns {} from "{}"?'.format(columns_to_drop, table_name)):
+ table.drop_columns(*columns_to_drop)
+
+
+def drop_table(table: Table) -> None:
+ """Drop table once user agrees on that."""
+ table_name = table.name
if args.yes or user_agrees('Do you want to drop table "{}"?'.format(table_name)):
logger.info('Dropping table "%s".', table_name)
- table = connection.table(table_name)
table.drop()
-for table_name in HBASE_SCHEMA:
- table = connection.table(table_name)
- if not table.exists():
- if args.yes or user_agrees('Do you want to create table "{}"?'.format(table_name)):
- list_of_columns = HBASE_SCHEMA[table_name]
- logger.info('Creating table "%s" with columns %s.', table_name, list_of_columns)
- table.create(*list_of_columns)
- table.enable_if_exists_checks()
- else:
- existing_column_families = set(table.columns())
- schema_column_families = set(HBASE_SCHEMA[table_name])
- columns_to_add = list(schema_column_families - existing_column_families)
- columns_to_drop = list(existing_column_families - schema_column_families)
-
- if columns_to_add:
- if args.yes or user_agrees('Do you want to add columns {} to "{}"?'.format(columns_to_add, table_name)):
- table.add_columns(*columns_to_add)
-
- if columns_to_drop:
- if args.yes or user_agrees('Do you want to drop columns {} from "{}"?'.format(columns_to_drop, table_name)):
- table.drop_columns(*columns_to_drop)
+
+def main() -> None:
+ """Run main functionality of this script."""
+ connection = get_connection_to_hbase()
+ existing_tables = set(connection.tables())
+ schema_tables = set(HBaseClient.HBASE_SCHEMA)
+ tables_to_drop = list(existing_tables - schema_tables)
+
+ for table_name in tables_to_drop:
+ table = connection.table(table_name)
+ drop_table(table)
+
+ for table_name in HBaseClient.HBASE_SCHEMA:
+ table = connection.table(table_name)
+ if not table.exists():
+ create_new_table(table)
+ else:
+ update_table_schema(table)
+
+
+if __name__ == '__main__':
+ main()
| {"golden_diff": "diff --git a/backend/scripts/migrate_hbase.py b/backend/scripts/migrate_hbase.py\n--- a/backend/scripts/migrate_hbase.py\n+++ b/backend/scripts/migrate_hbase.py\n@@ -11,6 +11,8 @@\n import logging\n import logging.config\n \n+from starbase import Table\n+\n from medtagger.clients.hbase_client import HBaseClient\n from utils import get_connection_to_hbase, user_agrees\n \n@@ -22,35 +24,59 @@\n args = parser.parse_args()\n \n \n-HBASE_SCHEMA = HBaseClient.HBASE_SCHEMA\n-connection = get_connection_to_hbase()\n-existing_tables = set(connection.tables())\n-schema_tables = set(HBASE_SCHEMA)\n-tables_to_drop = list(existing_tables - schema_tables)\n-for table_name in tables_to_drop:\n+def create_new_table(table: Table) -> None:\n+ \"\"\"Create new table once user agrees on that.\"\"\"\n+ table_name = table.name\n+ if args.yes or user_agrees('Do you want to create table \"{}\"?'.format(table_name)):\n+ list_of_columns = HBaseClient.HBASE_SCHEMA[table_name]\n+ logger.info('Creating table \"%s\" with columns %s.', table_name, list_of_columns)\n+ table.create(*list_of_columns)\n+ table.enable_if_exists_checks()\n+\n+\n+def update_table_schema(table: Table) -> None:\n+ \"\"\"Update table schema once user agrees on that.\"\"\"\n+ table_name = table.name\n+ existing_column_families = set(table.columns())\n+ schema_column_families = set(HBaseClient.HBASE_SCHEMA[table_name])\n+ columns_to_add = list(schema_column_families - existing_column_families)\n+ columns_to_drop = list(existing_column_families - schema_column_families)\n+\n+ if columns_to_add:\n+ if args.yes or user_agrees('Do you want to add columns {} to \"{}\"?'.format(columns_to_add, table_name)):\n+ table.add_columns(*columns_to_add)\n+\n+ if columns_to_drop:\n+ if args.yes or user_agrees('Do you want to drop columns {} from \"{}\"?'.format(columns_to_drop, table_name)):\n+ table.drop_columns(*columns_to_drop)\n+\n+\n+def drop_table(table: Table) -> None:\n+ \"\"\"Drop table once user agrees on that.\"\"\"\n+ table_name = table.name\n if args.yes or user_agrees('Do you want to drop table \"{}\"?'.format(table_name)):\n logger.info('Dropping table \"%s\".', table_name)\n- table = connection.table(table_name)\n table.drop()\n \n-for table_name in HBASE_SCHEMA:\n- table = connection.table(table_name)\n- if not table.exists():\n- if args.yes or user_agrees('Do you want to create table \"{}\"?'.format(table_name)):\n- list_of_columns = HBASE_SCHEMA[table_name]\n- logger.info('Creating table \"%s\" with columns %s.', table_name, list_of_columns)\n- table.create(*list_of_columns)\n- table.enable_if_exists_checks()\n- else:\n- existing_column_families = set(table.columns())\n- schema_column_families = set(HBASE_SCHEMA[table_name])\n- columns_to_add = list(schema_column_families - existing_column_families)\n- columns_to_drop = list(existing_column_families - schema_column_families)\n-\n- if columns_to_add:\n- if args.yes or user_agrees('Do you want to add columns {} to \"{}\"?'.format(columns_to_add, table_name)):\n- table.add_columns(*columns_to_add)\n-\n- if columns_to_drop:\n- if args.yes or user_agrees('Do you want to drop columns {} from \"{}\"?'.format(columns_to_drop, table_name)):\n- table.drop_columns(*columns_to_drop)\n+\n+def main() -> None:\n+ \"\"\"Run main functionality of this script.\"\"\"\n+ connection = get_connection_to_hbase()\n+ existing_tables = set(connection.tables())\n+ schema_tables = set(HBaseClient.HBASE_SCHEMA)\n+ tables_to_drop = list(existing_tables - schema_tables)\n+\n+ for table_name in 
tables_to_drop:\n+ table = connection.table(table_name)\n+ drop_table(table)\n+\n+ for table_name in HBaseClient.HBASE_SCHEMA:\n+ table = connection.table(table_name)\n+ if not table.exists():\n+ create_new_table(table)\n+ else:\n+ update_table_schema(table)\n+\n+\n+if __name__ == '__main__':\n+ main()\n", "issue": "Add \"radon\" tool to the Backend and enable it in CI\n## Expected Behavior\r\n\r\nPython code in backend should be validated by \"radon\" tool in CI.\r\n\r\n## Actual Behavior\r\n\r\nMedTagger backend uses a few linters already but we should add more validators to increate automation and code quality.\n", "before_files": [{"content": "\"\"\"Script that can migrate existing HBase schema or prepare empty database with given schema.\n\nHow to use it?\n--------------\nRun this script just by executing following line in the root directory of this project:\n\n (venv) $ python3.6 scripts/migrate_hbase.py\n\n\"\"\"\nimport argparse\nimport logging\nimport logging.config\n\nfrom medtagger.clients.hbase_client import HBaseClient\nfrom utils import get_connection_to_hbase, user_agrees\n\nlogging.config.fileConfig('logging.conf')\nlogger = logging.getLogger(__name__)\n\nparser = argparse.ArgumentParser(description='HBase migration.')\nparser.add_argument('-y', '--yes', dest='yes', action='store_const', const=True)\nargs = parser.parse_args()\n\n\nHBASE_SCHEMA = HBaseClient.HBASE_SCHEMA\nconnection = get_connection_to_hbase()\nexisting_tables = set(connection.tables())\nschema_tables = set(HBASE_SCHEMA)\ntables_to_drop = list(existing_tables - schema_tables)\nfor table_name in tables_to_drop:\n if args.yes or user_agrees('Do you want to drop table \"{}\"?'.format(table_name)):\n logger.info('Dropping table \"%s\".', table_name)\n table = connection.table(table_name)\n table.drop()\n\nfor table_name in HBASE_SCHEMA:\n table = connection.table(table_name)\n if not table.exists():\n if args.yes or user_agrees('Do you want to create table \"{}\"?'.format(table_name)):\n list_of_columns = HBASE_SCHEMA[table_name]\n logger.info('Creating table \"%s\" with columns %s.', table_name, list_of_columns)\n table.create(*list_of_columns)\n table.enable_if_exists_checks()\n else:\n existing_column_families = set(table.columns())\n schema_column_families = set(HBASE_SCHEMA[table_name])\n columns_to_add = list(schema_column_families - existing_column_families)\n columns_to_drop = list(existing_column_families - schema_column_families)\n\n if columns_to_add:\n if args.yes or user_agrees('Do you want to add columns {} to \"{}\"?'.format(columns_to_add, table_name)):\n table.add_columns(*columns_to_add)\n\n if columns_to_drop:\n if args.yes or user_agrees('Do you want to drop columns {} from \"{}\"?'.format(columns_to_drop, table_name)):\n table.drop_columns(*columns_to_drop)\n", "path": "backend/scripts/migrate_hbase.py"}], "after_files": [{"content": "\"\"\"Script that can migrate existing HBase schema or prepare empty database with given schema.\n\nHow to use it?\n--------------\nRun this script just by executing following line in the root directory of this project:\n\n (venv) $ python3.6 scripts/migrate_hbase.py\n\n\"\"\"\nimport argparse\nimport logging\nimport logging.config\n\nfrom starbase import Table\n\nfrom medtagger.clients.hbase_client import HBaseClient\nfrom utils import get_connection_to_hbase, user_agrees\n\nlogging.config.fileConfig('logging.conf')\nlogger = logging.getLogger(__name__)\n\nparser = argparse.ArgumentParser(description='HBase migration.')\nparser.add_argument('-y', '--yes', 
dest='yes', action='store_const', const=True)\nargs = parser.parse_args()\n\n\ndef create_new_table(table: Table) -> None:\n \"\"\"Create new table once user agrees on that.\"\"\"\n table_name = table.name\n if args.yes or user_agrees('Do you want to create table \"{}\"?'.format(table_name)):\n list_of_columns = HBaseClient.HBASE_SCHEMA[table_name]\n logger.info('Creating table \"%s\" with columns %s.', table_name, list_of_columns)\n table.create(*list_of_columns)\n table.enable_if_exists_checks()\n\n\ndef update_table_schema(table: Table) -> None:\n \"\"\"Update table schema once user agrees on that.\"\"\"\n table_name = table.name\n existing_column_families = set(table.columns())\n schema_column_families = set(HBaseClient.HBASE_SCHEMA[table_name])\n columns_to_add = list(schema_column_families - existing_column_families)\n columns_to_drop = list(existing_column_families - schema_column_families)\n\n if columns_to_add:\n if args.yes or user_agrees('Do you want to add columns {} to \"{}\"?'.format(columns_to_add, table_name)):\n table.add_columns(*columns_to_add)\n\n if columns_to_drop:\n if args.yes or user_agrees('Do you want to drop columns {} from \"{}\"?'.format(columns_to_drop, table_name)):\n table.drop_columns(*columns_to_drop)\n\n\ndef drop_table(table: Table) -> None:\n \"\"\"Drop table once user agrees on that.\"\"\"\n table_name = table.name\n if args.yes or user_agrees('Do you want to drop table \"{}\"?'.format(table_name)):\n logger.info('Dropping table \"%s\".', table_name)\n table.drop()\n\n\ndef main() -> None:\n \"\"\"Run main functionality of this script.\"\"\"\n connection = get_connection_to_hbase()\n existing_tables = set(connection.tables())\n schema_tables = set(HBaseClient.HBASE_SCHEMA)\n tables_to_drop = list(existing_tables - schema_tables)\n\n for table_name in tables_to_drop:\n table = connection.table(table_name)\n drop_table(table)\n\n for table_name in HBaseClient.HBASE_SCHEMA:\n table = connection.table(table_name)\n if not table.exists():\n create_new_table(table)\n else:\n update_table_schema(table)\n\n\nif __name__ == '__main__':\n main()\n", "path": "backend/scripts/migrate_hbase.py"}]} | 926 | 964 |
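The issue asks for the radon complexity checker in CI, and the accepted patch simply splits the migration script into small functions so such a check stays quiet. A rough sketch of measuring cyclomatic complexity through radon's Python API (assumes `radon` is installed; a real CI job would more likely call the `radon` command-line tool, and the sample source below is invented):

```python
from radon.complexity import cc_visit

SOURCE = '''
def drop_table(table, assume_yes):
    if assume_yes or input("Drop table? ") == "y":
        table.drop()
'''

# cc_visit parses the source and returns one result per function or class,
# each carrying a cyclomatic complexity score.
for block in cc_visit(SOURCE):
    print(block.name, block.complexity)
```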
gh_patches_debug_1836 | rasdani/github-patches | git_diff | Nitrate__Nitrate-337 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Upgrade django-tinymce to 2.7.0
As per subject.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 from setuptools import setup, find_packages
4
5
6 with open('VERSION.txt', 'r') as f:
7 pkg_version = f.read().strip()
8
9
10 def get_long_description():
11 with open('README.rst', 'r') as f:
12 return f.read()
13
14
15 install_requires = [
16 'PyMySQL == 0.7.11',
17 'beautifulsoup4 >= 4.1.1',
18 'celery == 4.1.0',
19 'django-contrib-comments == 1.8.0',
20 'django-tinymce == 2.6.0',
21 'django-uuslug == 1.1.8',
22 'django >= 1.10,<2.0',
23 'html2text',
24 'kobo == 0.7.0',
25 'odfpy >= 0.9.6',
26 'six',
27 'xmltodict',
28 ]
29
30 extras_require = {
31 # Required for tcms.core.contrib.auth.backends.KerberosBackend
32 'krbauth': [
33 'kerberos == 1.2.5'
34 ],
35
36 # Packages for building documentation
37 'docs': [
38 'Sphinx >= 1.1.2',
39 'sphinx_rtd_theme',
40 ],
41
42 # Necessary packages for running tests
43 'tests': [
44 'coverage',
45 'factory_boy',
46 'flake8',
47 'mock',
48 'pytest',
49 'pytest-cov',
50 'pytest-django',
51 ],
52
53 # Contain tools that assists the development
54 'devtools': [
55 'django-debug-toolbar == 1.7',
56 'tox',
57 'django-extensions',
58 'pygraphviz',
59 ]
60 }
61
62
63 setup(
64 name='Nitrate',
65 version=pkg_version,
66 description='Test Case Management System',
67 long_description=get_long_description(),
68 author='Nitrate Team',
69 maintainer='Chenxiong Qi',
70 maintainer_email='[email protected]',
71 url='https://github.com/Nitrate/Nitrate/',
72 license='GPLv2+',
73 keywords='test case',
74 install_requires=install_requires,
75 extras_require=extras_require,
76 packages=find_packages(),
77 include_package_data=True,
78 classifiers=[
79 'Framework :: Django',
80 'Framework :: Django :: 1.10',
81 'Framework :: Django :: 1.11',
82 'Intended Audience :: Developers',
83 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
84 'Programming Language :: Python :: 2',
85 'Programming Language :: Python :: 2.7',
86 'Programming Language :: Python :: 3',
87 'Programming Language :: Python :: 3.6',
88 'Topic :: Software Development :: Quality Assurance',
89 'Topic :: Software Development :: Testing',
90 ],
91 )
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -17,7 +17,7 @@
'beautifulsoup4 >= 4.1.1',
'celery == 4.1.0',
'django-contrib-comments == 1.8.0',
- 'django-tinymce == 2.6.0',
+ 'django-tinymce == 2.7.0',
'django-uuslug == 1.1.8',
'django >= 1.10,<2.0',
'html2text',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -17,7 +17,7 @@\n 'beautifulsoup4 >= 4.1.1',\n 'celery == 4.1.0',\n 'django-contrib-comments == 1.8.0',\n- 'django-tinymce == 2.6.0',\n+ 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'django >= 1.10,<2.0',\n 'html2text',\n", "issue": "Upgrade django-tinymce to 2.7.0\nAs per subject.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'PyMySQL == 0.7.11',\n 'beautifulsoup4 >= 4.1.1',\n 'celery == 4.1.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.6.0',\n 'django-uuslug == 1.1.8',\n 'django >= 1.10,<2.0',\n 'html2text',\n 'kobo == 0.7.0',\n 'odfpy >= 0.9.6',\n 'six',\n 'xmltodict',\n]\n\nextras_require = {\n # Required for tcms.core.contrib.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n ]\n}\n\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.10',\n 'Framework :: Django :: 1.11',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'PyMySQL == 0.7.11',\n 'beautifulsoup4 >= 4.1.1',\n 'celery == 4.1.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'django >= 1.10,<2.0',\n 'html2text',\n 'kobo == 0.7.0',\n 'odfpy >= 0.9.6',\n 'six',\n 'xmltodict',\n]\n\nextras_require = {\n # Required for tcms.core.contrib.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 
'django-extensions',\n 'pygraphviz',\n ]\n}\n\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.10',\n 'Framework :: Django :: 1.11',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n)\n", "path": "setup.py"}]} | 1,071 | 134 |
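The Nitrate fix is a plain bump of a pinned requirement from 2.6.0 to 2.7.0. A small sanity check of what that pin accepts, using the third-party `packaging` library (illustrative only, not part of Nitrate's code base):

```python
from packaging.requirements import Requirement

req = Requirement("django-tinymce == 2.7.0")
print(req.specifier.contains("2.7.0"))  # True: the newly required version
print(req.specifier.contains("2.6.0"))  # False: the old version no longer satisfies the pin
```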
gh_patches_debug_58946 | rasdani/github-patches | git_diff | ivy-llc__ivy-13797 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
diagflat
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/numpy/creation_routines/building_matrices.py`
Content:
```
1 import ivy
2 from ivy.functional.frontends.numpy.func_wrapper import (
3 to_ivy_arrays_and_back,
4 handle_numpy_dtype,
5 )
6
7
8 @to_ivy_arrays_and_back
9 def tril(m, k=0):
10 return ivy.tril(m, k=k)
11
12
13 @to_ivy_arrays_and_back
14 def triu(m, k=0):
15 return ivy.triu(m, k=k)
16
17
18 @handle_numpy_dtype
19 @to_ivy_arrays_and_back
20 def tri(N, M=None, k=0, dtype="float64", *, like=None):
21 if M is None:
22 M = N
23 ones = ivy.ones((N, M), dtype=dtype)
24 return ivy.tril(ones, k=k)
25
26
27 @to_ivy_arrays_and_back
28 def diag(v, k=0):
29 return ivy.diag(v, k=k)
30
31
32 @to_ivy_arrays_and_back
33 def vander(x, N=None, increasing=False):
34 if ivy.is_float_dtype(x):
35 x = x.astype(ivy.float64)
36 elif ivy.is_bool_dtype or ivy.is_int_dtype(x):
37 x = x.astype(ivy.int64)
38 return ivy.vander(x, N=N, increasing=increasing)
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/numpy/creation_routines/building_matrices.py b/ivy/functional/frontends/numpy/creation_routines/building_matrices.py
--- a/ivy/functional/frontends/numpy/creation_routines/building_matrices.py
+++ b/ivy/functional/frontends/numpy/creation_routines/building_matrices.py
@@ -36,3 +36,12 @@
elif ivy.is_bool_dtype or ivy.is_int_dtype(x):
x = x.astype(ivy.int64)
return ivy.vander(x, N=N, increasing=increasing)
+
+
+# diagflat
+@to_ivy_arrays_and_back
+def diagflat(v, k=0):
+ ret = ivy.diagflat(v, offset=k)
+ while len(ivy.shape(ret)) < 2:
+ ret = ret.expand_dims(axis=0)
+ return ret
| {"golden_diff": "diff --git a/ivy/functional/frontends/numpy/creation_routines/building_matrices.py b/ivy/functional/frontends/numpy/creation_routines/building_matrices.py\n--- a/ivy/functional/frontends/numpy/creation_routines/building_matrices.py\n+++ b/ivy/functional/frontends/numpy/creation_routines/building_matrices.py\n@@ -36,3 +36,12 @@\n elif ivy.is_bool_dtype or ivy.is_int_dtype(x):\n x = x.astype(ivy.int64)\n return ivy.vander(x, N=N, increasing=increasing)\n+\n+\n+# diagflat\n+@to_ivy_arrays_and_back\n+def diagflat(v, k=0):\n+ ret = ivy.diagflat(v, offset=k)\n+ while len(ivy.shape(ret)) < 2:\n+ ret = ret.expand_dims(axis=0)\n+ return ret\n", "issue": "diagflat\n\n", "before_files": [{"content": "import ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_numpy_dtype,\n)\n\n\n@to_ivy_arrays_and_back\ndef tril(m, k=0):\n return ivy.tril(m, k=k)\n\n\n@to_ivy_arrays_and_back\ndef triu(m, k=0):\n return ivy.triu(m, k=k)\n\n\n@handle_numpy_dtype\n@to_ivy_arrays_and_back\ndef tri(N, M=None, k=0, dtype=\"float64\", *, like=None):\n if M is None:\n M = N\n ones = ivy.ones((N, M), dtype=dtype)\n return ivy.tril(ones, k=k)\n\n\n@to_ivy_arrays_and_back\ndef diag(v, k=0):\n return ivy.diag(v, k=k)\n\n\n@to_ivy_arrays_and_back\ndef vander(x, N=None, increasing=False):\n if ivy.is_float_dtype(x):\n x = x.astype(ivy.float64)\n elif ivy.is_bool_dtype or ivy.is_int_dtype(x):\n x = x.astype(ivy.int64)\n return ivy.vander(x, N=N, increasing=increasing)\n", "path": "ivy/functional/frontends/numpy/creation_routines/building_matrices.py"}], "after_files": [{"content": "import ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_numpy_dtype,\n)\n\n\n@to_ivy_arrays_and_back\ndef tril(m, k=0):\n return ivy.tril(m, k=k)\n\n\n@to_ivy_arrays_and_back\ndef triu(m, k=0):\n return ivy.triu(m, k=k)\n\n\n@handle_numpy_dtype\n@to_ivy_arrays_and_back\ndef tri(N, M=None, k=0, dtype=\"float64\", *, like=None):\n if M is None:\n M = N\n ones = ivy.ones((N, M), dtype=dtype)\n return ivy.tril(ones, k=k)\n\n\n@to_ivy_arrays_and_back\ndef diag(v, k=0):\n return ivy.diag(v, k=k)\n\n\n@to_ivy_arrays_and_back\ndef vander(x, N=None, increasing=False):\n if ivy.is_float_dtype(x):\n x = x.astype(ivy.float64)\n elif ivy.is_bool_dtype or ivy.is_int_dtype(x):\n x = x.astype(ivy.int64)\n return ivy.vander(x, N=N, increasing=increasing)\n\n\n# diagflat\n@to_ivy_arrays_and_back\ndef diagflat(v, k=0):\n ret = ivy.diagflat(v, offset=k)\n while len(ivy.shape(ret)) < 2:\n ret = ret.expand_dims(axis=0)\n return ret\n", "path": "ivy/functional/frontends/numpy/creation_routines/building_matrices.py"}]} | 615 | 198 |
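The new frontend function is meant to mirror NumPy's own `diagflat`, which flattens its input and always returns a 2-D array with the values on the `k`-th diagonal. For reference:

```python
import numpy as np

print(np.diagflat([1, 2, 3]))
# [[1 0 0]
#  [0 2 0]
#  [0 0 3]]

print(np.diagflat([1, 2], k=1))  # values land on the first super-diagonal
# [[0 1 0]
#  [0 0 2]
#  [0 0 0]]
```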
gh_patches_debug_23782 | rasdani/github-patches | git_diff | Textualize__rich-273 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] '#' sign is treated as the end of a URL
**Describe the bug**
The `#` is a valid element of the URL, but Rich seems to ignore it and treat it as the end of it.
Consider this URL: https://github.com/willmcgugan/rich#rich-print-function
**To Reproduce**
```python
from rich.console import Console
console = Console()
console.log("https://github.com/willmcgugan/rich#rich-print-function")
```
Output:

**Platform**
I'm using Rich on Windows and Linux, with the currently newest version `6.1.1`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rich/highlighter.py`
Content:
```
1 from abc import ABC, abstractmethod
2 from typing import List, Union
3
4 from .text import Text
5
6
7 class Highlighter(ABC):
8 """Abstract base class for highlighters."""
9
10 def __call__(self, text: Union[str, Text]) -> Text:
11 """Highlight a str or Text instance.
12
13 Args:
14 text (Union[str, ~Text]): Text to highlight.
15
16 Raises:
17 TypeError: If not called with text or str.
18
19 Returns:
20 Text: A test instance with highlighting applied.
21 """
22 if isinstance(text, str):
23 highlight_text = Text(text)
24 elif isinstance(text, Text):
25 highlight_text = text.copy()
26 else:
27 raise TypeError(f"str or Text instance required, not {text!r}")
28 self.highlight(highlight_text)
29 return highlight_text
30
31 @abstractmethod
32 def highlight(self, text: Text) -> None:
33 """Apply highlighting in place to text.
34
35 Args:
36 text (~Text): A text object highlight.
37 """
38
39
40 class NullHighlighter(Highlighter):
41 """A highlighter object that doesn't highlight.
42
43 May be used to disable highlighting entirely.
44
45 """
46
47 def highlight(self, text: Text) -> None:
48 """Nothing to do"""
49
50
51 class RegexHighlighter(Highlighter):
52 """Applies highlighting from a list of regular expressions."""
53
54 highlights: List[str] = []
55 base_style: str = ""
56
57 def highlight(self, text: Text) -> None:
58 """Highlight :class:`rich.text.Text` using regular expressions.
59
60 Args:
61 text (~Text): Text to highlighted.
62
63 """
64 highlight_regex = text.highlight_regex
65 for re_highlight in self.highlights:
66 highlight_regex(re_highlight, style_prefix=self.base_style)
67
68
69 class ReprHighlighter(RegexHighlighter):
70 """Highlights the text typically produced from ``__repr__`` methods."""
71
72 base_style = "repr."
73 highlights = [
74 r"(?P<brace>[\{\[\(\)\]\}])",
75 r"(?P<tag_start>\<)(?P<tag_name>[\w\-\.\:]*)(?P<tag_contents>.*?)(?P<tag_end>\>)",
76 r"(?P<attrib_name>\w+?)=(?P<attrib_value>\"?[\w_]+\"?)",
77 r"(?P<bool_true>True)|(?P<bool_false>False)|(?P<none>None)",
78 r"(?P<number>(?<!\w)\-?[0-9]+\.?[0-9]*(e[\-\+]?\d+?)?\b)",
79 r"(?P<number>0x[0-9a-f]*)",
80 r"(?P<path>\B(\/[\w\.\-\_\+]+)*\/)(?P<filename>[\w\.\-\_\+]*)?",
81 r"(?<!\\)(?P<str>b?\'\'\'.*?(?<!\\)\'\'\'|b?\'.*?(?<!\\)\'|b?\"\"\".*?(?<!\\)\"\"\"|b?\".*?(?<!\\)\")",
82 r"(?P<url>https?:\/\/[0-9a-zA-Z\$\-\_\+\!`\(\)\,\.\?\/\;\:\&\=\%]*)",
83 r"(?P<uuid>[a-fA-F0-9]{8}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{12})",
84 ]
85
86
87 if __name__ == "__main__": # pragma: no cover
88 from .console import Console
89
90 console = Console()
91 console.print("[bold green]hello world![/bold green]")
92 console.print("'[bold green]hello world![/bold green]'")
93
94 console.print(" /foo")
95 console.print("/foo/")
96 console.print("/foo/bar")
97 console.print("foo/bar/baz")
98
99 console.print("/foo/bar/baz?foo=bar+egg&egg=baz")
100 console.print("/foo/bar/baz/")
101 console.print("/foo/bar/baz/egg")
102 console.print("/foo/bar/baz/egg.py")
103 console.print("/foo/bar/baz/egg.py word")
104 console.print(" /foo/bar/baz/egg.py word")
105 console.print("foo /foo/bar/baz/egg.py word")
106 console.print("foo /foo/bar/ba._++z/egg+.py word")
107 console.print("https://example.org?foo=bar")
108
109 console.print(1234567.34)
110 console.print(1 / 2)
111 console.print(-1 / 123123123123)
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rich/highlighter.py b/rich/highlighter.py
--- a/rich/highlighter.py
+++ b/rich/highlighter.py
@@ -79,7 +79,7 @@
r"(?P<number>0x[0-9a-f]*)",
r"(?P<path>\B(\/[\w\.\-\_\+]+)*\/)(?P<filename>[\w\.\-\_\+]*)?",
r"(?<!\\)(?P<str>b?\'\'\'.*?(?<!\\)\'\'\'|b?\'.*?(?<!\\)\'|b?\"\"\".*?(?<!\\)\"\"\"|b?\".*?(?<!\\)\")",
- r"(?P<url>https?:\/\/[0-9a-zA-Z\$\-\_\+\!`\(\)\,\.\?\/\;\:\&\=\%]*)",
+ r"(?P<url>https?:\/\/[0-9a-zA-Z\$\-\_\+\!`\(\)\,\.\?\/\;\:\&\=\%\#]*)",
r"(?P<uuid>[a-fA-F0-9]{8}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{12})",
]
@@ -104,7 +104,7 @@
console.print(" /foo/bar/baz/egg.py word")
console.print("foo /foo/bar/baz/egg.py word")
console.print("foo /foo/bar/ba._++z/egg+.py word")
- console.print("https://example.org?foo=bar")
+ console.print("https://example.org?foo=bar#header")
console.print(1234567.34)
console.print(1 / 2)
| {"golden_diff": "diff --git a/rich/highlighter.py b/rich/highlighter.py\n--- a/rich/highlighter.py\n+++ b/rich/highlighter.py\n@@ -79,7 +79,7 @@\n r\"(?P<number>0x[0-9a-f]*)\",\n r\"(?P<path>\\B(\\/[\\w\\.\\-\\_\\+]+)*\\/)(?P<filename>[\\w\\.\\-\\_\\+]*)?\",\n r\"(?<!\\\\)(?P<str>b?\\'\\'\\'.*?(?<!\\\\)\\'\\'\\'|b?\\'.*?(?<!\\\\)\\'|b?\\\"\\\"\\\".*?(?<!\\\\)\\\"\\\"\\\"|b?\\\".*?(?<!\\\\)\\\")\",\n- r\"(?P<url>https?:\\/\\/[0-9a-zA-Z\\$\\-\\_\\+\\!`\\(\\)\\,\\.\\?\\/\\;\\:\\&\\=\\%]*)\",\n+ r\"(?P<url>https?:\\/\\/[0-9a-zA-Z\\$\\-\\_\\+\\!`\\(\\)\\,\\.\\?\\/\\;\\:\\&\\=\\%\\#]*)\",\n r\"(?P<uuid>[a-fA-F0-9]{8}\\-[a-fA-F0-9]{4}\\-[a-fA-F0-9]{4}\\-[a-fA-F0-9]{4}\\-[a-fA-F0-9]{12})\",\n ]\n \n@@ -104,7 +104,7 @@\n console.print(\" /foo/bar/baz/egg.py word\")\n console.print(\"foo /foo/bar/baz/egg.py word\")\n console.print(\"foo /foo/bar/ba._++z/egg+.py word\")\n- console.print(\"https://example.org?foo=bar\")\n+ console.print(\"https://example.org?foo=bar#header\")\n \n console.print(1234567.34)\n console.print(1 / 2)\n", "issue": "[BUG] '#' sign is treated as the end of a URL\n**Describe the bug**\r\nThe `#` a valid element of the URL, but Rich seems to ignore it and treats it as the end of it. \r\nConsider this URL: https://github.com/willmcgugan/rich#rich-print-function\r\n\r\n**To Reproduce**\r\n```python\r\nfrom rich.console import Console\r\n\r\nconsole = Console()\r\n\r\nconsole.log(\"https://github.com/willmcgugan/rich#rich-print-function\")\r\n```\r\n\r\nOutput: \r\n\r\n\r\n\r\n**Platform**\r\nI'm using Rich on Windows and Linux, with the currently newest version `6.1.1`.\r\n\n", "before_files": [{"content": "from abc import ABC, abstractmethod\nfrom typing import List, Union\n\nfrom .text import Text\n\n\nclass Highlighter(ABC):\n \"\"\"Abstract base class for highlighters.\"\"\"\n\n def __call__(self, text: Union[str, Text]) -> Text:\n \"\"\"Highlight a str or Text instance.\n\n Args:\n text (Union[str, ~Text]): Text to highlight.\n\n Raises:\n TypeError: If not called with text or str.\n\n Returns:\n Text: A test instance with highlighting applied.\n \"\"\"\n if isinstance(text, str):\n highlight_text = Text(text)\n elif isinstance(text, Text):\n highlight_text = text.copy()\n else:\n raise TypeError(f\"str or Text instance required, not {text!r}\")\n self.highlight(highlight_text)\n return highlight_text\n\n @abstractmethod\n def highlight(self, text: Text) -> None:\n \"\"\"Apply highlighting in place to text.\n\n Args:\n text (~Text): A text object highlight.\n \"\"\"\n\n\nclass NullHighlighter(Highlighter):\n \"\"\"A highlighter object that doesn't highlight.\n\n May be used to disable highlighting entirely.\n\n \"\"\"\n\n def highlight(self, text: Text) -> None:\n \"\"\"Nothing to do\"\"\"\n\n\nclass RegexHighlighter(Highlighter):\n \"\"\"Applies highlighting from a list of regular expressions.\"\"\"\n\n highlights: List[str] = []\n base_style: str = \"\"\n\n def highlight(self, text: Text) -> None:\n \"\"\"Highlight :class:`rich.text.Text` using regular expressions.\n\n Args:\n text (~Text): Text to highlighted.\n\n \"\"\"\n highlight_regex = text.highlight_regex\n for re_highlight in self.highlights:\n highlight_regex(re_highlight, style_prefix=self.base_style)\n\n\nclass ReprHighlighter(RegexHighlighter):\n \"\"\"Highlights the text typically produced from ``__repr__`` methods.\"\"\"\n\n base_style = \"repr.\"\n highlights = [\n r\"(?P<brace>[\\{\\[\\(\\)\\]\\}])\",\n r\"(?P<tag_start>\\<)(?P<tag_name>[\\w\\-\\.\\:]*)(?P<tag_contents>.*?)(?P<tag_end>\\>)\",\n 
r\"(?P<attrib_name>\\w+?)=(?P<attrib_value>\\\"?[\\w_]+\\\"?)\",\n r\"(?P<bool_true>True)|(?P<bool_false>False)|(?P<none>None)\",\n r\"(?P<number>(?<!\\w)\\-?[0-9]+\\.?[0-9]*(e[\\-\\+]?\\d+?)?\\b)\",\n r\"(?P<number>0x[0-9a-f]*)\",\n r\"(?P<path>\\B(\\/[\\w\\.\\-\\_\\+]+)*\\/)(?P<filename>[\\w\\.\\-\\_\\+]*)?\",\n r\"(?<!\\\\)(?P<str>b?\\'\\'\\'.*?(?<!\\\\)\\'\\'\\'|b?\\'.*?(?<!\\\\)\\'|b?\\\"\\\"\\\".*?(?<!\\\\)\\\"\\\"\\\"|b?\\\".*?(?<!\\\\)\\\")\",\n r\"(?P<url>https?:\\/\\/[0-9a-zA-Z\\$\\-\\_\\+\\!`\\(\\)\\,\\.\\?\\/\\;\\:\\&\\=\\%]*)\",\n r\"(?P<uuid>[a-fA-F0-9]{8}\\-[a-fA-F0-9]{4}\\-[a-fA-F0-9]{4}\\-[a-fA-F0-9]{4}\\-[a-fA-F0-9]{12})\",\n ]\n\n\nif __name__ == \"__main__\": # pragma: no cover\n from .console import Console\n\n console = Console()\n console.print(\"[bold green]hello world![/bold green]\")\n console.print(\"'[bold green]hello world![/bold green]'\")\n\n console.print(\" /foo\")\n console.print(\"/foo/\")\n console.print(\"/foo/bar\")\n console.print(\"foo/bar/baz\")\n\n console.print(\"/foo/bar/baz?foo=bar+egg&egg=baz\")\n console.print(\"/foo/bar/baz/\")\n console.print(\"/foo/bar/baz/egg\")\n console.print(\"/foo/bar/baz/egg.py\")\n console.print(\"/foo/bar/baz/egg.py word\")\n console.print(\" /foo/bar/baz/egg.py word\")\n console.print(\"foo /foo/bar/baz/egg.py word\")\n console.print(\"foo /foo/bar/ba._++z/egg+.py word\")\n console.print(\"https://example.org?foo=bar\")\n\n console.print(1234567.34)\n console.print(1 / 2)\n console.print(-1 / 123123123123)\n", "path": "rich/highlighter.py"}], "after_files": [{"content": "from abc import ABC, abstractmethod\nfrom typing import List, Union\n\nfrom .text import Text\n\n\nclass Highlighter(ABC):\n \"\"\"Abstract base class for highlighters.\"\"\"\n\n def __call__(self, text: Union[str, Text]) -> Text:\n \"\"\"Highlight a str or Text instance.\n\n Args:\n text (Union[str, ~Text]): Text to highlight.\n\n Raises:\n TypeError: If not called with text or str.\n\n Returns:\n Text: A test instance with highlighting applied.\n \"\"\"\n if isinstance(text, str):\n highlight_text = Text(text)\n elif isinstance(text, Text):\n highlight_text = text.copy()\n else:\n raise TypeError(f\"str or Text instance required, not {text!r}\")\n self.highlight(highlight_text)\n return highlight_text\n\n @abstractmethod\n def highlight(self, text: Text) -> None:\n \"\"\"Apply highlighting in place to text.\n\n Args:\n text (~Text): A text object highlight.\n \"\"\"\n\n\nclass NullHighlighter(Highlighter):\n \"\"\"A highlighter object that doesn't highlight.\n\n May be used to disable highlighting entirely.\n\n \"\"\"\n\n def highlight(self, text: Text) -> None:\n \"\"\"Nothing to do\"\"\"\n\n\nclass RegexHighlighter(Highlighter):\n \"\"\"Applies highlighting from a list of regular expressions.\"\"\"\n\n highlights: List[str] = []\n base_style: str = \"\"\n\n def highlight(self, text: Text) -> None:\n \"\"\"Highlight :class:`rich.text.Text` using regular expressions.\n\n Args:\n text (~Text): Text to highlighted.\n\n \"\"\"\n highlight_regex = text.highlight_regex\n for re_highlight in self.highlights:\n highlight_regex(re_highlight, style_prefix=self.base_style)\n\n\nclass ReprHighlighter(RegexHighlighter):\n \"\"\"Highlights the text typically produced from ``__repr__`` methods.\"\"\"\n\n base_style = \"repr.\"\n highlights = [\n r\"(?P<brace>[\\{\\[\\(\\)\\]\\}])\",\n r\"(?P<tag_start>\\<)(?P<tag_name>[\\w\\-\\.\\:]*)(?P<tag_contents>.*?)(?P<tag_end>\\>)\",\n r\"(?P<attrib_name>\\w+?)=(?P<attrib_value>\\\"?[\\w_]+\\\"?)\",\n 
r\"(?P<bool_true>True)|(?P<bool_false>False)|(?P<none>None)\",\n r\"(?P<number>(?<!\\w)\\-?[0-9]+\\.?[0-9]*(e[\\-\\+]?\\d+?)?\\b)\",\n r\"(?P<number>0x[0-9a-f]*)\",\n r\"(?P<path>\\B(\\/[\\w\\.\\-\\_\\+]+)*\\/)(?P<filename>[\\w\\.\\-\\_\\+]*)?\",\n r\"(?<!\\\\)(?P<str>b?\\'\\'\\'.*?(?<!\\\\)\\'\\'\\'|b?\\'.*?(?<!\\\\)\\'|b?\\\"\\\"\\\".*?(?<!\\\\)\\\"\\\"\\\"|b?\\\".*?(?<!\\\\)\\\")\",\n r\"(?P<url>https?:\\/\\/[0-9a-zA-Z\\$\\-\\_\\+\\!`\\(\\)\\,\\.\\?\\/\\;\\:\\&\\=\\%\\#]*)\",\n r\"(?P<uuid>[a-fA-F0-9]{8}\\-[a-fA-F0-9]{4}\\-[a-fA-F0-9]{4}\\-[a-fA-F0-9]{4}\\-[a-fA-F0-9]{12})\",\n ]\n\n\nif __name__ == \"__main__\": # pragma: no cover\n from .console import Console\n\n console = Console()\n console.print(\"[bold green]hello world![/bold green]\")\n console.print(\"'[bold green]hello world![/bold green]'\")\n\n console.print(\" /foo\")\n console.print(\"/foo/\")\n console.print(\"/foo/bar\")\n console.print(\"foo/bar/baz\")\n\n console.print(\"/foo/bar/baz?foo=bar+egg&egg=baz\")\n console.print(\"/foo/bar/baz/\")\n console.print(\"/foo/bar/baz/egg\")\n console.print(\"/foo/bar/baz/egg.py\")\n console.print(\"/foo/bar/baz/egg.py word\")\n console.print(\" /foo/bar/baz/egg.py word\")\n console.print(\"foo /foo/bar/baz/egg.py word\")\n console.print(\"foo /foo/bar/ba._++z/egg+.py word\")\n console.print(\"https://example.org?foo=bar#header\")\n\n console.print(1234567.34)\n console.print(1 / 2)\n console.print(-1 / 123123123123)\n", "path": "rich/highlighter.py"}]} | 1,732 | 431 |
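The entire fix is one extra member (`\#`) in the URL pattern's character class. A quick before/after check with the patterns copied from the diff above:

```python
import re

OLD = r"https?:\/\/[0-9a-zA-Z\$\-\_\+\!`\(\)\,\.\?\/\;\:\&\=\%]*"
NEW = r"https?:\/\/[0-9a-zA-Z\$\-\_\+\!`\(\)\,\.\?\/\;\:\&\=\%\#]*"

url = "https://github.com/willmcgugan/rich#rich-print-function"
print(re.match(OLD, url).group(0))  # stops before '#': https://github.com/willmcgugan/rich
print(re.match(NEW, url).group(0))  # matches the full URL, fragment included
```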
gh_patches_debug_480 | rasdani/github-patches | git_diff | google__flax-2136 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Flax actually requires jax 0.3.2
https://github.com/google/flax/blob/ef6bf4054c30271a58bfabb58f3d0049ef5d851a/flax/linen/initializers.py#L19
The constant initialiser was added in commit https://github.com/google/jax/commit/86e8928e709ac07cc51c10e815db6284507c320e, which was first included in jax 0.3.2.
This came up in NetKet's automated oldest-version-dependencies testing.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2022 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """setup.py for Flax."""
16
17 import os
18 from setuptools import find_packages
19 from setuptools import setup
20
21 here = os.path.abspath(os.path.dirname(__file__))
22 try:
23 README = open(os.path.join(here, "README.md"), encoding="utf-8").read()
24 except IOError:
25 README = ""
26
27 install_requires = [
28 "numpy>=1.12",
29 "jax>=0.3",
30 "matplotlib", # only needed for tensorboard export
31 "msgpack",
32 "optax",
33 "rich~=11.1.0",
34 "typing_extensions>=4.1.1",
35 ]
36
37 tests_require = [
38 "atari-py==0.2.5", # Last version does not have the ROMs we test on pre-packaged
39 "clu", # All examples.
40 "gym==0.18.3",
41 "jaxlib",
42 "jraph",
43 "ml-collections",
44 "opencv-python",
45 "pytest",
46 "pytest-cov",
47 "pytest-xdist==1.34.0", # upgrading to 2.0 broke tests, need to investigate
48 "pytype",
49 "sentencepiece", # WMT example.
50 "svn",
51 "tensorflow_text>=2.4.0", # WMT example.
52 "tensorflow_datasets",
53 "tensorflow",
54 "torch",
55 "pandas", # get_repo_metrics script
56 ]
57
58 __version__ = None
59
60 with open("flax/version.py") as f:
61 exec(f.read(), globals())
62
63 setup(
64 name="flax",
65 version=__version__,
66 description="Flax: A neural network library for JAX designed for flexibility",
67 long_description="\n\n".join([README]),
68 long_description_content_type="text/markdown",
69 classifiers=[
70 "Development Status :: 3 - Alpha",
71 "Intended Audience :: Developers",
72 "Intended Audience :: Science/Research",
73 "License :: OSI Approved :: Apache Software License",
74 "Programming Language :: Python :: 3.7",
75 "Topic :: Scientific/Engineering :: Artificial Intelligence",
76 ],
77 keywords="",
78 author="Flax team",
79 author_email="[email protected]",
80 url="https://github.com/google/flax",
81 packages=find_packages(),
82 package_data={"flax": ["py.typed"]},
83 zip_safe=False,
84 install_requires=install_requires,
85 extras_require={
86 "testing": tests_require,
87 },
88 )
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,7 @@
install_requires = [
"numpy>=1.12",
- "jax>=0.3",
+ "jax>=0.3.2",
"matplotlib", # only needed for tensorboard export
"msgpack",
"optax",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,7 +26,7 @@\n \n install_requires = [\n \"numpy>=1.12\",\n- \"jax>=0.3\",\n+ \"jax>=0.3.2\",\n \"matplotlib\", # only needed for tensorboard export\n \"msgpack\",\n \"optax\",\n", "issue": "Flax actually requires jax 0.3.2\nhttps://github.com/google/flax/blob/ef6bf4054c30271a58bfabb58f3d0049ef5d851a/flax/linen/initializers.py#L19\r\n\r\nthe constant initialiser was added in this commit https://github.com/google/jax/commit/86e8928e709ac07cc51c10e815db6284507c320e that was first included in jax 0.3.2\r\n\r\nThis came up in NetKet's automated oldest-version-dependencies testing.\n", "before_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"setup.py for Flax.\"\"\"\n\nimport os\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n README = open(os.path.join(here, \"README.md\"), encoding=\"utf-8\").read()\nexcept IOError:\n README = \"\"\n\ninstall_requires = [\n \"numpy>=1.12\",\n \"jax>=0.3\",\n \"matplotlib\", # only needed for tensorboard export\n \"msgpack\",\n \"optax\",\n \"rich~=11.1.0\", \n \"typing_extensions>=4.1.1\",\n]\n\ntests_require = [\n \"atari-py==0.2.5\", # Last version does not have the ROMs we test on pre-packaged\n \"clu\", # All examples.\n \"gym==0.18.3\",\n \"jaxlib\",\n \"jraph\",\n \"ml-collections\",\n \"opencv-python\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-xdist==1.34.0\", # upgrading to 2.0 broke tests, need to investigate\n \"pytype\",\n \"sentencepiece\", # WMT example.\n \"svn\",\n \"tensorflow_text>=2.4.0\", # WMT example.\n \"tensorflow_datasets\",\n \"tensorflow\",\n \"torch\",\n \"pandas\", # get_repo_metrics script\n]\n\n__version__ = None\n\nwith open(\"flax/version.py\") as f:\n exec(f.read(), globals())\n\nsetup(\n name=\"flax\",\n version=__version__,\n description=\"Flax: A neural network library for JAX designed for flexibility\",\n long_description=\"\\n\\n\".join([README]),\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n keywords=\"\",\n author=\"Flax team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/google/flax\",\n packages=find_packages(),\n package_data={\"flax\": [\"py.typed\"]},\n zip_safe=False,\n install_requires=install_requires,\n extras_require={\n \"testing\": tests_require,\n },\n )\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"setup.py for Flax.\"\"\"\n\nimport os\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n README = open(os.path.join(here, \"README.md\"), encoding=\"utf-8\").read()\nexcept IOError:\n README = \"\"\n\ninstall_requires = [\n \"numpy>=1.12\",\n \"jax>=0.3.2\",\n \"matplotlib\", # only needed for tensorboard export\n \"msgpack\",\n \"optax\",\n \"rich~=11.1.0\", \n \"typing_extensions>=4.1.1\",\n]\n\ntests_require = [\n \"atari-py==0.2.5\", # Last version does not have the ROMs we test on pre-packaged\n \"clu\", # All examples.\n \"gym==0.18.3\",\n \"jaxlib\",\n \"jraph\",\n \"ml-collections\",\n \"opencv-python\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-xdist==1.34.0\", # upgrading to 2.0 broke tests, need to investigate\n \"pytype\",\n \"sentencepiece\", # WMT example.\n \"svn\",\n \"tensorflow_text>=2.4.0\", # WMT example.\n \"tensorflow_datasets\",\n \"tensorflow\",\n \"torch\",\n \"pandas\", # get_repo_metrics script\n]\n\n__version__ = None\n\nwith open(\"flax/version.py\") as f:\n exec(f.read(), globals())\n\nsetup(\n name=\"flax\",\n version=__version__,\n description=\"Flax: A neural network library for JAX designed for flexibility\",\n long_description=\"\\n\\n\".join([README]),\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n keywords=\"\",\n author=\"Flax team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/google/flax\",\n packages=find_packages(),\n package_data={\"flax\": [\"py.typed\"]},\n zip_safe=False,\n install_requires=install_requires,\n extras_require={\n \"testing\": tests_require,\n },\n )\n", "path": "setup.py"}]} | 1,248 | 89 |
gh_patches_debug_1736 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-1045 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PlotWidget.__getattr__ raises wrong exception type - but this has a simple fix
`hasattr(widget, "some_non_existing_attribute")` raises `NameError` instead of returning `False` for instances of `PlotWidget`. I think that `PlotWidget.__getattr__` (in PlotWidget.py) should raise `AttributeError` instead of `NameError`, which would be converted correctly to `False` by `hasattr`. I believe the same holds for `TabWindow.__getattr__` (in graphicsWindows.py).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyqtgraph/graphicsWindows.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 DEPRECATED: The classes below are convenience classes that create a new window
4 containting a single, specific widget. These classes are now unnecessary because
5 it is possible to place any widget into its own window by simply calling its
6 show() method.
7 """
8
9 from .Qt import QtCore, QtGui, mkQApp
10 from .widgets.PlotWidget import *
11 from .imageview import *
12 from .widgets.GraphicsLayoutWidget import GraphicsLayoutWidget
13 from .widgets.GraphicsView import GraphicsView
14
15
16 class GraphicsWindow(GraphicsLayoutWidget):
17 """
18 (deprecated; use GraphicsLayoutWidget instead)
19
20 Convenience subclass of :class:`GraphicsLayoutWidget
21 <pyqtgraph.GraphicsLayoutWidget>`. This class is intended for use from
22 the interactive python prompt.
23 """
24 def __init__(self, title=None, size=(800,600), **kargs):
25 mkQApp()
26 GraphicsLayoutWidget.__init__(self, **kargs)
27 self.resize(*size)
28 if title is not None:
29 self.setWindowTitle(title)
30 self.show()
31
32
33 class TabWindow(QtGui.QMainWindow):
34 """
35 (deprecated)
36 """
37 def __init__(self, title=None, size=(800,600)):
38 mkQApp()
39 QtGui.QMainWindow.__init__(self)
40 self.resize(*size)
41 self.cw = QtGui.QTabWidget()
42 self.setCentralWidget(self.cw)
43 if title is not None:
44 self.setWindowTitle(title)
45 self.show()
46
47 def __getattr__(self, attr):
48 if hasattr(self.cw, attr):
49 return getattr(self.cw, attr)
50 else:
51 raise NameError(attr)
52
53
54 class PlotWindow(PlotWidget):
55 """
56 (deprecated; use PlotWidget instead)
57 """
58 def __init__(self, title=None, **kargs):
59 mkQApp()
60 self.win = QtGui.QMainWindow()
61 PlotWidget.__init__(self, **kargs)
62 self.win.setCentralWidget(self)
63 for m in ['resize']:
64 setattr(self, m, getattr(self.win, m))
65 if title is not None:
66 self.win.setWindowTitle(title)
67 self.win.show()
68
69
70 class ImageWindow(ImageView):
71 """
72 (deprecated; use ImageView instead)
73 """
74 def __init__(self, *args, **kargs):
75 mkQApp()
76 self.win = QtGui.QMainWindow()
77 self.win.resize(800,600)
78 if 'title' in kargs:
79 self.win.setWindowTitle(kargs['title'])
80 del kargs['title']
81 ImageView.__init__(self, self.win)
82 if len(args) > 0 or len(kargs) > 0:
83 self.setImage(*args, **kargs)
84 self.win.setCentralWidget(self)
85 for m in ['resize']:
86 setattr(self, m, getattr(self.win, m))
87 #for m in ['setImage', 'autoRange', 'addItem', 'removeItem', 'blackLevel', 'whiteLevel', 'imageItem']:
88 #setattr(self, m, getattr(self.cw, m))
89 self.win.show()
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyqtgraph/graphicsWindows.py b/pyqtgraph/graphicsWindows.py
--- a/pyqtgraph/graphicsWindows.py
+++ b/pyqtgraph/graphicsWindows.py
@@ -45,10 +45,7 @@
self.show()
def __getattr__(self, attr):
- if hasattr(self.cw, attr):
- return getattr(self.cw, attr)
- else:
- raise NameError(attr)
+ return getattr(self.cw, attr)
class PlotWindow(PlotWidget):
| {"golden_diff": "diff --git a/pyqtgraph/graphicsWindows.py b/pyqtgraph/graphicsWindows.py\n--- a/pyqtgraph/graphicsWindows.py\n+++ b/pyqtgraph/graphicsWindows.py\n@@ -45,10 +45,7 @@\n self.show()\n \n def __getattr__(self, attr):\n- if hasattr(self.cw, attr):\n- return getattr(self.cw, attr)\n- else:\n- raise NameError(attr)\n+ return getattr(self.cw, attr)\n \n \n class PlotWindow(PlotWidget):\n", "issue": "PlotWidget.__getattr__ raises wrong exception type - but this has a simple fix\n`hasattr(widget, \"some_non_existing_attribute\")` raises `NameError` instead of returning `False` for instances of `PlotWidget`. I think that `PlotWidget.__getattr__` (in PlotWidget.py) should raise `AttributeError` instead of `NameError`, which would be converted correctly to `False` by `hasattr`. I believe the same holds for `TabWindow.__getattr__` (in graphicsWindows.py).\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDEPRECATED: The classes below are convenience classes that create a new window\ncontainting a single, specific widget. These classes are now unnecessary because\nit is possible to place any widget into its own window by simply calling its\nshow() method.\n\"\"\"\n\nfrom .Qt import QtCore, QtGui, mkQApp\nfrom .widgets.PlotWidget import *\nfrom .imageview import *\nfrom .widgets.GraphicsLayoutWidget import GraphicsLayoutWidget\nfrom .widgets.GraphicsView import GraphicsView\n\n\nclass GraphicsWindow(GraphicsLayoutWidget):\n \"\"\"\n (deprecated; use GraphicsLayoutWidget instead)\n \n Convenience subclass of :class:`GraphicsLayoutWidget \n <pyqtgraph.GraphicsLayoutWidget>`. This class is intended for use from \n the interactive python prompt.\n \"\"\"\n def __init__(self, title=None, size=(800,600), **kargs):\n mkQApp()\n GraphicsLayoutWidget.__init__(self, **kargs)\n self.resize(*size)\n if title is not None:\n self.setWindowTitle(title)\n self.show()\n \n\nclass TabWindow(QtGui.QMainWindow):\n \"\"\"\n (deprecated)\n \"\"\"\n def __init__(self, title=None, size=(800,600)):\n mkQApp()\n QtGui.QMainWindow.__init__(self)\n self.resize(*size)\n self.cw = QtGui.QTabWidget()\n self.setCentralWidget(self.cw)\n if title is not None:\n self.setWindowTitle(title)\n self.show()\n \n def __getattr__(self, attr):\n if hasattr(self.cw, attr):\n return getattr(self.cw, attr)\n else:\n raise NameError(attr)\n \n\nclass PlotWindow(PlotWidget):\n \"\"\"\n (deprecated; use PlotWidget instead)\n \"\"\"\n def __init__(self, title=None, **kargs):\n mkQApp()\n self.win = QtGui.QMainWindow()\n PlotWidget.__init__(self, **kargs)\n self.win.setCentralWidget(self)\n for m in ['resize']:\n setattr(self, m, getattr(self.win, m))\n if title is not None:\n self.win.setWindowTitle(title)\n self.win.show()\n\n\nclass ImageWindow(ImageView):\n \"\"\"\n (deprecated; use ImageView instead)\n \"\"\"\n def __init__(self, *args, **kargs):\n mkQApp()\n self.win = QtGui.QMainWindow()\n self.win.resize(800,600)\n if 'title' in kargs:\n self.win.setWindowTitle(kargs['title'])\n del kargs['title']\n ImageView.__init__(self, self.win)\n if len(args) > 0 or len(kargs) > 0:\n self.setImage(*args, **kargs)\n self.win.setCentralWidget(self)\n for m in ['resize']:\n setattr(self, m, getattr(self.win, m))\n #for m in ['setImage', 'autoRange', 'addItem', 'removeItem', 'blackLevel', 'whiteLevel', 'imageItem']:\n #setattr(self, m, getattr(self.cw, m))\n self.win.show()\n", "path": "pyqtgraph/graphicsWindows.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDEPRECATED: The classes below are 
convenience classes that create a new window\ncontainting a single, specific widget. These classes are now unnecessary because\nit is possible to place any widget into its own window by simply calling its\nshow() method.\n\"\"\"\n\nfrom .Qt import QtCore, QtGui, mkQApp\nfrom .widgets.PlotWidget import *\nfrom .imageview import *\nfrom .widgets.GraphicsLayoutWidget import GraphicsLayoutWidget\nfrom .widgets.GraphicsView import GraphicsView\n\n\nclass GraphicsWindow(GraphicsLayoutWidget):\n \"\"\"\n (deprecated; use GraphicsLayoutWidget instead)\n \n Convenience subclass of :class:`GraphicsLayoutWidget \n <pyqtgraph.GraphicsLayoutWidget>`. This class is intended for use from \n the interactive python prompt.\n \"\"\"\n def __init__(self, title=None, size=(800,600), **kargs):\n mkQApp()\n GraphicsLayoutWidget.__init__(self, **kargs)\n self.resize(*size)\n if title is not None:\n self.setWindowTitle(title)\n self.show()\n \n\nclass TabWindow(QtGui.QMainWindow):\n \"\"\"\n (deprecated)\n \"\"\"\n def __init__(self, title=None, size=(800,600)):\n mkQApp()\n QtGui.QMainWindow.__init__(self)\n self.resize(*size)\n self.cw = QtGui.QTabWidget()\n self.setCentralWidget(self.cw)\n if title is not None:\n self.setWindowTitle(title)\n self.show()\n \n def __getattr__(self, attr):\n return getattr(self.cw, attr)\n \n\nclass PlotWindow(PlotWidget):\n \"\"\"\n (deprecated; use PlotWidget instead)\n \"\"\"\n def __init__(self, title=None, **kargs):\n mkQApp()\n self.win = QtGui.QMainWindow()\n PlotWidget.__init__(self, **kargs)\n self.win.setCentralWidget(self)\n for m in ['resize']:\n setattr(self, m, getattr(self.win, m))\n if title is not None:\n self.win.setWindowTitle(title)\n self.win.show()\n\n\nclass ImageWindow(ImageView):\n \"\"\"\n (deprecated; use ImageView instead)\n \"\"\"\n def __init__(self, *args, **kargs):\n mkQApp()\n self.win = QtGui.QMainWindow()\n self.win.resize(800,600)\n if 'title' in kargs:\n self.win.setWindowTitle(kargs['title'])\n del kargs['title']\n ImageView.__init__(self, self.win)\n if len(args) > 0 or len(kargs) > 0:\n self.setImage(*args, **kargs)\n self.win.setCentralWidget(self)\n for m in ['resize']:\n setattr(self, m, getattr(self.win, m))\n #for m in ['setImage', 'autoRange', 'addItem', 'removeItem', 'blackLevel', 'whiteLevel', 'imageItem']:\n #setattr(self, m, getattr(self.cw, m))\n self.win.show()\n", "path": "pyqtgraph/graphicsWindows.py"}]} | 1,207 | 117 |
gh_patches_debug_10360 | rasdani/github-patches | git_diff | ckan__ckan-624 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tag pages still use old templates
/tag
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckan/controllers/tag.py`
Content:
```
1 from pylons.i18n import _
2 from pylons import request, c
3
4 import ckan.logic as logic
5 import ckan.model as model
6 import ckan.lib.base as base
7 import ckan.lib.helpers as h
8
9
10 LIMIT = 25
11
12
13 class TagController(base.BaseController):
14
15 def __before__(self, action, **env):
16 base.BaseController.__before__(self, action, **env)
17 try:
18 context = {'model': model, 'user': c.user or c.author}
19 logic.check_access('site_read', context)
20 except logic.NotAuthorized:
21 base.abort(401, _('Not authorized to see this page'))
22
23 def index(self):
24 c.q = request.params.get('q', '')
25
26 context = {'model': model, 'session': model.Session,
27 'user': c.user or c.author, 'for_view': True}
28
29 data_dict = {'all_fields': True}
30
31 if c.q:
32 page = int(request.params.get('page', 1))
33 data_dict['q'] = c.q
34 data_dict['limit'] = LIMIT
35 data_dict['offset'] = (page - 1) * LIMIT
36 data_dict['return_objects'] = True
37
38 results = logic.get_action('tag_list')(context, data_dict)
39
40 if c.q:
41 c.page = h.Page(
42 collection=results,
43 page=page,
44 item_count=len(results),
45 items_per_page=LIMIT
46 )
47 c.page.items = results
48 else:
49 c.page = h.AlphaPage(
50 collection=results,
51 page=request.params.get('page', 'A'),
52 alpha_attribute='name',
53 other_text=_('Other'),
54 )
55
56 return base.render('tag/index.html')
57
58 def read(self, id):
59 context = {'model': model, 'session': model.Session,
60 'user': c.user or c.author, 'for_view': True}
61
62 data_dict = {'id': id}
63 try:
64 c.tag = logic.get_action('tag_show')(context, data_dict)
65 except logic.NotFound:
66 base.abort(404, _('Tag not found'))
67
68 return base.render('tag/read.html')
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckan/controllers/tag.py b/ckan/controllers/tag.py
--- a/ckan/controllers/tag.py
+++ b/ckan/controllers/tag.py
@@ -1,5 +1,5 @@
from pylons.i18n import _
-from pylons import request, c
+from pylons import request, c, config
import ckan.logic as logic
import ckan.model as model
@@ -65,4 +65,7 @@
except logic.NotFound:
base.abort(404, _('Tag not found'))
- return base.render('tag/read.html')
+ if h.asbool(config.get('ckan.legacy_templates', False)):
+ return base.render('tag/read.html')
+ else:
+ h.redirect_to(controller='package', action='search', tags=c.tag.get('name'))
| {"golden_diff": "diff --git a/ckan/controllers/tag.py b/ckan/controllers/tag.py\n--- a/ckan/controllers/tag.py\n+++ b/ckan/controllers/tag.py\n@@ -1,5 +1,5 @@\n from pylons.i18n import _\n-from pylons import request, c\n+from pylons import request, c, config\n \n import ckan.logic as logic\n import ckan.model as model\n@@ -65,4 +65,7 @@\n except logic.NotFound:\n base.abort(404, _('Tag not found'))\n \n- return base.render('tag/read.html')\n+ if h.asbool(config.get('ckan.legacy_templates', False)):\n+ return base.render('tag/read.html')\n+ else:\n+ h.redirect_to(controller='package', action='search', tags=c.tag.get('name'))\n", "issue": "Tag pages still use old templates\n/tag\n\n", "before_files": [{"content": "from pylons.i18n import _\nfrom pylons import request, c\n\nimport ckan.logic as logic\nimport ckan.model as model\nimport ckan.lib.base as base\nimport ckan.lib.helpers as h\n\n\nLIMIT = 25\n\n\nclass TagController(base.BaseController):\n\n def __before__(self, action, **env):\n base.BaseController.__before__(self, action, **env)\n try:\n context = {'model': model, 'user': c.user or c.author}\n logic.check_access('site_read', context)\n except logic.NotAuthorized:\n base.abort(401, _('Not authorized to see this page'))\n\n def index(self):\n c.q = request.params.get('q', '')\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True}\n\n data_dict = {'all_fields': True}\n\n if c.q:\n page = int(request.params.get('page', 1))\n data_dict['q'] = c.q\n data_dict['limit'] = LIMIT\n data_dict['offset'] = (page - 1) * LIMIT\n data_dict['return_objects'] = True\n\n results = logic.get_action('tag_list')(context, data_dict)\n\n if c.q:\n c.page = h.Page(\n collection=results,\n page=page,\n item_count=len(results),\n items_per_page=LIMIT\n )\n c.page.items = results\n else:\n c.page = h.AlphaPage(\n collection=results,\n page=request.params.get('page', 'A'),\n alpha_attribute='name',\n other_text=_('Other'),\n )\n\n return base.render('tag/index.html')\n\n def read(self, id):\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True}\n\n data_dict = {'id': id}\n try:\n c.tag = logic.get_action('tag_show')(context, data_dict)\n except logic.NotFound:\n base.abort(404, _('Tag not found'))\n\n return base.render('tag/read.html')\n", "path": "ckan/controllers/tag.py"}], "after_files": [{"content": "from pylons.i18n import _\nfrom pylons import request, c, config\n\nimport ckan.logic as logic\nimport ckan.model as model\nimport ckan.lib.base as base\nimport ckan.lib.helpers as h\n\n\nLIMIT = 25\n\n\nclass TagController(base.BaseController):\n\n def __before__(self, action, **env):\n base.BaseController.__before__(self, action, **env)\n try:\n context = {'model': model, 'user': c.user or c.author}\n logic.check_access('site_read', context)\n except logic.NotAuthorized:\n base.abort(401, _('Not authorized to see this page'))\n\n def index(self):\n c.q = request.params.get('q', '')\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True}\n\n data_dict = {'all_fields': True}\n\n if c.q:\n page = int(request.params.get('page', 1))\n data_dict['q'] = c.q\n data_dict['limit'] = LIMIT\n data_dict['offset'] = (page - 1) * LIMIT\n data_dict['return_objects'] = True\n\n results = logic.get_action('tag_list')(context, data_dict)\n\n if c.q:\n c.page = h.Page(\n collection=results,\n page=page,\n item_count=len(results),\n items_per_page=LIMIT\n )\n c.page.items = 
results\n else:\n c.page = h.AlphaPage(\n collection=results,\n page=request.params.get('page', 'A'),\n alpha_attribute='name',\n other_text=_('Other'),\n )\n\n return base.render('tag/index.html')\n\n def read(self, id):\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True}\n\n data_dict = {'id': id}\n try:\n c.tag = logic.get_action('tag_show')(context, data_dict)\n except logic.NotFound:\n base.abort(404, _('Tag not found'))\n\n if h.asbool(config.get('ckan.legacy_templates', False)):\n return base.render('tag/read.html')\n else:\n h.redirect_to(controller='package', action='search', tags=c.tag.get('name'))\n", "path": "ckan/controllers/tag.py"}]} | 871 | 180 |
gh_patches_debug_11659 | rasdani/github-patches | git_diff | gratipay__gratipay.com-4464 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
NPM sync is broken
https://gratipay.slack.com/archives/C36LJJF9V/p1494580201702422
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gratipay/sync_npm.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import requests
5 from couchdb import Database
6
7 from gratipay.models.package import NPM, Package
8
9
10 REGISTRY_URL = 'https://replicate.npmjs.com/'
11
12
13 def get_last_seq(db):
14 return db.one('SELECT npm_last_seq FROM worker_coordination')
15
16
17 def production_change_stream(seq):
18 """Given a sequence number in the npm registry change stream, start
19 streaming from there!
20 """
21 return Database(REGISTRY_URL).changes(feed='continuous', include_docs=True, since=seq)
22
23
24 def process_doc(doc):
25 """Return a smoothed-out doc, or None if it's not a package doc, meaning
26 there's no name key and it's probably a design doc, per:
27
28 https://github.com/npm/registry/blob/aef8a275/docs/follower.md#clean-up
29
30 """
31 if 'name' not in doc:
32 return None
33 name = doc['name']
34 description = doc.get('description', '')
35 emails = [e for e in [m.get('email') for m in doc.get('maintainers', [])] if e.strip()]
36 return {'name': name, 'description': description, 'emails': sorted(set(emails))}
37
38
39 def consume_change_stream(stream, db):
40 """Given an iterable of CouchDB change notifications and a
41 :py:class:`~GratipayDB`, read from the stream and write to the db.
42
43 The npm registry is a CouchDB app, which means we get a change stream from
44 it that allows us to follow registry updates in near-realtime. Our strategy
45 here is to maintain open connections to both the registry and our own
46 database, and write as we read.
47
48 """
49 with db.get_connection() as connection:
50 for change in stream:
51
52 # Decide what to do.
53 if change.get('deleted'):
54 package = Package.from_names(NPM, change['id'])
55 assert package is not None # right?
56 op, kw = package.delete, {}
57 else:
58 op = Package.upsert
59 kw = process_doc(change['doc'])
60 if not kw:
61 continue
62 kw['package_manager'] = NPM
63
64 # Do it.
65 cursor = connection.cursor()
66 kw['cursor'] = cursor
67 op(**kw)
68 cursor.run('UPDATE worker_coordination SET npm_last_seq=%(seq)s', change)
69 connection.commit()
70
71
72 def check(db, _print=print):
73 ours = db.one('SELECT npm_last_seq FROM worker_coordination')
74 theirs = int(requests.get(REGISTRY_URL).json()['update_seq'])
75 _print("count#npm-sync-lag={}".format(theirs - ours))
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gratipay/sync_npm.py b/gratipay/sync_npm.py
--- a/gratipay/sync_npm.py
+++ b/gratipay/sync_npm.py
@@ -52,7 +52,11 @@
# Decide what to do.
if change.get('deleted'):
package = Package.from_names(NPM, change['id'])
- assert package is not None # right?
+ if not package:
+ # As a result of CouchDB's compaction algorithm, we might
+ # receive 'deleted' events for docs even if we haven't seen
+ # the corresponding events for when the doc was created
+ continue
op, kw = package.delete, {}
else:
op = Package.upsert
| {"golden_diff": "diff --git a/gratipay/sync_npm.py b/gratipay/sync_npm.py\n--- a/gratipay/sync_npm.py\n+++ b/gratipay/sync_npm.py\n@@ -52,7 +52,11 @@\n # Decide what to do.\n if change.get('deleted'):\n package = Package.from_names(NPM, change['id'])\n- assert package is not None # right?\n+ if not package:\n+ # As a result of CouchDB's compaction algorithm, we might\n+ # receive 'deleted' events for docs even if we haven't seen\n+ # the corresponding events for when the doc was created\n+ continue\n op, kw = package.delete, {}\n else:\n op = Package.upsert\n", "issue": "NPM sync is broken\nhttps://gratipay.slack.com/archives/C36LJJF9V/p1494580201702422\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport requests\nfrom couchdb import Database\n\nfrom gratipay.models.package import NPM, Package\n\n\nREGISTRY_URL = 'https://replicate.npmjs.com/'\n\n\ndef get_last_seq(db):\n return db.one('SELECT npm_last_seq FROM worker_coordination')\n\n\ndef production_change_stream(seq):\n \"\"\"Given a sequence number in the npm registry change stream, start\n streaming from there!\n \"\"\"\n return Database(REGISTRY_URL).changes(feed='continuous', include_docs=True, since=seq)\n\n\ndef process_doc(doc):\n \"\"\"Return a smoothed-out doc, or None if it's not a package doc, meaning\n there's no name key and it's probably a design doc, per:\n\n https://github.com/npm/registry/blob/aef8a275/docs/follower.md#clean-up\n\n \"\"\"\n if 'name' not in doc:\n return None\n name = doc['name']\n description = doc.get('description', '')\n emails = [e for e in [m.get('email') for m in doc.get('maintainers', [])] if e.strip()]\n return {'name': name, 'description': description, 'emails': sorted(set(emails))}\n\n\ndef consume_change_stream(stream, db):\n \"\"\"Given an iterable of CouchDB change notifications and a\n :py:class:`~GratipayDB`, read from the stream and write to the db.\n\n The npm registry is a CouchDB app, which means we get a change stream from\n it that allows us to follow registry updates in near-realtime. 
Our strategy\n here is to maintain open connections to both the registry and our own\n database, and write as we read.\n\n \"\"\"\n with db.get_connection() as connection:\n for change in stream:\n\n # Decide what to do.\n if change.get('deleted'):\n package = Package.from_names(NPM, change['id'])\n assert package is not None # right?\n op, kw = package.delete, {}\n else:\n op = Package.upsert\n kw = process_doc(change['doc'])\n if not kw:\n continue\n kw['package_manager'] = NPM\n\n # Do it.\n cursor = connection.cursor()\n kw['cursor'] = cursor\n op(**kw)\n cursor.run('UPDATE worker_coordination SET npm_last_seq=%(seq)s', change)\n connection.commit()\n\n\ndef check(db, _print=print):\n ours = db.one('SELECT npm_last_seq FROM worker_coordination')\n theirs = int(requests.get(REGISTRY_URL).json()['update_seq'])\n _print(\"count#npm-sync-lag={}\".format(theirs - ours))\n", "path": "gratipay/sync_npm.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport requests\nfrom couchdb import Database\n\nfrom gratipay.models.package import NPM, Package\n\n\nREGISTRY_URL = 'https://replicate.npmjs.com/'\n\n\ndef get_last_seq(db):\n return db.one('SELECT npm_last_seq FROM worker_coordination')\n\n\ndef production_change_stream(seq):\n \"\"\"Given a sequence number in the npm registry change stream, start\n streaming from there!\n \"\"\"\n return Database(REGISTRY_URL).changes(feed='continuous', include_docs=True, since=seq)\n\n\ndef process_doc(doc):\n \"\"\"Return a smoothed-out doc, or None if it's not a package doc, meaning\n there's no name key and it's probably a design doc, per:\n\n https://github.com/npm/registry/blob/aef8a275/docs/follower.md#clean-up\n\n \"\"\"\n if 'name' not in doc:\n return None\n name = doc['name']\n description = doc.get('description', '')\n emails = [e for e in [m.get('email') for m in doc.get('maintainers', [])] if e.strip()]\n return {'name': name, 'description': description, 'emails': sorted(set(emails))}\n\n\ndef consume_change_stream(stream, db):\n \"\"\"Given an iterable of CouchDB change notifications and a\n :py:class:`~GratipayDB`, read from the stream and write to the db.\n\n The npm registry is a CouchDB app, which means we get a change stream from\n it that allows us to follow registry updates in near-realtime. Our strategy\n here is to maintain open connections to both the registry and our own\n database, and write as we read.\n\n \"\"\"\n with db.get_connection() as connection:\n for change in stream:\n\n # Decide what to do.\n if change.get('deleted'):\n package = Package.from_names(NPM, change['id'])\n if not package:\n # As a result of CouchDB's compaction algorithm, we might\n # receive 'deleted' events for docs even if we haven't seen\n # the corresponding events for when the doc was created\n continue\n op, kw = package.delete, {}\n else:\n op = Package.upsert\n kw = process_doc(change['doc'])\n if not kw:\n continue\n kw['package_manager'] = NPM\n\n # Do it.\n cursor = connection.cursor()\n kw['cursor'] = cursor\n op(**kw)\n cursor.run('UPDATE worker_coordination SET npm_last_seq=%(seq)s', change)\n connection.commit()\n\n\ndef check(db, _print=print):\n ours = db.one('SELECT npm_last_seq FROM worker_coordination')\n theirs = int(requests.get(REGISTRY_URL).json()['update_seq'])\n _print(\"count#npm-sync-lag={}\".format(theirs - ours))\n", "path": "gratipay/sync_npm.py"}]} | 1,044 | 171 |
gh_patches_debug_26298 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3326 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider wsp is broken
During the global build at 2021-09-01-14-42-16, spider **wsp** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/logs/wsp.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/wsp.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/wsp.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/wsp.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4 from locations.items import GeojsonPointItem
5
6
7 class wsp(scrapy.Spider):
8 name = "wsp"
9 item_attributes = {'brand': "wsp"}
10 allowed_domains = ["www.wsp.com"]
11 start_urls = (
12 'https://www.wsp.com',
13 )
14
15 def parse(self, response):
16 url = 'https://www.wsp.com/api/sitecore/Maps/GetMapPoints'
17
18 formdata = {
19 'itemId': '{2F436202-D2B9-4F3D-8ECC-5E0BCA533888}',
20 }
21
22 yield scrapy.http.FormRequest(
23 url,
24 self.parse_store,
25 method='POST',
26 formdata=formdata,
27 )
28
29 def parse_store(self, response):
30 office_data = json.loads(response.body_as_unicode())
31
32 for office in office_data:
33 try:
34 properties = {
35 'ref': office["ID"]["Guid"],
36 'addr_full': office["Address"],
37 'lat': office["Location"].split(",")[0],
38 'lon': office["Location"].split(",")[1],
39 'name': office["Name"],
40 'website': office["MapPointURL"]
41 }
42 except IndexError:
43 continue
44
45 yield GeojsonPointItem(**properties)
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/wsp.py b/locations/spiders/wsp.py
--- a/locations/spiders/wsp.py
+++ b/locations/spiders/wsp.py
@@ -9,7 +9,7 @@
item_attributes = {'brand': "wsp"}
allowed_domains = ["www.wsp.com"]
start_urls = (
- 'https://www.wsp.com',
+ 'https://www.wsp.com/',
)
def parse(self, response):
@@ -24,10 +24,10 @@
self.parse_store,
method='POST',
formdata=formdata,
- )
+ )
def parse_store(self, response):
- office_data = json.loads(response.body_as_unicode())
+ office_data = json.loads(response.text)
for office in office_data:
try:
@@ -37,9 +37,9 @@
'lat': office["Location"].split(",")[0],
'lon': office["Location"].split(",")[1],
'name': office["Name"],
- 'website': office["MapPointURL"]
+ 'website': response.urljoin(office["MapPointURL"]),
}
except IndexError:
continue
- yield GeojsonPointItem(**properties)
\ No newline at end of file
+ yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/wsp.py b/locations/spiders/wsp.py\n--- a/locations/spiders/wsp.py\n+++ b/locations/spiders/wsp.py\n@@ -9,7 +9,7 @@\n item_attributes = {'brand': \"wsp\"}\n allowed_domains = [\"www.wsp.com\"]\n start_urls = (\n- 'https://www.wsp.com',\n+ 'https://www.wsp.com/',\n )\n \n def parse(self, response):\n@@ -24,10 +24,10 @@\n self.parse_store,\n method='POST',\n formdata=formdata,\n- )\n+ )\n \n def parse_store(self, response):\n- office_data = json.loads(response.body_as_unicode())\n+ office_data = json.loads(response.text)\n \n for office in office_data:\n try:\n@@ -37,9 +37,9 @@\n 'lat': office[\"Location\"].split(\",\")[0],\n 'lon': office[\"Location\"].split(\",\")[1],\n 'name': office[\"Name\"],\n- 'website': office[\"MapPointURL\"]\n+ 'website': response.urljoin(office[\"MapPointURL\"]),\n }\n except IndexError:\n continue\n \n- yield GeojsonPointItem(**properties)\n\\ No newline at end of file\n+ yield GeojsonPointItem(**properties)\n", "issue": "Spider wsp is broken\nDuring the global build at 2021-09-01-14-42-16, spider **wsp** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/logs/wsp.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/wsp.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/wsp.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nfrom locations.items import GeojsonPointItem\n\n\nclass wsp(scrapy.Spider):\n name = \"wsp\"\n item_attributes = {'brand': \"wsp\"}\n allowed_domains = [\"www.wsp.com\"]\n start_urls = (\n 'https://www.wsp.com',\n )\n\n def parse(self, response):\n url = 'https://www.wsp.com/api/sitecore/Maps/GetMapPoints'\n\n formdata = {\n 'itemId': '{2F436202-D2B9-4F3D-8ECC-5E0BCA533888}',\n }\n\n yield scrapy.http.FormRequest(\n url,\n self.parse_store,\n method='POST',\n formdata=formdata,\n )\n\n def parse_store(self, response):\n office_data = json.loads(response.body_as_unicode())\n\n for office in office_data:\n try:\n properties = {\n 'ref': office[\"ID\"][\"Guid\"],\n 'addr_full': office[\"Address\"],\n 'lat': office[\"Location\"].split(\",\")[0],\n 'lon': office[\"Location\"].split(\",\")[1],\n 'name': office[\"Name\"],\n 'website': office[\"MapPointURL\"]\n }\n except IndexError:\n continue\n\n yield GeojsonPointItem(**properties)", "path": "locations/spiders/wsp.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nfrom locations.items import GeojsonPointItem\n\n\nclass wsp(scrapy.Spider):\n name = \"wsp\"\n item_attributes = {'brand': \"wsp\"}\n allowed_domains = [\"www.wsp.com\"]\n start_urls = (\n 'https://www.wsp.com/',\n )\n\n def parse(self, response):\n url = 'https://www.wsp.com/api/sitecore/Maps/GetMapPoints'\n\n formdata = {\n 'itemId': '{2F436202-D2B9-4F3D-8ECC-5E0BCA533888}',\n }\n\n yield scrapy.http.FormRequest(\n url,\n self.parse_store,\n method='POST',\n formdata=formdata,\n )\n\n def parse_store(self, response):\n office_data = json.loads(response.text)\n\n for office in office_data:\n try:\n properties = {\n 'ref': office[\"ID\"][\"Guid\"],\n 'addr_full': office[\"Address\"],\n 'lat': office[\"Location\"].split(\",\")[0],\n 'lon': office[\"Location\"].split(\",\")[1],\n 'name': office[\"Name\"],\n 'website': response.urljoin(office[\"MapPointURL\"]),\n }\n except IndexError:\n continue\n\n yield 
GeojsonPointItem(**properties)\n", "path": "locations/spiders/wsp.py"}]} | 816 | 293 |
gh_patches_debug_10115 | rasdani/github-patches | git_diff | Qiskit__qiskit-11782 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Round durations in `GenericBackendV2`
<!--
⚠️ If you do not respect this template, your pull request will be closed.
⚠️ Your pull request title should be short detailed and understandable for all.
⚠️ Also, please add a release note file using reno if the change needs to be
documented in the release notes.
⚠️ If your pull request fixes an open issue, please link to the issue.
- [ ] I have added the tests to cover my changes.
- [ ] I have updated the documentation accordingly.
- [ ] I have read the CONTRIBUTING document.
-->
### Summary
This PR makes sure that the conversion of `GenericBackendV2` instruction durations to `dt` is exact to avoid user warnings during scheduling of type:
`UserWarning: Duration is rounded to 616 [dt] = 1.367520e-07 [s] from 1.366887e-07 [s]`
Given that the durations are sampled randomly, and the rounded duration is the one used in the scheduling passes, we might as well make sure in advance that the conversion from seconds to dt will be exact and doesn't raise warnings.
### Details and comments
I am not sure this qualifies as a bugfix but I think it improves the readability of the test logs. For example, for `test_scheduling_backend_v2` in `test/python/compiler/test_transpiler.py`. Before:
```
/Users/ept/qiskit_workspace/qiskit/qiskit/circuit/duration.py:37: UserWarning: Duration is rounded to 986 [dt] = 2.188920e-07 [s] from 2.189841e-07 [s]
warnings.warn(
/Users/ept/qiskit_workspace/qiskit/qiskit/circuit/duration.py:37: UserWarning: Duration is rounded to 2740 [dt] = 6.082800e-07 [s] from 6.083383e-07 [s]
warnings.warn(
/Users/ept/qiskit_workspace/qiskit/qiskit/circuit/duration.py:37: UserWarning: Duration is rounded to 2697 [dt] = 5.987340e-07 [s] from 5.988312e-07 [s]
warnings.warn(
/Users/ept/qiskit_workspace/qiskit/qiskit/circuit/duration.py:37: UserWarning: Duration is rounded to 178 [dt] = 3.951600e-08 [s] from 3.956636e-08 [s]
warnings.warn(
.
----------------------------------------------------------------------
Ran 1 test in 0.548s
OK
```
After:
```
.
----------------------------------------------------------------------
Ran 1 test in 0.506s
OK
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/circuit/duration.py`
Content:
```
1 # This code is part of Qiskit.
2 #
3 # (C) Copyright IBM 2020.
4 #
5 # This code is licensed under the Apache License, Version 2.0. You may
6 # obtain a copy of this license in the LICENSE.txt file in the root directory
7 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
8 #
9 # Any modifications or derivative works of this code must retain this
10 # copyright notice, and modified files need to carry a notice indicating
11 # that they have been altered from the originals.
12
13 """
14 Utilities for handling duration of a circuit instruction.
15 """
16 import warnings
17
18 from qiskit.circuit import QuantumCircuit
19 from qiskit.circuit.exceptions import CircuitError
20 from qiskit.utils.units import apply_prefix
21
22
23 def duration_in_dt(duration_in_sec: float, dt_in_sec: float) -> int:
24 """
25 Return duration in dt.
26
27 Args:
28 duration_in_sec: duration [s] to be converted.
29 dt_in_sec: duration of dt in seconds used for conversion.
30
31 Returns:
32 Duration in dt.
33 """
34 res = round(duration_in_sec / dt_in_sec)
35 rounding_error = abs(duration_in_sec - res * dt_in_sec)
36 if rounding_error > 1e-15:
37 warnings.warn(
38 "Duration is rounded to %d [dt] = %e [s] from %e [s]"
39 % (res, res * dt_in_sec, duration_in_sec),
40 UserWarning,
41 )
42 return res
43
44
45 def convert_durations_to_dt(qc: QuantumCircuit, dt_in_sec: float, inplace=True):
46 """Convert all the durations in SI (seconds) into those in dt.
47
48 Returns a new circuit if `inplace=False`.
49
50 Parameters:
51 qc (QuantumCircuit): Duration of dt in seconds used for conversion.
52 dt_in_sec (float): Duration of dt in seconds used for conversion.
53 inplace (bool): All durations are converted inplace or return new circuit.
54
55 Returns:
56 QuantumCircuit: Converted circuit if `inplace = False`, otherwise None.
57
58 Raises:
59 CircuitError: if fail to convert durations.
60 """
61 if inplace:
62 circ = qc
63 else:
64 circ = qc.copy()
65
66 for instruction in circ.data:
67 operation = instruction.operation
68 if operation.unit == "dt" or operation.duration is None:
69 continue
70
71 if not operation.unit.endswith("s"):
72 raise CircuitError(f"Invalid time unit: '{operation.unit}'")
73
74 duration = operation.duration
75 if operation.unit != "s":
76 duration = apply_prefix(duration, operation.unit)
77
78 operation.duration = duration_in_dt(duration, dt_in_sec)
79 operation.unit = "dt"
80
81 if circ.duration is not None:
82 circ.duration = duration_in_dt(circ.duration, dt_in_sec)
83 circ.unit = "dt"
84
85 if not inplace:
86 return circ
87 else:
88 return None
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qiskit/circuit/duration.py b/qiskit/circuit/duration.py
--- a/qiskit/circuit/duration.py
+++ b/qiskit/circuit/duration.py
@@ -78,8 +78,15 @@
operation.duration = duration_in_dt(duration, dt_in_sec)
operation.unit = "dt"
- if circ.duration is not None:
- circ.duration = duration_in_dt(circ.duration, dt_in_sec)
+ if circ.duration is not None and circ.unit != "dt":
+ if not circ.unit.endswith("s"):
+ raise CircuitError(f"Invalid time unit: '{circ.unit}'")
+
+ duration = circ.duration
+ if circ.unit != "s":
+ duration = apply_prefix(duration, circ.unit)
+
+ circ.duration = duration_in_dt(duration, dt_in_sec)
circ.unit = "dt"
if not inplace:
| {"golden_diff": "diff --git a/qiskit/circuit/duration.py b/qiskit/circuit/duration.py\n--- a/qiskit/circuit/duration.py\n+++ b/qiskit/circuit/duration.py\n@@ -78,8 +78,15 @@\n operation.duration = duration_in_dt(duration, dt_in_sec)\n operation.unit = \"dt\"\n \n- if circ.duration is not None:\n- circ.duration = duration_in_dt(circ.duration, dt_in_sec)\n+ if circ.duration is not None and circ.unit != \"dt\":\n+ if not circ.unit.endswith(\"s\"):\n+ raise CircuitError(f\"Invalid time unit: '{circ.unit}'\")\n+\n+ duration = circ.duration\n+ if circ.unit != \"s\":\n+ duration = apply_prefix(duration, circ.unit)\n+\n+ circ.duration = duration_in_dt(duration, dt_in_sec)\n circ.unit = \"dt\"\n \n if not inplace:\n", "issue": "Round durations in `GenericBackendV2`\n<!--\r\n\u26a0\ufe0f If you do not respect this template, your pull request will be closed.\r\n\u26a0\ufe0f Your pull request title should be short detailed and understandable for all.\r\n\u26a0\ufe0f Also, please add a release note file using reno if the change needs to be\r\n documented in the release notes.\r\n\u26a0\ufe0f If your pull request fixes an open issue, please link to the issue.\r\n\r\n- [ ] I have added the tests to cover my changes.\r\n- [ ] I have updated the documentation accordingly.\r\n- [ ] I have read the CONTRIBUTING document.\r\n-->\r\n\r\n### Summary\r\nThis PR makes sure that the conversion of `GenericBackendV2` instruction durations to `dt` is exact to avoid user warnings during scheduling of type:\r\n\r\n`UserWarning: Duration is rounded to 616 [dt] = 1.367520e-07 [s] from 1.366887e-07 [s]`\r\n\r\nGiven that the durations are sampled randomly, and the rounded duration is the one used in the scheduling passes, we might as well make sure in advance that the conversion from seconds to dt will be exact and doesn't raise warnings.\r\n\r\n### Details and comments\r\nI am not sure this qualifies as a bugfix but I think it improves the readability of the test logs. For example, for `test_scheduling_backend_v2` in `test/python/compiler/test_transpiler.py`. Before:\r\n\r\n```\r\n/Users/ept/qiskit_workspace/qiskit/qiskit/circuit/duration.py:37: UserWarning: Duration is rounded to 986 [dt] = 2.188920e-07 [s] from 2.189841e-07 [s]\r\n warnings.warn(\r\n/Users/ept/qiskit_workspace/qiskit/qiskit/circuit/duration.py:37: UserWarning: Duration is rounded to 2740 [dt] = 6.082800e-07 [s] from 6.083383e-07 [s]\r\n warnings.warn(\r\n/Users/ept/qiskit_workspace/qiskit/qiskit/circuit/duration.py:37: UserWarning: Duration is rounded to 2697 [dt] = 5.987340e-07 [s] from 5.988312e-07 [s]\r\n warnings.warn(\r\n/Users/ept/qiskit_workspace/qiskit/qiskit/circuit/duration.py:37: UserWarning: Duration is rounded to 178 [dt] = 3.951600e-08 [s] from 3.956636e-08 [s]\r\n warnings.warn(\r\n.\r\n----------------------------------------------------------------------\r\nRan 1 test in 0.548s\r\n\r\nOK\r\n```\r\n\r\nAfter:\r\n\r\n```\r\n.\r\n----------------------------------------------------------------------\r\nRan 1 test in 0.506s\r\n\r\nOK\r\n```\r\n\n", "before_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nUtilities for handling duration of a circuit instruction.\n\"\"\"\nimport warnings\n\nfrom qiskit.circuit import QuantumCircuit\nfrom qiskit.circuit.exceptions import CircuitError\nfrom qiskit.utils.units import apply_prefix\n\n\ndef duration_in_dt(duration_in_sec: float, dt_in_sec: float) -> int:\n \"\"\"\n Return duration in dt.\n\n Args:\n duration_in_sec: duration [s] to be converted.\n dt_in_sec: duration of dt in seconds used for conversion.\n\n Returns:\n Duration in dt.\n \"\"\"\n res = round(duration_in_sec / dt_in_sec)\n rounding_error = abs(duration_in_sec - res * dt_in_sec)\n if rounding_error > 1e-15:\n warnings.warn(\n \"Duration is rounded to %d [dt] = %e [s] from %e [s]\"\n % (res, res * dt_in_sec, duration_in_sec),\n UserWarning,\n )\n return res\n\n\ndef convert_durations_to_dt(qc: QuantumCircuit, dt_in_sec: float, inplace=True):\n \"\"\"Convert all the durations in SI (seconds) into those in dt.\n\n Returns a new circuit if `inplace=False`.\n\n Parameters:\n qc (QuantumCircuit): Duration of dt in seconds used for conversion.\n dt_in_sec (float): Duration of dt in seconds used for conversion.\n inplace (bool): All durations are converted inplace or return new circuit.\n\n Returns:\n QuantumCircuit: Converted circuit if `inplace = False`, otherwise None.\n\n Raises:\n CircuitError: if fail to convert durations.\n \"\"\"\n if inplace:\n circ = qc\n else:\n circ = qc.copy()\n\n for instruction in circ.data:\n operation = instruction.operation\n if operation.unit == \"dt\" or operation.duration is None:\n continue\n\n if not operation.unit.endswith(\"s\"):\n raise CircuitError(f\"Invalid time unit: '{operation.unit}'\")\n\n duration = operation.duration\n if operation.unit != \"s\":\n duration = apply_prefix(duration, operation.unit)\n\n operation.duration = duration_in_dt(duration, dt_in_sec)\n operation.unit = \"dt\"\n\n if circ.duration is not None:\n circ.duration = duration_in_dt(circ.duration, dt_in_sec)\n circ.unit = \"dt\"\n\n if not inplace:\n return circ\n else:\n return None\n", "path": "qiskit/circuit/duration.py"}], "after_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nUtilities for handling duration of a circuit instruction.\n\"\"\"\nimport warnings\n\nfrom qiskit.circuit import QuantumCircuit\nfrom qiskit.circuit.exceptions import CircuitError\nfrom qiskit.utils.units import apply_prefix\n\n\ndef duration_in_dt(duration_in_sec: float, dt_in_sec: float) -> int:\n \"\"\"\n Return duration in dt.\n\n Args:\n duration_in_sec: duration [s] to be converted.\n dt_in_sec: duration of dt in seconds used for conversion.\n\n Returns:\n Duration in dt.\n \"\"\"\n res = round(duration_in_sec / dt_in_sec)\n rounding_error = abs(duration_in_sec - res * dt_in_sec)\n if rounding_error > 1e-15:\n warnings.warn(\n \"Duration is rounded to %d [dt] = %e [s] from %e [s]\"\n % (res, res * dt_in_sec, duration_in_sec),\n UserWarning,\n )\n return res\n\n\ndef convert_durations_to_dt(qc: QuantumCircuit, dt_in_sec: float, inplace=True):\n \"\"\"Convert all the durations in SI (seconds) into those in dt.\n\n Returns a new circuit if `inplace=False`.\n\n Parameters:\n qc (QuantumCircuit): Duration of dt in seconds used for conversion.\n dt_in_sec (float): Duration of dt in seconds used for conversion.\n inplace (bool): All durations are converted inplace or return new circuit.\n\n Returns:\n QuantumCircuit: Converted circuit if `inplace = False`, otherwise None.\n\n Raises:\n CircuitError: if fail to convert durations.\n \"\"\"\n if inplace:\n circ = qc\n else:\n circ = qc.copy()\n\n for instruction in circ.data:\n operation = instruction.operation\n if operation.unit == \"dt\" or operation.duration is None:\n continue\n\n if not operation.unit.endswith(\"s\"):\n raise CircuitError(f\"Invalid time unit: '{operation.unit}'\")\n\n duration = operation.duration\n if operation.unit != \"s\":\n duration = apply_prefix(duration, operation.unit)\n\n operation.duration = duration_in_dt(duration, dt_in_sec)\n operation.unit = \"dt\"\n\n if circ.duration is not None and circ.unit != \"dt\":\n if not circ.unit.endswith(\"s\"):\n raise CircuitError(f\"Invalid time unit: '{circ.unit}'\")\n\n duration = circ.duration\n if circ.unit != \"s\":\n duration = apply_prefix(duration, circ.unit)\n\n circ.duration = duration_in_dt(duration, dt_in_sec)\n circ.unit = \"dt\"\n\n if not inplace:\n return circ\n else:\n return None\n", "path": "qiskit/circuit/duration.py"}]} | 1,718 | 200 |
gh_patches_debug_7618 | rasdani/github-patches | git_diff | localstack__localstack-8398 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to change ContentBasedDeduplication attribute on existing queue
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
If I create a queue and try to change its `ContentBasedDeduplication` attribute, I see this error:
` An error occurred (InvalidAttributeName) when calling the SetQueueAttributes operation: Unknown Attribute ContentBasedDeduplication.`
### Expected Behavior
I should be able to set `ContentBasedDeduplication` from `true` to `false` on an existing queue. It appears to work on AWS.
### How are you starting LocalStack?
With a docker-compose file
### Steps To Reproduce
#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)
docker run localstack/localstack
#### Client commands (e.g., AWS SDK code snippet, or sequence of "awslocal" commands)
```
aws sqs create-queue --queue-name test1.fifo --endpoint-url http://localhost:4566/ --attributes FifoQueue=true,ContentBasedDeduplication=true
{
"QueueUrl": "http://localhost:4566/000000000000/test1.fifo"
}
aws sqs get-queue-attributes --endpoint-url http://localhost:4566/ --queue-url http://localhost:4566/000000000000/test1.fifo --attribute-names '["ContentBasedDeduplication"]'
{
"Attributes": {
"FifoQueue": "true,
"ContentBasedDeduplication": "true"
}
}
aws sqs set-queue-attributes --endpoint-url http://localhost:4566/ --queue-url http://localhost:4566/000000000000/test1.fifo --attributes ContentBasedDeduplication=false
An error occurred (InvalidAttributeName) when calling the SetQueueAttributes operation: Unknown Attribute ContentBasedDeduplication.
```
### Environment
```markdown
- OS: MacOs Ventura 13.3.1 (a)
- LocalStack: 2.1.0
```
### Anything else?
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `localstack/services/sqs/constants.py`
Content:
```
1 # Valid unicode values: #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF
2 # https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html
3 from localstack.aws.api.sqs import QueueAttributeName
4
5 MSG_CONTENT_REGEX = "^[\u0009\u000A\u000D\u0020-\uD7FF\uE000-\uFFFD\U00010000-\U0010FFFF]*$"
6
7 # https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html
8 # While not documented, umlauts seem to be allowed
9 ATTR_NAME_CHAR_REGEX = "^[\u00C0-\u017Fa-zA-Z0-9_.-]*$"
10 ATTR_NAME_PREFIX_SUFFIX_REGEX = r"^(?!(aws\.|amazon\.|\.)).*(?<!\.)$"
11 ATTR_TYPE_REGEX = "^(String|Number|Binary).*$"
12 FIFO_MSG_REGEX = "^[0-9a-zA-z!\"#$%&'()*+,./:;<=>?@[\\]^_`{|}~-]*$"
13
14 DEDUPLICATION_INTERVAL_IN_SEC = 5 * 60
15
16 # When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.
17 # see https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_DeleteQueue.html
18 RECENTLY_DELETED_TIMEOUT = 60
19
20 # the default maximum message size in SQS
21 DEFAULT_MAXIMUM_MESSAGE_SIZE = 262144
22 INTERNAL_QUEUE_ATTRIBUTES = [
23 # these attributes cannot be changed by set_queue_attributes and should
24 # therefore be ignored when comparing queue attributes for create_queue
25 # 'FifoQueue' is handled on a per_queue basis
26 QueueAttributeName.ApproximateNumberOfMessages,
27 QueueAttributeName.ApproximateNumberOfMessagesDelayed,
28 QueueAttributeName.ApproximateNumberOfMessagesNotVisible,
29 QueueAttributeName.ContentBasedDeduplication,
30 QueueAttributeName.CreatedTimestamp,
31 QueueAttributeName.LastModifiedTimestamp,
32 QueueAttributeName.QueueArn,
33 ]
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/localstack/services/sqs/constants.py b/localstack/services/sqs/constants.py
--- a/localstack/services/sqs/constants.py
+++ b/localstack/services/sqs/constants.py
@@ -26,7 +26,6 @@
QueueAttributeName.ApproximateNumberOfMessages,
QueueAttributeName.ApproximateNumberOfMessagesDelayed,
QueueAttributeName.ApproximateNumberOfMessagesNotVisible,
- QueueAttributeName.ContentBasedDeduplication,
QueueAttributeName.CreatedTimestamp,
QueueAttributeName.LastModifiedTimestamp,
QueueAttributeName.QueueArn,
| {"golden_diff": "diff --git a/localstack/services/sqs/constants.py b/localstack/services/sqs/constants.py\n--- a/localstack/services/sqs/constants.py\n+++ b/localstack/services/sqs/constants.py\n@@ -26,7 +26,6 @@\n QueueAttributeName.ApproximateNumberOfMessages,\n QueueAttributeName.ApproximateNumberOfMessagesDelayed,\n QueueAttributeName.ApproximateNumberOfMessagesNotVisible,\n- QueueAttributeName.ContentBasedDeduplication,\n QueueAttributeName.CreatedTimestamp,\n QueueAttributeName.LastModifiedTimestamp,\n QueueAttributeName.QueueArn,\n", "issue": "Unable to change ContentBasedDeduplication attribute on existing queue\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues\r\n\r\n### Current Behavior\r\n\r\nIf I create a queue and try to change its `ContentDeduplication` attribute, I see this error:\r\n\r\n` An error occurred (InvalidAttributeName) when calling the SetQueueAttributes operation: Unknown Attribute ContentBasedDeduplication.`\r\n\r\n### Expected Behavior\r\n\r\nI should be able to set `ContentBasedDeduplication` from `true` to `false` on an existing queue. It appears to work on AWS.\r\n\r\n### How are you starting LocalStack?\r\n\r\nWith a docker-compose file\r\n\r\n### Steps To Reproduce\r\n\r\n#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)\r\n\r\n docker run localstack/localstack\r\n\r\n#### Client commands (e.g., AWS SDK code snippet, or sequence of \"awslocal\" commands)\r\n\r\n```\r\naws sqs create-queue --queue-name test1.fifo --endpoint-url http://localhost:4566/ --attributes FifoQueue=true,ContentBasedDeduplication=true\r\n{\r\n \"QueueUrl\": \"http://localhost:4566/000000000000/test1.fifo\"\r\n}\r\n\r\n\r\naws sqs get-queue-attributes --endpoint-url http://localhost:4566/ --queue-url http://localhost:4566/000000000000/test1.fifo --attribute-names '[\"ContentBasedDeduplication\"]'\r\n{\r\n \"Attributes\": {\r\n \"FifoQueue\": \"true,\r\n \"ContentBasedDeduplication\": \"true\"\r\n }\r\n}\r\n\r\naws sqs set-queue-attributes --endpoint-url http://localhost:4566/ --queue-url http://localhost:4566/000000000000/test1.fifo --attributes ContentBasedDeduplication=false\r\n\r\nAn error occurred (InvalidAttributeName) when calling the SetQueueAttributes operation: Unknown Attribute ContentBasedDeduplication.\r\n```\r\n\r\n\r\n### Environment\r\n\r\n```markdown\r\n- OS: MacOs Ventura 13.3.1 (a) \r\n- LocalStack: 2.1.0\r\n```\r\n\r\n\r\n### Anything else?\r\n\r\n_No response_\n", "before_files": [{"content": "# Valid unicode values: #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF\n# https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html\nfrom localstack.aws.api.sqs import QueueAttributeName\n\nMSG_CONTENT_REGEX = \"^[\\u0009\\u000A\\u000D\\u0020-\\uD7FF\\uE000-\\uFFFD\\U00010000-\\U0010FFFF]*$\"\n\n# https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html\n# While not documented, umlauts seem to be allowed\nATTR_NAME_CHAR_REGEX = \"^[\\u00C0-\\u017Fa-zA-Z0-9_.-]*$\"\nATTR_NAME_PREFIX_SUFFIX_REGEX = r\"^(?!(aws\\.|amazon\\.|\\.)).*(?<!\\.)$\"\nATTR_TYPE_REGEX = \"^(String|Number|Binary).*$\"\nFIFO_MSG_REGEX = \"^[0-9a-zA-z!\\\"#$%&'()*+,./:;<=>?@[\\\\]^_`{|}~-]*$\"\n\nDEDUPLICATION_INTERVAL_IN_SEC = 5 * 60\n\n# When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.\n# see 
https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_DeleteQueue.html\nRECENTLY_DELETED_TIMEOUT = 60\n\n# the default maximum message size in SQS\nDEFAULT_MAXIMUM_MESSAGE_SIZE = 262144\nINTERNAL_QUEUE_ATTRIBUTES = [\n # these attributes cannot be changed by set_queue_attributes and should\n # therefore be ignored when comparing queue attributes for create_queue\n # 'FifoQueue' is handled on a per_queue basis\n QueueAttributeName.ApproximateNumberOfMessages,\n QueueAttributeName.ApproximateNumberOfMessagesDelayed,\n QueueAttributeName.ApproximateNumberOfMessagesNotVisible,\n QueueAttributeName.ContentBasedDeduplication,\n QueueAttributeName.CreatedTimestamp,\n QueueAttributeName.LastModifiedTimestamp,\n QueueAttributeName.QueueArn,\n]\n", "path": "localstack/services/sqs/constants.py"}], "after_files": [{"content": "# Valid unicode values: #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF\n# https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html\nfrom localstack.aws.api.sqs import QueueAttributeName\n\nMSG_CONTENT_REGEX = \"^[\\u0009\\u000A\\u000D\\u0020-\\uD7FF\\uE000-\\uFFFD\\U00010000-\\U0010FFFF]*$\"\n\n# https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html\n# While not documented, umlauts seem to be allowed\nATTR_NAME_CHAR_REGEX = \"^[\\u00C0-\\u017Fa-zA-Z0-9_.-]*$\"\nATTR_NAME_PREFIX_SUFFIX_REGEX = r\"^(?!(aws\\.|amazon\\.|\\.)).*(?<!\\.)$\"\nATTR_TYPE_REGEX = \"^(String|Number|Binary).*$\"\nFIFO_MSG_REGEX = \"^[0-9a-zA-z!\\\"#$%&'()*+,./:;<=>?@[\\\\]^_`{|}~-]*$\"\n\nDEDUPLICATION_INTERVAL_IN_SEC = 5 * 60\n\n# When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.\n# see https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_DeleteQueue.html\nRECENTLY_DELETED_TIMEOUT = 60\n\n# the default maximum message size in SQS\nDEFAULT_MAXIMUM_MESSAGE_SIZE = 262144\nINTERNAL_QUEUE_ATTRIBUTES = [\n # these attributes cannot be changed by set_queue_attributes and should\n # therefore be ignored when comparing queue attributes for create_queue\n # 'FifoQueue' is handled on a per_queue basis\n QueueAttributeName.ApproximateNumberOfMessages,\n QueueAttributeName.ApproximateNumberOfMessagesDelayed,\n QueueAttributeName.ApproximateNumberOfMessagesNotVisible,\n QueueAttributeName.CreatedTimestamp,\n QueueAttributeName.LastModifiedTimestamp,\n QueueAttributeName.QueueArn,\n]\n", "path": "localstack/services/sqs/constants.py"}]} | 1,309 | 112 |
gh_patches_debug_37151 | rasdani/github-patches | git_diff | conan-io__conan-10960 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] version is not set correctly when using layout
When layout is being used, the recipe version is not set correctly when using the json generator; it seems that the version is not fetched from the package metadata when running the conan install command.
### Environment Details
* Operating System+version: macos
* Compiler+version: apple-clang 12.0
 * Conan version: 1.47.0
* Python version: 3.9
### Steps to reproduce
* create a conan demo project using `conan new demo/1.0.0 --template=cmake_lib`
* create a local conan package `conan create .`
* generate deps using json generator `conan install demo/1.0.0@ -g json`
* inspect conanbuildinfo.json; the version is set to null, however it should be 1.0.0
* remove the layout method from the conanfile.py and try again
* now version is set correctly
By the way, the same issue seems to affect the `description` attribute, and possibly other attributes as well.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/generators/json_generator.py`
Content:
```
1 import json
2
3 from conans.model import Generator
4
5
6 def serialize_cpp_info(cpp_info):
7 keys = [
8 "version",
9 "description",
10 "rootpath",
11 "sysroot",
12 "include_paths", "lib_paths", "bin_paths", "build_paths", "res_paths",
13 "libs",
14 "system_libs",
15 "defines", "cflags", "cxxflags", "sharedlinkflags", "exelinkflags",
16 "frameworks", "framework_paths", "names", "filenames",
17 "build_modules", "build_modules_paths"
18 ]
19 res = {}
20 for key in keys:
21 res[key] = getattr(cpp_info, key)
22 res["cppflags"] = cpp_info.cxxflags # Backwards compatibility
23 return res
24
25
26 def serialize_user_info(user_info):
27 res = {}
28 for key, value in user_info.items():
29 res[key] = value.vars
30 return res
31
32
33 class JsonGenerator(Generator):
34 @property
35 def filename(self):
36 return "conanbuildinfo.json"
37
38 @property
39 def content(self):
40 info = {}
41 info["deps_env_info"] = self.deps_env_info.vars
42 info["deps_user_info"] = serialize_user_info(self.deps_user_info)
43 info["dependencies"] = self.get_dependencies_info()
44 info["settings"] = self.get_settings()
45 info["options"] = self.get_options()
46 if self._user_info_build:
47 info["user_info_build"] = serialize_user_info(self._user_info_build)
48
49 return json.dumps(info, indent=2)
50
51 def get_dependencies_info(self):
52 res = []
53 for depname, cpp_info in self.deps_build_info.dependencies:
54 serialized_info = serialize_cpp_info(cpp_info)
55 serialized_info["name"] = depname
56 for cfg, cfg_cpp_info in cpp_info.configs.items():
57 serialized_info.setdefault("configs", {})[cfg] = serialize_cpp_info(cfg_cpp_info)
58 res.append(serialized_info)
59 return res
60
61 def get_settings(self):
62 settings = {}
63 for key, value in self.settings.items():
64 settings[key] = value
65 return settings
66
67 def get_options(self):
68 options = {}
69 for req in self.conanfile.requires:
70 options[req] = {}
71 for key, value in self.conanfile.options[req].items():
72 options[req][key] = value
73 return options
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conans/client/generators/json_generator.py b/conans/client/generators/json_generator.py
--- a/conans/client/generators/json_generator.py
+++ b/conans/client/generators/json_generator.py
@@ -3,26 +3,6 @@
from conans.model import Generator
-def serialize_cpp_info(cpp_info):
- keys = [
- "version",
- "description",
- "rootpath",
- "sysroot",
- "include_paths", "lib_paths", "bin_paths", "build_paths", "res_paths",
- "libs",
- "system_libs",
- "defines", "cflags", "cxxflags", "sharedlinkflags", "exelinkflags",
- "frameworks", "framework_paths", "names", "filenames",
- "build_modules", "build_modules_paths"
- ]
- res = {}
- for key in keys:
- res[key] = getattr(cpp_info, key)
- res["cppflags"] = cpp_info.cxxflags # Backwards compatibility
- return res
-
-
def serialize_user_info(user_info):
res = {}
for key, value in user_info.items():
@@ -51,10 +31,10 @@
def get_dependencies_info(self):
res = []
for depname, cpp_info in self.deps_build_info.dependencies:
- serialized_info = serialize_cpp_info(cpp_info)
- serialized_info["name"] = depname
+ serialized_info = self.serialize_cpp_info(depname, cpp_info)
for cfg, cfg_cpp_info in cpp_info.configs.items():
- serialized_info.setdefault("configs", {})[cfg] = serialize_cpp_info(cfg_cpp_info)
+ serialized_info.setdefault("configs", {})[cfg] = self.serialize_cpp_info(depname,
+ cfg_cpp_info)
res.append(serialized_info)
return res
@@ -71,3 +51,31 @@
for key, value in self.conanfile.options[req].items():
options[req][key] = value
return options
+
+ def serialize_cpp_info(self, depname, cpp_info):
+ keys = [
+ "version",
+ "description",
+ "rootpath",
+ "sysroot",
+ "include_paths", "lib_paths", "bin_paths", "build_paths", "res_paths",
+ "libs",
+ "system_libs",
+ "defines", "cflags", "cxxflags", "sharedlinkflags", "exelinkflags",
+ "frameworks", "framework_paths", "names", "filenames",
+ "build_modules", "build_modules_paths"
+ ]
+ res = {}
+ for key in keys:
+ res[key] = getattr(cpp_info, key)
+ res["cppflags"] = cpp_info.cxxflags # Backwards compatibility
+ res["name"] = depname
+
+ # FIXME: trick for NewCppInfo objects when declared layout
+ try:
+ if cpp_info.version is None:
+ res["version"] = self.conanfile.dependencies.get(depname).ref.version
+ except Exception:
+ pass
+
+ return res
| {"golden_diff": "diff --git a/conans/client/generators/json_generator.py b/conans/client/generators/json_generator.py\n--- a/conans/client/generators/json_generator.py\n+++ b/conans/client/generators/json_generator.py\n@@ -3,26 +3,6 @@\n from conans.model import Generator\n \n \n-def serialize_cpp_info(cpp_info):\n- keys = [\n- \"version\",\n- \"description\",\n- \"rootpath\",\n- \"sysroot\",\n- \"include_paths\", \"lib_paths\", \"bin_paths\", \"build_paths\", \"res_paths\",\n- \"libs\",\n- \"system_libs\",\n- \"defines\", \"cflags\", \"cxxflags\", \"sharedlinkflags\", \"exelinkflags\",\n- \"frameworks\", \"framework_paths\", \"names\", \"filenames\",\n- \"build_modules\", \"build_modules_paths\"\n- ]\n- res = {}\n- for key in keys:\n- res[key] = getattr(cpp_info, key)\n- res[\"cppflags\"] = cpp_info.cxxflags # Backwards compatibility\n- return res\n-\n-\n def serialize_user_info(user_info):\n res = {}\n for key, value in user_info.items():\n@@ -51,10 +31,10 @@\n def get_dependencies_info(self):\n res = []\n for depname, cpp_info in self.deps_build_info.dependencies:\n- serialized_info = serialize_cpp_info(cpp_info)\n- serialized_info[\"name\"] = depname\n+ serialized_info = self.serialize_cpp_info(depname, cpp_info)\n for cfg, cfg_cpp_info in cpp_info.configs.items():\n- serialized_info.setdefault(\"configs\", {})[cfg] = serialize_cpp_info(cfg_cpp_info)\n+ serialized_info.setdefault(\"configs\", {})[cfg] = self.serialize_cpp_info(depname,\n+ cfg_cpp_info)\n res.append(serialized_info)\n return res\n \n@@ -71,3 +51,31 @@\n for key, value in self.conanfile.options[req].items():\n options[req][key] = value\n return options\n+\n+ def serialize_cpp_info(self, depname, cpp_info):\n+ keys = [\n+ \"version\",\n+ \"description\",\n+ \"rootpath\",\n+ \"sysroot\",\n+ \"include_paths\", \"lib_paths\", \"bin_paths\", \"build_paths\", \"res_paths\",\n+ \"libs\",\n+ \"system_libs\",\n+ \"defines\", \"cflags\", \"cxxflags\", \"sharedlinkflags\", \"exelinkflags\",\n+ \"frameworks\", \"framework_paths\", \"names\", \"filenames\",\n+ \"build_modules\", \"build_modules_paths\"\n+ ]\n+ res = {}\n+ for key in keys:\n+ res[key] = getattr(cpp_info, key)\n+ res[\"cppflags\"] = cpp_info.cxxflags # Backwards compatibility\n+ res[\"name\"] = depname\n+\n+ # FIXME: trick for NewCppInfo objects when declared layout\n+ try:\n+ if cpp_info.version is None:\n+ res[\"version\"] = self.conanfile.dependencies.get(depname).ref.version\n+ except Exception:\n+ pass\n+\n+ return res\n", "issue": "[bug] version is not set correctly when using layout\nWhen layout is being used, recipe version is not set correctly somehow using json generator, it seems that version is not being fetched from package metadata when running conan install command!\r\n\r\n\r\n### Environment Details\r\n * Operating System+version: macos\r\n * Compiler+version: apple-clang 12.0\r\n * Conan version: Conan version 1.47.0\r\n * Python version: 3.9\r\n\r\n### Steps to reproduce \r\n* create a conan demo project using `conan new demo/1.0.0 --template=cmake_lib` \r\n* create a local conan package `conan create .`\r\n* generate deps using json generator `conan install demo/1.0.0@ -g json`\r\n* inspect conanbuildinfo.json, version is set to null, however it should be 1.0.0\r\n\r\n* remove the layout method from the conanfile.py and try again\r\n* now version is set correctly \r\n\r\nbtw, it seems to be the same issue for the description attribute, maybe other attributes as well\r\n\r\n\r\n\n", "before_files": [{"content": "import json\n\nfrom conans.model 
import Generator\n\n\ndef serialize_cpp_info(cpp_info):\n keys = [\n \"version\",\n \"description\",\n \"rootpath\",\n \"sysroot\",\n \"include_paths\", \"lib_paths\", \"bin_paths\", \"build_paths\", \"res_paths\",\n \"libs\",\n \"system_libs\",\n \"defines\", \"cflags\", \"cxxflags\", \"sharedlinkflags\", \"exelinkflags\",\n \"frameworks\", \"framework_paths\", \"names\", \"filenames\",\n \"build_modules\", \"build_modules_paths\"\n ]\n res = {}\n for key in keys:\n res[key] = getattr(cpp_info, key)\n res[\"cppflags\"] = cpp_info.cxxflags # Backwards compatibility\n return res\n\n\ndef serialize_user_info(user_info):\n res = {}\n for key, value in user_info.items():\n res[key] = value.vars\n return res\n\n\nclass JsonGenerator(Generator):\n @property\n def filename(self):\n return \"conanbuildinfo.json\"\n\n @property\n def content(self):\n info = {}\n info[\"deps_env_info\"] = self.deps_env_info.vars\n info[\"deps_user_info\"] = serialize_user_info(self.deps_user_info)\n info[\"dependencies\"] = self.get_dependencies_info()\n info[\"settings\"] = self.get_settings()\n info[\"options\"] = self.get_options()\n if self._user_info_build:\n info[\"user_info_build\"] = serialize_user_info(self._user_info_build)\n\n return json.dumps(info, indent=2)\n\n def get_dependencies_info(self):\n res = []\n for depname, cpp_info in self.deps_build_info.dependencies:\n serialized_info = serialize_cpp_info(cpp_info)\n serialized_info[\"name\"] = depname\n for cfg, cfg_cpp_info in cpp_info.configs.items():\n serialized_info.setdefault(\"configs\", {})[cfg] = serialize_cpp_info(cfg_cpp_info)\n res.append(serialized_info)\n return res\n\n def get_settings(self):\n settings = {}\n for key, value in self.settings.items():\n settings[key] = value\n return settings\n\n def get_options(self):\n options = {}\n for req in self.conanfile.requires:\n options[req] = {}\n for key, value in self.conanfile.options[req].items():\n options[req][key] = value\n return options\n", "path": "conans/client/generators/json_generator.py"}], "after_files": [{"content": "import json\n\nfrom conans.model import Generator\n\n\ndef serialize_user_info(user_info):\n res = {}\n for key, value in user_info.items():\n res[key] = value.vars\n return res\n\n\nclass JsonGenerator(Generator):\n @property\n def filename(self):\n return \"conanbuildinfo.json\"\n\n @property\n def content(self):\n info = {}\n info[\"deps_env_info\"] = self.deps_env_info.vars\n info[\"deps_user_info\"] = serialize_user_info(self.deps_user_info)\n info[\"dependencies\"] = self.get_dependencies_info()\n info[\"settings\"] = self.get_settings()\n info[\"options\"] = self.get_options()\n if self._user_info_build:\n info[\"user_info_build\"] = serialize_user_info(self._user_info_build)\n\n return json.dumps(info, indent=2)\n\n def get_dependencies_info(self):\n res = []\n for depname, cpp_info in self.deps_build_info.dependencies:\n serialized_info = self.serialize_cpp_info(depname, cpp_info)\n for cfg, cfg_cpp_info in cpp_info.configs.items():\n serialized_info.setdefault(\"configs\", {})[cfg] = self.serialize_cpp_info(depname,\n cfg_cpp_info)\n res.append(serialized_info)\n return res\n\n def get_settings(self):\n settings = {}\n for key, value in self.settings.items():\n settings[key] = value\n return settings\n\n def get_options(self):\n options = {}\n for req in self.conanfile.requires:\n options[req] = {}\n for key, value in self.conanfile.options[req].items():\n options[req][key] = value\n return options\n\n def serialize_cpp_info(self, depname, cpp_info):\n 
keys = [\n \"version\",\n \"description\",\n \"rootpath\",\n \"sysroot\",\n \"include_paths\", \"lib_paths\", \"bin_paths\", \"build_paths\", \"res_paths\",\n \"libs\",\n \"system_libs\",\n \"defines\", \"cflags\", \"cxxflags\", \"sharedlinkflags\", \"exelinkflags\",\n \"frameworks\", \"framework_paths\", \"names\", \"filenames\",\n \"build_modules\", \"build_modules_paths\"\n ]\n res = {}\n for key in keys:\n res[key] = getattr(cpp_info, key)\n res[\"cppflags\"] = cpp_info.cxxflags # Backwards compatibility\n res[\"name\"] = depname\n\n # FIXME: trick for NewCppInfo objects when declared layout\n try:\n if cpp_info.version is None:\n res[\"version\"] = self.conanfile.dependencies.get(depname).ref.version\n except Exception:\n pass\n\n return res\n", "path": "conans/client/generators/json_generator.py"}]} | 1,253 | 704 |
gh_patches_debug_28718 | rasdani/github-patches | git_diff | napari__napari-3613 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adding 0 and 1 coords if missed in colormap
# Description
<!-- What does this pull request (PR) do? Why is it necessary? -->
<!-- Tell us about your new feature, improvement, or fix! -->
<!-- If your change includes user interface changes, please add an image, or an animation "An image is worth a thousand words!" -->
<!-- You can use https://www.cockos.com/licecap/ or similar to create animations -->
Fix #2400
## Type of change
<!-- Please delete options that are not relevant. -->
# References
<!-- What resources, documentation, and guides were used in the creation of this PR? -->
<!-- If this is a bug-fix or otherwise resolves an issue, reference it here with "closes #(issue)" -->
# How has this been tested?
<!-- Please describe the tests that you ran to verify your changes. -->
- [ ] example: the test suite for my feature covers cases x, y, and z
- [ ] example: all tests pass with my change
## Final checklist:
- [ ] My PR is the minimum possible work for the desired functionality
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have made corresponding changes to the documentation
- [x] I have added tests that prove my fix is effective or that my feature works
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/utils/colormaps/colormap.py`
Content:
```
1 from enum import Enum
2 from typing import Optional
3
4 import numpy as np
5 from pydantic import PrivateAttr, validator
6
7 from ..events import EventedModel
8 from ..events.custom_types import Array
9 from ..translations import trans
10 from .colorbars import make_colorbar
11 from .standardize_color import transform_color
12
13
14 class ColormapInterpolationMode(str, Enum):
15 """INTERPOLATION: Interpolation mode for colormaps.
16
17 Selects an interpolation mode for the colormap.
18 * linear: colors are defined by linear interpolation between
19 colors of neighboring controls points.
20 * zero: colors are defined by the value of the color in the
21 bin between by neighboring controls points.
22 """
23
24 LINEAR = 'linear'
25 ZERO = 'zero'
26
27
28 class Colormap(EventedModel):
29 """Colormap that relates intensity values to colors.
30
31 Attributes
32 ----------
33 colors : array, shape (N, 4)
34 Data used in the colormap.
35 name : str
36 Name of the colormap.
37 display_name : str
38 Display name of the colormap.
39 controls : array, shape (N,) or (N+1,)
40 Control points of the colormap.
41 interpolation : str
42 Colormap interpolation mode, either 'linear' or
43 'zero'. If 'linear', ncontrols = ncolors (one
44 color per control point). If 'zero', ncontrols
45 = ncolors+1 (one color per bin).
46 """
47
48 # fields
49 colors: Array[float, (-1, 4)]
50 name: str = 'custom'
51 _display_name: Optional[str] = PrivateAttr(None)
52 interpolation: ColormapInterpolationMode = ColormapInterpolationMode.LINEAR
53 controls: Array[float, (-1,)] = None
54
55 def __init__(self, colors, display_name: Optional[str] = None, **data):
56 if display_name is None:
57 display_name = data.get('name', 'custom')
58
59 super().__init__(colors=colors, **data)
60 self._display_name = display_name
61
62 # validators
63 @validator('colors', pre=True)
64 def _ensure_color_array(cls, v):
65 return transform_color(v)
66
67 # controls validator must be called even if None for correct initialization
68 @validator('controls', pre=True, always=True)
69 def _check_controls(cls, v, values):
70 if v is None or len(v) == 0:
71 n_controls = len(values['colors']) + int(
72 values['interpolation'] == ColormapInterpolationMode.ZERO
73 )
74 return np.linspace(0, 1, n_controls)
75 return v
76
77 def __iter__(self):
78 yield from (self.colors, self.controls, self.interpolation)
79
80 def map(self, values):
81 values = np.atleast_1d(values)
82 if self.interpolation == ColormapInterpolationMode.LINEAR:
83 # One color per control point
84 cols = [
85 np.interp(values, self.controls, self.colors[:, i])
86 for i in range(4)
87 ]
88 cols = np.stack(cols, axis=1)
89 elif self.interpolation == ColormapInterpolationMode.ZERO:
90 # One color per bin
91 indices = np.clip(
92 np.searchsorted(self.controls, values) - 1, 0, len(self.colors)
93 )
94 cols = self.colors[indices.astype(np.int32)]
95 else:
96 raise ValueError(
97 trans._(
98 'Unrecognized Colormap Interpolation Mode',
99 deferred=True,
100 )
101 )
102
103 return cols
104
105 @property
106 def colorbar(self):
107 return make_colorbar(self)
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/napari/utils/colormaps/colormap.py b/napari/utils/colormaps/colormap.py
--- a/napari/utils/colormaps/colormap.py
+++ b/napari/utils/colormaps/colormap.py
@@ -67,11 +67,44 @@
# controls validator must be called even if None for correct initialization
@validator('controls', pre=True, always=True)
def _check_controls(cls, v, values):
+ # If no control points provided generate defaults
if v is None or len(v) == 0:
n_controls = len(values['colors']) + int(
values['interpolation'] == ColormapInterpolationMode.ZERO
)
return np.linspace(0, 1, n_controls)
+
+ # Check control end points are correct
+ if not (v[0] == 0 and v[-1] == 1):
+ raise ValueError(
+ trans._(
+ f'Control points must start with 0.0 and end with 1.0. Got {v[0]} and {v[-1]}',
+ deferred=True,
+ )
+ )
+
+ # Check control points are sorted correctly
+ if not np.array_equal(v, sorted(v)):
+ raise ValueError(
+ trans._(
+ 'Control points need to be sorted in ascending order',
+ deferred=True,
+ )
+ )
+
+ # Check number of control points is correct
+ n_controls_target = len(values['colors']) + int(
+ values['interpolation'] == ColormapInterpolationMode.ZERO
+ )
+ n_controls = len(v)
+ if not n_controls == n_controls_target:
+ raise ValueError(
+ trans._(
+ f'Wrong number of control points provided. Expected {n_controls_target}, got {n_controls}',
+ deferred=True,
+ )
+ )
+
return v
def __iter__(self):
| {"golden_diff": "diff --git a/napari/utils/colormaps/colormap.py b/napari/utils/colormaps/colormap.py\n--- a/napari/utils/colormaps/colormap.py\n+++ b/napari/utils/colormaps/colormap.py\n@@ -67,11 +67,44 @@\n # controls validator must be called even if None for correct initialization\n @validator('controls', pre=True, always=True)\n def _check_controls(cls, v, values):\n+ # If no control points provided generate defaults\n if v is None or len(v) == 0:\n n_controls = len(values['colors']) + int(\n values['interpolation'] == ColormapInterpolationMode.ZERO\n )\n return np.linspace(0, 1, n_controls)\n+\n+ # Check control end points are correct\n+ if not (v[0] == 0 and v[-1] == 1):\n+ raise ValueError(\n+ trans._(\n+ f'Control points must start with 0.0 and end with 1.0. Got {v[0]} and {v[-1]}',\n+ deferred=True,\n+ )\n+ )\n+\n+ # Check control points are sorted correctly\n+ if not np.array_equal(v, sorted(v)):\n+ raise ValueError(\n+ trans._(\n+ 'Control points need to be sorted in ascending order',\n+ deferred=True,\n+ )\n+ )\n+\n+ # Check number of control points is correct\n+ n_controls_target = len(values['colors']) + int(\n+ values['interpolation'] == ColormapInterpolationMode.ZERO\n+ )\n+ n_controls = len(v)\n+ if not n_controls == n_controls_target:\n+ raise ValueError(\n+ trans._(\n+ f'Wrong number of control points provided. Expected {n_controls_target}, got {n_controls}',\n+ deferred=True,\n+ )\n+ )\n+\n return v\n \n def __iter__(self):\n", "issue": "Adding 0 and 1 coords if missed in colormap\n# Description\r\n<!-- What does this pull request (PR) do? Why is it necessary? -->\r\n<!-- Tell us about your new feature, improvement, or fix! -->\r\n<!-- If your change includes user interface changes, please add an image, or an animation \"An image is worth a thousando words!\" -->\r\n<!-- You can use https://www.cockos.com/licecap/ or similar to create animations -->\r\nFix #2400\r\n\r\n## Type of change\r\n<!-- Please delete options that are not relevant. -->\r\n\r\n\r\n# References\r\n<!-- What resources, documentation, and guides were used in the creation of this PR? -->\r\n<!-- If this is a bug-fix or otherwise resolves an issue, reference it here with \"closes #(issue)\" -->\r\n\r\n# How has this been tested?\r\n<!-- Please describe the tests that you ran to verify your changes. -->\r\n- [ ] example: the test suite for my feature covers cases x, y, and z\r\n- [ ] example: all tests pass with my change\r\n\r\n## Final checklist:\r\n- [ ] My PR is the minimum possible work for the desired functionality\r\n- [ ] I have commented my code, particularly in hard-to-understand areas\r\n- [ ] I have made corresponding changes to the documentation\r\n- [x] I have added tests that prove my fix is effective or that my feature works\r\n\nAdding 0 and 1 coords if missed in colormap\n# Description\r\n<!-- What does this pull request (PR) do? Why is it necessary? -->\r\n<!-- Tell us about your new feature, improvement, or fix! -->\r\n<!-- If your change includes user interface changes, please add an image, or an animation \"An image is worth a thousando words!\" -->\r\n<!-- You can use https://www.cockos.com/licecap/ or similar to create animations -->\r\nFix #2400\r\n\r\n## Type of change\r\n<!-- Please delete options that are not relevant. -->\r\n\r\n\r\n# References\r\n<!-- What resources, documentation, and guides were used in the creation of this PR? 
-->\r\n<!-- If this is a bug-fix or otherwise resolves an issue, reference it here with \"closes #(issue)\" -->\r\n\r\n# How has this been tested?\r\n<!-- Please describe the tests that you ran to verify your changes. -->\r\n- [ ] example: the test suite for my feature covers cases x, y, and z\r\n- [ ] example: all tests pass with my change\r\n\r\n## Final checklist:\r\n- [ ] My PR is the minimum possible work for the desired functionality\r\n- [ ] I have commented my code, particularly in hard-to-understand areas\r\n- [ ] I have made corresponding changes to the documentation\r\n- [x] I have added tests that prove my fix is effective or that my feature works\r\n\n", "before_files": [{"content": "from enum import Enum\nfrom typing import Optional\n\nimport numpy as np\nfrom pydantic import PrivateAttr, validator\n\nfrom ..events import EventedModel\nfrom ..events.custom_types import Array\nfrom ..translations import trans\nfrom .colorbars import make_colorbar\nfrom .standardize_color import transform_color\n\n\nclass ColormapInterpolationMode(str, Enum):\n \"\"\"INTERPOLATION: Interpolation mode for colormaps.\n\n Selects an interpolation mode for the colormap.\n * linear: colors are defined by linear interpolation between\n colors of neighboring controls points.\n * zero: colors are defined by the value of the color in the\n bin between by neighboring controls points.\n \"\"\"\n\n LINEAR = 'linear'\n ZERO = 'zero'\n\n\nclass Colormap(EventedModel):\n \"\"\"Colormap that relates intensity values to colors.\n\n Attributes\n ----------\n colors : array, shape (N, 4)\n Data used in the colormap.\n name : str\n Name of the colormap.\n display_name : str\n Display name of the colormap.\n controls : array, shape (N,) or (N+1,)\n Control points of the colormap.\n interpolation : str\n Colormap interpolation mode, either 'linear' or\n 'zero'. If 'linear', ncontrols = ncolors (one\n color per control point). 
If 'zero', ncontrols\n = ncolors+1 (one color per bin).\n \"\"\"\n\n # fields\n colors: Array[float, (-1, 4)]\n name: str = 'custom'\n _display_name: Optional[str] = PrivateAttr(None)\n interpolation: ColormapInterpolationMode = ColormapInterpolationMode.LINEAR\n controls: Array[float, (-1,)] = None\n\n def __init__(self, colors, display_name: Optional[str] = None, **data):\n if display_name is None:\n display_name = data.get('name', 'custom')\n\n super().__init__(colors=colors, **data)\n self._display_name = display_name\n\n # validators\n @validator('colors', pre=True)\n def _ensure_color_array(cls, v):\n return transform_color(v)\n\n # controls validator must be called even if None for correct initialization\n @validator('controls', pre=True, always=True)\n def _check_controls(cls, v, values):\n if v is None or len(v) == 0:\n n_controls = len(values['colors']) + int(\n values['interpolation'] == ColormapInterpolationMode.ZERO\n )\n return np.linspace(0, 1, n_controls)\n return v\n\n def __iter__(self):\n yield from (self.colors, self.controls, self.interpolation)\n\n def map(self, values):\n values = np.atleast_1d(values)\n if self.interpolation == ColormapInterpolationMode.LINEAR:\n # One color per control point\n cols = [\n np.interp(values, self.controls, self.colors[:, i])\n for i in range(4)\n ]\n cols = np.stack(cols, axis=1)\n elif self.interpolation == ColormapInterpolationMode.ZERO:\n # One color per bin\n indices = np.clip(\n np.searchsorted(self.controls, values) - 1, 0, len(self.colors)\n )\n cols = self.colors[indices.astype(np.int32)]\n else:\n raise ValueError(\n trans._(\n 'Unrecognized Colormap Interpolation Mode',\n deferred=True,\n )\n )\n\n return cols\n\n @property\n def colorbar(self):\n return make_colorbar(self)\n", "path": "napari/utils/colormaps/colormap.py"}], "after_files": [{"content": "from enum import Enum\nfrom typing import Optional\n\nimport numpy as np\nfrom pydantic import PrivateAttr, validator\n\nfrom ..events import EventedModel\nfrom ..events.custom_types import Array\nfrom ..translations import trans\nfrom .colorbars import make_colorbar\nfrom .standardize_color import transform_color\n\n\nclass ColormapInterpolationMode(str, Enum):\n \"\"\"INTERPOLATION: Interpolation mode for colormaps.\n\n Selects an interpolation mode for the colormap.\n * linear: colors are defined by linear interpolation between\n colors of neighboring controls points.\n * zero: colors are defined by the value of the color in the\n bin between by neighboring controls points.\n \"\"\"\n\n LINEAR = 'linear'\n ZERO = 'zero'\n\n\nclass Colormap(EventedModel):\n \"\"\"Colormap that relates intensity values to colors.\n\n Attributes\n ----------\n colors : array, shape (N, 4)\n Data used in the colormap.\n name : str\n Name of the colormap.\n display_name : str\n Display name of the colormap.\n controls : array, shape (N,) or (N+1,)\n Control points of the colormap.\n interpolation : str\n Colormap interpolation mode, either 'linear' or\n 'zero'. If 'linear', ncontrols = ncolors (one\n color per control point). 
If 'zero', ncontrols\n = ncolors+1 (one color per bin).\n \"\"\"\n\n # fields\n colors: Array[float, (-1, 4)]\n name: str = 'custom'\n _display_name: Optional[str] = PrivateAttr(None)\n interpolation: ColormapInterpolationMode = ColormapInterpolationMode.LINEAR\n controls: Array[float, (-1,)] = None\n\n def __init__(self, colors, display_name: Optional[str] = None, **data):\n if display_name is None:\n display_name = data.get('name', 'custom')\n\n super().__init__(colors=colors, **data)\n self._display_name = display_name\n\n # validators\n @validator('colors', pre=True)\n def _ensure_color_array(cls, v):\n return transform_color(v)\n\n # controls validator must be called even if None for correct initialization\n @validator('controls', pre=True, always=True)\n def _check_controls(cls, v, values):\n # If no control points provided generate defaults\n if v is None or len(v) == 0:\n n_controls = len(values['colors']) + int(\n values['interpolation'] == ColormapInterpolationMode.ZERO\n )\n return np.linspace(0, 1, n_controls)\n\n # Check control end points are correct\n if not (v[0] == 0 and v[-1] == 1):\n raise ValueError(\n trans._(\n f'Control points must start with 0.0 and end with 1.0. Got {v[0]} and {v[-1]}',\n deferred=True,\n )\n )\n\n # Check control points are sorted correctly\n if not np.array_equal(v, sorted(v)):\n raise ValueError(\n trans._(\n 'Control points need to be sorted in ascending order',\n deferred=True,\n )\n )\n\n # Check number of control points is correct\n n_controls_target = len(values['colors']) + int(\n values['interpolation'] == ColormapInterpolationMode.ZERO\n )\n n_controls = len(v)\n if not n_controls == n_controls_target:\n raise ValueError(\n trans._(\n f'Wrong number of control points provided. Expected {n_controls_target}, got {n_controls}',\n deferred=True,\n )\n )\n\n return v\n\n def __iter__(self):\n yield from (self.colors, self.controls, self.interpolation)\n\n def map(self, values):\n values = np.atleast_1d(values)\n if self.interpolation == ColormapInterpolationMode.LINEAR:\n # One color per control point\n cols = [\n np.interp(values, self.controls, self.colors[:, i])\n for i in range(4)\n ]\n cols = np.stack(cols, axis=1)\n elif self.interpolation == ColormapInterpolationMode.ZERO:\n # One color per bin\n indices = np.clip(\n np.searchsorted(self.controls, values) - 1, 0, len(self.colors)\n )\n cols = self.colors[indices.astype(np.int32)]\n else:\n raise ValueError(\n trans._(\n 'Unrecognized Colormap Interpolation Mode',\n deferred=True,\n )\n )\n\n return cols\n\n @property\n def colorbar(self):\n return make_colorbar(self)\n", "path": "napari/utils/colormaps/colormap.py"}]} | 1,836 | 438 |
gh_patches_debug_2718 | rasdani/github-patches | git_diff | pyload__pyload-1733 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HOOK LinkdecrypterCom: 'LinkdecrypterComHook' object has no attribute 'COOKIES'
03.08.2015 20:46:43 INFO Free space: 6.48 TiB
630 03.08.2015 20:46:43 INFO Activating Accounts...
631 03.08.2015 20:46:43 INFO Activating Plugins...
632 03.08.2015 20:46:43 WARNING HOOK AntiStandby: Unable to change system power state | [Errno 2] No such file or directory
633 03.08.2015 20:46:43 WARNING HOOK AntiStandby: Unable to change display power state | [Errno 2] No such file or directory
634 03.08.2015 20:46:43 INFO HOOK XFileSharingPro: Handling any hoster I can!
635 03.08.2015 20:46:43 WARNING HOOK UpdateManager: Unable to retrieve server to get updates
636 03.08.2015 20:46:43 INFO HOOK XFileSharingPro: Handling any crypter I can!
637 03.08.2015 20:46:43 INFO pyLoad is up and running
638 03.08.2015 20:46:45 INFO HOOK LinkdecrypterCom: Reloading supported crypter list
639 03.08.2015 20:46:45 WARNING HOOK LinkdecrypterCom: 'LinkdecrypterComHook' object has no attribute 'COOKIES' | Waiting 1 minute and retry
640 03.08.2015 20:46:53 INFO HOOK ClickAndLoad: Proxy listening on 127.0.0.1:9666
641 03.08.2015 20:46:53 INFO HOOK LinkdecrypterCom: Reloading supported crypter list
642 03.08.2015 20:46:53 WARNING HOOK LinkdecrypterCom: 'LinkdecrypterComHook' object has no attribute 'COOKIES' | Waiting 1 minute and retry
643 03.08.2015 20:47:45 WARNING HOOK LinkdecrypterCom: 'LinkdecrypterComHook' object has no attribute 'COOKIES' | Waiting 1 minute and retry
644 03.08.2015 20:47:53 WARNING HOOK LinkdecrypterCom: 'LinkdecrypterComHook' object has no attribute 'COOKIES' | Waiting 1 minute and retry
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `module/plugins/hooks/LinkdecrypterComHook.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import re
4
5 from module.plugins.internal.MultiHook import MultiHook
6
7
8 class LinkdecrypterComHook(MultiHook):
9 __name__ = "LinkdecrypterComHook"
10 __type__ = "hook"
11 __version__ = "1.07"
12 __status__ = "testing"
13
14 __config__ = [("activated" , "bool" , "Activated" , True ),
15 ("pluginmode" , "all;listed;unlisted", "Use for plugins" , "all"),
16 ("pluginlist" , "str" , "Plugin list (comma separated)", "" ),
17 ("reload" , "bool" , "Reload plugin list" , True ),
18 ("reloadinterval", "int" , "Reload interval in hours" , 12 )]
19
20 __description__ = """Linkdecrypter.com hook plugin"""
21 __license__ = "GPLv3"
22 __authors__ = [("Walter Purcaro", "[email protected]")]
23
24
25 def get_hosters(self):
26 list = re.search(r'>Supported\(\d+\)</b>: <i>(.[\w.\-, ]+)',
27 self.load("http://linkdecrypter.com/").replace("(g)", "")).group(1).split(', ')
28 try:
29 list.remove("download.serienjunkies.org")
30 except ValueError:
31 pass
32
33 return list
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/module/plugins/hooks/LinkdecrypterComHook.py b/module/plugins/hooks/LinkdecrypterComHook.py
--- a/module/plugins/hooks/LinkdecrypterComHook.py
+++ b/module/plugins/hooks/LinkdecrypterComHook.py
@@ -21,6 +21,7 @@
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "[email protected]")]
+ COOKIES = False
def get_hosters(self):
list = re.search(r'>Supported\(\d+\)</b>: <i>(.[\w.\-, ]+)',
| {"golden_diff": "diff --git a/module/plugins/hooks/LinkdecrypterComHook.py b/module/plugins/hooks/LinkdecrypterComHook.py\n--- a/module/plugins/hooks/LinkdecrypterComHook.py\n+++ b/module/plugins/hooks/LinkdecrypterComHook.py\n@@ -21,6 +21,7 @@\n __license__ = \"GPLv3\"\n __authors__ = [(\"Walter Purcaro\", \"[email protected]\")]\n \n+ COOKIES = False\n \n def get_hosters(self):\n list = re.search(r'>Supported\\(\\d+\\)</b>: <i>(.[\\w.\\-, ]+)',\n", "issue": "HOOK LinkdecrypterCom: 'LinkdecrypterComHook' object has no attribute 'COOKIES' \n03.08.2015 20:46:43 INFO Free space: 6.48 TiB\n630 03.08.2015 20:46:43 INFO Activating Accounts...\n631 03.08.2015 20:46:43 INFO Activating Plugins...\n632 03.08.2015 20:46:43 WARNING HOOK AntiStandby: Unable to change system power state | [Errno 2] No such file or directory\n633 03.08.2015 20:46:43 WARNING HOOK AntiStandby: Unable to change display power state | [Errno 2] No such file or directory\n634 03.08.2015 20:46:43 INFO HOOK XFileSharingPro: Handling any hoster I can!\n635 03.08.2015 20:46:43 WARNING HOOK UpdateManager: Unable to retrieve server to get updates\n636 03.08.2015 20:46:43 INFO HOOK XFileSharingPro: Handling any crypter I can!\n637 03.08.2015 20:46:43 INFO pyLoad is up and running\n638 03.08.2015 20:46:45 INFO HOOK LinkdecrypterCom: Reloading supported crypter list\n639 03.08.2015 20:46:45 WARNING HOOK LinkdecrypterCom: 'LinkdecrypterComHook' object has no attribute 'COOKIES' | Waiting 1 minute and retry\n640 03.08.2015 20:46:53 INFO HOOK ClickAndLoad: Proxy listening on 127.0.0.1:9666\n641 03.08.2015 20:46:53 INFO HOOK LinkdecrypterCom: Reloading supported crypter list\n642 03.08.2015 20:46:53 WARNING HOOK LinkdecrypterCom: 'LinkdecrypterComHook' object has no attribute 'COOKIES' | Waiting 1 minute and retry\n643 03.08.2015 20:47:45 WARNING HOOK LinkdecrypterCom: 'LinkdecrypterComHook' object has no attribute 'COOKIES' | Waiting 1 minute and retry\n644 03.08.2015 20:47:53 WARNING HOOK LinkdecrypterCom: 'LinkdecrypterComHook' object has no attribute 'COOKIES' | Waiting 1 minute and retry\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport re\n\nfrom module.plugins.internal.MultiHook import MultiHook\n\n\nclass LinkdecrypterComHook(MultiHook):\n __name__ = \"LinkdecrypterComHook\"\n __type__ = \"hook\"\n __version__ = \"1.07\"\n __status__ = \"testing\"\n\n __config__ = [(\"activated\" , \"bool\" , \"Activated\" , True ),\n (\"pluginmode\" , \"all;listed;unlisted\", \"Use for plugins\" , \"all\"),\n (\"pluginlist\" , \"str\" , \"Plugin list (comma separated)\", \"\" ),\n (\"reload\" , \"bool\" , \"Reload plugin list\" , True ),\n (\"reloadinterval\", \"int\" , \"Reload interval in hours\" , 12 )]\n\n __description__ = \"\"\"Linkdecrypter.com hook plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Walter Purcaro\", \"[email protected]\")]\n\n\n def get_hosters(self):\n list = re.search(r'>Supported\\(\\d+\\)</b>: <i>(.[\\w.\\-, ]+)',\n self.load(\"http://linkdecrypter.com/\").replace(\"(g)\", \"\")).group(1).split(', ')\n try:\n list.remove(\"download.serienjunkies.org\")\n except ValueError:\n pass\n\n return list\n", "path": "module/plugins/hooks/LinkdecrypterComHook.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport re\n\nfrom module.plugins.internal.MultiHook import MultiHook\n\n\nclass LinkdecrypterComHook(MultiHook):\n __name__ = \"LinkdecrypterComHook\"\n __type__ = \"hook\"\n __version__ = \"1.07\"\n __status__ = \"testing\"\n\n __config__ = [(\"activated\" , \"bool\" , 
\"Activated\" , True ),\n (\"pluginmode\" , \"all;listed;unlisted\", \"Use for plugins\" , \"all\"),\n (\"pluginlist\" , \"str\" , \"Plugin list (comma separated)\", \"\" ),\n (\"reload\" , \"bool\" , \"Reload plugin list\" , True ),\n (\"reloadinterval\", \"int\" , \"Reload interval in hours\" , 12 )]\n\n __description__ = \"\"\"Linkdecrypter.com hook plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Walter Purcaro\", \"[email protected]\")]\n\n COOKIES = False\n\n def get_hosters(self):\n list = re.search(r'>Supported\\(\\d+\\)</b>: <i>(.[\\w.\\-, ]+)',\n self.load(\"http://linkdecrypter.com/\").replace(\"(g)\", \"\")).group(1).split(', ')\n try:\n list.remove(\"download.serienjunkies.org\")\n except ValueError:\n pass\n\n return list\n", "path": "module/plugins/hooks/LinkdecrypterComHook.py"}]} | 1,353 | 137 |
gh_patches_debug_65083 | rasdani/github-patches | git_diff | cupy__cupy-5857 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Drop support for NumPy 1.17 in v10 (NEP 29)
CuPy should drop support for these legacy versions, following [NEP 29](https://numpy.org/neps/nep-0029-deprecation_policy.html#support-table).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import glob
4 import os
5 from setuptools import setup, find_packages
6 import sys
7
8 source_root = os.path.abspath(os.path.dirname(__file__))
9 sys.path.append(os.path.join(source_root, 'install'))
10
11 import cupy_builder # NOQA
12 from cupy_builder import cupy_setup_build # NOQA
13
14 ctx = cupy_builder.Context(source_root)
15 cupy_builder.initialize(ctx)
16 if not cupy_builder.preflight_check(ctx):
17 sys.exit(1)
18
19
20 # TODO(kmaehashi): migrate to pyproject.toml (see #4727, #4619)
21 setup_requires = [
22 'Cython>=0.29.22,<3',
23 'fastrlock>=0.5',
24 ]
25 install_requires = [
26 'numpy>=1.17,<1.24', # see #4773
27 'fastrlock>=0.5',
28 ]
29 extras_require = {
30 'all': [
31 'scipy>=1.4,<1.10', # see #4773
32 'Cython>=0.29.22,<3',
33 'optuna>=2.0',
34 ],
35 'stylecheck': [
36 'autopep8==1.5.5',
37 'flake8==3.8.4',
38 'pbr==5.5.1',
39 'pycodestyle==2.6.0',
40 ],
41 'test': [
42 # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI.
43 'pytest>=6.2',
44 ],
45 # TODO(kmaehashi): Remove 'jenkins' requirements.
46 'jenkins': [
47 'pytest>=6.2',
48 'pytest-timeout',
49 'pytest-cov',
50 'coveralls',
51 'codecov',
52 'coverage<5', # Otherwise, Python must be built with sqlite
53 ],
54 }
55 tests_require = extras_require['test']
56
57
58 # List of files that needs to be in the distribution (sdist/wheel).
59 # Notes:
60 # - Files only needed in sdist should be added to `MANIFEST.in`.
61 # - The following glob (`**`) ignores items starting with `.`.
62 cupy_package_data = [
63 'cupy/cuda/cupy_thrust.cu',
64 'cupy/cuda/cupy_cub.cu',
65 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback
66 'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback
67 'cupy/cuda/cupy_cufft.h', # for cuFFT callback
68 'cupy/cuda/cufft.pxd', # for cuFFT callback
69 'cupy/cuda/cufft.pyx', # for cuFFT callback
70 'cupy/random/cupy_distributions.cu',
71 'cupy/random/cupy_distributions.cuh',
72 ] + [
73 x for x in glob.glob('cupy/_core/include/cupy/**', recursive=True)
74 if os.path.isfile(x)
75 ]
76
77 package_data = {
78 'cupy': [
79 os.path.relpath(x, 'cupy') for x in cupy_package_data
80 ],
81 }
82
83 package_data['cupy'] += cupy_setup_build.prepare_wheel_libs(ctx)
84
85 ext_modules = cupy_setup_build.get_ext_modules(False, ctx)
86 build_ext = cupy_setup_build.custom_build_ext
87
88 # Get __version__ variable
89 with open(os.path.join(source_root, 'cupy', '_version.py')) as f:
90 exec(f.read())
91
92 long_description = None
93 if ctx.long_description_path is not None:
94 with open(ctx.long_description_path) as f:
95 long_description = f.read()
96
97
98 CLASSIFIERS = """\
99 Development Status :: 5 - Production/Stable
100 Intended Audience :: Science/Research
101 Intended Audience :: Developers
102 License :: OSI Approved :: MIT License
103 Programming Language :: Python
104 Programming Language :: Python :: 3
105 Programming Language :: Python :: 3.7
106 Programming Language :: Python :: 3.8
107 Programming Language :: Python :: 3.9
108 Programming Language :: Python :: 3 :: Only
109 Programming Language :: Cython
110 Topic :: Software Development
111 Topic :: Scientific/Engineering
112 Operating System :: POSIX
113 Operating System :: Microsoft :: Windows
114 """
115
116
117 setup(
118 name=ctx.package_name,
119 version=__version__, # NOQA
120 description='CuPy: NumPy & SciPy for GPU',
121 long_description=long_description,
122 author='Seiya Tokui',
123 author_email='[email protected]',
124 maintainer='CuPy Developers',
125 url='https://cupy.dev/',
126 license='MIT License',
127 project_urls={
128 "Bug Tracker": "https://github.com/cupy/cupy/issues",
129 "Documentation": "https://docs.cupy.dev/",
130 "Source Code": "https://github.com/cupy/cupy",
131 },
132 classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
133 packages=find_packages(exclude=['install', 'tests']),
134 package_data=package_data,
135 zip_safe=False,
136 python_requires='>=3.7',
137 setup_requires=setup_requires,
138 install_requires=install_requires,
139 tests_require=tests_require,
140 extras_require=extras_require,
141 ext_modules=ext_modules,
142 cmdclass={'build_ext': build_ext},
143 )
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -23,7 +23,7 @@
'fastrlock>=0.5',
]
install_requires = [
- 'numpy>=1.17,<1.24', # see #4773
+ 'numpy>=1.18,<1.24', # see #4773
'fastrlock>=0.5',
]
extras_require = {
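
As a side note for readers checking the patch, the bumped version floor can be sanity-checked against candidate NumPy releases with a short script. This is a minimal sketch only, assuming the third-party `packaging` library (the same one pip uses for requirement parsing); it is not part of the CuPy repository or of the golden diff above.

```python
# Illustrative check only -- not part of the CuPy patch. Requires the
# third-party "packaging" library.
from packaging.specifiers import SpecifierSet

# The requirement string after the fix in setup.py.
numpy_requirement = SpecifierSet(">=1.18,<1.24")

# Versions dropped per NEP 29 should fall outside the specifier,
# while still-supported releases should remain inside it.
for candidate in ("1.17.5", "1.18.0", "1.21.3", "1.24.0"):
    status = "accepted" if candidate in numpy_requirement else "rejected"
    print(f"numpy {candidate}: {status}")
```

Running it shows 1.17.x being rejected while 1.18 through 1.23 releases still satisfy the updated `install_requires` entry.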
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -23,7 +23,7 @@\n 'fastrlock>=0.5',\n ]\n install_requires = [\n- 'numpy>=1.17,<1.24', # see #4773\n+ 'numpy>=1.18,<1.24', # see #4773\n 'fastrlock>=0.5',\n ]\n extras_require = {\n", "issue": "Drop support for NumPy 1.17 in v10 (NEP 29)\nCuPy should drop support for these legacy versions, following [NEP 29](https://numpy.org/neps/nep-0029-deprecation_policy.html#support-table).\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport glob\nimport os\nfrom setuptools import setup, find_packages\nimport sys\n\nsource_root = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(os.path.join(source_root, 'install'))\n\nimport cupy_builder # NOQA\nfrom cupy_builder import cupy_setup_build # NOQA\n\nctx = cupy_builder.Context(source_root)\ncupy_builder.initialize(ctx)\nif not cupy_builder.preflight_check(ctx):\n sys.exit(1)\n\n\n# TODO(kmaehashi): migrate to pyproject.toml (see #4727, #4619)\nsetup_requires = [\n 'Cython>=0.29.22,<3',\n 'fastrlock>=0.5',\n]\ninstall_requires = [\n 'numpy>=1.17,<1.24', # see #4773\n 'fastrlock>=0.5',\n]\nextras_require = {\n 'all': [\n 'scipy>=1.4,<1.10', # see #4773\n 'Cython>=0.29.22,<3',\n 'optuna>=2.0',\n ],\n 'stylecheck': [\n 'autopep8==1.5.5',\n 'flake8==3.8.4',\n 'pbr==5.5.1',\n 'pycodestyle==2.6.0',\n ],\n 'test': [\n # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI.\n 'pytest>=6.2',\n ],\n # TODO(kmaehashi): Remove 'jenkins' requirements.\n 'jenkins': [\n 'pytest>=6.2',\n 'pytest-timeout',\n 'pytest-cov',\n 'coveralls',\n 'codecov',\n 'coverage<5', # Otherwise, Python must be built with sqlite\n ],\n}\ntests_require = extras_require['test']\n\n\n# List of files that needs to be in the distribution (sdist/wheel).\n# Notes:\n# - Files only needed in sdist should be added to `MANIFEST.in`.\n# - The following glob (`**`) ignores items starting with `.`.\ncupy_package_data = [\n 'cupy/cuda/cupy_thrust.cu',\n 'cupy/cuda/cupy_cub.cu',\n 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback\n 'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback\n 'cupy/cuda/cupy_cufft.h', # for cuFFT callback\n 'cupy/cuda/cufft.pxd', # for cuFFT callback\n 'cupy/cuda/cufft.pyx', # for cuFFT callback\n 'cupy/random/cupy_distributions.cu',\n 'cupy/random/cupy_distributions.cuh',\n] + [\n x for x in glob.glob('cupy/_core/include/cupy/**', recursive=True)\n if os.path.isfile(x)\n]\n\npackage_data = {\n 'cupy': [\n os.path.relpath(x, 'cupy') for x in cupy_package_data\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs(ctx)\n\next_modules = cupy_setup_build.get_ext_modules(False, ctx)\nbuild_ext = cupy_setup_build.custom_build_ext\n\n# Get __version__ variable\nwith open(os.path.join(source_root, 'cupy', '_version.py')) as f:\n exec(f.read())\n\nlong_description = None\nif ctx.long_description_path is not None:\n with open(ctx.long_description_path) as f:\n long_description = f.read()\n\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3.8\nProgramming Language :: Python :: 3.9\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: POSIX\nOperating System :: Microsoft :: 
Windows\n\"\"\"\n\n\nsetup(\n name=ctx.package_name,\n version=__version__, # NOQA\n description='CuPy: NumPy & SciPy for GPU',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n maintainer='CuPy Developers',\n url='https://cupy.dev/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs.cupy.dev/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.7',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext},\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport glob\nimport os\nfrom setuptools import setup, find_packages\nimport sys\n\nsource_root = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(os.path.join(source_root, 'install'))\n\nimport cupy_builder # NOQA\nfrom cupy_builder import cupy_setup_build # NOQA\n\nctx = cupy_builder.Context(source_root)\ncupy_builder.initialize(ctx)\nif not cupy_builder.preflight_check(ctx):\n sys.exit(1)\n\n\n# TODO(kmaehashi): migrate to pyproject.toml (see #4727, #4619)\nsetup_requires = [\n 'Cython>=0.29.22,<3',\n 'fastrlock>=0.5',\n]\ninstall_requires = [\n 'numpy>=1.18,<1.24', # see #4773\n 'fastrlock>=0.5',\n]\nextras_require = {\n 'all': [\n 'scipy>=1.4,<1.10', # see #4773\n 'Cython>=0.29.22,<3',\n 'optuna>=2.0',\n ],\n 'stylecheck': [\n 'autopep8==1.5.5',\n 'flake8==3.8.4',\n 'pbr==5.5.1',\n 'pycodestyle==2.6.0',\n ],\n 'test': [\n # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI.\n 'pytest>=6.2',\n ],\n # TODO(kmaehashi): Remove 'jenkins' requirements.\n 'jenkins': [\n 'pytest>=6.2',\n 'pytest-timeout',\n 'pytest-cov',\n 'coveralls',\n 'codecov',\n 'coverage<5', # Otherwise, Python must be built with sqlite\n ],\n}\ntests_require = extras_require['test']\n\n\n# List of files that needs to be in the distribution (sdist/wheel).\n# Notes:\n# - Files only needed in sdist should be added to `MANIFEST.in`.\n# - The following glob (`**`) ignores items starting with `.`.\ncupy_package_data = [\n 'cupy/cuda/cupy_thrust.cu',\n 'cupy/cuda/cupy_cub.cu',\n 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback\n 'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback\n 'cupy/cuda/cupy_cufft.h', # for cuFFT callback\n 'cupy/cuda/cufft.pxd', # for cuFFT callback\n 'cupy/cuda/cufft.pyx', # for cuFFT callback\n 'cupy/random/cupy_distributions.cu',\n 'cupy/random/cupy_distributions.cuh',\n] + [\n x for x in glob.glob('cupy/_core/include/cupy/**', recursive=True)\n if os.path.isfile(x)\n]\n\npackage_data = {\n 'cupy': [\n os.path.relpath(x, 'cupy') for x in cupy_package_data\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs(ctx)\n\next_modules = cupy_setup_build.get_ext_modules(False, ctx)\nbuild_ext = cupy_setup_build.custom_build_ext\n\n# Get __version__ variable\nwith open(os.path.join(source_root, 'cupy', '_version.py')) as f:\n exec(f.read())\n\nlong_description = None\nif ctx.long_description_path is not None:\n with open(ctx.long_description_path) as f:\n long_description = f.read()\n\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: 
Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3.8\nProgramming Language :: Python :: 3.9\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: POSIX\nOperating System :: Microsoft :: Windows\n\"\"\"\n\n\nsetup(\n name=ctx.package_name,\n version=__version__, # NOQA\n description='CuPy: NumPy & SciPy for GPU',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n maintainer='CuPy Developers',\n url='https://cupy.dev/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs.cupy.dev/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.7',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext},\n)\n", "path": "setup.py"}]} | 1,813 | 110 |