problem_id (string, 18-22 chars) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, 13-58 chars) | prompt (string, 1.71k-18.9k chars) | golden_diff (string, 145-5.13k chars) | verification_info (string, 465-23.6k chars) | num_tokens_prompt (int64, 556-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_34572 | rasdani/github-patches | git_diff | SigmaHQ__sigma-1895 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
sigma2attack does not support collections
Collections parsing happens only in [`collection.py`](tools/sigma/parser/collection.py) it seems but [`sigma2attack`](/tools/sigma/sigma2attack.py#L24) uses good old `yaml.safe_load` on his own.
that leads to errors when parsing and rules being ignored
```
[snip]
Ignoring rule rules\windows\other\win_tool_psexec.yml (parsing failed)
Ignoring rule rules\windows\powershell\win_powershell_web_request.yml (parsing failed)
Ignoring rule rules\windows\process_access\sysmon_cmstp_execution.yml (parsing failed)
Ignoring rule rules\windows\process_creation\win_apt_chafer_mar18.yml (parsing failed)
Ignoring rule rules\windows\process_creation\win_apt_empiremonkey.yml (parsing failed)
Ignoring rule rules\windows\process_creation\win_apt_gallium.yml (parsing failed)
[snip]
```
</issue>
<code>
[start of tools/sigma/sigma2attack.py]
1 #!/usr/bin/env python3
2
3 import argparse
4 import glob
5 import json
6 import os
7 import sys
8
9 import yaml
10
11 def main():
12 parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
13 parser.add_argument("--rules-directory", "-d", dest="rules_dir", default="rules", help="Directory to read rules from")
14 parser.add_argument("--out-file", "-o", dest="out_file", default="heatmap.json", help="File to write the JSON layer to")
15 parser.add_argument("--no-comment", dest="no_comment", action="store_true", help="Don't store rule names in comments")
16 args = parser.parse_args()
17
18 rule_files = glob.glob(os.path.join(args.rules_dir, "**/*.yml"), recursive=True)
19 techniques_to_rules = {}
20 curr_max_technique_count = 0
21 num_rules_used = 0
22 for rule_file in rule_files:
23 try:
24 rule = yaml.safe_load(open(rule_file, encoding="utf-8").read())
25 except yaml.YAMLError:
26 sys.stderr.write("Ignoring rule " + rule_file + " (parsing failed)\n")
27 continue
28 if "tags" not in rule:
29 sys.stderr.write("Ignoring rule " + rule_file + " (no tags)\n")
30 continue
31 tags = rule["tags"]
32 for tag in tags:
33 if tag.lower().startswith("attack.t"):
34 technique_id = tag[len("attack."):].upper()
35 num_rules_used += 1
36 if technique_id not in techniques_to_rules:
37 techniques_to_rules[technique_id] = []
38 techniques_to_rules[technique_id].append(os.path.basename(rule_file))
39 curr_max_technique_count = max(curr_max_technique_count, len(techniques_to_rules[technique_id]))
40
41
42 scores = []
43 for technique in techniques_to_rules:
44 entry = {
45 "techniqueID": technique,
46 "score": len(techniques_to_rules[technique]),
47 }
48 if not args.no_comment:
49 entry["comment"] = "\n".join(techniques_to_rules[technique])
50
51 scores.append(entry)
52
53 output = {
54 "domain": "mitre-enterprise",
55 "name": "Sigma rules heatmap",
56 "gradient": {
57 "colors": [
58 "#ffffff",
59 "#ff6666"
60 ],
61 "maxValue": curr_max_technique_count,
62 "minValue": 0
63 },
64 "versions": {
65 "navigator": "4.0",
66 "layer": "4.0"
67 },
68 "techniques": scores,
69 }
70
71 with open(args.out_file, "w") as f:
72 f.write(json.dumps(output))
73 print("[*] Layer file written in " + args.out_file + " (" + str(num_rules_used) + " rules)")
74
75 if __name__ == "__main__":
76 main()
77
[end of tools/sigma/sigma2attack.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/sigma/sigma2attack.py b/tools/sigma/sigma2attack.py
--- a/tools/sigma/sigma2attack.py
+++ b/tools/sigma/sigma2attack.py
@@ -8,6 +8,7 @@
import yaml
+
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--rules-directory", "-d", dest="rules_dir", default="rules", help="Directory to read rules from")
@@ -20,24 +21,25 @@
curr_max_technique_count = 0
num_rules_used = 0
for rule_file in rule_files:
- try:
- rule = yaml.safe_load(open(rule_file, encoding="utf-8").read())
- except yaml.YAMLError:
- sys.stderr.write("Ignoring rule " + rule_file + " (parsing failed)\n")
- continue
- if "tags" not in rule:
- sys.stderr.write("Ignoring rule " + rule_file + " (no tags)\n")
- continue
- tags = rule["tags"]
- for tag in tags:
- if tag.lower().startswith("attack.t"):
- technique_id = tag[len("attack."):].upper()
- num_rules_used += 1
- if technique_id not in techniques_to_rules:
- techniques_to_rules[technique_id] = []
- techniques_to_rules[technique_id].append(os.path.basename(rule_file))
- curr_max_technique_count = max(curr_max_technique_count, len(techniques_to_rules[technique_id]))
-
+ with open(rule_file,encoding='utf-8') as f:
+ docs = yaml.load_all(f, Loader=yaml.FullLoader)
+ double = False
+ for rule in docs:
+ if "tags" not in rule :
+ if double == False : # Only 1 warning
+ sys.stderr.write("Ignoring rule " + rule_file + " (no tags)\n")
+ double = True # action globle no tag
+ continue
+ tags = rule["tags"]
+ double = True
+ for tag in tags:
+ if tag.lower().startswith("attack.t"):
+ technique_id = tag[len("attack."):].upper()
+ num_rules_used += 1
+ if technique_id not in techniques_to_rules:
+ techniques_to_rules[technique_id] = []
+ techniques_to_rules[technique_id].append(os.path.basename(rule_file))
+ curr_max_technique_count = max(curr_max_technique_count, len(techniques_to_rules[technique_id]))
scores = []
for technique in techniques_to_rules:
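The patch above replaces the single-document `yaml.safe_load` call with multi-document parsing, because a Sigma collection keeps several YAML documents in one `.yml` file separated by `---`. Below is a minimal, repository-independent sketch of that difference; the sample collection text and helper name are illustrative assumptions, not code taken from Sigma.

```
import yaml

# Two rules in one file, separated by "---", as in a Sigma collection.
COLLECTION = """\
title: first rule
tags: [attack.t1059]
---
title: second rule
tags: [attack.t1105]
"""

def load_rule_documents(text):
    # yaml.safe_load() raises a YAMLError subclass ("expected a single document
    # in the stream") as soon as the second document starts, which is why
    # collection files were skipped with "parsing failed".
    # yaml.safe_load_all() yields every document and also handles plain,
    # single-document rule files.
    return [doc for doc in yaml.safe_load_all(text) if doc is not None]

print(len(load_rule_documents(COLLECTION)))  # 2 documents parsed
```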
| {"golden_diff": "diff --git a/tools/sigma/sigma2attack.py b/tools/sigma/sigma2attack.py\n--- a/tools/sigma/sigma2attack.py\n+++ b/tools/sigma/sigma2attack.py\n@@ -8,6 +8,7 @@\n \n import yaml\n \n+\n def main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--rules-directory\", \"-d\", dest=\"rules_dir\", default=\"rules\", help=\"Directory to read rules from\")\n@@ -20,24 +21,25 @@\n curr_max_technique_count = 0\n num_rules_used = 0\n for rule_file in rule_files:\n- try:\n- rule = yaml.safe_load(open(rule_file, encoding=\"utf-8\").read())\n- except yaml.YAMLError:\n- sys.stderr.write(\"Ignoring rule \" + rule_file + \" (parsing failed)\\n\")\n- continue\n- if \"tags\" not in rule:\n- sys.stderr.write(\"Ignoring rule \" + rule_file + \" (no tags)\\n\")\n- continue\n- tags = rule[\"tags\"]\n- for tag in tags:\n- if tag.lower().startswith(\"attack.t\"):\n- technique_id = tag[len(\"attack.\"):].upper()\n- num_rules_used += 1\n- if technique_id not in techniques_to_rules:\n- techniques_to_rules[technique_id] = []\n- techniques_to_rules[technique_id].append(os.path.basename(rule_file))\n- curr_max_technique_count = max(curr_max_technique_count, len(techniques_to_rules[technique_id]))\n-\n+ with open(rule_file,encoding='utf-8') as f:\n+ docs = yaml.load_all(f, Loader=yaml.FullLoader)\n+ double = False\n+ for rule in docs:\n+ if \"tags\" not in rule :\n+ if double == False : # Only 1 warning\n+ sys.stderr.write(\"Ignoring rule \" + rule_file + \" (no tags)\\n\")\n+ double = True # action globle no tag\n+ continue\n+ tags = rule[\"tags\"]\n+ double = True\n+ for tag in tags:\n+ if tag.lower().startswith(\"attack.t\"):\n+ technique_id = tag[len(\"attack.\"):].upper()\n+ num_rules_used += 1\n+ if technique_id not in techniques_to_rules:\n+ techniques_to_rules[technique_id] = []\n+ techniques_to_rules[technique_id].append(os.path.basename(rule_file))\n+ curr_max_technique_count = max(curr_max_technique_count, len(techniques_to_rules[technique_id]))\n \n scores = []\n for technique in techniques_to_rules:\n", "issue": "sigma2attack does not support collections\nCollections parsing happens only in [`collection.py`](tools/sigma/parser/collection.py) it seems but [`sigma2attack`](/tools/sigma/sigma2attack.py#L24) uses good old `yaml.safe_load` on his own.\r\n\r\nthat leads to errors when parsing and rules being ignored\r\n\r\n```\r\n[snip]\r\nIgnoring rule rules\\windows\\other\\win_tool_psexec.yml (parsing failed)\r\nIgnoring rule rules\\windows\\powershell\\win_powershell_web_request.yml (parsing failed)\r\nIgnoring rule rules\\windows\\process_access\\sysmon_cmstp_execution.yml (parsing failed)\r\nIgnoring rule rules\\windows\\process_creation\\win_apt_chafer_mar18.yml (parsing failed)\r\nIgnoring rule rules\\windows\\process_creation\\win_apt_empiremonkey.yml (parsing failed)\r\nIgnoring rule rules\\windows\\process_creation\\win_apt_gallium.yml (parsing failed)\r\n[snip]\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport argparse\nimport glob\nimport json\nimport os\nimport sys\n\nimport yaml\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--rules-directory\", \"-d\", dest=\"rules_dir\", default=\"rules\", help=\"Directory to read rules from\")\n parser.add_argument(\"--out-file\", \"-o\", dest=\"out_file\", default=\"heatmap.json\", help=\"File to write the JSON layer to\")\n parser.add_argument(\"--no-comment\", 
dest=\"no_comment\", action=\"store_true\", help=\"Don't store rule names in comments\")\n args = parser.parse_args()\n\n rule_files = glob.glob(os.path.join(args.rules_dir, \"**/*.yml\"), recursive=True)\n techniques_to_rules = {}\n curr_max_technique_count = 0\n num_rules_used = 0\n for rule_file in rule_files:\n try:\n rule = yaml.safe_load(open(rule_file, encoding=\"utf-8\").read())\n except yaml.YAMLError:\n sys.stderr.write(\"Ignoring rule \" + rule_file + \" (parsing failed)\\n\")\n continue\n if \"tags\" not in rule:\n sys.stderr.write(\"Ignoring rule \" + rule_file + \" (no tags)\\n\")\n continue\n tags = rule[\"tags\"]\n for tag in tags:\n if tag.lower().startswith(\"attack.t\"):\n technique_id = tag[len(\"attack.\"):].upper()\n num_rules_used += 1\n if technique_id not in techniques_to_rules:\n techniques_to_rules[technique_id] = []\n techniques_to_rules[technique_id].append(os.path.basename(rule_file))\n curr_max_technique_count = max(curr_max_technique_count, len(techniques_to_rules[technique_id]))\n\n\n scores = []\n for technique in techniques_to_rules:\n entry = {\n \"techniqueID\": technique, \n \"score\": len(techniques_to_rules[technique]), \n }\n if not args.no_comment:\n entry[\"comment\"] = \"\\n\".join(techniques_to_rules[technique])\n\n scores.append(entry)\n\n output = {\n \"domain\": \"mitre-enterprise\",\n \"name\": \"Sigma rules heatmap\",\n \"gradient\": {\n \"colors\": [\n \"#ffffff\",\n \"#ff6666\"\n ],\n \"maxValue\": curr_max_technique_count,\n \"minValue\": 0\n },\n \"versions\": {\n \"navigator\": \"4.0\",\n \"layer\": \"4.0\"\n },\n \"techniques\": scores,\n }\n\n with open(args.out_file, \"w\") as f:\n f.write(json.dumps(output))\n print(\"[*] Layer file written in \" + args.out_file + \" (\" + str(num_rules_used) + \" rules)\")\n\nif __name__ == \"__main__\":\n main()\n", "path": "tools/sigma/sigma2attack.py"}]} | 1,520 | 593 |
gh_patches_debug_38913 | rasdani/github-patches | git_diff | akvo__akvo-rsr-1782 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't add locations to update through REST API
## Test plan
GIVEN the Up app
WHEN the user tries to add an update
THEN this should not give a 400 error
</issue>
<code>
[start of akvo/rest/serializers/__init__.py]
1 # -*- coding: utf-8 -*-
2
3 """Akvo RSR is covered by the GNU Affero General Public License.
4 See more details in the license.txt file located at the root folder of the
5 Akvo RSR module. For additional details on the GNU license please
6 see < http://www.gnu.org/licenses/agpl.html >.
7 """
8
9
10 from .benchmark import BenchmarkSerializer
11 from .benchmark_name import BenchmarknameSerializer
12 from .budget_item import BudgetItemSerializer, CountryBudgetItemSerializer
13 from .budget_item_label import BudgetItemLabelSerializer
14 from .category import CategorySerializer
15 from .country import CountrySerializer
16 from .custom_field import OrganisationCustomFieldSerializer, ProjectCustomFieldSerializer
17 from .employment import EmploymentSerializer
18 from .focus_area import FocusAreaSerializer
19 from .goal import GoalSerializer
20 from .indicator import IndicatorPeriodSerializer, IndicatorSerializer
21 from .internal_organisation_id import InternalOrganisationIDSerializer
22 from .invoice import InvoiceSerializer
23 from .keyword import KeywordSerializer
24 from .legacy_data import LegacyDataSerializer
25 from .link import LinkSerializer
26 from .organisation import OrganisationSerializer
27 from .organisation_location import (OrganisationLocationSerializer,
28 MapOrganisationLocationSerializer)
29 from .partner_site import PartnerSiteSerializer
30 from .partnership import PartnershipSerializer
31 from .planned_disbursement import PlannedDisbursementSerializer
32 from .policy_marker import PolicyMarkerSerializer
33 from .project import ProjectSerializer, ProjectExtraSerializer, ProjectUpSerializer
34 from .project_comment import ProjectCommentSerializer
35 from .project_condition import ProjectConditionSerializer
36 from .project_contact import ProjectContactSerializer
37 from .project_document import ProjectDocumentSerializer
38 from .project_location import (ProjectLocationSerializer, AdministrativeLocationSerializer,
39 MapProjectLocationSerializer)
40 from .project_update import (ProjectUpdateSerializer,
41 ProjectUpdateExtraSerializer)
42 from .project_update_location import (ProjectUpdateLocationSerializer,
43 MapProjectUpdateLocationSerializer)
44 from .publishing_status import PublishingStatusSerializer
45 from .recipient_country import RecipientCountrySerializer
46 from .region import RecipientRegionSerializer
47 from .related_project import RelatedProjectSerializer
48 from .result import ResultSerializer
49 from .sector import SectorSerializer
50 from .transaction import TransactionSerializer, TransactionSectorSerializer
51 from .typeahead import (TypeaheadCountrySerializer,
52 TypeaheadOrganisationSerializer,
53 TypeaheadProjectSerializer,
54 TypeaheadProjectUpdateSerializer)
55 from .user import UserSerializer, UserDetailsSerializer, UserPasswordSerializer
56
57 __all__ = [
58 'AdministrativeLocationSerializer',
59 'BenchmarknameSerializer',
60 'BenchmarkSerializer',
61 'BudgetItemLabelSerializer',
62 'BudgetItemSerializer',
63 'CategorySerializer',
64 'CountrySerializer',
65 'CountryBudgetItemSerializer',
66 'EmploymentSerializer',
67 'FocusAreaSerializer',
68 'GoalSerializer',
69 'IndicatorPeriodSerializer',
70 'IndicatorSerializer',
71 'InternalOrganisationIDSerializer',
72 'InvoiceSerializer',
73 'KeywordSerializer',
74 'LegacyDataSerializer',
75 'LinkSerializer',
76 'MapOrganisationLocationSerializer',
77 'MapProjectLocationSerializer',
78 'MapProjectUpdateLocationSerializer',
79 'OrganisationSerializer',
80 'OrganisationCustomFieldSerializer',
81 'OrganisationLocationSerializer',
82 'PartnershipSerializer',
83 'PartnerSiteSerializer',
84 'PlannedDisbursementSerializer',
85 'PolicyMarkerSerializer',
86 'ProjectCommentSerializer',
87 'ProjectConditionSerializer',
88 'ProjectContactSerializer',
89 'ProjectCustomFieldSerializer',
90 'ProjectDocumentSerializer',
91 'ProjectExtraSerializer',
92 'ProjectLocationSerializer',
93 'ProjectSerializer',
94 'ProjectUpdateExtraSerializer',
95 'ProjectUpdateLocationSerializer',
96 'ProjectUpdateSerializer',
97 'ProjectUpSerializer',
98 'PublishingStatusSerializer',
99 'RecipientCountrySerializer',
100 'RecipientRegionSerializer',
101 'RelatedProjectSerializer',
102 'ResultSerializer',
103 'SectorSerializer',
104 'TransactionSerializer',
105 'TransactionSectorSerializer',
106 'TypeaheadCountrySerializer',
107 'TypeaheadOrganisationSerializer',
108 'TypeaheadProjectSerializer',
109 'TypeaheadProjectUpdateSerializer',
110 'UserDetailsSerializer',
111 'UserPasswordSerializer',
112 'UserSerializer',
113 ]
114
[end of akvo/rest/serializers/__init__.py]
[start of akvo/rest/serializers/project_update.py]
1 # -*- coding: utf-8 -*-
2 """Akvo RSR is covered by the GNU Affero General Public License.
3
4 See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6 """
7
8 from rest_framework import serializers
9 from akvo.rsr.models import ProjectUpdate
10 from ..fields import Base64ImageField
11 from .project_update_location import (ProjectUpdateLocationSerializer,
12 ProjectUpdateLocationExtraSerializer)
13 from .rsr_serializer import BaseRSRSerializer
14 from .user import UserSerializer
15
16
17 class ProjectUpdateSerializer(BaseRSRSerializer):
18
19 """Serializer for project updates."""
20
21 locations = ProjectUpdateLocationSerializer(source='locations', many=True, required=False,
22 allow_add_remove=True)
23 photo = Base64ImageField(required=False, allow_empty_file=True)
24
25 class Meta:
26 model = ProjectUpdate
27
28
29 class ProjectUpdateExtraSerializer(BaseRSRSerializer):
30
31 """This serializer includes data about user and connected organisation."""
32
33 photo = Base64ImageField(required=False, allow_empty_file=True)
34 primary_location = ProjectUpdateLocationExtraSerializer()
35 # Limit project data to its PK, this is needed because of Meta.depth = 2
36 project = serializers.Field(source='project.pk')
37 user = UserSerializer()
38
39 class Meta:
40 model = ProjectUpdate
41 depth = 2
42
[end of akvo/rest/serializers/project_update.py]
[start of akvo/rest/serializers/project_update_location.py]
1 # -*- coding: utf-8 -*-
2 """Akvo RSR is covered by the GNU Affero General Public License.
3 See more details in the license.txt file located at the root folder of the Akvo RSR module.
4 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
5 """
6
7 from rest_framework import serializers
8 from akvo.rsr.models import ProjectUpdateLocation
9 from ..fields import Base64ImageField
10 from .rsr_serializer import BaseRSRSerializer
11
12
13 class ProjectUpdateLocationSerializer(BaseRSRSerializer):
14
15 class Meta:
16 model = ProjectUpdateLocation
17
18
19 class ProjectUpdateLocationExtraSerializer(ProjectUpdateLocationSerializer):
20
21 # Limit update data to its PK, this is needed because of Meta.depth = 2
22 location_target = serializers.Field(source='location_target.pk')
23
24 class Meta(ProjectUpdateLocationSerializer.Meta):
25 depth = 2
26
27
28 class MapProjectUpdateSerializer(serializers.Serializer):
29
30 """To serialize the update field of the update map resource."""
31
32 id = serializers.IntegerField()
33 title = serializers.CharField()
34 url = serializers.URLField(source='get_absolute_url')
35 photo = Base64ImageField(required=False, allow_empty_file=True)
36 video = serializers.CharField(required=False)
37
38
39 class MapProjectUpdateLocationSerializer(serializers.Serializer):
40
41 """To serialize the update map resource."""
42
43 id = serializers.IntegerField()
44 latitude = serializers.FloatField()
45 longitude = serializers.FloatField()
46 update = MapProjectUpdateSerializer(source='location_target')
47
[end of akvo/rest/serializers/project_update_location.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rest/serializers/__init__.py b/akvo/rest/serializers/__init__.py
--- a/akvo/rest/serializers/__init__.py
+++ b/akvo/rest/serializers/__init__.py
@@ -40,6 +40,7 @@
from .project_update import (ProjectUpdateSerializer,
ProjectUpdateExtraSerializer)
from .project_update_location import (ProjectUpdateLocationSerializer,
+ ProjectUpdateLocationNestedSerializer,
MapProjectUpdateLocationSerializer)
from .publishing_status import PublishingStatusSerializer
from .recipient_country import RecipientCountrySerializer
@@ -93,6 +94,7 @@
'ProjectSerializer',
'ProjectUpdateExtraSerializer',
'ProjectUpdateLocationSerializer',
+ 'ProjectUpdateLocationNestedSerializer',
'ProjectUpdateSerializer',
'ProjectUpSerializer',
'PublishingStatusSerializer',
diff --git a/akvo/rest/serializers/project_update.py b/akvo/rest/serializers/project_update.py
--- a/akvo/rest/serializers/project_update.py
+++ b/akvo/rest/serializers/project_update.py
@@ -8,7 +8,7 @@
from rest_framework import serializers
from akvo.rsr.models import ProjectUpdate
from ..fields import Base64ImageField
-from .project_update_location import (ProjectUpdateLocationSerializer,
+from .project_update_location import (ProjectUpdateLocationNestedSerializer,
ProjectUpdateLocationExtraSerializer)
from .rsr_serializer import BaseRSRSerializer
from .user import UserSerializer
@@ -18,8 +18,8 @@
"""Serializer for project updates."""
- locations = ProjectUpdateLocationSerializer(source='locations', many=True, required=False,
- allow_add_remove=True)
+ locations = ProjectUpdateLocationNestedSerializer(source='locations', many=True, required=False,
+ allow_add_remove=True)
photo = Base64ImageField(required=False, allow_empty_file=True)
class Meta:
diff --git a/akvo/rest/serializers/project_update_location.py b/akvo/rest/serializers/project_update_location.py
--- a/akvo/rest/serializers/project_update_location.py
+++ b/akvo/rest/serializers/project_update_location.py
@@ -16,6 +16,14 @@
model = ProjectUpdateLocation
+class ProjectUpdateLocationNestedSerializer(ProjectUpdateLocationSerializer):
+
+ class Meta(ProjectUpdateLocationSerializer.Meta):
+ # Exclude the mandatory 'location_target' field, so that it is possible to create a
+ # project update location at the same time as the project update.
+ exclude = ('location_target',)
+
+
class ProjectUpdateLocationExtraSerializer(ProjectUpdateLocationSerializer):
# Limit update data to its PK, this is needed because of Meta.depth = 2
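The new `ProjectUpdateLocationNestedSerializer` drops the mandatory `location_target` foreign key so locations can be posted together with the update they belong to; until then, validation of the nested payload failed and the API answered 400. A framework-free sketch of that validation problem follows; the field names mirror the diff, but the checking logic itself is invented for illustration.

```
REQUIRED_LOCATION_FIELDS = {"latitude", "longitude", "location_target"}

def validate_location(payload, required=frozenset(REQUIRED_LOCATION_FIELDS)):
    # Reject the payload if any required field is missing, the way a strict
    # nested serializer would.
    missing = sorted(required - payload.keys())
    return ("error", missing) if missing else ("ok", [])

location = {"latitude": 52.37, "longitude": 4.89}

# When the update and its locations arrive in the same POST, the update's
# primary key does not exist yet, so 'location_target' cannot be supplied:
print(validate_location(location))  # ('error', ['location_target'])

# Excluding the parent link from the nested schema (what Meta.exclude does in
# the serializer above) lets the same payload through; the backend attaches
# the location to the freshly created update afterwards.
print(validate_location(location,
                        required=REQUIRED_LOCATION_FIELDS - {"location_target"}))  # ('ok', [])
```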
| {"golden_diff": "diff --git a/akvo/rest/serializers/__init__.py b/akvo/rest/serializers/__init__.py\n--- a/akvo/rest/serializers/__init__.py\n+++ b/akvo/rest/serializers/__init__.py\n@@ -40,6 +40,7 @@\n from .project_update import (ProjectUpdateSerializer,\n ProjectUpdateExtraSerializer)\n from .project_update_location import (ProjectUpdateLocationSerializer,\n+ ProjectUpdateLocationNestedSerializer,\n MapProjectUpdateLocationSerializer)\n from .publishing_status import PublishingStatusSerializer\n from .recipient_country import RecipientCountrySerializer\n@@ -93,6 +94,7 @@\n 'ProjectSerializer',\n 'ProjectUpdateExtraSerializer',\n 'ProjectUpdateLocationSerializer',\n+ 'ProjectUpdateLocationNestedSerializer',\n 'ProjectUpdateSerializer',\n 'ProjectUpSerializer',\n 'PublishingStatusSerializer',\ndiff --git a/akvo/rest/serializers/project_update.py b/akvo/rest/serializers/project_update.py\n--- a/akvo/rest/serializers/project_update.py\n+++ b/akvo/rest/serializers/project_update.py\n@@ -8,7 +8,7 @@\n from rest_framework import serializers\n from akvo.rsr.models import ProjectUpdate\n from ..fields import Base64ImageField\n-from .project_update_location import (ProjectUpdateLocationSerializer,\n+from .project_update_location import (ProjectUpdateLocationNestedSerializer,\n ProjectUpdateLocationExtraSerializer)\n from .rsr_serializer import BaseRSRSerializer\n from .user import UserSerializer\n@@ -18,8 +18,8 @@\n \n \"\"\"Serializer for project updates.\"\"\"\n \n- locations = ProjectUpdateLocationSerializer(source='locations', many=True, required=False,\n- allow_add_remove=True)\n+ locations = ProjectUpdateLocationNestedSerializer(source='locations', many=True, required=False,\n+ allow_add_remove=True)\n photo = Base64ImageField(required=False, allow_empty_file=True)\n \n class Meta:\ndiff --git a/akvo/rest/serializers/project_update_location.py b/akvo/rest/serializers/project_update_location.py\n--- a/akvo/rest/serializers/project_update_location.py\n+++ b/akvo/rest/serializers/project_update_location.py\n@@ -16,6 +16,14 @@\n model = ProjectUpdateLocation\n \n \n+class ProjectUpdateLocationNestedSerializer(ProjectUpdateLocationSerializer):\n+\n+ class Meta(ProjectUpdateLocationSerializer.Meta):\n+ # Exclude the mandatory 'location_target' field, so that it is possible to create a\n+ # project update location at the same time as the project update.\n+ exclude = ('location_target',)\n+\n+\n class ProjectUpdateLocationExtraSerializer(ProjectUpdateLocationSerializer):\n \n # Limit update data to its PK, this is needed because of Meta.depth = 2\n", "issue": "Can't add locations to update through REST API\n## Test plan\n\nGIVEN the Up app\nWHEN the user tries to add an update\nTHEN this should not give a 400 error\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. 
For additional details on the GNU license please\nsee < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\n\nfrom .benchmark import BenchmarkSerializer\nfrom .benchmark_name import BenchmarknameSerializer\nfrom .budget_item import BudgetItemSerializer, CountryBudgetItemSerializer\nfrom .budget_item_label import BudgetItemLabelSerializer\nfrom .category import CategorySerializer\nfrom .country import CountrySerializer\nfrom .custom_field import OrganisationCustomFieldSerializer, ProjectCustomFieldSerializer\nfrom .employment import EmploymentSerializer\nfrom .focus_area import FocusAreaSerializer\nfrom .goal import GoalSerializer\nfrom .indicator import IndicatorPeriodSerializer, IndicatorSerializer\nfrom .internal_organisation_id import InternalOrganisationIDSerializer\nfrom .invoice import InvoiceSerializer\nfrom .keyword import KeywordSerializer\nfrom .legacy_data import LegacyDataSerializer\nfrom .link import LinkSerializer\nfrom .organisation import OrganisationSerializer\nfrom .organisation_location import (OrganisationLocationSerializer,\n MapOrganisationLocationSerializer)\nfrom .partner_site import PartnerSiteSerializer\nfrom .partnership import PartnershipSerializer\nfrom .planned_disbursement import PlannedDisbursementSerializer\nfrom .policy_marker import PolicyMarkerSerializer\nfrom .project import ProjectSerializer, ProjectExtraSerializer, ProjectUpSerializer\nfrom .project_comment import ProjectCommentSerializer\nfrom .project_condition import ProjectConditionSerializer\nfrom .project_contact import ProjectContactSerializer\nfrom .project_document import ProjectDocumentSerializer\nfrom .project_location import (ProjectLocationSerializer, AdministrativeLocationSerializer,\n MapProjectLocationSerializer)\nfrom .project_update import (ProjectUpdateSerializer,\n ProjectUpdateExtraSerializer)\nfrom .project_update_location import (ProjectUpdateLocationSerializer,\n MapProjectUpdateLocationSerializer)\nfrom .publishing_status import PublishingStatusSerializer\nfrom .recipient_country import RecipientCountrySerializer\nfrom .region import RecipientRegionSerializer\nfrom .related_project import RelatedProjectSerializer\nfrom .result import ResultSerializer\nfrom .sector import SectorSerializer\nfrom .transaction import TransactionSerializer, TransactionSectorSerializer\nfrom .typeahead import (TypeaheadCountrySerializer,\n TypeaheadOrganisationSerializer,\n TypeaheadProjectSerializer,\n TypeaheadProjectUpdateSerializer)\nfrom .user import UserSerializer, UserDetailsSerializer, UserPasswordSerializer\n\n__all__ = [\n 'AdministrativeLocationSerializer',\n 'BenchmarknameSerializer',\n 'BenchmarkSerializer',\n 'BudgetItemLabelSerializer',\n 'BudgetItemSerializer',\n 'CategorySerializer',\n 'CountrySerializer',\n 'CountryBudgetItemSerializer',\n 'EmploymentSerializer',\n 'FocusAreaSerializer',\n 'GoalSerializer',\n 'IndicatorPeriodSerializer',\n 'IndicatorSerializer',\n 'InternalOrganisationIDSerializer',\n 'InvoiceSerializer',\n 'KeywordSerializer',\n 'LegacyDataSerializer',\n 'LinkSerializer',\n 'MapOrganisationLocationSerializer',\n 'MapProjectLocationSerializer',\n 'MapProjectUpdateLocationSerializer',\n 'OrganisationSerializer',\n 'OrganisationCustomFieldSerializer',\n 'OrganisationLocationSerializer',\n 'PartnershipSerializer',\n 'PartnerSiteSerializer',\n 'PlannedDisbursementSerializer',\n 'PolicyMarkerSerializer',\n 'ProjectCommentSerializer',\n 'ProjectConditionSerializer',\n 'ProjectContactSerializer',\n 'ProjectCustomFieldSerializer',\n 'ProjectDocumentSerializer',\n 
'ProjectExtraSerializer',\n 'ProjectLocationSerializer',\n 'ProjectSerializer',\n 'ProjectUpdateExtraSerializer',\n 'ProjectUpdateLocationSerializer',\n 'ProjectUpdateSerializer',\n 'ProjectUpSerializer',\n 'PublishingStatusSerializer',\n 'RecipientCountrySerializer',\n 'RecipientRegionSerializer',\n 'RelatedProjectSerializer',\n 'ResultSerializer',\n 'SectorSerializer',\n 'TransactionSerializer',\n 'TransactionSectorSerializer',\n 'TypeaheadCountrySerializer',\n 'TypeaheadOrganisationSerializer',\n 'TypeaheadProjectSerializer',\n 'TypeaheadProjectUpdateSerializer',\n 'UserDetailsSerializer',\n 'UserPasswordSerializer',\n 'UserSerializer',\n]\n", "path": "akvo/rest/serializers/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom rest_framework import serializers\nfrom akvo.rsr.models import ProjectUpdate\nfrom ..fields import Base64ImageField\nfrom .project_update_location import (ProjectUpdateLocationSerializer,\n ProjectUpdateLocationExtraSerializer)\nfrom .rsr_serializer import BaseRSRSerializer\nfrom .user import UserSerializer\n\n\nclass ProjectUpdateSerializer(BaseRSRSerializer):\n\n \"\"\"Serializer for project updates.\"\"\"\n\n locations = ProjectUpdateLocationSerializer(source='locations', many=True, required=False,\n allow_add_remove=True)\n photo = Base64ImageField(required=False, allow_empty_file=True)\n\n class Meta:\n model = ProjectUpdate\n\n\nclass ProjectUpdateExtraSerializer(BaseRSRSerializer):\n\n \"\"\"This serializer includes data about user and connected organisation.\"\"\"\n\n photo = Base64ImageField(required=False, allow_empty_file=True)\n primary_location = ProjectUpdateLocationExtraSerializer()\n # Limit project data to its PK, this is needed because of Meta.depth = 2\n project = serializers.Field(source='project.pk')\n user = UserSerializer()\n\n class Meta:\n model = ProjectUpdate\n depth = 2\n", "path": "akvo/rest/serializers/project_update.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom rest_framework import serializers\nfrom akvo.rsr.models import ProjectUpdateLocation\nfrom ..fields import Base64ImageField\nfrom .rsr_serializer import BaseRSRSerializer\n\n\nclass ProjectUpdateLocationSerializer(BaseRSRSerializer):\n\n class Meta:\n model = ProjectUpdateLocation\n\n\nclass ProjectUpdateLocationExtraSerializer(ProjectUpdateLocationSerializer):\n\n # Limit update data to its PK, this is needed because of Meta.depth = 2\n location_target = serializers.Field(source='location_target.pk')\n\n class Meta(ProjectUpdateLocationSerializer.Meta):\n depth = 2\n\n\nclass MapProjectUpdateSerializer(serializers.Serializer):\n\n \"\"\"To serialize the update field of the update map resource.\"\"\"\n\n id = serializers.IntegerField()\n title = serializers.CharField()\n url = serializers.URLField(source='get_absolute_url')\n photo = Base64ImageField(required=False, allow_empty_file=True)\n video = serializers.CharField(required=False)\n\n\nclass MapProjectUpdateLocationSerializer(serializers.Serializer):\n\n \"\"\"To serialize the update 
map resource.\"\"\"\n\n id = serializers.IntegerField()\n latitude = serializers.FloatField()\n longitude = serializers.FloatField()\n update = MapProjectUpdateSerializer(source='location_target')\n", "path": "akvo/rest/serializers/project_update_location.py"}]} | 2,500 | 600 |
gh_patches_debug_10026 | rasdani/github-patches | git_diff | cowrie__cowrie-818 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TypeError in command "apt-get install"
**Describe the bug**
When using the command "apt-get install XY" while logged in per SSH in the cowrie honeypot, nothing happens.
If you look into the logs, the requested software couldn't be installed but a TypeError is thrown:
```Traceback (most recent call last):
File "/home/cowrie/github/cowrie/cowrie/shell/protocol.py", line 359, in call_command
HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
File "/home/cowrie/github/cowrie/cowrie/shell/protocol.py", line 216, in call_command
obj.start()
File "/home/cowrie/github/cowrie/cowrie/commands/apt.py", line 36, in start
self.do_install()
File "/home/cowrie/github/cowrie/cowrie-env/lib/python2.7/site-packages/twisted/internet/defer.py", line 1532, in unwindGenerator
return _inlineCallbacks(None, gen, Deferred())
--- <exception caught here> ---
File "/home/cowrie/github/cowrie/cowrie-env/lib/python2.7/site-packages/twisted/internet/defer.py", line 1386, in _inlineCallbacks
result = g.send(result)
File "/home/cowrie/github/cowrie/cowrie/commands/apt.py", line 123, in do_install
'version': '{0}.{1}-{2}'.format(random.choice(0, 1), random.randint(1, 40), random.randint(1, 10)),
exceptions.TypeError: choice() takes exactly 2 arguments (3 given)
```
I investigated the source and saw what is actually wrong:
```
'version': '{0}.{1}-{2}'.format(random.choice(0, 1), random.randint(1, 40), random.randint(1, 10)),
```
in which `random.choice(0, 1)` requires a sequence, so `random.choice([0, 1])` should work.
**To Reproduce**
Steps to reproduce the behavior:
1. Log in a cowrie honeypot per ssh: ssh -p 2222 root@honeypot
2. Try "sudo apt-get install vim"
3. See the error in the cowrie logs on your machine: cowrie/logs/cowrie.log
**Expected behavior**
See some common apt-get messages, like "Reading package lists... Done\n", e.g. simulating to the attacker the command worked.
**Server (please complete the following information):**
- OS: CentOS 7, x86_64
- Python: Python 2.7.5 and Python 3.6.5
**Additional context**
-
</issue>
<code>
[start of cowrie/commands/apt.py]
1 # Copyright (c) 2009 Upi Tamminen <[email protected]>
2 # See the COPYRIGHT file for more information
3
4 from __future__ import division, absolute_import
5
6 import random
7 import re
8
9 from twisted.internet import reactor, defer
10 from twisted.internet.defer import inlineCallbacks
11
12 from cowrie.shell.command import HoneyPotCommand
13
14 commands = {}
15
16 class command_faked_package_class_factory(object):
17 @staticmethod
18 def getCommand(name):
19 class command_faked_installation(HoneyPotCommand):
20 def call(self):
21 self.write(b"%s: Segmentation fault\n" % name)
22 return command_faked_installation
23
24 class command_aptget(HoneyPotCommand):
25 """
26 apt-get fake
27 suppports only the 'install PACKAGE' command & 'moo'.
28 Any installed packages, places a 'Segfault' at /usr/bin/PACKAGE.'''
29 """
30 def start(self):
31 if len(self.args) == 0:
32 self.do_help()
33 elif len(self.args) > 0 and self.args[0] == '-v':
34 self.do_version()
35 elif len(self.args) > 0 and self.args[0] == 'install':
36 self.do_install()
37 elif len(self.args) > 0 and self.args[0] == 'moo':
38 self.do_moo()
39 else:
40 self.do_locked()
41
42 def sleep(self, time, time2=None):
43 d = defer.Deferred()
44 if time2:
45 time = random.randint(time * 100, time2 * 100) / 100.0
46 reactor.callLater(time, d.callback, None)
47 return d
48
49 def do_version(self):
50 self.write('''apt 1.0.9.8.1 for amd64 compiled on Jun 10 2015 09:42:06
51 Supported modules:
52 *Ver: Standard .deb
53 *Pkg: Debian dpkg interface (Priority 30)
54 Pkg: Debian APT solver interface (Priority -1000)
55 S.L: 'deb' Standard Debian binary tree
56 S.L: 'deb-src' Standard Debian source tree
57 Idx: Debian Source Index
58 Idx: Debian Package Index
59 Idx: Debian Translation Index
60 Idx: Debian dpkg status file
61 Idx: EDSP scenario file\n''')
62 self.exit()
63 return
64
65 def do_help(self):
66 self.write('''apt 1.0.9.8.1 for amd64 compiled on Jun 10 2015 09:42:06
67 Usage: apt-get [options] command
68 apt-get [options] install|remove pkg1 [pkg2 ...]
69 apt-get [options] source pkg1 [pkg2 ...]
70
71 apt-get is a simple command line interface for downloading and
72 installing packages. The most frequently used commands are update
73 and install.
74
75 Commands:
76 update - Retrieve new lists of packages
77 upgrade - Perform an upgrade
78 install - Install new packages (pkg is libc6 not libc6.deb)
79 remove - Remove packages
80 autoremove - Remove automatically all unused packages
81 purge - Remove packages and config files
82 source - Download source archives
83 build-dep - Configure build-dependencies for source packages
84 dist-upgrade - Distribution upgrade, see apt-get(8)
85 dselect-upgrade - Follow dselect selections
86 clean - Erase downloaded archive files
87 autoclean - Erase old downloaded archive files
88 check - Verify that there are no broken dependencies
89 changelog - Download and display the changelog for the given package
90 download - Download the binary package into the current directory
91
92 Options:
93 -h This help text.
94 -q Loggable output - no progress indicator
95 -qq No output except for errors
96 -d Download only - do NOT install or unpack archives
97 -s No-act. Perform ordering simulation
98 -y Assume Yes to all queries and do not prompt
99 -f Attempt to correct a system with broken dependencies in place
100 -m Attempt to continue if archives are unlocatable
101 -u Show a list of upgraded packages as well
102 -b Build the source package after fetching it
103 -V Show verbose version numbers
104 -c=? Read this configuration file
105 -o=? Set an arbitrary configuration option, eg -o dir::cache=/tmp
106 See the apt-get(8), sources.list(5) and apt.conf(5) manual
107 pages for more information and options.
108 This APT has Super Cow Powers.\n''')
109 self.exit()
110 return
111
112 @inlineCallbacks
113 def do_install(self, *args):
114 if len(self.args) <= 1:
115 msg = '0 upgraded, 0 newly installed, 0 to remove and {0} not upgraded.\n'
116 self.write(msg.format(random.randint(200, 300)))
117 self.exit()
118 return
119
120 packages = {}
121 for y in [re.sub('[^A-Za-z0-9]', '', x) for x in self.args[1:]]:
122 packages[y] = {
123 'version': '{0}.{1}-{2}'.format(random.choice(0, 1), random.randint(1, 40), random.randint(1, 10)),
124 'size': random.randint(100, 900)
125 }
126 totalsize = sum([packages[x]['size'] for x in packages])
127
128 self.write('Reading package lists... Done\n')
129 self.write('Building dependency tree\n')
130 self.write('Reading state information... Done\n')
131 self.write('The following NEW packages will be installed:\n')
132 self.write(' %s ' % ' '.join(packages) + '\n')
133 self.write('0 upgraded, %d newly installed, 0 to remove and 259 not upgraded.\n' % len(packages))
134 self.write('Need to get %s.2kB of archives.\n' % (totalsize))
135 self.write('After this operation, %skB of additional disk space will be used.\n' % (totalsize * 2.2,))
136 i = 1
137 for p in packages:
138 self.write('Get:%d http://ftp.debian.org stable/main %s %s [%s.2kB]\n' %
139 (i, p, packages[p]['version'], packages[p]['size']))
140 i += 1
141 yield self.sleep(1, 2)
142 self.write('Fetched %s.2kB in 1s (4493B/s)\n' % (totalsize))
143 self.write('Reading package fields... Done\n')
144 yield self.sleep(1, 2)
145 self.write('Reading package status... Done\n')
146 self.write('(Reading database ... 177887 files and directories currently installed.)\n')
147 yield self.sleep(1, 2)
148 for p in packages:
149 self.write('Unpacking %s (from .../archives/%s_%s_i386.deb) ...\n' % (p, p, packages[p]['version']))
150 yield self.sleep(1, 2)
151 self.write('Processing triggers for man-db ...\n')
152 yield self.sleep(2)
153 for p in packages:
154 self.write('Setting up %s (%s) ...\n' % (p, packages[p]['version']))
155 self.fs.mkfile('/usr/bin/%s' % p, 0, 0, random.randint(10000, 90000), 33188)
156 self.protocol.commands['/usr/bin/%s' % p] = \
157 command_faked_package_class_factory.getCommand(p)
158 yield self.sleep(2)
159 self.exit()
160
161 def do_moo(self):
162 self.write(' (__)\n')
163 self.write(' (oo)\n')
164 self.write(' /------\\/\n')
165 self.write(' / | ||\n')
166 self.write(' * /\\---/\\ \n')
167 self.write(' ~~ ~~\n')
168 self.write('...."Have you mooed today?"...\n')
169 self.exit()
170
171 def do_locked(self):
172 self.errorWrite('E: Could not open lock file /var/lib/apt/lists/lock - open (13: Permission denied)\n')
173 self.errorWrite('E: Unable to lock the list directory\n')
174 self.exit()
175
176 commands['/usr/bin/apt-get'] = command_aptget
177 commands['apt-get'] = command_aptget
178
[end of cowrie/commands/apt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cowrie/commands/apt.py b/cowrie/commands/apt.py
--- a/cowrie/commands/apt.py
+++ b/cowrie/commands/apt.py
@@ -120,7 +120,7 @@
packages = {}
for y in [re.sub('[^A-Za-z0-9]', '', x) for x in self.args[1:]]:
packages[y] = {
- 'version': '{0}.{1}-{2}'.format(random.choice(0, 1), random.randint(1, 40), random.randint(1, 10)),
+ 'version': '{0}.{1}-{2}'.format(random.choice([0, 1]), random.randint(1, 40), random.randint(1, 10)),
'size': random.randint(100, 900)
}
totalsize = sum([packages[x]['size'] for x in packages])
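The whole fix is the argument of `random.choice`: it takes one sequence, not separate values, so `random.choice(0, 1)` raises the `TypeError` shown in the traceback and the fake `apt-get install` never produces output. A small standalone demonstration (the seeded generator is only there to keep the run deterministic):

```
import random

rng = random.Random(42)  # fixed seed purely for a reproducible demo

try:
    random.choice(0, 1)  # the buggy call: two positional arguments
except TypeError as exc:
    print("broken:", exc)

# choice() wants a single sequence to pick from:
major = rng.choice([0, 1])
version = "{0}.{1}-{2}".format(major, rng.randint(1, 40), rng.randint(1, 10))
print("fake package version:", version)
```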
| {"golden_diff": "diff --git a/cowrie/commands/apt.py b/cowrie/commands/apt.py\n--- a/cowrie/commands/apt.py\n+++ b/cowrie/commands/apt.py\n@@ -120,7 +120,7 @@\n packages = {}\n for y in [re.sub('[^A-Za-z0-9]', '', x) for x in self.args[1:]]:\n packages[y] = {\n- 'version': '{0}.{1}-{2}'.format(random.choice(0, 1), random.randint(1, 40), random.randint(1, 10)),\n+ 'version': '{0}.{1}-{2}'.format(random.choice([0, 1]), random.randint(1, 40), random.randint(1, 10)),\n 'size': random.randint(100, 900)\n }\n totalsize = sum([packages[x]['size'] for x in packages])\n", "issue": "TypeError in command \"apt-get install\"\n**Describe the bug**\r\n\r\nWhen using the command \"apt-get install XY\" while logged in per SSH in the cowrie honeypot, nothing happens.\r\nIf you look into the logs, the requested software couldn't be installed but a TypeError is thrown:\r\n\r\n```Traceback (most recent call last):\r\n\t File \"/home/cowrie/github/cowrie/cowrie/shell/protocol.py\", line 359, in call_command\r\n\t HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)\r\n\t File \"/home/cowrie/github/cowrie/cowrie/shell/protocol.py\", line 216, in call_command\r\n\t obj.start()\r\n\t File \"/home/cowrie/github/cowrie/cowrie/commands/apt.py\", line 36, in start\r\n\t self.do_install()\r\n\t File \"/home/cowrie/github/cowrie/cowrie-env/lib/python2.7/site-packages/twisted/internet/defer.py\", line 1532, in unwindGenerator\r\n\t return _inlineCallbacks(None, gen, Deferred())\r\n\t--- <exception caught here> ---\r\n\t File \"/home/cowrie/github/cowrie/cowrie-env/lib/python2.7/site-packages/twisted/internet/defer.py\", line 1386, in _inlineCallbacks\r\n\t result = g.send(result)\r\n\t File \"/home/cowrie/github/cowrie/cowrie/commands/apt.py\", line 123, in do_install\r\n\t 'version': '{0}.{1}-{2}'.format(random.choice(0, 1), random.randint(1, 40), random.randint(1, 10)),\r\n\texceptions.TypeError: choice() takes exactly 2 arguments (3 given)\r\n\r\n ```\r\n\r\nI investigated the source and saw what is actually wrong:\r\n```\r\n'version': '{0}.{1}-{2}'.format(random.choice(0, 1), random.randint(1, 40), random.randint(1, 10)),\r\n```\r\nin which `random.choice(0, 1)` requires a sequence, so `random.choice([0, 1])` should work.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Log in a cowrie honeypot per ssh: ssh -p 2222 root@honeypot\r\n2. Try \"sudo apt-get install vim\"\r\n3. See the error in the cowrie logs on your machine: cowrie/logs/cowrie.log\r\n\r\n**Expected behavior**\r\n\r\nSee some common apt-get messages, like \"Reading package lists... Done\\n\", e.g. 
simulating to the attacker the command worked.\r\n\r\n**Server (please complete the following information):**\r\n - OS: CentOS 7, x86_64\r\n - Python: Python 2.7.5 and Python 3.6.5\r\n\r\n**Additional context**\r\n-\r\n\n", "before_files": [{"content": "# Copyright (c) 2009 Upi Tamminen <[email protected]>\n# See the COPYRIGHT file for more information\n\nfrom __future__ import division, absolute_import\n\nimport random\nimport re\n\nfrom twisted.internet import reactor, defer\nfrom twisted.internet.defer import inlineCallbacks\n\nfrom cowrie.shell.command import HoneyPotCommand\n\ncommands = {}\n\nclass command_faked_package_class_factory(object):\n @staticmethod\n def getCommand(name):\n class command_faked_installation(HoneyPotCommand):\n def call(self):\n self.write(b\"%s: Segmentation fault\\n\" % name)\n return command_faked_installation\n\nclass command_aptget(HoneyPotCommand):\n \"\"\"\n apt-get fake\n suppports only the 'install PACKAGE' command & 'moo'.\n Any installed packages, places a 'Segfault' at /usr/bin/PACKAGE.'''\n \"\"\"\n def start(self):\n if len(self.args) == 0:\n self.do_help()\n elif len(self.args) > 0 and self.args[0] == '-v':\n self.do_version()\n elif len(self.args) > 0 and self.args[0] == 'install':\n self.do_install()\n elif len(self.args) > 0 and self.args[0] == 'moo':\n self.do_moo()\n else:\n self.do_locked()\n\n def sleep(self, time, time2=None):\n d = defer.Deferred()\n if time2:\n time = random.randint(time * 100, time2 * 100) / 100.0\n reactor.callLater(time, d.callback, None)\n return d\n\n def do_version(self):\n self.write('''apt 1.0.9.8.1 for amd64 compiled on Jun 10 2015 09:42:06\nSupported modules:\n*Ver: Standard .deb\n*Pkg: Debian dpkg interface (Priority 30)\n Pkg: Debian APT solver interface (Priority -1000)\n S.L: 'deb' Standard Debian binary tree\n S.L: 'deb-src' Standard Debian source tree\n Idx: Debian Source Index\n Idx: Debian Package Index\n Idx: Debian Translation Index\n Idx: Debian dpkg status file\n Idx: EDSP scenario file\\n''')\n self.exit()\n return\n\n def do_help(self):\n self.write('''apt 1.0.9.8.1 for amd64 compiled on Jun 10 2015 09:42:06\nUsage: apt-get [options] command\n apt-get [options] install|remove pkg1 [pkg2 ...]\n apt-get [options] source pkg1 [pkg2 ...]\n\napt-get is a simple command line interface for downloading and\ninstalling packages. The most frequently used commands are update\nand install.\n\nCommands:\n update - Retrieve new lists of packages\n upgrade - Perform an upgrade\n install - Install new packages (pkg is libc6 not libc6.deb)\n remove - Remove packages\n autoremove - Remove automatically all unused packages\n purge - Remove packages and config files\n source - Download source archives\n build-dep - Configure build-dependencies for source packages\n dist-upgrade - Distribution upgrade, see apt-get(8)\n dselect-upgrade - Follow dselect selections\n clean - Erase downloaded archive files\n autoclean - Erase old downloaded archive files\n check - Verify that there are no broken dependencies\n changelog - Download and display the changelog for the given package\n download - Download the binary package into the current directory\n\nOptions:\n -h This help text.\n -q Loggable output - no progress indicator\n -qq No output except for errors\n -d Download only - do NOT install or unpack archives\n -s No-act. 
Perform ordering simulation\n -y Assume Yes to all queries and do not prompt\n -f Attempt to correct a system with broken dependencies in place\n -m Attempt to continue if archives are unlocatable\n -u Show a list of upgraded packages as well\n -b Build the source package after fetching it\n -V Show verbose version numbers\n -c=? Read this configuration file\n -o=? Set an arbitrary configuration option, eg -o dir::cache=/tmp\nSee the apt-get(8), sources.list(5) and apt.conf(5) manual\npages for more information and options.\n This APT has Super Cow Powers.\\n''')\n self.exit()\n return\n\n @inlineCallbacks\n def do_install(self, *args):\n if len(self.args) <= 1:\n msg = '0 upgraded, 0 newly installed, 0 to remove and {0} not upgraded.\\n'\n self.write(msg.format(random.randint(200, 300)))\n self.exit()\n return\n\n packages = {}\n for y in [re.sub('[^A-Za-z0-9]', '', x) for x in self.args[1:]]:\n packages[y] = {\n 'version': '{0}.{1}-{2}'.format(random.choice(0, 1), random.randint(1, 40), random.randint(1, 10)),\n 'size': random.randint(100, 900)\n }\n totalsize = sum([packages[x]['size'] for x in packages])\n\n self.write('Reading package lists... Done\\n')\n self.write('Building dependency tree\\n')\n self.write('Reading state information... Done\\n')\n self.write('The following NEW packages will be installed:\\n')\n self.write(' %s ' % ' '.join(packages) + '\\n')\n self.write('0 upgraded, %d newly installed, 0 to remove and 259 not upgraded.\\n' % len(packages))\n self.write('Need to get %s.2kB of archives.\\n' % (totalsize))\n self.write('After this operation, %skB of additional disk space will be used.\\n' % (totalsize * 2.2,))\n i = 1\n for p in packages:\n self.write('Get:%d http://ftp.debian.org stable/main %s %s [%s.2kB]\\n' %\n (i, p, packages[p]['version'], packages[p]['size']))\n i += 1\n yield self.sleep(1, 2)\n self.write('Fetched %s.2kB in 1s (4493B/s)\\n' % (totalsize))\n self.write('Reading package fields... Done\\n')\n yield self.sleep(1, 2)\n self.write('Reading package status... Done\\n')\n self.write('(Reading database ... 177887 files and directories currently installed.)\\n')\n yield self.sleep(1, 2)\n for p in packages:\n self.write('Unpacking %s (from .../archives/%s_%s_i386.deb) ...\\n' % (p, p, packages[p]['version']))\n yield self.sleep(1, 2)\n self.write('Processing triggers for man-db ...\\n')\n yield self.sleep(2)\n for p in packages:\n self.write('Setting up %s (%s) ...\\n' % (p, packages[p]['version']))\n self.fs.mkfile('/usr/bin/%s' % p, 0, 0, random.randint(10000, 90000), 33188)\n self.protocol.commands['/usr/bin/%s' % p] = \\\n command_faked_package_class_factory.getCommand(p)\n yield self.sleep(2)\n self.exit()\n\n def do_moo(self):\n self.write(' (__)\\n')\n self.write(' (oo)\\n')\n self.write(' /------\\\\/\\n')\n self.write(' / | ||\\n')\n self.write(' * /\\\\---/\\\\ \\n')\n self.write(' ~~ ~~\\n')\n self.write('....\"Have you mooed today?\"...\\n')\n self.exit()\n\n def do_locked(self):\n self.errorWrite('E: Could not open lock file /var/lib/apt/lists/lock - open (13: Permission denied)\\n')\n self.errorWrite('E: Unable to lock the list directory\\n')\n self.exit()\n\ncommands['/usr/bin/apt-get'] = command_aptget\ncommands['apt-get'] = command_aptget\n", "path": "cowrie/commands/apt.py"}]} | 3,451 | 204 |
gh_patches_debug_11125 | rasdani/github-patches | git_diff | Kinto__kinto-2108 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
memcached cache backend does not really test memcached
@autrilla reported that when he moved Kinto to GCP, the heartbeat reported success for all backends, including cache, even though there is no `memcached`. I think the problem is here:
https://github.com/Kinto/kinto/blob/61494caae2bb8fe342f32f6a5d89f40ed2b62dff/kinto/core/cache/__init__.py#L87-L91
The heartbeat for the cache backend just sends two messages which don't expect responses. For something like memcached which communicates over UDP, these messages can "succeed" by silently going nowhere.
memcached cache backend does not really test memcached
@autrilla reported that when he moved Kinto to GCP, the heartbeat reported success for all backends, including cache, even though there is no `memcached`. I think the problem is here:
https://github.com/Kinto/kinto/blob/61494caae2bb8fe342f32f6a5d89f40ed2b62dff/kinto/core/cache/__init__.py#L87-L91
The heartbeat for the cache backend just sends two messages which don't expect responses. For something like memcached which communicates over UDP, these messages can "succeed" by silently going nowhere.
</issue>
<code>
[start of kinto/core/cache/__init__.py]
1 import logging
2 import random
3
4
5 logger = logging.getLogger(__name__)
6
7
8 _HEARTBEAT_DELETE_RATE = 0.5
9 _HEARTBEAT_KEY = "__heartbeat__"
10 _HEARTBEAT_TTL_SECONDS = 3600
11
12
13 class CacheBase:
14 def __init__(self, *args, **kwargs):
15 self.prefix = kwargs["cache_prefix"]
16 self.max_size_bytes = kwargs.get("cache_max_size_bytes")
17
18 def initialize_schema(self, dry_run=False):
19 """Create every necessary objects (like tables or indices) in the
20 backend.
21
22 This is executed when the ``kinto migrate`` command is run.
23
24 :param bool dry_run: simulate instead of executing the operations.
25 """
26 raise NotImplementedError
27
28 def flush(self):
29 """Delete every values."""
30 raise NotImplementedError
31
32 def ttl(self, key):
33 """Obtain the expiration value of the specified `key`.
34
35 :param str key: key
36 :returns: number of seconds or negative if no TTL.
37 :rtype: float
38 """
39 raise NotImplementedError
40
41 def expire(self, key, ttl):
42 """Set the expiration value `ttl` for the specified `key`.
43
44 :param str key: key
45 :param float ttl: number of seconds
46 """
47 raise NotImplementedError
48
49 def set(self, key, value, ttl):
50 """Store a value with the specified `key`.
51
52 :param str key: key
53 :param str value: value to store
54 :param float ttl: expire after number of seconds
55 """
56 raise NotImplementedError
57
58 def get(self, key):
59 """Obtain the value of the specified `key`.
60
61 :param str key: key
62 :returns: the stored value or None if missing.
63 :rtype: str
64 """
65 raise NotImplementedError
66
67 def delete(self, key):
68 """Delete the value of the specified `key`.
69
70 :param str key: key
71 """
72 raise NotImplementedError
73
74
75 def heartbeat(backend):
76 def ping(request):
77 """Test that cache backend is operational.
78
79 :param request: current request object
80 :type request: :class:`~pyramid:pyramid.request.Request`
81 :returns: ``True`` is everything is ok, ``False`` otherwise.
82 :rtype: bool
83 """
84 # No specific case for readonly mode because the cache should
85 # continue to work in that mode.
86 try:
87 if random.SystemRandom().random() < _HEARTBEAT_DELETE_RATE:
88 backend.delete(_HEARTBEAT_KEY)
89 else:
90 backend.set(_HEARTBEAT_KEY, "alive", _HEARTBEAT_TTL_SECONDS)
91 return True
92 except Exception:
93 logger.exception("Heartbeat Failure")
94 return False
95
96 return ping
97
[end of kinto/core/cache/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/core/cache/__init__.py b/kinto/core/cache/__init__.py
--- a/kinto/core/cache/__init__.py
+++ b/kinto/core/cache/__init__.py
@@ -86,9 +86,9 @@
try:
if random.SystemRandom().random() < _HEARTBEAT_DELETE_RATE:
backend.delete(_HEARTBEAT_KEY)
- else:
- backend.set(_HEARTBEAT_KEY, "alive", _HEARTBEAT_TTL_SECONDS)
- return True
+ return backend.get(_HEARTBEAT_KEY) is None
+ backend.set(_HEARTBEAT_KEY, "alive", _HEARTBEAT_TTL_SECONDS)
+ return backend.get(_HEARTBEAT_KEY) == "alive"
except Exception:
logger.exception("Heartbeat Failure")
return False
| {"golden_diff": "diff --git a/kinto/core/cache/__init__.py b/kinto/core/cache/__init__.py\n--- a/kinto/core/cache/__init__.py\n+++ b/kinto/core/cache/__init__.py\n@@ -86,9 +86,9 @@\n try:\n if random.SystemRandom().random() < _HEARTBEAT_DELETE_RATE:\n backend.delete(_HEARTBEAT_KEY)\n- else:\n- backend.set(_HEARTBEAT_KEY, \"alive\", _HEARTBEAT_TTL_SECONDS)\n- return True\n+ return backend.get(_HEARTBEAT_KEY) is None\n+ backend.set(_HEARTBEAT_KEY, \"alive\", _HEARTBEAT_TTL_SECONDS)\n+ return backend.get(_HEARTBEAT_KEY) == \"alive\"\n except Exception:\n logger.exception(\"Heartbeat Failure\")\n return False\n", "issue": "memcached cache backend does not really test memcached\n@autrilla reported that when he moved Kinto to GCP, the heartbeat reported success for all backends, including cache, even though there is no `memcached`. I think the problem is here:\r\n\r\nhttps://github.com/Kinto/kinto/blob/61494caae2bb8fe342f32f6a5d89f40ed2b62dff/kinto/core/cache/__init__.py#L87-L91\r\n\r\nThe heartbeat for the cache backend just sends two messages which don't expect responses. For something like memcached which communicates over UDP, these messages can \"succeed\" by silently going nowhere.\nmemcached cache backend does not really test memcached\n@autrilla reported that when he moved Kinto to GCP, the heartbeat reported success for all backends, including cache, even though there is no `memcached`. I think the problem is here:\r\n\r\nhttps://github.com/Kinto/kinto/blob/61494caae2bb8fe342f32f6a5d89f40ed2b62dff/kinto/core/cache/__init__.py#L87-L91\r\n\r\nThe heartbeat for the cache backend just sends two messages which don't expect responses. For something like memcached which communicates over UDP, these messages can \"succeed\" by silently going nowhere.\n", "before_files": [{"content": "import logging\nimport random\n\n\nlogger = logging.getLogger(__name__)\n\n\n_HEARTBEAT_DELETE_RATE = 0.5\n_HEARTBEAT_KEY = \"__heartbeat__\"\n_HEARTBEAT_TTL_SECONDS = 3600\n\n\nclass CacheBase:\n def __init__(self, *args, **kwargs):\n self.prefix = kwargs[\"cache_prefix\"]\n self.max_size_bytes = kwargs.get(\"cache_max_size_bytes\")\n\n def initialize_schema(self, dry_run=False):\n \"\"\"Create every necessary objects (like tables or indices) in the\n backend.\n\n This is executed when the ``kinto migrate`` command is run.\n\n :param bool dry_run: simulate instead of executing the operations.\n \"\"\"\n raise NotImplementedError\n\n def flush(self):\n \"\"\"Delete every values.\"\"\"\n raise NotImplementedError\n\n def ttl(self, key):\n \"\"\"Obtain the expiration value of the specified `key`.\n\n :param str key: key\n :returns: number of seconds or negative if no TTL.\n :rtype: float\n \"\"\"\n raise NotImplementedError\n\n def expire(self, key, ttl):\n \"\"\"Set the expiration value `ttl` for the specified `key`.\n\n :param str key: key\n :param float ttl: number of seconds\n \"\"\"\n raise NotImplementedError\n\n def set(self, key, value, ttl):\n \"\"\"Store a value with the specified `key`.\n\n :param str key: key\n :param str value: value to store\n :param float ttl: expire after number of seconds\n \"\"\"\n raise NotImplementedError\n\n def get(self, key):\n \"\"\"Obtain the value of the specified `key`.\n\n :param str key: key\n :returns: the stored value or None if missing.\n :rtype: str\n \"\"\"\n raise NotImplementedError\n\n def delete(self, key):\n \"\"\"Delete the value of the specified `key`.\n\n :param str key: key\n \"\"\"\n raise NotImplementedError\n\n\ndef heartbeat(backend):\n def 
ping(request):\n \"\"\"Test that cache backend is operational.\n\n :param request: current request object\n :type request: :class:`~pyramid:pyramid.request.Request`\n :returns: ``True`` is everything is ok, ``False`` otherwise.\n :rtype: bool\n \"\"\"\n # No specific case for readonly mode because the cache should\n # continue to work in that mode.\n try:\n if random.SystemRandom().random() < _HEARTBEAT_DELETE_RATE:\n backend.delete(_HEARTBEAT_KEY)\n else:\n backend.set(_HEARTBEAT_KEY, \"alive\", _HEARTBEAT_TTL_SECONDS)\n return True\n except Exception:\n logger.exception(\"Heartbeat Failure\")\n return False\n\n return ping\n", "path": "kinto/core/cache/__init__.py"}]} | 1,634 | 188 |
gh_patches_debug_27587 | rasdani/github-patches | git_diff | ocadotechnology__aimmo-101 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
In portal/rr, when you are redirected to home, you are redirected to the AI:MMO home
The website now loads the aimmo URLs.
But now each time the website is supposed to redirect you to the portal home, it redirects to the AI:MMO login page.
Probably because both URL patterns are named the same in their respective urls.py files and the website imports both, ending up with the aimmo URLs?
</issue>
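Some background makes the failure mode clearer: Django resolves redirects such as `reverse('home')` or `{% url 'home' %}` purely by URL name, and names share one global pool unless they are namespaced, so whichever urls.py is imported last shadows the other. Two common remedies, sketched with the TemplateView from the listing below (regexes and template names are illustrative, not taken from either project):

from django.conf.urls import url
from django.views.generic import TemplateView

# Option 1: prefix the URL names so the two apps can no longer collide.
# The patch further down takes this route, renaming 'home' to 'aimmo/home'.
urlpatterns = [
    url(r'^$',
        TemplateView.as_view(template_name='players/home.html'),
        name='aimmo/home'),
]

# Option 2 (comment only): include the app's URLconf under a URL namespace
# and reverse with a qualified name such as reverse('aimmo:home'); the exact
# include()/namespace arguments depend on the Django version in use.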
<code>
[start of players/autoconfig.py]
1 # -*- coding: utf-8 -*-
2 # Code for Life
3 #
4 # Copyright (C) 2015, Ocado Innovation Limited
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU Affero General Public License as
8 # published by the Free Software Foundation, either version 3 of the
9 # License, or (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU Affero General Public License for more details.
15 #
16 # You should have received a copy of the GNU Affero General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19 # ADDITIONAL TERMS – Section 7 GNU General Public Licence
20 #
21 # This licence does not grant any right, title or interest in any “Ocado” logos,
22 # trade names or the trademark “Ocado” or any other trademarks or domain names
23 # owned by Ocado Innovation Limited or the Ocado group of companies or any other
24 # distinctive brand features of “Ocado” as may be secured from time to time. You
25 # must not distribute any modification of this program using the trademark
26 # “Ocado” or claim any affiliation or association with Ocado or its employees.
27 #
28 # You are not authorised to use the name Ocado (or any of its trade names) or
29 # the names of any author or contributor in advertising or for publicity purposes
30 # pertaining to the distribution of this program, without the prior written
31 # authorisation of Ocado.
32 #
33 # Any propagation, distribution or conveyance of this program must include this
34 # copyright notice and these terms. You must not misrepresent the origins of this
35 # program; modified versions of the program must be marked as such and not
36 # identified as the original program.
37 '''Players autoconfig'''
38
39 DEFAULT_SETTINGS = {
40 'AUTOCONFIG_INDEX_VIEW': 'home',
41 'STATIC_URL': '/static/',
42 }
43
44 SETTINGS = {
45 'INSTALLED_APPS': [
46 'django.contrib.auth',
47 'django.contrib.messages',
48 'django.contrib.staticfiles',
49 'django_js_reverse',
50 ],
51 'TEMPLATES': [
52 {
53 'BACKEND': 'django.template.backends.django.DjangoTemplates',
54 'APP_DIRS': True,
55 'OPTIONS': {
56 'context_processors': [
57 'django.template.context_processors.debug',
58 'django.template.context_processors.request',
59 'django.contrib.auth.context_processors.auth',
60 'django.contrib.messages.context_processors.messages',
61 ]
62 }
63 }
64 ],
65 'USE_TZ': True,
66 }
67
[end of players/autoconfig.py]
[start of players/urls.py]
1 from django.conf.urls import url
2 from django.views.generic import TemplateView
3 from django.contrib.auth.decorators import login_required
4 from django.contrib.admin.views.decorators import staff_member_required
5
6 from players import views
7
8 urlpatterns = [
9 url(r'^$', staff_member_required(TemplateView.as_view(template_name='players/home.html')), name='home'),
10
11 url(r'^program/$', staff_member_required(login_required(TemplateView.as_view(template_name='players/program.html'))), name='program'),
12 url(r'^watch/$', staff_member_required(views.WatchView.as_view()), name='watch'),
13 url(r'^statistics/$', staff_member_required(TemplateView.as_view(template_name='players/statistics.html')), name='statistics'),
14
15 url(r'^api/code/$', staff_member_required(views.code), name='code'),
16 url(r'^api/games/$', views.games, name='games'),
17
18 url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='js_reverse'), # TODO: Pull request to make django_js_reverse.urls
19 ]
20
[end of players/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/players/autoconfig.py b/players/autoconfig.py
--- a/players/autoconfig.py
+++ b/players/autoconfig.py
@@ -37,7 +37,7 @@
'''Players autoconfig'''
DEFAULT_SETTINGS = {
- 'AUTOCONFIG_INDEX_VIEW': 'home',
+ 'AUTOCONFIG_INDEX_VIEW': 'aimmo/home',
'STATIC_URL': '/static/',
}
diff --git a/players/urls.py b/players/urls.py
--- a/players/urls.py
+++ b/players/urls.py
@@ -6,14 +6,14 @@
from players import views
urlpatterns = [
- url(r'^$', staff_member_required(TemplateView.as_view(template_name='players/home.html')), name='home'),
+ url(r'^$', staff_member_required(TemplateView.as_view(template_name='players/home.html')), name='aimmo/home'),
- url(r'^program/$', staff_member_required(login_required(TemplateView.as_view(template_name='players/program.html'))), name='program'),
- url(r'^watch/$', staff_member_required(views.WatchView.as_view()), name='watch'),
- url(r'^statistics/$', staff_member_required(TemplateView.as_view(template_name='players/statistics.html')), name='statistics'),
+ url(r'^program/$', staff_member_required(login_required(TemplateView.as_view(template_name='players/program.html'))), name='aimmo/program'),
+ url(r'^watch/$', staff_member_required(views.WatchView.as_view()), name='aimmo/watch'),
+ url(r'^statistics/$', staff_member_required(TemplateView.as_view(template_name='players/statistics.html')), name='aimmo/statistics'),
- url(r'^api/code/$', staff_member_required(views.code), name='code'),
- url(r'^api/games/$', views.games, name='games'),
+ url(r'^api/code/$', staff_member_required(views.code), name='aimmo/code'),
+ url(r'^api/games/$', views.games, name='aimmo/games'),
- url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='js_reverse'), # TODO: Pull request to make django_js_reverse.urls
+ url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='aimmo/js_reverse'), # TODO: Pull request to make django_js_reverse.urls
]
| {"golden_diff": "diff --git a/players/autoconfig.py b/players/autoconfig.py\n--- a/players/autoconfig.py\n+++ b/players/autoconfig.py\n@@ -37,7 +37,7 @@\n '''Players autoconfig'''\n \n DEFAULT_SETTINGS = {\n- 'AUTOCONFIG_INDEX_VIEW': 'home',\n+ 'AUTOCONFIG_INDEX_VIEW': 'aimmo/home',\n 'STATIC_URL': '/static/',\n }\n \ndiff --git a/players/urls.py b/players/urls.py\n--- a/players/urls.py\n+++ b/players/urls.py\n@@ -6,14 +6,14 @@\n from players import views\n \n urlpatterns = [\n- url(r'^$', staff_member_required(TemplateView.as_view(template_name='players/home.html')), name='home'),\n+ url(r'^$', staff_member_required(TemplateView.as_view(template_name='players/home.html')), name='aimmo/home'),\n \n- url(r'^program/$', staff_member_required(login_required(TemplateView.as_view(template_name='players/program.html'))), name='program'),\n- url(r'^watch/$', staff_member_required(views.WatchView.as_view()), name='watch'),\n- url(r'^statistics/$', staff_member_required(TemplateView.as_view(template_name='players/statistics.html')), name='statistics'),\n+ url(r'^program/$', staff_member_required(login_required(TemplateView.as_view(template_name='players/program.html'))), name='aimmo/program'),\n+ url(r'^watch/$', staff_member_required(views.WatchView.as_view()), name='aimmo/watch'),\n+ url(r'^statistics/$', staff_member_required(TemplateView.as_view(template_name='players/statistics.html')), name='aimmo/statistics'),\n \n- url(r'^api/code/$', staff_member_required(views.code), name='code'),\n- url(r'^api/games/$', views.games, name='games'),\n+ url(r'^api/code/$', staff_member_required(views.code), name='aimmo/code'),\n+ url(r'^api/games/$', views.games, name='aimmo/games'),\n \n- url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='js_reverse'), # TODO: Pull request to make django_js_reverse.urls\n+ url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='aimmo/js_reverse'), # TODO: Pull request to make django_js_reverse.urls\n ]\n", "issue": "in portal/rr, when you are redirected to home, you are redirected to AI:MMO home\nThe website now loads the aimmo urls.\nBut now each time the website is supposed to redirect you to the portal home, it redirects to the AI:MMO login page.\nProbably because both urls are named the same in their respective urls.py and the website imports both, finishing with aimmo urls?\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2015, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\n'''Players autoconfig'''\n\nDEFAULT_SETTINGS = {\n 'AUTOCONFIG_INDEX_VIEW': 'home',\n 'STATIC_URL': '/static/',\n}\n\nSETTINGS = {\n 'INSTALLED_APPS': [\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django_js_reverse',\n ],\n 'TEMPLATES': [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ]\n }\n }\n ],\n 'USE_TZ': True,\n}\n", "path": "players/autoconfig.py"}, {"content": "from django.conf.urls import url\nfrom django.views.generic import TemplateView\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.admin.views.decorators import staff_member_required\n\nfrom players import views\n\nurlpatterns = [\n url(r'^$', staff_member_required(TemplateView.as_view(template_name='players/home.html')), name='home'),\n\n url(r'^program/$', staff_member_required(login_required(TemplateView.as_view(template_name='players/program.html'))), name='program'),\n url(r'^watch/$', staff_member_required(views.WatchView.as_view()), name='watch'),\n url(r'^statistics/$', staff_member_required(TemplateView.as_view(template_name='players/statistics.html')), name='statistics'),\n\n url(r'^api/code/$', staff_member_required(views.code), name='code'),\n url(r'^api/games/$', views.games, name='games'),\n\n url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='js_reverse'), # TODO: Pull request to make django_js_reverse.urls\n]\n", "path": "players/urls.py"}]} | 1,573 | 502 |
gh_patches_debug_9549 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-331 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Document "text" vs. "bytes" values for Entities
Docs need to explain clearly how users can ensure that the values they set onto entities get mapped as they expect: "text" values should be mapped to/from 'string_value' entries on the protobufs, and "bytes" values should be mapped to/from 'blob_value' entries.
We are going to have to educate folks that the native 'str' is "bytes" on Python2, but "text" on Python3, and mandate that they be explicit if they want to control how such values get stored.
Document "text" vs. "bytes" values for Entities
Docs need to explain clearly how users can ensure that the values they set onto entities get mapped as they expect: "text" values should be mapped to/from 'string_value' entries on the protobufs, and "bytes" values should be mapped to/from 'blob_value' entries.
We are going to have to educate folks that the native 'str' is "bytes" on Python2, but "text" on Python3, and mandate that they be explicit if they want to control how such values get stored.
</issue>
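The rule the docs need to state is easiest to see in code. A hedged sketch of how a caller stays explicit about which protobuf field a value lands in (text to the string value, bytes to the blob value, as the issue describes); the entity setup follows the `dataset.entity()` pattern from the docstrings below, and the kind and property names are made up:

# 'dataset' is obtained the usual way; 'ExampleKind' is illustrative.
entity = dataset.entity('ExampleKind')

# "text" (unicode on Python 2, str on Python 3) is stored as a string value.
entity['title'] = u'Hello, world'

# "bytes" (str on Python 2, bytes on Python 3) is stored as a blob value.
entity['thumbnail'] = b'\x89PNG\r\n'

# A bare literal like 'hello' is bytes on Python 2 but text on Python 3,
# so callers who care which field is used should write u'' or b'' explicitly.
entity.save()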
<code>
[start of gcloud/datastore/entity.py]
1 """Class for representing a single entity in the Cloud Datastore.
2
3 Entities are akin to rows in a relational database,
4 storing the actual instance of data.
5
6 Each entity is officially represented with
7 a :class:`gcloud.datastore.key.Key` class,
8 however it is possible that you might create
9 an Entity with only a partial Key
10 (that is, a Key with a Kind,
11 and possibly a parent, but without an ID).
12
13 Entities in this API act like dictionaries
14 with extras built in that allow you to
15 delete or persist the data stored on the entity.
16 """
17
18 from gcloud.datastore import datastore_v1_pb2 as datastore_pb
19 from gcloud.datastore.key import Key
20
21
22 class NoKey(RuntimeError):
23 """Exception raised by Entity methods which require a key."""
24
25
26 class NoDataset(RuntimeError):
27 """Exception raised by Entity methods which require a dataset."""
28
29
30 class Entity(dict):
31 """:type dataset: :class:`gcloud.datastore.dataset.Dataset`
32 :param dataset: The dataset in which this entity belongs.
33
34 :type kind: string
35 :param kind: The kind of entity this is, akin to a table name in a
36 relational database.
37
38 Entities are mutable and act like a subclass of a dictionary.
39 This means you could take an existing entity and change the key
40 to duplicate the object.
41
42 This can be used on its own, however it is likely easier to use
43 the shortcut methods provided by :class:`gcloud.datastore.dataset.Dataset`
44 such as:
45
46 - :func:`gcloud.datastore.dataset.Dataset.entity` to create a new entity.
47
48 >>> dataset.entity('MyEntityKind')
49 <Entity[{'kind': 'MyEntityKind'}] {}>
50
51 - :func:`gcloud.datastore.dataset.Dataset.get_entity`
52 to retrieve an existing entity.
53
54 >>> dataset.get_entity(key)
55 <Entity[{'kind': 'EntityKind', id: 1234}] {'property': 'value'}>
56
57 You can the set values on the entity
58 just like you would on any other dictionary.
59
60 >>> entity['age'] = 20
61 >>> entity['name'] = 'JJ'
62 >>> entity
63 <Entity[{'kind': 'EntityKind', id: 1234}] {'age': 20, 'name': 'JJ'}>
64
65 And you can cast an entity to a regular Python dictionary
66 with the `dict` builtin:
67
68 >>> dict(entity)
69 {'age': 20, 'name': 'JJ'}
70
71 """
72
73 def __init__(self, dataset=None, kind=None):
74 super(Entity, self).__init__()
75 self._dataset = dataset
76 if kind:
77 self._key = Key().kind(kind)
78 else:
79 self._key = None
80
81 def dataset(self):
82 """Get the :class:`.dataset.Dataset` in which this entity belongs.
83
84 :rtype: :class:`gcloud.datastore.dataset.Dataset`
85 :returns: The Dataset containing the entity if there is a key,
86 else None.
87
88 .. note::
89 This is based on the :class:`gcloud.datastore.key.Key` set on the
90 entity. That means that if you have no key set, the dataset might
91 be `None`. It also means that if you change the key on the entity,
92 this will refer to that key's dataset.
93 """
94 return self._dataset
95
96 def key(self, key=None):
97 """Get or set the :class:`.datastore.key.Key` on the current entity.
98
99 :type key: :class:`glcouddatastore.key.Key`
100 :param key: The key you want to set on the entity.
101
102 :rtype: :class:`gcloud.datastore.key.Key` or :class:`Entity`.
103 :returns: Either the current key (on get) or the current
104 object (on set).
105
106 >>> entity.key(my_other_key) # This returns the original entity.
107 <Entity[{'kind': 'OtherKeyKind', 'id': 1234}] {'property': 'value'}>
108 >>> entity.key() # This returns the key.
109 <Key[{'kind': 'OtherKeyKind', 'id': 1234}]>
110 """
111
112 if key is not None:
113 self._key = key
114 return self
115 else:
116 return self._key
117
118 def kind(self):
119 """Get the kind of the current entity.
120
121 .. note::
122 This relies entirely on
123 the :class:`gcloud.datastore.key.Key`
124 set on the entity.
125 That means that we're not storing the kind of the entity at all,
126 just the properties and a pointer to a Key
127 which knows its Kind.
128 """
129
130 if self._key:
131 return self._key.kind()
132
133 @classmethod
134 def from_key(cls, key, dataset=None):
135 """Create entity based on :class:`.datastore.key.Key`.
136
137 .. note::
138 This is a factory method.
139
140 :type key: :class:`gcloud.datastore.key.Key`
141 :param key: The key for the entity.
142
143 :returns: The :class:`Entity` derived from the
144 :class:`gcloud.datastore.key.Key`.
145 """
146
147 return cls(dataset).key(key)
148
149 @property
150 def _must_key(self):
151 """Return our key, or raise NoKey if not set.
152
153 :rtype: :class:`gcloud.datastore.key.Key`.
154 :returns: our key
155 :raises: NoKey if key is None
156 """
157 if self._key is None:
158 raise NoKey()
159 return self._key
160
161 @property
162 def _must_dataset(self):
163 """Return our dataset, or raise NoDataset if not set.
164
165 :rtype: :class:`gcloud.datastore.key.Key`.
166 :returns: our key
167 :raises: NoDataset if key is None
168 """
169 if self._dataset is None:
170 raise NoDataset()
171 return self._dataset
172
173 def reload(self):
174 """Reloads the contents of this entity from the datastore.
175
176 This method takes the :class:`gcloud.datastore.key.Key`, loads all
177 properties from the Cloud Datastore, and sets the updated properties on
178 the current object.
179
180 .. warning::
181 This will override any existing properties if a different value
182 exists remotely, however it will *not* override any properties that
183 exist only locally.
184 """
185 key = self._must_key
186 dataset = self._must_dataset
187 entity = dataset.get_entity(key.to_protobuf())
188
189 if entity:
190 self.update(entity)
191 return self
192
193 def save(self):
194 """Save the entity in the Cloud Datastore.
195
196 .. note::
197 Any existing properties for the entity will be replaced by those
198 currently set on this instance. Already-stored properties which do
199 not correspond to keys set on this instance will be removed from
200 the datastore.
201
202 .. note::
203 Property values which are "text" ('unicode' in Python2, 'str' in
204 Python3) map to 'string_value' in the datastore; values which are
205 "bytes" ('str' in Python2, 'bytes' in Python3) map to 'blob_value'.
206
207 :rtype: :class:`gcloud.datastore.entity.Entity`
208 :returns: The entity with a possibly updated Key.
209 """
210 key = self._must_key
211 dataset = self._must_dataset
212 connection = dataset.connection()
213 key_pb = connection.save_entity(
214 dataset_id=dataset.id(),
215 key_pb=key.to_protobuf(),
216 properties=dict(self))
217
218 # If we are in a transaction and the current entity needs an
219 # automatically assigned ID, tell the transaction where to put that.
220 transaction = connection.transaction()
221 if transaction and key.is_partial():
222 transaction.add_auto_id_entity(self)
223
224 if isinstance(key_pb, datastore_pb.Key):
225 # Update the path (which may have been altered).
226 # NOTE: The underlying namespace can't have changed in a save().
227 # The value of the dataset ID may have changed from implicit
228 # (i.e. None, with the ID implied from the dataset.Dataset
229 # object associated with the Entity/Key), but if it was
230 # implicit before the save() we leave it as implicit.
231 path = []
232 for element in key_pb.path_element:
233 key_part = {}
234 for descriptor, value in element._fields.items():
235 key_part[descriptor.name] = value
236 path.append(key_part)
237 self._key = key.path(path)
238
239 return self
240
241 def delete(self):
242 """Delete the entity in the Cloud Datastore.
243
244 .. note::
245 This is based entirely off of the :class:`gcloud.datastore.key.Key`
246 set on the entity. Whatever is stored remotely using the key on the
247 entity will be deleted.
248 """
249 key = self._must_key
250 dataset = self._must_dataset
251 dataset.connection().delete_entities(
252 dataset_id=dataset.id(),
253 key_pbs=[key.to_protobuf()],
254 )
255
256 def __repr__(self):
257 if self._key:
258 return '<Entity%s %s>' % (self._key.path(),
259 super(Entity, self).__repr__())
260 else:
261 return '<Entity %s>' % (super(Entity, self).__repr__())
262
[end of gcloud/datastore/entity.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gcloud/datastore/entity.py b/gcloud/datastore/entity.py
--- a/gcloud/datastore/entity.py
+++ b/gcloud/datastore/entity.py
@@ -68,6 +68,16 @@
>>> dict(entity)
{'age': 20, 'name': 'JJ'}
+ .. note::
+
+ When saving an entity to the backend, values which are "text"
+ ('unicode' in Python2, 'str' in Python3) will be saved using
+ the 'text_value' field, after being encoded to UTF-8. When
+ retrieved from the back-end, such values will be decoded to "text"
+ again. Values which are "bytes" ('str' in Python2, 'bytes' in
+ Python3), will be saved using the 'blob_value' field, without
+ any decoding / encoding step.
+
"""
def __init__(self, dataset=None, kind=None):
| {"golden_diff": "diff --git a/gcloud/datastore/entity.py b/gcloud/datastore/entity.py\n--- a/gcloud/datastore/entity.py\n+++ b/gcloud/datastore/entity.py\n@@ -68,6 +68,16 @@\n >>> dict(entity)\n {'age': 20, 'name': 'JJ'}\n \n+ .. note::\n+\n+ When saving an entity to the backend, values which are \"text\"\n+ ('unicode' in Python2, 'str' in Python3) will be saved using\n+ the 'text_value' field, after being encoded to UTF-8. When\n+ retrieved from the back-end, such values will be decoded to \"text\"\n+ again. Values which are \"bytes\" ('str' in Python2, 'bytes' in\n+ Python3), will be saved using the 'blob_value' field, without\n+ any decoding / encoding step.\n+\n \"\"\"\n \n def __init__(self, dataset=None, kind=None):\n", "issue": "Document \"text\" vs. \"bytes\" values for Entities\nDocs need to explain clearly how users can ensure that the values they set onto entities get mapped as they expect: \"text\" values should be mapped to/from 'string_value' entries on the protobufs, and \"bytes\" values should be mapped to/from 'blob_value' entries.\n\nWe are to have to educate folks that the native 'str' is \"bytes\" on Python2, but \"text\" on Python3, and mandate that they be explicit if they want to control how such values get stored.\n\nDocument \"text\" vs. \"bytes\" values for Entities\nDocs need to explain clearly how users can ensure that the values they set onto entities get mapped as they expect: \"text\" values should be mapped to/from 'string_value' entries on the protobufs, and \"bytes\" values should be mapped to/from 'blob_value' entries.\n\nWe are to have to educate folks that the native 'str' is \"bytes\" on Python2, but \"text\" on Python3, and mandate that they be explicit if they want to control how such values get stored.\n\n", "before_files": [{"content": "\"\"\"Class for representing a single entity in the Cloud Datastore.\n\nEntities are akin to rows in a relational database,\nstoring the actual instance of data.\n\nEach entity is officially represented with\na :class:`gcloud.datastore.key.Key` class,\nhowever it is possible that you might create\nan Entity with only a partial Key\n(that is, a Key with a Kind,\nand possibly a parent, but without an ID).\n\nEntities in this API act like dictionaries\nwith extras built in that allow you to\ndelete or persist the data stored on the entity.\n\"\"\"\n\nfrom gcloud.datastore import datastore_v1_pb2 as datastore_pb\nfrom gcloud.datastore.key import Key\n\n\nclass NoKey(RuntimeError):\n \"\"\"Exception raised by Entity methods which require a key.\"\"\"\n\n\nclass NoDataset(RuntimeError):\n \"\"\"Exception raised by Entity methods which require a dataset.\"\"\"\n\n\nclass Entity(dict):\n \"\"\":type dataset: :class:`gcloud.datastore.dataset.Dataset`\n :param dataset: The dataset in which this entity belongs.\n\n :type kind: string\n :param kind: The kind of entity this is, akin to a table name in a\n relational database.\n\n Entities are mutable and act like a subclass of a dictionary.\n This means you could take an existing entity and change the key\n to duplicate the object.\n\n This can be used on its own, however it is likely easier to use\n the shortcut methods provided by :class:`gcloud.datastore.dataset.Dataset`\n such as:\n\n - :func:`gcloud.datastore.dataset.Dataset.entity` to create a new entity.\n\n >>> dataset.entity('MyEntityKind')\n <Entity[{'kind': 'MyEntityKind'}] {}>\n\n - :func:`gcloud.datastore.dataset.Dataset.get_entity`\n to retrieve an existing entity.\n\n >>> dataset.get_entity(key)\n 
<Entity[{'kind': 'EntityKind', id: 1234}] {'property': 'value'}>\n\n You can the set values on the entity\n just like you would on any other dictionary.\n\n >>> entity['age'] = 20\n >>> entity['name'] = 'JJ'\n >>> entity\n <Entity[{'kind': 'EntityKind', id: 1234}] {'age': 20, 'name': 'JJ'}>\n\n And you can cast an entity to a regular Python dictionary\n with the `dict` builtin:\n\n >>> dict(entity)\n {'age': 20, 'name': 'JJ'}\n\n \"\"\"\n\n def __init__(self, dataset=None, kind=None):\n super(Entity, self).__init__()\n self._dataset = dataset\n if kind:\n self._key = Key().kind(kind)\n else:\n self._key = None\n\n def dataset(self):\n \"\"\"Get the :class:`.dataset.Dataset` in which this entity belongs.\n\n :rtype: :class:`gcloud.datastore.dataset.Dataset`\n :returns: The Dataset containing the entity if there is a key,\n else None.\n\n .. note::\n This is based on the :class:`gcloud.datastore.key.Key` set on the\n entity. That means that if you have no key set, the dataset might\n be `None`. It also means that if you change the key on the entity,\n this will refer to that key's dataset.\n \"\"\"\n return self._dataset\n\n def key(self, key=None):\n \"\"\"Get or set the :class:`.datastore.key.Key` on the current entity.\n\n :type key: :class:`glcouddatastore.key.Key`\n :param key: The key you want to set on the entity.\n\n :rtype: :class:`gcloud.datastore.key.Key` or :class:`Entity`.\n :returns: Either the current key (on get) or the current\n object (on set).\n\n >>> entity.key(my_other_key) # This returns the original entity.\n <Entity[{'kind': 'OtherKeyKind', 'id': 1234}] {'property': 'value'}>\n >>> entity.key() # This returns the key.\n <Key[{'kind': 'OtherKeyKind', 'id': 1234}]>\n \"\"\"\n\n if key is not None:\n self._key = key\n return self\n else:\n return self._key\n\n def kind(self):\n \"\"\"Get the kind of the current entity.\n\n .. note::\n This relies entirely on\n the :class:`gcloud.datastore.key.Key`\n set on the entity.\n That means that we're not storing the kind of the entity at all,\n just the properties and a pointer to a Key\n which knows its Kind.\n \"\"\"\n\n if self._key:\n return self._key.kind()\n\n @classmethod\n def from_key(cls, key, dataset=None):\n \"\"\"Create entity based on :class:`.datastore.key.Key`.\n\n .. note::\n This is a factory method.\n\n :type key: :class:`gcloud.datastore.key.Key`\n :param key: The key for the entity.\n\n :returns: The :class:`Entity` derived from the\n :class:`gcloud.datastore.key.Key`.\n \"\"\"\n\n return cls(dataset).key(key)\n\n @property\n def _must_key(self):\n \"\"\"Return our key, or raise NoKey if not set.\n\n :rtype: :class:`gcloud.datastore.key.Key`.\n :returns: our key\n :raises: NoKey if key is None\n \"\"\"\n if self._key is None:\n raise NoKey()\n return self._key\n\n @property\n def _must_dataset(self):\n \"\"\"Return our dataset, or raise NoDataset if not set.\n\n :rtype: :class:`gcloud.datastore.key.Key`.\n :returns: our key\n :raises: NoDataset if key is None\n \"\"\"\n if self._dataset is None:\n raise NoDataset()\n return self._dataset\n\n def reload(self):\n \"\"\"Reloads the contents of this entity from the datastore.\n\n This method takes the :class:`gcloud.datastore.key.Key`, loads all\n properties from the Cloud Datastore, and sets the updated properties on\n the current object.\n\n .. 
warning::\n This will override any existing properties if a different value\n exists remotely, however it will *not* override any properties that\n exist only locally.\n \"\"\"\n key = self._must_key\n dataset = self._must_dataset\n entity = dataset.get_entity(key.to_protobuf())\n\n if entity:\n self.update(entity)\n return self\n\n def save(self):\n \"\"\"Save the entity in the Cloud Datastore.\n\n .. note::\n Any existing properties for the entity will be replaced by those\n currently set on this instance. Already-stored properties which do\n not correspond to keys set on this instance will be removed from\n the datastore.\n\n .. note::\n Property values which are \"text\" ('unicode' in Python2, 'str' in\n Python3) map to 'string_value' in the datastore; values which are\n \"bytes\" ('str' in Python2, 'bytes' in Python3) map to 'blob_value'.\n\n :rtype: :class:`gcloud.datastore.entity.Entity`\n :returns: The entity with a possibly updated Key.\n \"\"\"\n key = self._must_key\n dataset = self._must_dataset\n connection = dataset.connection()\n key_pb = connection.save_entity(\n dataset_id=dataset.id(),\n key_pb=key.to_protobuf(),\n properties=dict(self))\n\n # If we are in a transaction and the current entity needs an\n # automatically assigned ID, tell the transaction where to put that.\n transaction = connection.transaction()\n if transaction and key.is_partial():\n transaction.add_auto_id_entity(self)\n\n if isinstance(key_pb, datastore_pb.Key):\n # Update the path (which may have been altered).\n # NOTE: The underlying namespace can't have changed in a save().\n # The value of the dataset ID may have changed from implicit\n # (i.e. None, with the ID implied from the dataset.Dataset\n # object associated with the Entity/Key), but if it was\n # implicit before the save() we leave it as implicit.\n path = []\n for element in key_pb.path_element:\n key_part = {}\n for descriptor, value in element._fields.items():\n key_part[descriptor.name] = value\n path.append(key_part)\n self._key = key.path(path)\n\n return self\n\n def delete(self):\n \"\"\"Delete the entity in the Cloud Datastore.\n\n .. note::\n This is based entirely off of the :class:`gcloud.datastore.key.Key`\n set on the entity. Whatever is stored remotely using the key on the\n entity will be deleted.\n \"\"\"\n key = self._must_key\n dataset = self._must_dataset\n dataset.connection().delete_entities(\n dataset_id=dataset.id(),\n key_pbs=[key.to_protobuf()],\n )\n\n def __repr__(self):\n if self._key:\n return '<Entity%s %s>' % (self._key.path(),\n super(Entity, self).__repr__())\n else:\n return '<Entity %s>' % (super(Entity, self).__repr__())\n", "path": "gcloud/datastore/entity.py"}]} | 3,485 | 215 |
gh_patches_debug_13745 | rasdani/github-patches | git_diff | rasterio__rasterio-1105 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove dataset's ul method
It's been deprecated since June 2016 (0.36).
</issue>
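For downstream users the removal is mechanical, because `xy()` already covers the old behaviour through its `offset` argument, as the docstrings below show. A small migration sketch (the file path is a placeholder):

import rasterio

with rasterio.open('example.tif') as src:
    # Before (deprecated since 0.36, removed by this change):
    #   x, y = src.ul(row, col)
    # After: upper-left corner of pixel (0, 0)
    x, y = src.xy(0, 0, offset='ul')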
<code>
[start of rasterio/io.py]
1 """Classes capable of reading and writing datasets
2
3 Instances of these classes are called dataset objects.
4 """
5
6
7 import logging
8 import math
9 import warnings
10
11 from rasterio._base import (
12 get_dataset_driver, driver_can_create, driver_can_create_copy)
13 from rasterio._io import (
14 DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase,
15 MemoryFileBase)
16 from rasterio import windows
17 from rasterio.enums import Resampling
18 from rasterio.env import ensure_env
19 from rasterio.errors import RasterioDeprecationWarning
20 from rasterio.transform import guard_transform, xy, rowcol
21
22
23 log = logging.getLogger(__name__)
24
25
26 class TransformMethodsMixin(object):
27 """Mixin providing methods for calculations related
28 to transforming between rows and columns of the raster
29 array and the coordinates.
30
31 These methods are wrappers for the functionality in
32 `rasterio.transform` module.
33
34 A subclass with this mixin MUST provide a `transform`
35 property.
36 """
37
38 def xy(self, row, col, offset="center"):
39 """Returns the coordinates ``(x, y)`` of a pixel at `row` and `col`.
40 The pixel's center is returned by default, but a corner can be returned
41 by setting `offset` to one of `ul, ur, ll, lr`.
42
43 Parameters
44 ----------
45 row : int
46 Pixel row.
47 col : int
48 Pixel column.
49 offset : str, optional
50 Determines if the returned coordinates are for the center of the
51 pixel or for a corner.
52
53 Returns
54 -------
55 tuple
56 ``(x, y)``
57 """
58 return xy(self.transform, row, col, offset=offset)
59
60 def ul(self, row, col):
61 """Returns the coordinates (x, y) of the upper left corner of a
62 pixel at `row` and `col` in the units of the dataset's
63 coordinate reference system.
64
65 Deprecated; Use `xy(row, col, offset='ul')` instead.
66 """
67 warnings.warn("ul method is deprecated. Use xy(row, col, offset='ul')",
68 DeprecationWarning)
69 return xy(self.transform, row, col, offset='ul')
70
71 def index(self, x, y, op=math.floor, precision=6):
72 """
73 Returns the (row, col) index of the pixel containing (x, y) given a
74 coordinate reference system.
75
76 Use an epsilon, magnitude determined by the precision parameter
77 and sign determined by the op function:
78 positive for floor, negative for ceil.
79
80 Parameters
81 ----------
82 x : float
83 x value in coordinate reference system
84 y : float
85 y value in coordinate reference system
86 op : function, optional (default: math.floor)
87 Function to convert fractional pixels to whole numbers (floor,
88 ceiling, round)
89 precision : int, optional (default: 6)
90 Decimal places of precision in indexing, as in `round()`.
91
92 Returns
93 -------
94 tuple
95 (row index, col index)
96 """
97 return rowcol(self.transform, x, y, op=op, precision=precision)
98
99
100 class WindowMethodsMixin(object):
101 """Mixin providing methods for window-related calculations.
102 These methods are wrappers for the functionality in
103 `rasterio.windows` module.
104
105 A subclass with this mixin MUST provide the following
106 properties: `transform`, `height` and `width`
107 """
108
109 def window(self, left, bottom, right, top, precision=6, **kwargs):
110 """Get the window corresponding to the bounding coordinates.
111
112 The resulting window is not cropped to the row and column
113 limits of the dataset.
114
115 Parameters
116 ----------
117 left: float
118 Left (west) bounding coordinate
119 bottom: float
120 Bottom (south) bounding coordinate
121 right: float
122 Right (east) bounding coordinate
123 top: float
124 Top (north) bounding coordinate
125 precision: int, optional
126 Number of decimal points of precision when computing inverse
127 transform.
128 kwargs: mapping
129 For backwards compatibility: absorbs deprecated keyword args.
130
131 Returns
132 -------
133 window: Window
134 """
135 if 'boundless' in kwargs: # pragma: no branch
136 warnings.warn("boundless keyword arg should not be used",
137 RasterioDeprecationWarning)
138
139 transform = guard_transform(self.transform)
140 return windows.from_bounds(
141 left, bottom, right, top, transform=transform,
142 height=self.height, width=self.width, precision=precision)
143
144 def window_transform(self, window):
145 """Get the affine transform for a dataset window.
146
147 Parameters
148 ----------
149 window: rasterio.windows.Window
150 Dataset window
151
152 Returns
153 -------
154 transform: Affine
155 The affine transform matrix for the given window
156 """
157
158 transform = guard_transform(self.transform)
159 return windows.transform(window, transform)
160
161 def window_bounds(self, window):
162 """Get the bounds of a window
163
164 Parameters
165 ----------
166 window: rasterio.windows.Window
167 Dataset window
168
169 Returns
170 -------
171 bounds : tuple
172 x_min, y_min, x_max, y_max for the given window
173 """
174
175 transform = guard_transform(self.transform)
176 return windows.bounds(window, transform)
177
178
179 class DatasetReader(DatasetReaderBase, WindowMethodsMixin,
180 TransformMethodsMixin):
181 """An unbuffered data and metadata reader"""
182
183 def __repr__(self):
184 return "<{} DatasetReader name='{}' mode='{}'>".format(
185 self.closed and 'closed' or 'open', self.name, self.mode)
186
187
188 class DatasetWriter(DatasetWriterBase, WindowMethodsMixin,
189 TransformMethodsMixin):
190 """An unbuffered data and metadata writer. Its methods write data
191 directly to disk.
192 """
193
194 def __repr__(self):
195 return "<{} DatasetWriter name='{}' mode='{}'>".format(
196 self.closed and 'closed' or 'open', self.name, self.mode)
197
198
199 class BufferedDatasetWriter(BufferedDatasetWriterBase, WindowMethodsMixin,
200 TransformMethodsMixin):
201 """Maintains data and metadata in a buffer, writing to disk or
202 network only when `close()` is called.
203
204 This allows incremental updates to datasets using formats that don't
205 otherwise support updates, such as JPEG.
206 """
207
208 def __repr__(self):
209 return "<{} BufferedDatasetWriter name='{}' mode='{}'>".format(
210 self.closed and 'closed' or 'open', self.name, self.mode)
211
212
213 class MemoryFile(MemoryFileBase):
214 """A BytesIO-like object, backed by an in-memory file.
215
216 This allows formatted files to be read and written without I/O.
217
218 A MemoryFile created with initial bytes becomes immutable. A
219 MemoryFile created without initial bytes may be written to using
220 either file-like or dataset interfaces.
221
222 Examples
223 --------
224
225 A GeoTIFF can be loaded in memory and accessed using the GeoTIFF
226 format driver
227
228 >>> with open('tests/data/RGB.byte.tif', 'rb') as f, MemoryFile(f) as memfile:
229 ... with memfile.open() as src:
230 ... pprint.pprint(src.profile)
231 ...
232 {'count': 3,
233 'crs': CRS({'init': 'epsg:32618'}),
234 'driver': 'GTiff',
235 'dtype': 'uint8',
236 'height': 718,
237 'interleave': 'pixel',
238 'nodata': 0.0,
239 'tiled': False,
240 'transform': Affine(300.0379266750948, 0.0, 101985.0,
241 0.0, -300.041782729805, 2826915.0),
242 'width': 791}
243
244 """
245 def __init__(self, file_or_bytes=None, filename=None, ext=''):
246 super(MemoryFile, self).__init__(
247 file_or_bytes=file_or_bytes, filename=filename, ext=ext)
248
249 @ensure_env
250 def open(self, driver=None, width=None, height=None, count=None, crs=None,
251 transform=None, dtype=None, nodata=None, **kwargs):
252 """Open the file and return a Rasterio dataset object.
253
254 If data has already been written, the file is opened in 'r+'
255 mode. Otherwise, the file is opened in 'w' mode.
256
257 Parameters
258 ----------
259 Note well that there is no `path` parameter: a `MemoryFile`
260 contains a single dataset and there is no need to specify a
261 path.
262
263 Other parameters are optional and have the same semantics as the
264 parameters of `rasterio.open()`.
265 """
266 vsi_path = self.name
267
268 if self.closed:
269 raise IOError("I/O operation on closed file.")
270 if self.exists():
271 s = DatasetReader(vsi_path, 'r+')
272 else:
273 writer = get_writer_for_driver(driver)
274 s = writer(vsi_path, 'w', driver=driver, width=width,
275 height=height, count=count, crs=crs,
276 transform=transform, dtype=dtype,
277 nodata=nodata, **kwargs)
278 s.start()
279 return s
280
281 def __enter__(self):
282 return self
283
284 def __exit__(self, *args, **kwargs):
285 self.close()
286
287
288 class ZipMemoryFile(MemoryFile):
289 """A read-only BytesIO-like object backed by an in-memory zip file.
290
291 This allows a zip file containing formatted files to be read
292 without I/O.
293 """
294
295 def __init__(self, file_or_bytes=None):
296 super(ZipMemoryFile, self).__init__(file_or_bytes, ext='zip')
297
298 @ensure_env
299 def open(self, path):
300 """Open a dataset within the zipped stream.
301
302 Parameters
303 ----------
304 path : str
305 Path to a dataset in the zip file, relative to the root of the
306 archive.
307
308 Returns
309 -------
310 A Rasterio dataset object
311 """
312 vsi_path = '/vsizip{0}/{1}'.format(self.name, path.lstrip('/'))
313
314 if self.closed:
315 raise IOError("I/O operation on closed file.")
316 s = DatasetReader(vsi_path, 'r')
317 s.start()
318 return s
319
320
321 def get_writer_for_driver(driver):
322 """Return the writer class appropriate for the specified driver."""
323 cls = None
324 if driver_can_create(driver):
325 cls = DatasetWriter
326 elif driver_can_create_copy(driver): # pragma: no branch
327 cls = BufferedDatasetWriter
328 return cls
329
330
331 def get_writer_for_path(path):
332 """Return the writer class appropriate for the existing dataset."""
333 driver = get_dataset_driver(path)
334 return get_writer_for_driver(driver)
335
[end of rasterio/io.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rasterio/io.py b/rasterio/io.py
--- a/rasterio/io.py
+++ b/rasterio/io.py
@@ -57,17 +57,6 @@
"""
return xy(self.transform, row, col, offset=offset)
- def ul(self, row, col):
- """Returns the coordinates (x, y) of the upper left corner of a
- pixel at `row` and `col` in the units of the dataset's
- coordinate reference system.
-
- Deprecated; Use `xy(row, col, offset='ul')` instead.
- """
- warnings.warn("ul method is deprecated. Use xy(row, col, offset='ul')",
- DeprecationWarning)
- return xy(self.transform, row, col, offset='ul')
-
def index(self, x, y, op=math.floor, precision=6):
"""
Returns the (row, col) index of the pixel containing (x, y) given a
| {"golden_diff": "diff --git a/rasterio/io.py b/rasterio/io.py\n--- a/rasterio/io.py\n+++ b/rasterio/io.py\n@@ -57,17 +57,6 @@\n \"\"\"\n return xy(self.transform, row, col, offset=offset)\n \n- def ul(self, row, col):\n- \"\"\"Returns the coordinates (x, y) of the upper left corner of a\n- pixel at `row` and `col` in the units of the dataset's\n- coordinate reference system.\n-\n- Deprecated; Use `xy(row, col, offset='ul')` instead.\n- \"\"\"\n- warnings.warn(\"ul method is deprecated. Use xy(row, col, offset='ul')\",\n- DeprecationWarning)\n- return xy(self.transform, row, col, offset='ul')\n-\n def index(self, x, y, op=math.floor, precision=6):\n \"\"\"\n Returns the (row, col) index of the pixel containing (x, y) given a\n", "issue": "Remove dataset's ul method\nIt's been deprecated since June 2016 (0.36).\n", "before_files": [{"content": "\"\"\"Classes capable of reading and writing datasets\n\nInstances of these classes are called dataset objects.\n\"\"\"\n\n\nimport logging\nimport math\nimport warnings\n\nfrom rasterio._base import (\n get_dataset_driver, driver_can_create, driver_can_create_copy)\nfrom rasterio._io import (\n DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase,\n MemoryFileBase)\nfrom rasterio import windows\nfrom rasterio.enums import Resampling\nfrom rasterio.env import ensure_env\nfrom rasterio.errors import RasterioDeprecationWarning\nfrom rasterio.transform import guard_transform, xy, rowcol\n\n\nlog = logging.getLogger(__name__)\n\n\nclass TransformMethodsMixin(object):\n \"\"\"Mixin providing methods for calculations related\n to transforming between rows and columns of the raster\n array and the coordinates.\n\n These methods are wrappers for the functionality in\n `rasterio.transform` module.\n\n A subclass with this mixin MUST provide a `transform`\n property.\n \"\"\"\n\n def xy(self, row, col, offset=\"center\"):\n \"\"\"Returns the coordinates ``(x, y)`` of a pixel at `row` and `col`.\n The pixel's center is returned by default, but a corner can be returned\n by setting `offset` to one of `ul, ur, ll, lr`.\n\n Parameters\n ----------\n row : int\n Pixel row.\n col : int\n Pixel column.\n offset : str, optional\n Determines if the returned coordinates are for the center of the\n pixel or for a corner.\n\n Returns\n -------\n tuple\n ``(x, y)``\n \"\"\"\n return xy(self.transform, row, col, offset=offset)\n\n def ul(self, row, col):\n \"\"\"Returns the coordinates (x, y) of the upper left corner of a\n pixel at `row` and `col` in the units of the dataset's\n coordinate reference system.\n\n Deprecated; Use `xy(row, col, offset='ul')` instead.\n \"\"\"\n warnings.warn(\"ul method is deprecated. 
Use xy(row, col, offset='ul')\",\n DeprecationWarning)\n return xy(self.transform, row, col, offset='ul')\n\n def index(self, x, y, op=math.floor, precision=6):\n \"\"\"\n Returns the (row, col) index of the pixel containing (x, y) given a\n coordinate reference system.\n\n Use an epsilon, magnitude determined by the precision parameter\n and sign determined by the op function:\n positive for floor, negative for ceil.\n\n Parameters\n ----------\n x : float\n x value in coordinate reference system\n y : float\n y value in coordinate reference system\n op : function, optional (default: math.floor)\n Function to convert fractional pixels to whole numbers (floor,\n ceiling, round)\n precision : int, optional (default: 6)\n Decimal places of precision in indexing, as in `round()`.\n\n Returns\n -------\n tuple\n (row index, col index)\n \"\"\"\n return rowcol(self.transform, x, y, op=op, precision=precision)\n\n\nclass WindowMethodsMixin(object):\n \"\"\"Mixin providing methods for window-related calculations.\n These methods are wrappers for the functionality in\n `rasterio.windows` module.\n\n A subclass with this mixin MUST provide the following\n properties: `transform`, `height` and `width`\n \"\"\"\n\n def window(self, left, bottom, right, top, precision=6, **kwargs):\n \"\"\"Get the window corresponding to the bounding coordinates.\n\n The resulting window is not cropped to the row and column\n limits of the dataset.\n\n Parameters\n ----------\n left: float\n Left (west) bounding coordinate\n bottom: float\n Bottom (south) bounding coordinate\n right: float\n Right (east) bounding coordinate\n top: float\n Top (north) bounding coordinate\n precision: int, optional\n Number of decimal points of precision when computing inverse\n transform.\n kwargs: mapping\n For backwards compatibility: absorbs deprecated keyword args.\n\n Returns\n -------\n window: Window\n \"\"\"\n if 'boundless' in kwargs: # pragma: no branch\n warnings.warn(\"boundless keyword arg should not be used\",\n RasterioDeprecationWarning)\n\n transform = guard_transform(self.transform)\n return windows.from_bounds(\n left, bottom, right, top, transform=transform,\n height=self.height, width=self.width, precision=precision)\n\n def window_transform(self, window):\n \"\"\"Get the affine transform for a dataset window.\n\n Parameters\n ----------\n window: rasterio.windows.Window\n Dataset window\n\n Returns\n -------\n transform: Affine\n The affine transform matrix for the given window\n \"\"\"\n\n transform = guard_transform(self.transform)\n return windows.transform(window, transform)\n\n def window_bounds(self, window):\n \"\"\"Get the bounds of a window\n\n Parameters\n ----------\n window: rasterio.windows.Window\n Dataset window\n\n Returns\n -------\n bounds : tuple\n x_min, y_min, x_max, y_max for the given window\n \"\"\"\n\n transform = guard_transform(self.transform)\n return windows.bounds(window, transform)\n\n\nclass DatasetReader(DatasetReaderBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"An unbuffered data and metadata reader\"\"\"\n\n def __repr__(self):\n return \"<{} DatasetReader name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass DatasetWriter(DatasetWriterBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"An unbuffered data and metadata writer. 
Its methods write data\n directly to disk.\n \"\"\"\n\n def __repr__(self):\n return \"<{} DatasetWriter name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass BufferedDatasetWriter(BufferedDatasetWriterBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"Maintains data and metadata in a buffer, writing to disk or\n network only when `close()` is called.\n\n This allows incremental updates to datasets using formats that don't\n otherwise support updates, such as JPEG.\n \"\"\"\n\n def __repr__(self):\n return \"<{} BufferedDatasetWriter name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass MemoryFile(MemoryFileBase):\n \"\"\"A BytesIO-like object, backed by an in-memory file.\n\n This allows formatted files to be read and written without I/O.\n\n A MemoryFile created with initial bytes becomes immutable. A\n MemoryFile created without initial bytes may be written to using\n either file-like or dataset interfaces.\n\n Examples\n --------\n\n A GeoTIFF can be loaded in memory and accessed using the GeoTIFF\n format driver\n\n >>> with open('tests/data/RGB.byte.tif', 'rb') as f, MemoryFile(f) as memfile:\n ... with memfile.open() as src:\n ... pprint.pprint(src.profile)\n ...\n {'count': 3,\n 'crs': CRS({'init': 'epsg:32618'}),\n 'driver': 'GTiff',\n 'dtype': 'uint8',\n 'height': 718,\n 'interleave': 'pixel',\n 'nodata': 0.0,\n 'tiled': False,\n 'transform': Affine(300.0379266750948, 0.0, 101985.0,\n 0.0, -300.041782729805, 2826915.0),\n 'width': 791}\n\n \"\"\"\n def __init__(self, file_or_bytes=None, filename=None, ext=''):\n super(MemoryFile, self).__init__(\n file_or_bytes=file_or_bytes, filename=filename, ext=ext)\n\n @ensure_env\n def open(self, driver=None, width=None, height=None, count=None, crs=None,\n transform=None, dtype=None, nodata=None, **kwargs):\n \"\"\"Open the file and return a Rasterio dataset object.\n\n If data has already been written, the file is opened in 'r+'\n mode. 
Otherwise, the file is opened in 'w' mode.\n\n Parameters\n ----------\n Note well that there is no `path` parameter: a `MemoryFile`\n contains a single dataset and there is no need to specify a\n path.\n\n Other parameters are optional and have the same semantics as the\n parameters of `rasterio.open()`.\n \"\"\"\n vsi_path = self.name\n\n if self.closed:\n raise IOError(\"I/O operation on closed file.\")\n if self.exists():\n s = DatasetReader(vsi_path, 'r+')\n else:\n writer = get_writer_for_driver(driver)\n s = writer(vsi_path, 'w', driver=driver, width=width,\n height=height, count=count, crs=crs,\n transform=transform, dtype=dtype,\n nodata=nodata, **kwargs)\n s.start()\n return s\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args, **kwargs):\n self.close()\n\n\nclass ZipMemoryFile(MemoryFile):\n \"\"\"A read-only BytesIO-like object backed by an in-memory zip file.\n\n This allows a zip file containing formatted files to be read\n without I/O.\n \"\"\"\n\n def __init__(self, file_or_bytes=None):\n super(ZipMemoryFile, self).__init__(file_or_bytes, ext='zip')\n\n @ensure_env\n def open(self, path):\n \"\"\"Open a dataset within the zipped stream.\n\n Parameters\n ----------\n path : str\n Path to a dataset in the zip file, relative to the root of the\n archive.\n\n Returns\n -------\n A Rasterio dataset object\n \"\"\"\n vsi_path = '/vsizip{0}/{1}'.format(self.name, path.lstrip('/'))\n\n if self.closed:\n raise IOError(\"I/O operation on closed file.\")\n s = DatasetReader(vsi_path, 'r')\n s.start()\n return s\n\n\ndef get_writer_for_driver(driver):\n \"\"\"Return the writer class appropriate for the specified driver.\"\"\"\n cls = None\n if driver_can_create(driver):\n cls = DatasetWriter\n elif driver_can_create_copy(driver): # pragma: no branch\n cls = BufferedDatasetWriter\n return cls\n\n\ndef get_writer_for_path(path):\n \"\"\"Return the writer class appropriate for the existing dataset.\"\"\"\n driver = get_dataset_driver(path)\n return get_writer_for_driver(driver)\n", "path": "rasterio/io.py"}]} | 3,834 | 223 |
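A quick illustration of the record above: the patch drops the deprecated `ul()` helper, whose own docstring already points at `xy(row, col, offset='ul')` as the replacement. The sketch below shows that migration; the sample GeoTIFF path comes from the docstring in the record, while the pixel indices are illustrative assumptions.

```python
# Migration sketch for the removed DatasetReader.ul() method (assumes rasterio
# is installed and the sample file from the record's docstring is available).
import rasterio

with rasterio.open("tests/data/RGB.byte.tif") as src:
    row, col = 10, 20  # illustrative pixel indices, not taken from the record

    # Before the patch (now removed):
    #   x, y = src.ul(row, col)
    # Replacement kept by the patch, per the deprecation message:
    x, y = src.xy(row, col, offset="ul")  # upper-left corner of that pixel
    print(x, y)
```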
gh_patches_debug_33169 | rasdani/github-patches | git_diff | streamlink__streamlink-2388 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Playtv uk
playtv.fr works fine. Is there any possibility of supporting http://uk.play.tv/ as well? Both the French and UK sites appear to be owned by the same company, and Streamlink already supports playtv.fr, so hopefully UK Play TV can be supported too.
http://uk.play.tv/live-tv/363/pick-tv/
http://uk.play.tv/live-tv/752/itv4-1/
http://uk.play.tv/live-tv/1106/itv3-1/
http://uk.play.tv/live-tv/1105/itv-1/
</issue>
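For context before the code listing: the reference patch later in this record widens the plugin's URL pattern so that subdomain hosts such as `uk.play.tv` are accepted. The snippet below is a minimal, self-contained check of just that regex change, copying both patterns verbatim from the record; the sample URL is one of those quoted in the issue.

```python
# Minimal check of the URL regex change proposed in this record's patch.
import re

old_re = re.compile(r'http://(?:playtv\.fr/television|play\.tv/live-tv/\d+)/(?P<channel>[^/]+)/?')
new_re = re.compile(r'https?://(?:playtv\.fr/television|(:?\w+\.)?play\.tv/live-tv/\d+)/(?P<channel>[^/]+)/?')

url = "http://uk.play.tv/live-tv/1105/itv-1/"
print(bool(old_re.match(url)))  # False: no subdomain support in the old pattern
print(bool(new_re.match(url)))  # True: the patched pattern accepts the 'uk.' host
```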
<code>
[start of src/streamlink/plugins/playtv.py]
1 import re
2
3 from streamlink.plugin import Plugin
4 from streamlink.plugin.api import validate
5 from streamlink.stream import HDSStream, HLSStream
6
7
8 class PlayTV(Plugin):
9 FORMATS_URL = 'http://playtv.fr/player/initialize/{0}/'
10 API_URL = 'http://playtv.fr/player/play/{0}/?format={1}&language={2}&bitrate={3}'
11
12 _url_re = re.compile(r'http://(?:playtv\.fr/television|play\.tv/live-tv/\d+)/(?P<channel>[^/]+)/?')
13
14 _formats_schema = validate.Schema({
15 'streams': validate.any(
16 [],
17 {
18 validate.text: validate.Schema({
19 validate.text: {
20 'bitrates': validate.all([
21 validate.Schema({
22 'value': int
23 })
24 ])
25 }
26 })
27 }
28 )
29 })
30 _api_schema = validate.Schema({
31 'url': validate.url()
32 })
33
34 @classmethod
35 def can_handle_url(cls, url):
36 return PlayTV._url_re.match(url)
37
38 def _get_streams(self):
39 match = self._url_re.match(self.url)
40 channel = match.group('channel')
41
42 res = self.session.http.get(self.FORMATS_URL.format(channel))
43 streams = self.session.http.json(res, schema=self._formats_schema)['streams']
44 if streams == []:
45 self.logger.error('Channel may be geo-restricted, not directly provided by PlayTV or not freely available')
46 return
47
48 for language in streams:
49 for protocol, bitrates in list(streams[language].items()):
50 # - Ignore non-supported protocols (RTSP, DASH)
51 # - Ignore deprecated Flash (RTMPE/HDS) streams (PlayTV doesn't provide anymore a Flash player)
52 if protocol in ['rtsp', 'flash', 'dash', 'hds']:
53 continue
54
55 for bitrate in bitrates['bitrates']:
56 if bitrate['value'] == 0:
57 continue
58 api_url = self.API_URL.format(channel, protocol, language, bitrate['value'])
59 res = self.session.http.get(api_url)
60 video_url = self.session.http.json(res, schema=self._api_schema)['url']
61 bs = '{0}k'.format(bitrate['value'])
62
63 if protocol == 'hls':
64 for _, stream in HLSStream.parse_variant_playlist(self.session, video_url).items():
65 yield bs, stream
66 elif protocol == 'hds':
67 for _, stream in HDSStream.parse_manifest(self.session, video_url).items():
68 yield bs, stream
69
70
71 __plugin__ = PlayTV
72
[end of src/streamlink/plugins/playtv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/playtv.py b/src/streamlink/plugins/playtv.py
--- a/src/streamlink/plugins/playtv.py
+++ b/src/streamlink/plugins/playtv.py
@@ -1,3 +1,5 @@
+import base64
+import json
import re
from streamlink.plugin import Plugin
@@ -5,11 +7,17 @@
from streamlink.stream import HDSStream, HLSStream
+def jwt_decode(token):
+ info, payload, sig = token.split(".")
+ data = base64.urlsafe_b64decode(payload + '=' * (-len(payload) % 4))
+ return json.loads(data)
+
+
class PlayTV(Plugin):
- FORMATS_URL = 'http://playtv.fr/player/initialize/{0}/'
- API_URL = 'http://playtv.fr/player/play/{0}/?format={1}&language={2}&bitrate={3}'
+ FORMATS_URL = 'https://playtv.fr/player/initialize/{0}/'
+ API_URL = 'https://playtv.fr/player/play/{0}/?format={1}&language={2}&bitrate={3}'
- _url_re = re.compile(r'http://(?:playtv\.fr/television|play\.tv/live-tv/\d+)/(?P<channel>[^/]+)/?')
+ _url_re = re.compile(r'https?://(?:playtv\.fr/television|(:?\w+\.)?play\.tv/live-tv/\d+)/(?P<channel>[^/]+)/?')
_formats_schema = validate.Schema({
'streams': validate.any(
@@ -27,9 +35,13 @@
}
)
})
- _api_schema = validate.Schema({
- 'url': validate.url()
- })
+
+ _api_schema = validate.Schema(
+ validate.transform(lambda x: jwt_decode(x)),
+ {
+ 'url': validate.url()
+ }
+ )
@classmethod
def can_handle_url(cls, url):
@@ -57,7 +69,7 @@
continue
api_url = self.API_URL.format(channel, protocol, language, bitrate['value'])
res = self.session.http.get(api_url)
- video_url = self.session.http.json(res, schema=self._api_schema)['url']
+ video_url = self._api_schema.validate(res.text)['url']
bs = '{0}k'.format(bitrate['value'])
if protocol == 'hls':
| {"golden_diff": "diff --git a/src/streamlink/plugins/playtv.py b/src/streamlink/plugins/playtv.py\n--- a/src/streamlink/plugins/playtv.py\n+++ b/src/streamlink/plugins/playtv.py\n@@ -1,3 +1,5 @@\n+import base64\n+import json\n import re\n \n from streamlink.plugin import Plugin\n@@ -5,11 +7,17 @@\n from streamlink.stream import HDSStream, HLSStream\n \n \n+def jwt_decode(token):\n+ info, payload, sig = token.split(\".\")\n+ data = base64.urlsafe_b64decode(payload + '=' * (-len(payload) % 4))\n+ return json.loads(data)\n+\n+\n class PlayTV(Plugin):\n- FORMATS_URL = 'http://playtv.fr/player/initialize/{0}/'\n- API_URL = 'http://playtv.fr/player/play/{0}/?format={1}&language={2}&bitrate={3}'\n+ FORMATS_URL = 'https://playtv.fr/player/initialize/{0}/'\n+ API_URL = 'https://playtv.fr/player/play/{0}/?format={1}&language={2}&bitrate={3}'\n \n- _url_re = re.compile(r'http://(?:playtv\\.fr/television|play\\.tv/live-tv/\\d+)/(?P<channel>[^/]+)/?')\n+ _url_re = re.compile(r'https?://(?:playtv\\.fr/television|(:?\\w+\\.)?play\\.tv/live-tv/\\d+)/(?P<channel>[^/]+)/?')\n \n _formats_schema = validate.Schema({\n 'streams': validate.any(\n@@ -27,9 +35,13 @@\n }\n )\n })\n- _api_schema = validate.Schema({\n- 'url': validate.url()\n- })\n+\n+ _api_schema = validate.Schema(\n+ validate.transform(lambda x: jwt_decode(x)),\n+ {\n+ 'url': validate.url()\n+ }\n+ )\n \n @classmethod\n def can_handle_url(cls, url):\n@@ -57,7 +69,7 @@\n continue\n api_url = self.API_URL.format(channel, protocol, language, bitrate['value'])\n res = self.session.http.get(api_url)\n- video_url = self.session.http.json(res, schema=self._api_schema)['url']\n+ video_url = self._api_schema.validate(res.text)['url']\n bs = '{0}k'.format(bitrate['value'])\n \n if protocol == 'hls':\n", "issue": "Playtv uk\nplaytv.fr working fine is there any possibility to run http://uk.play.tv/ looks like both sites france and uk owned by same company. As streamlink support tvplay.fr. hope u get support for uk playtv.\r\nhttp://uk.play.tv/live-tv/363/pick-tv/\r\nhttp://uk.play.tv/live-tv/752/itv4-1/\r\nhttp://uk.play.tv/live-tv/1106/itv3-1/\r\nhttp://uk.play.tv/live-tv/1105/itv-1/\nPlaytv uk\nplaytv.fr working fine is there any possibility to run http://uk.play.tv/ looks like both sites france and uk owned by same company. As streamlink support tvplay.fr. 
hope u get support for uk playtv.\r\nhttp://uk.play.tv/live-tv/363/pick-tv/\r\nhttp://uk.play.tv/live-tv/752/itv4-1/\r\nhttp://uk.play.tv/live-tv/1106/itv3-1/\r\nhttp://uk.play.tv/live-tv/1105/itv-1/\n", "before_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HDSStream, HLSStream\n\n\nclass PlayTV(Plugin):\n FORMATS_URL = 'http://playtv.fr/player/initialize/{0}/'\n API_URL = 'http://playtv.fr/player/play/{0}/?format={1}&language={2}&bitrate={3}'\n\n _url_re = re.compile(r'http://(?:playtv\\.fr/television|play\\.tv/live-tv/\\d+)/(?P<channel>[^/]+)/?')\n\n _formats_schema = validate.Schema({\n 'streams': validate.any(\n [],\n {\n validate.text: validate.Schema({\n validate.text: {\n 'bitrates': validate.all([\n validate.Schema({\n 'value': int\n })\n ])\n }\n })\n }\n )\n })\n _api_schema = validate.Schema({\n 'url': validate.url()\n })\n\n @classmethod\n def can_handle_url(cls, url):\n return PlayTV._url_re.match(url)\n\n def _get_streams(self):\n match = self._url_re.match(self.url)\n channel = match.group('channel')\n\n res = self.session.http.get(self.FORMATS_URL.format(channel))\n streams = self.session.http.json(res, schema=self._formats_schema)['streams']\n if streams == []:\n self.logger.error('Channel may be geo-restricted, not directly provided by PlayTV or not freely available')\n return\n\n for language in streams:\n for protocol, bitrates in list(streams[language].items()):\n # - Ignore non-supported protocols (RTSP, DASH)\n # - Ignore deprecated Flash (RTMPE/HDS) streams (PlayTV doesn't provide anymore a Flash player)\n if protocol in ['rtsp', 'flash', 'dash', 'hds']:\n continue\n\n for bitrate in bitrates['bitrates']:\n if bitrate['value'] == 0:\n continue\n api_url = self.API_URL.format(channel, protocol, language, bitrate['value'])\n res = self.session.http.get(api_url)\n video_url = self.session.http.json(res, schema=self._api_schema)['url']\n bs = '{0}k'.format(bitrate['value'])\n\n if protocol == 'hls':\n for _, stream in HLSStream.parse_variant_playlist(self.session, video_url).items():\n yield bs, stream\n elif protocol == 'hds':\n for _, stream in HDSStream.parse_manifest(self.session, video_url).items():\n yield bs, stream\n\n\n__plugin__ = PlayTV\n", "path": "src/streamlink/plugins/playtv.py"}]} | 1,490 | 546 |
gh_patches_debug_21350 | rasdani/github-patches | git_diff | pytorch__TensorRT-431 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
API to register arbitrary TRT engines as modules
It would be nice to be able to accept TRT engines built outside TRTorch and register them in the execution system.
</issue>
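Before the code listing, a hedged sketch of how the requested feature ends up looking: the reference patch in this record adds an `embed_engine_in_new_module` helper that wraps a pre-built serialized engine in a TorchScript module. The engine file name below is hypothetical, and the sketch assumes the helper is exposed at the `trtorch` package level like the other compiler entry points.

```python
# Usage sketch for the API added by this record's patch (assumptions noted above).
import torch
import trtorch

# 'engine.trt' is a hypothetical pre-built serialized TensorRT engine produced
# outside TRTorch, e.g. with the TensorRT APIs directly.
with open("engine.trt", "rb") as f:
    serialized_engine = f.read()

# Per the patch's docstring, the returned module exposes
# forward(Tensor[]) -> Tensor[] and executes the embedded engine.
trt_module = trtorch.embed_engine_in_new_module(serialized_engine)

# The module can be saved and moved like any other TorchScript module.
torch.jit.save(trt_module, "embedded_engine.ts")
```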
<code>
[start of py/trtorch/_compiler.py]
1 from typing import List, Dict, Any
2 import torch
3 from torch import nn
4
5 import trtorch._C
6 from trtorch._compile_spec import _parse_compile_spec
7 from trtorch._version import __version__
8 from types import FunctionType
9
10
11 def compile(module: torch.jit.ScriptModule, compile_spec: Any) -> torch.jit.ScriptModule:
12 """Compile a TorchScript module for NVIDIA GPUs using TensorRT
13
14 Takes a existing TorchScript module and a set of settings to configure the compiler
15 and will convert methods to JIT Graphs which call equivalent TensorRT engines
16
17 Converts specifically the forward method of a TorchScript Module
18
19 Args:
20 module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch
21 ``torch.nn.Module``
22 compile_spec (dict): Compilation settings including operating precision, target device, etc.
23 One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs
24 to the graph. All other keys are optional
25
26 .. code-block:: py
27
28 compile_spec = {
29 "input_shapes": [
30 (1, 3, 224, 224), # Static input shape for input #1
31 {
32 "min": (1, 3, 224, 224),
33 "opt": (1, 3, 512, 512),
34 "max": (1, 3, 1024, 1024)
35 } # Dynamic input shape for input #2
36 ],
37 "device": {
38 "device_type": torch.device("cuda"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)
39 "gpu_id": 0, # Target gpu id to run engine (Use Xavier as gpu id for DLA)
40 "dla_core": 0, # (DLA only) Target dla core id to run engine
41 "allow_gpu_fallback": false, # (DLA only) Allow layers unsupported on DLA to run on GPU
42 },
43 "op_precision": torch.half, # Operating precision set to FP16
44 "refit": false, # enable refit
45 "debug": false, # enable debuggable engine
46 "strict_types": false, # kernels should strictly run in operating precision
47 "capability": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels
48 "num_min_timing_iters": 2, # Number of minimization timing iterations used to select kernels
49 "num_avg_timing_iters": 1, # Number of averaging timing iterations used to select kernels
50 "workspace_size": 0, # Maximum size of workspace given to TensorRT
51 "max_batch_size": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)
52 }
53
54 Input Sizes can be specified as torch sizes, tuples or lists. Op precisions can be specified using
55 torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum
56 to select device type.
57
58 Returns:
59 torch.jit.ScriptModule: Compiled TorchScript Module, when run it will execute via TensorRT
60 """
61
62 if isinstance(module, torch.jit.ScriptFunction):
63 raise TypeError(
64 "torch.jit.ScriptFunction currently is not directly supported, wrap the function in a module to compile")
65
66 compiled_cpp_mod = trtorch._C.compile_graph(module._c, _parse_compile_spec(compile_spec))
67 compiled_module = torch.jit._recursive.wrap_cpp_module(compiled_cpp_mod)
68 return compiled_module
69
70
71 def convert_method_to_trt_engine(module: torch.jit.ScriptModule, method_name: str, compile_spec: Any) -> str:
72 """Convert a TorchScript module method to a serialized TensorRT engine
73
74 Converts a specified method of a module to a serialized TensorRT engine given a dictionary of conversion settings
75
76 Args:
77 module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch
78 ``torch.nn.Module``
79 method_name (str): Name of method to convert
80 compile_spec (dict): Compilation settings including operating precision, target device, etc.
81 One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs
82 to the graph. All other keys are optional
83
84 .. code-block:: py
85
86 CompileSpec = {
87 "input_shapes": [
88 (1, 3, 224, 224), # Static input shape for input #1
89 {
90 "min": (1, 3, 224, 224),
91 "opt": (1, 3, 512, 512),
92 "max": (1, 3, 1024, 1024)
93 } # Dynamic input shape for input #2
94 ],
95 "device": {
96 "device_type": torch.device("cuda"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)
97 "gpu_id": 0, # Target gpu id to run engine (Use Xavier as gpu id for DLA)
98 "dla_core": 0, # (DLA only) Target dla core id to run engine
99 "allow_gpu_fallback": false, # (DLA only) Allow layers unsupported on DLA to run on GPU
100 },
101 "op_precision": torch.half, # Operating precision set to FP16
102 "disable_tf32": False, # Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas
103 "refit": false, # enable refit
104 "debug": false, # enable debuggable engine
105 "strict_types": false, # kernels should strictly run in operating precision
106 "capability": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels
107 "num_min_timing_iters": 2, # Number of minimization timing iterations used to select kernels
108 "num_avg_timing_iters": 1, # Number of averaging timing iterations used to select kernels
109 "workspace_size": 0, # Maximum size of workspace given to TensorRT
110 "max_batch_size": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)
111 }
112
113 Input Sizes can be specified as torch sizes, tuples or lists. Op precisions can be specified using
114 torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum
115 to select device type.
116
117 Returns:
118 bytes: Serialized TensorRT engine, can either be saved to a file or deserialized via TensorRT APIs
119 """
120 if isinstance(module, torch.jit.ScriptFunction):
121 raise TypeError(
122 "torch.jit.ScriptFunctions currently are not directly supported, wrap the function in a module to compile")
123
124 return trtorch._C.convert_graph_to_trt_engine(module._c, method_name, _parse_compile_spec(compile_spec))
125
126
127 def check_method_op_support(module: torch.jit.ScriptModule, method_name: str) -> bool:
128 """Checks to see if a method is fully supported by TRTorch
129
130 Checks if a method of a TorchScript module can be compiled by TRTorch, if not, a list of operators
131 that are not supported are printed out and the function returns false, else true.
132
133 Args:
134 module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch
135 ``torch.nn.Module``
136 method_name (str): Name of method to check
137
138 Returns:
139 bool: True if supported Method
140 """
141 return trtorch._C.check_method_op_support(module._c, method_name)
142
143
144 def dump_build_info():
145 """Prints build information about the TRTorch distribution to stdout
146 """
147 print(get_build_info())
148
149
150 def get_build_info() -> str:
151 """Returns a string containing the build information of TRTorch distribution
152
153 Returns:
154 str: String containing the build information for TRTorch distribution
155 """
156 build_info = trtorch._C.get_build_info()
157 build_info = "TRTorch Version: " + str(__version__) + '\n' + build_info
158 return build_info
159
160
161 def set_device(gpu_id):
162 trtorch._C.set_device(gpu_id)
163
[end of py/trtorch/_compiler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/py/trtorch/_compiler.py b/py/trtorch/_compiler.py
--- a/py/trtorch/_compiler.py
+++ b/py/trtorch/_compiler.py
@@ -124,6 +124,26 @@
return trtorch._C.convert_graph_to_trt_engine(module._c, method_name, _parse_compile_spec(compile_spec))
+def embed_engine_in_new_module(serialized_engine: bytes) -> torch.jit.ScriptModule:
+ """Takes a pre-built serialized TensorRT engine and embeds it within a TorchScript module
+
+ Takes a pre-built serialied TensorRT engine (as bytes) and embeds it within a TorchScript module.
+ Registers the forward method to execute the TensorRT engine with the function signature:
+
+ forward(Tensor[]) -> Tensor[]
+
+ Module can be save with engine embedded with torch.jit.save and moved / loaded according to TRTorch portability rules
+
+ Args:
+ serialized_engine (bytes): Serialized TensorRT engine from either TRTorch or TensorRT APIs
+
+ Returns:
+ torch.jit.ScriptModule: New TorchScript module with engine embedded
+ """
+ cpp_mod = trtorch._C.embed_engine_in_new_module(serialized_engine)
+ return torch.jit._recursive.wrap_cpp_module(cpp_mod)
+
+
def check_method_op_support(module: torch.jit.ScriptModule, method_name: str) -> bool:
"""Checks to see if a method is fully supported by TRTorch
| {"golden_diff": "diff --git a/py/trtorch/_compiler.py b/py/trtorch/_compiler.py\n--- a/py/trtorch/_compiler.py\n+++ b/py/trtorch/_compiler.py\n@@ -124,6 +124,26 @@\n return trtorch._C.convert_graph_to_trt_engine(module._c, method_name, _parse_compile_spec(compile_spec))\n \n \n+def embed_engine_in_new_module(serialized_engine: bytes) -> torch.jit.ScriptModule:\n+ \"\"\"Takes a pre-built serialized TensorRT engine and embeds it within a TorchScript module\n+\n+ Takes a pre-built serialied TensorRT engine (as bytes) and embeds it within a TorchScript module.\n+ Registers the forward method to execute the TensorRT engine with the function signature:\n+\n+ forward(Tensor[]) -> Tensor[]\n+\n+ Module can be save with engine embedded with torch.jit.save and moved / loaded according to TRTorch portability rules\n+\n+ Args:\n+ serialized_engine (bytes): Serialized TensorRT engine from either TRTorch or TensorRT APIs\n+\n+ Returns:\n+ torch.jit.ScriptModule: New TorchScript module with engine embedded\n+ \"\"\"\n+ cpp_mod = trtorch._C.embed_engine_in_new_module(serialized_engine)\n+ return torch.jit._recursive.wrap_cpp_module(cpp_mod)\n+\n+\n def check_method_op_support(module: torch.jit.ScriptModule, method_name: str) -> bool:\n \"\"\"Checks to see if a method is fully supported by TRTorch\n", "issue": "API to register arbitrary TRT engines as modules\nIt would be nice to be able to accept TRT engines built outside TRTorch and register them in the execution system. \n", "before_files": [{"content": "from typing import List, Dict, Any\nimport torch\nfrom torch import nn\n\nimport trtorch._C\nfrom trtorch._compile_spec import _parse_compile_spec\nfrom trtorch._version import __version__\nfrom types import FunctionType\n\n\ndef compile(module: torch.jit.ScriptModule, compile_spec: Any) -> torch.jit.ScriptModule:\n \"\"\"Compile a TorchScript module for NVIDIA GPUs using TensorRT\n\n Takes a existing TorchScript module and a set of settings to configure the compiler\n and will convert methods to JIT Graphs which call equivalent TensorRT engines\n\n Converts specifically the forward method of a TorchScript Module\n\n Args:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n compile_spec (dict): Compilation settings including operating precision, target device, etc.\n One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs\n to the graph. All other keys are optional\n\n .. 
code-block:: py\n\n compile_spec = {\n \"input_shapes\": [\n (1, 3, 224, 224), # Static input shape for input #1\n {\n \"min\": (1, 3, 224, 224),\n \"opt\": (1, 3, 512, 512),\n \"max\": (1, 3, 1024, 1024)\n } # Dynamic input shape for input #2\n ],\n \"device\": {\n \"device_type\": torch.device(\"cuda\"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)\n \"gpu_id\": 0, # Target gpu id to run engine (Use Xavier as gpu id for DLA)\n \"dla_core\": 0, # (DLA only) Target dla core id to run engine\n \"allow_gpu_fallback\": false, # (DLA only) Allow layers unsupported on DLA to run on GPU\n },\n \"op_precision\": torch.half, # Operating precision set to FP16\n \"refit\": false, # enable refit\n \"debug\": false, # enable debuggable engine\n \"strict_types\": false, # kernels should strictly run in operating precision\n \"capability\": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels\n \"num_min_timing_iters\": 2, # Number of minimization timing iterations used to select kernels\n \"num_avg_timing_iters\": 1, # Number of averaging timing iterations used to select kernels\n \"workspace_size\": 0, # Maximum size of workspace given to TensorRT\n \"max_batch_size\": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)\n }\n\n Input Sizes can be specified as torch sizes, tuples or lists. Op precisions can be specified using\n torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum\n to select device type.\n\n Returns:\n torch.jit.ScriptModule: Compiled TorchScript Module, when run it will execute via TensorRT\n \"\"\"\n\n if isinstance(module, torch.jit.ScriptFunction):\n raise TypeError(\n \"torch.jit.ScriptFunction currently is not directly supported, wrap the function in a module to compile\")\n\n compiled_cpp_mod = trtorch._C.compile_graph(module._c, _parse_compile_spec(compile_spec))\n compiled_module = torch.jit._recursive.wrap_cpp_module(compiled_cpp_mod)\n return compiled_module\n\n\ndef convert_method_to_trt_engine(module: torch.jit.ScriptModule, method_name: str, compile_spec: Any) -> str:\n \"\"\"Convert a TorchScript module method to a serialized TensorRT engine\n\n Converts a specified method of a module to a serialized TensorRT engine given a dictionary of conversion settings\n\n Args:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n method_name (str): Name of method to convert\n compile_spec (dict): Compilation settings including operating precision, target device, etc.\n One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs\n to the graph. All other keys are optional\n\n .. 
code-block:: py\n\n CompileSpec = {\n \"input_shapes\": [\n (1, 3, 224, 224), # Static input shape for input #1\n {\n \"min\": (1, 3, 224, 224),\n \"opt\": (1, 3, 512, 512),\n \"max\": (1, 3, 1024, 1024)\n } # Dynamic input shape for input #2\n ],\n \"device\": {\n \"device_type\": torch.device(\"cuda\"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)\n \"gpu_id\": 0, # Target gpu id to run engine (Use Xavier as gpu id for DLA)\n \"dla_core\": 0, # (DLA only) Target dla core id to run engine\n \"allow_gpu_fallback\": false, # (DLA only) Allow layers unsupported on DLA to run on GPU\n },\n \"op_precision\": torch.half, # Operating precision set to FP16\n \"disable_tf32\": False, # Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas\n \"refit\": false, # enable refit\n \"debug\": false, # enable debuggable engine\n \"strict_types\": false, # kernels should strictly run in operating precision\n \"capability\": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels\n \"num_min_timing_iters\": 2, # Number of minimization timing iterations used to select kernels\n \"num_avg_timing_iters\": 1, # Number of averaging timing iterations used to select kernels\n \"workspace_size\": 0, # Maximum size of workspace given to TensorRT\n \"max_batch_size\": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)\n }\n\n Input Sizes can be specified as torch sizes, tuples or lists. Op precisions can be specified using\n torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum\n to select device type.\n\n Returns:\n bytes: Serialized TensorRT engine, can either be saved to a file or deserialized via TensorRT APIs\n \"\"\"\n if isinstance(module, torch.jit.ScriptFunction):\n raise TypeError(\n \"torch.jit.ScriptFunctions currently are not directly supported, wrap the function in a module to compile\")\n\n return trtorch._C.convert_graph_to_trt_engine(module._c, method_name, _parse_compile_spec(compile_spec))\n\n\ndef check_method_op_support(module: torch.jit.ScriptModule, method_name: str) -> bool:\n \"\"\"Checks to see if a method is fully supported by TRTorch\n\n Checks if a method of a TorchScript module can be compiled by TRTorch, if not, a list of operators\n that are not supported are printed out and the function returns false, else true.\n\n Args:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n method_name (str): Name of method to check\n\n Returns:\n bool: True if supported Method\n \"\"\"\n return trtorch._C.check_method_op_support(module._c, method_name)\n\n\ndef dump_build_info():\n \"\"\"Prints build information about the TRTorch distribution to stdout\n \"\"\"\n print(get_build_info())\n\n\ndef get_build_info() -> str:\n \"\"\"Returns a string containing the build information of TRTorch distribution\n\n Returns:\n str: String containing the build information for TRTorch distribution\n \"\"\"\n build_info = trtorch._C.get_build_info()\n build_info = \"TRTorch Version: \" + str(__version__) + '\\n' + build_info\n return build_info\n\n\ndef set_device(gpu_id):\n trtorch._C.set_device(gpu_id)\n", "path": "py/trtorch/_compiler.py"}]} | 2,809 | 324 |
gh_patches_debug_3132 | rasdani/github-patches | git_diff | pantsbuild__pants-6037 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
contrib go plugin not able to recognize meta tag if meta ends with />
The regex only recognizes `<meta xxxxxxxxxxxxx >` but not `<meta xxxxxxxxxx />`, so self-closing go-import meta tags are never matched.
</issue>
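To make the failure concrete before the code listing: the reference patch only changes the closing `>` in the pattern to `/?>`. The snippet below rebuilds the same verbose regex with both endings and runs it against a made-up self-closing go-import tag; the HTML string is an illustrative assumption, not taken from a real go-get page.

```python
# Demonstration of the one-character regex fix from this record's patch.
import re


def build_pattern(closing):
    return re.compile(r"""
    <meta
    \s+
    name=['"]go-import['"]
    \s+
    content=['"](?P<root>[^\s]+)\s+(?P<vcs>[^\s]+)\s+(?P<url>[^\s]+)['"]
    \s*
    """ + closing, flags=re.VERBOSE)


old_pattern = build_pattern(">")    # original ending
new_pattern = build_pattern("/?>")  # patched ending

html = '<meta name="go-import" content="example.org/pkg git https://example.org/pkg" />'
print(old_pattern.findall(html))  # []: the self-closing tag is missed
print(new_pattern.findall(html))  # [('example.org/pkg', 'git', 'https://example.org/pkg')]
```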
<code>
[start of contrib/go/src/python/pants/contrib/go/subsystems/go_import_meta_tag_reader.py]
1 # coding=utf-8
2 # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
3 # Licensed under the Apache License, Version 2.0 (see LICENSE).
4
5 from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
6 unicode_literals, with_statement)
7
8 import re
9
10 import requests
11 from pants.subsystem.subsystem import Subsystem
12 from pants.util.memo import memoized_method
13
14 from pants.contrib.go.subsystems.imported_repo import ImportedRepo
15
16
17 class GoImportMetaTagReader(Subsystem):
18 """Implements a reader for the <meta name="go-import"> protocol.
19
20 See https://golang.org/cmd/go/#hdr-Remote_import_paths .
21 """
22 options_scope = 'go-import-metatag-reader'
23
24 @classmethod
25 def register_options(cls, register):
26 super(GoImportMetaTagReader, cls).register_options(register)
27 register('--retries', type=int, default=1, advanced=True,
28 help='How many times to retry when fetching meta tags.')
29
30 _META_IMPORT_REGEX = re.compile(r"""
31 <meta
32 \s+
33 name=['"]go-import['"]
34 \s+
35 content=['"](?P<root>[^\s]+)\s+(?P<vcs>[^\s]+)\s+(?P<url>[^\s]+)['"]
36 \s*
37 >""", flags=re.VERBOSE)
38
39 @classmethod
40 def find_meta_tags(cls, page_html):
41 """Returns the content of the meta tag if found inside of the provided HTML."""
42
43 return cls._META_IMPORT_REGEX.findall(page_html)
44
45 @memoized_method
46 def get_imported_repo(self, import_path):
47 """Looks for a go-import meta tag for the provided import_path.
48
49 Returns an ImportedRepo instance with the information in the meta tag,
50 or None if no go-import meta tag is found.
51 """
52 try:
53 session = requests.session()
54 # TODO: Support https with (optional) fallback to http, as Go does.
55 # See https://github.com/pantsbuild/pants/issues/3503.
56 session.mount("http://",
57 requests.adapters.HTTPAdapter(max_retries=self.get_options().retries))
58 page_data = session.get('http://{import_path}?go-get=1'.format(import_path=import_path))
59 except requests.ConnectionError:
60 return None
61
62 if not page_data:
63 return None
64
65 # Return the first match, rather than doing some kind of longest prefix search.
66 # Hopefully no one returns multiple valid go-import meta tags.
67 for (root, vcs, url) in self.find_meta_tags(page_data.text):
68 if root and vcs and url:
69 # Check to make sure returned root is an exact match to the provided import path. If it is
70 # not then run a recursive check on the returned and return the values provided by that call.
71 if root == import_path:
72 return ImportedRepo(root, vcs, url)
73 elif import_path.startswith(root):
74 return self.get_imported_repo(root)
75
76 return None
77
[end of contrib/go/src/python/pants/contrib/go/subsystems/go_import_meta_tag_reader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/contrib/go/src/python/pants/contrib/go/subsystems/go_import_meta_tag_reader.py b/contrib/go/src/python/pants/contrib/go/subsystems/go_import_meta_tag_reader.py
--- a/contrib/go/src/python/pants/contrib/go/subsystems/go_import_meta_tag_reader.py
+++ b/contrib/go/src/python/pants/contrib/go/subsystems/go_import_meta_tag_reader.py
@@ -34,7 +34,7 @@
\s+
content=['"](?P<root>[^\s]+)\s+(?P<vcs>[^\s]+)\s+(?P<url>[^\s]+)['"]
\s*
- >""", flags=re.VERBOSE)
+ /?>""", flags=re.VERBOSE)
@classmethod
def find_meta_tags(cls, page_html):
| {"golden_diff": "diff --git a/contrib/go/src/python/pants/contrib/go/subsystems/go_import_meta_tag_reader.py b/contrib/go/src/python/pants/contrib/go/subsystems/go_import_meta_tag_reader.py\n--- a/contrib/go/src/python/pants/contrib/go/subsystems/go_import_meta_tag_reader.py\n+++ b/contrib/go/src/python/pants/contrib/go/subsystems/go_import_meta_tag_reader.py\n@@ -34,7 +34,7 @@\n \\s+\n content=['\"](?P<root>[^\\s]+)\\s+(?P<vcs>[^\\s]+)\\s+(?P<url>[^\\s]+)['\"]\n \\s*\n- >\"\"\", flags=re.VERBOSE)\n+ /?>\"\"\", flags=re.VERBOSE)\n \n @classmethod\n def find_meta_tags(cls, page_html):\n", "issue": "contrib go plugin not able to recognize meta tag if meta ends with />\nThe regex only recognize `<meta xxxxxxxxxxxxx >` but not `<meta xxxxxxxxxx />`.\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport re\n\nimport requests\nfrom pants.subsystem.subsystem import Subsystem\nfrom pants.util.memo import memoized_method\n\nfrom pants.contrib.go.subsystems.imported_repo import ImportedRepo\n\n\nclass GoImportMetaTagReader(Subsystem):\n \"\"\"Implements a reader for the <meta name=\"go-import\"> protocol.\n\n See https://golang.org/cmd/go/#hdr-Remote_import_paths .\n \"\"\"\n options_scope = 'go-import-metatag-reader'\n\n @classmethod\n def register_options(cls, register):\n super(GoImportMetaTagReader, cls).register_options(register)\n register('--retries', type=int, default=1, advanced=True,\n help='How many times to retry when fetching meta tags.')\n\n _META_IMPORT_REGEX = re.compile(r\"\"\"\n <meta\n \\s+\n name=['\"]go-import['\"]\n \\s+\n content=['\"](?P<root>[^\\s]+)\\s+(?P<vcs>[^\\s]+)\\s+(?P<url>[^\\s]+)['\"]\n \\s*\n >\"\"\", flags=re.VERBOSE)\n\n @classmethod\n def find_meta_tags(cls, page_html):\n \"\"\"Returns the content of the meta tag if found inside of the provided HTML.\"\"\"\n\n return cls._META_IMPORT_REGEX.findall(page_html)\n\n @memoized_method\n def get_imported_repo(self, import_path):\n \"\"\"Looks for a go-import meta tag for the provided import_path.\n\n Returns an ImportedRepo instance with the information in the meta tag,\n or None if no go-import meta tag is found.\n \"\"\"\n try:\n session = requests.session()\n # TODO: Support https with (optional) fallback to http, as Go does.\n # See https://github.com/pantsbuild/pants/issues/3503.\n session.mount(\"http://\",\n requests.adapters.HTTPAdapter(max_retries=self.get_options().retries))\n page_data = session.get('http://{import_path}?go-get=1'.format(import_path=import_path))\n except requests.ConnectionError:\n return None\n\n if not page_data:\n return None\n\n # Return the first match, rather than doing some kind of longest prefix search.\n # Hopefully no one returns multiple valid go-import meta tags.\n for (root, vcs, url) in self.find_meta_tags(page_data.text):\n if root and vcs and url:\n # Check to make sure returned root is an exact match to the provided import path. If it is\n # not then run a recursive check on the returned and return the values provided by that call.\n if root == import_path:\n return ImportedRepo(root, vcs, url)\n elif import_path.startswith(root):\n return self.get_imported_repo(root)\n\n return None\n", "path": "contrib/go/src/python/pants/contrib/go/subsystems/go_import_meta_tag_reader.py"}]} | 1,399 | 176 |
gh_patches_debug_18605 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-287 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for running bwctest.sh
</issue>
<code>
[start of bundle-workflow/src/paths/tree_walker.py]
1 # SPDX-License-Identifier: Apache-2.0
2 #
3 # The OpenSearch Contributors require contributions made to
4 # this file be licensed under the Apache-2.0 license or a
5 # compatible open source license.
6
7 import os
8
9
10 def walk(root):
11 print(f'Walking tree from {root}')
12 for dir, dirs, files in os.walk(root):
13 for file_name in files:
14 absolute_path = os.path.join(dir, file_name)
15 relative_path = os.path.relpath(absolute_path, root)
16 yield (os.path.realpath(absolute_path), relative_path)
17
[end of bundle-workflow/src/paths/tree_walker.py]
[start of bundle-workflow/src/system/execute.py]
1 # SPDX-License-Identifier: Apache-2.0
2 #
3 # The OpenSearch Contributors require contributions made to
4 # this file be licensed under the Apache-2.0 license or a
5 # compatible open source license.
6
7 import subprocess
8
9
10 def execute(command, dir, capture=True, raise_on_failure=True):
11 """
12 Execute a shell command inside a directory.
13 :param command: The shell command to execute.
14 :param dir: The full path to the directory that the command should be executed in.
15 :returns a tuple containing the exit code, stdout, and stderr.
16 """
17 print(f'Executing "{command}" in {dir}')
18 result = subprocess.run(command, cwd=dir, shell=True, capture_output=capture, text=True)
19 if raise_on_failure:
20 result.check_returncode()
21 return (result.returncode, result.stdout, result.stderr)
22
[end of bundle-workflow/src/system/execute.py]
</code>
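Since the issue above only names the missing script, the sketch below is a heavily hedged illustration of how `bwctest.sh` might be driven through the `execute()` helper shown in the listing; the import path, component layout, and script arguments are all assumptions rather than anything specified in the record.

```python
# Hypothetical driver for bwctest.sh built on the execute() helper above.
# Assumes bundle-workflow/src is on sys.path so the 'system' package imports.
import os

from system.execute import execute


def run_bwc_test(component_dir):
    script = os.path.join(component_dir, "bwctest.sh")
    if not os.path.exists(script):
        print(f"No bwctest.sh in {component_dir}, skipping")
        return None
    # execute() runs the command inside component_dir and raises on failure.
    return execute("./bwctest.sh", component_dir)
```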
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bundle-workflow/src/paths/tree_walker.py b/bundle-workflow/src/paths/tree_walker.py
--- a/bundle-workflow/src/paths/tree_walker.py
+++ b/bundle-workflow/src/paths/tree_walker.py
@@ -8,7 +8,7 @@
def walk(root):
- print(f'Walking tree from {root}')
+ print(f"Walking tree from {root}")
for dir, dirs, files in os.walk(root):
for file_name in files:
absolute_path = os.path.join(dir, file_name)
diff --git a/bundle-workflow/src/system/execute.py b/bundle-workflow/src/system/execute.py
--- a/bundle-workflow/src/system/execute.py
+++ b/bundle-workflow/src/system/execute.py
@@ -15,7 +15,9 @@
:returns a tuple containing the exit code, stdout, and stderr.
"""
print(f'Executing "{command}" in {dir}')
- result = subprocess.run(command, cwd=dir, shell=True, capture_output=capture, text=True)
+ result = subprocess.run(
+ command, cwd=dir, shell=True, capture_output=capture, text=True
+ )
if raise_on_failure:
result.check_returncode()
return (result.returncode, result.stdout, result.stderr)
| {"golden_diff": "diff --git a/bundle-workflow/src/paths/tree_walker.py b/bundle-workflow/src/paths/tree_walker.py\n--- a/bundle-workflow/src/paths/tree_walker.py\n+++ b/bundle-workflow/src/paths/tree_walker.py\n@@ -8,7 +8,7 @@\n \n \n def walk(root):\n- print(f'Walking tree from {root}')\n+ print(f\"Walking tree from {root}\")\n for dir, dirs, files in os.walk(root):\n for file_name in files:\n absolute_path = os.path.join(dir, file_name)\ndiff --git a/bundle-workflow/src/system/execute.py b/bundle-workflow/src/system/execute.py\n--- a/bundle-workflow/src/system/execute.py\n+++ b/bundle-workflow/src/system/execute.py\n@@ -15,7 +15,9 @@\n :returns a tuple containing the exit code, stdout, and stderr.\n \"\"\"\n print(f'Executing \"{command}\" in {dir}')\n- result = subprocess.run(command, cwd=dir, shell=True, capture_output=capture, text=True)\n+ result = subprocess.run(\n+ command, cwd=dir, shell=True, capture_output=capture, text=True\n+ )\n if raise_on_failure:\n result.check_returncode()\n return (result.returncode, result.stdout, result.stderr)\n", "issue": "Add support for running bwctest.sh\n\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\n\n\ndef walk(root):\n print(f'Walking tree from {root}')\n for dir, dirs, files in os.walk(root):\n for file_name in files:\n absolute_path = os.path.join(dir, file_name)\n relative_path = os.path.relpath(absolute_path, root)\n yield (os.path.realpath(absolute_path), relative_path)\n", "path": "bundle-workflow/src/paths/tree_walker.py"}, {"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport subprocess\n\n\ndef execute(command, dir, capture=True, raise_on_failure=True):\n \"\"\"\n Execute a shell command inside a directory.\n :param command: The shell command to execute.\n :param dir: The full path to the directory that the command should be executed in.\n :returns a tuple containing the exit code, stdout, and stderr.\n \"\"\"\n print(f'Executing \"{command}\" in {dir}')\n result = subprocess.run(command, cwd=dir, shell=True, capture_output=capture, text=True)\n if raise_on_failure:\n result.check_returncode()\n return (result.returncode, result.stdout, result.stderr)\n", "path": "bundle-workflow/src/system/execute.py"}]} | 939 | 290 |
gh_patches_debug_11553 | rasdani/github-patches | git_diff | carpentries__amy-1652 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Select2 Tag widget error when using on empty event form
Perhaps a related issue: I also got a 500 Server Error when trying to accept a workshop request (e.g. https://amy.carpentries.org/requests/selforganised_submission/552/). On repeated attempts, accepting some of the problematic requests worked, so I am not sure what was happening, but I thought I should report it. This one is still causing problems: https://amy.carpentries.org/requests/selforganised_submission/552/
_Originally posted by @anenadic in https://github.com/carpentries/amy/issues/1650#issuecomment-623344963_
</issue>
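The traceback is not shown in this record, but the 500 error described above is consistent with `optgroups()` receiving an empty `value` list from a blank event form, so `value[0]` raises an `IndexError` before the inline guard can run. The reference patch wraps the split in a try/except; the snippet below reproduces just that logic outside Django so it stays self-contained.

```python
# Stand-alone reproduction of the failure mode and of the patched fallback,
# detached from Django/Select2 so it can run on its own.
TAG_SEPARATOR = ";"


def values_before_patch(value):
    # Mirrors the original line: value[0] is evaluated even when value == []
    return value[0].split(TAG_SEPARATOR) if value[0] else []


def values_after_patch(value):
    # Mirrors the patched logic: missing or None entries fall back to []
    try:
        return value[0].split(TAG_SEPARATOR)
    except (IndexError, AttributeError):
        return []


print(values_after_patch([]))       # [] instead of an IndexError
print(values_after_patch([None]))   # [] instead of an AttributeError
print(values_after_patch(["a;b"]))  # ['a', 'b']
# values_before_patch([]) raises IndexError, matching the reported 500 error.
```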
<code>
[start of amy/workshops/fields.py]
1 from django_select2.forms import (
2 Select2Widget as DS2_Select2Widget,
3 Select2MultipleWidget as DS2_Select2MultipleWidget,
4 ModelSelect2Widget as DS2_ModelSelect2Widget,
5 ModelSelect2MultipleWidget as DS2_ModelSelect2MultipleWidget,
6 Select2TagWidget as DS2_Select2TagWidget,
7 )
8 from django.core.validators import RegexValidator, MaxLengthValidator
9 from django.db import models
10 from django import forms
11 from django.utils.safestring import mark_safe
12
13
14 GHUSERNAME_MAX_LENGTH_VALIDATOR = MaxLengthValidator(39,
15 message='Maximum allowed username length is 39 characters.',
16 )
17 # according to https://stackoverflow.com/q/30281026,
18 # GH username can only contain alphanumeric characters and
19 # hyphens (but not consecutive), cannot start or end with
20 # a hyphen, and can't be longer than 39 characters
21 GHUSERNAME_REGEX_VALIDATOR = RegexValidator(
22 # regex inspired by above StackOverflow thread
23 regex=r'^([a-zA-Z\d](?:-?[a-zA-Z\d])*)$',
24 message='This is not a valid GitHub username.',
25 )
26
27
28 class NullableGithubUsernameField(models.CharField):
29 def __init__(self, **kwargs):
30 kwargs.setdefault('null', True)
31 kwargs.setdefault('blank', True)
32 kwargs.setdefault('default', '')
33 # max length of the GH username is 39 characters
34 kwargs.setdefault('max_length', 39)
35 super().__init__(**kwargs)
36
37 default_validators = [
38 GHUSERNAME_MAX_LENGTH_VALIDATOR,
39 GHUSERNAME_REGEX_VALIDATOR,
40 ]
41
42
43 #------------------------------------------------------------
44
45 class FakeRequiredMixin:
46 def __init__(self, *args, **kwargs):
47 # Intercept "fake_required" attribute that's used for marking field
48 # with "*" (asterisk) even though it's not required.
49 # Additionally `fake_required` doesn't trigger any validation.
50 self.fake_required = kwargs.pop('fake_required', False)
51 super().__init__(*args, **kwargs)
52
53
54 class RadioSelectWithOther(FakeRequiredMixin, forms.RadioSelect):
55 """A RadioSelect widget that should render additional field ('Other').
56
57 We have a number of occurences of two model fields bound together: one
58 containing predefined set of choices, the other being a text input for
59 other input user wants to choose instead of one of our predefined options.
60
61 This widget should help with rendering two widgets in one table row."""
62
63 other_field = None # to be bound later
64
65 def __init__(self, other_field_name, *args, **kwargs):
66 super().__init__(*args, **kwargs)
67 self.other_field_name = other_field_name
68
69
70 class CheckboxSelectMultipleWithOthers(FakeRequiredMixin, forms.CheckboxSelectMultiple):
71 """A multiple choice widget that should render additional field ('Other').
72
73 We have a number of occurences of two model fields bound together: one
74 containing predefined set of choices, the other being a text input for
75 other input user wants to choose instead of one of our predefined options.
76
77 This widget should help with rendering two widgets in one table row."""
78
79 other_field = None # to be bound later
80
81 def __init__(self, other_field_name, *args, **kwargs):
82 super().__init__(*args, **kwargs)
83 self.other_field_name = other_field_name
84
85
86 class RadioSelectFakeMultiple(FakeRequiredMixin, forms.RadioSelect):
87 """Pretend to be a radio-select with multiple selection possible. This
88 is intended to 'fool' Django into thinking that user selected 1 item on
89 a multi-select item list."""
90 allow_multiple_selected = True
91
92
93 class SafeLabelFromInstanceMixin:
94 def label_from_instance(self, obj):
95 return mark_safe(obj)
96
97
98 class SafeModelChoiceField(SafeLabelFromInstanceMixin, forms.ModelChoiceField):
99 pass
100
101
102 class SafeModelMultipleChoiceField(SafeLabelFromInstanceMixin,
103 forms.ModelMultipleChoiceField):
104 pass
105
106
107 class CurriculumModelMultipleChoiceField(SafeModelMultipleChoiceField):
108 def label_from_instance(self, obj):
109 # Display in tooltip (it's a little better than popover, because it
110 # auto-hides and doesn't require clicking on the element, whereas
111 # popover by clicking will automatically select the clicked item)
112 data = (
113 '<a tabindex="0" role="button" data-toggle="tooltip" '
114 'data-placement="top" title="{description}">{obj}</a>'
115 .format(obj=obj, description=obj.description)
116 )
117 return super().label_from_instance(data)
118
119
120 #------------------------------------------------------------
121
122 class Select2BootstrapMixin:
123 def build_attrs(self, *args, **kwargs):
124 attrs = super().build_attrs(*args, **kwargs)
125 attrs.setdefault('data-theme', 'bootstrap4')
126 return attrs
127
128
129 class Select2NoMinimumInputLength:
130 def build_attrs(self, *args, **kwargs):
131 # Let's set up the minimum input length first!
132 # It will overwrite `setdefault('data-minimum-input-length')` from
133 # other mixins.
134 self.attrs.setdefault('data-minimum-input-length', 0)
135 attrs = super().build_attrs(*args, **kwargs)
136 return attrs
137
138
139
140 class Select2Widget(FakeRequiredMixin, Select2BootstrapMixin,
141 DS2_Select2Widget):
142 pass
143
144
145 class Select2MultipleWidget(Select2BootstrapMixin, DS2_Select2MultipleWidget):
146 pass
147
148
149 class ModelSelect2Widget(Select2BootstrapMixin, Select2NoMinimumInputLength,
150 DS2_ModelSelect2Widget):
151 pass
152
153
154 class ModelSelect2MultipleWidget(Select2BootstrapMixin,
155 Select2NoMinimumInputLength,
156 DS2_ModelSelect2MultipleWidget):
157 pass
158
159
160 TAG_SEPARATOR = ';'
161
162
163 class Select2TagWidget(Select2BootstrapMixin, DS2_Select2TagWidget):
164 def build_attrs(self, base_attrs, extra_attrs=None):
165 """Select2's tag attributes. By default other token separators are
166 used, but we want to use "," and ";"."""
167 default_attrs = {
168 'data-minimum-input-length': 1,
169 'data-tags': 'true',
170 'data-token-separators': '[",", ";"]'
171 }
172 assert TAG_SEPARATOR in default_attrs['data-token-separators']
173
174 default_attrs.update(base_attrs)
175 return super().build_attrs(default_attrs, extra_attrs=extra_attrs)
176
177 def value_from_datadict(self, data, files, name):
178 # sometimes data is held as an immutable QueryDict
179 # in those cases, we need to make a copy of it to "disable"
180 # the mutability
181 try:
182 data_mutable = data.copy()
183 except AttributeError:
184 data_mutable = data
185
186 data_mutable.setdefault(name, '')
187 values = super().value_from_datadict(data_mutable, files, name)
188 return TAG_SEPARATOR.join(values)
189
190 def optgroups(self, name, value, attrs=None):
191 """Example from
192 https://django-select2.readthedocs.io/en/latest/django_select2.html#django_select2.forms.Select2TagWidget"""
193 values = value[0].split(TAG_SEPARATOR) if value[0] else []
194 selected = set(values)
195 subgroup = [
196 self.create_option(name, v, v, selected, i)
197 for i, v in enumerate(values)
198 ]
199 return [(None, subgroup, 0)]
200
[end of amy/workshops/fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/amy/workshops/fields.py b/amy/workshops/fields.py
--- a/amy/workshops/fields.py
+++ b/amy/workshops/fields.py
@@ -190,7 +190,11 @@
def optgroups(self, name, value, attrs=None):
"""Example from
https://django-select2.readthedocs.io/en/latest/django_select2.html#django_select2.forms.Select2TagWidget"""
- values = value[0].split(TAG_SEPARATOR) if value[0] else []
+ try:
+ values = value[0].split(TAG_SEPARATOR)
+ except (IndexError, AttributeError):
+ values = []
+
selected = set(values)
subgroup = [
self.create_option(name, v, v, selected, i)
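A note on why the guard above is needed: Django hands `optgroups()` an empty `value` list for an unbound form, so the old one-liner raises `IndexError` as soon as it evaluates `value[0]`. A minimal sketch of the two behaviours, assuming an empty submission (widget setup omitted; these helper names are illustrative, not part of AMY):

```python
TAG_SEPARATOR = ';'

def split_tags_old(value):
    # pre-patch logic: evaluating value[0] raises IndexError on an empty list
    return value[0].split(TAG_SEPARATOR) if value[0] else []

def split_tags_new(value):
    # post-patch logic: fall back to "no tags" for empty or None values
    try:
        return value[0].split(TAG_SEPARATOR)
    except (IndexError, AttributeError):
        return []

print(split_tags_new([]))        # [] -- empty event form renders fine
print(split_tags_new([None]))    # [] -- the AttributeError path
print(split_tags_new(['a;b']))   # ['a', 'b']
# split_tags_old([]) would raise IndexError here
```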
| {"golden_diff": "diff --git a/amy/workshops/fields.py b/amy/workshops/fields.py\n--- a/amy/workshops/fields.py\n+++ b/amy/workshops/fields.py\n@@ -190,7 +190,11 @@\n def optgroups(self, name, value, attrs=None):\n \"\"\"Example from\n https://django-select2.readthedocs.io/en/latest/django_select2.html#django_select2.forms.Select2TagWidget\"\"\"\n- values = value[0].split(TAG_SEPARATOR) if value[0] else []\n+ try:\n+ values = value[0].split(TAG_SEPARATOR)\n+ except (IndexError, AttributeError):\n+ values = []\n+\n selected = set(values)\n subgroup = [\n self.create_option(name, v, v, selected, i)\n", "issue": "Select2 Tag widget error when using on empty event form\nPerhaps a related issue - I also got a 500 Server Error when trying to accept a workshop request (e.g. https://amy.carpentries.org/requests/selforganised_submission/552/) but then on repeated attempts accepting some of the problematic requests worked so not sure what was happening but thought I should report it. This one is still causing problems: https://amy.carpentries.org/requests/selforganised_submission/552/\r\n\r\n_Originally posted by @anenadic in https://github.com/carpentries/amy/issues/1650#issuecomment-623344963_\n", "before_files": [{"content": "from django_select2.forms import (\n Select2Widget as DS2_Select2Widget,\n Select2MultipleWidget as DS2_Select2MultipleWidget,\n ModelSelect2Widget as DS2_ModelSelect2Widget,\n ModelSelect2MultipleWidget as DS2_ModelSelect2MultipleWidget,\n Select2TagWidget as DS2_Select2TagWidget,\n)\nfrom django.core.validators import RegexValidator, MaxLengthValidator\nfrom django.db import models\nfrom django import forms\nfrom django.utils.safestring import mark_safe\n\n\nGHUSERNAME_MAX_LENGTH_VALIDATOR = MaxLengthValidator(39,\n message='Maximum allowed username length is 39 characters.',\n)\n# according to https://stackoverflow.com/q/30281026,\n# GH username can only contain alphanumeric characters and\n# hyphens (but not consecutive), cannot start or end with\n# a hyphen, and can't be longer than 39 characters\nGHUSERNAME_REGEX_VALIDATOR = RegexValidator(\n # regex inspired by above StackOverflow thread\n regex=r'^([a-zA-Z\\d](?:-?[a-zA-Z\\d])*)$',\n message='This is not a valid GitHub username.',\n)\n\n\nclass NullableGithubUsernameField(models.CharField):\n def __init__(self, **kwargs):\n kwargs.setdefault('null', True)\n kwargs.setdefault('blank', True)\n kwargs.setdefault('default', '')\n # max length of the GH username is 39 characters\n kwargs.setdefault('max_length', 39)\n super().__init__(**kwargs)\n\n default_validators = [\n GHUSERNAME_MAX_LENGTH_VALIDATOR,\n GHUSERNAME_REGEX_VALIDATOR,\n ]\n\n\n#------------------------------------------------------------\n\nclass FakeRequiredMixin:\n def __init__(self, *args, **kwargs):\n # Intercept \"fake_required\" attribute that's used for marking field\n # with \"*\" (asterisk) even though it's not required.\n # Additionally `fake_required` doesn't trigger any validation.\n self.fake_required = kwargs.pop('fake_required', False)\n super().__init__(*args, **kwargs)\n\n\nclass RadioSelectWithOther(FakeRequiredMixin, forms.RadioSelect):\n \"\"\"A RadioSelect widget that should render additional field ('Other').\n\n We have a number of occurences of two model fields bound together: one\n containing predefined set of choices, the other being a text input for\n other input user wants to choose instead of one of our predefined options.\n\n This widget should help with rendering two widgets in one table row.\"\"\"\n\n other_field = None # 
to be bound later\n\n def __init__(self, other_field_name, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.other_field_name = other_field_name\n\n\nclass CheckboxSelectMultipleWithOthers(FakeRequiredMixin, forms.CheckboxSelectMultiple):\n \"\"\"A multiple choice widget that should render additional field ('Other').\n\n We have a number of occurences of two model fields bound together: one\n containing predefined set of choices, the other being a text input for\n other input user wants to choose instead of one of our predefined options.\n\n This widget should help with rendering two widgets in one table row.\"\"\"\n\n other_field = None # to be bound later\n\n def __init__(self, other_field_name, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.other_field_name = other_field_name\n\n\nclass RadioSelectFakeMultiple(FakeRequiredMixin, forms.RadioSelect):\n \"\"\"Pretend to be a radio-select with multiple selection possible. This\n is intended to 'fool' Django into thinking that user selected 1 item on\n a multi-select item list.\"\"\"\n allow_multiple_selected = True\n\n\nclass SafeLabelFromInstanceMixin:\n def label_from_instance(self, obj):\n return mark_safe(obj)\n\n\nclass SafeModelChoiceField(SafeLabelFromInstanceMixin, forms.ModelChoiceField):\n pass\n\n\nclass SafeModelMultipleChoiceField(SafeLabelFromInstanceMixin,\n forms.ModelMultipleChoiceField):\n pass\n\n\nclass CurriculumModelMultipleChoiceField(SafeModelMultipleChoiceField):\n def label_from_instance(self, obj):\n # Display in tooltip (it's a little better than popover, because it\n # auto-hides and doesn't require clicking on the element, whereas\n # popover by clicking will automatically select the clicked item)\n data = (\n '<a tabindex=\"0\" role=\"button\" data-toggle=\"tooltip\" '\n 'data-placement=\"top\" title=\"{description}\">{obj}</a>'\n .format(obj=obj, description=obj.description)\n )\n return super().label_from_instance(data)\n\n\n#------------------------------------------------------------\n\nclass Select2BootstrapMixin:\n def build_attrs(self, *args, **kwargs):\n attrs = super().build_attrs(*args, **kwargs)\n attrs.setdefault('data-theme', 'bootstrap4')\n return attrs\n\n\nclass Select2NoMinimumInputLength:\n def build_attrs(self, *args, **kwargs):\n # Let's set up the minimum input length first!\n # It will overwrite `setdefault('data-minimum-input-length')` from\n # other mixins.\n self.attrs.setdefault('data-minimum-input-length', 0)\n attrs = super().build_attrs(*args, **kwargs)\n return attrs\n\n\n\nclass Select2Widget(FakeRequiredMixin, Select2BootstrapMixin,\n DS2_Select2Widget):\n pass\n\n\nclass Select2MultipleWidget(Select2BootstrapMixin, DS2_Select2MultipleWidget):\n pass\n\n\nclass ModelSelect2Widget(Select2BootstrapMixin, Select2NoMinimumInputLength,\n DS2_ModelSelect2Widget):\n pass\n\n\nclass ModelSelect2MultipleWidget(Select2BootstrapMixin,\n Select2NoMinimumInputLength,\n DS2_ModelSelect2MultipleWidget):\n pass\n\n\nTAG_SEPARATOR = ';'\n\n\nclass Select2TagWidget(Select2BootstrapMixin, DS2_Select2TagWidget):\n def build_attrs(self, base_attrs, extra_attrs=None):\n \"\"\"Select2's tag attributes. 
By default other token separators are\n used, but we want to use \",\" and \";\".\"\"\"\n default_attrs = {\n 'data-minimum-input-length': 1,\n 'data-tags': 'true',\n 'data-token-separators': '[\",\", \";\"]'\n }\n assert TAG_SEPARATOR in default_attrs['data-token-separators']\n\n default_attrs.update(base_attrs)\n return super().build_attrs(default_attrs, extra_attrs=extra_attrs)\n\n def value_from_datadict(self, data, files, name):\n # sometimes data is held as an immutable QueryDict\n # in those cases, we need to make a copy of it to \"disable\"\n # the mutability\n try:\n data_mutable = data.copy()\n except AttributeError:\n data_mutable = data\n\n data_mutable.setdefault(name, '')\n values = super().value_from_datadict(data_mutable, files, name)\n return TAG_SEPARATOR.join(values)\n\n def optgroups(self, name, value, attrs=None):\n \"\"\"Example from\n https://django-select2.readthedocs.io/en/latest/django_select2.html#django_select2.forms.Select2TagWidget\"\"\"\n values = value[0].split(TAG_SEPARATOR) if value[0] else []\n selected = set(values)\n subgroup = [\n self.create_option(name, v, v, selected, i)\n for i, v in enumerate(values)\n ]\n return [(None, subgroup, 0)]\n", "path": "amy/workshops/fields.py"}]} | 2,780 | 174 |
gh_patches_debug_16055 | rasdani/github-patches | git_diff | qutebrowser__qutebrowser-3510 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Completing URL with '&' shows '&amp;'
Consider this URL: https://wiki.archlinux.org/api.php?action=query&pageids=12345
After writing `action=query` in the `:open` prompt, it is highlighted correctly. After adding `&` (i.e. the filter is `action=query&`), the URL is changed to `https://wiki.archlinux.org/api.php?action=query&amp;pageids=12345` in the completer and the pattern is still highlighted. After adding anything that follows in the URL, e.g. `action=query&pageids`, the URL is changed back to the original and nothing is highlighted, although the URL still matches the filter as expected.
</issue>
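A standalone sketch of the escaping-order problem described above (simplified: the real delegate also turns escaped spaces into `|`, which is ignored here). It shows why a filter containing `&` splices the highlight `<span>` into the `&amp;` entity of the escaped URL, and why a longer filter stops matching altogether; the `highlight_*` helpers are illustrative names, not qutebrowser API.

```python
import html
import re

url = "https://wiki.archlinux.org/api.php?action=query&pageids=12345"
repl = r'<span class="highlight">\g<0></span>'

def highlight_old(pattern, text):
    # pattern is regex-escaped but not HTML-escaped; text is HTML-escaped
    return re.sub(re.escape(pattern), repl, html.escape(text), flags=re.IGNORECASE)

def highlight_new(pattern, text):
    # escape the pattern the same way as the text before matching
    pat = html.escape(re.escape(pattern))
    return re.sub(pat, repl, html.escape(text), flags=re.IGNORECASE)

# Filter "action=query&": the raw '&' matches the '&' inside '&amp;', so the
# closing </span> lands in the middle of the entity and the row renders with
# a literal '&amp;' in it.
print(highlight_old("action=query&", url))

# Filter "action=query&pageids": no match against 'action=query&amp;pageids',
# so nothing is highlighted at all.
print(highlight_old("action=query&pageids", url))

# With the pattern escaped too, both filters match the escaped text and the
# '&amp;' entity stays intact.
print(highlight_new("action=query&", url))
print(highlight_new("action=query&pageids", url))
```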
<code>
[start of qutebrowser/completion/completiondelegate.py]
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """Completion item delegate for CompletionView.
21
22 We use this to be able to highlight parts of the text.
23 """
24
25 import re
26 import html
27
28 from PyQt5.QtWidgets import QStyle, QStyleOptionViewItem, QStyledItemDelegate
29 from PyQt5.QtCore import QRectF, QSize, Qt
30 from PyQt5.QtGui import (QIcon, QPalette, QTextDocument, QTextOption,
31 QAbstractTextDocumentLayout)
32
33 from qutebrowser.config import config
34 from qutebrowser.utils import qtutils, jinja
35
36
37 _cached_stylesheet = None
38
39
40 class CompletionItemDelegate(QStyledItemDelegate):
41
42 """Delegate used by CompletionView to draw individual items.
43
44 Mainly a cleaned up port of Qt's way to draw a TreeView item, except it
45 uses a QTextDocument to draw the text and add marking.
46
47 Original implementation:
48 qt/src/gui/styles/qcommonstyle.cpp:drawControl:2153
49
50 Attributes:
51 _opt: The QStyleOptionViewItem which is used.
52 _style: The style to be used.
53 _painter: The QPainter to be used.
54 _doc: The QTextDocument to be used.
55 """
56
57 # FIXME this is horribly slow when resizing.
58 # We should probably cache something in _get_textdoc or so, but as soon as
59 # we implement eliding that cache probably isn't worth much anymore...
60 # https://github.com/qutebrowser/qutebrowser/issues/121
61
62 def __init__(self, parent=None):
63 self._painter = None
64 self._opt = None
65 self._doc = None
66 self._style = None
67 super().__init__(parent)
68
69 def _draw_background(self):
70 """Draw the background of an ItemViewItem."""
71 self._style.drawPrimitive(self._style.PE_PanelItemViewItem, self._opt,
72 self._painter, self._opt.widget)
73
74 def _draw_icon(self):
75 """Draw the icon of an ItemViewItem."""
76 icon_rect = self._style.subElementRect(
77 self._style.SE_ItemViewItemDecoration, self._opt, self._opt.widget)
78 if not icon_rect.isValid():
79 # The rect seems to be wrong in all kind of ways if no icon should
80 # be displayed.
81 return
82
83 mode = QIcon.Normal
84 if not self._opt.state & QStyle.State_Enabled:
85 mode = QIcon.Disabled
86 elif self._opt.state & QStyle.State_Selected:
87 mode = QIcon.Selected
88 state = QIcon.On if self._opt.state & QStyle.State_Open else QIcon.Off
89 self._opt.icon.paint(self._painter, icon_rect,
90 self._opt.decorationAlignment, mode, state)
91
92 def _draw_text(self, index):
93 """Draw the text of an ItemViewItem.
94
95 This is the main part where we differ from the original implementation
96 in Qt: We use a QTextDocument to draw text.
97
98 Args:
99 index: The QModelIndex of the item to draw.
100 """
101 if not self._opt.text:
102 return
103
104 text_rect_ = self._style.subElementRect(
105 self._style.SE_ItemViewItemText, self._opt, self._opt.widget)
106 qtutils.ensure_valid(text_rect_)
107 margin = self._style.pixelMetric(QStyle.PM_FocusFrameHMargin,
108 self._opt, self._opt.widget) + 1
109 # remove width padding
110 text_rect = text_rect_.adjusted(margin, 0, -margin, 0)
111 qtutils.ensure_valid(text_rect)
112 # move text upwards a bit
113 if index.parent().isValid():
114 text_rect.adjust(0, -1, 0, -1)
115 else:
116 text_rect.adjust(0, -2, 0, -2)
117 self._painter.save()
118 state = self._opt.state
119 if state & QStyle.State_Enabled and state & QStyle.State_Active:
120 cg = QPalette.Normal
121 elif state & QStyle.State_Enabled:
122 cg = QPalette.Inactive
123 else:
124 cg = QPalette.Disabled
125
126 if state & QStyle.State_Selected:
127 self._painter.setPen(self._opt.palette.color(
128 cg, QPalette.HighlightedText))
129 # This is a dirty fix for the text jumping by one pixel for
130 # whatever reason.
131 text_rect.adjust(0, -1, 0, 0)
132 else:
133 self._painter.setPen(self._opt.palette.color(cg, QPalette.Text))
134
135 if state & QStyle.State_Editing:
136 self._painter.setPen(self._opt.palette.color(cg, QPalette.Text))
137 self._painter.drawRect(text_rect_.adjusted(0, 0, -1, -1))
138
139 self._painter.translate(text_rect.left(), text_rect.top())
140 self._get_textdoc(index)
141 self._draw_textdoc(text_rect, index.column())
142 self._painter.restore()
143
144 def _draw_textdoc(self, rect, col):
145 """Draw the QTextDocument of an item.
146
147 Args:
148 rect: The QRect to clip the drawing to.
149 """
150 # We can't use drawContents because then the color would be ignored.
151 clip = QRectF(0, 0, rect.width(), rect.height())
152 self._painter.save()
153
154 if self._opt.state & QStyle.State_Selected:
155 color = config.val.colors.completion.item.selected.fg
156 elif not self._opt.state & QStyle.State_Enabled:
157 color = config.val.colors.completion.category.fg
158 else:
159 colors = config.val.colors.completion.fg
160 # if multiple colors are set, use different colors per column
161 color = colors[col % len(colors)]
162 self._painter.setPen(color)
163
164 ctx = QAbstractTextDocumentLayout.PaintContext()
165 ctx.palette.setColor(QPalette.Text, self._painter.pen().color())
166 if clip.isValid():
167 self._painter.setClipRect(clip)
168 ctx.clip = clip
169 self._doc.documentLayout().draw(self._painter, ctx)
170 self._painter.restore()
171
172 def _get_textdoc(self, index):
173 """Create the QTextDocument of an item.
174
175 Args:
176 index: The QModelIndex of the item to draw.
177 """
178 # FIXME we probably should do eliding here. See
179 # qcommonstyle.cpp:viewItemDrawText
180 # https://github.com/qutebrowser/qutebrowser/issues/118
181 text_option = QTextOption()
182 if self._opt.features & QStyleOptionViewItem.WrapText:
183 text_option.setWrapMode(QTextOption.WordWrap)
184 else:
185 text_option.setWrapMode(QTextOption.ManualWrap)
186 text_option.setTextDirection(self._opt.direction)
187 text_option.setAlignment(QStyle.visualAlignment(
188 self._opt.direction, self._opt.displayAlignment))
189
190 if self._doc is not None:
191 self._doc.deleteLater()
192 self._doc = QTextDocument(self)
193 self._doc.setDefaultFont(self._opt.font)
194 self._doc.setDefaultTextOption(text_option)
195 self._doc.setDocumentMargin(2)
196
197 assert _cached_stylesheet is not None
198 self._doc.setDefaultStyleSheet(_cached_stylesheet)
199
200 if index.parent().isValid():
201 view = self.parent()
202 pattern = view.pattern
203 columns_to_filter = index.model().columns_to_filter(index)
204 if index.column() in columns_to_filter and pattern:
205 repl = r'<span class="highlight">\g<0></span>'
206 text = re.sub(re.escape(pattern).replace(r'\ ', r'|'),
207 repl, html.escape(self._opt.text),
208 flags=re.IGNORECASE)
209 self._doc.setHtml(text)
210 else:
211 self._doc.setPlainText(self._opt.text)
212 else:
213 self._doc.setHtml(
214 '<span style="font: {};">{}</span>'.format(
215 html.escape(config.val.fonts.completion.category),
216 html.escape(self._opt.text)))
217
218 def _draw_focus_rect(self):
219 """Draw the focus rectangle of an ItemViewItem."""
220 state = self._opt.state
221 if not state & QStyle.State_HasFocus:
222 return
223 o = self._opt
224 o.rect = self._style.subElementRect(
225 self._style.SE_ItemViewItemFocusRect, self._opt, self._opt.widget)
226 o.state |= QStyle.State_KeyboardFocusChange | QStyle.State_Item
227 qtutils.ensure_valid(o.rect)
228 if state & QStyle.State_Enabled:
229 cg = QPalette.Normal
230 else:
231 cg = QPalette.Disabled
232 if state & QStyle.State_Selected:
233 role = QPalette.Highlight
234 else:
235 role = QPalette.Window
236 o.backgroundColor = self._opt.palette.color(cg, role)
237 self._style.drawPrimitive(QStyle.PE_FrameFocusRect, o, self._painter,
238 self._opt.widget)
239
240 def sizeHint(self, option, index):
241 """Override sizeHint of QStyledItemDelegate.
242
243 Return the cell size based on the QTextDocument size, but might not
244 work correctly yet.
245
246 Args:
247 option: const QStyleOptionViewItem & option
248 index: const QModelIndex & index
249
250 Return:
251 A QSize with the recommended size.
252 """
253 value = index.data(Qt.SizeHintRole)
254 if value is not None:
255 return value
256 self._opt = QStyleOptionViewItem(option)
257 self.initStyleOption(self._opt, index)
258 self._style = self._opt.widget.style()
259 self._get_textdoc(index)
260 docsize = self._doc.size().toSize()
261 size = self._style.sizeFromContents(QStyle.CT_ItemViewItem, self._opt,
262 docsize, self._opt.widget)
263 qtutils.ensure_valid(size)
264 return size + QSize(10, 3)
265
266 def paint(self, painter, option, index):
267 """Override the QStyledItemDelegate paint function.
268
269 Args:
270 painter: QPainter * painter
271 option: const QStyleOptionViewItem & option
272 index: const QModelIndex & index
273 """
274 self._painter = painter
275 self._painter.save()
276 self._opt = QStyleOptionViewItem(option)
277 self.initStyleOption(self._opt, index)
278 self._style = self._opt.widget.style()
279
280 self._draw_background()
281 self._draw_icon()
282 self._draw_text(index)
283 self._draw_focus_rect()
284
285 self._painter.restore()
286
287
288 @config.change_filter('colors.completion.match.fg', function=True)
289 def _update_stylesheet():
290 """Update the cached stylesheet."""
291 stylesheet = """
292 .highlight {
293 color: {{ conf.colors.completion.match.fg }};
294 }
295 """
296 with jinja.environment.no_autoescape():
297 template = jinja.environment.from_string(stylesheet)
298
299 global _cached_stylesheet
300 _cached_stylesheet = template.render(conf=config.val)
301
302
303 def init():
304 """Initialize the cached stylesheet."""
305 _update_stylesheet()
306 config.instance.changed.connect(_update_stylesheet)
307
[end of qutebrowser/completion/completiondelegate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/qutebrowser/completion/completiondelegate.py b/qutebrowser/completion/completiondelegate.py
--- a/qutebrowser/completion/completiondelegate.py
+++ b/qutebrowser/completion/completiondelegate.py
@@ -203,9 +203,9 @@
columns_to_filter = index.model().columns_to_filter(index)
if index.column() in columns_to_filter and pattern:
repl = r'<span class="highlight">\g<0></span>'
- text = re.sub(re.escape(pattern).replace(r'\ ', r'|'),
- repl, html.escape(self._opt.text),
- flags=re.IGNORECASE)
+ pat = html.escape(re.escape(pattern)).replace(r'\ ', r'|')
+ txt = html.escape(self._opt.text)
+ text = re.sub(pat, repl, txt, flags=re.IGNORECASE)
self._doc.setHtml(text)
else:
self._doc.setPlainText(self._opt.text)
| {"golden_diff": "diff --git a/qutebrowser/completion/completiondelegate.py b/qutebrowser/completion/completiondelegate.py\n--- a/qutebrowser/completion/completiondelegate.py\n+++ b/qutebrowser/completion/completiondelegate.py\n@@ -203,9 +203,9 @@\n columns_to_filter = index.model().columns_to_filter(index)\n if index.column() in columns_to_filter and pattern:\n repl = r'<span class=\"highlight\">\\g<0></span>'\n- text = re.sub(re.escape(pattern).replace(r'\\ ', r'|'),\n- repl, html.escape(self._opt.text),\n- flags=re.IGNORECASE)\n+ pat = html.escape(re.escape(pattern)).replace(r'\\ ', r'|')\n+ txt = html.escape(self._opt.text)\n+ text = re.sub(pat, repl, txt, flags=re.IGNORECASE)\n self._doc.setHtml(text)\n else:\n self._doc.setPlainText(self._opt.text)\n", "issue": "Completing URL with '&' shows '&'\nConsider this URL: https://wiki.archlinux.org/api.php?action=query&pageids=12345\r\n\r\nAfter writing `action=query` in the `:open` prompt, it is highlighted correctly. After adding `&` (i.e. the filter is `action=query&`), the URL is changed to `https://wiki.archlinux.org/api.php?action=query&pageids=12345` in the completer and the pattern is still highlighted. After adding anything what follows in the URL, e.g. `action=query&pageids`, the URL is changed back to the original and nothing is highlighted, although the URL still matches the filter as expected.\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Completion item delegate for CompletionView.\n\nWe use this to be able to highlight parts of the text.\n\"\"\"\n\nimport re\nimport html\n\nfrom PyQt5.QtWidgets import QStyle, QStyleOptionViewItem, QStyledItemDelegate\nfrom PyQt5.QtCore import QRectF, QSize, Qt\nfrom PyQt5.QtGui import (QIcon, QPalette, QTextDocument, QTextOption,\n QAbstractTextDocumentLayout)\n\nfrom qutebrowser.config import config\nfrom qutebrowser.utils import qtutils, jinja\n\n\n_cached_stylesheet = None\n\n\nclass CompletionItemDelegate(QStyledItemDelegate):\n\n \"\"\"Delegate used by CompletionView to draw individual items.\n\n Mainly a cleaned up port of Qt's way to draw a TreeView item, except it\n uses a QTextDocument to draw the text and add marking.\n\n Original implementation:\n qt/src/gui/styles/qcommonstyle.cpp:drawControl:2153\n\n Attributes:\n _opt: The QStyleOptionViewItem which is used.\n _style: The style to be used.\n _painter: The QPainter to be used.\n _doc: The QTextDocument to be used.\n \"\"\"\n\n # FIXME this is horribly slow when resizing.\n # We should probably cache something in _get_textdoc or so, but as soon as\n # we implement eliding that cache probably isn't worth much anymore...\n # https://github.com/qutebrowser/qutebrowser/issues/121\n\n def __init__(self, parent=None):\n self._painter = None\n self._opt = None\n self._doc = None\n self._style = None\n super().__init__(parent)\n\n def _draw_background(self):\n \"\"\"Draw the background of an ItemViewItem.\"\"\"\n self._style.drawPrimitive(self._style.PE_PanelItemViewItem, self._opt,\n self._painter, self._opt.widget)\n\n def _draw_icon(self):\n \"\"\"Draw the icon of an ItemViewItem.\"\"\"\n icon_rect = self._style.subElementRect(\n self._style.SE_ItemViewItemDecoration, self._opt, self._opt.widget)\n if not icon_rect.isValid():\n # The rect seems to be wrong in all kind of ways if no icon should\n # be displayed.\n return\n\n mode = QIcon.Normal\n if not self._opt.state & QStyle.State_Enabled:\n mode = QIcon.Disabled\n elif self._opt.state & QStyle.State_Selected:\n mode = QIcon.Selected\n state = QIcon.On if self._opt.state & QStyle.State_Open else QIcon.Off\n self._opt.icon.paint(self._painter, icon_rect,\n self._opt.decorationAlignment, mode, state)\n\n def _draw_text(self, index):\n \"\"\"Draw the text of an ItemViewItem.\n\n This is the main part where we differ from the original implementation\n in Qt: We use a QTextDocument to draw text.\n\n Args:\n index: The QModelIndex of the item to draw.\n \"\"\"\n if not self._opt.text:\n return\n\n text_rect_ = self._style.subElementRect(\n self._style.SE_ItemViewItemText, self._opt, self._opt.widget)\n qtutils.ensure_valid(text_rect_)\n margin = self._style.pixelMetric(QStyle.PM_FocusFrameHMargin,\n self._opt, self._opt.widget) + 1\n # remove width padding\n text_rect = text_rect_.adjusted(margin, 0, -margin, 0)\n qtutils.ensure_valid(text_rect)\n # move text upwards a bit\n if index.parent().isValid():\n text_rect.adjust(0, -1, 0, -1)\n else:\n text_rect.adjust(0, -2, 0, -2)\n self._painter.save()\n state = self._opt.state\n if state & QStyle.State_Enabled and state & QStyle.State_Active:\n cg = QPalette.Normal\n elif state & QStyle.State_Enabled:\n cg = QPalette.Inactive\n else:\n cg = QPalette.Disabled\n\n if state & QStyle.State_Selected:\n self._painter.setPen(self._opt.palette.color(\n cg, QPalette.HighlightedText))\n # This is a dirty fix for the text jumping by one pixel for\n # whatever reason.\n text_rect.adjust(0, -1, 0, 
0)\n else:\n self._painter.setPen(self._opt.palette.color(cg, QPalette.Text))\n\n if state & QStyle.State_Editing:\n self._painter.setPen(self._opt.palette.color(cg, QPalette.Text))\n self._painter.drawRect(text_rect_.adjusted(0, 0, -1, -1))\n\n self._painter.translate(text_rect.left(), text_rect.top())\n self._get_textdoc(index)\n self._draw_textdoc(text_rect, index.column())\n self._painter.restore()\n\n def _draw_textdoc(self, rect, col):\n \"\"\"Draw the QTextDocument of an item.\n\n Args:\n rect: The QRect to clip the drawing to.\n \"\"\"\n # We can't use drawContents because then the color would be ignored.\n clip = QRectF(0, 0, rect.width(), rect.height())\n self._painter.save()\n\n if self._opt.state & QStyle.State_Selected:\n color = config.val.colors.completion.item.selected.fg\n elif not self._opt.state & QStyle.State_Enabled:\n color = config.val.colors.completion.category.fg\n else:\n colors = config.val.colors.completion.fg\n # if multiple colors are set, use different colors per column\n color = colors[col % len(colors)]\n self._painter.setPen(color)\n\n ctx = QAbstractTextDocumentLayout.PaintContext()\n ctx.palette.setColor(QPalette.Text, self._painter.pen().color())\n if clip.isValid():\n self._painter.setClipRect(clip)\n ctx.clip = clip\n self._doc.documentLayout().draw(self._painter, ctx)\n self._painter.restore()\n\n def _get_textdoc(self, index):\n \"\"\"Create the QTextDocument of an item.\n\n Args:\n index: The QModelIndex of the item to draw.\n \"\"\"\n # FIXME we probably should do eliding here. See\n # qcommonstyle.cpp:viewItemDrawText\n # https://github.com/qutebrowser/qutebrowser/issues/118\n text_option = QTextOption()\n if self._opt.features & QStyleOptionViewItem.WrapText:\n text_option.setWrapMode(QTextOption.WordWrap)\n else:\n text_option.setWrapMode(QTextOption.ManualWrap)\n text_option.setTextDirection(self._opt.direction)\n text_option.setAlignment(QStyle.visualAlignment(\n self._opt.direction, self._opt.displayAlignment))\n\n if self._doc is not None:\n self._doc.deleteLater()\n self._doc = QTextDocument(self)\n self._doc.setDefaultFont(self._opt.font)\n self._doc.setDefaultTextOption(text_option)\n self._doc.setDocumentMargin(2)\n\n assert _cached_stylesheet is not None\n self._doc.setDefaultStyleSheet(_cached_stylesheet)\n\n if index.parent().isValid():\n view = self.parent()\n pattern = view.pattern\n columns_to_filter = index.model().columns_to_filter(index)\n if index.column() in columns_to_filter and pattern:\n repl = r'<span class=\"highlight\">\\g<0></span>'\n text = re.sub(re.escape(pattern).replace(r'\\ ', r'|'),\n repl, html.escape(self._opt.text),\n flags=re.IGNORECASE)\n self._doc.setHtml(text)\n else:\n self._doc.setPlainText(self._opt.text)\n else:\n self._doc.setHtml(\n '<span style=\"font: {};\">{}</span>'.format(\n html.escape(config.val.fonts.completion.category),\n html.escape(self._opt.text)))\n\n def _draw_focus_rect(self):\n \"\"\"Draw the focus rectangle of an ItemViewItem.\"\"\"\n state = self._opt.state\n if not state & QStyle.State_HasFocus:\n return\n o = self._opt\n o.rect = self._style.subElementRect(\n self._style.SE_ItemViewItemFocusRect, self._opt, self._opt.widget)\n o.state |= QStyle.State_KeyboardFocusChange | QStyle.State_Item\n qtutils.ensure_valid(o.rect)\n if state & QStyle.State_Enabled:\n cg = QPalette.Normal\n else:\n cg = QPalette.Disabled\n if state & QStyle.State_Selected:\n role = QPalette.Highlight\n else:\n role = QPalette.Window\n o.backgroundColor = self._opt.palette.color(cg, role)\n 
self._style.drawPrimitive(QStyle.PE_FrameFocusRect, o, self._painter,\n self._opt.widget)\n\n def sizeHint(self, option, index):\n \"\"\"Override sizeHint of QStyledItemDelegate.\n\n Return the cell size based on the QTextDocument size, but might not\n work correctly yet.\n\n Args:\n option: const QStyleOptionViewItem & option\n index: const QModelIndex & index\n\n Return:\n A QSize with the recommended size.\n \"\"\"\n value = index.data(Qt.SizeHintRole)\n if value is not None:\n return value\n self._opt = QStyleOptionViewItem(option)\n self.initStyleOption(self._opt, index)\n self._style = self._opt.widget.style()\n self._get_textdoc(index)\n docsize = self._doc.size().toSize()\n size = self._style.sizeFromContents(QStyle.CT_ItemViewItem, self._opt,\n docsize, self._opt.widget)\n qtutils.ensure_valid(size)\n return size + QSize(10, 3)\n\n def paint(self, painter, option, index):\n \"\"\"Override the QStyledItemDelegate paint function.\n\n Args:\n painter: QPainter * painter\n option: const QStyleOptionViewItem & option\n index: const QModelIndex & index\n \"\"\"\n self._painter = painter\n self._painter.save()\n self._opt = QStyleOptionViewItem(option)\n self.initStyleOption(self._opt, index)\n self._style = self._opt.widget.style()\n\n self._draw_background()\n self._draw_icon()\n self._draw_text(index)\n self._draw_focus_rect()\n\n self._painter.restore()\n\n\[email protected]_filter('colors.completion.match.fg', function=True)\ndef _update_stylesheet():\n \"\"\"Update the cached stylesheet.\"\"\"\n stylesheet = \"\"\"\n .highlight {\n color: {{ conf.colors.completion.match.fg }};\n }\n \"\"\"\n with jinja.environment.no_autoescape():\n template = jinja.environment.from_string(stylesheet)\n\n global _cached_stylesheet\n _cached_stylesheet = template.render(conf=config.val)\n\n\ndef init():\n \"\"\"Initialize the cached stylesheet.\"\"\"\n _update_stylesheet()\n config.instance.changed.connect(_update_stylesheet)\n", "path": "qutebrowser/completion/completiondelegate.py"}]} | 4,076 | 205 |
gh_patches_debug_31112 | rasdani/github-patches | git_diff | quantumlib__Cirq-1646 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Complex numbers can be approximately equal to integers
but approx_eq incorrectly disagrees
</issue>
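A self-contained sketch of the mismatch (it re-implements just the two primitive-type branches quoted below rather than importing cirq, and `approx_eq_old` is an illustrative name): an `int` compared with an equal `complex` never reaches the tolerance check, because each branch insists the other operand has the same primitive type.

```python
import numpy as np

def _isclose(a, b, *, atol):
    return bool(np.isclose([a], [b], atol=atol, rtol=0.0)[0])

def approx_eq_old(val, other, *, atol=1e-8):
    # mirrors the int/float and complex branches of the current code
    if isinstance(val, (int, float)):
        if not isinstance(other, (int, float)):
            return False          # 1 vs (1 + 0j) bails out here
        return _isclose(val, other, atol=atol)
    if isinstance(val, complex):
        if not isinstance(other, complex):
            return False          # (1 + 0j) vs 1 bails out here
        return _isclose(val, other, atol=atol)
    return val == other

print(approx_eq_old(1, 1 + 0j))        # False, although the values are equal
print(_isclose(1, 1 + 0j, atol=1e-8))  # True once the comparison is allowed
```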
<code>
[start of cirq/protocols/approximate_equality.py]
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Any, Union
16
17 import numpy as np
18
19 from typing_extensions import Protocol
20
21
22 class SupportsApproximateEquality(Protocol):
23 """Object which can be compared approximately."""
24
25 def _approx_eq_(
26 self,
27 other: Any,
28 *,
29 atol: Union[int, float]
30 ) -> bool:
31 """Approximate comparator.
32
33 Types implementing this protocol define their own logic for approximate
34 comparison with other types.
35
36 Args:
37 other: Target object for approximate comparison.
38 atol: The minimum absolute tolerance. See np.isclose() documentation
39 for details.
40
41 Returns:
42 True if objects are approximately equal, False otherwise. Returns
43 NotImplemented when approximate equality is not implemented for
44 given types.
45 """
46
47
48 def approx_eq(val: Any, other: Any, *, atol: Union[int, float] = 1e-8) -> bool:
49 """Approximately compares two objects.
50
51 If `val` implements SupportsApproxEquality protocol then it is invoked and
52 takes precedence over all other checks:
53 - For primitive numeric types `int` and `float` approximate equality is
54 delegated to math.isclose().
55 - For complex primitive type the real and imaginary parts are treated
56 independently and compared using math.isclose().
57 - For `val` and `other` both iterable of the same length, consecutive
58 elements are compared recursively. Types of `val` and `other` does not
59 necessarily needs to match each other. They just need to be iterable and
60 have the same structure.
61
62 Args:
63 val: Source object for approximate comparison.
64 other: Target object for approximate comparison.
65 atol: The minimum absolute tolerance. See np.isclose() documentation for
66 details. Defaults to 1e-8 which matches np.isclose() default
67 absolute tolerance.
68
69 Returns:
70 True if objects are approximately equal, False otherwise.
71 """
72
73 # Check if val defines approximate equality via _approx_eq_. This takes
74 # precedence over all other overloads.
75 approx_eq_getter = getattr(val, '_approx_eq_', None)
76 if approx_eq_getter is not None:
77 result = approx_eq_getter(other, atol)
78 if result is not NotImplemented:
79 return result
80
81 # The same for other to make approx_eq symmetric.
82 other_approx_eq_getter = getattr(other, '_approx_eq_', None)
83 if other_approx_eq_getter is not None:
84 result = other_approx_eq_getter(val, atol)
85 if result is not NotImplemented:
86 return result
87
88 # Compare primitive types directly.
89 if isinstance(val, (int, float)):
90 if not isinstance(other, (int, float)):
91 return False
92 return _isclose(val, other, atol=atol)
93
94 if isinstance(val, complex):
95 if not isinstance(other, complex):
96 return False
97 return _isclose(val, other, atol=atol)
98
99 # Try to compare source and target recursively, assuming they're iterable.
100 result = _approx_eq_iterables(val, other, atol=atol)
101
102 # Fallback to __eq__() when anything else fails.
103 if result is NotImplemented:
104 return val == other
105 return result
106
107
108 def _approx_eq_iterables(val: Any, other: Any, *,
109 atol: Union[int, float]) -> bool:
110 """Iterates over arguments and calls approx_eq recursively.
111
112 Types of `val` and `other` does not necessarily needs to match each other.
113 They just need to be iterable of the same length and have the same
114 structure, approx_eq() will be called on each consecutive element of `val`
115 and `other`.
116
117 Args:
118 val: Source for approximate comparison.
119 other: Target for approximate comparison.
120 atol: The minimum absolute tolerance. See np.isclose() documentation for
121 details.
122
123 Returns:
124 True if objects are approximately equal, False otherwise. Returns
125 NotImplemented when approximate equality is not implemented for given
126 types.
127 """
128
129 def get_iter(iterable):
130 try:
131 return iter(iterable)
132 except TypeError:
133 return None
134
135 val_it = get_iter(val)
136 other_it = get_iter(other)
137
138 if val_it is not None and other_it is not None:
139 while True:
140 try:
141 val_next = next(val_it)
142 except StopIteration:
143 try:
144 next(other_it)
145 return False
146 except StopIteration:
147 return True
148
149 try:
150 other_next = next(other_it)
151 except StopIteration:
152 return False
153
154 result = approx_eq(val_next, other_next, atol=atol)
155 if result is not True:
156 return result
157
158 return NotImplemented
159
160
161 def _isclose(a: Any, b: Any, *, atol: Union[int, float]) -> bool:
162 """Convenience wrapper around np.isclose."""
163 return True if np.isclose([a], [b], atol=atol, rtol=0.0)[0] else False
164
[end of cirq/protocols/approximate_equality.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cirq/protocols/approximate_equality.py b/cirq/protocols/approximate_equality.py
--- a/cirq/protocols/approximate_equality.py
+++ b/cirq/protocols/approximate_equality.py
@@ -13,7 +13,10 @@
# limitations under the License.
from typing import Any, Union
+from fractions import Fraction
+from decimal import Decimal
+import numbers
import numpy as np
from typing_extensions import Protocol
@@ -86,15 +89,12 @@
return result
# Compare primitive types directly.
- if isinstance(val, (int, float)):
- if not isinstance(other, (int, float)):
+ if isinstance(val, numbers.Number):
+ if not isinstance(other, numbers.Number):
return False
- return _isclose(val, other, atol=atol)
-
- if isinstance(val, complex):
- if not isinstance(other, complex):
- return False
- return _isclose(val, other, atol=atol)
+ result = _isclose(val, other, atol=atol)
+ if result is not NotImplemented:
+ return result
# Try to compare source and target recursively, assuming they're iterable.
result = _approx_eq_iterables(val, other, atol=atol)
@@ -160,4 +160,19 @@
def _isclose(a: Any, b: Any, *, atol: Union[int, float]) -> bool:
"""Convenience wrapper around np.isclose."""
- return True if np.isclose([a], [b], atol=atol, rtol=0.0)[0] else False
+
+ # support casting some standard numeric types
+ x1 = np.asarray([a])
+ if isinstance(a, (Fraction, Decimal)):
+ x1 = x1.astype(np.float64)
+ x2 = np.asarray([b])
+ if isinstance(b, (Fraction, Decimal)):
+ x2 = x2.astype(np.float64)
+
+ # workaround np.isfinite type limitations. Cast to bool to avoid np.bool_
+ try:
+ result = bool(np.isclose(x1, x2, atol=atol, rtol=0.0)[0])
+ except TypeError:
+ return NotImplemented
+
+ return result
| {"golden_diff": "diff --git a/cirq/protocols/approximate_equality.py b/cirq/protocols/approximate_equality.py\n--- a/cirq/protocols/approximate_equality.py\n+++ b/cirq/protocols/approximate_equality.py\n@@ -13,7 +13,10 @@\n # limitations under the License.\n \n from typing import Any, Union\n+from fractions import Fraction\n+from decimal import Decimal\n \n+import numbers\n import numpy as np\n \n from typing_extensions import Protocol\n@@ -86,15 +89,12 @@\n return result\n \n # Compare primitive types directly.\n- if isinstance(val, (int, float)):\n- if not isinstance(other, (int, float)):\n+ if isinstance(val, numbers.Number):\n+ if not isinstance(other, numbers.Number):\n return False\n- return _isclose(val, other, atol=atol)\n-\n- if isinstance(val, complex):\n- if not isinstance(other, complex):\n- return False\n- return _isclose(val, other, atol=atol)\n+ result = _isclose(val, other, atol=atol)\n+ if result is not NotImplemented:\n+ return result\n \n # Try to compare source and target recursively, assuming they're iterable.\n result = _approx_eq_iterables(val, other, atol=atol)\n@@ -160,4 +160,19 @@\n \n def _isclose(a: Any, b: Any, *, atol: Union[int, float]) -> bool:\n \"\"\"Convenience wrapper around np.isclose.\"\"\"\n- return True if np.isclose([a], [b], atol=atol, rtol=0.0)[0] else False\n+\n+ # support casting some standard numeric types\n+ x1 = np.asarray([a])\n+ if isinstance(a, (Fraction, Decimal)):\n+ x1 = x1.astype(np.float64)\n+ x2 = np.asarray([b])\n+ if isinstance(b, (Fraction, Decimal)):\n+ x2 = x2.astype(np.float64)\n+\n+ # workaround np.isfinite type limitations. Cast to bool to avoid np.bool_\n+ try:\n+ result = bool(np.isclose(x1, x2, atol=atol, rtol=0.0)[0])\n+ except TypeError:\n+ return NotImplemented\n+\n+ return result\n", "issue": "Complex numbers can be approximately equal to integers\nbut approx_eq incorrectly disagrees\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, Union\n\nimport numpy as np\n\nfrom typing_extensions import Protocol\n\n\nclass SupportsApproximateEquality(Protocol):\n \"\"\"Object which can be compared approximately.\"\"\"\n\n def _approx_eq_(\n self,\n other: Any,\n *,\n atol: Union[int, float]\n ) -> bool:\n \"\"\"Approximate comparator.\n\n Types implementing this protocol define their own logic for approximate\n comparison with other types.\n\n Args:\n other: Target object for approximate comparison.\n atol: The minimum absolute tolerance. See np.isclose() documentation\n for details.\n\n Returns:\n True if objects are approximately equal, False otherwise. 
Returns\n NotImplemented when approximate equality is not implemented for\n given types.\n \"\"\"\n\n\ndef approx_eq(val: Any, other: Any, *, atol: Union[int, float] = 1e-8) -> bool:\n \"\"\"Approximately compares two objects.\n\n If `val` implements SupportsApproxEquality protocol then it is invoked and\n takes precedence over all other checks:\n - For primitive numeric types `int` and `float` approximate equality is\n delegated to math.isclose().\n - For complex primitive type the real and imaginary parts are treated\n independently and compared using math.isclose().\n - For `val` and `other` both iterable of the same length, consecutive\n elements are compared recursively. Types of `val` and `other` does not\n necessarily needs to match each other. They just need to be iterable and\n have the same structure.\n\n Args:\n val: Source object for approximate comparison.\n other: Target object for approximate comparison.\n atol: The minimum absolute tolerance. See np.isclose() documentation for\n details. Defaults to 1e-8 which matches np.isclose() default\n absolute tolerance.\n\n Returns:\n True if objects are approximately equal, False otherwise.\n \"\"\"\n\n # Check if val defines approximate equality via _approx_eq_. This takes\n # precedence over all other overloads.\n approx_eq_getter = getattr(val, '_approx_eq_', None)\n if approx_eq_getter is not None:\n result = approx_eq_getter(other, atol)\n if result is not NotImplemented:\n return result\n\n # The same for other to make approx_eq symmetric.\n other_approx_eq_getter = getattr(other, '_approx_eq_', None)\n if other_approx_eq_getter is not None:\n result = other_approx_eq_getter(val, atol)\n if result is not NotImplemented:\n return result\n\n # Compare primitive types directly.\n if isinstance(val, (int, float)):\n if not isinstance(other, (int, float)):\n return False\n return _isclose(val, other, atol=atol)\n\n if isinstance(val, complex):\n if not isinstance(other, complex):\n return False\n return _isclose(val, other, atol=atol)\n\n # Try to compare source and target recursively, assuming they're iterable.\n result = _approx_eq_iterables(val, other, atol=atol)\n\n # Fallback to __eq__() when anything else fails.\n if result is NotImplemented:\n return val == other\n return result\n\n\ndef _approx_eq_iterables(val: Any, other: Any, *,\n atol: Union[int, float]) -> bool:\n \"\"\"Iterates over arguments and calls approx_eq recursively.\n\n Types of `val` and `other` does not necessarily needs to match each other.\n They just need to be iterable of the same length and have the same\n structure, approx_eq() will be called on each consecutive element of `val`\n and `other`.\n\n Args:\n val: Source for approximate comparison.\n other: Target for approximate comparison.\n atol: The minimum absolute tolerance. See np.isclose() documentation for\n details.\n\n Returns:\n True if objects are approximately equal, False otherwise. 
Returns\n NotImplemented when approximate equality is not implemented for given\n types.\n \"\"\"\n\n def get_iter(iterable):\n try:\n return iter(iterable)\n except TypeError:\n return None\n\n val_it = get_iter(val)\n other_it = get_iter(other)\n\n if val_it is not None and other_it is not None:\n while True:\n try:\n val_next = next(val_it)\n except StopIteration:\n try:\n next(other_it)\n return False\n except StopIteration:\n return True\n\n try:\n other_next = next(other_it)\n except StopIteration:\n return False\n\n result = approx_eq(val_next, other_next, atol=atol)\n if result is not True:\n return result\n\n return NotImplemented\n\n\ndef _isclose(a: Any, b: Any, *, atol: Union[int, float]) -> bool:\n \"\"\"Convenience wrapper around np.isclose.\"\"\"\n return True if np.isclose([a], [b], atol=atol, rtol=0.0)[0] else False\n", "path": "cirq/protocols/approximate_equality.py"}]} | 2,151 | 517 |
gh_patches_debug_17180 | rasdani/github-patches | git_diff | pandas-dev__pandas-6163 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BUG: boolean query evaluation
http://stackoverflow.com/questions/21414959/querying-a-single-row-dataframe-with-anded-conditionals/21415119#21415119
cc @cpcloud
</issue>
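A hedged reconstruction of the linked report (the exact frame and expression are illustrative, not copied from Stack Overflow): with a one-row frame every column referenced by the expression is a single-element array, so the "single element ndarrays" shortcut in `_filter_special_cases()` below skips pandas alignment and the ANDed comparison is evaluated as if it were scalar arithmetic.

```python
import pandas as pd

df = pd.DataFrame({'a': [1], 'b': [2]})

# Plain boolean indexing keeps everything aligned and returns the row.
expected = df[(df.a == 1) & (df.b == 2)]

# query()/eval() route the expression through pandas.computation; on the
# affected versions the single-element shortcut collapses the terms to
# scalars, so the two results disagree for one-row frames.
queried = df.query('a == 1 and b == 2')

print(expected)
print(queried)
```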
<code>
[start of pandas/computation/align.py]
1 """Core eval alignment algorithms
2 """
3
4 import warnings
5 from functools import partial, wraps
6 from pandas.compat import zip, range
7
8 import numpy as np
9
10 import pandas as pd
11 from pandas import compat
12 import pandas.core.common as com
13
14
15 def _align_core_single_unary_op(term):
16 if isinstance(term.value, np.ndarray):
17 typ = partial(np.asanyarray, dtype=term.value.dtype)
18 else:
19 typ = type(term.value)
20 ret = typ,
21
22 if not hasattr(term.value, 'axes'):
23 ret += None,
24 else:
25 ret += _zip_axes_from_type(typ, term.value.axes),
26 return ret
27
28
29 def _zip_axes_from_type(typ, new_axes):
30 axes = {}
31 for ax_ind, ax_name in compat.iteritems(typ._AXIS_NAMES):
32 axes[ax_name] = new_axes[ax_ind]
33 return axes
34
35
36 def _maybe_promote_shape(values, naxes):
37 # test to see if we have an array else leave since must be a number
38 if not isinstance(values, np.ndarray):
39 return values
40
41 ndims = values.ndim
42 if ndims > naxes:
43 raise AssertionError('cannot have more dims than axes, '
44 '{0} > {1}'.format(ndims, naxes))
45 if ndims == naxes:
46 return values
47
48 ndim, nax = range(ndims), range(naxes)
49
50 axes_slice = [slice(None)] * naxes
51
52 # set difference of numaxes and ndims
53 slices = list(set(nax) - set(ndim))
54
55 if ndims == naxes:
56 if slices:
57 raise AssertionError('slices should be empty if ndims == naxes '
58 '{0}'.format(slices))
59 else:
60 if not slices:
61 raise AssertionError('slices should NOT be empty if ndim != naxes '
62 '{0}'.format(slices))
63
64 for sl in slices:
65 axes_slice[sl] = np.newaxis
66
67 return values[tuple(axes_slice)]
68
69
70 def _any_pandas_objects(terms):
71 """Check a sequence of terms for instances of PandasObject."""
72 return any(isinstance(term.value, pd.core.generic.PandasObject)
73 for term in terms)
74
75
76 def _filter_special_cases(f):
77 @wraps(f)
78 def wrapper(terms):
79 # single unary operand
80 if len(terms) == 1:
81 return _align_core_single_unary_op(terms[0])
82
83 term_values = (term.value for term in terms)
84 # only scalars or indexes
85 if all(isinstance(term.value, pd.Index) or term.isscalar for term in
86 terms):
87 return np.result_type(*term_values), None
88
89 # single element ndarrays
90 all_has_size = all(hasattr(term.value, 'size') for term in terms)
91 if all_has_size and all(term.value.size == 1 for term in terms):
92 return np.result_type(*term_values), None
93
94 # no pandas objects
95 if not _any_pandas_objects(terms):
96 return np.result_type(*term_values), None
97
98 return f(terms)
99 return wrapper
100
101
102 @_filter_special_cases
103 def _align_core(terms):
104 term_index = [i for i, term in enumerate(terms)
105 if hasattr(term.value, 'axes')]
106 term_dims = [terms[i].value.ndim for i in term_index]
107 ndims = pd.Series(dict(zip(term_index, term_dims)))
108
109 # initial axes are the axes of the largest-axis'd term
110 biggest = terms[ndims.idxmax()].value
111 typ = biggest._constructor
112 axes = biggest.axes
113 naxes = len(axes)
114 gt_than_one_axis = naxes > 1
115
116 for value in (terms[i].value for i in term_index):
117 is_series = isinstance(value, pd.Series)
118 is_series_and_gt_one_axis = is_series and gt_than_one_axis
119
120 for axis, items in enumerate(value.axes):
121 if is_series_and_gt_one_axis:
122 ax, itm = naxes - 1, value.index
123 else:
124 ax, itm = axis, items
125
126 if not axes[ax].is_(itm):
127 axes[ax] = axes[ax].join(itm, how='outer')
128
129 for i, ndim in compat.iteritems(ndims):
130 for axis, items in zip(range(ndim), axes):
131 ti = terms[i].value
132
133 if hasattr(ti, 'reindex_axis'):
134 transpose = isinstance(ti, pd.Series) and naxes > 1
135 reindexer = axes[naxes - 1] if transpose else items
136
137 term_axis_size = len(ti.axes[axis])
138 reindexer_size = len(reindexer)
139
140 ordm = np.log10(abs(reindexer_size - term_axis_size))
141 if ordm >= 1 and reindexer_size >= 10000:
142 warnings.warn('Alignment difference on axis {0} is larger '
143 'than an order of magnitude on term {1!r}, '
144 'by more than {2:.4g}; performance may '
145 'suffer'.format(axis, terms[i].name, ordm),
146 category=pd.io.common.PerformanceWarning)
147
148 if transpose:
149 f = partial(ti.reindex, index=reindexer, copy=False)
150 else:
151 f = partial(ti.reindex_axis, reindexer, axis=axis,
152 copy=False)
153
154 # need to fill if we have a bool dtype/array
155 if (isinstance(ti, (np.ndarray, pd.Series))
156 and ti.dtype == object
157 and pd.lib.is_bool_array(ti.values)):
158 r = f(fill_value=True)
159 else:
160 r = f()
161
162 terms[i].update(r)
163
164 res = _maybe_promote_shape(terms[i].value.T if transpose else
165 terms[i].value, naxes)
166 res = res.T if transpose else res
167
168 try:
169 v = res.values
170 except AttributeError:
171 v = res
172 terms[i].update(v)
173
174 return typ, _zip_axes_from_type(typ, axes)
175
176
177 def _filter_terms(flat):
178 # numeric literals
179 literals = frozenset(filter(lambda x: isinstance(x, Constant), flat))
180
181 # these are strings which are variable names
182 names = frozenset(flat) - literals
183
184 # literals are not names and names are not literals, so intersection should
185 # be empty
186 if literals & names:
187 raise ValueError('literals cannot be names and names cannot be '
188 'literals')
189 return names, literals
190
191
192 def _align(terms):
193 """Align a set of terms"""
194 try:
195 # flatten the parse tree (a nested list, really)
196 terms = list(com.flatten(terms))
197 except TypeError:
198 # can't iterate so it must just be a constant or single variable
199 if isinstance(terms.value, pd.core.generic.NDFrame):
200 typ = type(terms.value)
201 return typ, _zip_axes_from_type(typ, terms.value.axes)
202 return np.result_type(terms.type), None
203
204 # if all resolved variables are numeric scalars
205 if all(term.isscalar for term in terms):
206 return np.result_type(*(term.value for term in terms)).type, None
207
208 # perform the main alignment
209 typ, axes = _align_core(terms)
210 return typ, axes
211
212
213 def _reconstruct_object(typ, obj, axes, dtype):
214 """Reconstruct an object given its type, raw value, and possibly empty
215 (None) axes.
216
217 Parameters
218 ----------
219 typ : object
220 A type
221 obj : object
222 The value to use in the type constructor
223 axes : dict
224 The axes to use to construct the resulting pandas object
225
226 Returns
227 -------
228 ret : typ
229 An object of type ``typ`` with the value `obj` and possible axes
230 `axes`.
231 """
232 try:
233 typ = typ.type
234 except AttributeError:
235 pass
236
237 try:
238 res_t = np.result_type(obj.dtype, dtype)
239 except AttributeError:
240 res_t = dtype
241
242 if (not isinstance(typ, partial) and
243 issubclass(typ, pd.core.generic.PandasObject)):
244 return typ(obj, dtype=res_t, **axes)
245
246 # special case for pathological things like ~True/~False
247 if hasattr(res_t, 'type') and typ == np.bool_ and res_t != np.bool_:
248 ret_value = res_t.type(obj)
249 else:
250 ret_value = typ(obj).astype(res_t)
251
252 try:
253 ret = ret_value.item()
254 except (ValueError, IndexError):
255 # XXX: we catch IndexError to absorb a
256 # regression in numpy 1.7.0
257 # fixed by numpy/numpy@04b89c63
258 ret = ret_value
259 return ret
260
[end of pandas/computation/align.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pandas/computation/align.py b/pandas/computation/align.py
--- a/pandas/computation/align.py
+++ b/pandas/computation/align.py
@@ -81,16 +81,12 @@
return _align_core_single_unary_op(terms[0])
term_values = (term.value for term in terms)
+
# only scalars or indexes
if all(isinstance(term.value, pd.Index) or term.isscalar for term in
terms):
return np.result_type(*term_values), None
- # single element ndarrays
- all_has_size = all(hasattr(term.value, 'size') for term in terms)
- if all_has_size and all(term.value.size == 1 for term in terms):
- return np.result_type(*term_values), None
-
# no pandas objects
if not _any_pandas_objects(terms):
return np.result_type(*term_values), None
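For context, here is a minimal reproduction sketch of the boolean-query bug this diff targets, based on the Stack Overflow report cited in the issue (a single-row DataFrame with AND-ed conditionals). The frame and column names are illustrative only, and `df.query` assumes `numexpr` is available:

```python
import pandas as pd

# One-row frame: every column Series has size 1, the case the removed
# "single element ndarrays" special path appears to have mishandled.
df = pd.DataFrame({"a": [1], "b": [2]})

# Both expressions should yield a one-row result, not a bare scalar.
print(df.query("a == 1 & b == 2"))
print(df[(df.a == 1) & (df.b == 2)])
```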
| {"golden_diff": "diff --git a/pandas/computation/align.py b/pandas/computation/align.py\n--- a/pandas/computation/align.py\n+++ b/pandas/computation/align.py\n@@ -81,16 +81,12 @@\n return _align_core_single_unary_op(terms[0])\n \n term_values = (term.value for term in terms)\n+\n # only scalars or indexes\n if all(isinstance(term.value, pd.Index) or term.isscalar for term in\n terms):\n return np.result_type(*term_values), None\n \n- # single element ndarrays\n- all_has_size = all(hasattr(term.value, 'size') for term in terms)\n- if all_has_size and all(term.value.size == 1 for term in terms):\n- return np.result_type(*term_values), None\n-\n # no pandas objects\n if not _any_pandas_objects(terms):\n return np.result_type(*term_values), None\n", "issue": "BUG: boolean query evaluation\nhttp://stackoverflow.com/questions/21414959/querying-a-single-row-dataframe-with-anded-conditionals/21415119#21415119\n\ncc @cpcloud \n\n", "before_files": [{"content": "\"\"\"Core eval alignment algorithms\n\"\"\"\n\nimport warnings\nfrom functools import partial, wraps\nfrom pandas.compat import zip, range\n\nimport numpy as np\n\nimport pandas as pd\nfrom pandas import compat\nimport pandas.core.common as com\n\n\ndef _align_core_single_unary_op(term):\n if isinstance(term.value, np.ndarray):\n typ = partial(np.asanyarray, dtype=term.value.dtype)\n else:\n typ = type(term.value)\n ret = typ,\n\n if not hasattr(term.value, 'axes'):\n ret += None,\n else:\n ret += _zip_axes_from_type(typ, term.value.axes),\n return ret\n\n\ndef _zip_axes_from_type(typ, new_axes):\n axes = {}\n for ax_ind, ax_name in compat.iteritems(typ._AXIS_NAMES):\n axes[ax_name] = new_axes[ax_ind]\n return axes\n\n\ndef _maybe_promote_shape(values, naxes):\n # test to see if we have an array else leave since must be a number\n if not isinstance(values, np.ndarray):\n return values\n\n ndims = values.ndim\n if ndims > naxes:\n raise AssertionError('cannot have more dims than axes, '\n '{0} > {1}'.format(ndims, naxes))\n if ndims == naxes:\n return values\n\n ndim, nax = range(ndims), range(naxes)\n\n axes_slice = [slice(None)] * naxes\n\n # set difference of numaxes and ndims\n slices = list(set(nax) - set(ndim))\n\n if ndims == naxes:\n if slices:\n raise AssertionError('slices should be empty if ndims == naxes '\n '{0}'.format(slices))\n else:\n if not slices:\n raise AssertionError('slices should NOT be empty if ndim != naxes '\n '{0}'.format(slices))\n\n for sl in slices:\n axes_slice[sl] = np.newaxis\n\n return values[tuple(axes_slice)]\n\n\ndef _any_pandas_objects(terms):\n \"\"\"Check a sequence of terms for instances of PandasObject.\"\"\"\n return any(isinstance(term.value, pd.core.generic.PandasObject)\n for term in terms)\n\n\ndef _filter_special_cases(f):\n @wraps(f)\n def wrapper(terms):\n # single unary operand\n if len(terms) == 1:\n return _align_core_single_unary_op(terms[0])\n\n term_values = (term.value for term in terms)\n # only scalars or indexes\n if all(isinstance(term.value, pd.Index) or term.isscalar for term in\n terms):\n return np.result_type(*term_values), None\n\n # single element ndarrays\n all_has_size = all(hasattr(term.value, 'size') for term in terms)\n if all_has_size and all(term.value.size == 1 for term in terms):\n return np.result_type(*term_values), None\n\n # no pandas objects\n if not _any_pandas_objects(terms):\n return np.result_type(*term_values), None\n\n return f(terms)\n return wrapper\n\n\n@_filter_special_cases\ndef _align_core(terms):\n term_index = [i for i, term in 
enumerate(terms)\n if hasattr(term.value, 'axes')]\n term_dims = [terms[i].value.ndim for i in term_index]\n ndims = pd.Series(dict(zip(term_index, term_dims)))\n\n # initial axes are the axes of the largest-axis'd term\n biggest = terms[ndims.idxmax()].value\n typ = biggest._constructor\n axes = biggest.axes\n naxes = len(axes)\n gt_than_one_axis = naxes > 1\n\n for value in (terms[i].value for i in term_index):\n is_series = isinstance(value, pd.Series)\n is_series_and_gt_one_axis = is_series and gt_than_one_axis\n\n for axis, items in enumerate(value.axes):\n if is_series_and_gt_one_axis:\n ax, itm = naxes - 1, value.index\n else:\n ax, itm = axis, items\n\n if not axes[ax].is_(itm):\n axes[ax] = axes[ax].join(itm, how='outer')\n\n for i, ndim in compat.iteritems(ndims):\n for axis, items in zip(range(ndim), axes):\n ti = terms[i].value\n\n if hasattr(ti, 'reindex_axis'):\n transpose = isinstance(ti, pd.Series) and naxes > 1\n reindexer = axes[naxes - 1] if transpose else items\n\n term_axis_size = len(ti.axes[axis])\n reindexer_size = len(reindexer)\n\n ordm = np.log10(abs(reindexer_size - term_axis_size))\n if ordm >= 1 and reindexer_size >= 10000:\n warnings.warn('Alignment difference on axis {0} is larger '\n 'than an order of magnitude on term {1!r}, '\n 'by more than {2:.4g}; performance may '\n 'suffer'.format(axis, terms[i].name, ordm),\n category=pd.io.common.PerformanceWarning)\n\n if transpose:\n f = partial(ti.reindex, index=reindexer, copy=False)\n else:\n f = partial(ti.reindex_axis, reindexer, axis=axis,\n copy=False)\n\n # need to fill if we have a bool dtype/array\n if (isinstance(ti, (np.ndarray, pd.Series))\n and ti.dtype == object\n and pd.lib.is_bool_array(ti.values)):\n r = f(fill_value=True)\n else:\n r = f()\n\n terms[i].update(r)\n\n res = _maybe_promote_shape(terms[i].value.T if transpose else\n terms[i].value, naxes)\n res = res.T if transpose else res\n\n try:\n v = res.values\n except AttributeError:\n v = res\n terms[i].update(v)\n\n return typ, _zip_axes_from_type(typ, axes)\n\n\ndef _filter_terms(flat):\n # numeric literals\n literals = frozenset(filter(lambda x: isinstance(x, Constant), flat))\n\n # these are strings which are variable names\n names = frozenset(flat) - literals\n\n # literals are not names and names are not literals, so intersection should\n # be empty\n if literals & names:\n raise ValueError('literals cannot be names and names cannot be '\n 'literals')\n return names, literals\n\n\ndef _align(terms):\n \"\"\"Align a set of terms\"\"\"\n try:\n # flatten the parse tree (a nested list, really)\n terms = list(com.flatten(terms))\n except TypeError:\n # can't iterate so it must just be a constant or single variable\n if isinstance(terms.value, pd.core.generic.NDFrame):\n typ = type(terms.value)\n return typ, _zip_axes_from_type(typ, terms.value.axes)\n return np.result_type(terms.type), None\n\n # if all resolved variables are numeric scalars\n if all(term.isscalar for term in terms):\n return np.result_type(*(term.value for term in terms)).type, None\n\n # perform the main alignment\n typ, axes = _align_core(terms)\n return typ, axes\n\n\ndef _reconstruct_object(typ, obj, axes, dtype):\n \"\"\"Reconstruct an object given its type, raw value, and possibly empty\n (None) axes.\n\n Parameters\n ----------\n typ : object\n A type\n obj : object\n The value to use in the type constructor\n axes : dict\n The axes to use to construct the resulting pandas object\n\n Returns\n -------\n ret : typ\n An object of type ``typ`` with the value `obj` 
and possible axes\n `axes`.\n \"\"\"\n try:\n typ = typ.type\n except AttributeError:\n pass\n\n try:\n res_t = np.result_type(obj.dtype, dtype)\n except AttributeError:\n res_t = dtype\n\n if (not isinstance(typ, partial) and\n issubclass(typ, pd.core.generic.PandasObject)):\n return typ(obj, dtype=res_t, **axes)\n\n # special case for pathological things like ~True/~False\n if hasattr(res_t, 'type') and typ == np.bool_ and res_t != np.bool_:\n ret_value = res_t.type(obj)\n else:\n ret_value = typ(obj).astype(res_t)\n\n try:\n ret = ret_value.item()\n except (ValueError, IndexError):\n # XXX: we catch IndexError to absorb a\n # regression in numpy 1.7.0\n # fixed by numpy/numpy@04b89c63\n ret = ret_value\n return ret\n", "path": "pandas/computation/align.py"}]} | 3,204 | 211 |
gh_patches_debug_28976 | rasdani/github-patches | git_diff | huggingface__dataset-viewer-2415 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Replace `DatasetModuleNotInstalledError` errors with `DatasetWithScriptNotSupportedError`
We should never have a `DatasetModuleNotInstalledError`, because we should return a `DatasetWithScriptNotSupportedError` beforehand.
See https://github.com/huggingface/datasets-server/issues/1067#issuecomment-1924305954
</issue>
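As a non-authoritative sketch of the ordering the issue asks for (simplified stand-ins below, not the worker's real exception classes or helpers): catch the `trust_remote_code` `ValueError` raised by `datasets` first, and keep `ImportError` handling only for the allow-listed script case.

```python
class DatasetWithScriptNotSupported(Exception):
    """Stand-in for libcommon's DatasetWithScriptNotSupportedError."""


class DatasetModuleNotInstalled(Exception):
    """Stand-in for libcommon's DatasetModuleNotInstalledError."""


def fetch_config_names(fetch):
    """`fetch` is any zero-argument callable wrapping get_dataset_config_names."""
    try:
        return fetch()
    except ValueError as err:
        # datasets raises a ValueError mentioning trust_remote_code for script datasets
        if "trust_remote_code" in str(err):
            raise DatasetWithScriptNotSupported("dataset script not supported") from err
        raise
    except ImportError as err:
        # should only be reachable for datasets explicitly allow-listed to run scripts
        raise DatasetModuleNotInstalled("missing module required by the dataset") from err
```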
<code>
[start of services/worker/src/worker/job_runners/dataset/config_names.py]
1 # SPDX-License-Identifier: Apache-2.0
2 # Copyright 2022 The HuggingFace Authors.
3
4 import logging
5 from typing import Optional
6
7 from datasets import get_dataset_config_names
8 from datasets.data_files import EmptyDatasetError as _EmptyDatasetError
9 from libcommon.exceptions import (
10 ConfigNamesError,
11 DatasetModuleNotInstalledError,
12 DatasetWithScriptNotSupportedError,
13 DatasetWithTooManyConfigsError,
14 EmptyDatasetError,
15 )
16
17 from worker.dtos import CompleteJobResult, ConfigNameItem, DatasetConfigNamesResponse
18 from worker.job_runners.dataset.dataset_job_runner import (
19 DatasetJobRunnerWithDatasetsCache,
20 )
21 from worker.utils import resolve_trust_remote_code
22
23
24 def compute_config_names_response(
25 dataset: str,
26 max_number: int,
27 dataset_scripts_allow_list: list[str],
28 hf_token: Optional[str] = None,
29 ) -> DatasetConfigNamesResponse:
30 """
31 Get the response of 'dataset-config-names' for one specific dataset on huggingface.co.
32 Dataset can be gated if you pass an acceptable token.
33 It is assumed that the dataset exists and can be accessed using the token.
34
35 Args:
36 dataset (`str`):
37 A namespace (user or an organization) and a repo name separated by a `/`.
38 max_number (`int`):
39 The maximum number of configs for a dataset.
40 dataset_scripts_allow_list (`list[str]`):
41 List of datasets for which we support dataset scripts.
42 Unix shell-style wildcards also work in the dataset name for namespaced datasets,
43 for example `some_namespace/*` to refer to all the datasets in the `some_namespace` namespace.
44 The keyword `{{ALL_DATASETS_WITH_NO_NAMESPACE}}` refers to all the datasets without namespace.
45 hf_token (`str`, *optional*):
46 An authentication token (See https://huggingface.co/settings/token)
47
48 Raises:
49 [~`libcommon.exceptions.EmptyDatasetError`]:
50 The dataset is empty.
51 [~`libcommon.exceptions.DatasetModuleNotInstalledError`]:
52 The dataset tries to import a module that is not installed.
53 [~`libcommon.exceptions.ConfigNamesError`]:
54 If the list of configs could not be obtained using the datasets library.
55 [~`libcommon.exceptions.DatasetWithScriptNotSupportedError`]:
56 If the dataset has a dataset script and is not in the allow list.
57
58 Returns:
59 `DatasetConfigNamesResponse`: An object with the list of config names.
60 """
61 logging.info(f"get 'dateset-config-names' for {dataset=}")
62 # get the list of splits in streaming mode
63 try:
64 config_name_items: list[ConfigNameItem] = [
65 {"dataset": dataset, "config": str(config)}
66 for config in sorted(
67 get_dataset_config_names(
68 path=dataset,
69 token=hf_token,
70 trust_remote_code=resolve_trust_remote_code(
71 dataset=dataset, allow_list=dataset_scripts_allow_list
72 ),
73 )
74 )
75 ]
76 except _EmptyDatasetError as err:
77 raise EmptyDatasetError("The dataset is empty.", cause=err) from err
78 except ImportError as err:
79 raise DatasetModuleNotInstalledError(
80 "The dataset tries to import a module that is not installed.", cause=err
81 ) from err
82 except Exception as err:
83 if isinstance(err, ValueError) and "trust_remote_code" in str(err):
84 raise DatasetWithScriptNotSupportedError(
85 "The dataset viewer doesn't support this dataset because it runs "
86 "arbitrary python code. Please open a discussion in the discussion tab "
87 "if you think this is an error and tag @lhoestq and @severo."
88 ) from err
89 raise ConfigNamesError("Cannot get the config names for the dataset.", cause=err) from err
90
91 number_of_configs = len(config_name_items)
92 if number_of_configs > max_number:
93 raise DatasetWithTooManyConfigsError(
94 f"The maximum number of configs allowed is {max_number}, dataset has {number_of_configs} configs."
95 )
96
97 return DatasetConfigNamesResponse(config_names=config_name_items)
98
99
100 class DatasetConfigNamesJobRunner(DatasetJobRunnerWithDatasetsCache):
101 @staticmethod
102 def get_job_type() -> str:
103 return "dataset-config-names"
104
105 def compute(self) -> CompleteJobResult:
106 return CompleteJobResult(
107 compute_config_names_response(
108 dataset=self.dataset,
109 hf_token=self.app_config.common.hf_token,
110 max_number=self.app_config.config_names.max_number,
111 dataset_scripts_allow_list=self.app_config.common.dataset_scripts_allow_list,
112 )
113 )
114
[end of services/worker/src/worker/job_runners/dataset/config_names.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/services/worker/src/worker/job_runners/dataset/config_names.py b/services/worker/src/worker/job_runners/dataset/config_names.py
--- a/services/worker/src/worker/job_runners/dataset/config_names.py
+++ b/services/worker/src/worker/job_runners/dataset/config_names.py
@@ -75,18 +75,21 @@
]
except _EmptyDatasetError as err:
raise EmptyDatasetError("The dataset is empty.", cause=err) from err
- except ImportError as err:
- raise DatasetModuleNotInstalledError(
- "The dataset tries to import a module that is not installed.", cause=err
- ) from err
- except Exception as err:
- if isinstance(err, ValueError) and "trust_remote_code" in str(err):
+ except ValueError as err:
+ if "trust_remote_code" in str(err):
raise DatasetWithScriptNotSupportedError(
"The dataset viewer doesn't support this dataset because it runs "
"arbitrary python code. Please open a discussion in the discussion tab "
"if you think this is an error and tag @lhoestq and @severo."
) from err
raise ConfigNamesError("Cannot get the config names for the dataset.", cause=err) from err
+ except ImportError as err:
+ # this should only happen if the dataset is in the allow list, which should soon disappear
+ raise DatasetModuleNotInstalledError(
+ "The dataset tries to import a module that is not installed.", cause=err
+ ) from err
+ except Exception as err:
+ raise ConfigNamesError("Cannot get the config names for the dataset.", cause=err) from err
number_of_configs = len(config_name_items)
if number_of_configs > max_number:
| {"golden_diff": "diff --git a/services/worker/src/worker/job_runners/dataset/config_names.py b/services/worker/src/worker/job_runners/dataset/config_names.py\n--- a/services/worker/src/worker/job_runners/dataset/config_names.py\n+++ b/services/worker/src/worker/job_runners/dataset/config_names.py\n@@ -75,18 +75,21 @@\n ]\n except _EmptyDatasetError as err:\n raise EmptyDatasetError(\"The dataset is empty.\", cause=err) from err\n- except ImportError as err:\n- raise DatasetModuleNotInstalledError(\n- \"The dataset tries to import a module that is not installed.\", cause=err\n- ) from err\n- except Exception as err:\n- if isinstance(err, ValueError) and \"trust_remote_code\" in str(err):\n+ except ValueError as err:\n+ if \"trust_remote_code\" in str(err):\n raise DatasetWithScriptNotSupportedError(\n \"The dataset viewer doesn't support this dataset because it runs \"\n \"arbitrary python code. Please open a discussion in the discussion tab \"\n \"if you think this is an error and tag @lhoestq and @severo.\"\n ) from err\n raise ConfigNamesError(\"Cannot get the config names for the dataset.\", cause=err) from err\n+ except ImportError as err:\n+ # this should only happen if the dataset is in the allow list, which should soon disappear\n+ raise DatasetModuleNotInstalledError(\n+ \"The dataset tries to import a module that is not installed.\", cause=err\n+ ) from err\n+ except Exception as err:\n+ raise ConfigNamesError(\"Cannot get the config names for the dataset.\", cause=err) from err\n \n number_of_configs = len(config_name_items)\n if number_of_configs > max_number:\n", "issue": "Replace `DatasetModuleNotInstalledError` errors with `DatasetWithScriptNotSupportedError`\nWe should never have a `DatasetModuleNotInstalledError` error, because we should return a `DatasetWithScriptNotSupportedError` error before\r\n\r\nSee https://github.com/huggingface/datasets-server/issues/1067#issuecomment-1924305954\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\n\nimport logging\nfrom typing import Optional\n\nfrom datasets import get_dataset_config_names\nfrom datasets.data_files import EmptyDatasetError as _EmptyDatasetError\nfrom libcommon.exceptions import (\n ConfigNamesError,\n DatasetModuleNotInstalledError,\n DatasetWithScriptNotSupportedError,\n DatasetWithTooManyConfigsError,\n EmptyDatasetError,\n)\n\nfrom worker.dtos import CompleteJobResult, ConfigNameItem, DatasetConfigNamesResponse\nfrom worker.job_runners.dataset.dataset_job_runner import (\n DatasetJobRunnerWithDatasetsCache,\n)\nfrom worker.utils import resolve_trust_remote_code\n\n\ndef compute_config_names_response(\n dataset: str,\n max_number: int,\n dataset_scripts_allow_list: list[str],\n hf_token: Optional[str] = None,\n) -> DatasetConfigNamesResponse:\n \"\"\"\n Get the response of 'dataset-config-names' for one specific dataset on huggingface.co.\n Dataset can be gated if you pass an acceptable token.\n It is assumed that the dataset exists and can be accessed using the token.\n\n Args:\n dataset (`str`):\n A namespace (user or an organization) and a repo name separated by a `/`.\n max_number (`int`):\n The maximum number of configs for a dataset.\n dataset_scripts_allow_list (`list[str]`):\n List of datasets for which we support dataset scripts.\n Unix shell-style wildcards also work in the dataset name for namespaced datasets,\n for example `some_namespace/*` to refer to all the datasets in the `some_namespace` namespace.\n The keyword 
`{{ALL_DATASETS_WITH_NO_NAMESPACE}}` refers to all the datasets without namespace.\n hf_token (`str`, *optional*):\n An authentication token (See https://huggingface.co/settings/token)\n\n Raises:\n [~`libcommon.exceptions.EmptyDatasetError`]:\n The dataset is empty.\n [~`libcommon.exceptions.DatasetModuleNotInstalledError`]:\n The dataset tries to import a module that is not installed.\n [~`libcommon.exceptions.ConfigNamesError`]:\n If the list of configs could not be obtained using the datasets library.\n [~`libcommon.exceptions.DatasetWithScriptNotSupportedError`]:\n If the dataset has a dataset script and is not in the allow list.\n\n Returns:\n `DatasetConfigNamesResponse`: An object with the list of config names.\n \"\"\"\n logging.info(f\"get 'dateset-config-names' for {dataset=}\")\n # get the list of splits in streaming mode\n try:\n config_name_items: list[ConfigNameItem] = [\n {\"dataset\": dataset, \"config\": str(config)}\n for config in sorted(\n get_dataset_config_names(\n path=dataset,\n token=hf_token,\n trust_remote_code=resolve_trust_remote_code(\n dataset=dataset, allow_list=dataset_scripts_allow_list\n ),\n )\n )\n ]\n except _EmptyDatasetError as err:\n raise EmptyDatasetError(\"The dataset is empty.\", cause=err) from err\n except ImportError as err:\n raise DatasetModuleNotInstalledError(\n \"The dataset tries to import a module that is not installed.\", cause=err\n ) from err\n except Exception as err:\n if isinstance(err, ValueError) and \"trust_remote_code\" in str(err):\n raise DatasetWithScriptNotSupportedError(\n \"The dataset viewer doesn't support this dataset because it runs \"\n \"arbitrary python code. Please open a discussion in the discussion tab \"\n \"if you think this is an error and tag @lhoestq and @severo.\"\n ) from err\n raise ConfigNamesError(\"Cannot get the config names for the dataset.\", cause=err) from err\n\n number_of_configs = len(config_name_items)\n if number_of_configs > max_number:\n raise DatasetWithTooManyConfigsError(\n f\"The maximum number of configs allowed is {max_number}, dataset has {number_of_configs} configs.\"\n )\n\n return DatasetConfigNamesResponse(config_names=config_name_items)\n\n\nclass DatasetConfigNamesJobRunner(DatasetJobRunnerWithDatasetsCache):\n @staticmethod\n def get_job_type() -> str:\n return \"dataset-config-names\"\n\n def compute(self) -> CompleteJobResult:\n return CompleteJobResult(\n compute_config_names_response(\n dataset=self.dataset,\n hf_token=self.app_config.common.hf_token,\n max_number=self.app_config.config_names.max_number,\n dataset_scripts_allow_list=self.app_config.common.dataset_scripts_allow_list,\n )\n )\n", "path": "services/worker/src/worker/job_runners/dataset/config_names.py"}]} | 1,846 | 394 |
gh_patches_debug_185 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-868 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Crash on closing Matplotlib export
E.g., when opening the Matplotlib exporter multiple times and closing the windows again, Python crashes with a segmentation fault.
This is caused by the Matplotlib QMainWindow handling the closeEvent and deleting the only reference to the window before it is closed properly.
</issue>
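A minimal, hedged sketch of the pattern that avoids the crash (it mirrors the one-line fix shown in the diff at the end of this entry): drop the Python reference in `closeEvent`, but let Qt destroy the widget only after the event has been processed, via `deleteLater()`. The class name is hypothetical and the import assumes pyqtgraph's Qt shim, as used by the exporter itself.

```python
from pyqtgraph.Qt import QtGui  # pyqtgraph's Qt compatibility shim


class SafeMatplotlibWindow(QtGui.QMainWindow):
    windows = []  # keeps each window alive while it is open

    def closeEvent(self, ev):
        # Removing the last Python reference here is safe only because
        # deleteLater() defers the C++ deletion until the event loop is idle.
        SafeMatplotlibWindow.windows.remove(self)
        self.deleteLater()
        ev.accept()
```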
<code>
[start of pyqtgraph/exporters/Matplotlib.py]
1 from ..Qt import QtGui, QtCore
2 from .Exporter import Exporter
3 from .. import PlotItem
4 from .. import functions as fn
5
6 __all__ = ['MatplotlibExporter']
7
8 """
9 It is helpful when using the matplotlib Exporter if your
10 .matplotlib/matplotlibrc file is configured appropriately.
11 The following are suggested for getting usable PDF output that
12 can be edited in Illustrator, etc.
13
14 backend : Qt4Agg
15 text.usetex : True # Assumes you have a findable LaTeX installation
16 interactive : False
17 font.family : sans-serif
18 font.sans-serif : 'Arial' # (make first in list)
19 mathtext.default : sf
20 figure.facecolor : white # personal preference
21 # next setting allows pdf font to be readable in Adobe Illustrator
22 pdf.fonttype : 42 # set fonts to TrueType (otherwise it will be 3
23 # and the text will be vectorized.
24 text.dvipnghack : True # primarily to clean up font appearance on Mac
25
26 The advantage is that there is less to do to get an exported file cleaned and ready for
27 publication. Fonts are not vectorized (outlined), and window colors are white.
28
29 """
30
31 class MatplotlibExporter(Exporter):
32 Name = "Matplotlib Window"
33 windows = []
34 def __init__(self, item):
35 Exporter.__init__(self, item)
36
37 def parameters(self):
38 return None
39
40 def cleanAxes(self, axl):
41 if type(axl) is not list:
42 axl = [axl]
43 for ax in axl:
44 if ax is None:
45 continue
46 for loc, spine in ax.spines.items():
47 if loc in ['left', 'bottom']:
48 pass
49 elif loc in ['right', 'top']:
50 spine.set_color('none')
51 # do not draw the spine
52 else:
53 raise ValueError('Unknown spine location: %s' % loc)
54 # turn off ticks when there is no spine
55 ax.xaxis.set_ticks_position('bottom')
56
57 def export(self, fileName=None):
58
59 if isinstance(self.item, PlotItem):
60 mpw = MatplotlibWindow()
61 MatplotlibExporter.windows.append(mpw)
62
63 stdFont = 'Arial'
64
65 fig = mpw.getFigure()
66
67 # get labels from the graphic item
68 xlabel = self.item.axes['bottom']['item'].label.toPlainText()
69 ylabel = self.item.axes['left']['item'].label.toPlainText()
70 title = self.item.titleLabel.text
71
72 ax = fig.add_subplot(111, title=title)
73 ax.clear()
74 self.cleanAxes(ax)
75 #ax.grid(True)
76 for item in self.item.curves:
77 x, y = item.getData()
78 opts = item.opts
79 pen = fn.mkPen(opts['pen'])
80 if pen.style() == QtCore.Qt.NoPen:
81 linestyle = ''
82 else:
83 linestyle = '-'
84 color = tuple([c/255. for c in fn.colorTuple(pen.color())])
85 symbol = opts['symbol']
86 if symbol == 't':
87 symbol = '^'
88 symbolPen = fn.mkPen(opts['symbolPen'])
89 symbolBrush = fn.mkBrush(opts['symbolBrush'])
90 markeredgecolor = tuple([c/255. for c in fn.colorTuple(symbolPen.color())])
91 markerfacecolor = tuple([c/255. for c in fn.colorTuple(symbolBrush.color())])
92 markersize = opts['symbolSize']
93
94 if opts['fillLevel'] is not None and opts['fillBrush'] is not None:
95 fillBrush = fn.mkBrush(opts['fillBrush'])
96 fillcolor = tuple([c/255. for c in fn.colorTuple(fillBrush.color())])
97 ax.fill_between(x=x, y1=y, y2=opts['fillLevel'], facecolor=fillcolor)
98
99 pl = ax.plot(x, y, marker=symbol, color=color, linewidth=pen.width(),
100 linestyle=linestyle, markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor,
101 markersize=markersize)
102 xr, yr = self.item.viewRange()
103 ax.set_xbound(*xr)
104 ax.set_ybound(*yr)
105 ax.set_xlabel(xlabel) # place the labels.
106 ax.set_ylabel(ylabel)
107 mpw.draw()
108 else:
109 raise Exception("Matplotlib export currently only works with plot items")
110
111 MatplotlibExporter.register()
112
113
114 class MatplotlibWindow(QtGui.QMainWindow):
115 def __init__(self):
116 from ..widgets import MatplotlibWidget
117 QtGui.QMainWindow.__init__(self)
118 self.mpl = MatplotlibWidget.MatplotlibWidget()
119 self.setCentralWidget(self.mpl)
120 self.show()
121
122 def __getattr__(self, attr):
123 return getattr(self.mpl, attr)
124
125 def closeEvent(self, ev):
126 MatplotlibExporter.windows.remove(self)
127
128
129
[end of pyqtgraph/exporters/Matplotlib.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyqtgraph/exporters/Matplotlib.py b/pyqtgraph/exporters/Matplotlib.py
--- a/pyqtgraph/exporters/Matplotlib.py
+++ b/pyqtgraph/exporters/Matplotlib.py
@@ -124,5 +124,4 @@
def closeEvent(self, ev):
MatplotlibExporter.windows.remove(self)
-
-
+ self.deleteLater()
| {"golden_diff": "diff --git a/pyqtgraph/exporters/Matplotlib.py b/pyqtgraph/exporters/Matplotlib.py\n--- a/pyqtgraph/exporters/Matplotlib.py\n+++ b/pyqtgraph/exporters/Matplotlib.py\n@@ -124,5 +124,4 @@\n \n def closeEvent(self, ev):\n MatplotlibExporter.windows.remove(self)\n-\n-\n+ self.deleteLater()\n", "issue": "Crash on closing Matplotlib export\nE.g. when opening the Matplotlib exporter multiple times, and closing the windows again, Python crashes with a segmentation fault.\r\n\r\nThis is caused by the Matplotlib QMainWindow listening to the closeEvent and deleting the only reference of the window before it is closed properly.\n", "before_files": [{"content": "from ..Qt import QtGui, QtCore\nfrom .Exporter import Exporter\nfrom .. import PlotItem\nfrom .. import functions as fn\n\n__all__ = ['MatplotlibExporter']\n\n\"\"\"\nIt is helpful when using the matplotlib Exporter if your\n.matplotlib/matplotlibrc file is configured appropriately.\nThe following are suggested for getting usable PDF output that\ncan be edited in Illustrator, etc.\n\nbackend : Qt4Agg\ntext.usetex : True # Assumes you have a findable LaTeX installation\ninteractive : False\nfont.family : sans-serif\nfont.sans-serif : 'Arial' # (make first in list)\nmathtext.default : sf\nfigure.facecolor : white # personal preference\n# next setting allows pdf font to be readable in Adobe Illustrator\npdf.fonttype : 42 # set fonts to TrueType (otherwise it will be 3\n # and the text will be vectorized.\ntext.dvipnghack : True # primarily to clean up font appearance on Mac\n\nThe advantage is that there is less to do to get an exported file cleaned and ready for\npublication. Fonts are not vectorized (outlined), and window colors are white.\n\n\"\"\"\n \nclass MatplotlibExporter(Exporter):\n Name = \"Matplotlib Window\"\n windows = []\n def __init__(self, item):\n Exporter.__init__(self, item)\n \n def parameters(self):\n return None\n\n def cleanAxes(self, axl):\n if type(axl) is not list:\n axl = [axl]\n for ax in axl:\n if ax is None:\n continue\n for loc, spine in ax.spines.items():\n if loc in ['left', 'bottom']:\n pass\n elif loc in ['right', 'top']:\n spine.set_color('none')\n # do not draw the spine\n else:\n raise ValueError('Unknown spine location: %s' % loc)\n # turn off ticks when there is no spine\n ax.xaxis.set_ticks_position('bottom')\n \n def export(self, fileName=None):\n \n if isinstance(self.item, PlotItem):\n mpw = MatplotlibWindow()\n MatplotlibExporter.windows.append(mpw)\n\n stdFont = 'Arial'\n \n fig = mpw.getFigure()\n \n # get labels from the graphic item\n xlabel = self.item.axes['bottom']['item'].label.toPlainText()\n ylabel = self.item.axes['left']['item'].label.toPlainText()\n title = self.item.titleLabel.text\n\n ax = fig.add_subplot(111, title=title)\n ax.clear()\n self.cleanAxes(ax)\n #ax.grid(True)\n for item in self.item.curves:\n x, y = item.getData()\n opts = item.opts\n pen = fn.mkPen(opts['pen'])\n if pen.style() == QtCore.Qt.NoPen:\n linestyle = ''\n else:\n linestyle = '-'\n color = tuple([c/255. for c in fn.colorTuple(pen.color())])\n symbol = opts['symbol']\n if symbol == 't':\n symbol = '^'\n symbolPen = fn.mkPen(opts['symbolPen'])\n symbolBrush = fn.mkBrush(opts['symbolBrush'])\n markeredgecolor = tuple([c/255. for c in fn.colorTuple(symbolPen.color())])\n markerfacecolor = tuple([c/255. 
for c in fn.colorTuple(symbolBrush.color())])\n markersize = opts['symbolSize']\n \n if opts['fillLevel'] is not None and opts['fillBrush'] is not None:\n fillBrush = fn.mkBrush(opts['fillBrush'])\n fillcolor = tuple([c/255. for c in fn.colorTuple(fillBrush.color())])\n ax.fill_between(x=x, y1=y, y2=opts['fillLevel'], facecolor=fillcolor)\n \n pl = ax.plot(x, y, marker=symbol, color=color, linewidth=pen.width(), \n linestyle=linestyle, markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor,\n markersize=markersize)\n xr, yr = self.item.viewRange()\n ax.set_xbound(*xr)\n ax.set_ybound(*yr)\n ax.set_xlabel(xlabel) # place the labels.\n ax.set_ylabel(ylabel)\n mpw.draw()\n else:\n raise Exception(\"Matplotlib export currently only works with plot items\")\n \nMatplotlibExporter.register() \n \n\nclass MatplotlibWindow(QtGui.QMainWindow):\n def __init__(self):\n from ..widgets import MatplotlibWidget\n QtGui.QMainWindow.__init__(self)\n self.mpl = MatplotlibWidget.MatplotlibWidget()\n self.setCentralWidget(self.mpl)\n self.show()\n \n def __getattr__(self, attr):\n return getattr(self.mpl, attr)\n \n def closeEvent(self, ev):\n MatplotlibExporter.windows.remove(self)\n\n\n", "path": "pyqtgraph/exporters/Matplotlib.py"}]} | 1,930 | 87 |
gh_patches_debug_24516 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-3275 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
git package: warn if specified revision master, main
Follow up from #3057, #3104
Previously, if a git package was unpinned, dbt tried to install it from `master` (hard-coded). In #3104, this was fixed to use `HEAD` (the default branch, regardless of name) and to continue warning if unpinned.
We'd like to warn as well if the user specifies the package `revision` as `'master'` or `'main'`, since either almost certainly represents the default branch. Users can disable the warning with `warn-unpinned: false`.
### Example
```yml
packages:
- git: https://github.com/fishtown-analytics/dbt-codegen
revision: master
- git: https://github.com/tailsdotcom/dbt_artifacts
revision: main
- git: https://gitlab.com/gitlab-data/snowflake_spend
- package: fishtown-analytics/audit_helper
version: 0.3.0
```
<details>
<summary> <code>$ dbt deps</code> </summary>
```
Running with dbt=0.19.0
WARNING: The git package "https://github.com/fishtown-analytics/dbt-codegen"
is pinned to the "master" branch.
This can introduce breaking changes into your project without warning!
See https://docs.getdbt.com/docs/package-management#section-specifying-package-versions
WARNING: The git package "https://github.com/tailsdotcom/dbt_artifacts"
is pinned to the "main" branch.
This can introduce breaking changes into your project without warning!
See https://docs.getdbt.com/docs/package-management#section-specifying-package-versions
WARNING: The git package "https://gitlab.com/gitlab-data/snowflake_spend"
is not pinned, using HEAD (default branch).
This can introduce breaking changes into your project without warning!
See https://docs.getdbt.com/docs/package-management#section-specifying-package-versions
WARNING: The git package "https://github.com/fishtown-analytics/dbt-codegen"
is pinned to the "master" branch.
This can introduce breaking changes into your project without warning!
See https://docs.getdbt.com/docs/package-management#section-specifying-package-versions
WARNING: The git package "https://github.com/tailsdotcom/dbt_artifacts"
is pinned to the "main" branch.
This can introduce breaking changes into your project without warning!
See https://docs.getdbt.com/docs/package-management#section-specifying-package-versions
WARNING: The git package "https://gitlab.com/gitlab-data/snowflake_spend"
is not pinned, using HEAD (default branch).
This can introduce breaking changes into your project without warning!
See https://docs.getdbt.com/docs/package-management#section-specifying-package-versions
Installing https://github.com/fishtown-analytics/dbt-codegen@master
Installed from revision master
Installing https://github.com/tailsdotcom/dbt_artifacts@main
Installed from revision main
Installing https://gitlab.com/gitlab-data/snowflake_spend@HEAD
Installed from HEAD (default branch)
Installing fishtown-analytics/[email protected]
Installed from version 0.3.0
Installing fishtown-analytics/[email protected]
Installed from version 0.6.4
```
</details>
```yml
packages:
- git: https://github.com/fishtown-analytics/dbt-codegen
revision: master
warn-unpinned: false
- git: https://github.com/tailsdotcom/dbt_artifacts
revision: main
warn-unpinned: false
- git: https://gitlab.com/gitlab-data/snowflake_spend
warn-unpinned: false
- package: fishtown-analytics/audit_helper
version: 0.3.0
```
<details>
<summary> <code>$ dbt deps</code> </summary>
```
Running with dbt=0.19.0
Installing https://github.com/fishtown-analytics/dbt-codegen@master
Installed from revision master
Installing https://github.com/tailsdotcom/dbt_artifacts@main
Installed from revision main
Installing https://gitlab.com/gitlab-data/snowflake_spend@HEAD
Installed from HEAD (default branch)
Installing fishtown-analytics/[email protected]
Installed from version 0.3.0
Installing fishtown-analytics/[email protected]
Installed from version 0.6.4
```
</details>
### Checklist
- [x] I have signed the [CLA](https://docs.getdbt.com/docs/contributor-license-agreements)
- [x] I have run this code in development and it appears to resolve the stated issue
- ~This PR includes tests, or tests are not required/relevant for this PR~
- [x] I have updated the `CHANGELOG.md` and added information about my change to the "dbt next" section.
</issue>
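The snippet below is only an illustrative sketch of the check described above, not dbt's actual packaging code; the function name and message strings are assumptions modelled on the warnings quoted in the example output.

```python
def warn_if_default_branch(git_url, revision=None, warn_unpinned=True):
    """Warn when a git package is unpinned or pinned to a default-branch name."""
    if not warn_unpinned:
        return
    if revision is None:
        print(f'WARNING: The git package "{git_url}"\n'
              '\tis not pinned, using HEAD (default branch).')
    elif revision in ("master", "main", "HEAD"):
        print(f'WARNING: The git package "{git_url}"\n'
              f'\tis pinned to the "{revision}" branch.')
```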
<code>
[start of core/dbt/clients/registry.py]
1 from functools import wraps
2 import requests
3 from dbt.exceptions import RegistryException
4 from dbt.utils import memoized
5 from dbt.logger import GLOBAL_LOGGER as logger
6 import os
7 import time
8
9 if os.getenv('DBT_PACKAGE_HUB_URL'):
10 DEFAULT_REGISTRY_BASE_URL = os.getenv('DBT_PACKAGE_HUB_URL')
11 else:
12 DEFAULT_REGISTRY_BASE_URL = 'https://hub.getdbt.com/'
13
14
15 def _get_url(url, registry_base_url=None):
16 if registry_base_url is None:
17 registry_base_url = DEFAULT_REGISTRY_BASE_URL
18
19 return '{}{}'.format(registry_base_url, url)
20
21
22 def _wrap_exceptions(fn):
23 @wraps(fn)
24 def wrapper(*args, **kwargs):
25 max_attempts = 5
26 attempt = 0
27 while True:
28 attempt += 1
29 try:
30 return fn(*args, **kwargs)
31 except requests.exceptions.ConnectionError as exc:
32 if attempt < max_attempts:
33 time.sleep(1)
34 continue
35
36 raise RegistryException(
37 'Unable to connect to registry hub'
38 ) from exc
39 return wrapper
40
41
42 @_wrap_exceptions
43 def _get(path, registry_base_url=None):
44 url = _get_url(path, registry_base_url)
45 logger.debug('Making package registry request: GET {}'.format(url))
46 resp = requests.get(url)
47 logger.debug('Response from registry: GET {} {}'.format(url,
48 resp.status_code))
49 resp.raise_for_status()
50 return resp.json()
51
52
53 def index(registry_base_url=None):
54 return _get('api/v1/index.json', registry_base_url)
55
56
57 index_cached = memoized(index)
58
59
60 def packages(registry_base_url=None):
61 return _get('api/v1/packages.json', registry_base_url)
62
63
64 def package(name, registry_base_url=None):
65 return _get('api/v1/{}.json'.format(name), registry_base_url)
66
67
68 def package_version(name, version, registry_base_url=None):
69 return _get('api/v1/{}/{}.json'.format(name, version), registry_base_url)
70
71
72 def get_available_versions(name):
73 response = package(name)
74 return list(response['versions'])
75
[end of core/dbt/clients/registry.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/dbt/clients/registry.py b/core/dbt/clients/registry.py
--- a/core/dbt/clients/registry.py
+++ b/core/dbt/clients/registry.py
@@ -28,11 +28,10 @@
attempt += 1
try:
return fn(*args, **kwargs)
- except requests.exceptions.ConnectionError as exc:
+ except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as exc:
if attempt < max_attempts:
time.sleep(1)
continue
-
raise RegistryException(
'Unable to connect to registry hub'
) from exc
@@ -43,7 +42,7 @@
def _get(path, registry_base_url=None):
url = _get_url(path, registry_base_url)
logger.debug('Making package registry request: GET {}'.format(url))
- resp = requests.get(url)
+ resp = requests.get(url, timeout=30)
logger.debug('Response from registry: GET {} {}'.format(url,
resp.status_code))
resp.raise_for_status()
| {"golden_diff": "diff --git a/core/dbt/clients/registry.py b/core/dbt/clients/registry.py\n--- a/core/dbt/clients/registry.py\n+++ b/core/dbt/clients/registry.py\n@@ -28,11 +28,10 @@\n attempt += 1\n try:\n return fn(*args, **kwargs)\n- except requests.exceptions.ConnectionError as exc:\n+ except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as exc:\n if attempt < max_attempts:\n time.sleep(1)\n continue\n-\n raise RegistryException(\n 'Unable to connect to registry hub'\n ) from exc\n@@ -43,7 +42,7 @@\n def _get(path, registry_base_url=None):\n url = _get_url(path, registry_base_url)\n logger.debug('Making package registry request: GET {}'.format(url))\n- resp = requests.get(url)\n+ resp = requests.get(url, timeout=30)\n logger.debug('Response from registry: GET {} {}'.format(url,\n resp.status_code))\n resp.raise_for_status()\n", "issue": "git package: warn if specified revision master, main\nFollow up from #3057, #3104\r\n\r\nPreviously, if a git package was unpinned, dbt tried to install it from `master` (hard coded). In #3104, this was fixed to used `HEAD` (default branch, regardless of name) and continue to warn if unpinned.\r\n\r\nWe'd like to warn as well if the user specifies the package `revision` as `'master'` or `'main'`, since either almost certainly represents the default branch. Users can disable the warning with `warn-unpinned: false`.\r\n\r\n### Example\r\n\r\n```yml\r\npackages:\r\n - git: https://github.com/fishtown-analytics/dbt-codegen \r\n revision: master\r\n - git: https://github.com/tailsdotcom/dbt_artifacts \r\n revision: main\r\n - git: https://gitlab.com/gitlab-data/snowflake_spend\r\n - package: fishtown-analytics/audit_helper\r\n version: 0.3.0\r\n```\r\n\r\n<details>\r\n<summary> <code>$ dbt deps</code> </summary>\r\n\r\n```\r\nRunning with dbt=0.19.0\r\nWARNING: The git package \"https://github.com/fishtown-analytics/dbt-codegen\"\r\n\tis pinned to the \"master\" branch.\r\n\tThis can introduce breaking changes into your project without warning!\r\n\r\nSee https://docs.getdbt.com/docs/package-management#section-specifying-package-versions\r\nWARNING: The git package \"https://github.com/tailsdotcom/dbt_artifacts\"\r\n\tis pinned to the \"main\" branch.\r\n\tThis can introduce breaking changes into your project without warning!\r\n\r\nSee https://docs.getdbt.com/docs/package-management#section-specifying-package-versions\r\nWARNING: The git package \"https://gitlab.com/gitlab-data/snowflake_spend\"\r\n\tis not pinned, using HEAD (default branch).\r\n\tThis can introduce breaking changes into your project without warning!\r\n\r\nSee https://docs.getdbt.com/docs/package-management#section-specifying-package-versions\r\nWARNING: The git package \"https://github.com/fishtown-analytics/dbt-codegen\"\r\n\tis pinned to the \"master\" branch.\r\n\tThis can introduce breaking changes into your project without warning!\r\n\r\nSee https://docs.getdbt.com/docs/package-management#section-specifying-package-versions\r\nWARNING: The git package \"https://github.com/tailsdotcom/dbt_artifacts\"\r\n\tis pinned to the \"main\" branch.\r\n\tThis can introduce breaking changes into your project without warning!\r\n\r\nSee https://docs.getdbt.com/docs/package-management#section-specifying-package-versions\r\nWARNING: The git package \"https://gitlab.com/gitlab-data/snowflake_spend\"\r\n\tis not pinned, using HEAD (default branch).\r\n\tThis can introduce breaking changes into your project without warning!\r\n\r\nSee 
https://docs.getdbt.com/docs/package-management#section-specifying-package-versions\r\nInstalling https://github.com/fishtown-analytics/dbt-codegen@master\r\n Installed from revision master\r\n\r\nInstalling https://github.com/tailsdotcom/dbt_artifacts@main\r\n Installed from revision main\r\n\r\nInstalling https://gitlab.com/gitlab-data/snowflake_spend@HEAD\r\n Installed from HEAD (default branch)\r\n\r\nInstalling fishtown-analytics/[email protected]\r\n Installed from version 0.3.0\r\n\r\nInstalling fishtown-analytics/[email protected]\r\n Installed from version 0.6.4\r\n```\r\n\r\n</details>\r\n\r\n```yml\r\npackages:\r\n - git: https://github.com/fishtown-analytics/dbt-codegen \r\n revision: master\r\n warn-unpinned: false\r\n - git: https://github.com/tailsdotcom/dbt_artifacts \r\n revision: main\r\n warn-unpinned: false\r\n - git: https://gitlab.com/gitlab-data/snowflake_spend\r\n warn-unpinned: false\r\n - package: fishtown-analytics/audit_helper\r\n version: 0.3.0\r\n```\r\n\r\n<details>\r\n<summary> <code>$ dbt deps</code> </summary>\r\n\r\n```\r\nRunning with dbt=0.19.0\r\nInstalling https://github.com/fishtown-analytics/dbt-codegen@master\r\n Installed from revision master\r\n\r\nInstalling https://github.com/tailsdotcom/dbt_artifacts@main\r\n Installed from revision main\r\n\r\nInstalling https://gitlab.com/gitlab-data/snowflake_spend@HEAD\r\n Installed from HEAD (default branch)\r\n\r\nInstalling fishtown-analytics/[email protected]\r\n Installed from version 0.3.0\r\n\r\nInstalling fishtown-analytics/[email protected]\r\n Installed from version 0.6.4\r\n```\r\n</details>\r\n\r\n### Checklist\r\n - [x] I have signed the [CLA](https://docs.getdbt.com/docs/contributor-license-agreements)\r\n - [x] I have run this code in development and it appears to resolve the stated issue\r\n - ~This PR includes tests, or tests are not required/relevant for this PR~\r\n - [x] I have updated the `CHANGELOG.md` and added information about my change to the \"dbt next\" section.\r\n\n", "before_files": [{"content": "from functools import wraps\nimport requests\nfrom dbt.exceptions import RegistryException\nfrom dbt.utils import memoized\nfrom dbt.logger import GLOBAL_LOGGER as logger\nimport os\nimport time\n\nif os.getenv('DBT_PACKAGE_HUB_URL'):\n DEFAULT_REGISTRY_BASE_URL = os.getenv('DBT_PACKAGE_HUB_URL')\nelse:\n DEFAULT_REGISTRY_BASE_URL = 'https://hub.getdbt.com/'\n\n\ndef _get_url(url, registry_base_url=None):\n if registry_base_url is None:\n registry_base_url = DEFAULT_REGISTRY_BASE_URL\n\n return '{}{}'.format(registry_base_url, url)\n\n\ndef _wrap_exceptions(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n max_attempts = 5\n attempt = 0\n while True:\n attempt += 1\n try:\n return fn(*args, **kwargs)\n except requests.exceptions.ConnectionError as exc:\n if attempt < max_attempts:\n time.sleep(1)\n continue\n\n raise RegistryException(\n 'Unable to connect to registry hub'\n ) from exc\n return wrapper\n\n\n@_wrap_exceptions\ndef _get(path, registry_base_url=None):\n url = _get_url(path, registry_base_url)\n logger.debug('Making package registry request: GET {}'.format(url))\n resp = requests.get(url)\n logger.debug('Response from registry: GET {} {}'.format(url,\n resp.status_code))\n resp.raise_for_status()\n return resp.json()\n\n\ndef index(registry_base_url=None):\n return _get('api/v1/index.json', registry_base_url)\n\n\nindex_cached = memoized(index)\n\n\ndef packages(registry_base_url=None):\n return _get('api/v1/packages.json', registry_base_url)\n\n\ndef package(name, 
registry_base_url=None):\n return _get('api/v1/{}.json'.format(name), registry_base_url)\n\n\ndef package_version(name, version, registry_base_url=None):\n return _get('api/v1/{}/{}.json'.format(name, version), registry_base_url)\n\n\ndef get_available_versions(name):\n response = package(name)\n return list(response['versions'])\n", "path": "core/dbt/clients/registry.py"}]} | 2,262 | 232 |
gh_patches_debug_4023 | rasdani/github-patches | git_diff | pymedusa__Medusa-7842 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
API v2 - Authenticate Fail
I'm trying to use the APIv2 but the authorization fails.
What is wrong with my requests?
#############Request#############
POST /api/v2/authenticate HTTP/1.1
Host: 192.168.17.204:8081
User-Agent: FlexGet/3.1.15 (www.flexget.com)
Accept-Encoding: gzip, deflate
Accept: */*
Connection: keep-alive
Content-Length: 46
Content-Type: application/json
{"username": "medusa", "password": "password"}
#############Response#############
HTTP/1.1 200 OK
Server: TornadoServer/5.1.1
Content-Type: application/json; charset=UTF-8
Date: Thu, 06 Feb 2020 15:58:10 GMT
X-Medusa-Server: 0.3.11
Access-Control-Allow-Origin: *
Access-Control-Allow-Headers: Origin, Accept, Authorization, Content-Type, X-Requested-With, X-CSRF-Token, X-Api-Key, X-Medusa-Server
Access-Control-Allow-Methods: OPTIONS, POST
Content-Length: 297
Vary: Accept-Encoding
{"token": "b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJNZWR1c2EgMC4zLjExIiwiaWF0IjoxNTgxMDA0NjkwLCJqdGkiOiJwZ25rdXI2WDZrOEdRZjBleGc1OCIsImV4cCI6MTU4MTA5MTA5MCwidXNlcm5hbWUiOiJtZWR1c2EiLCJhcGlLZXkiOiI3NWVhYWM4ZTY3YzRhNWIyODQ5MmZmZjk3ODRjNDZhMCJ9.LU7fdfIU9wFVvg_nsJpPzUgOUQ8juPR0t6_uACfr3Zc'"}
#############Request#############
GET /api/v2/series?limit=1000 HTTP/1.1
Host: 192.168.17.204:8081
User-Agent: FlexGet/3.1.15 (www.flexget.com)
Accept-Encoding: gzip, deflate
Accept: */*
Connection: keep-alive
authorization: Bearer b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJNZWR1c2EgMC4zLjExIiwiaWF0IjoxNTgxMDA0NjkwLCJqdGkiOiJwZ25rdXI2WDZrOEdRZjBleGc1OCIsImV4cCI6MTU4MTA5MTA5MCwidXNlcm5hbWUiOiJtZWR1c2EiLCJhcGlLZXkiOiI3NWVhYWM4ZTY3YzRhNWIyODQ5MmZmZjk3ODRjNDZhMCJ9.LU7fdfIU9wFVvg_nsJpPzUgOUQ8juPR0t6_uACfr3Zc'
#############Response#############
HTTP/1.1 401 Unauthorized
Server: TornadoServer/5.1.1
Content-Type: application/json; charset=UTF-8
Date: Thu, 06 Feb 2020 15:58:10 GMT
X-Medusa-Server: 0.3.11
Access-Control-Allow-Origin: *
Access-Control-Allow-Headers: Origin, Accept, Authorization, Content-Type, X-Requested-With, X-CSRF-Token, X-Api-Key, X-Medusa-Server
Access-Control-Allow-Methods: OPTIONS, GET, POST, PATCH, DELETE
Content-Length: 27
Vary: Accept-Encoding
{"error": "Invalid token."}
</issue>
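For what it's worth, the doubled quoting in the token above (`"b'eyJ0...'"`) is the classic symptom of serialising a bytes object: under Python 3, PyJWT 1.x returns bytes from `jwt.encode()`, so the value must be decoded before it goes into the JSON response. A minimal sketch, with an illustrative payload and placeholder secret:

```python
import jwt  # PyJWT: 1.x returns bytes from encode(), 2.x already returns str

payload = {"username": "medusa"}        # illustrative claims only
secret = "not-the-real-encryption-key"  # placeholder secret

token = jwt.encode(payload, secret, algorithm="HS256")
if isinstance(token, bytes):            # normalise across PyJWT versions
    token = token.decode("utf-8")

print(token)  # now safe to embed in the JSON body and use as a Bearer token
```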
<code>
[start of medusa/server/api/v2/auth.py]
1 # coding=utf-8
2 """Request handler for authentication."""
3 from __future__ import unicode_literals
4
5 import logging
6 import random
7 import string
8 import time
9 from builtins import range
10
11 import jwt
12
13 from medusa import app, helpers, notifiers
14 from medusa.logger.adapters.style import BraceAdapter
15 from medusa.server.api.v2.base import BaseRequestHandler
16
17 from six import text_type
18
19 from tornado.escape import json_decode
20
21 log = BraceAdapter(logging.getLogger(__name__))
22 log.logger.addHandler(logging.NullHandler())
23
24
25 class AuthHandler(BaseRequestHandler):
26 """Auth request handler."""
27
28 #: resource name
29 name = 'authenticate'
30 #: allowed HTTP methods
31 allowed_methods = ('POST', )
32
33 def _check_authentication(self):
34 """Override authentication check for the authentication endpoint."""
35 return None
36
37 def post(self, *args, **kwargs):
38 """Request JWT."""
39 username = app.WEB_USERNAME
40 password = app.WEB_PASSWORD
41
42 # If the user hasn't set a username and/or password just let them login
43 if not username.strip() or not password.strip():
44 return self._login()
45
46 if not self.request.body:
47 return self._failed_login(error='No Credentials Provided')
48
49 if self.request.headers['content-type'] != 'application/json':
50 return self._failed_login(error='Incorrect content-type')
51
52 request_body = json_decode(self.request.body)
53 submitted_username = request_body.get('username')
54 submitted_password = request_body.get('password')
55 submitted_exp = request_body.get('exp', 86400)
56 if username != submitted_username or password != submitted_password:
57 return self._failed_login(error='Invalid credentials')
58
59 return self._login(submitted_exp)
60
61 def _login(self, exp=86400):
62 self.set_header('Content-Type', 'application/json')
63 if app.NOTIFY_ON_LOGIN and not helpers.is_ip_private(self.request.remote_ip):
64 notifiers.notify_login(self.request.remote_ip)
65
66 log.info('{user} logged into the API v2', {'user': app.WEB_USERNAME})
67 time_now = int(time.time())
68 return self._ok(data={
69 'token': jwt.encode({
70 'iss': 'Medusa ' + text_type(app.APP_VERSION),
71 'iat': time_now,
72 # @TODO: The jti should be saved so we can revoke tokens
73 'jti': ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20)),
74 'exp': time_now + int(exp),
75 'username': app.WEB_USERNAME,
76 'apiKey': app.API_KEY
77 }, app.ENCRYPTION_SECRET, algorithm='HS256')
78 })
79
80 def _failed_login(self, error=None):
81 log.warning('{user} attempted a failed login to the API v2 from IP: {ip}', {
82 'user': app.WEB_USERNAME,
83 'ip': self.request.remote_ip
84 })
85 return self._unauthorized(error=error)
86
[end of medusa/server/api/v2/auth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/medusa/server/api/v2/auth.py b/medusa/server/api/v2/auth.py
--- a/medusa/server/api/v2/auth.py
+++ b/medusa/server/api/v2/auth.py
@@ -74,7 +74,7 @@
'exp': time_now + int(exp),
'username': app.WEB_USERNAME,
'apiKey': app.API_KEY
- }, app.ENCRYPTION_SECRET, algorithm='HS256')
+ }, app.ENCRYPTION_SECRET, algorithm='HS256').decode('utf-8')
})
def _failed_login(self, error=None):
| {"golden_diff": "diff --git a/medusa/server/api/v2/auth.py b/medusa/server/api/v2/auth.py\n--- a/medusa/server/api/v2/auth.py\n+++ b/medusa/server/api/v2/auth.py\n@@ -74,7 +74,7 @@\n 'exp': time_now + int(exp),\n 'username': app.WEB_USERNAME,\n 'apiKey': app.API_KEY\n- }, app.ENCRYPTION_SECRET, algorithm='HS256')\n+ }, app.ENCRYPTION_SECRET, algorithm='HS256').decode('utf-8')\n })\n \n def _failed_login(self, error=None):\n", "issue": "API v2 - Authenticate Fail\nI'm trying to use the APIv2 but the authorization fails.\r\nWhat is wrong with my resquests?\r\n\r\n#############Request#############\r\nPOST /api/v2/authenticate HTTP/1.1\r\nHost: 192.168.17.204:8081\r\nUser-Agent: FlexGet/3.1.15 (www.flexget.com)\r\nAccept-Encoding: gzip, deflate\r\nAccept: */*\r\nConnection: keep-alive\r\nContent-Length: 46\r\nContent-Type: application/json\r\n\r\n{\"username\": \"medusa\", \"password\": \"password\"}\r\n\r\n#############Response#############\r\nHTTP/1.1 200 OK\r\nServer: TornadoServer/5.1.1\r\nContent-Type: application/json; charset=UTF-8\r\nDate: Thu, 06 Feb 2020 15:58:10 GMT\r\nX-Medusa-Server: 0.3.11\r\nAccess-Control-Allow-Origin: *\r\nAccess-Control-Allow-Headers: Origin, Accept, Authorization, Content-Type, X-Requested-With, X-CSRF-Token, X-Api-Key, X-Medusa-Server\r\nAccess-Control-Allow-Methods: OPTIONS, POST\r\nContent-Length: 297\r\nVary: Accept-Encoding\r\n\r\n{\"token\": \"b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJNZWR1c2EgMC4zLjExIiwiaWF0IjoxNTgxMDA0NjkwLCJqdGkiOiJwZ25rdXI2WDZrOEdRZjBleGc1OCIsImV4cCI6MTU4MTA5MTA5MCwidXNlcm5hbWUiOiJtZWR1c2EiLCJhcGlLZXkiOiI3NWVhYWM4ZTY3YzRhNWIyODQ5MmZmZjk3ODRjNDZhMCJ9.LU7fdfIU9wFVvg_nsJpPzUgOUQ8juPR0t6_uACfr3Zc'\"}\r\n\r\n\r\n\r\n\r\n#############Request#############\r\nGET /api/v2/series?limit=1000 HTTP/1.1\r\nHost: 192.168.17.204:8081\r\nUser-Agent: FlexGet/3.1.15 (www.flexget.com)\r\nAccept-Encoding: gzip, deflate\r\nAccept: */*\r\nConnection: keep-alive\r\nauthorization: Bearer b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJNZWR1c2EgMC4zLjExIiwiaWF0IjoxNTgxMDA0NjkwLCJqdGkiOiJwZ25rdXI2WDZrOEdRZjBleGc1OCIsImV4cCI6MTU4MTA5MTA5MCwidXNlcm5hbWUiOiJtZWR1c2EiLCJhcGlLZXkiOiI3NWVhYWM4ZTY3YzRhNWIyODQ5MmZmZjk3ODRjNDZhMCJ9.LU7fdfIU9wFVvg_nsJpPzUgOUQ8juPR0t6_uACfr3Zc'\r\n\r\n#############Response#############\r\nHTTP/1.1 401 Unauthorized\r\nServer: TornadoServer/5.1.1\r\nContent-Type: application/json; charset=UTF-8\r\nDate: Thu, 06 Feb 2020 15:58:10 GMT\r\nX-Medusa-Server: 0.3.11\r\nAccess-Control-Allow-Origin: *\r\nAccess-Control-Allow-Headers: Origin, Accept, Authorization, Content-Type, X-Requested-With, X-CSRF-Token, X-Api-Key, X-Medusa-Server\r\nAccess-Control-Allow-Methods: OPTIONS, GET, POST, PATCH, DELETE\r\nContent-Length: 27\r\nVary: Accept-Encoding\r\n\r\n{\"error\": \"Invalid token.\"}\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"Request handler for authentication.\"\"\"\nfrom __future__ import unicode_literals\n\nimport logging\nimport random\nimport string\nimport time\nfrom builtins import range\n\nimport jwt\n\nfrom medusa import app, helpers, notifiers\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.server.api.v2.base import BaseRequestHandler\n\nfrom six import text_type\n\nfrom tornado.escape import json_decode\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass AuthHandler(BaseRequestHandler):\n \"\"\"Auth request handler.\"\"\"\n\n #: resource name\n name = 'authenticate'\n #: allowed HTTP methods\n allowed_methods = ('POST', )\n\n def 
_check_authentication(self):\n \"\"\"Override authentication check for the authentication endpoint.\"\"\"\n return None\n\n def post(self, *args, **kwargs):\n \"\"\"Request JWT.\"\"\"\n username = app.WEB_USERNAME\n password = app.WEB_PASSWORD\n\n # If the user hasn't set a username and/or password just let them login\n if not username.strip() or not password.strip():\n return self._login()\n\n if not self.request.body:\n return self._failed_login(error='No Credentials Provided')\n\n if self.request.headers['content-type'] != 'application/json':\n return self._failed_login(error='Incorrect content-type')\n\n request_body = json_decode(self.request.body)\n submitted_username = request_body.get('username')\n submitted_password = request_body.get('password')\n submitted_exp = request_body.get('exp', 86400)\n if username != submitted_username or password != submitted_password:\n return self._failed_login(error='Invalid credentials')\n\n return self._login(submitted_exp)\n\n def _login(self, exp=86400):\n self.set_header('Content-Type', 'application/json')\n if app.NOTIFY_ON_LOGIN and not helpers.is_ip_private(self.request.remote_ip):\n notifiers.notify_login(self.request.remote_ip)\n\n log.info('{user} logged into the API v2', {'user': app.WEB_USERNAME})\n time_now = int(time.time())\n return self._ok(data={\n 'token': jwt.encode({\n 'iss': 'Medusa ' + text_type(app.APP_VERSION),\n 'iat': time_now,\n # @TODO: The jti should be saved so we can revoke tokens\n 'jti': ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20)),\n 'exp': time_now + int(exp),\n 'username': app.WEB_USERNAME,\n 'apiKey': app.API_KEY\n }, app.ENCRYPTION_SECRET, algorithm='HS256')\n })\n\n def _failed_login(self, error=None):\n log.warning('{user} attempted a failed login to the API v2 from IP: {ip}', {\n 'user': app.WEB_USERNAME,\n 'ip': self.request.remote_ip\n })\n return self._unauthorized(error=error)\n", "path": "medusa/server/api/v2/auth.py"}]} | 2,269 | 138 |
gh_patches_debug_17516 | rasdani/github-patches | git_diff | tensorflow__addons-691 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
WeightNormalization layer can't save in h5
**System information**
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04):
Ubuntu 16.04.6 LTS
- TensorFlow version and how it was installed (source or binary):
tensorflow-gpu 2.0.0, binary
- TensorFlow-Addons version and how it was installed (source or binary):
tensorflow-addons 0.6.0, binary
- Python version:
Python 3.6.9
- Is GPU used? (yes/no):
yes
**Describe the bug**
WeightNormalization layer can't save to h5
A clear and concise description of what the bug is.
WeightNormalization causes duplicate variable names.
**Code to reproduce the issue**
```python
import tensorflow as tf
import tensorflow_addons as tfa
conv = tf.keras.layers.Conv1D(1, 1, name='conv')
wn_conv = tfa.layers.WeightNormalization(conv)
model = tf.keras.Sequential(layers=[wn_conv])
model.build([1, 2, 3])
print([w.name for w in model.layers[0].weights])
model.save_weights('model.h5')
```
Provide a reproducible test case that is the bare minimum necessary to generate the problem.
See above.
**Other info / logs**
```
['weight_normalization/g:0', 'weight_normalization/kernel:0', 'weight_normalization/bias:0', 'weight_normalization/initialized:0', 'weight_normalization/kernel:0', 'weight_normalization/bias:0']
Traceback (most recent call last):
File "/home/ubuntu/hankcs/laser/tests/playground/wn_bug.py", line 14, in <module>
model.save_weights('model.h5')
File "/home/ubuntu/.local/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/network.py", line 1074, in save_weights
saving.save_weights_to_hdf5_group(f, self.layers)
File "/home/ubuntu/.local/lib/python3.6/site-packages/tensorflow_core/python/keras/saving/hdf5_format.py", line 631, in save_weights_to_hdf5_group
param_dset = g.create_dataset(name, val.shape, dtype=val.dtype)
File "/home/ubuntu/.local/lib/python3.6/site-packages/h5py/_hl/group.py", line 139, in create_dataset
self[name] = dset
File "/home/ubuntu/.local/lib/python3.6/site-packages/h5py/_hl/group.py", line 371, in __setitem__
h5o.link(obj.id, self.id, name, lcpl=lcpl, lapl=self._lapl)
File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
File "h5py/h5o.pyx", line 202, in h5py.h5o.link
RuntimeError: Unable to create link (name already exists)
```
There are 2 `weight_normalization/kernel:0`.
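
A quick way to see which names collide, reusing the `model` built in the snippet above (this check is only an illustration):

```python
from collections import Counter

names = [w.name for w in model.layers[0].weights]
duplicates = [name for name, count in Counter(names).items() if count > 1]
print(duplicates)  # ['weight_normalization/kernel:0', 'weight_normalization/bias:0']
```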
</issue>
<code>
[start of tensorflow_addons/layers/wrappers.py]
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # =============================================================================
15 from __future__ import absolute_import
16 from __future__ import division
17 from __future__ import print_function
18
19 import tensorflow as tf
20
21
22 @tf.keras.utils.register_keras_serializable(package='Addons')
23 class WeightNormalization(tf.keras.layers.Wrapper):
24 """This wrapper reparameterizes a layer by decoupling the weight's
25 magnitude and direction.
26
27 This speeds up convergence by improving the
28 conditioning of the optimization problem.
29 Weight Normalization: A Simple Reparameterization to Accelerate
30 Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868
31 Tim Salimans, Diederik P. Kingma (2016)
32 WeightNormalization wrapper works for keras and tf layers.
33 ```python
34 net = WeightNormalization(
35 tf.keras.layers.Conv2D(2, 2, activation='relu'),
36 input_shape=(32, 32, 3),
37 data_init=True)(x)
38 net = WeightNormalization(
39 tf.keras.layers.Conv2D(16, 5, activation='relu'),
40 data_init=True)(net)
41 net = WeightNormalization(
42 tf.keras.layers.Dense(120, activation='relu'),
43 data_init=True)(net)
44 net = WeightNormalization(
45 tf.keras.layers.Dense(n_classes),
46 data_init=True)(net)
47 ```
48 Arguments:
49 layer: a layer instance.
50 data_init: If `True` use data dependent variable initialization
51 Raises:
52 ValueError: If not initialized with a `Layer` instance.
53 ValueError: If `Layer` does not contain a `kernel` of weights
54 NotImplementedError: If `data_init` is True and running graph execution
55 """
56
57 def __init__(self, layer, data_init=True, **kwargs):
58 super(WeightNormalization, self).__init__(layer, **kwargs)
59 self.data_init = data_init
60 self._track_trackable(layer, name='layer')
61
62 def build(self, input_shape):
63 """Build `Layer`"""
64 input_shape = tf.TensorShape(input_shape).as_list()
65 self.input_spec = tf.keras.layers.InputSpec(
66 shape=[None] + input_shape[1:])
67
68 if not self.layer.built:
69 self.layer.build(input_shape)
70
71 if not hasattr(self.layer, 'kernel'):
72 raise ValueError('`WeightNormalization` must wrap a layer that'
73 ' contains a `kernel` for weights')
74
75 # The kernel's filter or unit dimension is -1
76 self.layer_depth = int(self.layer.kernel.shape[-1])
77 self.kernel_norm_axes = list(range(self.layer.kernel.shape.rank - 1))
78
79 self.g = self.add_weight(
80 name='g',
81 shape=(self.layer_depth,),
82 initializer='ones',
83 dtype=self.layer.kernel.dtype,
84 trainable=True)
85 self.v = self.layer.kernel
86
87 self._initialized = self.add_weight(
88 name='initialized',
89 shape=None,
90 initializer='zeros',
91 dtype=tf.dtypes.bool,
92 trainable=False)
93
94 if self.data_init:
95 # Used for data initialization in self._data_dep_init.
96 layer_config = tf.keras.layers.serialize(self.layer)
97 layer_config['config']['trainable'] = False
98 self._naked_clone_layer = tf.keras.layers.deserialize(layer_config)
99 self._naked_clone_layer.build(input_shape)
100 self._naked_clone_layer.set_weights(self.layer.get_weights())
101 self._naked_clone_layer.activation = None
102
103 self.built = True
104
105 def call(self, inputs):
106 """Call `Layer`"""
107
108 def _do_nothing():
109 return tf.identity(self.g)
110
111 def _update_weights():
112 # Ensure we read `self.g` after _update_weights.
113 with tf.control_dependencies(self._initialize_weights(inputs)):
114 return tf.identity(self.g)
115
116 g = tf.cond(self._initialized, _do_nothing, _update_weights)
117
118 with tf.name_scope('compute_weights'):
119 # Replace kernel by normalized weight variable.
120 self.layer.kernel = tf.nn.l2_normalize(
121 self.v, axis=self.kernel_norm_axes) * g
122
123 # Ensure we calculate result after updating kernel.
124 update_kernel = tf.identity(self.layer.kernel)
125 with tf.control_dependencies([update_kernel]):
126 outputs = self.layer(inputs)
127 return outputs
128
129 def compute_output_shape(self, input_shape):
130 return tf.TensorShape(
131 self.layer.compute_output_shape(input_shape).as_list())
132
133 def _initialize_weights(self, inputs):
134 """Initialize weight g.
135
136 The initial value of g could either from the initial value in v,
137 or by the input value if self.data_init is True.
138 """
139 with tf.control_dependencies([
140 tf.debugging.assert_equal( # pylint: disable=bad-continuation
141 self._initialized,
142 False,
143 message='The layer has been initialized.')
144 ]):
145 if self.data_init:
146 assign_tensors = self._data_dep_init(inputs)
147 else:
148 assign_tensors = self._init_norm()
149 assign_tensors.append(self._initialized.assign(True))
150 return assign_tensors
151
152 def _init_norm(self):
153 """Set the weight g with the norm of the weight vector."""
154 with tf.name_scope('init_norm'):
155 v_flat = tf.reshape(self.v, [-1, self.layer_depth])
156 v_norm = tf.linalg.norm(v_flat, axis=0)
157 g_tensor = self.g.assign(tf.reshape(v_norm, (self.layer_depth,)))
158 return [g_tensor]
159
160 def _data_dep_init(self, inputs):
161 """Data dependent initialization."""
162 with tf.name_scope('data_dep_init'):
163 # Generate data dependent init values
164 x_init = self._naked_clone_layer(inputs)
165 data_norm_axes = list(range(x_init.shape.rank - 1))
166 m_init, v_init = tf.nn.moments(x_init, data_norm_axes)
167 scale_init = 1. / tf.math.sqrt(v_init + 1e-10)
168
169 # Assign data dependent init values
170 g_tensor = self.g.assign(self.g * scale_init)
171 if hasattr(self.layer, 'bias'):
172 bias_tensor = self.layer.bias.assign(-m_init * scale_init)
173 return [g_tensor, bias_tensor]
174 else:
175 return [g_tensor]
176
177 def get_config(self):
178 config = {'data_init': self.data_init}
179 base_config = super(WeightNormalization, self).get_config()
180 return dict(list(base_config.items()) + list(config.items()))
181
[end of tensorflow_addons/layers/wrappers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tensorflow_addons/layers/wrappers.py b/tensorflow_addons/layers/wrappers.py
--- a/tensorflow_addons/layers/wrappers.py
+++ b/tensorflow_addons/layers/wrappers.py
@@ -93,12 +93,14 @@
if self.data_init:
# Used for data initialization in self._data_dep_init.
- layer_config = tf.keras.layers.serialize(self.layer)
- layer_config['config']['trainable'] = False
- self._naked_clone_layer = tf.keras.layers.deserialize(layer_config)
- self._naked_clone_layer.build(input_shape)
- self._naked_clone_layer.set_weights(self.layer.get_weights())
- self._naked_clone_layer.activation = None
+ with tf.name_scope('data_dep_init'):
+ layer_config = tf.keras.layers.serialize(self.layer)
+ layer_config['config']['trainable'] = False
+ self._naked_clone_layer = tf.keras.layers.deserialize(
+ layer_config)
+ self._naked_clone_layer.build(input_shape)
+ self._naked_clone_layer.set_weights(self.layer.get_weights())
+ self._naked_clone_layer.activation = None
self.built = True
| {"golden_diff": "diff --git a/tensorflow_addons/layers/wrappers.py b/tensorflow_addons/layers/wrappers.py\n--- a/tensorflow_addons/layers/wrappers.py\n+++ b/tensorflow_addons/layers/wrappers.py\n@@ -93,12 +93,14 @@\n \n if self.data_init:\n # Used for data initialization in self._data_dep_init.\n- layer_config = tf.keras.layers.serialize(self.layer)\n- layer_config['config']['trainable'] = False\n- self._naked_clone_layer = tf.keras.layers.deserialize(layer_config)\n- self._naked_clone_layer.build(input_shape)\n- self._naked_clone_layer.set_weights(self.layer.get_weights())\n- self._naked_clone_layer.activation = None\n+ with tf.name_scope('data_dep_init'):\n+ layer_config = tf.keras.layers.serialize(self.layer)\n+ layer_config['config']['trainable'] = False\n+ self._naked_clone_layer = tf.keras.layers.deserialize(\n+ layer_config)\n+ self._naked_clone_layer.build(input_shape)\n+ self._naked_clone_layer.set_weights(self.layer.get_weights())\n+ self._naked_clone_layer.activation = None\n \n self.built = True\n", "issue": "WeightNormalization layer can't save in h5\n**System information**\r\n- OS Platform and Distribution (e.g., Linux Ubuntu 16.04):\r\nUbuntu 16.04.6 LTS\r\n- TensorFlow version and how it was installed (source or binary):\r\ntensorflow-gpu 2.0.0, binary\r\n- TensorFlow-Addons version and how it was installed (source or binary):\r\ntensorflow-addons 0.6.0, binary\r\n- Python version:\r\nPython 3.6.9\r\n- Is GPU used? (yes/no):\r\nyes\r\n**Describe the bug**\r\nWeightNormalization layer can't save to h5\r\n\r\nA clear and concise description of what the bug is.\r\nWeightNormalization causing duplicate names of variables.\r\n\r\n**Code to reproduce the issue**\r\n\r\n```python\r\nimport tensorflow as tf\r\nimport tensorflow_addons as tfa\r\n\r\nconv = tf.keras.layers.Conv1D(1, 1, name='conv')\r\nwn_conv = tfa.layers.WeightNormalization(conv)\r\n\r\nmodel = tf.keras.Sequential(layers=[wn_conv])\r\nmodel.build([1, 2, 3])\r\nprint([w.name for w in model.layers[0].weights])\r\nmodel.save_weights('model.h5')\r\n```\r\n\r\nProvide a reproducible test case that is the bare minimum necessary to generate the problem.\r\nSee above.\r\n\r\n**Other info / logs**\r\n\r\n```\r\n['weight_normalization/g:0', 'weight_normalization/kernel:0', 'weight_normalization/bias:0', 'weight_normalization/initialized:0', 'weight_normalization/kernel:0', 'weight_normalization/bias:0']\r\nTraceback (most recent call last):\r\n File \"/home/ubuntu/hankcs/laser/tests/playground/wn_bug.py\", line 14, in <module>\r\n model.save_weights('model.h5')\r\n File \"/home/ubuntu/.local/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/network.py\", line 1074, in save_weights\r\n saving.save_weights_to_hdf5_group(f, self.layers)\r\n File \"/home/ubuntu/.local/lib/python3.6/site-packages/tensorflow_core/python/keras/saving/hdf5_format.py\", line 631, in save_weights_to_hdf5_group\r\n param_dset = g.create_dataset(name, val.shape, dtype=val.dtype)\r\n File \"/home/ubuntu/.local/lib/python3.6/site-packages/h5py/_hl/group.py\", line 139, in create_dataset\r\n self[name] = dset\r\n File \"/home/ubuntu/.local/lib/python3.6/site-packages/h5py/_hl/group.py\", line 371, in __setitem__\r\n h5o.link(obj.id, self.id, name, lcpl=lcpl, lapl=self._lapl)\r\n File \"h5py/_objects.pyx\", line 54, in h5py._objects.with_phil.wrapper\r\n File \"h5py/_objects.pyx\", line 55, in h5py._objects.with_phil.wrapper\r\n File \"h5py/h5o.pyx\", line 202, in h5py.h5o.link\r\nRuntimeError: Unable to create link (name already 
exists)\r\n```\r\n\r\nThere are 2 `weight_normalization/kernel:0`.\r\n\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\[email protected]_keras_serializable(package='Addons')\nclass WeightNormalization(tf.keras.layers.Wrapper):\n \"\"\"This wrapper reparameterizes a layer by decoupling the weight's\n magnitude and direction.\n\n This speeds up convergence by improving the\n conditioning of the optimization problem.\n Weight Normalization: A Simple Reparameterization to Accelerate\n Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868\n Tim Salimans, Diederik P. Kingma (2016)\n WeightNormalization wrapper works for keras and tf layers.\n ```python\n net = WeightNormalization(\n tf.keras.layers.Conv2D(2, 2, activation='relu'),\n input_shape=(32, 32, 3),\n data_init=True)(x)\n net = WeightNormalization(\n tf.keras.layers.Conv2D(16, 5, activation='relu'),\n data_init=True)(net)\n net = WeightNormalization(\n tf.keras.layers.Dense(120, activation='relu'),\n data_init=True)(net)\n net = WeightNormalization(\n tf.keras.layers.Dense(n_classes),\n data_init=True)(net)\n ```\n Arguments:\n layer: a layer instance.\n data_init: If `True` use data dependent variable initialization\n Raises:\n ValueError: If not initialized with a `Layer` instance.\n ValueError: If `Layer` does not contain a `kernel` of weights\n NotImplementedError: If `data_init` is True and running graph execution\n \"\"\"\n\n def __init__(self, layer, data_init=True, **kwargs):\n super(WeightNormalization, self).__init__(layer, **kwargs)\n self.data_init = data_init\n self._track_trackable(layer, name='layer')\n\n def build(self, input_shape):\n \"\"\"Build `Layer`\"\"\"\n input_shape = tf.TensorShape(input_shape).as_list()\n self.input_spec = tf.keras.layers.InputSpec(\n shape=[None] + input_shape[1:])\n\n if not self.layer.built:\n self.layer.build(input_shape)\n\n if not hasattr(self.layer, 'kernel'):\n raise ValueError('`WeightNormalization` must wrap a layer that'\n ' contains a `kernel` for weights')\n\n # The kernel's filter or unit dimension is -1\n self.layer_depth = int(self.layer.kernel.shape[-1])\n self.kernel_norm_axes = list(range(self.layer.kernel.shape.rank - 1))\n\n self.g = self.add_weight(\n name='g',\n shape=(self.layer_depth,),\n initializer='ones',\n dtype=self.layer.kernel.dtype,\n trainable=True)\n self.v = self.layer.kernel\n\n self._initialized = self.add_weight(\n name='initialized',\n shape=None,\n initializer='zeros',\n dtype=tf.dtypes.bool,\n trainable=False)\n\n if self.data_init:\n # Used for data initialization in self._data_dep_init.\n layer_config = tf.keras.layers.serialize(self.layer)\n layer_config['config']['trainable'] = False\n self._naked_clone_layer = 
tf.keras.layers.deserialize(layer_config)\n self._naked_clone_layer.build(input_shape)\n self._naked_clone_layer.set_weights(self.layer.get_weights())\n self._naked_clone_layer.activation = None\n\n self.built = True\n\n def call(self, inputs):\n \"\"\"Call `Layer`\"\"\"\n\n def _do_nothing():\n return tf.identity(self.g)\n\n def _update_weights():\n # Ensure we read `self.g` after _update_weights.\n with tf.control_dependencies(self._initialize_weights(inputs)):\n return tf.identity(self.g)\n\n g = tf.cond(self._initialized, _do_nothing, _update_weights)\n\n with tf.name_scope('compute_weights'):\n # Replace kernel by normalized weight variable.\n self.layer.kernel = tf.nn.l2_normalize(\n self.v, axis=self.kernel_norm_axes) * g\n\n # Ensure we calculate result after updating kernel.\n update_kernel = tf.identity(self.layer.kernel)\n with tf.control_dependencies([update_kernel]):\n outputs = self.layer(inputs)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return tf.TensorShape(\n self.layer.compute_output_shape(input_shape).as_list())\n\n def _initialize_weights(self, inputs):\n \"\"\"Initialize weight g.\n\n The initial value of g could either from the initial value in v,\n or by the input value if self.data_init is True.\n \"\"\"\n with tf.control_dependencies([\n tf.debugging.assert_equal( # pylint: disable=bad-continuation\n self._initialized,\n False,\n message='The layer has been initialized.')\n ]):\n if self.data_init:\n assign_tensors = self._data_dep_init(inputs)\n else:\n assign_tensors = self._init_norm()\n assign_tensors.append(self._initialized.assign(True))\n return assign_tensors\n\n def _init_norm(self):\n \"\"\"Set the weight g with the norm of the weight vector.\"\"\"\n with tf.name_scope('init_norm'):\n v_flat = tf.reshape(self.v, [-1, self.layer_depth])\n v_norm = tf.linalg.norm(v_flat, axis=0)\n g_tensor = self.g.assign(tf.reshape(v_norm, (self.layer_depth,)))\n return [g_tensor]\n\n def _data_dep_init(self, inputs):\n \"\"\"Data dependent initialization.\"\"\"\n with tf.name_scope('data_dep_init'):\n # Generate data dependent init values\n x_init = self._naked_clone_layer(inputs)\n data_norm_axes = list(range(x_init.shape.rank - 1))\n m_init, v_init = tf.nn.moments(x_init, data_norm_axes)\n scale_init = 1. / tf.math.sqrt(v_init + 1e-10)\n\n # Assign data dependent init values\n g_tensor = self.g.assign(self.g * scale_init)\n if hasattr(self.layer, 'bias'):\n bias_tensor = self.layer.bias.assign(-m_init * scale_init)\n return [g_tensor, bias_tensor]\n else:\n return [g_tensor]\n\n def get_config(self):\n config = {'data_init': self.data_init}\n base_config = super(WeightNormalization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n", "path": "tensorflow_addons/layers/wrappers.py"}]} | 3,190 | 271 |
gh_patches_debug_28017 | rasdani/github-patches | git_diff | chainer__chainer-1483 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot report int value
I couldn't report an int value. We need to check the type of the input value here:
https://github.com/pfnet/chainer/blob/master/chainer/reporter.py#L244
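
A minimal sketch of the failure mode (a hypothetical repro; on a CuPy-enabled install the plain int that reaches `cuda.get_device` is interpreted as a device id, which is not what `Summary.add` intends):

```python
from chainer import reporter

summary = reporter.Summary()
summary.add(1)                   # plain Python int flows into cuda.get_device(1)
print(summary.compute_mean())
```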
</issue>
<code>
[start of chainer/reporter.py]
1 import collections
2 import contextlib
3
4 import numpy
5 import six
6
7 from chainer import cuda
8 from chainer import variable
9
10
11 class Reporter(object):
12
13 """Object to which observed values are reported.
14
15 Reporter is used to collect values that users want to watch. The reporter
16 object holds a mapping from value names to the actually observed values.
17 We call this mapping `observations`.
18
19 When a value is passed to the reporter, an object called `observer` can be
20 optionally attached. In this case, the name of the observer is added as the
21 prefix of the value name. The observer name should be registered
22 beforehand.
23
24 See the following example::
25
26 >>> from chainer import Reporter, report, report_scope
27 >>>
28 >>> reporter = Reporter()
29 >>> observer = object() # it can be an arbitrary (reference) object
30 >>> reporter.add_observer('my_observer:', observer)
31 >>> observation = {}
32 >>> with reporter.scope(observation):
33 ... reporter.report({'x': 1}, observer)
34 ...
35 >>> observation
36 {'my_observer:x': 1}
37
38 There are also a global API to add values::
39
40 >>> observation = {}
41 >>> with report_scope(observation):
42 ... report({'x': 1}, observer)
43 ...
44 >>> observation
45 {'my_observer:x': 1}
46
47 The most important application of Reporter is to report observed values
48 from each link or chain in the training and validation procedures.
49 :class:`~chainer.training.Trainer` and some extensions prepare their own
50 Reporter object with the hierarchy of the target link registered as
51 observers. We can use :func:`report` function inside any links and chains
52 to report the observed values (e.g., training loss, accuracy, activation
53 statistics, etc.).
54
55 Attributes:
56 observation: Dictionary of observed values.
57
58 """
59 def __init__(self):
60 self._observer_names = {}
61 self.observation = {}
62
63 def __enter__(self):
64 """Makes this reporter object current."""
65 _reporters.append(self)
66
67 def __exit__(self, exc_type, exc_value, traceback):
68 """Recovers the previous reporter object to the current."""
69 _reporters.pop()
70
71 @contextlib.contextmanager
72 def scope(self, observation):
73 """Creates a scope to report observed values to ``observation``.
74
75 This is a context manager to be passed to ``with`` statements. In this
76 scope, the observation dictionary is changed to the given one.
77
78 It also makes this reporter object current.
79
80 Args:
81 observation (dict): Observation dictionary. All observations
82 reported inside of the ``with`` statement are written to this
83 dictionary.
84
85 """
86 old = self.observation
87 self.observation = observation
88 self.__enter__()
89 yield
90 self.__exit__(None, None, None)
91 self.observation = old
92
93 def add_observer(self, name, observer):
94 """Registers an observer of values.
95
96 Observer defines a scope of names for observed values. Values observed
97 with the observer are registered with names prefixed by the observer
98 name.
99
100 Args:
101 name (str): Name of the observer.
102 observer: The observer object. Note that the reporter distinguishes
103 the observers by their object ids (i.e., ``id(owner)``), rather
104 than the object equality.
105
106 """
107 self._observer_names[id(observer)] = name
108
109 def add_observers(self, prefix, observers):
110 """Registers multiple observers at once.
111
112 This is a convenient method to register multiple objects at once.
113
114 Args:
115 prefix (str): Prefix of each name of observers.
116 observers: Iterator of name and observer pairs.
117
118 """
119 for name, observer in observers:
120 self._observer_names[id(observer)] = prefix + name
121
122 def report(self, values, observer=None):
123 """Reports observed values.
124
125 The values are written with the key, prefixed by the name of the
126 observer object if given.
127
128 Args:
129 values (dict): Dictionary of observed values.
130 observer: Observer object. Its object ID is used to retrieve the
131 observer name, which is used as the prefix of the registration
132 name of the observed value.
133
134 """
135 if observer is not None:
136 observer_id = id(observer)
137 if observer_id not in self._observer_names:
138 raise KeyError(
139 'Given observer is not registered to the reporter.')
140 observer_name = self._observer_names[observer_id]
141 for key, value in six.iteritems(values):
142 name = '%s/%s' % (observer_name, key)
143 self.observation[name] = value
144 else:
145 self.observation.update(values)
146
147
148 _reporters = []
149
150
151 def get_current_reporter():
152 """Returns the current reporter object."""
153 return _reporters[-1]
154
155
156 def report(values, observer=None):
157 """Reports observed values with the current reporter object.
158
159 Any reporter object can be set current by the ``with`` statement. This
160 function calls the :meth:`Report.report` method of the current reporter.
161 If no reporter object is current, this function does nothing.
162
163 .. admonition:: Example
164
165 The most typical example is a use within links and chains. Suppose that
166 a link is registered to the current reporter as an observer (for
167 example, the target link of the optimizer is automatically registered to
168 the reporter of the :class:`~chainer.training.Trainer`). We can report
169 some values from the link as follows::
170
171 class MyRegressor(chainer.Chain):
172 def __init__(self, predictor):
173 super(MyRegressor, self).__init__(predictor=predictor)
174
175 def __call__(self, x, y):
176 # This chain just computes the mean absolute and squared
177 # errors between the prediction and y.
178 pred = self.predictor(x)
179 abs_error = F.sum(F.abs(pred - y)) / len(x.data)
180 loss = F.mean_squared_error(pred, y)
181
182 # Report the mean absolute and squared errors.
183 report({'abs_error': abs_error, 'squared_error': loss}, self)
184
185 return loss
186
187 If the link is named ``'main'`` in the hierarchy (which is the default
188 name of the target link in the
189 :class:`~chainer.training.StandardUpdater`), these reported values are
190 named ``'main/abs_error'`` and ``'main/squared_error'``. If these values
191 are reported inside the :class:`~chainer.training.extension.Evaluator`
192 extension, ``'validation/'`` is added at the head of the link name, thus
193 the item names are changed to ``'validation/main/abs_error'`` and
194 ``'validation/main/squared_error'`` (``'validation'`` is the default
195 name of the Evaluator extension).
196
197 Args:
198 values (dict): Dictionary of observed values.
199 observer: Observer object. Its object ID is used to retrieve the
200 observer name, which is used as the prefix of the registration name
201 of the observed value.
202
203 """
204 if _reporters:
205 current = _reporters[-1]
206 current.report(values, observer)
207
208
209 @contextlib.contextmanager
210 def report_scope(observation):
211 """Returns a report scope with the current reporter.
212
213 This is equivalent to ``get_current_reporter().scope(observation)``,
214 except that it does not make the reporter current redundantly.
215
216 """
217 current = _reporters[-1]
218 old = current.observation
219 current.observation = observation
220 yield
221 current.observation = old
222
223
224 class Summary(object):
225
226 """Online summarization of a sequence of scalars.
227
228 Summary computes the statistics of given scalars online.
229
230 """
231 def __init__(self):
232 self._x = 0
233 self._x2 = 0
234 self._n = 0
235
236 def add(self, value):
237 """Adds a scalar value.
238
239 Args:
240 value: Scalar value to accumulate. It is either a NumPy scalar or
241 a zero-dimensional array (on CPU or GPU).
242
243 """
244 with cuda.get_device(value):
245 self._x += value
246 self._x2 += value * value
247 self._n += 1
248
249 def compute_mean(self):
250 """Computes the mean."""
251 x, n = self._x, self._n
252 with cuda.get_device(x):
253 return x / n
254
255 def make_statistics(self):
256 """Computes and returns the mean and standard deviation values.
257
258 Returns:
259 tuple: Mean and standard deviation values.
260
261 """
262 x, n = self._x, self._n
263 xp = cuda.get_array_module(x)
264 with cuda.get_device(x):
265 mean = x / n
266 var = self._x2 / n - mean * mean
267 std = xp.sqrt(var, var)
268 return mean, std
269
270
271 class DictSummary(object):
272
273 """Online summarization of a sequence of dictionaries.
274
275 ``DictSummary`` computes the statistics of a given set of scalars online.
276 It only computes the statistics for scalar values and variables of scalar
277 values in the dictionaries.
278
279 """
280 def __init__(self):
281 self._summaries = collections.defaultdict(Summary)
282
283 def add(self, d):
284 """Adds a dictionary of scalars.
285
286 Args:
287 d (dict): Dictionary of scalars to accumulate. Only elements of
288 scalars, zero-dimensional arrays, and variables of
289 zero-dimensional arrays are accumulated.
290
291 """
292 summaries = self._summaries
293 for k, v in six.iteritems(d):
294 if isinstance(v, variable.Variable):
295 v = v.data
296 if numpy.isscalar(v) or getattr(v, 'ndim', -1) == 0:
297 summaries[k].add(v)
298
299 def compute_mean(self):
300 """Creates a dictionary of mean values.
301
302 It returns a single dictionary that holds a mean value for each entry
303 added to the summary.
304
305 Returns:
306 dict: Dictionary of mean values.
307
308 """
309 return {name: summary.compute_mean()
310 for name, summary in six.iteritems(self._summaries)}
311
312 def make_statistics(self):
313 """Creates a dictionary of statistics.
314
315 It returns a single dictionary that holds mean and standard deviation
316 values for every entry added to the summary. For an entry of name
317 ``'key'``, these values are added to the dictionary by names ``'key'``
318 and ``'key.std'``, respectively.
319
320 Returns:
321 dict: Dictionary of statistics of all entries.
322
323 """
324 stats = {}
325 for name, summary in six.iteritems(self._summaries):
326 mean, std = summary.make_statistics()
327 stats[name] = mean
328 stats[name + '.std'] = std
329
330 return stats
331
[end of chainer/reporter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/reporter.py b/chainer/reporter.py
--- a/chainer/reporter.py
+++ b/chainer/reporter.py
@@ -221,6 +221,13 @@
current.observation = old
+def _get_device(x):
+ if numpy.isscalar(x):
+ return cuda.DummyDevice
+ else:
+ return cuda.get_device(x)
+
+
class Summary(object):
"""Online summarization of a sequence of scalars.
@@ -241,7 +248,7 @@
a zero-dimensional array (on CPU or GPU).
"""
- with cuda.get_device(value):
+ with _get_device(value):
self._x += value
self._x2 += value * value
self._n += 1
@@ -249,7 +256,7 @@
def compute_mean(self):
"""Computes the mean."""
x, n = self._x, self._n
- with cuda.get_device(x):
+ with _get_device(x):
return x / n
def make_statistics(self):
@@ -261,10 +268,10 @@
"""
x, n = self._x, self._n
xp = cuda.get_array_module(x)
- with cuda.get_device(x):
+ with _get_device(x):
mean = x / n
var = self._x2 / n - mean * mean
- std = xp.sqrt(var, var)
+ std = xp.sqrt(var)
return mean, std
| {"golden_diff": "diff --git a/chainer/reporter.py b/chainer/reporter.py\n--- a/chainer/reporter.py\n+++ b/chainer/reporter.py\n@@ -221,6 +221,13 @@\n current.observation = old\n \n \n+def _get_device(x):\n+ if numpy.isscalar(x):\n+ return cuda.DummyDevice\n+ else:\n+ return cuda.get_device(x)\n+\n+\n class Summary(object):\n \n \"\"\"Online summarization of a sequence of scalars.\n@@ -241,7 +248,7 @@\n a zero-dimensional array (on CPU or GPU).\n \n \"\"\"\n- with cuda.get_device(value):\n+ with _get_device(value):\n self._x += value\n self._x2 += value * value\n self._n += 1\n@@ -249,7 +256,7 @@\n def compute_mean(self):\n \"\"\"Computes the mean.\"\"\"\n x, n = self._x, self._n\n- with cuda.get_device(x):\n+ with _get_device(x):\n return x / n\n \n def make_statistics(self):\n@@ -261,10 +268,10 @@\n \"\"\"\n x, n = self._x, self._n\n xp = cuda.get_array_module(x)\n- with cuda.get_device(x):\n+ with _get_device(x):\n mean = x / n\n var = self._x2 / n - mean * mean\n- std = xp.sqrt(var, var)\n+ std = xp.sqrt(var)\n return mean, std\n", "issue": "Cannot report int value\nI couldn't report int value. We need to check type of an input value here:\nhttps://github.com/pfnet/chainer/blob/master/chainer/reporter.py#L244\n\n", "before_files": [{"content": "import collections\nimport contextlib\n\nimport numpy\nimport six\n\nfrom chainer import cuda\nfrom chainer import variable\n\n\nclass Reporter(object):\n\n \"\"\"Object to which observed values are reported.\n\n Reporter is used to collect values that users want to watch. The reporter\n object holds a mapping from value names to the actually observed values.\n We call this mapping `observations`.\n\n When a value is passed to the reporter, an object called `observer` can be\n optionally attached. In this case, the name of the observer is added as the\n prefix of the value name. The observer name should be registered\n beforehand.\n\n See the following example::\n\n >>> from chainer import Reporter, report, report_scope\n >>>\n >>> reporter = Reporter()\n >>> observer = object() # it can be an arbitrary (reference) object\n >>> reporter.add_observer('my_observer:', observer)\n >>> observation = {}\n >>> with reporter.scope(observation):\n ... reporter.report({'x': 1}, observer)\n ...\n >>> observation\n {'my_observer:x': 1}\n\n There are also a global API to add values::\n\n >>> observation = {}\n >>> with report_scope(observation):\n ... report({'x': 1}, observer)\n ...\n >>> observation\n {'my_observer:x': 1}\n\n The most important application of Reporter is to report observed values\n from each link or chain in the training and validation procedures.\n :class:`~chainer.training.Trainer` and some extensions prepare their own\n Reporter object with the hierarchy of the target link registered as\n observers. We can use :func:`report` function inside any links and chains\n to report the observed values (e.g., training loss, accuracy, activation\n statistics, etc.).\n\n Attributes:\n observation: Dictionary of observed values.\n\n \"\"\"\n def __init__(self):\n self._observer_names = {}\n self.observation = {}\n\n def __enter__(self):\n \"\"\"Makes this reporter object current.\"\"\"\n _reporters.append(self)\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"Recovers the previous reporter object to the current.\"\"\"\n _reporters.pop()\n\n @contextlib.contextmanager\n def scope(self, observation):\n \"\"\"Creates a scope to report observed values to ``observation``.\n\n This is a context manager to be passed to ``with`` statements. 
In this\n scope, the observation dictionary is changed to the given one.\n\n It also makes this reporter object current.\n\n Args:\n observation (dict): Observation dictionary. All observations\n reported inside of the ``with`` statement are written to this\n dictionary.\n\n \"\"\"\n old = self.observation\n self.observation = observation\n self.__enter__()\n yield\n self.__exit__(None, None, None)\n self.observation = old\n\n def add_observer(self, name, observer):\n \"\"\"Registers an observer of values.\n\n Observer defines a scope of names for observed values. Values observed\n with the observer are registered with names prefixed by the observer\n name.\n\n Args:\n name (str): Name of the observer.\n observer: The observer object. Note that the reporter distinguishes\n the observers by their object ids (i.e., ``id(owner)``), rather\n than the object equality.\n\n \"\"\"\n self._observer_names[id(observer)] = name\n\n def add_observers(self, prefix, observers):\n \"\"\"Registers multiple observers at once.\n\n This is a convenient method to register multiple objects at once.\n\n Args:\n prefix (str): Prefix of each name of observers.\n observers: Iterator of name and observer pairs.\n\n \"\"\"\n for name, observer in observers:\n self._observer_names[id(observer)] = prefix + name\n\n def report(self, values, observer=None):\n \"\"\"Reports observed values.\n\n The values are written with the key, prefixed by the name of the\n observer object if given.\n\n Args:\n values (dict): Dictionary of observed values.\n observer: Observer object. Its object ID is used to retrieve the\n observer name, which is used as the prefix of the registration\n name of the observed value.\n\n \"\"\"\n if observer is not None:\n observer_id = id(observer)\n if observer_id not in self._observer_names:\n raise KeyError(\n 'Given observer is not registered to the reporter.')\n observer_name = self._observer_names[observer_id]\n for key, value in six.iteritems(values):\n name = '%s/%s' % (observer_name, key)\n self.observation[name] = value\n else:\n self.observation.update(values)\n\n\n_reporters = []\n\n\ndef get_current_reporter():\n \"\"\"Returns the current reporter object.\"\"\"\n return _reporters[-1]\n\n\ndef report(values, observer=None):\n \"\"\"Reports observed values with the current reporter object.\n\n Any reporter object can be set current by the ``with`` statement. This\n function calls the :meth:`Report.report` method of the current reporter.\n If no reporter object is current, this function does nothing.\n\n .. admonition:: Example\n\n The most typical example is a use within links and chains. Suppose that\n a link is registered to the current reporter as an observer (for\n example, the target link of the optimizer is automatically registered to\n the reporter of the :class:`~chainer.training.Trainer`). 
We can report\n some values from the link as follows::\n\n class MyRegressor(chainer.Chain):\n def __init__(self, predictor):\n super(MyRegressor, self).__init__(predictor=predictor)\n\n def __call__(self, x, y):\n # This chain just computes the mean absolute and squared\n # errors between the prediction and y.\n pred = self.predictor(x)\n abs_error = F.sum(F.abs(pred - y)) / len(x.data)\n loss = F.mean_squared_error(pred, y)\n\n # Report the mean absolute and squared errors.\n report({'abs_error': abs_error, 'squared_error': loss}, self)\n\n return loss\n\n If the link is named ``'main'`` in the hierarchy (which is the default\n name of the target link in the\n :class:`~chainer.training.StandardUpdater`), these reported values are\n named ``'main/abs_error'`` and ``'main/squared_error'``. If these values\n are reported inside the :class:`~chainer.training.extension.Evaluator`\n extension, ``'validation/'`` is added at the head of the link name, thus\n the item names are changed to ``'validation/main/abs_error'`` and\n ``'validation/main/squared_error'`` (``'validation'`` is the default\n name of the Evaluator extension).\n\n Args:\n values (dict): Dictionary of observed values.\n observer: Observer object. Its object ID is used to retrieve the\n observer name, which is used as the prefix of the registration name\n of the observed value.\n\n \"\"\"\n if _reporters:\n current = _reporters[-1]\n current.report(values, observer)\n\n\[email protected]\ndef report_scope(observation):\n \"\"\"Returns a report scope with the current reporter.\n\n This is equivalent to ``get_current_reporter().scope(observation)``,\n except that it does not make the reporter current redundantly.\n\n \"\"\"\n current = _reporters[-1]\n old = current.observation\n current.observation = observation\n yield\n current.observation = old\n\n\nclass Summary(object):\n\n \"\"\"Online summarization of a sequence of scalars.\n\n Summary computes the statistics of given scalars online.\n\n \"\"\"\n def __init__(self):\n self._x = 0\n self._x2 = 0\n self._n = 0\n\n def add(self, value):\n \"\"\"Adds a scalar value.\n\n Args:\n value: Scalar value to accumulate. It is either a NumPy scalar or\n a zero-dimensional array (on CPU or GPU).\n\n \"\"\"\n with cuda.get_device(value):\n self._x += value\n self._x2 += value * value\n self._n += 1\n\n def compute_mean(self):\n \"\"\"Computes the mean.\"\"\"\n x, n = self._x, self._n\n with cuda.get_device(x):\n return x / n\n\n def make_statistics(self):\n \"\"\"Computes and returns the mean and standard deviation values.\n\n Returns:\n tuple: Mean and standard deviation values.\n\n \"\"\"\n x, n = self._x, self._n\n xp = cuda.get_array_module(x)\n with cuda.get_device(x):\n mean = x / n\n var = self._x2 / n - mean * mean\n std = xp.sqrt(var, var)\n return mean, std\n\n\nclass DictSummary(object):\n\n \"\"\"Online summarization of a sequence of dictionaries.\n\n ``DictSummary`` computes the statistics of a given set of scalars online.\n It only computes the statistics for scalar values and variables of scalar\n values in the dictionaries.\n\n \"\"\"\n def __init__(self):\n self._summaries = collections.defaultdict(Summary)\n\n def add(self, d):\n \"\"\"Adds a dictionary of scalars.\n\n Args:\n d (dict): Dictionary of scalars to accumulate. 
Only elements of\n scalars, zero-dimensional arrays, and variables of\n zero-dimensional arrays are accumulated.\n\n \"\"\"\n summaries = self._summaries\n for k, v in six.iteritems(d):\n if isinstance(v, variable.Variable):\n v = v.data\n if numpy.isscalar(v) or getattr(v, 'ndim', -1) == 0:\n summaries[k].add(v)\n\n def compute_mean(self):\n \"\"\"Creates a dictionary of mean values.\n\n It returns a single dictionary that holds a mean value for each entry\n added to the summary.\n\n Returns:\n dict: Dictionary of mean values.\n\n \"\"\"\n return {name: summary.compute_mean()\n for name, summary in six.iteritems(self._summaries)}\n\n def make_statistics(self):\n \"\"\"Creates a dictionary of statistics.\n\n It returns a single dictionary that holds mean and standard deviation\n values for every entry added to the summary. For an entry of name\n ``'key'``, these values are added to the dictionary by names ``'key'``\n and ``'key.std'``, respectively.\n\n Returns:\n dict: Dictionary of statistics of all entries.\n\n \"\"\"\n stats = {}\n for name, summary in six.iteritems(self._summaries):\n mean, std = summary.make_statistics()\n stats[name] = mean\n stats[name + '.std'] = std\n\n return stats\n", "path": "chainer/reporter.py"}]} | 3,819 | 348 |
gh_patches_debug_20683 | rasdani/github-patches | git_diff | cornellius-gp__gpytorch-133 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RBF Kernel Change Breaks Testing Code
The change to RBFKernel in 84fccd898c45c08279fb5c109e6e234f3a47588a may break something about our prediction code.
I am not totally sure what the problem is yet, but I isolated this commit with `git bisect`, and I have a reasonable test case where results are significantly worse with the commit applied than after reverting it.
It seems like the stability issues we encountered when making this change in the past don't come up in the unit tests, but do on some real datasets.
I can try to push my test case to a branch as well, although it relies on a UCI dataset.
@Balandat @gpleiss
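
As a rough, purely illustrative sketch of the kind of numerical issue suspected here (the exact cause is not pinned down), the `exp() + eps` lengthscale computation in `forward` below collapses toward the additive epsilon for very negative raw parameters:

```python
import torch

log_lengthscale = torch.tensor([-20.0])      # a value an optimizer can drift to
lengthscale = log_lengthscale.exp() + 1e-5   # mirrors `self.log_lengthscale.exp() + self.eps`
print(lengthscale)                           # ~1e-5, so pairwise distances get divided by a tiny number
```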
</issue>
<code>
[start of gpytorch/kernels/rbf_kernel.py]
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4 from __future__ import unicode_literals
5
6 from .kernel import Kernel
7
8
9 class RBFKernel(Kernel):
10
11 def __init__(
12 self,
13 ard_num_dims=None,
14 log_lengthscale_bounds=(-10000, 10000),
15 eps=1e-5,
16 active_dims=None,
17 ):
18 super(RBFKernel, self).__init__(
19 has_lengthscale=True,
20 ard_num_dims=ard_num_dims,
21 log_lengthscale_bounds=log_lengthscale_bounds,
22 active_dims=active_dims,
23 )
24 self.eps = eps
25
26 def forward(self, x1, x2):
27 lengthscales = self.log_lengthscale.exp() + self.eps
28 diff = (x1.unsqueeze(2) - x2.unsqueeze(1)).div_(lengthscales)
29 return diff.pow_(2).sum(-1).mul_(-0.5).exp_()
30
[end of gpytorch/kernels/rbf_kernel.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gpytorch/kernels/rbf_kernel.py b/gpytorch/kernels/rbf_kernel.py
--- a/gpytorch/kernels/rbf_kernel.py
+++ b/gpytorch/kernels/rbf_kernel.py
@@ -3,6 +3,7 @@
from __future__ import print_function
from __future__ import unicode_literals
+import math
from .kernel import Kernel
@@ -12,7 +13,7 @@
self,
ard_num_dims=None,
log_lengthscale_bounds=(-10000, 10000),
- eps=1e-5,
+ eps=1e-6,
active_dims=None,
):
super(RBFKernel, self).__init__(
@@ -24,6 +25,6 @@
self.eps = eps
def forward(self, x1, x2):
- lengthscales = self.log_lengthscale.exp() + self.eps
+ lengthscales = self.log_lengthscale.exp().mul(math.sqrt(2)).clamp(self.eps, 1e5)
diff = (x1.unsqueeze(2) - x2.unsqueeze(1)).div_(lengthscales)
- return diff.pow_(2).sum(-1).mul_(-0.5).exp_()
+ return diff.pow_(2).sum(-1).mul_(-1).exp_()
| {"golden_diff": "diff --git a/gpytorch/kernels/rbf_kernel.py b/gpytorch/kernels/rbf_kernel.py\n--- a/gpytorch/kernels/rbf_kernel.py\n+++ b/gpytorch/kernels/rbf_kernel.py\n@@ -3,6 +3,7 @@\n from __future__ import print_function\n from __future__ import unicode_literals\n \n+import math\n from .kernel import Kernel\n \n \n@@ -12,7 +13,7 @@\n self,\n ard_num_dims=None,\n log_lengthscale_bounds=(-10000, 10000),\n- eps=1e-5,\n+ eps=1e-6,\n active_dims=None,\n ):\n super(RBFKernel, self).__init__(\n@@ -24,6 +25,6 @@\n self.eps = eps\n \n def forward(self, x1, x2):\n- lengthscales = self.log_lengthscale.exp() + self.eps\n+ lengthscales = self.log_lengthscale.exp().mul(math.sqrt(2)).clamp(self.eps, 1e5)\n diff = (x1.unsqueeze(2) - x2.unsqueeze(1)).div_(lengthscales)\n- return diff.pow_(2).sum(-1).mul_(-0.5).exp_()\n+ return diff.pow_(2).sum(-1).mul_(-1).exp_()\n", "issue": "RBF Kernel Change Breaks Testing Code\nThe change to RBFKernel in 84fccd898c45c08279fb5c109e6e234f3a47588a may break something about our prediction code. \r\n\r\nI am not totally sure what the problem is yet, but I isolated this as the problem with `git bisect` and have a reasonable test case where results are significantly worse with the commit in compared to after a revert commit. \r\n\r\nIt seems like the stability issues we encountered when making this change in the past don't come up in the unit tests, but do on some real datasets.\r\n\r\nI can try to push my test case to a branch as well, although it relies on a UCI dataset.\r\n\r\n@Balandat @gpleiss \n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom .kernel import Kernel\n\n\nclass RBFKernel(Kernel):\n\n def __init__(\n self,\n ard_num_dims=None,\n log_lengthscale_bounds=(-10000, 10000),\n eps=1e-5,\n active_dims=None,\n ):\n super(RBFKernel, self).__init__(\n has_lengthscale=True,\n ard_num_dims=ard_num_dims,\n log_lengthscale_bounds=log_lengthscale_bounds,\n active_dims=active_dims,\n )\n self.eps = eps\n\n def forward(self, x1, x2):\n lengthscales = self.log_lengthscale.exp() + self.eps\n diff = (x1.unsqueeze(2) - x2.unsqueeze(1)).div_(lengthscales)\n return diff.pow_(2).sum(-1).mul_(-0.5).exp_()\n", "path": "gpytorch/kernels/rbf_kernel.py"}]} | 982 | 293 |
gh_patches_debug_706 | rasdani/github-patches | git_diff | deepset-ai__haystack-3705 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bad Semaphore initialization in RequestLimiter
**Describe the bug**
RequestLimiter takes a number as a parameter and uses it to set up a Semaphore. The issue is that the environment variable indicates the number of concurrent requests allowed per worker, but when the semaphore is created (https://github.com/deepset-ai/haystack/blob/6790eaf7d8be05c5674d97a75cc5783e00a66875/rest_api/rest_api/controller/utils.py#L13), this value is reduced by 1. This is clearly not what the project intended (at least per the naming).
**Error message**
The REST API will always report that it is busy (error 503) once the effective limit of CONCURRENT_REQUEST_PER_WORKER - 1 is reached. When the user sets the concurrency to 1, the API can never be called at all, since the Semaphore declaration becomes Semaphore(0).
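
A minimal sketch of the arithmetic, outside the REST API (the `limit` value stands in for CONCURRENT_REQUEST_PER_WORKER):

```python
from threading import Semaphore

limit = 1                                  # CONCURRENT_REQUEST_PER_WORKER
semaphore = Semaphore(limit - 1)           # what RequestLimiter builds today: Semaphore(0)
print(semaphore.acquire(blocking=False))   # False -> the request is rejected with a 503
```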
**Expected behavior**
Being able to set the request limits using the env variable CONCURRENT_REQUEST_PER_WORKER
**Additional context**
**To Reproduce**
**FAQ Check**
- [x] Have you had a look at [our new FAQ page](https://haystack.deepset.ai/overview/faq)?
**System:**
- OS: Ubuntu
- GPU/CPU: i7/ Nvidia
- Haystack version (commit or version number): 1.9
- DocumentStore:
- Reader:
- Retriever:
</issue>
<code>
[start of rest_api/rest_api/controller/utils.py]
1 from typing import Type, NewType
2
3 import inspect
4 from contextlib import contextmanager
5 from threading import Semaphore
6
7 from fastapi import Form, HTTPException
8 from pydantic import BaseModel
9
10
11 class RequestLimiter:
12 def __init__(self, limit):
13 self.semaphore = Semaphore(limit - 1)
14
15 @contextmanager
16 def run(self):
17 acquired = self.semaphore.acquire(blocking=False)
18 if not acquired:
19 raise HTTPException(status_code=503, detail="The server is busy processing requests.")
20 try:
21 yield acquired
22 finally:
23 self.semaphore.release()
24
25
26 StringId = NewType("StringId", str)
27
28
29 def as_form(cls: Type[BaseModel]):
30 """
31 Adds an as_form class method to decorated models. The as_form class method
32 can be used with FastAPI endpoints
33 """
34 new_params = [
35 inspect.Parameter(
36 field.alias,
37 inspect.Parameter.POSITIONAL_ONLY,
38 default=(Form(field.default) if not field.required else Form(...)),
39 )
40 for field in cls.__fields__.values()
41 ]
42
43 async def _as_form(**data):
44 return cls(**data)
45
46 sig = inspect.signature(_as_form)
47 sig = sig.replace(parameters=new_params)
48 _as_form.__signature__ = sig # type: ignore
49 setattr(cls, "as_form", _as_form)
50 return cls
51
[end of rest_api/rest_api/controller/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rest_api/rest_api/controller/utils.py b/rest_api/rest_api/controller/utils.py
--- a/rest_api/rest_api/controller/utils.py
+++ b/rest_api/rest_api/controller/utils.py
@@ -10,7 +10,7 @@
class RequestLimiter:
def __init__(self, limit):
- self.semaphore = Semaphore(limit - 1)
+ self.semaphore = Semaphore(limit)
@contextmanager
def run(self):
| {"golden_diff": "diff --git a/rest_api/rest_api/controller/utils.py b/rest_api/rest_api/controller/utils.py\n--- a/rest_api/rest_api/controller/utils.py\n+++ b/rest_api/rest_api/controller/utils.py\n@@ -10,7 +10,7 @@\n \n class RequestLimiter:\n def __init__(self, limit):\n- self.semaphore = Semaphore(limit - 1)\n+ self.semaphore = Semaphore(limit)\n \n @contextmanager\n def run(self):\n", "issue": "Bad Semaphore initialization in RequestLimiter\n**Describe the bug**\r\nRequestLimiter takes a number as parameter and use it to set up a Semaphore. The issue is that the environment variable indicates the concurrent allowed requests per worker. When the semaphore is created (https://github.com/deepset-ai/haystack/blob/6790eaf7d8be05c5674d97a75cc5783e00a66875/rest_api/rest_api/controller/utils.py#L13), this value is set down by 1. This is clearly not what the project tried to achieve (at least per naming). \r\n\r\n**Error message**\r\nREST API will always return it's busy, error 503 when CONCURRENT_REQUEST_PER_WORKER is equal to CONCURRENT_REQUEST_PER_WORKER -1. When user set the concurrency to 1, it will never be able to call the API, since the Semaphore declaration will be Semaphore(0)\r\n\r\n**Expected behavior**\r\nBeing able to set the request limits using the env variable CONCURRENT_REQUEST_PER_WORKER\r\n\r\n**Additional context**\r\n\r\n\r\n**To Reproduce**\r\n\r\n\r\n**FAQ Check**\r\n- [x] Have you had a look at [our new FAQ page](https://haystack.deepset.ai/overview/faq)?\r\n\r\n**System:**\r\n - OS: Ubuntu\r\n - GPU/CPU: i7/ Nvidia\r\n - Haystack version (commit or version number): 1.9\r\n - DocumentStore:\r\n - Reader:\r\n - Retriever:\r\n\n", "before_files": [{"content": "from typing import Type, NewType\n\nimport inspect\nfrom contextlib import contextmanager\nfrom threading import Semaphore\n\nfrom fastapi import Form, HTTPException\nfrom pydantic import BaseModel\n\n\nclass RequestLimiter:\n def __init__(self, limit):\n self.semaphore = Semaphore(limit - 1)\n\n @contextmanager\n def run(self):\n acquired = self.semaphore.acquire(blocking=False)\n if not acquired:\n raise HTTPException(status_code=503, detail=\"The server is busy processing requests.\")\n try:\n yield acquired\n finally:\n self.semaphore.release()\n\n\nStringId = NewType(\"StringId\", str)\n\n\ndef as_form(cls: Type[BaseModel]):\n \"\"\"\n Adds an as_form class method to decorated models. The as_form class method\n can be used with FastAPI endpoints\n \"\"\"\n new_params = [\n inspect.Parameter(\n field.alias,\n inspect.Parameter.POSITIONAL_ONLY,\n default=(Form(field.default) if not field.required else Form(...)),\n )\n for field in cls.__fields__.values()\n ]\n\n async def _as_form(**data):\n return cls(**data)\n\n sig = inspect.signature(_as_form)\n sig = sig.replace(parameters=new_params)\n _as_form.__signature__ = sig # type: ignore\n setattr(cls, \"as_form\", _as_form)\n return cls\n", "path": "rest_api/rest_api/controller/utils.py"}]} | 1,253 | 99 |
gh_patches_debug_1505 | rasdani/github-patches | git_diff | pydantic__pydantic-4329 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Serialization -> de-serialisation fails for small timedelta (< 100 microseconds)
### Checks
* [x] I added a descriptive title to this issue
* [x] I have searched (google, github) for similar issues and couldn't find anything
* [x] I have read and followed [the docs](https://pydantic-docs.helpmanual.io/) and still think this is a bug
<!-- Sorry to sound so draconian, but every second saved replying to issues is time spend improving pydantic :-) -->
# Bug
The serialization -> de-serialization of a model with small `timedelta` raises a `ValidationError`. The de-serialization fails only when the `timedelta` is below 100 microseconds, see the following example:
```py
from datetime import timedelta
from pydantic import BaseModel
class Model(BaseModel):
duration: timedelta
# This works
model = Model(duration=timedelta(microseconds=100))
Model.parse_raw(model.json())
# This Fails
model = Model(duration=timedelta(microseconds=99))
Model.parse_raw(model.json())
```
Last line throws the following error:
```py
pydantic.error_wrappers.ValidationError: 1 validation error for Model
duration
invalid duration format (type=value_error.duration)
```
I believe the error comes from the `parse_duration` function, and in particular the line where the input value is converted to `str`.
https://github.com/samuelcolvin/pydantic/blob/c256dccbb383a7fd462f62fcb5d55558eb3cb108/pydantic/datetime_parse.py#L226-L231
Indeed `str(0.0001)` gives `"0.0001"` but `str(0.000099)` gives `"9.9e-5"`, thus the `re.match` fails.
Changing `value = str(value)` to `value = "f{value:.6f}"` should fix this. I would be happy to create a PR to solve the issue.
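For illustration, a minimal sketch of the formatting difference (using `:.6f` here; the exact format spec for a fix is open):
```py
from datetime import timedelta

for td in (timedelta(microseconds=100), timedelta(microseconds=99)):
    seconds = td.total_seconds()
    # str() switches to scientific notation below 1e-4, which the
    # duration regex cannot match; a fixed-point format stays matchable
    print(repr(str(seconds)), repr(f"{seconds:.6f}"))
# '0.0001' '0.000100'
# '9.9e-05' '0.000099'
```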
# System information
Output of `python -c "import pydantic.utils; print(pydantic.utils.version_info())"`:
```
pydantic version: 1.8.2
pydantic compiled: True
install path: <my-home>/.pyenv/versions/3.7.11/envs/pydantic/lib/python3.7/site-packages/pydantic
python version: 3.7.11 (default, Aug 31 2021, 20:43:02) [Clang 10.0.1 (clang-1001.0.46.4)]
platform: Darwin-19.6.0-x86_64-i386-64bit
optional deps. installed: ['typing-extensions']
```
</issue>
<code>
[start of pydantic/datetime_parse.py]
1 """
2 Functions to parse datetime objects.
3
4 We're using regular expressions rather than time.strptime because:
5 - They provide both validation and parsing.
6 - They're more flexible for datetimes.
7 - The date/datetime/time constructors produce friendlier error messages.
8
9 Stolen from https://raw.githubusercontent.com/django/django/master/django/utils/dateparse.py at
10 9718fa2e8abe430c3526a9278dd976443d4ae3c6
11
12 Changed to:
13 * use standard python datetime types not django.utils.timezone
14 * raise ValueError when regex doesn't match rather than returning None
15 * support parsing unix timestamps for dates and datetimes
16 """
17 import re
18 from datetime import date, datetime, time, timedelta, timezone
19 from typing import Dict, Optional, Type, Union
20
21 from . import errors
22
23 date_expr = r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
24 time_expr = (
25 r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
26 r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
27 r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$'
28 )
29
30 date_re = re.compile(f'{date_expr}$')
31 time_re = re.compile(time_expr)
32 datetime_re = re.compile(f'{date_expr}[T ]{time_expr}')
33
34 standard_duration_re = re.compile(
35 r'^'
36 r'(?:(?P<days>-?\d+) (days?, )?)?'
37 r'((?:(?P<hours>-?\d+):)(?=\d+:\d+))?'
38 r'(?:(?P<minutes>-?\d+):)?'
39 r'(?P<seconds>-?\d+)'
40 r'(?:\.(?P<microseconds>\d{1,6})\d{0,6})?'
41 r'$'
42 )
43
44 # Support the sections of ISO 8601 date representation that are accepted by timedelta
45 iso8601_duration_re = re.compile(
46 r'^(?P<sign>[-+]?)'
47 r'P'
48 r'(?:(?P<days>\d+(.\d+)?)D)?'
49 r'(?:T'
50 r'(?:(?P<hours>\d+(.\d+)?)H)?'
51 r'(?:(?P<minutes>\d+(.\d+)?)M)?'
52 r'(?:(?P<seconds>\d+(.\d+)?)S)?'
53 r')?'
54 r'$'
55 )
56
57 EPOCH = datetime(1970, 1, 1)
58 # if greater than this, the number is in ms, if less than or equal it's in seconds
59 # (in seconds this is 11th October 2603, in ms it's 20th August 1970)
60 MS_WATERSHED = int(2e10)
61 # slightly more than datetime.max in ns - (datetime.max - EPOCH).total_seconds() * 1e9
62 MAX_NUMBER = int(3e20)
63 StrBytesIntFloat = Union[str, bytes, int, float]
64
65
66 def get_numeric(value: StrBytesIntFloat, native_expected_type: str) -> Union[None, int, float]:
67 if isinstance(value, (int, float)):
68 return value
69 try:
70 return float(value)
71 except ValueError:
72 return None
73 except TypeError:
74 raise TypeError(f'invalid type; expected {native_expected_type}, string, bytes, int or float')
75
76
77 def from_unix_seconds(seconds: Union[int, float]) -> datetime:
78 if seconds > MAX_NUMBER:
79 return datetime.max
80 elif seconds < -MAX_NUMBER:
81 return datetime.min
82
83 while abs(seconds) > MS_WATERSHED:
84 seconds /= 1000
85 dt = EPOCH + timedelta(seconds=seconds)
86 return dt.replace(tzinfo=timezone.utc)
87
88
89 def _parse_timezone(value: Optional[str], error: Type[Exception]) -> Union[None, int, timezone]:
90 if value == 'Z':
91 return timezone.utc
92 elif value is not None:
93 offset_mins = int(value[-2:]) if len(value) > 3 else 0
94 offset = 60 * int(value[1:3]) + offset_mins
95 if value[0] == '-':
96 offset = -offset
97 try:
98 return timezone(timedelta(minutes=offset))
99 except ValueError:
100 raise error()
101 else:
102 return None
103
104
105 def parse_date(value: Union[date, StrBytesIntFloat]) -> date:
106 """
107 Parse a date/int/float/string and return a datetime.date.
108
109 Raise ValueError if the input is well formatted but not a valid date.
110 Raise ValueError if the input isn't well formatted.
111 """
112 if isinstance(value, date):
113 if isinstance(value, datetime):
114 return value.date()
115 else:
116 return value
117
118 number = get_numeric(value, 'date')
119 if number is not None:
120 return from_unix_seconds(number).date()
121
122 if isinstance(value, bytes):
123 value = value.decode()
124
125 match = date_re.match(value) # type: ignore
126 if match is None:
127 raise errors.DateError()
128
129 kw = {k: int(v) for k, v in match.groupdict().items()}
130
131 try:
132 return date(**kw)
133 except ValueError:
134 raise errors.DateError()
135
136
137 def parse_time(value: Union[time, StrBytesIntFloat]) -> time:
138 """
139 Parse a time/string and return a datetime.time.
140
141 Raise ValueError if the input is well formatted but not a valid time.
142 Raise ValueError if the input isn't well formatted, in particular if it contains an offset.
143 """
144 if isinstance(value, time):
145 return value
146
147 number = get_numeric(value, 'time')
148 if number is not None:
149 if number >= 86400:
150 # doesn't make sense since the time time loop back around to 0
151 raise errors.TimeError()
152 return (datetime.min + timedelta(seconds=number)).time()
153
154 if isinstance(value, bytes):
155 value = value.decode()
156
157 match = time_re.match(value) # type: ignore
158 if match is None:
159 raise errors.TimeError()
160
161 kw = match.groupdict()
162 if kw['microsecond']:
163 kw['microsecond'] = kw['microsecond'].ljust(6, '0')
164
165 tzinfo = _parse_timezone(kw.pop('tzinfo'), errors.TimeError)
166 kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}
167 kw_['tzinfo'] = tzinfo
168
169 try:
170 return time(**kw_) # type: ignore
171 except ValueError:
172 raise errors.TimeError()
173
174
175 def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:
176 """
177 Parse a datetime/int/float/string and return a datetime.datetime.
178
179 This function supports time zone offsets. When the input contains one,
180 the output uses a timezone with a fixed offset from UTC.
181
182 Raise ValueError if the input is well formatted but not a valid datetime.
183 Raise ValueError if the input isn't well formatted.
184 """
185 if isinstance(value, datetime):
186 return value
187
188 number = get_numeric(value, 'datetime')
189 if number is not None:
190 return from_unix_seconds(number)
191
192 if isinstance(value, bytes):
193 value = value.decode()
194
195 match = datetime_re.match(value) # type: ignore
196 if match is None:
197 raise errors.DateTimeError()
198
199 kw = match.groupdict()
200 if kw['microsecond']:
201 kw['microsecond'] = kw['microsecond'].ljust(6, '0')
202
203 tzinfo = _parse_timezone(kw.pop('tzinfo'), errors.DateTimeError)
204 kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}
205 kw_['tzinfo'] = tzinfo
206
207 try:
208 return datetime(**kw_) # type: ignore
209 except ValueError:
210 raise errors.DateTimeError()
211
212
213 def parse_duration(value: StrBytesIntFloat) -> timedelta:
214 """
215 Parse a duration int/float/string and return a datetime.timedelta.
216
217 The preferred format for durations in Django is '%d %H:%M:%S.%f'.
218
219 Also supports ISO 8601 representation.
220 """
221 if isinstance(value, timedelta):
222 return value
223
224 if isinstance(value, (int, float)):
225 # below code requires a string
226 value = str(value)
227 elif isinstance(value, bytes):
228 value = value.decode()
229
230 try:
231 match = standard_duration_re.match(value) or iso8601_duration_re.match(value)
232 except TypeError:
233 raise TypeError('invalid type; expected timedelta, string, bytes, int or float')
234
235 if not match:
236 raise errors.DurationError()
237
238 kw = match.groupdict()
239 sign = -1 if kw.pop('sign', '+') == '-' else 1
240 if kw.get('microseconds'):
241 kw['microseconds'] = kw['microseconds'].ljust(6, '0')
242
243 if kw.get('seconds') and kw.get('microseconds') and kw['seconds'].startswith('-'):
244 kw['microseconds'] = '-' + kw['microseconds']
245
246 kw_ = {k: float(v) for k, v in kw.items() if v is not None}
247
248 return sign * timedelta(**kw_)
249
[end of pydantic/datetime_parse.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pydantic/datetime_parse.py b/pydantic/datetime_parse.py
--- a/pydantic/datetime_parse.py
+++ b/pydantic/datetime_parse.py
@@ -223,7 +223,7 @@
if isinstance(value, (int, float)):
# below code requires a string
- value = str(value)
+ value = f'{value:f}'
elif isinstance(value, bytes):
value = value.decode()
| {"golden_diff": "diff --git a/pydantic/datetime_parse.py b/pydantic/datetime_parse.py\n--- a/pydantic/datetime_parse.py\n+++ b/pydantic/datetime_parse.py\n@@ -223,7 +223,7 @@\n \n if isinstance(value, (int, float)):\n # below code requires a string\n- value = str(value)\n+ value = f'{value:f}'\n elif isinstance(value, bytes):\n value = value.decode()\n", "issue": "Serialization -> de-serialisation fails for small timedelta (< 100 microseconds)\n### Checks\r\n\r\n* [x] I added a descriptive title to this issue\r\n* [x] I have searched (google, github) for similar issues and couldn't find anything\r\n* [x] I have read and followed [the docs](https://pydantic-docs.helpmanual.io/) and still think this is a bug\r\n\r\n<!-- Sorry to sound so draconian, but every second saved replying to issues is time spend improving pydantic :-) -->\r\n\r\n# Bug\r\n\r\nThe serialization -> de-serialization of a model with small `timedelta` raises a `ValidationError`. The de-serialization fails only when the `timedelta` is below 100 microseconds, see the following example:\r\n\r\n```py\r\nfrom datetime import timedelta\r\nfrom pydantic import BaseModel\r\n\r\nclass Model(BaseModel):\r\n duration: timedelta\r\n\r\n\r\n# This works\r\nmodel = Model(duration=timedelta(microseconds=100))\r\nModel.parse_raw(model.json())\r\n# This Fails\r\nmodel = Model(duration=timedelta(microseconds=99))\r\nModel.parse_raw(model.json())\r\n```\r\n\r\nLast line throws the following error:\r\n```py\r\npydantic.error_wrappers.ValidationError: 1 validation error for Model\r\nduration\r\n invalid duration format (type=value_error.duration)\r\n```\r\n\r\nI believe the error comes from the `parse_duration` function, and in particular the line where the input value is converted to `str`.\r\nhttps://github.com/samuelcolvin/pydantic/blob/c256dccbb383a7fd462f62fcb5d55558eb3cb108/pydantic/datetime_parse.py#L226-L231\r\nIndeed `str(0.0001)` gives `\"0.0001\"` but `str(0.000099)` gives `\"9.9e-5\"`, thus the `re.match` fails.\r\n\r\nChanging `value = str(value)` to `value = \"f{value:.6f}\"` should fix this. I would be happy to create a PR to solve the issue.\r\n\r\n# System information\r\n\r\nOutput of `python -c \"import pydantic.utils; print(pydantic.utils.version_info())\"`:\r\n```\r\n pydantic version: 1.8.2\r\n pydantic compiled: True\r\n install path: <my-home>/.pyenv/versions/3.7.11/envs/pydantic/lib/python3.7/site-packages/pydantic\r\n python version: 3.7.11 (default, Aug 31 2021, 20:43:02) [Clang 10.0.1 (clang-1001.0.46.4)]\r\n platform: Darwin-19.6.0-x86_64-i386-64bit\r\n optional deps. installed: ['typing-extensions']\r\n```\n", "before_files": [{"content": "\"\"\"\nFunctions to parse datetime objects.\n\nWe're using regular expressions rather than time.strptime because:\n- They provide both validation and parsing.\n- They're more flexible for datetimes.\n- The date/datetime/time constructors produce friendlier error messages.\n\nStolen from https://raw.githubusercontent.com/django/django/master/django/utils/dateparse.py at\n9718fa2e8abe430c3526a9278dd976443d4ae3c6\n\nChanged to:\n* use standard python datetime types not django.utils.timezone\n* raise ValueError when regex doesn't match rather than returning None\n* support parsing unix timestamps for dates and datetimes\n\"\"\"\nimport re\nfrom datetime import date, datetime, time, timedelta, timezone\nfrom typing import Dict, Optional, Type, Union\n\nfrom . 
import errors\n\ndate_expr = r'(?P<year>\\d{4})-(?P<month>\\d{1,2})-(?P<day>\\d{1,2})'\ntime_expr = (\n r'(?P<hour>\\d{1,2}):(?P<minute>\\d{1,2})'\n r'(?::(?P<second>\\d{1,2})(?:\\.(?P<microsecond>\\d{1,6})\\d{0,6})?)?'\n r'(?P<tzinfo>Z|[+-]\\d{2}(?::?\\d{2})?)?$'\n)\n\ndate_re = re.compile(f'{date_expr}$')\ntime_re = re.compile(time_expr)\ndatetime_re = re.compile(f'{date_expr}[T ]{time_expr}')\n\nstandard_duration_re = re.compile(\n r'^'\n r'(?:(?P<days>-?\\d+) (days?, )?)?'\n r'((?:(?P<hours>-?\\d+):)(?=\\d+:\\d+))?'\n r'(?:(?P<minutes>-?\\d+):)?'\n r'(?P<seconds>-?\\d+)'\n r'(?:\\.(?P<microseconds>\\d{1,6})\\d{0,6})?'\n r'$'\n)\n\n# Support the sections of ISO 8601 date representation that are accepted by timedelta\niso8601_duration_re = re.compile(\n r'^(?P<sign>[-+]?)'\n r'P'\n r'(?:(?P<days>\\d+(.\\d+)?)D)?'\n r'(?:T'\n r'(?:(?P<hours>\\d+(.\\d+)?)H)?'\n r'(?:(?P<minutes>\\d+(.\\d+)?)M)?'\n r'(?:(?P<seconds>\\d+(.\\d+)?)S)?'\n r')?'\n r'$'\n)\n\nEPOCH = datetime(1970, 1, 1)\n# if greater than this, the number is in ms, if less than or equal it's in seconds\n# (in seconds this is 11th October 2603, in ms it's 20th August 1970)\nMS_WATERSHED = int(2e10)\n# slightly more than datetime.max in ns - (datetime.max - EPOCH).total_seconds() * 1e9\nMAX_NUMBER = int(3e20)\nStrBytesIntFloat = Union[str, bytes, int, float]\n\n\ndef get_numeric(value: StrBytesIntFloat, native_expected_type: str) -> Union[None, int, float]:\n if isinstance(value, (int, float)):\n return value\n try:\n return float(value)\n except ValueError:\n return None\n except TypeError:\n raise TypeError(f'invalid type; expected {native_expected_type}, string, bytes, int or float')\n\n\ndef from_unix_seconds(seconds: Union[int, float]) -> datetime:\n if seconds > MAX_NUMBER:\n return datetime.max\n elif seconds < -MAX_NUMBER:\n return datetime.min\n\n while abs(seconds) > MS_WATERSHED:\n seconds /= 1000\n dt = EPOCH + timedelta(seconds=seconds)\n return dt.replace(tzinfo=timezone.utc)\n\n\ndef _parse_timezone(value: Optional[str], error: Type[Exception]) -> Union[None, int, timezone]:\n if value == 'Z':\n return timezone.utc\n elif value is not None:\n offset_mins = int(value[-2:]) if len(value) > 3 else 0\n offset = 60 * int(value[1:3]) + offset_mins\n if value[0] == '-':\n offset = -offset\n try:\n return timezone(timedelta(minutes=offset))\n except ValueError:\n raise error()\n else:\n return None\n\n\ndef parse_date(value: Union[date, StrBytesIntFloat]) -> date:\n \"\"\"\n Parse a date/int/float/string and return a datetime.date.\n\n Raise ValueError if the input is well formatted but not a valid date.\n Raise ValueError if the input isn't well formatted.\n \"\"\"\n if isinstance(value, date):\n if isinstance(value, datetime):\n return value.date()\n else:\n return value\n\n number = get_numeric(value, 'date')\n if number is not None:\n return from_unix_seconds(number).date()\n\n if isinstance(value, bytes):\n value = value.decode()\n\n match = date_re.match(value) # type: ignore\n if match is None:\n raise errors.DateError()\n\n kw = {k: int(v) for k, v in match.groupdict().items()}\n\n try:\n return date(**kw)\n except ValueError:\n raise errors.DateError()\n\n\ndef parse_time(value: Union[time, StrBytesIntFloat]) -> time:\n \"\"\"\n Parse a time/string and return a datetime.time.\n\n Raise ValueError if the input is well formatted but not a valid time.\n Raise ValueError if the input isn't well formatted, in particular if it contains an offset.\n \"\"\"\n if isinstance(value, time):\n return value\n\n number = 
get_numeric(value, 'time')\n if number is not None:\n if number >= 86400:\n # doesn't make sense since the time time loop back around to 0\n raise errors.TimeError()\n return (datetime.min + timedelta(seconds=number)).time()\n\n if isinstance(value, bytes):\n value = value.decode()\n\n match = time_re.match(value) # type: ignore\n if match is None:\n raise errors.TimeError()\n\n kw = match.groupdict()\n if kw['microsecond']:\n kw['microsecond'] = kw['microsecond'].ljust(6, '0')\n\n tzinfo = _parse_timezone(kw.pop('tzinfo'), errors.TimeError)\n kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}\n kw_['tzinfo'] = tzinfo\n\n try:\n return time(**kw_) # type: ignore\n except ValueError:\n raise errors.TimeError()\n\n\ndef parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:\n \"\"\"\n Parse a datetime/int/float/string and return a datetime.datetime.\n\n This function supports time zone offsets. When the input contains one,\n the output uses a timezone with a fixed offset from UTC.\n\n Raise ValueError if the input is well formatted but not a valid datetime.\n Raise ValueError if the input isn't well formatted.\n \"\"\"\n if isinstance(value, datetime):\n return value\n\n number = get_numeric(value, 'datetime')\n if number is not None:\n return from_unix_seconds(number)\n\n if isinstance(value, bytes):\n value = value.decode()\n\n match = datetime_re.match(value) # type: ignore\n if match is None:\n raise errors.DateTimeError()\n\n kw = match.groupdict()\n if kw['microsecond']:\n kw['microsecond'] = kw['microsecond'].ljust(6, '0')\n\n tzinfo = _parse_timezone(kw.pop('tzinfo'), errors.DateTimeError)\n kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}\n kw_['tzinfo'] = tzinfo\n\n try:\n return datetime(**kw_) # type: ignore\n except ValueError:\n raise errors.DateTimeError()\n\n\ndef parse_duration(value: StrBytesIntFloat) -> timedelta:\n \"\"\"\n Parse a duration int/float/string and return a datetime.timedelta.\n\n The preferred format for durations in Django is '%d %H:%M:%S.%f'.\n\n Also supports ISO 8601 representation.\n \"\"\"\n if isinstance(value, timedelta):\n return value\n\n if isinstance(value, (int, float)):\n # below code requires a string\n value = str(value)\n elif isinstance(value, bytes):\n value = value.decode()\n\n try:\n match = standard_duration_re.match(value) or iso8601_duration_re.match(value)\n except TypeError:\n raise TypeError('invalid type; expected timedelta, string, bytes, int or float')\n\n if not match:\n raise errors.DurationError()\n\n kw = match.groupdict()\n sign = -1 if kw.pop('sign', '+') == '-' else 1\n if kw.get('microseconds'):\n kw['microseconds'] = kw['microseconds'].ljust(6, '0')\n\n if kw.get('seconds') and kw.get('microseconds') and kw['seconds'].startswith('-'):\n kw['microseconds'] = '-' + kw['microseconds']\n\n kw_ = {k: float(v) for k, v in kw.items() if v is not None}\n\n return sign * timedelta(**kw_)\n", "path": "pydantic/datetime_parse.py"}]} | 3,966 | 102 |
gh_patches_debug_468 | rasdani/github-patches | git_diff | chainer__chainer-1562 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
inconsistency between cupy.vstack and numpy.vstack
```
In [1]: import cupy, numpy
In [2]: a = cupy.arange(12).reshape(3, 4)
In [3]: cupy.vstack([a])
Out[3]: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
In [4]: numpy.vstack([a.get()])
Out[4]:
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
```
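The flattening seems to come from `atleast_2d`: with a single argument it returns the array itself rather than a list, so an element-wise loop over `tup` inside `concatenate` walks the rows of that array. A NumPy sketch of the behaviour (CuPy's `atleast_2d` is assumed to mirror it):
```py
import numpy

a = numpy.arange(12).reshape(3, 4)

print(type(numpy.atleast_2d(a)))     # <class 'numpy.ndarray'> -- single input returns the array
print(type(numpy.atleast_2d(a, a)))  # <class 'list'>          -- multiple inputs return a list
# Iterating the single 2-D array yields its 1-D rows, which a loop-based
# concatenate then joins end-to-end into the flat result seen above.
print([row.shape for row in numpy.atleast_2d(a)])  # [(4,), (4,), (4,)]
```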
</issue>
<code>
[start of cupy/manipulation/join.py]
1 import numpy
2 import six
3
4 import cupy
5
6
7 def column_stack(tup):
8 """Stacks 1-D and 2-D arrays as columns into a 2-D array.
9
10 A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays
11 are concatenated along the second axis.
12
13 Args:
14 tup (sequence of arrays): 1-D or 2-D arrays to be stacked.
15
16 Returns:
17 cupy.ndarray: A new 2-D array of stacked columns.
18
19 .. seealso:: :func:`numpy.column_stack`
20
21 """
22 if any(not isinstance(a, cupy.ndarray) for a in tup):
23 raise TypeError('Only cupy arrays can be column stacked')
24
25 lst = list(tup)
26 for i, a in enumerate(lst):
27 if a.ndim == 1:
28 a = a[:, cupy.newaxis]
29 lst[i] = a
30 elif a.ndim != 2:
31 raise ValueError(
32 'Only 1 or 2 dimensional arrays can be column stacked')
33
34 return concatenate(lst, axis=1)
35
36
37 def concatenate(tup, axis=0):
38 """Joins arrays along an axis.
39
40 Args:
41 tup (sequence of arrays): Arrays to be joined. All of these should have
42 same dimensionalities except the specified axis.
43 axis (int): The axis to join arrays along.
44
45 Returns:
46 cupy.ndarray: Joined array.
47
48 .. seealso:: :func:`numpy.concatenate`
49
50 """
51 ndim = None
52 shape = None
53 for a in tup:
54 if not isinstance(a, cupy.ndarray):
55 raise TypeError('Only cupy arrays can be concatenated')
56 if a.ndim == 0:
57 raise TypeError('zero-dimensional arrays cannot be concatenated')
58 if ndim is None:
59 ndim = a.ndim
60 shape = list(a.shape)
61 axis = _get_positive_axis(a.ndim, axis)
62 continue
63
64 if a.ndim != ndim:
65 raise ValueError(
66 'All arrays to concatenate must have the same ndim')
67 if any(i != axis and shape[i] != a.shape[i]
68 for i in six.moves.range(ndim)):
69 raise ValueError(
70 'All arrays must have same shape except the axis to '
71 'concatenate')
72 shape[axis] += a.shape[axis]
73
74 if ndim is None:
75 raise ValueError('Cannot concatenate from empty tuple')
76
77 dtype = numpy.find_common_type([a.dtype for a in tup], [])
78 ret = cupy.empty(shape, dtype=dtype)
79
80 skip = (slice(None),) * axis
81 i = 0
82 for a in tup:
83 aw = a.shape[axis]
84 ret[skip + (slice(i, i + aw),)] = a
85 i += aw
86
87 return ret
88
89
90 def dstack(tup):
91 """Stacks arrays along the third axis.
92
93 Args:
94 tup (sequence of arrays): Arrays to be stacked. Each array is converted
95 by :func:`cupy.atleast_3d` before stacking.
96
97 Returns:
98 cupy.ndarray: Stacked array.
99
100 .. seealso:: :func:`numpy.dstack`
101
102 """
103 return concatenate(cupy.atleast_3d(*tup), 2)
104
105
106 def hstack(tup):
107 """Stacks arrays horizontally.
108
109 If an input array has one dimension, then the array is treated as a
110 horizontal vector and stacked along the first axis. Otherwise, the array is
111 stacked along the second axis.
112
113 Args:
114 tup (sequence of arrays): Arrays to be stacked.
115
116 Returns:
117 cupy.ndarray: Stacked array.
118
119 .. seealso:: :func:`numpy.hstack`
120
121 """
122 arrs = [cupy.atleast_1d(a) for a in tup]
123 axis = 1
124 if arrs[0].ndim == 1:
125 axis = 0
126 return concatenate(arrs, axis)
127
128
129 def vstack(tup):
130 """Stacks arrays vertically.
131
132 If an input array has one dimension, then the array is treated as a
133 horizontal vector and stacked along the additional axis at the head.
134 Otherwise, the array is stacked along the first axis.
135
136 Args:
137 tup (sequence of arrays): Arrays to be stacked. Each array is converted
138 by :func:`cupy.atleast_2d` before stacking.
139
140 Returns:
141 cupy.ndarray: Stacked array.
142
143 .. seealso:: :func:`numpy.dstack`
144
145 """
146 return concatenate(cupy.atleast_2d(*tup), 0)
147
148
149 def stack(tup, axis=0):
150 """Stacks arrays along a new axis.
151
152 Args:
153 tup (sequence of arrays): Arrays to be stacked.
154 axis (int): Axis along which the arrays are stacked.
155
156 Returns:
157 cupy.ndarray: Stacked array.
158
159 .. seealso:: :func:`numpy.stack`
160 """
161 return concatenate([cupy.expand_dims(x, axis) for x in tup], axis)
162
163
164 def _get_positive_axis(ndim, axis):
165 a = axis
166 if a < 0:
167 a += ndim
168 if a < 0 or a >= ndim:
169 raise IndexError('axis {} out of bounds [0, {})'.format(axis, ndim))
170 return a
171
[end of cupy/manipulation/join.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cupy/manipulation/join.py b/cupy/manipulation/join.py
--- a/cupy/manipulation/join.py
+++ b/cupy/manipulation/join.py
@@ -143,7 +143,7 @@
.. seealso:: :func:`numpy.dstack`
"""
- return concatenate(cupy.atleast_2d(*tup), 0)
+ return concatenate([cupy.atleast_2d(m) for m in tup], 0)
def stack(tup, axis=0):
| {"golden_diff": "diff --git a/cupy/manipulation/join.py b/cupy/manipulation/join.py\n--- a/cupy/manipulation/join.py\n+++ b/cupy/manipulation/join.py\n@@ -143,7 +143,7 @@\n .. seealso:: :func:`numpy.dstack`\n \n \"\"\"\n- return concatenate(cupy.atleast_2d(*tup), 0)\n+ return concatenate([cupy.atleast_2d(m) for m in tup], 0)\n \n \n def stack(tup, axis=0):\n", "issue": "inconsistency between cupy.vstack and numpy.vstack\n```\nIn [1]: import cupy, numpy\nIn [2]: a = cupy.arange(12).reshape(3, 4)\nIn [3]: cupy.vstack([a])\nOut[3]: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])\nIn [4]: numpy.vstack([a.get()])\nOut[4]: \narray([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]])\n```\n\n", "before_files": [{"content": "import numpy\nimport six\n\nimport cupy\n\n\ndef column_stack(tup):\n \"\"\"Stacks 1-D and 2-D arrays as columns into a 2-D array.\n\n A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays\n are concatenated along the second axis.\n\n Args:\n tup (sequence of arrays): 1-D or 2-D arrays to be stacked.\n\n Returns:\n cupy.ndarray: A new 2-D array of stacked columns.\n\n .. seealso:: :func:`numpy.column_stack`\n\n \"\"\"\n if any(not isinstance(a, cupy.ndarray) for a in tup):\n raise TypeError('Only cupy arrays can be column stacked')\n\n lst = list(tup)\n for i, a in enumerate(lst):\n if a.ndim == 1:\n a = a[:, cupy.newaxis]\n lst[i] = a\n elif a.ndim != 2:\n raise ValueError(\n 'Only 1 or 2 dimensional arrays can be column stacked')\n\n return concatenate(lst, axis=1)\n\n\ndef concatenate(tup, axis=0):\n \"\"\"Joins arrays along an axis.\n\n Args:\n tup (sequence of arrays): Arrays to be joined. All of these should have\n same dimensionalities except the specified axis.\n axis (int): The axis to join arrays along.\n\n Returns:\n cupy.ndarray: Joined array.\n\n .. seealso:: :func:`numpy.concatenate`\n\n \"\"\"\n ndim = None\n shape = None\n for a in tup:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be concatenated')\n if a.ndim == 0:\n raise TypeError('zero-dimensional arrays cannot be concatenated')\n if ndim is None:\n ndim = a.ndim\n shape = list(a.shape)\n axis = _get_positive_axis(a.ndim, axis)\n continue\n\n if a.ndim != ndim:\n raise ValueError(\n 'All arrays to concatenate must have the same ndim')\n if any(i != axis and shape[i] != a.shape[i]\n for i in six.moves.range(ndim)):\n raise ValueError(\n 'All arrays must have same shape except the axis to '\n 'concatenate')\n shape[axis] += a.shape[axis]\n\n if ndim is None:\n raise ValueError('Cannot concatenate from empty tuple')\n\n dtype = numpy.find_common_type([a.dtype for a in tup], [])\n ret = cupy.empty(shape, dtype=dtype)\n\n skip = (slice(None),) * axis\n i = 0\n for a in tup:\n aw = a.shape[axis]\n ret[skip + (slice(i, i + aw),)] = a\n i += aw\n\n return ret\n\n\ndef dstack(tup):\n \"\"\"Stacks arrays along the third axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_3d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate(cupy.atleast_3d(*tup), 2)\n\n\ndef hstack(tup):\n \"\"\"Stacks arrays horizontally.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the first axis. Otherwise, the array is\n stacked along the second axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. 
seealso:: :func:`numpy.hstack`\n\n \"\"\"\n arrs = [cupy.atleast_1d(a) for a in tup]\n axis = 1\n if arrs[0].ndim == 1:\n axis = 0\n return concatenate(arrs, axis)\n\n\ndef vstack(tup):\n \"\"\"Stacks arrays vertically.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the additional axis at the head.\n Otherwise, the array is stacked along the first axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_2d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate(cupy.atleast_2d(*tup), 0)\n\n\ndef stack(tup, axis=0):\n \"\"\"Stacks arrays along a new axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n axis (int): Axis along which the arrays are stacked.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.stack`\n \"\"\"\n return concatenate([cupy.expand_dims(x, axis) for x in tup], axis)\n\n\ndef _get_positive_axis(ndim, axis):\n a = axis\n if a < 0:\n a += ndim\n if a < 0 or a >= ndim:\n raise IndexError('axis {} out of bounds [0, {})'.format(axis, ndim))\n return a\n", "path": "cupy/manipulation/join.py"}]} | 2,286 | 123 |
gh_patches_debug_7374 | rasdani/github-patches | git_diff | freedomofpress__securedrop-1436 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Merge changes from 0.3.10 into develop
The 0.3.10 release has one major change, rotation of the signing key, plus a few docs changes. Backport those changes into the develop branch so staging/prod VMs still work past key expiry.
</issue>
<code>
[start of securedrop/version.py]
1 __version__ = '0.3.9'
2
[end of securedrop/version.py]
[start of docs/conf.py]
1 # -*- coding: utf-8 -*-
2 #
3 # SecureDrop documentation build configuration file, created by
4 # sphinx-quickstart on Tue Oct 13 12:08:52 2015.
5 #
6 # This file is execfile()d with the current directory set to its
7 # containing dir.
8 #
9 # Note that not all possible configuration values are present in this
10 # autogenerated file.
11 #
12 # All configuration values have a default; values that are commented out
13 # serve to show the default.
14
15 import sys
16 import os
17 import shlex
18
19 # Detect if we're being built by Read the Docs
20 # https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs
21 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
22
23 # If extensions (or modules to document with autodoc) are in another directory,
24 # add these directories to sys.path here. If the directory is relative to the
25 # documentation root, use os.path.abspath to make it absolute, like shown here.
26 #sys.path.insert(0, os.path.abspath('.'))
27
28 # -- General configuration ------------------------------------------------
29
30 # If your documentation needs a minimal Sphinx version, state it here.
31 #needs_sphinx = '1.0'
32
33 # Add any Sphinx extension module names here, as strings. They can be
34 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
35 # ones.
36 extensions = ['sphinx.ext.todo', ]
37
38 # Add any paths that contain templates here, relative to this directory.
39 templates_path = ['_templates']
40
41 # The suffix(es) of source filenames.
42 # You can specify multiple suffix as a list of string:
43 # source_suffix = ['.rst', '.md']
44 source_suffix = '.rst'
45
46 # The encoding of source files.
47 #source_encoding = 'utf-8-sig'
48
49 # The master toctree document.
50 master_doc = 'index'
51
52 # General information about the project.
53 project = u'SecureDrop'
54 copyright = u'2015, Freedom of the Press Foundation'
55 author = u'SecureDrop Team and Contributors'
56
57 # The version info for the project you're documenting, acts as replacement for
58 # |version| and |release|, also used in various other places throughout the
59 # built documents.
60 #
61 # The short X.Y version.
62 version = '0.3.9'
63 # The full version, including alpha/beta/rc tags.
64 release = '0.3.9'
65
66 # The language for content autogenerated by Sphinx. Refer to documentation
67 # for a list of supported languages.
68 #
69 # This is also used if you do content translation via gettext catalogs.
70 # Usually you set "language" from the command line for these cases.
71 language = None
72
73 # There are two options for replacing |today|: either, you set today to some
74 # non-false value, then it is used:
75 #today = ''
76 # Else, today_fmt is used as the format for a strftime call.
77 #today_fmt = '%B %d, %Y'
78
79 # List of patterns, relative to source directory, that match files and
80 # directories to ignore when looking for source files.
81 exclude_patterns = ['_build']
82
83 # The reST default role (used for this markup: `text`) to use for all
84 # documents.
85 #default_role = None
86
87 # If true, '()' will be appended to :func: etc. cross-reference text.
88 #add_function_parentheses = True
89
90 # If true, the current module name will be prepended to all description
91 # unit titles (such as .. function::).
92 #add_module_names = True
93
94 # If true, sectionauthor and moduleauthor directives will be shown in the
95 # output. They are ignored by default.
96 #show_authors = False
97
98 # The name of the Pygments (syntax highlighting) style to use.
99 pygments_style = 'sphinx'
100
101 # A list of ignored prefixes for module index sorting.
102 #modindex_common_prefix = []
103
104 # If true, keep warnings as "system message" paragraphs in the built documents.
105 #keep_warnings = False
106
107 # If true, `todo` and `todoList` produce output, else they produce nothing.
108 todo_include_todos = False
109
110
111 # -- Options for HTML output ----------------------------------------------
112
113 # The theme to use for HTML and HTML Help pages. See the documentation for
114 # a list of builtin themes.
115 if on_rtd:
116 html_theme = 'default'
117 else:
118 try:
119 # If you want to build the docs locally using the RTD theme,
120 # you may need to install it: ``pip install sphinx_rtd_theme``.
121 # https://github.com/snide/sphinx_rtd_theme#via-package
122 import sphinx_rtd_theme
123 html_theme = "sphinx_rtd_theme"
124 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
125 except ImportError:
126 # This theme is included with Sphinx and is quite nice (based
127 # on the Pocoo themes), but since we're using the RTD theme
128 # for the production docs, it's best to use that to avoid
129 # issues due to discrepancies between the themes.
130 html_theme = 'alabaster'
131
132 # Theme options are theme-specific and customize the look and feel of a theme
133 # further. For a list of options available for each theme, see the
134 # documentation.
135 #html_theme_options = {}
136
137 # Add any paths that contain custom themes here, relative to this directory.
138 #html_theme_path = []
139
140 # The name for this set of Sphinx documents. If None, it defaults to
141 # "<project> v<release> documentation".
142 #html_title = None
143
144 # A shorter title for the navigation bar. Default is the same as html_title.
145 #html_short_title = None
146
147 # The name of an image file (relative to this directory) to place at the top
148 # of the sidebar.
149 #html_logo = None
150
151 # The name of an image file (within the static path) to use as favicon of the
152 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
153 # pixels large.
154 #html_favicon = None
155
156 # Add any paths that contain custom static files (such as style sheets) here,
157 # relative to this directory. They are copied after the builtin static files,
158 # so a file named "default.css" will overwrite the builtin "default.css".
159 html_static_path = ['_static']
160
161 # Add any extra paths that contain custom files (such as robots.txt or
162 # .htaccess) here, relative to this directory. These files are copied
163 # directly to the root of the documentation.
164 #html_extra_path = []
165
166 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
167 # using the given strftime format.
168 #html_last_updated_fmt = '%b %d, %Y'
169
170 # If true, SmartyPants will be used to convert quotes and dashes to
171 # typographically correct entities.
172 #html_use_smartypants = True
173
174 # Custom sidebar templates, maps document names to template names.
175 #html_sidebars = {}
176
177 # Additional templates that should be rendered to pages, maps page names to
178 # template names.
179 #html_additional_pages = {}
180
181 # If false, no module index is generated.
182 #html_domain_indices = True
183
184 # If false, no index is generated.
185 #html_use_index = True
186
187 # If true, the index is split into individual pages for each letter.
188 #html_split_index = False
189
190 # If true, links to the reST sources are added to the pages.
191 #html_show_sourcelink = True
192
193 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
194 #html_show_sphinx = True
195
196 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
197 #html_show_copyright = True
198
199 # If true, an OpenSearch description file will be output, and all pages will
200 # contain a <link> tag referring to it. The value of this option must be the
201 # base URL from which the finished HTML is served.
202 #html_use_opensearch = ''
203
204 # This is the file name suffix for HTML files (e.g. ".xhtml").
205 #html_file_suffix = None
206
207 # Language to be used for generating the HTML full-text search index.
208 # Sphinx supports the following languages:
209 # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
210 # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
211 #html_search_language = 'en'
212
213 # A dictionary with options for the search language support, empty by default.
214 # Now only 'ja' uses this config value
215 #html_search_options = {'type': 'default'}
216
217 # The name of a javascript file (relative to the configuration directory) that
218 # implements a search results scorer. If empty, the default will be used.
219 #html_search_scorer = 'scorer.js'
220
221 # Output file base name for HTML help builder.
222 htmlhelp_basename = 'SecureDropdoc'
223
224 # -- Options for LaTeX output ---------------------------------------------
225
226 latex_elements = {
227 # The paper size ('letterpaper' or 'a4paper').
228 #'papersize': 'letterpaper',
229
230 # The font size ('10pt', '11pt' or '12pt').
231 #'pointsize': '10pt',
232
233 # Additional stuff for the LaTeX preamble.
234 #'preamble': '',
235
236 # Latex figure (float) alignment
237 #'figure_align': 'htbp',
238 }
239
240 # Grouping the document tree into LaTeX files. List of tuples
241 # (source start file, target name, title,
242 # author, documentclass [howto, manual, or own class]).
243 latex_documents = [
244 (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation',
245 author, 'manual'),
246 ]
247
248 # The name of an image file (relative to this directory) to place at the top of
249 # the title page.
250 #latex_logo = None
251
252 # For "manual" documents, if this is true, then toplevel headings are parts,
253 # not chapters.
254 #latex_use_parts = False
255
256 # If true, show page references after internal links.
257 #latex_show_pagerefs = False
258
259 # If true, show URL addresses after external links.
260 #latex_show_urls = False
261
262 # Documents to append as an appendix to all manuals.
263 #latex_appendices = []
264
265 # If false, no module index is generated.
266 #latex_domain_indices = True
267
268
269 # -- Options for manual page output ---------------------------------------
270
271 # One entry per manual page. List of tuples
272 # (source start file, name, description, authors, manual section).
273 man_pages = [
274 (master_doc, 'securedrop', u'SecureDrop Documentation',
275 [author], 1)
276 ]
277
278 # If true, show URL addresses after external links.
279 #man_show_urls = False
280
281
282 # -- Options for Texinfo output -------------------------------------------
283
284 # Grouping the document tree into Texinfo files. List of tuples
285 # (source start file, target name, title, author,
286 # dir menu entry, description, category)
287 texinfo_documents = [
288 (master_doc, 'SecureDrop', u'SecureDrop Documentation',
289 author, 'SecureDrop', 'One line description of project.',
290 'Miscellaneous'),
291 ]
292
293 # Documents to append as an appendix to all manuals.
294 #texinfo_appendices = []
295
296 # If false, no module index is generated.
297 #texinfo_domain_indices = True
298
299 # How to display URL addresses: 'footnote', 'no', or 'inline'.
300 #texinfo_show_urls = 'footnote'
301
302 # If true, do not generate a @detailmenu in the "Top" node's menu.
303 #texinfo_no_detailmenu = False
304
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -59,9 +59,9 @@
# built documents.
#
# The short X.Y version.
-version = '0.3.9'
+version = '0.3.10'
# The full version, including alpha/beta/rc tags.
-release = '0.3.9'
+release = '0.3.10'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/securedrop/version.py b/securedrop/version.py
--- a/securedrop/version.py
+++ b/securedrop/version.py
@@ -1 +1 @@
-__version__ = '0.3.9'
+__version__ = '0.3.10'
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -59,9 +59,9 @@\n # built documents.\n #\n # The short X.Y version.\n-version = '0.3.9'\n+version = '0.3.10'\n # The full version, including alpha/beta/rc tags.\n-release = '0.3.9'\n+release = '0.3.10'\n \n # The language for content autogenerated by Sphinx. Refer to documentation\n # for a list of supported languages.\ndiff --git a/securedrop/version.py b/securedrop/version.py\n--- a/securedrop/version.py\n+++ b/securedrop/version.py\n@@ -1 +1 @@\n-__version__ = '0.3.9'\n+__version__ = '0.3.10'\n", "issue": "Merge changes from 0.3.10 into develop \nThe 0.3.10 release has one major change, rotation of the signing key, plus a few docs changes. Backport those changes into the develop branch so staging/prod VMs still work past key expiry.\n\n", "before_files": [{"content": "__version__ = '0.3.9'\n", "path": "securedrop/version.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# SecureDrop documentation build configuration file, created by\n# sphinx-quickstart on Tue Oct 13 12:08:52 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\nimport shlex\n\n# Detect if we're being built by Read the Docs\n# https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.todo', ]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'SecureDrop'\ncopyright = u'2015, Freedom of the Press Foundation'\nauthor = u'SecureDrop Team and Contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.3.9'\n# The full version, including alpha/beta/rc tags.\nrelease = '0.3.9'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nif on_rtd:\n html_theme = 'default'\nelse:\n try:\n # If you want to build the docs locally using the RTD theme,\n # you may need to install it: ``pip install sphinx_rtd_theme``.\n # https://github.com/snide/sphinx_rtd_theme#via-package\n import sphinx_rtd_theme\n html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n except ImportError:\n # This theme is included with Sphinx and is quite nice (based\n # on the Pocoo themes), but since we're using the RTD theme\n # for the production docs, it's best to use that to avoid\n # issues due to discrepancies between the themes.\n html_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'SecureDropdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n\n# Latex figure (float) alignment\n#'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation',\n author, 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'securedrop', u'SecureDrop Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'SecureDrop', u'SecureDrop Documentation',\n author, 'SecureDrop', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n", "path": "docs/conf.py"}]} | 3,975 | 185 |
gh_patches_debug_30321 | rasdani/github-patches | git_diff | pwndbg__pwndbg-433 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug of function get_load_segment_info in readelf.py
When I get readelf output like the one shown below:
```
readelf --program-headers /bin/ls | grep "LOAD" -A 10
LOAD 0x0000000000000000 0x0000000000400000 0x0000000000400000
0x000000000001da64 0x000000000001da64 R E 200000
```
the function crashed at line 65
```
65 fsize, msize, read, write, execute, align = re_secnd.match(line).groups()
```
the reason is in the regex format
```python
re_secnd = re.compile(r"\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (.)(.)(.)\s+(**0x**[0-9A-Fa-f]+)"
```
I mean, "0x" should not a absolute prefix of align number
</issue>
<code>
[start of pwndbg/wrappers/readelf.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 from __future__ import absolute_import
4 from __future__ import division
5 from __future__ import print_function
6 from __future__ import unicode_literals
7
8 import re
9
10 import pwndbg.wrappers
11
12 cmd_name = "readelf"
13
14 @pwndbg.wrappers.OnlyWithCommand(cmd_name)
15 def get_jmpslots():
16 local_path = pwndbg.file.get_file(pwndbg.proc.exe)
17 cmd = [get_jmpslots.cmd_path, "--relocs", local_path]
18 readelf_out = pwndbg.wrappers.call_cmd(cmd)
19
20 return filter(_extract_jumps, readelf_out.splitlines())
21
22 def _extract_jumps(line):
23 '''
24 Checks for records in `readelf --relocs <binary>` which has type e.g. `R_X86_64_JUMP_SLO`
25 NOTE: Because of that we DO NOT display entries that are not writeable (due to FULL RELRO)
26 as they have `R_X86_64_GLOB_DAT` type.
27
28 It might be good to display them seperately in the future.
29 '''
30 try:
31 if "JUMP" in line.split()[2]:
32 return line
33 else:
34 return False
35 except IndexError:
36 return False
37
38 @pwndbg.wrappers.OnlyWithCommand(cmd_name)
39 def get_load_segment_info():
40 '''
41 Looks for LOAD sections by parsing the output of `readelf --program-headers <binary>`
42 '''
43 local_path = pwndbg.file.get_file(pwndbg.proc.exe)
44 cmd = [get_jmpslots.cmd_path, "--program-headers", local_path]
45 readelf_out = pwndbg.wrappers.call_cmd(cmd)
46
47 segments = []
48 load_found = False
49
50 # Output from readelf is
51 # Type Offset VirtAddr PhysAddr
52 # FileSiz MemSiz Flags Align
53 # LOAD 0x0000000000000000 0x0000000000000000 0x0000000000000000
54 # 0x0000000000000830 0x0000000000000830 R E 0x200000
55 # Account for this using two regular expressions
56 re_first = re.compile(r"\s+LOAD\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+)")
57 re_secnd = re.compile(r"\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (.)(.)(.)\s+(0x[0-9A-Fa-f]+)")
58 hex2int = lambda x: int(x, 16)
59
60 for line in readelf_out.splitlines():
61 if "LOAD" in line:
62 load_found = True
63 offset, vaddr, paddr = map(hex2int, re_first.match(line).groups())
64 elif load_found:
65 fsize, msize, read, write, execute, align = re_secnd.match(line).groups()
66 fsize, msize, align = map(hex2int, (fsize, msize, align))
67 read = read == "R"
68 write = write == "W"
69 execute = execute == "E"
70
71 segments.append({"Offset": offset,
72 "VirtAddr": vaddr,
73 "PhysAddr": paddr,
74 "FileSiz": fsize,
75 "MemSiz": msize,
76 "FlagsRead": read,
77 "FlagsWrite": write,
78 "FlagsExecute": execute})
79
80 load_found = False
81
82 return segments
83
[end of pwndbg/wrappers/readelf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwndbg/wrappers/readelf.py b/pwndbg/wrappers/readelf.py
--- a/pwndbg/wrappers/readelf.py
+++ b/pwndbg/wrappers/readelf.py
@@ -52,9 +52,15 @@
# FileSiz MemSiz Flags Align
# LOAD 0x0000000000000000 0x0000000000000000 0x0000000000000000
# 0x0000000000000830 0x0000000000000830 R E 0x200000
+ #
+ ############################################################################
+ #
+ # NOTE: On some readelf versions the Align column might not be prefixed with 0x
+ # See https://github.com/pwndbg/pwndbg/issues/427
+ #
# Account for this using two regular expressions
re_first = re.compile(r"\s+LOAD\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+)")
- re_secnd = re.compile(r"\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (.)(.)(.)\s+(0x[0-9A-Fa-f]+)")
+ re_secnd = re.compile(r"\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (.)(.)(.)\s+(0x)?([0-9A-Fa-f]+)")
hex2int = lambda x: int(x, 16)
for line in readelf_out.splitlines():
@@ -62,8 +68,8 @@
load_found = True
offset, vaddr, paddr = map(hex2int, re_first.match(line).groups())
elif load_found:
- fsize, msize, read, write, execute, align = re_secnd.match(line).groups()
- fsize, msize, align = map(hex2int, (fsize, msize, align))
+ fsize, msize, read, write, execute, _optional_prefix, align = re_secnd.match(line).groups()
+ fsize, msize, align = map(hex2int, (fsize, msize, '0x' + align))
read = read == "R"
write = write == "W"
execute = execute == "E"
| {"golden_diff": "diff --git a/pwndbg/wrappers/readelf.py b/pwndbg/wrappers/readelf.py\n--- a/pwndbg/wrappers/readelf.py\n+++ b/pwndbg/wrappers/readelf.py\n@@ -52,9 +52,15 @@\n # FileSiz MemSiz Flags Align\n # LOAD 0x0000000000000000 0x0000000000000000 0x0000000000000000\n # 0x0000000000000830 0x0000000000000830 R E 0x200000\n+ #\n+ ############################################################################\n+ #\n+ # NOTE: On some readelf versions the Align column might not be prefixed with 0x\n+ # See https://github.com/pwndbg/pwndbg/issues/427\n+ #\n # Account for this using two regular expressions\n re_first = re.compile(r\"\\s+LOAD\\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+)\")\n- re_secnd = re.compile(r\"\\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (.)(.)(.)\\s+(0x[0-9A-Fa-f]+)\")\n+ re_secnd = re.compile(r\"\\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (.)(.)(.)\\s+(0x)?([0-9A-Fa-f]+)\")\n hex2int = lambda x: int(x, 16)\n \n for line in readelf_out.splitlines():\n@@ -62,8 +68,8 @@\n load_found = True\n offset, vaddr, paddr = map(hex2int, re_first.match(line).groups())\n elif load_found:\n- fsize, msize, read, write, execute, align = re_secnd.match(line).groups()\n- fsize, msize, align = map(hex2int, (fsize, msize, align))\n+ fsize, msize, read, write, execute, _optional_prefix, align = re_secnd.match(line).groups()\n+ fsize, msize, align = map(hex2int, (fsize, msize, '0x' + align))\n read = read == \"R\"\n write = write == \"W\"\n execute = execute == \"E\"\n", "issue": "bug of function get_load_segment_info in readelf.py\nwhen I got readelf result like below\r\n```\r\nreadelf --program-headers /bin/ls | grep \"LOAD\" -A 10\r\n LOAD 0x0000000000000000 0x0000000000400000 0x0000000000400000\r\n 0x000000000001da64 0x000000000001da64 R E 200000\r\n```\r\nthe function crashed at line 65\r\n```\r\n65 fsize, msize, read, write, execute, align = re_secnd.match(line).groups()\r\n```\r\nthe reason is in the regex format\r\n```python\r\nre_secnd = re.compile(r\"\\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (.)(.)(.)\\s+(**0x**[0-9A-Fa-f]+)\"\r\n```\r\nI mean, \"0x\" should not a absolute prefix of align number\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport re\n\nimport pwndbg.wrappers\n\ncmd_name = \"readelf\"\n\[email protected](cmd_name)\ndef get_jmpslots():\n local_path = pwndbg.file.get_file(pwndbg.proc.exe)\n cmd = [get_jmpslots.cmd_path, \"--relocs\", local_path]\n readelf_out = pwndbg.wrappers.call_cmd(cmd)\n\n return filter(_extract_jumps, readelf_out.splitlines())\n\ndef _extract_jumps(line):\n '''\n Checks for records in `readelf --relocs <binary>` which has type e.g. 
`R_X86_64_JUMP_SLO`\n NOTE: Because of that we DO NOT display entries that are not writeable (due to FULL RELRO)\n as they have `R_X86_64_GLOB_DAT` type.\n\n It might be good to display them seperately in the future.\n '''\n try:\n if \"JUMP\" in line.split()[2]:\n return line\n else:\n return False\n except IndexError:\n return False\n\[email protected](cmd_name)\ndef get_load_segment_info():\n '''\n Looks for LOAD sections by parsing the output of `readelf --program-headers <binary>`\n '''\n local_path = pwndbg.file.get_file(pwndbg.proc.exe)\n cmd = [get_jmpslots.cmd_path, \"--program-headers\", local_path]\n readelf_out = pwndbg.wrappers.call_cmd(cmd)\n\n segments = []\n load_found = False\n\n # Output from readelf is \n # Type Offset VirtAddr PhysAddr\n # FileSiz MemSiz Flags Align\n # LOAD 0x0000000000000000 0x0000000000000000 0x0000000000000000\n # 0x0000000000000830 0x0000000000000830 R E 0x200000\n # Account for this using two regular expressions\n re_first = re.compile(r\"\\s+LOAD\\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+)\")\n re_secnd = re.compile(r\"\\s+(0x[0-9A-Fa-f]+) (0x[0-9A-Fa-f]+) (.)(.)(.)\\s+(0x[0-9A-Fa-f]+)\")\n hex2int = lambda x: int(x, 16)\n\n for line in readelf_out.splitlines():\n if \"LOAD\" in line:\n load_found = True\n offset, vaddr, paddr = map(hex2int, re_first.match(line).groups())\n elif load_found:\n fsize, msize, read, write, execute, align = re_secnd.match(line).groups()\n fsize, msize, align = map(hex2int, (fsize, msize, align))\n read = read == \"R\"\n write = write == \"W\"\n execute = execute == \"E\"\n\n segments.append({\"Offset\": offset,\n \"VirtAddr\": vaddr,\n \"PhysAddr\": paddr,\n \"FileSiz\": fsize,\n \"MemSiz\": msize,\n \"FlagsRead\": read,\n \"FlagsWrite\": write,\n \"FlagsExecute\": execute})\n\n load_found = False\n\n return segments\n", "path": "pwndbg/wrappers/readelf.py"}]} | 1,854 | 625 |
gh_patches_debug_17862 | rasdani/github-patches | git_diff | kivy__kivy-2700 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SDL2 - crash on loading asyncimage loading gif?
relevant log:
Traceback (most recent call last):
File "/home/chozabu/git/KivEntEd/main.py", line 1289, in <module>
KivEntEd().run()
File "/usr/local/lib/python2.7/dist-packages/kivy/app.py", line 825, in run
runTouchApp()
File "/usr/local/lib/python2.7/dist-packages/kivy/base.py", line 484, in runTouchApp
EventLoop.window.mainloop()
File "/usr/local/lib/python2.7/dist-packages/kivy/core/window/window_pygame.py", line 364, in mainloop
self._mainloop()
File "/usr/local/lib/python2.7/dist-packages/kivy/core/window/window_pygame.py", line 268, in _mainloop
EventLoop.idle()
File "/usr/local/lib/python2.7/dist-packages/kivy/base.py", line 324, in idle
Clock.tick()
File "/usr/local/lib/python2.7/dist-packages/kivy/clock.py", line 482, in tick
self._process_events()
File "/usr/local/lib/python2.7/dist-packages/kivy/clock.py", line 614, in _process_events
event.tick(self._last_tick, remove)
File "/usr/local/lib/python2.7/dist-packages/kivy/clock.py", line 373, in tick
ret = callback(self._dt)
File "/home/chozabu/git/KivEntEd/ui_elements.py", line 121, in initUI
self.screenShot.source = serverURL+"/downloadSS?fullname="+self.info['filename']+".png"
File "kivy/properties.pyx", line 377, in kivy.properties.Property.__set__ (kivy/properties.c:4346)
File "kivy/properties.pyx", line 409, in kivy.properties.Property.set (kivy/properties.c:4861)
File "kivy/properties.pyx", line 460, in kivy.properties.Property.dispatch (kivy/properties.c:5437)
File "kivy/_event.pyx", line 1046, in kivy._event.EventObservers.dispatch (kivy/_event.c:10980)
File "/usr/local/lib/python2.7/dist-packages/kivy/uix/image.py", line 327, in _load_source
anim_delay=self.anim_delay)
File "/usr/local/lib/python2.7/dist-packages/kivy/loader.py", line 432, in image
client = ProxyImage(self.loading_image,
File "/usr/local/lib/python2.7/dist-packages/kivy/loader.py", line 163, in _get_loading_image
self._loading_image = ImageLoader.load(filename=loading_png_fn)
File "/usr/local/lib/python2.7/dist-packages/kivy/core/image/__init__.py", line 385, in load
im = loader(filename, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/kivy/core/image/**init**.py", line 164, in **init**
self._data = self.load(filename)
File "/usr/local/lib/python2.7/dist-packages/kivy/core/image/img_sdl2.py", line 34, in load
raise Exception('SDL2: Unable to load image')
Exception: SDL2: Unable to load image
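For context, the fix shown below removes `gif` from the extensions the SDL2 loader advertises, presumably so another registered loader handles GIFs. A toy model of first-match, extension-based loader dispatch (hypothetical names, not Kivy's real registry code) shows why an over-broad `extensions()` list shadows a loader that would work:
```python
# Hypothetical illustration of first-match loader dispatch by file extension.
loaders = [
    ("sdl2", {"bmp", "jpg", "png", "gif"}),   # claims gif but fails to decode it
    ("pil",  {"gif", "png", "jpg"}),          # could handle the gif, never gets asked
]

def pick_loader(filename):
    ext = filename.rsplit(".", 1)[-1].lower()
    for name, exts in loaders:
        if ext in exts:
            return name
    raise ValueError("no loader for %s" % filename)

print(pick_loader("anim.gif"))  # "sdl2" wins and the load fails; drop "gif" above and "pil" is chosen
```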
</issue>
<code>
[start of kivy/core/image/img_sdl2.py]
1 '''
2 SDL2 image loader
3 =================
4 '''
5
6 __all__ = ('ImageLoaderSDL2', )
7
8 from kivy.compat import PY2
9 from kivy.logger import Logger
10 from kivy.core.image import ImageLoaderBase, ImageData, ImageLoader
11 from kivy.core.image import _img_sdl2
12
13
14 class ImageLoaderSDL2(ImageLoaderBase):
15 '''Image loader based on the PIL library'''
16
17 def _ensure_ext(self):
18 _img_sdl2.init()
19
20 @staticmethod
21 def extensions():
22 '''Return accepted extensions for this loader'''
23 return ('bmp', 'gif', 'jpg', 'jpeg', 'lbm', 'pcx', 'png', 'pnm', 'tga', 'tiff',
24 'webp', 'xcf', 'xpm', 'xv')
25
26 @staticmethod
27 def can_save():
28 return True
29
30 def load(self, filename):
31 info = _img_sdl2.load(filename)
32 if not info:
33 Logger.warning('Image: Unable to load image <%s>' % filename)
34 raise Exception('SDL2: Unable to load image')
35
36 w, h, fmt, pixels, rowlength = info
37
38 # update internals
39 self.filename = filename
40 return [ImageData(
41 w, h, fmt, pixels, source=filename,
42 rowlength=rowlength)]
43
44 @staticmethod
45 def save(filename, width, height, fmt, pixels, flipped):
46 # TODO implement the save for sdl2
47 #surface = SDL2.image.fromstring(
48 # pixels, (width, height), fmt.upper(), False)
49 #SDL2.image.save(surface, filename)
50 _img_sdl2.save(filename, width, height, fmt, pixels, flipped)
51 return True
52
53
54 # register
55 ImageLoader.register(ImageLoaderSDL2)
56
[end of kivy/core/image/img_sdl2.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kivy/core/image/img_sdl2.py b/kivy/core/image/img_sdl2.py
--- a/kivy/core/image/img_sdl2.py
+++ b/kivy/core/image/img_sdl2.py
@@ -20,7 +20,7 @@
@staticmethod
def extensions():
'''Return accepted extensions for this loader'''
- return ('bmp', 'gif', 'jpg', 'jpeg', 'lbm', 'pcx', 'png', 'pnm', 'tga', 'tiff',
+ return ('bmp', 'jpg', 'jpeg', 'lbm', 'pcx', 'png', 'pnm', 'tga', 'tiff',
'webp', 'xcf', 'xpm', 'xv')
@staticmethod
@@ -43,10 +43,6 @@
@staticmethod
def save(filename, width, height, fmt, pixels, flipped):
- # TODO implement the save for sdl2
- #surface = SDL2.image.fromstring(
- # pixels, (width, height), fmt.upper(), False)
- #SDL2.image.save(surface, filename)
_img_sdl2.save(filename, width, height, fmt, pixels, flipped)
return True
| {"golden_diff": "diff --git a/kivy/core/image/img_sdl2.py b/kivy/core/image/img_sdl2.py\n--- a/kivy/core/image/img_sdl2.py\n+++ b/kivy/core/image/img_sdl2.py\n@@ -20,7 +20,7 @@\n @staticmethod\n def extensions():\n '''Return accepted extensions for this loader'''\n- return ('bmp', 'gif', 'jpg', 'jpeg', 'lbm', 'pcx', 'png', 'pnm', 'tga', 'tiff',\n+ return ('bmp', 'jpg', 'jpeg', 'lbm', 'pcx', 'png', 'pnm', 'tga', 'tiff',\n 'webp', 'xcf', 'xpm', 'xv')\n \n @staticmethod\n@@ -43,10 +43,6 @@\n \n @staticmethod\n def save(filename, width, height, fmt, pixels, flipped):\n- # TODO implement the save for sdl2\n- #surface = SDL2.image.fromstring(\n- # pixels, (width, height), fmt.upper(), False)\n- #SDL2.image.save(surface, filename)\n _img_sdl2.save(filename, width, height, fmt, pixels, flipped)\n return True\n", "issue": "SDL2 - crash on loading asyncimage loading gif?\nrelevant log:\n\nTraceback (most recent call last):\n File \"/home/chozabu/git/KivEntEd/main.py\", line 1289, in <module>\n KivEntEd().run()\n File \"/usr/local/lib/python2.7/dist-packages/kivy/app.py\", line 825, in run\n runTouchApp()\n File \"/usr/local/lib/python2.7/dist-packages/kivy/base.py\", line 484, in runTouchApp\n EventLoop.window.mainloop()\n File \"/usr/local/lib/python2.7/dist-packages/kivy/core/window/window_pygame.py\", line 364, in mainloop\n self._mainloop()\n File \"/usr/local/lib/python2.7/dist-packages/kivy/core/window/window_pygame.py\", line 268, in _mainloop\n EventLoop.idle()\n File \"/usr/local/lib/python2.7/dist-packages/kivy/base.py\", line 324, in idle\n Clock.tick()\n File \"/usr/local/lib/python2.7/dist-packages/kivy/clock.py\", line 482, in tick\n self._process_events()\n File \"/usr/local/lib/python2.7/dist-packages/kivy/clock.py\", line 614, in _process_events\n event.tick(self._last_tick, remove)\n File \"/usr/local/lib/python2.7/dist-packages/kivy/clock.py\", line 373, in tick\n ret = callback(self._dt)\n File \"/home/chozabu/git/KivEntEd/ui_elements.py\", line 121, in initUI\n self.screenShot.source = serverURL+\"/downloadSS?fullname=\"+self.info['filename']+\".png\"\n File \"kivy/properties.pyx\", line 377, in kivy.properties.Property.__set__ (kivy/properties.c:4346)\n File \"kivy/properties.pyx\", line 409, in kivy.properties.Property.set (kivy/properties.c:4861)\n File \"kivy/properties.pyx\", line 460, in kivy.properties.Property.dispatch (kivy/properties.c:5437)\n File \"kivy/_event.pyx\", line 1046, in kivy._event.EventObservers.dispatch (kivy/_event.c:10980)\n File \"/usr/local/lib/python2.7/dist-packages/kivy/uix/image.py\", line 327, in _load_source\n anim_delay=self.anim_delay)\n File \"/usr/local/lib/python2.7/dist-packages/kivy/loader.py\", line 432, in image\n client = ProxyImage(self.loading_image,\n File \"/usr/local/lib/python2.7/dist-packages/kivy/loader.py\", line 163, in _get_loading_image\n self._loading_image = ImageLoader.load(filename=loading_png_fn)\n File \"/usr/local/lib/python2.7/dist-packages/kivy/core/image/__init__.py\", line 385, in load\n im = loader(filename, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/kivy/core/image/**init**.py\", line 164, in **init**\n self._data = self.load(filename)\n File \"/usr/local/lib/python2.7/dist-packages/kivy/core/image/img_sdl2.py\", line 34, in load\n raise Exception('SDL2: Unable to load image')\nException: SDL2: Unable to load image\n\n", "before_files": [{"content": "'''\nSDL2 image loader\n=================\n'''\n\n__all__ = ('ImageLoaderSDL2', )\n\nfrom kivy.compat import PY2\nfrom kivy.logger import 
Logger\nfrom kivy.core.image import ImageLoaderBase, ImageData, ImageLoader\nfrom kivy.core.image import _img_sdl2\n\n\nclass ImageLoaderSDL2(ImageLoaderBase):\n '''Image loader based on the PIL library'''\n\n def _ensure_ext(self):\n _img_sdl2.init()\n\n @staticmethod\n def extensions():\n '''Return accepted extensions for this loader'''\n return ('bmp', 'gif', 'jpg', 'jpeg', 'lbm', 'pcx', 'png', 'pnm', 'tga', 'tiff',\n 'webp', 'xcf', 'xpm', 'xv')\n\n @staticmethod\n def can_save():\n return True\n\n def load(self, filename):\n info = _img_sdl2.load(filename)\n if not info:\n Logger.warning('Image: Unable to load image <%s>' % filename)\n raise Exception('SDL2: Unable to load image')\n\n w, h, fmt, pixels, rowlength = info\n\n # update internals\n self.filename = filename\n return [ImageData(\n w, h, fmt, pixels, source=filename,\n rowlength=rowlength)]\n\n @staticmethod\n def save(filename, width, height, fmt, pixels, flipped):\n # TODO implement the save for sdl2\n #surface = SDL2.image.fromstring(\n # pixels, (width, height), fmt.upper(), False)\n #SDL2.image.save(surface, filename)\n _img_sdl2.save(filename, width, height, fmt, pixels, flipped)\n return True\n\n\n# register\nImageLoader.register(ImageLoaderSDL2)\n", "path": "kivy/core/image/img_sdl2.py"}]} | 1,812 | 276 |
gh_patches_debug_19620 | rasdani/github-patches | git_diff | sopel-irc__sopel-2166 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
announce error on python3
<!-- Before reporting a bug, please search both open *and closed* issues to
see if it has already been reported. If you can, try to reproduce the problem
on an unmodified copy of the `master` branch first, as sometimes bugs are found
and fixed without a report. If the problem is unreported and persists in
`master`, please help us fix it quickly by filling out as much of this
information as you can. Thanks! -->
### Description
.announce results in an error on python 3
### Reproduction steps
1. Set up an instance on Python 3 (specifically, I got the error on v3.7)
2. Try to use .announce
3. there will be a error
### Expected behavior
Works without errors.
### Logs
```
If applicable, add logs to help us figure out what's happening. Raw logs are
super helpful! Logs are usually found in ~/.sopel/logs, depending on your
configuration.
```
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: [2021-07-22 17:04:51,684] sopel.bot ERROR - Unexpected error ('dict_keys' object is not subscriptable) from MacFan4000 at 2
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: Traceback (most recent call last):
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: File "/srv/sopelbots/prodvenv/lib/python3.7/site-packages/sopel/bot.py", line 757, in call_rule
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: rule.execute(sopel, trigger)
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: File "/srv/sopelbots/prodvenv/lib/python3.7/site-packages/sopel/plugins/rules.py", line 1057, in execute
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: exit_code = self._handler(bot, trigger)
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: File "/srv/sopelbots/prodvenv/lib/python3.7/site-packages/sopel/plugin.py", line 1071, in guarded
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: return function(bot, trigger, *args, **kwargs)
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: File "/srv/sopelbots/prodvenv/lib/python3.7/site-packages/sopel/modules/announce.py", line 44, in announce
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: for cgroup in channels:
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: File "/srv/sopelbots/prodvenv/lib/python3.7/site-packages/sopel/modules/announce.py", line 24, in _chunks
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: yield items[delim:delim + size]
Jul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: TypeError: 'dict_keys' object is not subscriptable
### Environment
- Sopel `.version`: [e.g. 7.0.0 or d416e19] 7.1.2
- Sopel installed via: [apt, pip, `setup.py install`, source, ?] pip
- Python version: [e.g. 3.6.9] 3.7
- Operating system: [e.g. Debian 10] Debian Buster
- IRCd `/version`: [e.g. InspIRCd 3.0.1] Libera Chat
- Relevant plugins: [adminchannel, weather, custom\_thing.py, ?] announce
### Notes
I believe https://github.com/sopel-irc/sopel/commit/b7b6b46a84e29e26a6a6b921debf57735661a4c0#diff-a9aa50736c17c299dac1ad9cb5ea1b835fb638c91bbd8c547990ffd9d67daa74 broke it due to .keys() not working the same way on python3 as it does on python2.
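For reference, here is a standalone sketch of the incompatibility and of a slicing-free chunker built on `itertools.islice`, which is the approach the fix below takes (illustrative code, not Sopel's own):
```python
# dict.keys() returns a view in Python 3; views support iteration but not slicing.
import itertools

channels = {"#a": 1, "#b": 2, "#c": 3}.keys()
# channels[0:2]  # TypeError: 'dict_keys' object is not subscriptable

def chunks(items, size):
    iterator = iter(items)
    chunk = tuple(itertools.islice(iterator, size))
    while chunk:
        yield chunk
        chunk = tuple(itertools.islice(iterator, size))

print(list(chunks(channels, 2)))  # [('#a', '#b'), ('#c',)]
```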
</issue>
<code>
[start of sopel/modules/announce.py]
1 """
2 announce.py - Sopel Announcement Plugin
3 Sends announcements to all channels the bot has joined.
4 Copyright © 2013, Elad Alfassa, <[email protected]>
5 Licensed under the Eiffel Forum License 2.
6
7 https://sopel.chat
8 """
9 from __future__ import generator_stop
10
11 from sopel import plugin
12
13
14 def _chunks(items, size):
15 """Break a list of items into groups.
16
17 :param items: the collection of items to chunk
18 :type items: :term:`iterable`
19 :param int size: the size of each chunk
20 """
21 # from https://stackoverflow.com/a/312464/5991 with modified names for readability
22 for delim in range(0, len(items), size):
23 yield items[delim:delim + size]
24
25
26 @plugin.command('announce')
27 @plugin.example('.announce Some important message here')
28 @plugin.require_admin('Sorry, I can\'t let you do that', reply=True)
29 @plugin.output_prefix('[ANNOUNCEMENT] ')
30 def announce(bot, trigger):
31 """Send an announcement to all channels the bot is in."""
32 if trigger.group(2) is None:
33 bot.reply('Announce what? I need a message to say.')
34 return
35
36 size = 1
37 try:
38 size = bot.isupport.TARGMAX.get('PRIVMSG', size)
39 except AttributeError:
40 pass
41
42 channels = _chunks(bot.channels.keys(), size)
43 for cgroup in channels:
44 bot.say(trigger.group(2), ','.join(cgroup))
45
46 bot.reply('Announce complete.')
47
[end of sopel/modules/announce.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sopel/modules/announce.py b/sopel/modules/announce.py
--- a/sopel/modules/announce.py
+++ b/sopel/modules/announce.py
@@ -8,6 +8,8 @@
"""
from __future__ import generator_stop
+import itertools
+
from sopel import plugin
@@ -17,10 +19,18 @@
:param items: the collection of items to chunk
:type items: :term:`iterable`
:param int size: the size of each chunk
+ :return: a :term:`generator` of chunks
+ :rtype: :term:`generator` of :class:`tuple`
"""
- # from https://stackoverflow.com/a/312464/5991 with modified names for readability
- for delim in range(0, len(items), size):
- yield items[delim:delim + size]
+ # This approach is safer than slicing with non-subscriptable types,
+ # for example `dict_keys` objects
+ iterator = iter(items)
+ # TODO: Simplify to assignment expression (`while cond := expr`)
+ # when dropping Python 3.7
+ chunk = tuple(itertools.islice(iterator, size))
+ while chunk:
+ yield chunk
+ chunk = tuple(itertools.islice(iterator, size))
@plugin.command('announce')
| {"golden_diff": "diff --git a/sopel/modules/announce.py b/sopel/modules/announce.py\n--- a/sopel/modules/announce.py\n+++ b/sopel/modules/announce.py\n@@ -8,6 +8,8 @@\n \"\"\"\n from __future__ import generator_stop\n \n+import itertools\n+\n from sopel import plugin\n \n \n@@ -17,10 +19,18 @@\n :param items: the collection of items to chunk\n :type items: :term:`iterable`\n :param int size: the size of each chunk\n+ :return: a :term:`generator` of chunks\n+ :rtype: :term:`generator` of :class:`tuple`\n \"\"\"\n- # from https://stackoverflow.com/a/312464/5991 with modified names for readability\n- for delim in range(0, len(items), size):\n- yield items[delim:delim + size]\n+ # This approach is safer than slicing with non-subscriptable types,\n+ # for example `dict_keys` objects\n+ iterator = iter(items)\n+ # TODO: Simplify to assignment expression (`while cond := expr`)\n+ # when dropping Python 3.7\n+ chunk = tuple(itertools.islice(iterator, size))\n+ while chunk:\n+ yield chunk\n+ chunk = tuple(itertools.islice(iterator, size))\n \n \n @plugin.command('announce')\n", "issue": "announce error on python3\n<!-- Before reporting a bug, please search both open *and closed* issues to\r\nsee if it has already been reported. If you can, try to reproduce the problem\r\non an unmodified copy of the `master` branch first, as sometimes bugs are found\r\nand fixed without a report. If the problem is unreported and persists in\r\n`master`, please help us fix it quickly by filling out as much of this\r\ninformation as you can. Thanks! -->\r\n\r\n### Description\r\n.announce results in an error on python 3\r\n\r\n### Reproduction steps\r\n1. Setup a instance on python3 (specifically I got the error on v3.7)\r\n2. Try to use .announce\r\n3. there will be a error\r\n\r\n### Expected behavior\r\nWorks without errors.\r\n\r\n### Logs\r\n```\r\nIf applicable, add logs to help us figure out what's happening. Raw logs are\r\nsuper helpful! 
Logs are usually found in ~/.sopel/logs, depending on your\r\nconfiguration.\r\n```\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: [2021-07-22 17:04:51,684] sopel.bot ERROR - Unexpected error ('dict_keys' object is not subscriptable) from MacFan4000 at 2\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: Traceback (most recent call last):\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: File \"/srv/sopelbots/prodvenv/lib/python3.7/site-packages/sopel/bot.py\", line 757, in call_rule\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: rule.execute(sopel, trigger)\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: File \"/srv/sopelbots/prodvenv/lib/python3.7/site-packages/sopel/plugins/rules.py\", line 1057, in execute\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: exit_code = self._handler(bot, trigger)\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: File \"/srv/sopelbots/prodvenv/lib/python3.7/site-packages/sopel/plugin.py\", line 1071, in guarded\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: return function(bot, trigger, *args, **kwargs)\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: File \"/srv/sopelbots/prodvenv/lib/python3.7/site-packages/sopel/modules/announce.py\", line 44, in announce\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: for cgroup in channels:\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: File \"/srv/sopelbots/prodvenv/lib/python3.7/site-packages/sopel/modules/announce.py\", line 24, in _chunks\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: yield items[delim:delim + size]\r\nJul 22 17:04:51 bots1.mirahezebots.org sopel[31484]: TypeError: 'dict_keys' object is not subscriptable\r\n### Environment\r\n- Sopel `.version`: [e.g. 7.0.0 or d416e19] 7.1.2\r\n- Sopel installed via: [apt, pip, `setup.py install`, source, ?] pip\r\n- Python version: [e.g. 3.6.9] 3.7\r\n- Operating system: [e.g. Debian 10] Debian Buster\r\n- IRCd `/version`: [e.g. InspIRCd 3.0.1] Libera Chat\r\n- Relevant plugins: [adminchannel, weather, custom\\_thing.py, ?] announce\r\n\r\n### Notes\r\nI believe https://github.com/sopel-irc/sopel/commit/b7b6b46a84e29e26a6a6b921debf57735661a4c0#diff-a9aa50736c17c299dac1ad9cb5ea1b835fb638c91bbd8c547990ffd9d67daa74 broke it due to .keys() not working the same way on python3 as it does on python2.\r\n\n", "before_files": [{"content": "\"\"\"\nannounce.py - Sopel Announcement Plugin\nSends announcements to all channels the bot has joined.\nCopyright \u00a9 2013, Elad Alfassa, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import generator_stop\n\nfrom sopel import plugin\n\n\ndef _chunks(items, size):\n \"\"\"Break a list of items into groups.\n\n :param items: the collection of items to chunk\n :type items: :term:`iterable`\n :param int size: the size of each chunk\n \"\"\"\n # from https://stackoverflow.com/a/312464/5991 with modified names for readability\n for delim in range(0, len(items), size):\n yield items[delim:delim + size]\n\n\[email protected]('announce')\[email protected]('.announce Some important message here')\[email protected]_admin('Sorry, I can\\'t let you do that', reply=True)\[email protected]_prefix('[ANNOUNCEMENT] ')\ndef announce(bot, trigger):\n \"\"\"Send an announcement to all channels the bot is in.\"\"\"\n if trigger.group(2) is None:\n bot.reply('Announce what? 
I need a message to say.')\n return\n\n size = 1\n try:\n size = bot.isupport.TARGMAX.get('PRIVMSG', size)\n except AttributeError:\n pass\n\n channels = _chunks(bot.channels.keys(), size)\n for cgroup in channels:\n bot.say(trigger.group(2), ','.join(cgroup))\n\n bot.reply('Announce complete.')\n", "path": "sopel/modules/announce.py"}]} | 2,156 | 310 |
gh_patches_debug_6235 | rasdani/github-patches | git_diff | statsmodels__statsmodels-970 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
lowess test failure on Ubuntu
https://launchpadlibrarian.net/144807626/buildlog_ubuntu-raring-amd64.statsmodels_0.5.0~ppa17~revno-1430~raring1_UPLOADING.txt.gz
```
======================================================================
ERROR: statsmodels.nonparametric.tests.test_lowess.TestLowess.test_options
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/build/buildd/statsmodels-0.5.0~ppa17~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/nonparametric/tests/test_lowess.py", line 144, in test_options
return_sorted=False)
File "/build/buildd/statsmodels-0.5.0~ppa17~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/nonparametric/smoothers_lowess.py", line 182, in lowess
yfitted_[mask_valid] = yfitted
ValueError: NumPy boolean array indexing assignment cannot assign 20 input values to the 17 output values where the mask is true
```
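The mismatch arises because the intermediate buffer is allocated from the full-length `endog` (20 observations) instead of the NaN-dropped `y`, so by the time the masked assignment runs there are 20 fitted values for only 17 valid slots; NumPy boolean-mask assignment needs exactly `mask.sum()` values. A minimal standalone illustration (not the statsmodels code itself):
```python
import numpy as np

endog = np.array([1.0, np.nan, 2.0, 3.0, np.nan])  # 5 obs, 2 missing
mask_valid = np.isfinite(endog)                    # 3 True entries
yfitted = np.array([1.0, 2.0, 3.0])                # fitted on the 3 valid points

out = np.empty_like(endog)
out.fill(np.nan)
out[mask_valid] = yfitted        # OK: 3 values into the 3 masked slots

# out[mask_valid] = np.empty_like(endog)  # ValueError: cannot assign 5 input values
#                                         # to the 3 output values where the mask is true
```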
</issue>
<code>
[start of statsmodels/nonparametric/smoothers_lowess.py]
1 # -*- coding: utf-8 -*-
2 """Lowess - wrapper for cythonized extension
3
4 Author : Chris Jordan-Squire
5 Author : Carl Vogel
6 Author : Josef Perktold
7
8 """
9
10 import numpy as np
11 from ._smoothers_lowess import lowess as _lowess
12
13 def lowess(endog, exog, frac=2.0/3.0, it=3, delta=0.0, is_sorted=False,
14 missing='drop', return_sorted=True):
15 '''LOWESS (Locally Weighted Scatterplot Smoothing)
16
17 A lowess function that outs smoothed estimates of endog
18 at the given exog values from points (exog, endog)
19
20 Parameters
21 ----------
22 endog: 1-D numpy array
23 The y-values of the observed points
24 exog: 1-D numpy array
25 The x-values of the observed points
26 frac: float
27 Between 0 and 1. The fraction of the data used
28 when estimating each y-value.
29 it: int
30 The number of residual-based reweightings
31 to perform.
32 delta: float
33 Distance within which to use linear-interpolation
34 instead of weighted regression.
35 is_sorted : bool
36 If False (default), then the data will be sorted by exog before
37 calculating lowess. If True, then it is assumed that the data is
38 already sorted by exog.
39 missing : str
40 Available options are 'none', 'drop', and 'raise'. If 'none', no nan
41 checking is done. If 'drop', any observations with nans are dropped.
42 If 'raise', an error is raised. Default is 'drop'.
43 return_sorted : bool
44 If True (default), then the returned array is sorted by exog and has
45 missing (nan or infinite) observations removed.
46 If False, then the returned array is in the same length and the same
47 sequence of observations as the input array.
48
49 Returns
50 -------
51 out: ndarray, float
52 The returned array is two-dimensional if return_sorted is True, and
53 one dimensional if return_sorted is False.
54 If return_sorted is True, then a numpy array with two columns. The
55 first column contains the sorted x (exog) values and the second column
56 the associated estimated y (endog) values.
57 If return_sorted is False, then only the fitted values are returned,
58 and the observations will be in the same order as the input arrays.
59
60 Notes
61 -----
62 This lowess function implements the algorithm given in the
63 reference below using local linear estimates.
64
65 Suppose the input data has N points. The algorithm works by
66 estimating the `smooth` y_i by taking the frac*N closest points
67 to (x_i,y_i) based on their x values and estimating y_i
68 using a weighted linear regression. The weight for (x_j,y_j)
69 is tricube function applied to |x_i-x_j|.
70
71 If it > 1, then further weighted local linear regressions
72 are performed, where the weights are the same as above
73 times the _lowess_bisquare function of the residuals. Each iteration
74 takes approximately the same amount of time as the original fit,
75 so these iterations are expensive. They are most useful when
76 the noise has extremely heavy tails, such as Cauchy noise.
77 Noise with less heavy-tails, such as t-distributions with df>2,
78 are less problematic. The weights downgrade the influence of
79 points with large residuals. In the extreme case, points whose
80 residuals are larger than 6 times the median absolute residual
81 are given weight 0.
82
83 `delta` can be used to save computations. For each `x_i`, regressions
84 are skipped for points closer than `delta`. The next regression is
85 fit for the farthest point within delta of `x_i` and all points in
86 between are estimated by linearly interpolating between the two
87 regression fits.
88
89 Judicious choice of delta can cut computation time considerably
90 for large data (N > 5000). A good choice is ``delta = 0.01 * range(exog)``.
91
92 Some experimentation is likely required to find a good
93 choice of `frac` and `iter` for a particular dataset.
94
95 References
96 ----------
97 Cleveland, W.S. (1979) "Robust Locally Weighted Regression
98 and Smoothing Scatterplots". Journal of the American Statistical
99 Association 74 (368): 829-836.
100
101 Examples
102 --------
103 The below allows a comparison between how different the fits from
104 lowess for different values of frac can be.
105
106 >>> import numpy as np
107 >>> import statsmodels.api as sm
108 >>> lowess = sm.nonparametric.lowess
109 >>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)
110 >>> y = np.sin(x) + np.random.normal(size=len(x))
111 >>> z = lowess(y, x)
112 >>> w = lowess(y, x, frac=1./3)
113
114 This gives a similar comparison for when it is 0 vs not.
115
116 >>> import numpy as np
117 >>> import scipy.stats as stats
118 >>> import statsmodels.api as sm
119 >>> lowess = sm.nonparametric.lowess
120 >>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)
121 >>> y = np.sin(x) + stats.cauchy.rvs(size=len(x))
122 >>> z = lowess(y, x, frac= 1./3, it=0)
123 >>> w = lowess(y, x, frac=1./3)
124
125 '''
126
127 endog = np.asarray(endog, float)
128 exog = np.asarray(exog, float)
129
130 # Inputs should be vectors (1-D arrays) of the
131 # same length.
132 if exog.ndim != 1:
133 raise ValueError('exog must be a vector')
134 if endog.ndim != 1:
135 raise ValueError('endog must be a vector')
136 if endog.shape[0] != exog.shape[0] :
137 raise ValueError('exog and endog must have same length')
138
139 if missing in ['drop', 'raise']:
140 # Cut out missing values
141 mask_valid = (np.isfinite(exog) & np.isfinite(endog))
142 all_valid = np.all(mask_valid)
143 if all_valid:
144 y = endog
145 x = exog
146 else:
147 if missing == 'drop':
148 x = exog[mask_valid]
149 y = endog[mask_valid]
150 else:
151 raise ValueError('nan or inf found in data')
152 elif missing == 'none':
153 y = endog
154 x = exog
155 all_valid = True # we assume it's true if missing='none'
156 else:
157 raise ValueError("missing can only be 'none', 'drop' or 'raise'")
158
159 if not is_sorted:
160 # Sort both inputs according to the ascending order of x values
161 sort_index = np.argsort(x)
162 x = np.array(x[sort_index])
163 y = np.array(y[sort_index])
164
165 res = _lowess(y, x, frac=frac, it=it, delta=delta)
166 _, yfitted = res.T
167
168 if return_sorted or (all_valid and is_sorted):
169 return res
170 else:
171 # rebuild yfitted with original indices
172 # a bit messy: y might have been selected twice
173 if not is_sorted:
174 yfitted_ = np.empty_like(endog)
175 yfitted_.fill(np.nan)
176 yfitted_[sort_index] = yfitted
177 yfitted = yfitted_
178
179 if not all_valid:
180 yfitted_ = np.empty_like(endog)
181 yfitted_.fill(np.nan)
182 yfitted_[mask_valid] = yfitted
183 yfitted = yfitted_
184
185 # we don't need to return exog anymore
186 return yfitted
187
[end of statsmodels/nonparametric/smoothers_lowess.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/statsmodels/nonparametric/smoothers_lowess.py b/statsmodels/nonparametric/smoothers_lowess.py
--- a/statsmodels/nonparametric/smoothers_lowess.py
+++ b/statsmodels/nonparametric/smoothers_lowess.py
@@ -171,7 +171,7 @@
# rebuild yfitted with original indices
# a bit messy: y might have been selected twice
if not is_sorted:
- yfitted_ = np.empty_like(endog)
+ yfitted_ = np.empty_like(y)
yfitted_.fill(np.nan)
yfitted_[sort_index] = yfitted
yfitted = yfitted_
| {"golden_diff": "diff --git a/statsmodels/nonparametric/smoothers_lowess.py b/statsmodels/nonparametric/smoothers_lowess.py\n--- a/statsmodels/nonparametric/smoothers_lowess.py\n+++ b/statsmodels/nonparametric/smoothers_lowess.py\n@@ -171,7 +171,7 @@\n # rebuild yfitted with original indices\n # a bit messy: y might have been selected twice\n if not is_sorted:\n- yfitted_ = np.empty_like(endog)\n+ yfitted_ = np.empty_like(y)\n yfitted_.fill(np.nan)\n yfitted_[sort_index] = yfitted\n yfitted = yfitted_\n", "issue": "lowes test failure Ubuntu\nhttps://launchpadlibrarian.net/144807626/buildlog_ubuntu-raring-amd64.statsmodels_0.5.0~ppa17~revno-1430~raring1_UPLOADING.txt.gz\n\n```\n======================================================================\nERROR: statsmodels.nonparametric.tests.test_lowess.TestLowess.test_options\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"/usr/lib/python2.7/dist-packages/nose/case.py\", line 197, in runTest\n self.test(*self.arg)\n File \"/build/buildd/statsmodels-0.5.0~ppa17~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/nonparametric/tests/test_lowess.py\", line 144, in test_options\n return_sorted=False)\n File \"/build/buildd/statsmodels-0.5.0~ppa17~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/nonparametric/smoothers_lowess.py\", line 182, in lowess\n yfitted_[mask_valid] = yfitted\nValueError: NumPy boolean array indexing assignment cannot assign 20 input values to the 17 output values where the mask is true\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Lowess - wrapper for cythonized extension\n\nAuthor : Chris Jordan-Squire\nAuthor : Carl Vogel\nAuthor : Josef Perktold\n\n\"\"\"\n\nimport numpy as np\nfrom ._smoothers_lowess import lowess as _lowess\n\ndef lowess(endog, exog, frac=2.0/3.0, it=3, delta=0.0, is_sorted=False,\n missing='drop', return_sorted=True):\n '''LOWESS (Locally Weighted Scatterplot Smoothing)\n\n A lowess function that outs smoothed estimates of endog\n at the given exog values from points (exog, endog)\n\n Parameters\n ----------\n endog: 1-D numpy array\n The y-values of the observed points\n exog: 1-D numpy array\n The x-values of the observed points\n frac: float\n Between 0 and 1. The fraction of the data used\n when estimating each y-value.\n it: int\n The number of residual-based reweightings\n to perform.\n delta: float\n Distance within which to use linear-interpolation\n instead of weighted regression.\n is_sorted : bool\n If False (default), then the data will be sorted by exog before\n calculating lowess. If True, then it is assumed that the data is\n already sorted by exog.\n missing : str\n Available options are 'none', 'drop', and 'raise'. If 'none', no nan\n checking is done. If 'drop', any observations with nans are dropped.\n If 'raise', an error is raised. Default is 'drop'.\n return_sorted : bool\n If True (default), then the returned array is sorted by exog and has\n missing (nan or infinite) observations removed.\n If False, then the returned array is in the same length and the same\n sequence of observations as the input array.\n\n Returns\n -------\n out: ndarray, float\n The returned array is two-dimensional if return_sorted is True, and\n one dimensional if return_sorted is False.\n If return_sorted is True, then a numpy array with two columns. 
The\n first column contains the sorted x (exog) values and the second column\n the associated estimated y (endog) values.\n If return_sorted is False, then only the fitted values are returned,\n and the observations will be in the same order as the input arrays.\n\n Notes\n -----\n This lowess function implements the algorithm given in the\n reference below using local linear estimates.\n\n Suppose the input data has N points. The algorithm works by\n estimating the `smooth` y_i by taking the frac*N closest points\n to (x_i,y_i) based on their x values and estimating y_i\n using a weighted linear regression. The weight for (x_j,y_j)\n is tricube function applied to |x_i-x_j|.\n\n If it > 1, then further weighted local linear regressions\n are performed, where the weights are the same as above\n times the _lowess_bisquare function of the residuals. Each iteration\n takes approximately the same amount of time as the original fit,\n so these iterations are expensive. They are most useful when\n the noise has extremely heavy tails, such as Cauchy noise.\n Noise with less heavy-tails, such as t-distributions with df>2,\n are less problematic. The weights downgrade the influence of\n points with large residuals. In the extreme case, points whose\n residuals are larger than 6 times the median absolute residual\n are given weight 0.\n\n `delta` can be used to save computations. For each `x_i`, regressions\n are skipped for points closer than `delta`. The next regression is\n fit for the farthest point within delta of `x_i` and all points in\n between are estimated by linearly interpolating between the two\n regression fits.\n\n Judicious choice of delta can cut computation time considerably\n for large data (N > 5000). A good choice is ``delta = 0.01 * range(exog)``.\n\n Some experimentation is likely required to find a good\n choice of `frac` and `iter` for a particular dataset.\n\n References\n ----------\n Cleveland, W.S. (1979) \"Robust Locally Weighted Regression\n and Smoothing Scatterplots\". 
Journal of the American Statistical\n Association 74 (368): 829-836.\n\n Examples\n --------\n The below allows a comparison between how different the fits from\n lowess for different values of frac can be.\n\n >>> import numpy as np\n >>> import statsmodels.api as sm\n >>> lowess = sm.nonparametric.lowess\n >>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)\n >>> y = np.sin(x) + np.random.normal(size=len(x))\n >>> z = lowess(y, x)\n >>> w = lowess(y, x, frac=1./3)\n\n This gives a similar comparison for when it is 0 vs not.\n\n >>> import numpy as np\n >>> import scipy.stats as stats\n >>> import statsmodels.api as sm\n >>> lowess = sm.nonparametric.lowess\n >>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)\n >>> y = np.sin(x) + stats.cauchy.rvs(size=len(x))\n >>> z = lowess(y, x, frac= 1./3, it=0)\n >>> w = lowess(y, x, frac=1./3)\n\n '''\n\n endog = np.asarray(endog, float)\n exog = np.asarray(exog, float)\n\n # Inputs should be vectors (1-D arrays) of the\n # same length.\n if exog.ndim != 1:\n raise ValueError('exog must be a vector')\n if endog.ndim != 1:\n raise ValueError('endog must be a vector')\n if endog.shape[0] != exog.shape[0] :\n raise ValueError('exog and endog must have same length')\n\n if missing in ['drop', 'raise']:\n # Cut out missing values\n mask_valid = (np.isfinite(exog) & np.isfinite(endog))\n all_valid = np.all(mask_valid)\n if all_valid:\n y = endog\n x = exog\n else:\n if missing == 'drop':\n x = exog[mask_valid]\n y = endog[mask_valid]\n else:\n raise ValueError('nan or inf found in data')\n elif missing == 'none':\n y = endog\n x = exog\n all_valid = True # we assume it's true if missing='none'\n else:\n raise ValueError(\"missing can only be 'none', 'drop' or 'raise'\")\n\n if not is_sorted:\n # Sort both inputs according to the ascending order of x values\n sort_index = np.argsort(x)\n x = np.array(x[sort_index])\n y = np.array(y[sort_index])\n\n res = _lowess(y, x, frac=frac, it=it, delta=delta)\n _, yfitted = res.T\n\n if return_sorted or (all_valid and is_sorted):\n return res\n else:\n # rebuild yfitted with original indices\n # a bit messy: y might have been selected twice\n if not is_sorted:\n yfitted_ = np.empty_like(endog)\n yfitted_.fill(np.nan)\n yfitted_[sort_index] = yfitted\n yfitted = yfitted_\n\n if not all_valid:\n yfitted_ = np.empty_like(endog)\n yfitted_.fill(np.nan)\n yfitted_[mask_valid] = yfitted\n yfitted = yfitted_\n\n # we don't need to return exog anymore\n return yfitted\n", "path": "statsmodels/nonparametric/smoothers_lowess.py"}]} | 3,069 | 155 |
gh_patches_debug_8548 | rasdani/github-patches | git_diff | sktime__sktime-1571 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[DOC] Transformers missing from API Reference
#### Describe the issue linked to the documentation
<!--
Tell us about the confusion introduced in the documentation.
-->
There have been transformers that have recently been improved but have not been added to the API Reference.
For example, STLTransformer, CLaSPTransformer, etc.
#### Suggest a potential alternative/fix
<!--
Tell us how we could improve the documentation in this regard.
-->
These need to be added to the documentation.
In the future we'll have to try and be good about following our [reviewer guide](https://www.sktime.org/en/stable/reviewer_guide.html) and waiting to merge until things are added to docs. Otherwise, it is hard for users to know what functionality exists.
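One way to keep track of the gap is to cross-check the registered transformers against the API reference sources. The sketch below is purely illustrative: it assumes `sktime.registry.all_estimators` is available and it guesses the `docs/source/api_reference/transformations.rst` path, neither of which is verified here.
```python
# Hedged sketch: report transformer classes never mentioned in the API reference page.
from pathlib import Path
from sktime.registry import all_estimators  # assumed available in this sktime version

# Path is an assumption about the docs layout.
documented = Path("docs/source/api_reference/transformations.rst").read_text()
missing = [name for name, _ in all_estimators(estimator_types="transformer")
           if name not in documented]
print(missing)  # expected to include STLTransformer, ClaSPTransformer, ...
```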
</issue>
<code>
[start of sktime/transformations/series/difference.py]
1 #!/usr/bin/env python3 -u
2 # -*- coding: utf-8 -*-
3 # copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
4 """Class to iteratively apply differences to a time series."""
5 __author__ = ["Ryan Kuhns"]
6 __all__ = ["Differencer"]
7
8 import numpy as np
9 import pandas as pd
10 from sklearn.utils import check_array
11
12 from sktime.forecasting.base import ForecastingHorizon
13 from sktime.transformations.base import _SeriesToSeriesTransformer
14 from sktime.utils.validation import is_int
15 from sktime.utils.validation.series import check_series
16
17
18 def _check_lags(lags):
19 msg = " ".join(
20 [
21 "`lags` should be provided as a positive integer scaler, or",
22 "a list, tuple or np.ndarray of positive integers,"
23 f"but found {type(lags)}.",
24 ]
25 )
26 non_positive_msg = "`lags` should be positive integers."
27 if isinstance(lags, int):
28 if lags <= 0:
29 raise ValueError(non_positive_msg)
30 lags = check_array([lags], ensure_2d=False)
31 elif isinstance(lags, (list, tuple, np.ndarray)):
32 if not all([is_int(lag) for lag in lags]):
33 raise TypeError(msg)
34 lags = check_array(lags, ensure_2d=False)
35 if (lags <= 0).any():
36 raise ValueError(non_positive_msg)
37 else:
38 raise TypeError(msg)
39
40 return lags
41
42
43 def _diff_transform(Z, lags):
44 Zt = Z.copy()
45
46 if len(lags) == 0:
47 return Zt
48
49 else:
50 for lag in lags:
51 Zt = Zt.diff(lag)
52 return Zt
53
54
55 def _inverse_diff(Z, lag):
56 for i in range(lag):
57 Z.iloc[i::lag] = Z.iloc[i::lag].cumsum()
58
59 return Z
60
61
62 class Differencer(_SeriesToSeriesTransformer):
63 """Apply iterative differences to a timeseries.
64
65 The transformation works for univariate and multivariate timeseries. However,
66 the multivariate case applies the same differencing to every series.
67
68 Difference transformations are applied at the specified lags in the order
69 provided.
70
71 For example, given a timeseries with monthly periodicity, using lags=[1, 12]
72 corresponds to applying a standard first difference to handle trend, and
73 followed by a seasonal difference (at lag 12) to attempt to account for
74 seasonal dependence.
75
76 To provide a higher-order difference at the same lag list the lag multiple
77 times. For example, lags=[1, 1] takes iterative first differences like may
78 be needed for a series that is integrated of order 2.
79
80 Parameters
81 ----------
82 lags : int or array-like, default = 1
83 The lags used to difference the data.
84 If a single `int` value is
85
86 drop_na : bool, default = True
87 Whether the differencer should drop the initial observations that
88 contain missing values as a result of the differencing operation(s).
89
90 Attributes
91 ----------
92 lags : int or array-like
93 Lags used to perform the differencing of the input series.
94
95 drop_na : bool
96 Stores whether the Differencer drops the initial observations that contain
97 missing values as a result of the differencing operation(s).
98
99 Example
100 -------
101 >>> from sktime.transformations.series.difference import Differencer
102 >>> from sktime.datasets import load_airline
103 >>> y = load_airline()
104 >>> transformer = Differencer(lags=[1, 12])
105 >>> y_transform = transformer.fit_transform(y)
106 """
107
108 _tags = {
109 "fit-in-transform": False,
110 "transform-returns-same-time-index": False,
111 "univariate-only": False,
112 }
113
114 def __init__(self, lags=1, drop_na=True):
115 self.lags = lags
116 self.drop_na = drop_na
117 self._Z = None
118 self._lags = None
119 self._cumulative_lags = None
120 self._prior_cum_lags = None
121 self._prior_lags = None
122 super(Differencer, self).__init__()
123
124 def _check_inverse_transform_index(self, Z):
125 """Check fitted series contains indices needed in inverse_transform."""
126 first_idx = Z.index.min()
127 orig_first_idx, orig_last_idx = self._Z.index.min(), self._Z.index.max()
128
129 is_contained_by_fitted_z = False
130 is_future = False
131
132 if first_idx < orig_first_idx:
133 msg = [
134 "Some indices of `Z` are prior to timeseries used in `fit`.",
135 "Reconstruction via `inverse_transform` is not possible.",
136 ]
137 raise ValueError(" ".join(msg))
138
139 elif Z.index.difference(self._Z.index).shape[0] == 0:
140 is_contained_by_fitted_z = True
141
142 elif first_idx > orig_last_idx:
143 is_future = True
144
145 pad_z_inv = self.drop_na or is_future
146
147 cutoff = Z.index[0] if pad_z_inv else Z.index[self._cumulative_lags[-1]]
148 fh = ForecastingHorizon(np.arange(-1, -(self._cumulative_lags[-1] + 1), -1))
149 index = fh.to_absolute(cutoff).to_pandas()
150 index_diff = index.difference(self._Z.index)
151
152 if index_diff.shape[0] != 0 and not is_contained_by_fitted_z:
153 msg = [
154 f"Inverse transform requires indices {index}",
155 "to have been stored in `fit()`,",
156 f"but the indices {index_diff} were not found.",
157 ]
158 raise ValueError(" ".join(msg))
159
160 return is_contained_by_fitted_z, pad_z_inv
161
162 def _fit(self, Z, X=None):
163 """Logic used by fit method on `Z`.
164
165 Parameters
166 ----------
167 Z : pd.Series or pd.DataFrame
168 A timeseries to apply the specified transformation on.
169
170 Returns
171 -------
172 self
173 """
174 self._lags = _check_lags(self.lags)
175 self._prior_lags = np.roll(self._lags, shift=1)
176 self._prior_lags[0] = 0
177 self._cumulative_lags = self._lags.cumsum()
178 self._prior_cum_lags = np.zeros_like(self._cumulative_lags)
179 self._prior_cum_lags[1:] = self._cumulative_lags[:-1]
180 self._Z = Z.copy()
181 return self
182
183 def _transform(self, Z, X=None):
184 """Logic used by `transform` to apply transformation to `Z`.
185
186 Differences are applied at lags specified in `lags`.
187
188 Parameters
189 ----------
190 Z : pd.Series or pd.DataFrame
191 The timeseries to apply the specified transformation on.
192
193 Returns
194 -------
195 Zt : pd.Series or pd.DataFrame
196 The transformed timeseries.
197 """
198 Zt = _diff_transform(Z, self._lags)
199 if self.drop_na:
200 Zt = Zt.iloc[self._cumulative_lags[-1] :]
201 return Zt
202
203 def _inverse_transform(self, Z, X=None):
204 """Logic used by `inverse_transform` to reverse transformation on `Z`.
205
206 Parameters
207 ----------
208 Z : pd.Series or pd.DataFrame
209 A time series to reverse the transformation on.
210
211 Returns
212 -------
213 Z_inv : pd.Series or pd.DataFrame
214 The reconstructed timeseries after the transformation has been reversed.
215 """
216 is_df = isinstance(Z, pd.DataFrame)
217 is_contained_by_fit_z, pad_z_inv = self._check_inverse_transform_index(Z)
218
219 # If `Z` is entirely contained in fitted `_Z` we can just return
220         # the values from the timeseries stored in `fit` as a shortcut
221 if is_contained_by_fit_z:
222 Z_inv = self._Z.loc[Z.index, :] if is_df else self._Z.loc[Z.index]
223
224 else:
225 Z_inv = Z.copy()
226 for i, lag_info in enumerate(
227 zip(self._lags[::-1], self._prior_cum_lags[::-1])
228 ):
229 lag, prior_cum_lag = lag_info
230 _lags = self._lags[::-1][i + 1 :]
231 _transformed = _diff_transform(self._Z, _lags)
232
233 # Determine index values for initial values needed to reverse
234 # the differencing for the specified lag
235 if pad_z_inv:
236 cutoff = Z_inv.index[0]
237 else:
238 cutoff = Z_inv.index[prior_cum_lag + lag]
239 fh = ForecastingHorizon(np.arange(-1, -(lag + 1), -1))
240 index = fh.to_absolute(cutoff).to_pandas()
241
242 if is_df:
243 prior_n_timepoint_values = _transformed.loc[index, :]
244 else:
245 prior_n_timepoint_values = _transformed.loc[index]
246 if pad_z_inv:
247 Z_inv = pd.concat([prior_n_timepoint_values, Z_inv])
248 else:
249 Z_inv.update(prior_n_timepoint_values)
250
251 Z_inv = _inverse_diff(Z_inv, lag)
252
253 if pad_z_inv:
254 Z_inv = Z_inv.loc[Z.index, :] if is_df else Z_inv.loc[Z.index]
255
256 return Z_inv
257
258 def fit(self, Z, X=None):
259 """Fit the transformation on input series `Z`.
260
261 Parameters
262 ----------
263 Z : pd.Series or pd.DataFrame
264 A time series to apply the specified transformation on.
265
266 Returns
267 -------
268 self
269 """
270 Z = check_series(Z)
271
272 self._fit(Z, X=X)
273
274 self._is_fitted = True
275 return self
276
277 def transform(self, Z, X=None):
278 """Return transformed version of input series `Z`.
279
280 Parameters
281 ----------
282 Z : pd.Series or pd.DataFrame
283 A time series to apply the specified transformation on.
284
285 Returns
286 -------
287 Zt : pd.Series or pd.DataFrame
288 Transformed version of input series `Z`.
289 """
290 self.check_is_fitted()
291 Z = check_series(Z)
292
293 Zt = self._transform(Z, X=X)
294
295 return Zt
296
297 def inverse_transform(self, Z, X=None):
298 """Reverse transformation on input series `Z`.
299
300 Parameters
301 ----------
302 Z : pd.Series or pd.DataFrame
303 A time series to reverse the transformation on.
304
305 Returns
306 -------
307 Z_inv : pd.Series or pd.DataFrame
308 The reconstructed timeseries after the transformation has been reversed.
309 """
310 self.check_is_fitted()
311 Z = check_series(Z)
312
313 Z_inv = self._inverse_transform(Z, X=X)
314
315 return Z_inv
316
[end of sktime/transformations/series/difference.py]
</code>
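For reference, a short usage sketch of the `Differencer` class above (illustrative only, mirroring its docstring example):

```python
from sktime.datasets import load_airline
from sktime.transformations.series.difference import Differencer

y = load_airline()
transformer = Differencer(lags=[1, 12])      # first difference, then a seasonal difference at lag 12
y_transform = transformer.fit_transform(y)   # with drop_na=True the first 13 observations are dropped
y_back = transformer.inverse_transform(y_transform)  # recovers the original values over the transformed index
```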
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sktime/transformations/series/difference.py b/sktime/transformations/series/difference.py
--- a/sktime/transformations/series/difference.py
+++ b/sktime/transformations/series/difference.py
@@ -96,8 +96,8 @@
Stores whether the Differencer drops the initial observations that contain
missing values as a result of the differencing operation(s).
- Example
- -------
+ Examples
+ --------
>>> from sktime.transformations.series.difference import Differencer
>>> from sktime.datasets import load_airline
>>> y = load_airline()
| {"golden_diff": "diff --git a/sktime/transformations/series/difference.py b/sktime/transformations/series/difference.py\n--- a/sktime/transformations/series/difference.py\n+++ b/sktime/transformations/series/difference.py\n@@ -96,8 +96,8 @@\n Stores whether the Differencer drops the initial observations that contain\n missing values as a result of the differencing operation(s).\n \n- Example\n- -------\n+ Examples\n+ --------\n >>> from sktime.transformations.series.difference import Differencer\n >>> from sktime.datasets import load_airline\n >>> y = load_airline()\n", "issue": "[DOC] Transformers missing from API Reference\n#### Describe the issue linked to the documentation\r\n\r\n<!--\r\nTell us about the confusion introduced in the documentation.\r\n-->\r\n\r\nThere have been transformers that have recently been improved but have not been added the API Reference. \r\n\r\nFor example, STLTransformer, CLaSPTransformer, etc.\r\n\r\n#### Suggest a potential alternative/fix\r\n\r\n<!--\r\nTell us how we could improve the documentation in this regard.\r\n-->\r\n\r\nThese need to be added to the documentation. \r\n\r\nIn the future we'll have to try and be good about following our [reviewer guide](https://www.sktime.org/en/stable/reviewer_guide.html) and waiting to merge until things are added to docs. Otherwise, it is hard for users to know what functionality exists.\n", "before_files": [{"content": "#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Class to iteratively apply differences to a time series.\"\"\"\n__author__ = [\"Ryan Kuhns\"]\n__all__ = [\"Differencer\"]\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.utils import check_array\n\nfrom sktime.forecasting.base import ForecastingHorizon\nfrom sktime.transformations.base import _SeriesToSeriesTransformer\nfrom sktime.utils.validation import is_int\nfrom sktime.utils.validation.series import check_series\n\n\ndef _check_lags(lags):\n msg = \" \".join(\n [\n \"`lags` should be provided as a positive integer scaler, or\",\n \"a list, tuple or np.ndarray of positive integers,\"\n f\"but found {type(lags)}.\",\n ]\n )\n non_positive_msg = \"`lags` should be positive integers.\"\n if isinstance(lags, int):\n if lags <= 0:\n raise ValueError(non_positive_msg)\n lags = check_array([lags], ensure_2d=False)\n elif isinstance(lags, (list, tuple, np.ndarray)):\n if not all([is_int(lag) for lag in lags]):\n raise TypeError(msg)\n lags = check_array(lags, ensure_2d=False)\n if (lags <= 0).any():\n raise ValueError(non_positive_msg)\n else:\n raise TypeError(msg)\n\n return lags\n\n\ndef _diff_transform(Z, lags):\n Zt = Z.copy()\n\n if len(lags) == 0:\n return Zt\n\n else:\n for lag in lags:\n Zt = Zt.diff(lag)\n return Zt\n\n\ndef _inverse_diff(Z, lag):\n for i in range(lag):\n Z.iloc[i::lag] = Z.iloc[i::lag].cumsum()\n\n return Z\n\n\nclass Differencer(_SeriesToSeriesTransformer):\n \"\"\"Apply iterative differences to a timeseries.\n\n The transformation works for univariate and multivariate timeseries. 
However,\n the multivariate case applies the same differencing to every series.\n\n Difference transformations are applied at the specified lags in the order\n provided.\n\n For example, given a timeseries with monthly periodicity, using lags=[1, 12]\n corresponds to applying a standard first difference to handle trend, and\n followed by a seasonal difference (at lag 12) to attempt to account for\n seasonal dependence.\n\n To provide a higher-order difference at the same lag list the lag multiple\n times. For example, lags=[1, 1] takes iterative first differences like may\n be needed for a series that is integrated of order 2.\n\n Parameters\n ----------\n lags : int or array-like, default = 1\n The lags used to difference the data.\n If a single `int` value is\n\n drop_na : bool, default = True\n Whether the differencer should drop the initial observations that\n contain missing values as a result of the differencing operation(s).\n\n Attributes\n ----------\n lags : int or array-like\n Lags used to perform the differencing of the input series.\n\n drop_na : bool\n Stores whether the Differencer drops the initial observations that contain\n missing values as a result of the differencing operation(s).\n\n Example\n -------\n >>> from sktime.transformations.series.difference import Differencer\n >>> from sktime.datasets import load_airline\n >>> y = load_airline()\n >>> transformer = Differencer(lags=[1, 12])\n >>> y_transform = transformer.fit_transform(y)\n \"\"\"\n\n _tags = {\n \"fit-in-transform\": False,\n \"transform-returns-same-time-index\": False,\n \"univariate-only\": False,\n }\n\n def __init__(self, lags=1, drop_na=True):\n self.lags = lags\n self.drop_na = drop_na\n self._Z = None\n self._lags = None\n self._cumulative_lags = None\n self._prior_cum_lags = None\n self._prior_lags = None\n super(Differencer, self).__init__()\n\n def _check_inverse_transform_index(self, Z):\n \"\"\"Check fitted series contains indices needed in inverse_transform.\"\"\"\n first_idx = Z.index.min()\n orig_first_idx, orig_last_idx = self._Z.index.min(), self._Z.index.max()\n\n is_contained_by_fitted_z = False\n is_future = False\n\n if first_idx < orig_first_idx:\n msg = [\n \"Some indices of `Z` are prior to timeseries used in `fit`.\",\n \"Reconstruction via `inverse_transform` is not possible.\",\n ]\n raise ValueError(\" \".join(msg))\n\n elif Z.index.difference(self._Z.index).shape[0] == 0:\n is_contained_by_fitted_z = True\n\n elif first_idx > orig_last_idx:\n is_future = True\n\n pad_z_inv = self.drop_na or is_future\n\n cutoff = Z.index[0] if pad_z_inv else Z.index[self._cumulative_lags[-1]]\n fh = ForecastingHorizon(np.arange(-1, -(self._cumulative_lags[-1] + 1), -1))\n index = fh.to_absolute(cutoff).to_pandas()\n index_diff = index.difference(self._Z.index)\n\n if index_diff.shape[0] != 0 and not is_contained_by_fitted_z:\n msg = [\n f\"Inverse transform requires indices {index}\",\n \"to have been stored in `fit()`,\",\n f\"but the indices {index_diff} were not found.\",\n ]\n raise ValueError(\" \".join(msg))\n\n return is_contained_by_fitted_z, pad_z_inv\n\n def _fit(self, Z, X=None):\n \"\"\"Logic used by fit method on `Z`.\n\n Parameters\n ----------\n Z : pd.Series or pd.DataFrame\n A timeseries to apply the specified transformation on.\n\n Returns\n -------\n self\n \"\"\"\n self._lags = _check_lags(self.lags)\n self._prior_lags = np.roll(self._lags, shift=1)\n self._prior_lags[0] = 0\n self._cumulative_lags = self._lags.cumsum()\n self._prior_cum_lags = 
np.zeros_like(self._cumulative_lags)\n self._prior_cum_lags[1:] = self._cumulative_lags[:-1]\n self._Z = Z.copy()\n return self\n\n def _transform(self, Z, X=None):\n \"\"\"Logic used by `transform` to apply transformation to `Z`.\n\n Differences are applied at lags specified in `lags`.\n\n Parameters\n ----------\n Z : pd.Series or pd.DataFrame\n The timeseries to apply the specified transformation on.\n\n Returns\n -------\n Zt : pd.Series or pd.DataFrame\n The transformed timeseries.\n \"\"\"\n Zt = _diff_transform(Z, self._lags)\n if self.drop_na:\n Zt = Zt.iloc[self._cumulative_lags[-1] :]\n return Zt\n\n def _inverse_transform(self, Z, X=None):\n \"\"\"Logic used by `inverse_transform` to reverse transformation on `Z`.\n\n Parameters\n ----------\n Z : pd.Series or pd.DataFrame\n A time series to reverse the transformation on.\n\n Returns\n -------\n Z_inv : pd.Series or pd.DataFrame\n The reconstructed timeseries after the transformation has been reversed.\n \"\"\"\n is_df = isinstance(Z, pd.DataFrame)\n is_contained_by_fit_z, pad_z_inv = self._check_inverse_transform_index(Z)\n\n # If `Z` is entirely contained in fitted `_Z` we can just return\n # the values from the timeseires stored in `fit` as a shortcut\n if is_contained_by_fit_z:\n Z_inv = self._Z.loc[Z.index, :] if is_df else self._Z.loc[Z.index]\n\n else:\n Z_inv = Z.copy()\n for i, lag_info in enumerate(\n zip(self._lags[::-1], self._prior_cum_lags[::-1])\n ):\n lag, prior_cum_lag = lag_info\n _lags = self._lags[::-1][i + 1 :]\n _transformed = _diff_transform(self._Z, _lags)\n\n # Determine index values for initial values needed to reverse\n # the differencing for the specified lag\n if pad_z_inv:\n cutoff = Z_inv.index[0]\n else:\n cutoff = Z_inv.index[prior_cum_lag + lag]\n fh = ForecastingHorizon(np.arange(-1, -(lag + 1), -1))\n index = fh.to_absolute(cutoff).to_pandas()\n\n if is_df:\n prior_n_timepoint_values = _transformed.loc[index, :]\n else:\n prior_n_timepoint_values = _transformed.loc[index]\n if pad_z_inv:\n Z_inv = pd.concat([prior_n_timepoint_values, Z_inv])\n else:\n Z_inv.update(prior_n_timepoint_values)\n\n Z_inv = _inverse_diff(Z_inv, lag)\n\n if pad_z_inv:\n Z_inv = Z_inv.loc[Z.index, :] if is_df else Z_inv.loc[Z.index]\n\n return Z_inv\n\n def fit(self, Z, X=None):\n \"\"\"Fit the transformation on input series `Z`.\n\n Parameters\n ----------\n Z : pd.Series or pd.DataFrame\n A time series to apply the specified transformation on.\n\n Returns\n -------\n self\n \"\"\"\n Z = check_series(Z)\n\n self._fit(Z, X=X)\n\n self._is_fitted = True\n return self\n\n def transform(self, Z, X=None):\n \"\"\"Return transformed version of input series `Z`.\n\n Parameters\n ----------\n Z : pd.Series or pd.DataFrame\n A time series to apply the specified transformation on.\n\n Returns\n -------\n Zt : pd.Series or pd.DataFrame\n Transformed version of input series `Z`.\n \"\"\"\n self.check_is_fitted()\n Z = check_series(Z)\n\n Zt = self._transform(Z, X=X)\n\n return Zt\n\n def inverse_transform(self, Z, X=None):\n \"\"\"Reverse transformation on input series `Z`.\n\n Parameters\n ----------\n Z : pd.Series or pd.DataFrame\n A time series to reverse the transformation on.\n\n Returns\n -------\n Z_inv : pd.Series or pd.DataFrame\n The reconstructed timeseries after the transformation has been reversed.\n \"\"\"\n self.check_is_fitted()\n Z = check_series(Z)\n\n Z_inv = self._inverse_transform(Z, X=X)\n\n return Z_inv\n", "path": "sktime/transformations/series/difference.py"}]} | 3,911 | 144 |
gh_patches_debug_42848 | rasdani/github-patches | git_diff | yt-dlp__yt-dlp-7202 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[DigitalConcertHall] cannot download videos under /films
### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\* field
### Checklist
- [X] I'm reporting that yt-dlp is broken on a **supported** site
- [X] I've verified that I'm running yt-dlp version **2023.03.04** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
All
### Provide a description that is worded well enough to be understood
A major part of the site Digital Concert Hall is [concert videos](https://www.digitalconcerthall.com/en/concerts), which yt-dlp handles just fine. But when I try to download videos [here](https://www.digitalconcerthall.com/en/films), e.g. [this one](https://www.digitalconcerthall.com/en/film/388), which lasts 44 minutes (it can be played with the login credentials `--username [email protected] --password 3PXx-5.NDNmMD2!`, which you can use), yt-dlp only downloads the trailer, which lasts 39 seconds. I'm using a build from commit `ecfe479`.
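For context (my reading of `yt_dlp/extractor/digitalconcerthall.py`, included below): the extractor's `_VALID_URL` only matches `/concert/` pages, so `/film/` URLs fall through to the generic extractor, which picks up the 39-second `preview.mp4` trailer. A minimal sketch of the mismatch (illustrative only):

```python
import re

# Pattern currently declared by DigitalConcertHallIE (concerts only)
pattern = r'https?://(?:www\.)?digitalconcerthall\.com/(?P<language>[a-z]+)/concert/(?P<id>[0-9]+)'

print(bool(re.match(pattern, 'https://www.digitalconcerthall.com/en/concert/53201')))  # True
print(bool(re.match(pattern, 'https://www.digitalconcerthall.com/en/film/388')))       # False, falls back to generic
```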
### Provide verbose output that clearly demonstrates the problem
- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)
- [X] If using API, add `'verbose': True` to `YoutubeDL` params instead
- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below
### Complete Verbose Output
```shell
[debug] Command-line config: ['https://www.digitalconcerthall.com/en/film/388', '--username', 'PRIVATE', '--password', 'PRIVATE', '-vU']
[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version [email protected] [392389b7d] (zip)
[debug] Python 3.11.3 (CPython x86_64 64bit) - Linux-6.3.4-arch2-1-x86_64-with-glibc2.37 (OpenSSL 3.0.8 7 Feb 2023, glibc 2.37)
[debug] exe versions: ffmpeg 6.0 (setts), ffprobe 6.0, rtmpdump 2.4
[debug] Optional libraries: Cryptodome-3.17, brotli-1.0.9, certifi-2023.05.07, mutagen-1.46.0, sqlite3-2.6.0, websockets-11.0.3
[debug] Proxy map: {}
[debug] Loaded 1842 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
Available version: [email protected], Current version: [email protected]
Current Build Hash: 5bfb7e4a15c47b2ff99b149b7c841e98ef606a4af18fd6330ddf3a0ed4a7ea19
yt-dlp is up to date ([email protected])
[generic] Extracting URL: https://www.digitalconcerthall.com/en/film/388
[generic] 388: Downloading webpage
WARNING: [generic] Falling back on generic information extractor
[generic] 388: Extracting information
[debug] Looking for embeds
[debug] Identified a JSON LD
[generic] Extracting URL: https://world-vod.dchdns.net/preview/388-t/preview.mp4#__youtubedl_smuggle=%7B%22force_videoid%22%3A+%22388%22%2C+%22to_generic%22%3A+true%2C+%22http_headers%22%3A+%7B%22Referer%22%3A+%22https%3A%2F%2Fwww.digitalconcerthall.com%2Fen%2Ffilm%2F388%22%7D%7D
[generic] 388: Downloading webpage
WARNING: [generic] URL could be a direct video link, returning it as such.
[debug] Formats sorted by: hasvid, ie_pref, lang, quality, res, fps, hdr:12(7), vcodec:vp9.2(10), channels, acodec, filesize, fs_approx, tbr, vbr, abr, asr, proto, vext, aext, hasaud, source, id
[debug] Default format spec: bestvideo*+bestaudio/best
[info] 388: Downloading 1 format(s): 0
[debug] Invoking http downloader on "https://world-vod.dchdns.net/preview/388-t/preview.mp4"
[download] Destination: The Berliner Philharmoniker and Frank Peter Zimmermann [388].mp4
[download] 100% of 16.16MiB in 00:00:07 at 2.05MiB/s
```
</issue>
<code>
[start of yt_dlp/extractor/digitalconcerthall.py]
1 from .common import InfoExtractor
2
3 from ..utils import (
4 ExtractorError,
5 parse_resolution,
6 traverse_obj,
7 try_get,
8 urlencode_postdata,
9 )
10
11
12 class DigitalConcertHallIE(InfoExtractor):
13 IE_DESC = 'DigitalConcertHall extractor'
14 _VALID_URL = r'https?://(?:www\.)?digitalconcerthall\.com/(?P<language>[a-z]+)/concert/(?P<id>[0-9]+)'
15 _OAUTH_URL = 'https://api.digitalconcerthall.com/v2/oauth2/token'
16 _ACCESS_TOKEN = None
17 _NETRC_MACHINE = 'digitalconcerthall'
18 _TESTS = [{
19 'note': 'Playlist with only one video',
20 'url': 'https://www.digitalconcerthall.com/en/concert/53201',
21 'info_dict': {
22 'id': '53201-1',
23 'ext': 'mp4',
24 'composer': 'Kurt Weill',
25 'title': '[Magic Night]',
26 'thumbnail': r're:^https?://images.digitalconcerthall.com/cms/thumbnails.*\.jpg$',
27 'upload_date': '20210624',
28 'timestamp': 1624548600,
29 'duration': 2798,
30 'album_artist': 'Members of the Berliner Philharmoniker / Simon Rössler',
31 },
32 'params': {'skip_download': 'm3u8'},
33 }, {
34 'note': 'Concert with several works and an interview',
35 'url': 'https://www.digitalconcerthall.com/en/concert/53785',
36 'info_dict': {
37 'id': '53785',
38 'album_artist': 'Berliner Philharmoniker / Kirill Petrenko',
39 'title': 'Kirill Petrenko conducts Mendelssohn and Shostakovich',
40 },
41 'params': {'skip_download': 'm3u8'},
42 'playlist_count': 3,
43 }]
44
45 def _perform_login(self, username, password):
46 token_response = self._download_json(
47 self._OAUTH_URL,
48 None, 'Obtaining token', errnote='Unable to obtain token', data=urlencode_postdata({
49 'affiliate': 'none',
50 'grant_type': 'device',
51 'device_vendor': 'unknown',
52 'app_id': 'dch.webapp',
53 'app_version': '1.0.0',
54 'client_secret': '2ySLN+2Fwb',
55 }), headers={
56 'Content-Type': 'application/x-www-form-urlencoded',
57 })
58 self._ACCESS_TOKEN = token_response['access_token']
59 try:
60 self._download_json(
61 self._OAUTH_URL,
62 None, note='Logging in', errnote='Unable to login', data=urlencode_postdata({
63 'grant_type': 'password',
64 'username': username,
65 'password': password,
66 }), headers={
67 'Content-Type': 'application/x-www-form-urlencoded',
68 'Referer': 'https://www.digitalconcerthall.com',
69 'Authorization': f'Bearer {self._ACCESS_TOKEN}'
70 })
71 except ExtractorError:
72 self.raise_login_required(msg='Login info incorrect')
73
74 def _real_initialize(self):
75 if not self._ACCESS_TOKEN:
76 self.raise_login_required(method='password')
77
78 def _entries(self, items, language, **kwargs):
79 for item in items:
80 video_id = item['id']
81 stream_info = self._download_json(
82 self._proto_relative_url(item['_links']['streams']['href']), video_id, headers={
83 'Accept': 'application/json',
84 'Authorization': f'Bearer {self._ACCESS_TOKEN}',
85 'Accept-Language': language
86 })
87
88 m3u8_url = traverse_obj(
89 stream_info, ('channel', lambda k, _: k.startswith('vod_mixed'), 'stream', 0, 'url'), get_all=False)
90 formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', 'm3u8_native', fatal=False)
91
92 yield {
93 'id': video_id,
94 'title': item.get('title'),
95 'composer': item.get('name_composer'),
96 'url': m3u8_url,
97 'formats': formats,
98 'duration': item.get('duration_total'),
99 'timestamp': traverse_obj(item, ('date', 'published')),
100 'description': item.get('short_description') or stream_info.get('short_description'),
101 **kwargs,
102 'chapters': [{
103 'start_time': chapter.get('time'),
104 'end_time': try_get(chapter, lambda x: x['time'] + x['duration']),
105 'title': chapter.get('text'),
106 } for chapter in item['cuepoints']] if item.get('cuepoints') else None,
107 }
108
109 def _real_extract(self, url):
110 language, video_id = self._match_valid_url(url).group('language', 'id')
111 if not language:
112 language = 'en'
113
114 thumbnail_url = self._html_search_regex(
115 r'(https?://images\.digitalconcerthall\.com/cms/thumbnails/.*\.jpg)',
116 self._download_webpage(url, video_id), 'thumbnail')
117 thumbnails = [{
118 'url': thumbnail_url,
119 **parse_resolution(thumbnail_url)
120 }]
121
122 vid_info = self._download_json(
123 f'https://api.digitalconcerthall.com/v2/concert/{video_id}', video_id, headers={
124 'Accept': 'application/json',
125 'Accept-Language': language
126 })
127 album_artist = ' / '.join(traverse_obj(vid_info, ('_links', 'artist', ..., 'name')) or '')
128
129 return {
130 '_type': 'playlist',
131 'id': video_id,
132 'title': vid_info.get('title'),
133 'entries': self._entries(traverse_obj(vid_info, ('_embedded', ..., ...)), language,
134 thumbnails=thumbnails, album_artist=album_artist),
135 'thumbnails': thumbnails,
136 'album_artist': album_artist,
137 }
138
[end of yt_dlp/extractor/digitalconcerthall.py]
</code>
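One possible direction (a hedged sketch, not necessarily the expected patch): broaden `_VALID_URL` to accept both page types and reuse the matched type when talking to the API:

```python
# Sketch only; names mirror the existing extractor
_VALID_URL = r'https?://(?:www\.)?digitalconcerthall\.com/(?P<language>[a-z]+)/(?P<type>film|concert)/(?P<id>[0-9]+)'

# ...and inside _real_extract:
# language, type_, video_id = self._match_valid_url(url).group('language', 'type', 'id')
# vid_info = self._download_json(
#     f'https://api.digitalconcerthall.com/v2/{type_}/{video_id}', video_id, headers={...})
```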
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/yt_dlp/extractor/digitalconcerthall.py b/yt_dlp/extractor/digitalconcerthall.py
--- a/yt_dlp/extractor/digitalconcerthall.py
+++ b/yt_dlp/extractor/digitalconcerthall.py
@@ -11,7 +11,7 @@
class DigitalConcertHallIE(InfoExtractor):
IE_DESC = 'DigitalConcertHall extractor'
- _VALID_URL = r'https?://(?:www\.)?digitalconcerthall\.com/(?P<language>[a-z]+)/concert/(?P<id>[0-9]+)'
+ _VALID_URL = r'https?://(?:www\.)?digitalconcerthall\.com/(?P<language>[a-z]+)/(?P<type>film|concert)/(?P<id>[0-9]+)'
_OAUTH_URL = 'https://api.digitalconcerthall.com/v2/oauth2/token'
_ACCESS_TOKEN = None
_NETRC_MACHINE = 'digitalconcerthall'
@@ -40,6 +40,19 @@
},
'params': {'skip_download': 'm3u8'},
'playlist_count': 3,
+ }, {
+ 'url': 'https://www.digitalconcerthall.com/en/film/388',
+ 'info_dict': {
+ 'id': '388',
+ 'ext': 'mp4',
+ 'title': 'The Berliner Philharmoniker and Frank Peter Zimmermann',
+ 'description': 'md5:cfe25a7044fa4be13743e5089b5b5eb2',
+ 'thumbnail': r're:^https?://images.digitalconcerthall.com/cms/thumbnails.*\.jpg$',
+ 'upload_date': '20220714',
+ 'timestamp': 1657785600,
+ 'album_artist': 'Frank Peter Zimmermann / Benedikt von Bernstorff / Jakob von Bernstorff',
+ },
+ 'params': {'skip_download': 'm3u8'},
}]
def _perform_login(self, username, password):
@@ -75,7 +88,7 @@
if not self._ACCESS_TOKEN:
self.raise_login_required(method='password')
- def _entries(self, items, language, **kwargs):
+ def _entries(self, items, language, type_, **kwargs):
for item in items:
video_id = item['id']
stream_info = self._download_json(
@@ -103,11 +116,11 @@
'start_time': chapter.get('time'),
'end_time': try_get(chapter, lambda x: x['time'] + x['duration']),
'title': chapter.get('text'),
- } for chapter in item['cuepoints']] if item.get('cuepoints') else None,
+ } for chapter in item['cuepoints']] if item.get('cuepoints') and type_ == 'concert' else None,
}
def _real_extract(self, url):
- language, video_id = self._match_valid_url(url).group('language', 'id')
+ language, type_, video_id = self._match_valid_url(url).group('language', 'type', 'id')
if not language:
language = 'en'
@@ -120,18 +133,18 @@
}]
vid_info = self._download_json(
- f'https://api.digitalconcerthall.com/v2/concert/{video_id}', video_id, headers={
+ f'https://api.digitalconcerthall.com/v2/{type_}/{video_id}', video_id, headers={
'Accept': 'application/json',
'Accept-Language': language
})
album_artist = ' / '.join(traverse_obj(vid_info, ('_links', 'artist', ..., 'name')) or '')
+ videos = [vid_info] if type_ == 'film' else traverse_obj(vid_info, ('_embedded', ..., ...))
return {
'_type': 'playlist',
'id': video_id,
'title': vid_info.get('title'),
- 'entries': self._entries(traverse_obj(vid_info, ('_embedded', ..., ...)), language,
- thumbnails=thumbnails, album_artist=album_artist),
+ 'entries': self._entries(videos, language, thumbnails=thumbnails, album_artist=album_artist, type_=type_),
'thumbnails': thumbnails,
'album_artist': album_artist,
}
| {"golden_diff": "diff --git a/yt_dlp/extractor/digitalconcerthall.py b/yt_dlp/extractor/digitalconcerthall.py\n--- a/yt_dlp/extractor/digitalconcerthall.py\n+++ b/yt_dlp/extractor/digitalconcerthall.py\n@@ -11,7 +11,7 @@\n \n class DigitalConcertHallIE(InfoExtractor):\n IE_DESC = 'DigitalConcertHall extractor'\n- _VALID_URL = r'https?://(?:www\\.)?digitalconcerthall\\.com/(?P<language>[a-z]+)/concert/(?P<id>[0-9]+)'\n+ _VALID_URL = r'https?://(?:www\\.)?digitalconcerthall\\.com/(?P<language>[a-z]+)/(?P<type>film|concert)/(?P<id>[0-9]+)'\n _OAUTH_URL = 'https://api.digitalconcerthall.com/v2/oauth2/token'\n _ACCESS_TOKEN = None\n _NETRC_MACHINE = 'digitalconcerthall'\n@@ -40,6 +40,19 @@\n },\n 'params': {'skip_download': 'm3u8'},\n 'playlist_count': 3,\n+ }, {\n+ 'url': 'https://www.digitalconcerthall.com/en/film/388',\n+ 'info_dict': {\n+ 'id': '388',\n+ 'ext': 'mp4',\n+ 'title': 'The Berliner Philharmoniker and Frank Peter Zimmermann',\n+ 'description': 'md5:cfe25a7044fa4be13743e5089b5b5eb2',\n+ 'thumbnail': r're:^https?://images.digitalconcerthall.com/cms/thumbnails.*\\.jpg$',\n+ 'upload_date': '20220714',\n+ 'timestamp': 1657785600,\n+ 'album_artist': 'Frank Peter Zimmermann / Benedikt von Bernstorff / Jakob von Bernstorff',\n+ },\n+ 'params': {'skip_download': 'm3u8'},\n }]\n \n def _perform_login(self, username, password):\n@@ -75,7 +88,7 @@\n if not self._ACCESS_TOKEN:\n self.raise_login_required(method='password')\n \n- def _entries(self, items, language, **kwargs):\n+ def _entries(self, items, language, type_, **kwargs):\n for item in items:\n video_id = item['id']\n stream_info = self._download_json(\n@@ -103,11 +116,11 @@\n 'start_time': chapter.get('time'),\n 'end_time': try_get(chapter, lambda x: x['time'] + x['duration']),\n 'title': chapter.get('text'),\n- } for chapter in item['cuepoints']] if item.get('cuepoints') else None,\n+ } for chapter in item['cuepoints']] if item.get('cuepoints') and type_ == 'concert' else None,\n }\n \n def _real_extract(self, url):\n- language, video_id = self._match_valid_url(url).group('language', 'id')\n+ language, type_, video_id = self._match_valid_url(url).group('language', 'type', 'id')\n if not language:\n language = 'en'\n \n@@ -120,18 +133,18 @@\n }]\n \n vid_info = self._download_json(\n- f'https://api.digitalconcerthall.com/v2/concert/{video_id}', video_id, headers={\n+ f'https://api.digitalconcerthall.com/v2/{type_}/{video_id}', video_id, headers={\n 'Accept': 'application/json',\n 'Accept-Language': language\n })\n album_artist = ' / '.join(traverse_obj(vid_info, ('_links', 'artist', ..., 'name')) or '')\n+ videos = [vid_info] if type_ == 'film' else traverse_obj(vid_info, ('_embedded', ..., ...))\n \n return {\n '_type': 'playlist',\n 'id': video_id,\n 'title': vid_info.get('title'),\n- 'entries': self._entries(traverse_obj(vid_info, ('_embedded', ..., ...)), language,\n- thumbnails=thumbnails, album_artist=album_artist),\n+ 'entries': self._entries(videos, language, thumbnails=thumbnails, album_artist=album_artist, type_=type_),\n 'thumbnails': thumbnails,\n 'album_artist': album_artist,\n }\n", "issue": "[DigitalConcertHall] cannot download videos under /films\n### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE\n\n- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\\* field\n\n### Checklist\n\n- [X] I'm reporting that yt-dlp is broken on a **supported** site\n- [X] I've verified that I'm running yt-dlp version **2023.03.04** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or 
later (specify commit)\n- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\n- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\nAll\n\n### Provide a description that is worded well enough to be understood\n\nA major part of the site Digital Concert Hall is [concert videos](https://www.digitalconcerthall.com/en/concerts) which yt-dlp handles just fine. But when I try to download videos [here](https://www.digitalconcerthall.com/en/films), e.g. [this one](https://www.digitalconcerthall.com/en/film/388) which lasts 44 minutes (can be played with the login credentials `--username [email protected] --password 3PXx-5.NDNmMD2!` which you can use), it only downloads the trailer which lasts 39 seconds. Using build from commit `ecfe479`.\n\n### Provide verbose output that clearly demonstrates the problem\n\n- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)\n- [X] If using API, add `'verbose': True` to `YoutubeDL` params instead\n- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below\n\n### Complete Verbose Output\n\n```shell\n[debug] Command-line config: ['https://www.digitalconcerthall.com/en/film/388', '--username', 'PRIVATE', '--password', 'PRIVATE', '-vU']\r\n[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8\r\n[debug] yt-dlp version [email protected] [392389b7d] (zip)\r\n[debug] Python 3.11.3 (CPython x86_64 64bit) - Linux-6.3.4-arch2-1-x86_64-with-glibc2.37 (OpenSSL 3.0.8 7 Feb 2023, glibc 2.37)\r\n[debug] exe versions: ffmpeg 6.0 (setts), ffprobe 6.0, rtmpdump 2.4\r\n[debug] Optional libraries: Cryptodome-3.17, brotli-1.0.9, certifi-2023.05.07, mutagen-1.46.0, sqlite3-2.6.0, websockets-11.0.3\r\n[debug] Proxy map: {}\r\n[debug] Loaded 1842 extractors\r\n[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest\r\nAvailable version: [email protected], Current version: [email protected]\r\nCurrent Build Hash: 5bfb7e4a15c47b2ff99b149b7c841e98ef606a4af18fd6330ddf3a0ed4a7ea19\r\nyt-dlp is up to date ([email protected])\r\n[generic] Extracting URL: https://www.digitalconcerthall.com/en/film/388\r\n[generic] 388: Downloading webpage\r\nWARNING: [generic] Falling back on generic information extractor\r\n[generic] 388: Extracting information\r\n[debug] Looking for embeds\r\n[debug] Identified a JSON LD\r\n[generic] Extracting URL: https://world-vod.dchdns.net/preview/388-t/preview.mp4#__youtubedl_smuggle=%7B%22force_videoid%22%3A+%22388%22%2C+%22to_generic%22%3A+true%2C+%22http_headers%22%3A+%7B%22Referer%22%3A+%22https%3A%2F%2Fwww.digitalconcerthall.com%2Fen%2Ffilm%2F388%22%7D%7D\r\n[generic] 388: 
Downloading webpage\r\nWARNING: [generic] URL could be a direct video link, returning it as such.\r\n[debug] Formats sorted by: hasvid, ie_pref, lang, quality, res, fps, hdr:12(7), vcodec:vp9.2(10), channels, acodec, filesize, fs_approx, tbr, vbr, abr, asr, proto, vext, aext, hasaud, source, id\r\n[debug] Default format spec: bestvideo*+bestaudio/best\r\n[info] 388: Downloading 1 format(s): 0\r\n[debug] Invoking http downloader on \"https://world-vod.dchdns.net/preview/388-t/preview.mp4\"\r\n[download] Destination: The Berliner Philharmoniker and Frank Peter Zimmermann [388].mp4\r\n[download] 100% of 16.16MiB in 00:00:07 at 2.05MiB/s\n```\n\n", "before_files": [{"content": "from .common import InfoExtractor\n\nfrom ..utils import (\n ExtractorError,\n parse_resolution,\n traverse_obj,\n try_get,\n urlencode_postdata,\n)\n\n\nclass DigitalConcertHallIE(InfoExtractor):\n IE_DESC = 'DigitalConcertHall extractor'\n _VALID_URL = r'https?://(?:www\\.)?digitalconcerthall\\.com/(?P<language>[a-z]+)/concert/(?P<id>[0-9]+)'\n _OAUTH_URL = 'https://api.digitalconcerthall.com/v2/oauth2/token'\n _ACCESS_TOKEN = None\n _NETRC_MACHINE = 'digitalconcerthall'\n _TESTS = [{\n 'note': 'Playlist with only one video',\n 'url': 'https://www.digitalconcerthall.com/en/concert/53201',\n 'info_dict': {\n 'id': '53201-1',\n 'ext': 'mp4',\n 'composer': 'Kurt Weill',\n 'title': '[Magic Night]',\n 'thumbnail': r're:^https?://images.digitalconcerthall.com/cms/thumbnails.*\\.jpg$',\n 'upload_date': '20210624',\n 'timestamp': 1624548600,\n 'duration': 2798,\n 'album_artist': 'Members of the Berliner Philharmoniker / Simon R\u00f6ssler',\n },\n 'params': {'skip_download': 'm3u8'},\n }, {\n 'note': 'Concert with several works and an interview',\n 'url': 'https://www.digitalconcerthall.com/en/concert/53785',\n 'info_dict': {\n 'id': '53785',\n 'album_artist': 'Berliner Philharmoniker / Kirill Petrenko',\n 'title': 'Kirill Petrenko conducts Mendelssohn and Shostakovich',\n },\n 'params': {'skip_download': 'm3u8'},\n 'playlist_count': 3,\n }]\n\n def _perform_login(self, username, password):\n token_response = self._download_json(\n self._OAUTH_URL,\n None, 'Obtaining token', errnote='Unable to obtain token', data=urlencode_postdata({\n 'affiliate': 'none',\n 'grant_type': 'device',\n 'device_vendor': 'unknown',\n 'app_id': 'dch.webapp',\n 'app_version': '1.0.0',\n 'client_secret': '2ySLN+2Fwb',\n }), headers={\n 'Content-Type': 'application/x-www-form-urlencoded',\n })\n self._ACCESS_TOKEN = token_response['access_token']\n try:\n self._download_json(\n self._OAUTH_URL,\n None, note='Logging in', errnote='Unable to login', data=urlencode_postdata({\n 'grant_type': 'password',\n 'username': username,\n 'password': password,\n }), headers={\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Referer': 'https://www.digitalconcerthall.com',\n 'Authorization': f'Bearer {self._ACCESS_TOKEN}'\n })\n except ExtractorError:\n self.raise_login_required(msg='Login info incorrect')\n\n def _real_initialize(self):\n if not self._ACCESS_TOKEN:\n self.raise_login_required(method='password')\n\n def _entries(self, items, language, **kwargs):\n for item in items:\n video_id = item['id']\n stream_info = self._download_json(\n self._proto_relative_url(item['_links']['streams']['href']), video_id, headers={\n 'Accept': 'application/json',\n 'Authorization': f'Bearer {self._ACCESS_TOKEN}',\n 'Accept-Language': language\n })\n\n m3u8_url = traverse_obj(\n stream_info, ('channel', lambda k, _: k.startswith('vod_mixed'), 'stream', 0, 'url'), 
get_all=False)\n formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', 'm3u8_native', fatal=False)\n\n yield {\n 'id': video_id,\n 'title': item.get('title'),\n 'composer': item.get('name_composer'),\n 'url': m3u8_url,\n 'formats': formats,\n 'duration': item.get('duration_total'),\n 'timestamp': traverse_obj(item, ('date', 'published')),\n 'description': item.get('short_description') or stream_info.get('short_description'),\n **kwargs,\n 'chapters': [{\n 'start_time': chapter.get('time'),\n 'end_time': try_get(chapter, lambda x: x['time'] + x['duration']),\n 'title': chapter.get('text'),\n } for chapter in item['cuepoints']] if item.get('cuepoints') else None,\n }\n\n def _real_extract(self, url):\n language, video_id = self._match_valid_url(url).group('language', 'id')\n if not language:\n language = 'en'\n\n thumbnail_url = self._html_search_regex(\n r'(https?://images\\.digitalconcerthall\\.com/cms/thumbnails/.*\\.jpg)',\n self._download_webpage(url, video_id), 'thumbnail')\n thumbnails = [{\n 'url': thumbnail_url,\n **parse_resolution(thumbnail_url)\n }]\n\n vid_info = self._download_json(\n f'https://api.digitalconcerthall.com/v2/concert/{video_id}', video_id, headers={\n 'Accept': 'application/json',\n 'Accept-Language': language\n })\n album_artist = ' / '.join(traverse_obj(vid_info, ('_links', 'artist', ..., 'name')) or '')\n\n return {\n '_type': 'playlist',\n 'id': video_id,\n 'title': vid_info.get('title'),\n 'entries': self._entries(traverse_obj(vid_info, ('_embedded', ..., ...)), language,\n thumbnails=thumbnails, album_artist=album_artist),\n 'thumbnails': thumbnails,\n 'album_artist': album_artist,\n }\n", "path": "yt_dlp/extractor/digitalconcerthall.py"}]} | 3,759 | 1,019 |
gh_patches_debug_9904 | rasdani/github-patches | git_diff | nipy__nipype-2432 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
workflow with iterables and cloned nodes fail when expanding iterables
### Summary
When running a workflow which includes a cloned node and iterables the workflow will fail when expanding the iterables because the id of the cloned node will be the same as the original one.
### Actual behavior
Will result in an error:
Traceback (most recent call last):
File "<ipython-input-55-177d6eaeef2c>", line 27, in <module>
workflow.run()
File "/data/eaxfjord/anaconda2/lib/python2.7/site-packages/nipype/pipeline/engine/workflows.py", line 592, in run
execgraph = generate_expanded_graph(deepcopy(flatgraph))
File "/data/eaxfjord/anaconda2/lib/python2.7/site-packages/nipype/pipeline/engine/utils.py", line 1042, in generate_expanded_graph
iterable_prefix, inode.synchronize)
File "/data/eaxfjord/anaconda2/lib/python2.7/site-packages/nipype/pipeline/engine/utils.py", line 733, in _merge_graphs
raise Exception(("Execution graph does not have a unique set of node "
Exception: Execution graph does not have a unique set of node names. Please rerun the workflow
### Expected behavior
The workflow should execute normally, without errors.
### How to replicate the behavior
The following workflow will produce the error.
### Script/Workflow details
```python
from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe
def addstr(string):
string = ('%s+2' % string )
return string
subject_list = ['sub-001', 'sub-002']
inputnode = pe.Node(niu.IdentityInterface(fields = ['subject']),
name = 'inputnode')
inputnode.iterables = [('subject', subject_list)]
node_1 = pe.Node(niu.Function(input_names='string',
output_names= 'string',
function = addstr),name='node_1')
node_2 = node_1.clone('node_2')
workflow = pe.Workflow(name='my_workflow')
workflow.connect([(inputnode, node_1, [('subject','string')]),
(node_1, node_2, [('string','string')])])
workflow.run()
```
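A workaround that appears to avoid the duplicate-id problem (a sketch only; it touches the private `_id` attribute, so treat it as a hack, not a fix):

```python
node_2 = node_1.clone('node_2')
node_2._id = 'node_2'  # clone() deep-copies everything, so both nodes otherwise share the id 'node_1'
```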
### Platform details:
/data/eaxfjord/anaconda2/lib/python2.7/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
{'nibabel_version': '2.2.1', 'sys_executable': '/data/eaxfjord/anaconda2/bin/python', 'networkx_version': '2.1', 'numpy_version': '1.14.0', 'sys_platform': 'linux2', 'sys_version': '2.7.13 |Anaconda custom (64-bit)| (default, Dec 20 2016, 23:09:15) \n[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)]', 'commit_source': 'installation', 'commit_hash': '0a5948a0', 'pkg_path': '/data/eaxfjord/anaconda2/lib/python2.7/site-packages/nipype', 'nipype_version': '1.0.0', 'traits_version': '4.6.0', 'scipy_version': '1.0.0'}
1.0.0
### Execution environment
- My python environment outside container
</issue>
<code>
[start of nipype/pipeline/engine/base.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
4 # vi: set ft=python sts=4 ts=4 sw=4 et:
5 """Defines functionality for pipelined execution of interfaces
6
7 The `EngineBase` class implements the more general view of a task.
8
9 .. testsetup::
10 # Change directory to provide relative paths for doctests
11 import os
12 filepath = os.path.dirname(os.path.realpath( __file__ ))
13 datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
14 os.chdir(datadir)
15
16 """
17 from __future__ import (print_function, division, unicode_literals,
18 absolute_import)
19 from builtins import object
20
21 from copy import deepcopy
22 import re
23 import numpy as np
24
25 from ... import config
26 from ...interfaces.base import DynamicTraitedSpec
27 from ...utils.filemanip import loadpkl, savepkl
28
29
30 class EngineBase(object):
31 """Defines common attributes and functions for workflows and nodes."""
32
33 def __init__(self, name=None, base_dir=None):
34 """ Initialize base parameters of a workflow or node
35
36 Parameters
37 ----------
38 name : string (mandatory)
39 Name of this node. Name must be alphanumeric and not contain any
40 special characters (e.g., '.', '@').
41 base_dir : string
42 base output directory (will be hashed before creations)
43 default=None, which results in the use of mkdtemp
44
45 """
46 self._hierarchy = None
47 self._name = None
48
49 self.base_dir = base_dir
50 self.config = deepcopy(config._sections)
51 self.name = name
52
53 @property
54 def name(self):
55 return self._name
56
57 @name.setter
58 def name(self, name):
59 if not name or not re.match(r'^[\w-]+$', name):
60 raise ValueError('[Workflow|Node] name "%s" is not valid.' % name)
61 self._name = name
62
63 @property
64 def fullname(self):
65 if self._hierarchy:
66 return '%s.%s' % (self._hierarchy, self.name)
67 return self.name
68
69 @property
70 def inputs(self):
71 raise NotImplementedError
72
73 @property
74 def outputs(self):
75 raise NotImplementedError
76
77 def clone(self, name):
78 """Clone an EngineBase object
79
80 Parameters
81 ----------
82
83 name : string (mandatory)
84 A clone of node or workflow must have a new name
85 """
86 if name == self.name:
87 raise ValueError('Cloning requires a new name, "%s" is in use.' % name)
88 clone = deepcopy(self)
89 clone.name = name
90 return clone
91
92 def _check_outputs(self, parameter):
93 return hasattr(self.outputs, parameter)
94
95 def _check_inputs(self, parameter):
96 if isinstance(self.inputs, DynamicTraitedSpec):
97 return True
98 return hasattr(self.inputs, parameter)
99
100 def __str__(self):
101 return self.fullname
102
103 def save(self, filename=None):
104 if filename is None:
105 filename = 'temp.pklz'
106 savepkl(filename, self)
107
108 def load(self, filename):
109 if '.npz' in filename:
110 DeprecationWarning(('npz files will be deprecated in the next '
111 'release. you can use numpy to open them.'))
112 return np.load(filename)
113 return loadpkl(filename)
114
[end of nipype/pipeline/engine/base.py]
</code>
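One possible direction (a hedged sketch, not necessarily the expected patch): keep the cloned node's private `_id` in sync with its new name inside `EngineBase.clone`, since the expanded execution graph is keyed on that id:

```python
def clone(self, name):
    if name == self.name:
        raise ValueError('Cloning requires a new name, "%s" is in use.' % name)
    clone = deepcopy(self)
    clone.name = name
    if hasattr(clone, '_id'):   # Nodes carry an _id; other EngineBase objects may not
        clone._id = name
    return clone
```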
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nipype/pipeline/engine/base.py b/nipype/pipeline/engine/base.py
--- a/nipype/pipeline/engine/base.py
+++ b/nipype/pipeline/engine/base.py
@@ -84,9 +84,12 @@
A clone of node or workflow must have a new name
"""
if name == self.name:
- raise ValueError('Cloning requires a new name, "%s" is in use.' % name)
+ raise ValueError('Cloning requires a new name, "%s" is '
+ 'in use.' % name)
clone = deepcopy(self)
clone.name = name
+ if hasattr(clone, '_id'):
+ clone._id = name
return clone
def _check_outputs(self, parameter):
| {"golden_diff": "diff --git a/nipype/pipeline/engine/base.py b/nipype/pipeline/engine/base.py\n--- a/nipype/pipeline/engine/base.py\n+++ b/nipype/pipeline/engine/base.py\n@@ -84,9 +84,12 @@\n A clone of node or workflow must have a new name\n \"\"\"\n if name == self.name:\n- raise ValueError('Cloning requires a new name, \"%s\" is in use.' % name)\n+ raise ValueError('Cloning requires a new name, \"%s\" is '\n+ 'in use.' % name)\n clone = deepcopy(self)\n clone.name = name\n+ if hasattr(clone, '_id'):\n+ clone._id = name\n return clone\n \n def _check_outputs(self, parameter):\n", "issue": "workflow with iterables and cloned nodes fail when expanding iterables\n### Summary\r\nWhen running a workflow which includes a cloned node and iterables the workflow will fail when expanding the iterables because the id of the cloned node will be the same as the original one.\r\n\r\n### Actual behavior\r\nWill result in an error:\r\nTraceback (most recent call last):\r\n\r\n File \"<ipython-input-55-177d6eaeef2c>\", line 27, in <module>\r\n workflow.run()\r\n\r\n File \"/data/eaxfjord/anaconda2/lib/python2.7/site-packages/nipype/pipeline/engine/workflows.py\", line 592, in run\r\n execgraph = generate_expanded_graph(deepcopy(flatgraph))\r\n\r\n File \"/data/eaxfjord/anaconda2/lib/python2.7/site-packages/nipype/pipeline/engine/utils.py\", line 1042, in generate_expanded_graph\r\n iterable_prefix, inode.synchronize)\r\n\r\n File \"/data/eaxfjord/anaconda2/lib/python2.7/site-packages/nipype/pipeline/engine/utils.py\", line 733, in _merge_graphs\r\n raise Exception((\"Execution graph does not have a unique set of node \"\r\n\r\nException: Execution graph does not have a unique set of node names. Please rerun the workflow\r\n\r\n### Expected behavior\r\nWill execute normally without the errors.\r\n\r\n### How to replicate the behavior\r\nThe following workflow will produce the error.\r\n\r\n### Script/Workflow details\r\n```python\r\nfrom nipype.interfaces import utility as niu\r\nfrom nipype.pipeline import engine as pe\r\n\r\ndef addstr(string):\r\n string = ('%s+2' % string )\r\n return string\r\n\r\nsubject_list = ['sub-001', 'sub-002']\r\n\r\ninputnode = pe.Node(niu.IdentityInterface(fields = ['subject']),\r\n name = 'inputnode')\r\ninputnode.iterables = [('subject', subject_list)]\r\n\r\nnode_1 = pe.Node(niu.Function(input_names='string',\r\n output_names= 'string',\r\n function = addstr),name='node_1')\r\nnode_2 = node_1.clone('node_2')\r\n\r\nworkflow = pe.Workflow(name='my_workflow')\r\nworkflow.connect([(inputnode, node_1, [('subject','string')]),\r\n (node_1, node_2, [('string','string')])])\r\nworkflow.run()\r\n```\r\n### Platform details:\r\n/data/eaxfjord/anaconda2/lib/python2.7/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. 
In future, it will be treated as `np.float64 == np.dtype(float).type`.\r\n from ._conv import register_converters as _register_converters\r\n{'nibabel_version': '2.2.1', 'sys_executable': '/data/eaxfjord/anaconda2/bin/python', 'networkx_version': '2.1', 'numpy_version': '1.14.0', 'sys_platform': 'linux2', 'sys_version': '2.7.13 |Anaconda custom (64-bit)| (default, Dec 20 2016, 23:09:15) \\n[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)]', 'commit_source': 'installation', 'commit_hash': '0a5948a0', 'pkg_path': '/data/eaxfjord/anaconda2/lib/python2.7/site-packages/nipype', 'nipype_version': '1.0.0', 'traits_version': '4.6.0', 'scipy_version': '1.0.0'}\r\n1.0.0\r\n\r\n\r\n### Execution environment\r\n- My python environment outside container\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Defines functionality for pipelined execution of interfaces\n\nThe `EngineBase` class implements the more general view of a task.\n\n .. testsetup::\n # Change directory to provide relative paths for doctests\n import os\n filepath = os.path.dirname(os.path.realpath( __file__ ))\n datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))\n os.chdir(datadir)\n\n\"\"\"\nfrom __future__ import (print_function, division, unicode_literals,\n absolute_import)\nfrom builtins import object\n\nfrom copy import deepcopy\nimport re\nimport numpy as np\n\nfrom ... import config\nfrom ...interfaces.base import DynamicTraitedSpec\nfrom ...utils.filemanip import loadpkl, savepkl\n\n\nclass EngineBase(object):\n \"\"\"Defines common attributes and functions for workflows and nodes.\"\"\"\n\n def __init__(self, name=None, base_dir=None):\n \"\"\" Initialize base parameters of a workflow or node\n\n Parameters\n ----------\n name : string (mandatory)\n Name of this node. Name must be alphanumeric and not contain any\n special characters (e.g., '.', '@').\n base_dir : string\n base output directory (will be hashed before creations)\n default=None, which results in the use of mkdtemp\n\n \"\"\"\n self._hierarchy = None\n self._name = None\n\n self.base_dir = base_dir\n self.config = deepcopy(config._sections)\n self.name = name\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, name):\n if not name or not re.match(r'^[\\w-]+$', name):\n raise ValueError('[Workflow|Node] name \"%s\" is not valid.' % name)\n self._name = name\n\n @property\n def fullname(self):\n if self._hierarchy:\n return '%s.%s' % (self._hierarchy, self.name)\n return self.name\n\n @property\n def inputs(self):\n raise NotImplementedError\n\n @property\n def outputs(self):\n raise NotImplementedError\n\n def clone(self, name):\n \"\"\"Clone an EngineBase object\n\n Parameters\n ----------\n\n name : string (mandatory)\n A clone of node or workflow must have a new name\n \"\"\"\n if name == self.name:\n raise ValueError('Cloning requires a new name, \"%s\" is in use.' 
% name)\n clone = deepcopy(self)\n clone.name = name\n return clone\n\n def _check_outputs(self, parameter):\n return hasattr(self.outputs, parameter)\n\n def _check_inputs(self, parameter):\n if isinstance(self.inputs, DynamicTraitedSpec):\n return True\n return hasattr(self.inputs, parameter)\n\n def __str__(self):\n return self.fullname\n\n def save(self, filename=None):\n if filename is None:\n filename = 'temp.pklz'\n savepkl(filename, self)\n\n def load(self, filename):\n if '.npz' in filename:\n DeprecationWarning(('npz files will be deprecated in the next '\n 'release. you can use numpy to open them.'))\n return np.load(filename)\n return loadpkl(filename)\n", "path": "nipype/pipeline/engine/base.py"}]} | 2,360 | 169 |
gh_patches_debug_22285 | rasdani/github-patches | git_diff | cocotb__cocotb-1410 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Logging non-string messages leads to AttributeError
Test code:
```
dut._log.info(dut.empty.value)
# dut.empty is a signal in the DUT, .value gets its BinaryValue object
```
This worked fine in older versions of cocotb; I haven't attempted to find the exact regression range yet. Now I get this trace:
```
Traceback (most recent call last):
File "/usr/lib64/python3.7/logging/__init__.py", line 1034, in emit
msg = self.format(record)
File "/usr/lib64/python3.7/logging/__init__.py", line 880, in format
return fmt.format(record)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/log.py", line 201, in format
msg = '\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\n')])
AttributeError: 'BinaryValue' object has no attribute 'split'
Call stack:
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/scheduler.py", line 355, in react
self._event_loop(trigger)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/scheduler.py", line 448, in _event_loop
self.schedule(coro, trigger=trigger)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/scheduler.py", line 758, in schedule
result = coroutine._advance(send_outcome)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/decorators.py", line 264, in _advance
return super(RunningTest, self)._advance(outcome)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/decorators.py", line 146, in _advance
return outcome.send(self._coro)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/outcomes.py", line 38, in send
return gen.send(self.value)
File "path/to/test_fifo.py", line 45, in test_fifo_manual
dut._log.info(dut.empty.value)
```
I'll try to come up with a test and a fix soon-ish.
</issue>
<code>
[start of cocotb/log.py]
1 # Copyright (c) 2013, 2018 Potential Ventures Ltd
2 # Copyright (c) 2013 SolarFlare Communications Inc
3 # All rights reserved.
4 #
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are met:
7 # * Redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer.
9 # * Redistributions in binary form must reproduce the above copyright
10 # notice, this list of conditions and the following disclaimer in the
11 # documentation and/or other materials provided with the distribution.
12 # * Neither the name of Potential Ventures Ltd,
13 # SolarFlare Communications Inc nor the
14 # names of its contributors may be used to endorse or promote products
15 # derived from this software without specific prior written permission.
16 #
17 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
21 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 """
29 Everything related to logging
30 """
31
32 import os
33 import sys
34 import logging
35 import warnings
36
37 from cocotb.utils import get_sim_time, want_color_output
38
39 import cocotb.ANSI as ANSI
40
41 if "COCOTB_REDUCED_LOG_FMT" in os.environ:
42 _suppress = True
43 else:
44 _suppress = False
45
46 # Column alignment
47 _LEVEL_CHARS = len("CRITICAL") # noqa
48 _RECORD_CHARS = 35 # noqa
49 _FILENAME_CHARS = 20 # noqa
50 _LINENO_CHARS = 4 # noqa
51 _FUNCNAME_CHARS = 31 # noqa
52
53
54 def default_config():
55 """ Apply the default cocotb log formatting to the root logger.
56
57 This hooks up the logger to write to stdout, using either
58 :class:`SimColourLogFormatter` or :class:`SimLogFormatter` depending
59 on whether colored output is requested.
60
61 The logging level for cocotb logs is set based on the
62 :envvar:`COCOTB_LOG_LEVEL` environment variable, which defaults to ``INFO``.
63
64 If desired, this logging configuration can be overwritten by calling
65 ``logging.basicConfig(..., force=True)`` (in Python 3.8 onwards), or by
66 manually resetting the root logger instance, for which examples can be
67 found online.
68 """
69 # construct an appropriate handler
70 hdlr = logging.StreamHandler(sys.stdout)
71 if want_color_output():
72 hdlr.setFormatter(SimColourLogFormatter())
73 else:
74 hdlr.setFormatter(SimLogFormatter())
75
76 logging.setLoggerClass(SimBaseLog) # For backwards compatibility
77 logging.basicConfig()
78 logging.getLogger().handlers = [hdlr] # overwrite default handlers
79
80 # apply level settings for cocotb
81 log = logging.getLogger('cocotb')
82 level = os.getenv("COCOTB_LOG_LEVEL", "INFO")
83 try:
84 _default_log = getattr(logging, level)
85 except AttributeError:
86 log.error("Unable to set logging level to %r" % level)
87 _default_log = logging.INFO
88 log.setLevel(_default_log)
89
90 # Notify GPI of log level, which it uses as an optimization to avoid
91 # calling into Python.
92 if "COCOTB_SIM" in os.environ:
93 import simulator
94 simulator.log_level(_default_log)
95
96
97 class SimBaseLog(logging.getLoggerClass()):
98 """ This class only exists for backwards compatibility """
99
100 @property
101 def logger(self):
102 warnings.warn(
103 "the .logger attribute should not be used now that `SimLog` "
104 "returns a native logger instance directly.",
105 DeprecationWarning, stacklevel=2)
106 return self
107
108 @property
109 def colour(self):
110 warnings.warn(
111 "the .colour attribute may be removed in future, use the "
112 "equivalent `cocotb.utils.want_color_output()` instead",
113 DeprecationWarning, stacklevel=2)
114 return want_color_output()
115
116
117 # this used to be a class, hence the unusual capitalization
118 def SimLog(name, ident=None):
119 """ Like logging.getLogger, but append a numeric identifier to the name """
120 if ident is not None:
121 name = "%s.0x%x" % (name, ident)
122 return logging.getLogger(name)
123
124
125 class SimLogFormatter(logging.Formatter):
126 """Log formatter to provide consistent log message handling."""
127
128 # Removes the arguments from the base class. Docstring needed to make
129 # sphinx happy.
130 def __init__(self):
131 """ Takes no arguments. """
132 super().__init__()
133
134 # Justify and truncate
135 @staticmethod
136 def ljust(string, chars):
137 if len(string) > chars:
138 return ".." + string[(chars - 2) * -1:]
139 return string.ljust(chars)
140
141 @staticmethod
142 def rjust(string, chars):
143 if len(string) > chars:
144 return ".." + string[(chars - 2) * -1:]
145 return string.rjust(chars)
146
147 def _format(self, level, record, msg, coloured=False):
148 time_ns = get_sim_time('ns')
149 simtime = "%6.2fns" % (time_ns)
150 prefix = simtime.rjust(11) + ' ' + level + ' '
151 if not _suppress:
152 prefix += self.ljust(record.name, _RECORD_CHARS) + \
153 self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS) + \
154 ':' + self.ljust(str(record.lineno), _LINENO_CHARS) + \
155 ' in ' + self.ljust(str(record.funcName), _FUNCNAME_CHARS) + ' '
156
157 # these lines are copied from the builtin logger
158 if record.exc_info:
159 # Cache the traceback text to avoid converting it multiple times
160 # (it's constant anyway)
161 if not record.exc_text:
162 record.exc_text = self.formatException(record.exc_info)
163 if record.exc_text:
164 if msg[-1:] != "\n":
165 msg = msg + "\n"
166 msg = msg + record.exc_text
167
168 prefix_len = len(prefix)
169 if coloured:
170 prefix_len -= (len(level) - _LEVEL_CHARS)
171 pad = "\n" + " " * (prefix_len)
172 return prefix + pad.join(msg.split('\n'))
173
174 def format(self, record):
175 """Prettify the log output, annotate with simulation time"""
176 if record.args:
177 msg = record.msg % record.args
178 else:
179 msg = record.msg
180
181 msg = str(msg)
182 level = record.levelname.ljust(_LEVEL_CHARS)
183
184 return self._format(level, record, msg)
185
186
187 class SimColourLogFormatter(SimLogFormatter):
188 """Log formatter to provide consistent log message handling."""
189
190 loglevel2colour = {
191 logging.DEBUG : "%s",
192 logging.INFO : ANSI.COLOR_INFO + "%s" + ANSI.COLOR_DEFAULT,
193 logging.WARNING : ANSI.COLOR_WARNING + "%s" + ANSI.COLOR_DEFAULT,
194 logging.ERROR : ANSI.COLOR_ERROR + "%s" + ANSI.COLOR_DEFAULT,
195 logging.CRITICAL: ANSI.COLOR_CRITICAL + "%s" + ANSI.COLOR_DEFAULT,
196 }
197
198 def format(self, record):
199 """Prettify the log output, annotate with simulation time"""
200
201 if record.args:
202 msg = record.msg % record.args
203 else:
204 msg = record.msg
205
206 # Need to colour each line in case coloring is applied in the message
207 msg = '\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\n')])
208 level = (SimColourLogFormatter.loglevel2colour[record.levelno] %
209 record.levelname.ljust(_LEVEL_CHARS))
210
211 return self._format(level, record, msg, coloured=True)
212
213
214 def _filter_from_c(logger_name, level):
215 return logging.getLogger(logger_name).isEnabledFor(level)
216
217
218 def _log_from_c(logger_name, level, filename, lineno, msg, function_name):
219 """
220 This is for use from the C world, and allows us to insert C stack
221 information.
222 """
223 logger = logging.getLogger(logger_name)
224 if logger.isEnabledFor(level):
225 record = logger.makeRecord(
226 logger.name,
227 level,
228 filename,
229 lineno,
230 msg,
231 None,
232 None,
233 function_name
234 )
235 logger.handle(record)
236
[end of cocotb/log.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cocotb/log.py b/cocotb/log.py
--- a/cocotb/log.py
+++ b/cocotb/log.py
@@ -167,12 +167,8 @@
def format(self, record):
"""Prettify the log output, annotate with simulation time"""
- if record.args:
- msg = record.msg % record.args
- else:
- msg = record.msg
- msg = str(msg)
+ msg = record.getMessage()
level = record.levelname.ljust(_LEVEL_CHARS)
return self._format(level, record, msg)
@@ -192,10 +188,7 @@
def format(self, record):
"""Prettify the log output, annotate with simulation time"""
- if record.args:
- msg = record.msg % record.args
- else:
- msg = record.msg
+ msg = record.getMessage()
# Need to colour each line in case coloring is applied in the message
msg = '\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\n')])
| {"golden_diff": "diff --git a/cocotb/log.py b/cocotb/log.py\n--- a/cocotb/log.py\n+++ b/cocotb/log.py\n@@ -167,12 +167,8 @@\n \n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n- if record.args:\n- msg = record.msg % record.args\n- else:\n- msg = record.msg\n \n- msg = str(msg)\n+ msg = record.getMessage()\n level = record.levelname.ljust(_LEVEL_CHARS)\n \n return self._format(level, record, msg)\n@@ -192,10 +188,7 @@\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n \n- if record.args:\n- msg = record.msg % record.args\n- else:\n- msg = record.msg\n+ msg = record.getMessage()\n \n # Need to colour each line in case coloring is applied in the message\n msg = '\\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\\n')])\n", "issue": "Logging non-string messages leads to AttributeError\nTest code:\r\n\r\n```\r\ndut._log.info(dut.empty.value)\r\n# dut.empty is a signal in the DUT, .value gets its BinaryValue object\r\n```\r\n\r\nThis worked fine in older versions of cocotb, haven't attempted to find exact regression range yet. Now I get this trace:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/lib64/python3.7/logging/__init__.py\", line 1034, in emit\r\n msg = self.format(record)\r\n File \"/usr/lib64/python3.7/logging/__init__.py\", line 880, in format\r\n return fmt.format(record)\r\n File \"/home/philipp/.local/lib/python3.7/site-packages/cocotb/log.py\", line 201, in format\r\n msg = '\\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\\n')])\r\nAttributeError: 'BinaryValue' object has no attribute 'split'\r\nCall stack:\r\n File \"/home/philipp/.local/lib/python3.7/site-packages/cocotb/scheduler.py\", line 355, in react\r\n self._event_loop(trigger)\r\n File \"/home/philipp/.local/lib/python3.7/site-packages/cocotb/scheduler.py\", line 448, in _event_loop\r\n self.schedule(coro, trigger=trigger)\r\n File \"/home/philipp/.local/lib/python3.7/site-packages/cocotb/scheduler.py\", line 758, in schedule\r\n result = coroutine._advance(send_outcome)\r\n File \"/home/philipp/.local/lib/python3.7/site-packages/cocotb/decorators.py\", line 264, in _advance\r\n return super(RunningTest, self)._advance(outcome)\r\n File \"/home/philipp/.local/lib/python3.7/site-packages/cocotb/decorators.py\", line 146, in _advance\r\n return outcome.send(self._coro)\r\n File \"/home/philipp/.local/lib/python3.7/site-packages/cocotb/outcomes.py\", line 38, in send\r\n return gen.send(self.value)\r\n File \"path/to/test_fifo.py\", line 45, in test_fifo_manual\r\n dut._log.info(dut.empty.value)\r\n```\r\n\r\nI'll try to come up with a test and a fix soon-ish.\n", "before_files": [{"content": "# Copyright (c) 2013, 2018 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its 
contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nEverything related to logging\n\"\"\"\n\nimport os\nimport sys\nimport logging\nimport warnings\n\nfrom cocotb.utils import get_sim_time, want_color_output\n\nimport cocotb.ANSI as ANSI\n\nif \"COCOTB_REDUCED_LOG_FMT\" in os.environ:\n _suppress = True\nelse:\n _suppress = False\n\n# Column alignment\n_LEVEL_CHARS = len(\"CRITICAL\") # noqa\n_RECORD_CHARS = 35 # noqa\n_FILENAME_CHARS = 20 # noqa\n_LINENO_CHARS = 4 # noqa\n_FUNCNAME_CHARS = 31 # noqa\n\n\ndef default_config():\n \"\"\" Apply the default cocotb log formatting to the root logger.\n\n This hooks up the logger to write to stdout, using either\n :class:`SimColourLogFormatter` or :class:`SimLogFormatter` depending\n on whether colored output is requested.\n\n The logging level for cocotb logs is set based on the\n :envvar:`COCOTB_LOG_LEVEL` environment variable, which defaults to ``INFO``.\n\n If desired, this logging configuration can be overwritten by calling\n ``logging.basicConfig(..., force=True)`` (in Python 3.8 onwards), or by\n manually resetting the root logger instance, for which examples can be\n found online.\n \"\"\"\n # construct an appropriate handler\n hdlr = logging.StreamHandler(sys.stdout)\n if want_color_output():\n hdlr.setFormatter(SimColourLogFormatter())\n else:\n hdlr.setFormatter(SimLogFormatter())\n\n logging.setLoggerClass(SimBaseLog) # For backwards compatibility\n logging.basicConfig()\n logging.getLogger().handlers = [hdlr] # overwrite default handlers\n\n # apply level settings for cocotb\n log = logging.getLogger('cocotb')\n level = os.getenv(\"COCOTB_LOG_LEVEL\", \"INFO\")\n try:\n _default_log = getattr(logging, level)\n except AttributeError:\n log.error(\"Unable to set logging level to %r\" % level)\n _default_log = logging.INFO\n log.setLevel(_default_log)\n\n # Notify GPI of log level, which it uses as an optimization to avoid\n # calling into Python.\n if \"COCOTB_SIM\" in os.environ:\n import simulator\n simulator.log_level(_default_log)\n\n\nclass SimBaseLog(logging.getLoggerClass()):\n \"\"\" This class only exists for backwards compatibility \"\"\"\n\n @property\n def logger(self):\n warnings.warn(\n \"the .logger attribute should not be used now that `SimLog` \"\n \"returns a native logger instance directly.\",\n DeprecationWarning, stacklevel=2)\n return self\n\n @property\n def colour(self):\n warnings.warn(\n \"the .colour attribute may be removed in future, use the \"\n \"equivalent `cocotb.utils.want_color_output()` instead\",\n DeprecationWarning, stacklevel=2)\n return want_color_output()\n\n\n# this used to be a class, hence the unusual capitalization\ndef SimLog(name, 
ident=None):\n \"\"\" Like logging.getLogger, but append a numeric identifier to the name \"\"\"\n if ident is not None:\n name = \"%s.0x%x\" % (name, ident)\n return logging.getLogger(name)\n\n\nclass SimLogFormatter(logging.Formatter):\n \"\"\"Log formatter to provide consistent log message handling.\"\"\"\n\n # Removes the arguments from the base class. Docstring needed to make\n # sphinx happy.\n def __init__(self):\n \"\"\" Takes no arguments. \"\"\"\n super().__init__()\n\n # Justify and truncate\n @staticmethod\n def ljust(string, chars):\n if len(string) > chars:\n return \"..\" + string[(chars - 2) * -1:]\n return string.ljust(chars)\n\n @staticmethod\n def rjust(string, chars):\n if len(string) > chars:\n return \"..\" + string[(chars - 2) * -1:]\n return string.rjust(chars)\n\n def _format(self, level, record, msg, coloured=False):\n time_ns = get_sim_time('ns')\n simtime = \"%6.2fns\" % (time_ns)\n prefix = simtime.rjust(11) + ' ' + level + ' '\n if not _suppress:\n prefix += self.ljust(record.name, _RECORD_CHARS) + \\\n self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS) + \\\n ':' + self.ljust(str(record.lineno), _LINENO_CHARS) + \\\n ' in ' + self.ljust(str(record.funcName), _FUNCNAME_CHARS) + ' '\n\n # these lines are copied from the builtin logger\n if record.exc_info:\n # Cache the traceback text to avoid converting it multiple times\n # (it's constant anyway)\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n if msg[-1:] != \"\\n\":\n msg = msg + \"\\n\"\n msg = msg + record.exc_text\n\n prefix_len = len(prefix)\n if coloured:\n prefix_len -= (len(level) - _LEVEL_CHARS)\n pad = \"\\n\" + \" \" * (prefix_len)\n return prefix + pad.join(msg.split('\\n'))\n\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n if record.args:\n msg = record.msg % record.args\n else:\n msg = record.msg\n\n msg = str(msg)\n level = record.levelname.ljust(_LEVEL_CHARS)\n\n return self._format(level, record, msg)\n\n\nclass SimColourLogFormatter(SimLogFormatter):\n \"\"\"Log formatter to provide consistent log message handling.\"\"\"\n\n loglevel2colour = {\n logging.DEBUG : \"%s\",\n logging.INFO : ANSI.COLOR_INFO + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.WARNING : ANSI.COLOR_WARNING + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.ERROR : ANSI.COLOR_ERROR + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.CRITICAL: ANSI.COLOR_CRITICAL + \"%s\" + ANSI.COLOR_DEFAULT,\n }\n\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n\n if record.args:\n msg = record.msg % record.args\n else:\n msg = record.msg\n\n # Need to colour each line in case coloring is applied in the message\n msg = '\\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\\n')])\n level = (SimColourLogFormatter.loglevel2colour[record.levelno] %\n record.levelname.ljust(_LEVEL_CHARS))\n\n return self._format(level, record, msg, coloured=True)\n\n\ndef _filter_from_c(logger_name, level):\n return logging.getLogger(logger_name).isEnabledFor(level)\n\n\ndef _log_from_c(logger_name, level, filename, lineno, msg, function_name):\n \"\"\"\n This is for use from the C world, and allows us to insert C stack\n information.\n \"\"\"\n logger = logging.getLogger(logger_name)\n if logger.isEnabledFor(level):\n record = logger.makeRecord(\n logger.name,\n level,\n filename,\n lineno,\n msg,\n None,\n None,\n function_name\n )\n logger.handle(record)\n", "path": 
"cocotb/log.py"}]} | 3,628 | 257 |
gh_patches_debug_7100 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1665 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: Dudley Council, UK - Not Loading
### I Have A Problem With:
A specific source
### What's Your Problem
Now that the Xmas alternative dates have been removed from the council website, it's no longer loading my bin dates into my calendar and is showing the below error messages in the HA log file
### Source (if relevant)
dudley_gov_uk
### Logs
```Shell
2024-01-10 01:19:15.591 ERROR (SyncWorker_4) [waste_collection_schedule.source_shell] fetch failed for source Dudley Metropolitan Borough Council:
Traceback (most recent call last):
File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py", line 134, in fetch
entries = self._source.fetch()
^^^^^^^^^^^^^^^^^^^^
File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py", line 107, in fetch
xmas_map = self.get_xmas_map(footer_panel)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py", line 71, in get_xmas_map
footer_panel.find("table").find("tr"),
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
AttributeError: 'NoneType' object has no attribute 'find'
```
### Relevant Configuration
_No response_
### Checklist Source Error
- [x] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [x] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
</issue>
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py]
1 import re
2 from datetime import date, datetime, timedelta
3
4 import requests
5 from bs4 import BeautifulSoup
6 from waste_collection_schedule import Collection # type: ignore[attr-defined]
7
8 TITLE = "Dudley Metropolitan Borough Council"
9 DESCRIPTION = "Source for Dudley Metropolitan Borough Council, UK."
10 URL = "https://dudley.gov.uk"
11 TEST_CASES = {
12 "Test_001": {"uprn": "90090715"},
13 "Test_002": {"uprn": 90104555},
14 "Test_003": {"uprn": "90164803"},
15 "Test_004": {"uprn": 90092621},
16 }
17 ICON_MAP = {"RECYCLING": "mdi:recycle", "GARDEN": "mdi:leaf", "REFUSE": "mdi:trash-can"}
18 REGEX = {
19 "DATES": r"(\d+ \w{3})",
20 "DAYS": r"every: (Monday|Tuesday|Wednesday|Thursday|Friday)",
21 }
22 DAYS = {
23 "Monday": 0,
24 "Tuesday": 1,
25 "Wednesday": 2,
26 "Thursday": 3,
27 "Friday": 4,
28 "Saturday": 5,
29 "Sunday": 6,
30 }
31
32
33 class Source:
34 def __init__(self, uprn: str | int):
35 self._uprn = str(uprn)
36
37 def check_date(self, d: str, t: datetime, y: int):
38 """
39 Get date, append year, and increment year if date is >1 month in the past.
40
41 This tries to deal year-end dates when the YEAR is missing
42 """
43 d += " " + str(y)
44 try:
45 date = datetime.strptime(d, "%d %b %Y")
46 except ValueError:
47 date = datetime.strptime(d, "%A %d %b %Y")
48 if (date - t) < timedelta(days=-31):
49 date = date.replace(year=date.year + 1)
50 return date.date()
51
52 def append_entries(self, d: datetime, w: str, e: list) -> list:
53 e.append(
54 Collection(
55 date=d,
56 t=w,
57 icon=ICON_MAP.get(w.upper()),
58 )
59 )
60 return e
61
62 def get_xmas_map(self, footer_panel) -> dict[date, date]:
63 if not (
64 footer_panel
65 and footer_panel.find("table")
66 and footer_panel.find("table").find("tr")
67 ):
68 print(
69 footer_panel,
70 footer_panel.find("table"),
71 footer_panel.find("table").find("tr"),
72 )
73 return {}
74 xmas_map: dict = {}
75 today = datetime.now()
76 yr = int(today.year)
77 for tr in footer_panel.find("table").findAll("tr")[1:]:
78 try:
79 moved, moved_to = tr.findAll("td")
80 moved = self.check_date(moved.text, today, yr)
81 moved_to = self.check_date(moved_to.text, today, yr)
82 xmas_map[moved] = moved_to
83 except Exception as e:
84 print(e)
85 continue
86 return xmas_map
87
88 def fetch(self):
89 today = datetime.now()
90 today = today.replace(hour=0, minute=0, second=0, microsecond=0)
91 yr = int(today.year)
92
93 s = requests.Session()
94 r = s.get(
95 f"https://maps.dudley.gov.uk/?action=SetAddress&UniqueId={self._uprn}"
96 )
97 soup = BeautifulSoup(r.text, "html.parser")
98
99 panel = soup.find("div", {"aria-label": "Refuse and Recycling Collection"})
100 panel_data = panel.find("div", {"class": "atPanelData"})
101 waste_data = panel_data.text.split("Next")[
102 1:
103 ] # remove first element it just contains general info
104
105 # get table of holiday moved dates (only around xmas)
106 footer_panel = panel.find("div", {"class": "atPanelFooter"})
107 xmas_map = self.get_xmas_map(footer_panel)
108
109 entries = []
110 # Deal with Recycling and Garden collections
111 for item in waste_data:
112 text = item.replace("\r\n", "").strip()
113 if "recycling" in text:
114 dates = re.findall(REGEX["DATES"], text)
115 for dt in dates:
116 dt = self.check_date(dt, today, yr)
117 dt = xmas_map.get(dt, dt)
118 self.append_entries(dt, "Recycling", entries)
119 elif "garden" in text:
120 dates = re.findall(REGEX["DATES"], text)
121 for dt in dates:
122 dt = self.check_date(dt, today, yr)
123 dt = xmas_map.get(dt, dt)
124 self.append_entries(dt, "Garden", entries)
125
126 # Refuse collections only have a DAY not a date, so work out dates for the next few collections
127 refuse_day = re.findall(REGEX["DAYS"], panel_data.text)[0]
128 refuse_date = today + timedelta((int(DAYS[refuse_day]) - today.weekday()) % 7)
129 for i in range(0, 4):
130 temp_date = (refuse_date + timedelta(days=7 * i)).date()
131 temp_date = xmas_map.get(temp_date, temp_date)
132 self.append_entries(temp_date, "Refuse", entries)
133
134 return entries
135
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py
@@ -65,11 +65,6 @@
and footer_panel.find("table")
and footer_panel.find("table").find("tr")
):
- print(
- footer_panel,
- footer_panel.find("table"),
- footer_panel.find("table").find("tr"),
- )
return {}
xmas_map: dict = {}
today = datetime.now()
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py\n@@ -65,11 +65,6 @@\n and footer_panel.find(\"table\")\n and footer_panel.find(\"table\").find(\"tr\")\n ):\n- print(\n- footer_panel,\n- footer_panel.find(\"table\"),\n- footer_panel.find(\"table\").find(\"tr\"),\n- )\n return {}\n xmas_map: dict = {}\n today = datetime.now()\n", "issue": "[Bug]: Dudley Council, UK - Not Loading\n### I Have A Problem With:\r\n\r\nA specific source\r\n\r\n### What's Your Problem\r\n\r\nNow the Xmas alternative dates have been removed from the council website it's no longer loading my bin dates into my calendar and showing the below error messages in the HA log file\r\n\r\n### Source (if relevant)\r\n\r\ndudley_gov_uk\r\n\r\n### Logs\r\n\r\n```Shell\r\n2024-01-10 01:19:15.591 ERROR (SyncWorker_4) [waste_collection_schedule.source_shell] fetch failed for source Dudley Metropolitan Borough Council:\r\nTraceback (most recent call last):\r\n File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py\", line 134, in fetch\r\n entries = self._source.fetch()\r\n ^^^^^^^^^^^^^^^^^^^^\r\n File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py\", line 107, in fetch\r\n xmas_map = self.get_xmas_map(footer_panel)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py\", line 71, in get_xmas_map\r\n footer_panel.find(\"table\").find(\"tr\"),\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nAttributeError: 'NoneType' object has no attribute 'find'\r\n\r\n```\r\n\r\n\r\n### Relevant Configuration\r\n\r\n_No response_\r\n\r\n### Checklist Source Error\r\n\r\n- [x] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\r\n- [X] Checked that the website of your service provider is still working\r\n- [x] Tested my attributes on the service provider website (if possible)\r\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\r\n\r\n### Checklist Sensor Error\r\n\r\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\r\n\r\n### Required\r\n\r\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\r\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "import re\nfrom datetime import date, datetime, timedelta\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Dudley Metropolitan Borough Council\"\nDESCRIPTION = \"Source for Dudley Metropolitan Borough Council, UK.\"\nURL = \"https://dudley.gov.uk\"\nTEST_CASES = {\n \"Test_001\": {\"uprn\": \"90090715\"},\n \"Test_002\": {\"uprn\": 90104555},\n \"Test_003\": 
{\"uprn\": \"90164803\"},\n \"Test_004\": {\"uprn\": 90092621},\n}\nICON_MAP = {\"RECYCLING\": \"mdi:recycle\", \"GARDEN\": \"mdi:leaf\", \"REFUSE\": \"mdi:trash-can\"}\nREGEX = {\n \"DATES\": r\"(\\d+ \\w{3})\",\n \"DAYS\": r\"every: (Monday|Tuesday|Wednesday|Thursday|Friday)\",\n}\nDAYS = {\n \"Monday\": 0,\n \"Tuesday\": 1,\n \"Wednesday\": 2,\n \"Thursday\": 3,\n \"Friday\": 4,\n \"Saturday\": 5,\n \"Sunday\": 6,\n}\n\n\nclass Source:\n def __init__(self, uprn: str | int):\n self._uprn = str(uprn)\n\n def check_date(self, d: str, t: datetime, y: int):\n \"\"\"\n Get date, append year, and increment year if date is >1 month in the past.\n\n This tries to deal year-end dates when the YEAR is missing\n \"\"\"\n d += \" \" + str(y)\n try:\n date = datetime.strptime(d, \"%d %b %Y\")\n except ValueError:\n date = datetime.strptime(d, \"%A %d %b %Y\")\n if (date - t) < timedelta(days=-31):\n date = date.replace(year=date.year + 1)\n return date.date()\n\n def append_entries(self, d: datetime, w: str, e: list) -> list:\n e.append(\n Collection(\n date=d,\n t=w,\n icon=ICON_MAP.get(w.upper()),\n )\n )\n return e\n\n def get_xmas_map(self, footer_panel) -> dict[date, date]:\n if not (\n footer_panel\n and footer_panel.find(\"table\")\n and footer_panel.find(\"table\").find(\"tr\")\n ):\n print(\n footer_panel,\n footer_panel.find(\"table\"),\n footer_panel.find(\"table\").find(\"tr\"),\n )\n return {}\n xmas_map: dict = {}\n today = datetime.now()\n yr = int(today.year)\n for tr in footer_panel.find(\"table\").findAll(\"tr\")[1:]:\n try:\n moved, moved_to = tr.findAll(\"td\")\n moved = self.check_date(moved.text, today, yr)\n moved_to = self.check_date(moved_to.text, today, yr)\n xmas_map[moved] = moved_to\n except Exception as e:\n print(e)\n continue\n return xmas_map\n\n def fetch(self):\n today = datetime.now()\n today = today.replace(hour=0, minute=0, second=0, microsecond=0)\n yr = int(today.year)\n\n s = requests.Session()\n r = s.get(\n f\"https://maps.dudley.gov.uk/?action=SetAddress&UniqueId={self._uprn}\"\n )\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n panel = soup.find(\"div\", {\"aria-label\": \"Refuse and Recycling Collection\"})\n panel_data = panel.find(\"div\", {\"class\": \"atPanelData\"})\n waste_data = panel_data.text.split(\"Next\")[\n 1:\n ] # remove first element it just contains general info\n\n # get table of holiday moved dates (only around xmas)\n footer_panel = panel.find(\"div\", {\"class\": \"atPanelFooter\"})\n xmas_map = self.get_xmas_map(footer_panel)\n\n entries = []\n # Deal with Recycling and Garden collections\n for item in waste_data:\n text = item.replace(\"\\r\\n\", \"\").strip()\n if \"recycling\" in text:\n dates = re.findall(REGEX[\"DATES\"], text)\n for dt in dates:\n dt = self.check_date(dt, today, yr)\n dt = xmas_map.get(dt, dt)\n self.append_entries(dt, \"Recycling\", entries)\n elif \"garden\" in text:\n dates = re.findall(REGEX[\"DATES\"], text)\n for dt in dates:\n dt = self.check_date(dt, today, yr)\n dt = xmas_map.get(dt, dt)\n self.append_entries(dt, \"Garden\", entries)\n\n # Refuse collections only have a DAY not a date, so work out dates for the next few collections\n refuse_day = re.findall(REGEX[\"DAYS\"], panel_data.text)[0]\n refuse_date = today + timedelta((int(DAYS[refuse_day]) - today.weekday()) % 7)\n for i in range(0, 4):\n temp_date = (refuse_date + timedelta(days=7 * i)).date()\n temp_date = xmas_map.get(temp_date, temp_date)\n self.append_entries(temp_date, \"Refuse\", entries)\n\n return entries\n", "path": 
"custom_components/waste_collection_schedule/waste_collection_schedule/source/dudley_gov_uk.py"}]} | 2,575 | 172 |
gh_patches_debug_6896 | rasdani/github-patches | git_diff | pantsbuild__pants-20719 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"export" goal docs have unclosed call out
**Describe the bug**
The warning callout about "exporting tools requires ..." seems to be unclosed:
- https://www.pantsbuild.org/2.18/reference/goals/export
- https://www.pantsbuild.org/2.19/reference/goals/export
- https://www.pantsbuild.org/2.20/reference/goals/export

**Pants version**
2.18 onwards
**OS**
macOS
**Additional info**
Introduced in #20604
</issue>
<code>
[start of src/python/pants/core/goals/export.py]
1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 import itertools
7 import os
8 from dataclasses import dataclass
9 from typing import Iterable, Mapping, Sequence, cast
10
11 from pants.base.build_root import BuildRoot
12 from pants.core.goals.generate_lockfiles import (
13 GenerateToolLockfileSentinel,
14 KnownUserResolveNames,
15 KnownUserResolveNamesRequest,
16 UnrecognizedResolveNamesError,
17 )
18 from pants.core.util_rules.distdir import DistDir
19 from pants.core.util_rules.environments import _warn_on_non_local_environments
20 from pants.engine.collection import Collection
21 from pants.engine.console import Console
22 from pants.engine.env_vars import EnvironmentVars, EnvironmentVarsRequest
23 from pants.engine.environment import EnvironmentName
24 from pants.engine.fs import EMPTY_DIGEST, AddPrefix, Digest, MergeDigests, Workspace
25 from pants.engine.goal import Goal, GoalSubsystem
26 from pants.engine.internals.selectors import Effect, Get, MultiGet
27 from pants.engine.process import InteractiveProcess, InteractiveProcessResult
28 from pants.engine.rules import collect_rules, goal_rule
29 from pants.engine.target import FilteredTargets, Target
30 from pants.engine.unions import UnionMembership, union
31 from pants.option.option_types import StrListOption
32 from pants.util.dirutil import safe_rmtree
33 from pants.util.frozendict import FrozenDict
34 from pants.util.strutil import softwrap
35
36
37 class ExportError(Exception):
38 pass
39
40
41 @union(in_scope_types=[EnvironmentName])
42 @dataclass(frozen=True)
43 class ExportRequest:
44 """A union for exportable data provided by a backend.
45
46 Subclass and install a member of this type to export data.
47 """
48
49 targets: Sequence[Target]
50
51
52 @dataclass(frozen=True)
53 class PostProcessingCommand:
54 """A command to run as a local process after an exported digest is materialized."""
55
56 # Values in the argv tuple can contain the format specifier "{digest_root}", which will be
57 # substituted with the (absolute) path to the location under distdir in which the
58 # digest is materialized.
59 argv: tuple[str, ...]
60 # The command will be run with an environment consisting of just PATH, set to the Pants
61 # process's own PATH env var, plus these extra env vars.
62 extra_env: FrozenDict[str, str]
63
64 def __init__(
65 self,
66 argv: Iterable[str],
67 extra_env: Mapping[str, str] = FrozenDict(),
68 ):
69 object.__setattr__(self, "argv", tuple(argv))
70 object.__setattr__(self, "extra_env", FrozenDict(extra_env))
71
72
73 @dataclass(frozen=True)
74 class ExportResult:
75 description: str
76 # Materialize digests under this reldir.
77 reldir: str
78 # Materialize this digest.
79 digest: Digest
80 # Run these commands as local processes after the digest is materialized.
81 post_processing_cmds: tuple[PostProcessingCommand, ...]
82 # Set for the common special case of exporting a resolve, and names that resolve.
83 # Set to None for other export results.
84 resolve: str | None
85
86 def __init__(
87 self,
88 description: str,
89 reldir: str,
90 *,
91 digest: Digest = EMPTY_DIGEST,
92 post_processing_cmds: Iterable[PostProcessingCommand] = tuple(),
93 resolve: str | None = None,
94 ):
95 object.__setattr__(self, "description", description)
96 object.__setattr__(self, "reldir", reldir)
97 object.__setattr__(self, "digest", digest)
98 object.__setattr__(self, "post_processing_cmds", tuple(post_processing_cmds))
99 object.__setattr__(self, "resolve", resolve)
100
101
102 class ExportResults(Collection[ExportResult]):
103 pass
104
105
106 class ExportSubsystem(GoalSubsystem):
107 name = "export"
108 help = softwrap(
109 """
110 Export Pants data for use in other tools, such as IDEs.
111
112 :::caution Exporting tools requires creating a custom lockfile for them
113 Follow [the instructions for creating tool lockfiles](../../docs/python/overview/lockfiles#lockfiles-for-tools)
114 :::
115 """
116 )
117
118 # NB: Only options that are relevant across many/most backends and languages
119 # should be defined here. Backend-specific options should be defined in that backend
120 # as plugin options on this subsystem.
121
122 # Exporting resolves is a common use-case for `export`, often the primary one, so we
123 # add affordances for it at the core goal level.
124 resolve = StrListOption(
125 default=[],
126 help="Export the specified resolve(s). The export format is backend-specific, "
127 "e.g., Python resolves are exported as virtualenvs.",
128 )
129
130
131 class Export(Goal):
132 subsystem_cls = ExportSubsystem
133 environment_behavior = Goal.EnvironmentBehavior.LOCAL_ONLY
134
135
136 @goal_rule
137 async def export(
138 console: Console,
139 targets: FilteredTargets,
140 workspace: Workspace,
141 union_membership: UnionMembership,
142 build_root: BuildRoot,
143 dist_dir: DistDir,
144 export_subsys: ExportSubsystem,
145 ) -> Export:
146 request_types = cast("Iterable[type[ExportRequest]]", union_membership.get(ExportRequest))
147 requests = tuple(request_type(targets) for request_type in request_types)
148 all_results = await MultiGet(Get(ExportResults, ExportRequest, request) for request in requests)
149 flattened_results = [res for results in all_results for res in results]
150
151 await _warn_on_non_local_environments(targets, "the `export` goal")
152
153 prefixed_digests = await MultiGet(
154 Get(Digest, AddPrefix(result.digest, result.reldir)) for result in flattened_results
155 )
156 output_dir = os.path.join(str(dist_dir.relpath), "export")
157 for result in flattened_results:
158 digest_root = os.path.join(build_root.path, output_dir, result.reldir)
159 safe_rmtree(digest_root)
160 merged_digest = await Get(Digest, MergeDigests(prefixed_digests))
161 dist_digest = await Get(Digest, AddPrefix(merged_digest, output_dir))
162 workspace.write_digest(dist_digest)
163 environment = await Get(EnvironmentVars, EnvironmentVarsRequest(["PATH"]))
164 resolves_exported = set()
165 for result in flattened_results:
166 result_dir = os.path.join(output_dir, result.reldir)
167 digest_root = os.path.join(build_root.path, result_dir)
168 for cmd in result.post_processing_cmds:
169 argv = tuple(arg.format(digest_root=digest_root) for arg in cmd.argv)
170 ip = InteractiveProcess(
171 argv=argv,
172 env={"PATH": environment.get("PATH", ""), **cmd.extra_env},
173 run_in_workspace=True,
174 )
175 ipr = await Effect(InteractiveProcessResult, InteractiveProcess, ip)
176 if ipr.exit_code:
177 raise ExportError(f"Failed to write {result.description} to {result_dir}")
178 if result.resolve:
179 resolves_exported.add(result.resolve)
180 console.print_stdout(f"Wrote {result.description} to {result_dir}")
181
182 unexported_resolves = sorted((set(export_subsys.resolve) - resolves_exported))
183 if unexported_resolves:
184 all_known_user_resolve_names = await MultiGet(
185 Get(KnownUserResolveNames, KnownUserResolveNamesRequest, request())
186 for request in union_membership.get(KnownUserResolveNamesRequest)
187 )
188 all_valid_resolve_names = sorted(
189 {
190 *itertools.chain.from_iterable(kurn.names for kurn in all_known_user_resolve_names),
191 *(
192 sentinel.resolve_name
193 for sentinel in union_membership.get(GenerateToolLockfileSentinel)
194 ),
195 }
196 )
197 raise UnrecognizedResolveNamesError(
198 unexported_resolves,
199 all_valid_resolve_names,
200 description_of_origin="the option --export-resolve",
201 )
202
203 return Export(exit_code=0)
204
205
206 def rules():
207 return collect_rules()
208
[end of src/python/pants/core/goals/export.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/python/pants/core/goals/export.py b/src/python/pants/core/goals/export.py
--- a/src/python/pants/core/goals/export.py
+++ b/src/python/pants/core/goals/export.py
@@ -110,7 +110,9 @@
Export Pants data for use in other tools, such as IDEs.
:::caution Exporting tools requires creating a custom lockfile for them
+
Follow [the instructions for creating tool lockfiles](../../docs/python/overview/lockfiles#lockfiles-for-tools)
+
:::
"""
)
| {"golden_diff": "diff --git a/src/python/pants/core/goals/export.py b/src/python/pants/core/goals/export.py\n--- a/src/python/pants/core/goals/export.py\n+++ b/src/python/pants/core/goals/export.py\n@@ -110,7 +110,9 @@\n Export Pants data for use in other tools, such as IDEs.\n \n :::caution Exporting tools requires creating a custom lockfile for them\n+\n Follow [the instructions for creating tool lockfiles](../../docs/python/overview/lockfiles#lockfiles-for-tools)\n+\n :::\n \"\"\"\n )\n", "issue": "\"export\" goal docs have unclosed call out\n**Describe the bug**\r\n\r\nThe warning callout about \"exporting tools requires ...\" seems to be unclosed:\r\n\r\n- https://www.pantsbuild.org/2.18/reference/goals/export\r\n- https://www.pantsbuild.org/2.19/reference/goals/export\r\n- https://www.pantsbuild.org/2.20/reference/goals/export\r\n\r\n\r\n\r\n**Pants version**\r\n2.18 onwards\r\n\r\n**OS**\r\nmacOS\r\n\r\n**Additional info**\r\nIntroduced in #20604 \n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport itertools\nimport os\nfrom dataclasses import dataclass\nfrom typing import Iterable, Mapping, Sequence, cast\n\nfrom pants.base.build_root import BuildRoot\nfrom pants.core.goals.generate_lockfiles import (\n GenerateToolLockfileSentinel,\n KnownUserResolveNames,\n KnownUserResolveNamesRequest,\n UnrecognizedResolveNamesError,\n)\nfrom pants.core.util_rules.distdir import DistDir\nfrom pants.core.util_rules.environments import _warn_on_non_local_environments\nfrom pants.engine.collection import Collection\nfrom pants.engine.console import Console\nfrom pants.engine.env_vars import EnvironmentVars, EnvironmentVarsRequest\nfrom pants.engine.environment import EnvironmentName\nfrom pants.engine.fs import EMPTY_DIGEST, AddPrefix, Digest, MergeDigests, Workspace\nfrom pants.engine.goal import Goal, GoalSubsystem\nfrom pants.engine.internals.selectors import Effect, Get, MultiGet\nfrom pants.engine.process import InteractiveProcess, InteractiveProcessResult\nfrom pants.engine.rules import collect_rules, goal_rule\nfrom pants.engine.target import FilteredTargets, Target\nfrom pants.engine.unions import UnionMembership, union\nfrom pants.option.option_types import StrListOption\nfrom pants.util.dirutil import safe_rmtree\nfrom pants.util.frozendict import FrozenDict\nfrom pants.util.strutil import softwrap\n\n\nclass ExportError(Exception):\n pass\n\n\n@union(in_scope_types=[EnvironmentName])\n@dataclass(frozen=True)\nclass ExportRequest:\n \"\"\"A union for exportable data provided by a backend.\n\n Subclass and install a member of this type to export data.\n \"\"\"\n\n targets: Sequence[Target]\n\n\n@dataclass(frozen=True)\nclass PostProcessingCommand:\n \"\"\"A command to run as a local process after an exported digest is materialized.\"\"\"\n\n # Values in the argv tuple can contain the format specifier \"{digest_root}\", which will be\n # substituted with the (absolute) path to the location under distdir in which the\n # digest is materialized.\n argv: tuple[str, ...]\n # The command will be run with an environment consisting of just PATH, set to the Pants\n # process's own PATH env var, plus these extra env vars.\n extra_env: FrozenDict[str, str]\n\n def __init__(\n self,\n argv: Iterable[str],\n extra_env: Mapping[str, str] = FrozenDict(),\n ):\n object.__setattr__(self, \"argv\", tuple(argv))\n object.__setattr__(self, 
\"extra_env\", FrozenDict(extra_env))\n\n\n@dataclass(frozen=True)\nclass ExportResult:\n description: str\n # Materialize digests under this reldir.\n reldir: str\n # Materialize this digest.\n digest: Digest\n # Run these commands as local processes after the digest is materialized.\n post_processing_cmds: tuple[PostProcessingCommand, ...]\n # Set for the common special case of exporting a resolve, and names that resolve.\n # Set to None for other export results.\n resolve: str | None\n\n def __init__(\n self,\n description: str,\n reldir: str,\n *,\n digest: Digest = EMPTY_DIGEST,\n post_processing_cmds: Iterable[PostProcessingCommand] = tuple(),\n resolve: str | None = None,\n ):\n object.__setattr__(self, \"description\", description)\n object.__setattr__(self, \"reldir\", reldir)\n object.__setattr__(self, \"digest\", digest)\n object.__setattr__(self, \"post_processing_cmds\", tuple(post_processing_cmds))\n object.__setattr__(self, \"resolve\", resolve)\n\n\nclass ExportResults(Collection[ExportResult]):\n pass\n\n\nclass ExportSubsystem(GoalSubsystem):\n name = \"export\"\n help = softwrap(\n \"\"\"\n Export Pants data for use in other tools, such as IDEs.\n\n :::caution Exporting tools requires creating a custom lockfile for them\n Follow [the instructions for creating tool lockfiles](../../docs/python/overview/lockfiles#lockfiles-for-tools)\n :::\n \"\"\"\n )\n\n # NB: Only options that are relevant across many/most backends and languages\n # should be defined here. Backend-specific options should be defined in that backend\n # as plugin options on this subsystem.\n\n # Exporting resolves is a common use-case for `export`, often the primary one, so we\n # add affordances for it at the core goal level.\n resolve = StrListOption(\n default=[],\n help=\"Export the specified resolve(s). 
The export format is backend-specific, \"\n \"e.g., Python resolves are exported as virtualenvs.\",\n )\n\n\nclass Export(Goal):\n subsystem_cls = ExportSubsystem\n environment_behavior = Goal.EnvironmentBehavior.LOCAL_ONLY\n\n\n@goal_rule\nasync def export(\n console: Console,\n targets: FilteredTargets,\n workspace: Workspace,\n union_membership: UnionMembership,\n build_root: BuildRoot,\n dist_dir: DistDir,\n export_subsys: ExportSubsystem,\n) -> Export:\n request_types = cast(\"Iterable[type[ExportRequest]]\", union_membership.get(ExportRequest))\n requests = tuple(request_type(targets) for request_type in request_types)\n all_results = await MultiGet(Get(ExportResults, ExportRequest, request) for request in requests)\n flattened_results = [res for results in all_results for res in results]\n\n await _warn_on_non_local_environments(targets, \"the `export` goal\")\n\n prefixed_digests = await MultiGet(\n Get(Digest, AddPrefix(result.digest, result.reldir)) for result in flattened_results\n )\n output_dir = os.path.join(str(dist_dir.relpath), \"export\")\n for result in flattened_results:\n digest_root = os.path.join(build_root.path, output_dir, result.reldir)\n safe_rmtree(digest_root)\n merged_digest = await Get(Digest, MergeDigests(prefixed_digests))\n dist_digest = await Get(Digest, AddPrefix(merged_digest, output_dir))\n workspace.write_digest(dist_digest)\n environment = await Get(EnvironmentVars, EnvironmentVarsRequest([\"PATH\"]))\n resolves_exported = set()\n for result in flattened_results:\n result_dir = os.path.join(output_dir, result.reldir)\n digest_root = os.path.join(build_root.path, result_dir)\n for cmd in result.post_processing_cmds:\n argv = tuple(arg.format(digest_root=digest_root) for arg in cmd.argv)\n ip = InteractiveProcess(\n argv=argv,\n env={\"PATH\": environment.get(\"PATH\", \"\"), **cmd.extra_env},\n run_in_workspace=True,\n )\n ipr = await Effect(InteractiveProcessResult, InteractiveProcess, ip)\n if ipr.exit_code:\n raise ExportError(f\"Failed to write {result.description} to {result_dir}\")\n if result.resolve:\n resolves_exported.add(result.resolve)\n console.print_stdout(f\"Wrote {result.description} to {result_dir}\")\n\n unexported_resolves = sorted((set(export_subsys.resolve) - resolves_exported))\n if unexported_resolves:\n all_known_user_resolve_names = await MultiGet(\n Get(KnownUserResolveNames, KnownUserResolveNamesRequest, request())\n for request in union_membership.get(KnownUserResolveNamesRequest)\n )\n all_valid_resolve_names = sorted(\n {\n *itertools.chain.from_iterable(kurn.names for kurn in all_known_user_resolve_names),\n *(\n sentinel.resolve_name\n for sentinel in union_membership.get(GenerateToolLockfileSentinel)\n ),\n }\n )\n raise UnrecognizedResolveNamesError(\n unexported_resolves,\n all_valid_resolve_names,\n description_of_origin=\"the option --export-resolve\",\n )\n\n return Export(exit_code=0)\n\n\ndef rules():\n return collect_rules()\n", "path": "src/python/pants/core/goals/export.py"}]} | 2,940 | 128 |
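The pants fix above is a pure documentation-rendering change: the `:::caution` admonition in the `export` goal's help text needed blank lines around its body so the site generator closes the callout where intended. A small sketch of the corrected shape of such a help string (standalone constant for illustration, not the actual subsystem code):

```python
from textwrap import dedent

# Blank lines after the ":::caution ..." opener and before the closing ":::"
# let the Markdown/MDX renderer treat the callout as a finished block
# (this mirrors the shape of the merged patch shown above).
EXPORT_HELP = dedent(
    """\
    Export Pants data for use in other tools, such as IDEs.

    :::caution Exporting tools requires creating a custom lockfile for them

    Follow [the instructions for creating tool lockfiles](../../docs/python/overview/lockfiles#lockfiles-for-tools)

    :::
    """
)
```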
gh_patches_debug_25864 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-1089 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement filtering options for date & time types
## Problem
- We need to ensure that records that include columns of all Date & Time types support the following filters via API:
- between {x} and {y}
- is {x}
- is not {x}
- before {x}
- after {x}
- on or before {x}
  - on or after {x}
- is empty
- is not empty
- We need to also ensure that values of the filters don't have to be an exact date. We should accept natural language like "next month", or "tomorrow".
- We could use https://dateparser.readthedocs.io/
This involves:
- Implementing the filters in the backend
- Updating the `/api/v0/databases/<id>/types/` endpoint to store available filters on this type
- Filter information should include the number of parameters needing to be passed in (e.g. `between` needs 2 parameters, `is empty` needs 0)
## Additional context
- We're using our fork of `sqlalchemy-filters` to provide filtering. See: https://github.com/centerofci/sqlalchemy-filters.
- This issue is blocked by implementation of Date & Time types:
- #424
- #425
- #426
- #557 provides some context on storing filters.
Implement filtering options for duration type
## Problem
- We need to ensure that records that include columns of duration types support the following filters via API:
- between {x} and {y}
- equals {x}
- does not equal {x}
- greater than {x}
- less than {x}
- greater than or equals {x}
- less than or equals {x}
- is empty
- is not empty
- We need to also ensure that values of the filters don't have to be a number. We should accept natural language like "an hour", or "2 days".
- We could use something like https://github.com/oleiade/durations or https://github.com/wroberts/pytimeparse
This involves:
- Implementing the filters in the backend
- Updating the `/api/v0/databases/<id>/types/` endpoint to store available filters on this type
- Filter information should include the number of parameters needing to be passed in (e.g. `between` needs 2 parameters, `is empty` needs 0)
## Additional context
- These filters are the same as those for Number types. We should just reuse those.
- The only additional thing we need to implement is converting natural language to numbers.
- Relevant issue: #385
- We're using our fork of `sqlalchemy-filters` to provide filtering. See: https://github.com/centerofci/sqlalchemy-filters.
- This issue is blocked by implementation of #430
- #557 provides some context on storing filters.
Marking as blocked until #385 and #430 are complete.
</issue>
<code>
[start of db/types/base.py]
1 from enum import Enum
2
3 from sqlalchemy import create_engine
4
5 from db import constants
6
7 from db.functions import hints
8
9 from frozendict import frozendict
10
11
12 CHAR = 'char'
13 STRING = 'string'
14 VARCHAR = 'varchar'
15
16
17 class PostgresType(Enum):
18 """
19 This only includes built-in Postgres types that SQLAlchemy supports.
20 SQLAlchemy doesn't support XML. See zzzeek's comment on:
21 https://stackoverflow.com/questions/16153512/using-postgresql-xml-data-type-with-sqlalchemy
22 The values are keys returned by get_available_types.
23 """
24 _ARRAY = '_array'
25 BIGINT = 'bigint'
26 BIT_VARYING = 'bit varying'
27 BIT = 'bit'
28 BOOLEAN = 'boolean'
29 BYTEA = 'bytea'
30 CHAR = '"char"'
31 CHARACTER_VARYING = 'character varying'
32 CHARACTER = 'character'
33 CIDR = 'cidr'
34 DATE = 'date'
35 DATERANGE = 'daterange'
36 DECIMAL = 'decimal'
37 DOUBLE_PRECISION = 'double precision'
38 FLOAT = 'float'
39 HSTORE = 'hstore'
40 INET = 'inet'
41 INT4RANGE = 'int4range'
42 INT8RANGE = 'int8range'
43 INTEGER = 'integer'
44 INTERVAL = 'interval'
45 JSON = 'json'
46 JSONB = 'jsonb'
47 MACADDR = 'macaddr'
48 MONEY = 'money'
49 NAME = 'name'
50 NUMERIC = 'numeric'
51 NUMRANGE = 'numrange'
52 OID = 'oid'
53 REAL = 'real'
54 REGCLASS = 'regclass'
55 SMALLINT = 'smallint'
56 TEXT = 'text'
57 TIME = 'time'
58 TIME_WITH_TIME_ZONE = 'time with time zone'
59 TIME_WITHOUT_TIME_ZONE = 'time without time zone'
60 TIMESTAMP = 'timestamp'
61 TIMESTAMP_WITH_TIME_ZONE = 'timestamp with time zone'
62 TIMESTAMP_WITHOUT_TIME_ZONE = 'timestamp without time zone'
63 TSRANGE = 'tsrange'
64 TSTZRANGE = 'tstzrange'
65 TSVECTOR = 'tsvector'
66 UUID = 'uuid'
67
68
69 class MathesarCustomType(Enum):
70 """
71 This is a list of custom Mathesar DB types.
72 Keys returned by get_available_types are of the format 'mathesar_types.VALUE'
73 """
74 EMAIL = 'email'
75 URI = 'uri'
76 MATHESAR_MONEY = 'mathesar_money'
77
78
79 _known_vanilla_db_types = tuple(postgres_type for postgres_type in PostgresType)
80
81
82 _known_custom_db_types = tuple(mathesar_custom_type for mathesar_custom_type in MathesarCustomType)
83
84
85 # Known database types are those that are defined on our PostgresType and MathesarCustomType Enums.
86 known_db_types = _known_vanilla_db_types + _known_custom_db_types
87
88
89 # Origin: https://www.python.org/dev/peps/pep-0616/#id17
90 def _remove_prefix(self: str, prefix: str, /) -> str:
91 """
92 This will remove the passed prefix, if it's there.
93 Otherwise, it will return the string unchanged.
94 """
95 if self.startswith(prefix):
96 return self[len(prefix):]
97 else:
98 return self[:]
99
100
101 def get_db_type_enum_from_id(db_type_id):
102 """
103 Gets an instance of either the PostgresType enum or the MathesarCustomType enum corresponding
104 to the provided db_type_id. If the id doesn't correspond to any of the mentioned enums,
105 returns None.
106 """
107 try:
108 return PostgresType(db_type_id)
109 except ValueError:
110 try:
111 # Sometimes MA type identifiers are qualified like so: `mathesar_types.uri`.
112 # We want to remove that prefix, when it's there, because MathesarCustomType
113 # enum stores type ids without a qualifier (e.g. `uri`).
114 possible_prefix = _ma_type_qualifier_prefix + '.'
115 preprocessed_db_type_id = _remove_prefix(db_type_id, possible_prefix)
116 return MathesarCustomType(preprocessed_db_type_id)
117 except ValueError:
118 return None
119
120
121 def _build_db_types_hinted():
122 """
123 Builds up a map of db types to hintsets.
124 """
125 # Start out by defining some hints manually.
126 db_types_hinted = {
127 PostgresType.BOOLEAN: tuple([
128 hints.boolean
129 ]),
130 MathesarCustomType.URI: tuple([
131 hints.uri
132 ]),
133 MathesarCustomType.EMAIL: tuple([
134 hints.email
135 ]),
136 }
137
138 # Then, start adding hints automatically.
139 # This is for many-to-many relationships, i.e. adding multiple identical hintsets to the
140 # hintsets of multiple db types.
141 def _add_to_db_type_hintsets(db_types, hints):
142 """
143 Mutates db_types_hinted to map every hint in `hints` to every DB type in `db_types`.
144 """
145 for db_type in db_types:
146 if db_type in db_types_hinted:
147 updated_hintset = tuple(set(db_types_hinted[db_type] + tuple(hints)))
148 db_types_hinted[db_type] = updated_hintset
149 else:
150 db_types_hinted[db_type] = tuple(hints)
151
152 # all types get the "any" hint
153 all_db_types = known_db_types
154 hints_for_all_db_types = (hints.any,)
155 _add_to_db_type_hintsets(all_db_types, hints_for_all_db_types)
156
157 # string-like types get the "string_like" hint
158 string_like_db_types = (
159 PostgresType.CHARACTER_VARYING,
160 PostgresType.CHARACTER,
161 PostgresType.TEXT,
162 MathesarCustomType.URI,
163 MathesarCustomType.EMAIL,
164 )
165 hints_for_string_like_types = (hints.string_like,)
166 _add_to_db_type_hintsets(string_like_db_types, hints_for_string_like_types)
167
168 # numeric types get the "comparable" hint
169 numeric_db_types = (
170 PostgresType.BIGINT,
171 PostgresType.DECIMAL,
172 PostgresType.DOUBLE_PRECISION,
173 PostgresType.FLOAT,
174 PostgresType.INTEGER,
175 PostgresType.SMALLINT,
176 PostgresType.NUMERIC,
177 PostgresType.REAL,
178 )
179 hints_for_numeric_db_types = (hints.comparable,)
180 _add_to_db_type_hintsets(numeric_db_types, hints_for_numeric_db_types)
181
182 return frozendict(db_types_hinted)
183
184
185 db_types_hinted = _build_db_types_hinted()
186
187
188 SCHEMA = f"{constants.MATHESAR_PREFIX}types"
189 # Since we want to have our identifiers quoted appropriately for use in
190 # PostgreSQL, we want to use the postgres dialect preparer to set this up.
191 preparer = create_engine("postgresql://").dialect.identifier_preparer
192
193
194 # Should usually equal `mathesar_types`
195 _ma_type_qualifier_prefix = preparer.quote_schema(SCHEMA)
196
197
198 def get_qualified_name(name):
199 return ".".join([_ma_type_qualifier_prefix, name])
200
201
202 def get_available_types(engine):
203 """
204 Returns a dict where the keys are database type names defined on the database associated with
205 provided Engine, and the values are their SQLAlchemy classes.
206 """
207 return engine.dialect.ischema_names
208
209
210 def get_available_known_db_types(engine):
211 """
212 Returns database types that are both available on the database and known through our Enums
213 above.
214 """
215 available_db_types = get_available_types(engine)
216 return tuple(
217 known_db_type
218 for known_db_type in known_db_types
219 if known_db_type.value in available_db_types
220 )
221
222
223 def get_db_type_name(sa_type, engine):
224 try:
225 db_type = sa_type.compile(dialect=engine.dialect)
226 except TypeError:
227 db_type = sa_type().compile(dialect=engine.dialect)
228 return db_type
229
[end of db/types/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/db/types/base.py b/db/types/base.py
--- a/db/types/base.py
+++ b/db/types/base.py
@@ -156,8 +156,10 @@
# string-like types get the "string_like" hint
string_like_db_types = (
- PostgresType.CHARACTER_VARYING,
+ PostgresType.CHAR,
PostgresType.CHARACTER,
+ PostgresType.CHARACTER_VARYING,
+ PostgresType.NAME,
PostgresType.TEXT,
MathesarCustomType.URI,
MathesarCustomType.EMAIL,
@@ -175,10 +177,25 @@
PostgresType.SMALLINT,
PostgresType.NUMERIC,
PostgresType.REAL,
+ PostgresType.MONEY,
)
hints_for_numeric_db_types = (hints.comparable,)
_add_to_db_type_hintsets(numeric_db_types, hints_for_numeric_db_types)
+ # time related types get the "comparable" hint
+ time_related_db_types = (
+ PostgresType.DATE,
+ PostgresType.TIME,
+ PostgresType.TIME_WITH_TIME_ZONE,
+ PostgresType.TIME_WITHOUT_TIME_ZONE,
+ PostgresType.TIMESTAMP,
+ PostgresType.TIMESTAMP_WITH_TIME_ZONE,
+ PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE,
+ PostgresType.INTERVAL,
+ )
+ hints_for_time_related_types = (hints.comparable,)
+ _add_to_db_type_hintsets(time_related_db_types, hints_for_time_related_types)
+
return frozendict(db_types_hinted)
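
The diff above only registers the `comparable` hint for time-related (and money) types; the natural-language parsing requested in the issue is not part of it. A minimal sketch of how that could be layered in front of the filtering API — assuming the optional `dateparser` dependency and a hypothetical `normalize_datetime_value` helper, neither of which appears in the diff — might look like:

```python
# Hypothetical pre-processing step, not part of the Mathesar codebase shown above.
import dateparser


def normalize_datetime_value(raw):
    """Turn 'tomorrow', 'next month' or an ISO date string into an ISO 8601 value."""
    parsed = dateparser.parse(raw)
    if parsed is None:
        raise ValueError(f"Could not interpret {raw!r} as a date/time")
    return parsed.isoformat()
```

A similar wrapper around a duration parser (the issue mentions `pytimeparse` and `durations`) could cover the duration filters described in the second half of the issue.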
| {"golden_diff": "diff --git a/db/types/base.py b/db/types/base.py\n--- a/db/types/base.py\n+++ b/db/types/base.py\n@@ -156,8 +156,10 @@\n \n # string-like types get the \"string_like\" hint\n string_like_db_types = (\n- PostgresType.CHARACTER_VARYING,\n+ PostgresType.CHAR,\n PostgresType.CHARACTER,\n+ PostgresType.CHARACTER_VARYING,\n+ PostgresType.NAME,\n PostgresType.TEXT,\n MathesarCustomType.URI,\n MathesarCustomType.EMAIL,\n@@ -175,10 +177,25 @@\n PostgresType.SMALLINT,\n PostgresType.NUMERIC,\n PostgresType.REAL,\n+ PostgresType.MONEY,\n )\n hints_for_numeric_db_types = (hints.comparable,)\n _add_to_db_type_hintsets(numeric_db_types, hints_for_numeric_db_types)\n \n+ # time related types get the \"comparable\" hint\n+ time_related_db_types = (\n+ PostgresType.DATE,\n+ PostgresType.TIME,\n+ PostgresType.TIME_WITH_TIME_ZONE,\n+ PostgresType.TIME_WITHOUT_TIME_ZONE,\n+ PostgresType.TIMESTAMP,\n+ PostgresType.TIMESTAMP_WITH_TIME_ZONE,\n+ PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE,\n+ PostgresType.INTERVAL,\n+ )\n+ hints_for_time_related_types = (hints.comparable,)\n+ _add_to_db_type_hintsets(time_related_db_types, hints_for_time_related_types)\n+\n return frozendict(db_types_hinted)\n", "issue": "Implement filtering options for date & time types\n## Problem\r\n- We need to ensure that records that include columns of all Date & Time types support the following filters via API: \r\n - between {x} and {y}\r\n - is {x}\r\n - is not {x}\r\n - before {x}\r\n - after {x}\r\n - on or before {x}\r\n - or or after {x}\r\n - is empty\r\n - is not empty\r\n- We need to also ensure that values of the filters don't have to be an exact date. We should accept natural language like \"next month\", or \"tomorrow\".\r\n - We could use https://dateparser.readthedocs.io/\r\n\r\nThis involves:\r\n- Implementing the filters in the backend\r\n- Updating the `/api/v0/databases/<id>/types/` endpoint to store available filters on this type\r\n - Filter information should include the number of parameters needing to be passed in (e.g. `between` needs 2 parameters, `is empty` needs 0)\r\n\r\n## Additional context\r\n- We're using our fork of `sqlalchemy-filters` to provide filtering. See: https://github.com/centerofci/sqlalchemy-filters.\r\n- This issue is blocked by implementation of Date & Time types: \r\n - #424 \r\n - #425 \r\n - #426\r\n- #557 provides some context on storing filters.\nImplement filtering options for duration type\n## Problem\r\n- We need to ensure that records that include columns of duration types support the following filters via API: \r\n - between {x} and {y}\r\n - equals {x}\r\n - does not equal {x}\r\n - greater than {x}\r\n - less than {x}\r\n - greater than or equals {x}\r\n - less than or equals {x}\r\n - is empty\r\n - is not empty\r\n- We need to also ensure that values of the filters don't have to be a number. We should accept natural language like \"an hour\", or \"2 days\".\r\n - We could use something like https://github.com/oleiade/durations or https://github.com/wroberts/pytimeparse\r\n\r\nThis involves:\r\n- Implementing the filters in the backend\r\n- Updating the `/api/v0/databases/<id>/types/` endpoint to store available filters on this type\r\n - Filter information should include the number of parameters needing to be passed in (e.g. `between` needs 2 parameters, `is empty` needs 0)\r\n\r\n## Additional context\r\n- These filters are the same as those for Number types. 
We should just reuse those.\r\n - The only additional thing we need to implement is converting natural language to numbers.\r\n - Relevant issue: #385 \r\n- We're using our fork of `sqlalchemy-filters` to provide filtering. See: https://github.com/centerofci/sqlalchemy-filters.\r\n- This issue is blocked by implementation of #430 \r\n- #557 provides some context on storing filters.\r\n\r\nMarking as blocked until #385 and #430 are complete.\n", "before_files": [{"content": "from enum import Enum\n\nfrom sqlalchemy import create_engine\n\nfrom db import constants\n\nfrom db.functions import hints\n\nfrom frozendict import frozendict\n\n\nCHAR = 'char'\nSTRING = 'string'\nVARCHAR = 'varchar'\n\n\nclass PostgresType(Enum):\n \"\"\"\n This only includes built-in Postgres types that SQLAlchemy supports.\n SQLAlchemy doesn't support XML. See zzzeek's comment on:\n https://stackoverflow.com/questions/16153512/using-postgresql-xml-data-type-with-sqlalchemy\n The values are keys returned by get_available_types.\n \"\"\"\n _ARRAY = '_array'\n BIGINT = 'bigint'\n BIT_VARYING = 'bit varying'\n BIT = 'bit'\n BOOLEAN = 'boolean'\n BYTEA = 'bytea'\n CHAR = '\"char\"'\n CHARACTER_VARYING = 'character varying'\n CHARACTER = 'character'\n CIDR = 'cidr'\n DATE = 'date'\n DATERANGE = 'daterange'\n DECIMAL = 'decimal'\n DOUBLE_PRECISION = 'double precision'\n FLOAT = 'float'\n HSTORE = 'hstore'\n INET = 'inet'\n INT4RANGE = 'int4range'\n INT8RANGE = 'int8range'\n INTEGER = 'integer'\n INTERVAL = 'interval'\n JSON = 'json'\n JSONB = 'jsonb'\n MACADDR = 'macaddr'\n MONEY = 'money'\n NAME = 'name'\n NUMERIC = 'numeric'\n NUMRANGE = 'numrange'\n OID = 'oid'\n REAL = 'real'\n REGCLASS = 'regclass'\n SMALLINT = 'smallint'\n TEXT = 'text'\n TIME = 'time'\n TIME_WITH_TIME_ZONE = 'time with time zone'\n TIME_WITHOUT_TIME_ZONE = 'time without time zone'\n TIMESTAMP = 'timestamp'\n TIMESTAMP_WITH_TIME_ZONE = 'timestamp with time zone'\n TIMESTAMP_WITHOUT_TIME_ZONE = 'timestamp without time zone'\n TSRANGE = 'tsrange'\n TSTZRANGE = 'tstzrange'\n TSVECTOR = 'tsvector'\n UUID = 'uuid'\n\n\nclass MathesarCustomType(Enum):\n \"\"\"\n This is a list of custom Mathesar DB types.\n Keys returned by get_available_types are of the format 'mathesar_types.VALUE'\n \"\"\"\n EMAIL = 'email'\n URI = 'uri'\n MATHESAR_MONEY = 'mathesar_money'\n\n\n_known_vanilla_db_types = tuple(postgres_type for postgres_type in PostgresType)\n\n\n_known_custom_db_types = tuple(mathesar_custom_type for mathesar_custom_type in MathesarCustomType)\n\n\n# Known database types are those that are defined on our PostgresType and MathesarCustomType Enums.\nknown_db_types = _known_vanilla_db_types + _known_custom_db_types\n\n\n# Origin: https://www.python.org/dev/peps/pep-0616/#id17\ndef _remove_prefix(self: str, prefix: str, /) -> str:\n \"\"\"\n This will remove the passed prefix, if it's there.\n Otherwise, it will return the string unchanged.\n \"\"\"\n if self.startswith(prefix):\n return self[len(prefix):]\n else:\n return self[:]\n\n\ndef get_db_type_enum_from_id(db_type_id):\n \"\"\"\n Gets an instance of either the PostgresType enum or the MathesarCustomType enum corresponding\n to the provided db_type_id. 
If the id doesn't correspond to any of the mentioned enums,\n returns None.\n \"\"\"\n try:\n return PostgresType(db_type_id)\n except ValueError:\n try:\n # Sometimes MA type identifiers are qualified like so: `mathesar_types.uri`.\n # We want to remove that prefix, when it's there, because MathesarCustomType\n # enum stores type ids without a qualifier (e.g. `uri`).\n possible_prefix = _ma_type_qualifier_prefix + '.'\n preprocessed_db_type_id = _remove_prefix(db_type_id, possible_prefix)\n return MathesarCustomType(preprocessed_db_type_id)\n except ValueError:\n return None\n\n\ndef _build_db_types_hinted():\n \"\"\"\n Builds up a map of db types to hintsets.\n \"\"\"\n # Start out by defining some hints manually.\n db_types_hinted = {\n PostgresType.BOOLEAN: tuple([\n hints.boolean\n ]),\n MathesarCustomType.URI: tuple([\n hints.uri\n ]),\n MathesarCustomType.EMAIL: tuple([\n hints.email\n ]),\n }\n\n # Then, start adding hints automatically.\n # This is for many-to-many relationships, i.e. adding multiple identical hintsets to the\n # hintsets of multiple db types.\n def _add_to_db_type_hintsets(db_types, hints):\n \"\"\"\n Mutates db_types_hinted to map every hint in `hints` to every DB type in `db_types`.\n \"\"\"\n for db_type in db_types:\n if db_type in db_types_hinted:\n updated_hintset = tuple(set(db_types_hinted[db_type] + tuple(hints)))\n db_types_hinted[db_type] = updated_hintset\n else:\n db_types_hinted[db_type] = tuple(hints)\n\n # all types get the \"any\" hint\n all_db_types = known_db_types\n hints_for_all_db_types = (hints.any,)\n _add_to_db_type_hintsets(all_db_types, hints_for_all_db_types)\n\n # string-like types get the \"string_like\" hint\n string_like_db_types = (\n PostgresType.CHARACTER_VARYING,\n PostgresType.CHARACTER,\n PostgresType.TEXT,\n MathesarCustomType.URI,\n MathesarCustomType.EMAIL,\n )\n hints_for_string_like_types = (hints.string_like,)\n _add_to_db_type_hintsets(string_like_db_types, hints_for_string_like_types)\n\n # numeric types get the \"comparable\" hint\n numeric_db_types = (\n PostgresType.BIGINT,\n PostgresType.DECIMAL,\n PostgresType.DOUBLE_PRECISION,\n PostgresType.FLOAT,\n PostgresType.INTEGER,\n PostgresType.SMALLINT,\n PostgresType.NUMERIC,\n PostgresType.REAL,\n )\n hints_for_numeric_db_types = (hints.comparable,)\n _add_to_db_type_hintsets(numeric_db_types, hints_for_numeric_db_types)\n\n return frozendict(db_types_hinted)\n\n\ndb_types_hinted = _build_db_types_hinted()\n\n\nSCHEMA = f\"{constants.MATHESAR_PREFIX}types\"\n# Since we want to have our identifiers quoted appropriately for use in\n# PostgreSQL, we want to use the postgres dialect preparer to set this up.\npreparer = create_engine(\"postgresql://\").dialect.identifier_preparer\n\n\n# Should usually equal `mathesar_types`\n_ma_type_qualifier_prefix = preparer.quote_schema(SCHEMA)\n\n\ndef get_qualified_name(name):\n return \".\".join([_ma_type_qualifier_prefix, name])\n\n\ndef get_available_types(engine):\n \"\"\"\n Returns a dict where the keys are database type names defined on the database associated with\n provided Engine, and the values are their SQLAlchemy classes.\n \"\"\"\n return engine.dialect.ischema_names\n\n\ndef get_available_known_db_types(engine):\n \"\"\"\n Returns database types that are both available on the database and known through our Enums\n above.\n \"\"\"\n available_db_types = get_available_types(engine)\n return tuple(\n known_db_type\n for known_db_type in known_db_types\n if known_db_type.value in available_db_types\n )\n\n\ndef 
get_db_type_name(sa_type, engine):\n try:\n db_type = sa_type.compile(dialect=engine.dialect)\n except TypeError:\n db_type = sa_type().compile(dialect=engine.dialect)\n return db_type\n", "path": "db/types/base.py"}]} | 3,484 | 354 |
gh_patches_debug_7990 | rasdani/github-patches | git_diff | CiviWiki__OpenCiviWiki-1380 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
enable Rich terminal output
## Task
- [ ] add Rich to this project with the command `poetry add rich --group dev`
- [ ] follow the Rich [configuration instructions](https://rich.readthedocs.io/en/stable/introduction.html)
- [ ] add the following code to the `LOGGING = {...}` configuration in the project settings
```py
"formatters": {
"rich": {"datefmt": "[%X]"},
},
```
```py
"console": {
"class": "rich.logging.RichHandler",
"filters": ["require_debug_true"],
"formatter": "rich",
"level": "DEBUG",
"rich_tracebacks": True,
"tracebacks_show_locals": True,
},
```
</issue>
<code>
[start of project/core/settings.py]
1 """
2 Django settings for civiwiki project.
3 Darius Calliet May 12, 2016
4
5 Production settings file to select proper environment variables.
6 """
7 import os
8
9 # False if not in os.environ
10 DEBUG = os.getenv("DEBUG", False)
11
12 # defaults to second value if not found in os.environ
13 DJANGO_HOST = os.getenv("DJANGO_HOST", "LOCALHOST")
14
15 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
16 SECRET_KEY = os.getenv("DJANGO_SECRET_KEY", "TEST_KEY_FOR_DEVELOPMENT")
17 ALLOWED_HOSTS = [".herokuapp.com", ".civiwiki.org", "127.0.0.1", "localhost", "0.0.0.0"]
18
19 INSTALLED_APPS = (
20 "django.contrib.admin",
21 "django.contrib.auth",
22 "django.contrib.contenttypes",
23 "django.contrib.sessions",
24 "django.contrib.messages",
25 "django.contrib.staticfiles",
26 "django_extensions",
27 "storages",
28 "core",
29 "rest_framework",
30 "accounts.apps.AccountsConfig",
31 "threads",
32 "notifications",
33 "corsheaders",
34 "taggit",
35 "categories",
36 "notification",
37 )
38
39 MIDDLEWARE = [
40 "corsheaders.middleware.CorsMiddleware",
41 "django.middleware.security.SecurityMiddleware",
42 "whitenoise.middleware.WhiteNoiseMiddleware",
43 "django.contrib.sessions.middleware.SessionMiddleware",
44 "django.middleware.common.CommonMiddleware",
45 "django.middleware.csrf.CsrfViewMiddleware",
46 "django.contrib.auth.middleware.AuthenticationMiddleware",
47 # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
48 "django.contrib.messages.middleware.MessageMiddleware",
49 "django.middleware.clickjacking.XFrameOptionsMiddleware",
50 ]
51
52 CSRF_USE_SESSIONS = (
53 True # Store the CSRF token in the users session instead of in a cookie
54 )
55
56 CORS_ORIGIN_ALLOW_ALL = True
57 ROOT_URLCONF = "core.urls"
58
59 # SSL Setup
60 if DJANGO_HOST != "LOCALHOST":
61 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
62 SECURE_SSL_REDIRECT = True
63 SESSION_COOKIE_SECURE = True
64 CSRF_COOKIE_SECURE = True
65
66 # Internationalization & Localization
67 LANGUAGE_CODE = "en-us"
68 TIME_ZONE = "UTC"
69 USE_I18N = True
70 USE_L10N = True
71 USE_TZ = True
72
73 TEMPLATES = [
74 {
75 "BACKEND": "django.template.backends.django.DjangoTemplates",
76 "DIRS": [
77 os.path.join(BASE_DIR, "threads/templates/threads"),
78 os.path.join(BASE_DIR, "accounts/templates/accounts"),
79 ], # TODO: Add non-webapp template directory
80 "APP_DIRS": True,
81 "OPTIONS": {
82 "context_processors": [
83 "django.template.context_processors.debug",
84 "django.template.context_processors.request",
85 "django.contrib.auth.context_processors.auth",
86 "django.contrib.messages.context_processors.messages",
87 ],
88 },
89 },
90 ]
91
92 WSGI_APPLICATION = "core.wsgi.application"
93
94 # Apex Contact for Production Errors
95 ADMINS = [("Development Team", "[email protected]")]
96
97 STATIC_URL = "/static/"
98 STATICFILES_DIRS = (os.path.join(BASE_DIR, "core/templates/static"),)
99 STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
100
101 MEDIA_ROOT = os.path.join(BASE_DIR, "media")
102 MEDIA_URL = "/media/"
103
104 # TODO: re-organize and simplify staticfiles settings
105 if "CIVIWIKI_LOCAL_NAME" not in os.environ:
106 STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
107
108 # Use DATABASE_URL in production
109 DATABASE_URL = os.getenv("DATABASE_URL")
110
111 if DATABASE_URL is not None:
112 DATABASES = {"default": DATABASE_URL}
113 else:
114 # Default to sqlite for simplicity in development
115 DATABASES = {
116 "default": {
117 "ENGINE": "django.db.backends.sqlite3",
118 "NAME": BASE_DIR + "/" + "db.sqlite3",
119 }
120 }
121
122 # Email Backend Setup
123 if "EMAIL_HOST" not in os.environ:
124 EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
125 EMAIL_HOST_USER = "[email protected]"
126 else:
127 EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
128 EMAIL_HOST = os.getenv("EMAIL_HOST")
129 EMAIL_PORT = os.getenv("EMAIL_PORT")
130 EMAIL_HOST_USER = os.getenv("EMAIL_HOST_USER")
131 EMAIL_HOST_PASSWORD = os.getenv("EMAIL_HOST_PASSWORD")
132 EMAIL_USE_SSL = True
133 DEFAULT_FROM_EMAIL = EMAIL_HOST
134
135 # Notification API Settings
136 NOTIFICATIONS_SOFT_DELETE = True
137 NOTIFICATIONS_USE_JSONFIELD = True
138
139 # Django REST API Settings
140 DEFAULT_RENDERER_CLASSES = ("rest_framework.renderers.JSONRenderer",)
141
142 if DEBUG:
143 # Browsable HTML - Enabled only in Debug mode (dev)
144 DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + (
145 "rest_framework.renderers.BrowsableAPIRenderer",
146 )
147
148 REST_FRAMEWORK = {
149 "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
150 "DEFAULT_RENDERER_CLASSES": DEFAULT_RENDERER_CLASSES,
151 "DEFAULT_AUTHENTICATION_CLASSES": (
152 "rest_framework.authentication.BasicAuthentication",
153 "rest_framework.authentication.SessionAuthentication",
154 ),
155 }
156
157 # CORS Settings
158 CORS_ORIGIN_ALLOW_ALL = True
159
160 # Custom User model
161 AUTH_USER_MODEL = "accounts.User"
162
163 DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
164
165 # Login Logout URLS
166 LOGIN_URL = "login/"
167 LOGIN_REDIRECT_URL = "/"
168 LOGOUT_REDIRECT_URL = "/"
169
170 AUTH_PASSWORD_VALIDATORS = [
171 {
172 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", # noqa: E501
173 },
174 {
175 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
176 "OPTIONS": {
177 "min_length": 4,
178 },
179 },
180 {
181 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
182 },
183 {
184 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
185 },
186 ]
187
[end of project/core/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/project/core/settings.py b/project/core/settings.py
--- a/project/core/settings.py
+++ b/project/core/settings.py
@@ -184,3 +184,20 @@
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
+
+LOGGING = {
+ "version": 1,
+ "disable_existing_loggers": False,
+ "formatters": {"rich": {"datefmt": "[%X]"}},
+ "handlers": {
+ "console": {
+ "class": "rich.logging.RichHandler",
+ "formatter": "rich",
+ "level": "WARNING",
+ # "filters": ["require_debug_true"],
+ "rich_tracebacks": True,
+ "tracebacks_show_locals": True,
+ }
+ },
+ "loggers": {"django": {"handlers": ["console"]}},
+}
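
The diff above wires Rich into Django's `LOGGING` setting; installing the dependency (`poetry add rich --group dev`) still has to be done separately, as the issue notes. A quick, purely illustrative smoke test — assuming the settings module is loaded, e.g. inside `python manage.py shell` — could be:

```py
# Illustrative only: emit a WARNING through the "django" logger configured above.
import logging

logging.getLogger("django").warning("Rich handler check")  # rendered by rich.logging.RichHandler
```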
| {"golden_diff": "diff --git a/project/core/settings.py b/project/core/settings.py\n--- a/project/core/settings.py\n+++ b/project/core/settings.py\n@@ -184,3 +184,20 @@\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n+\n+LOGGING = {\n+ \"version\": 1,\n+ \"disable_existing_loggers\": False,\n+ \"formatters\": {\"rich\": {\"datefmt\": \"[%X]\"}},\n+ \"handlers\": {\n+ \"console\": {\n+ \"class\": \"rich.logging.RichHandler\",\n+ \"formatter\": \"rich\",\n+ \"level\": \"WARNING\",\n+ # \"filters\": [\"require_debug_true\"],\n+ \"rich_tracebacks\": True,\n+ \"tracebacks_show_locals\": True,\n+ }\n+ },\n+ \"loggers\": {\"django\": {\"handlers\": [\"console\"]}},\n+}\n", "issue": "enable Rich terminal output\n## Task\r\n\r\n- [ ] add Rich to this project with the command `poetry add rich --group dev`\r\n- [ ] follow the Rich [configuration instructions](https://rich.readthedocs.io/en/stable/introduction.html)\r\n- [ ] add the following code to the `LOGGING = {...}` configuration in the project settings\r\n\r\n```py\r\n\"formatters\": {\r\n \"rich\": {\"datefmt\": \"[%X]\"},\r\n},\r\n```\r\n\r\n```py\r\n\"console\": {\r\n \"class\": \"rich.logging.RichHandler\",\r\n \"filters\": [\"require_debug_true\"],\r\n \"formatter\": \"rich\",\r\n \"level\": \"DEBUG\",\r\n \"rich_tracebacks\": True,\r\n \"tracebacks_show_locals\": True,\r\n},\r\n```\n", "before_files": [{"content": "\"\"\"\nDjango settings for civiwiki project.\nDarius Calliet May 12, 2016\n\nProduction settings file to select proper environment variables.\n\"\"\"\nimport os\n\n# False if not in os.environ\nDEBUG = os.getenv(\"DEBUG\", False)\n\n# defaults to second value if not found in os.environ\nDJANGO_HOST = os.getenv(\"DJANGO_HOST\", \"LOCALHOST\")\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = os.getenv(\"DJANGO_SECRET_KEY\", \"TEST_KEY_FOR_DEVELOPMENT\")\nALLOWED_HOSTS = [\".herokuapp.com\", \".civiwiki.org\", \"127.0.0.1\", \"localhost\", \"0.0.0.0\"]\n\nINSTALLED_APPS = (\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django_extensions\",\n \"storages\",\n \"core\",\n \"rest_framework\",\n \"accounts.apps.AccountsConfig\",\n \"threads\",\n \"notifications\",\n \"corsheaders\",\n \"taggit\",\n \"categories\",\n \"notification\",\n)\n\nMIDDLEWARE = [\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nCSRF_USE_SESSIONS = (\n True # Store the CSRF token in the users session instead of in a cookie\n)\n\nCORS_ORIGIN_ALLOW_ALL = True\nROOT_URLCONF = \"core.urls\"\n\n# SSL Setup\nif DJANGO_HOST != \"LOCALHOST\":\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n SECURE_SSL_REDIRECT = True\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n\n# Internationalization & Localization\nLANGUAGE_CODE = \"en-us\"\nTIME_ZONE = \"UTC\"\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = 
True\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n os.path.join(BASE_DIR, \"threads/templates/threads\"),\n os.path.join(BASE_DIR, \"accounts/templates/accounts\"),\n ], # TODO: Add non-webapp template directory\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"core.wsgi.application\"\n\n# Apex Contact for Production Errors\nADMINS = [(\"Development Team\", \"[email protected]\")]\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, \"core/templates/static\"),)\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\n\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\nMEDIA_URL = \"/media/\"\n\n# TODO: re-organize and simplify staticfiles settings\nif \"CIVIWIKI_LOCAL_NAME\" not in os.environ:\n STATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# Use DATABASE_URL in production\nDATABASE_URL = os.getenv(\"DATABASE_URL\")\n\nif DATABASE_URL is not None:\n DATABASES = {\"default\": DATABASE_URL}\nelse:\n # Default to sqlite for simplicity in development\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": BASE_DIR + \"/\" + \"db.sqlite3\",\n }\n }\n\n# Email Backend Setup\nif \"EMAIL_HOST\" not in os.environ:\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n EMAIL_HOST_USER = \"[email protected]\"\nelse:\n EMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n EMAIL_HOST = os.getenv(\"EMAIL_HOST\")\n EMAIL_PORT = os.getenv(\"EMAIL_PORT\")\n EMAIL_HOST_USER = os.getenv(\"EMAIL_HOST_USER\")\n EMAIL_HOST_PASSWORD = os.getenv(\"EMAIL_HOST_PASSWORD\")\n EMAIL_USE_SSL = True\n DEFAULT_FROM_EMAIL = EMAIL_HOST\n\n# Notification API Settings\nNOTIFICATIONS_SOFT_DELETE = True\nNOTIFICATIONS_USE_JSONFIELD = True\n\n# Django REST API Settings\nDEFAULT_RENDERER_CLASSES = (\"rest_framework.renderers.JSONRenderer\",)\n\nif DEBUG:\n # Browsable HTML - Enabled only in Debug mode (dev)\n DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + (\n \"rest_framework.renderers.BrowsableAPIRenderer\",\n )\n\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n \"DEFAULT_RENDERER_CLASSES\": DEFAULT_RENDERER_CLASSES,\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n}\n\n# CORS Settings\nCORS_ORIGIN_ALLOW_ALL = True\n\n# Custom User model\nAUTH_USER_MODEL = \"accounts.User\"\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Login Logout URLS\nLOGIN_URL = \"login/\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGOUT_REDIRECT_URL = \"/\"\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\", # noqa: E501\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n \"OPTIONS\": {\n \"min_length\": 4,\n },\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n", "path": "project/core/settings.py"}]} | 2,414 | 196 |
gh_patches_debug_54184 | rasdani/github-patches | git_diff | pyro-ppl__pyro-2846 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug] Runtime error during SVI inference when using poutine.do()
### Issue Description
Setting: a simple model with 2 latent Gaussians z1 and z2, giving rise to x ~ N( z1+z2, I).
In this setting p(z2 | x, z1) should be the same as p(z2 | x, do(z1)).
I wanted to check whether the current Pyro interface reflects this and it seems it does not.
My initial thought is that there is a difference in how .do() and .condition() broadcast the constants across the plate context.
### Environment
- OS and python version: MacOS 10.14.6, Python: 3.8.6
- PyTorch version: 1.9.0.dev20210502 (nightly version)
- Pyro version: 1.6.0.
### Code Snippet
Replication code:
https://pastebin.com/Ki2PYX7z
</issue>
<code>
[start of pyro/poutine/do_messenger.py]
1 # Copyright (c) 2017-2019 Uber Technologies, Inc.
2 # SPDX-License-Identifier: Apache-2.0
3
4 import numbers
5 import warnings
6
7 import torch
8
9 from .messenger import Messenger
10 from .runtime import apply_stack
11
12
13 class DoMessenger(Messenger):
14 """
15 Given a stochastic function with some sample statements
16 and a dictionary of values at names,
17 set the return values of those sites equal to the values
18 as if they were hard-coded to those values
19 and introduce fresh sample sites with the same names
20 whose values do not propagate.
21
22 Composes freely with :func:`~pyro.poutine.handlers.condition`
23 to represent counterfactual distributions over potential outcomes.
24 See Single World Intervention Graphs [1] for additional details and theory.
25
26 Consider the following Pyro program:
27
28 >>> def model(x):
29 ... s = pyro.param("s", torch.tensor(0.5))
30 ... z = pyro.sample("z", dist.Normal(x, s))
31 ... return z ** 2
32
33 To intervene with a value for site `z`, we can write
34
35 >>> intervened_model = pyro.poutine.do(model, data={"z": torch.tensor(1.)})
36
37 This is equivalent to replacing `z = pyro.sample("z", ...)` with
38 `z = torch.tensor(1.)`
39 and introducing a fresh sample site pyro.sample("z", ...) whose value is not used elsewhere.
40
41 References
42
43 [1] `Single World Intervention Graphs: A Primer`,
44 Thomas Richardson, James Robins
45
46 :param fn: a stochastic function (callable containing Pyro primitive calls)
47 :param data: a ``dict`` mapping sample site names to interventions
48 :returns: stochastic function decorated with a :class:`~pyro.poutine.do_messenger.DoMessenger`
49 """
50 def __init__(self, data):
51 super().__init__()
52 self.data = data
53 self._intervener_id = str(id(self))
54
55 def _pyro_sample(self, msg):
56 if msg.get('_intervener_id', None) != self._intervener_id and \
57 self.data.get(msg['name']) is not None:
58
59 if msg.get('_intervener_id', None) is not None:
60 warnings.warn(
61 "Attempting to intervene on variable {} multiple times,"
62 "this is almost certainly incorrect behavior".format(msg['name']),
63 RuntimeWarning)
64
65 msg['_intervener_id'] = self._intervener_id
66
67 # split node, avoid reapplying self recursively to new node
68 new_msg = msg.copy()
69 apply_stack(new_msg)
70
71 # apply intervention
72 intervention = self.data[msg['name']]
73 msg['name'] = msg['name'] + "__CF" # mangle old name
74
75 if isinstance(intervention, (numbers.Number, torch.Tensor)):
76 msg['value'] = intervention
77 msg['is_observed'] = True
78 msg['stop'] = True
79 else:
80 raise NotImplementedError(
81 "Interventions of type {} not implemented (yet)".format(type(intervention)))
82
83 return None
84
[end of pyro/poutine/do_messenger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyro/poutine/do_messenger.py b/pyro/poutine/do_messenger.py
--- a/pyro/poutine/do_messenger.py
+++ b/pyro/poutine/do_messenger.py
@@ -66,6 +66,7 @@
# split node, avoid reapplying self recursively to new node
new_msg = msg.copy()
+ new_msg["cond_indep_stack"] = () # avoid entering plates twice
apply_stack(new_msg)
# apply intervention
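
For context, a toy sketch in the spirit of the report (not the reporter's replication script) of intervening on a plate-local site once this change is applied:

```python
# Minimal sketch, assuming the patch above: the counterfactual node for "z1" is now
# created without re-entering the enclosing plate.
import torch
import pyro
import pyro.distributions as dist
from pyro import poutine


def model():
    with pyro.plate("data", 3):
        z1 = pyro.sample("z1", dist.Normal(0.0, 1.0))
        z2 = pyro.sample("z2", dist.Normal(0.0, 1.0))
        return pyro.sample("x", dist.Normal(z1 + z2, 1.0))


intervened = poutine.do(model, data={"z1": torch.zeros(3)})
x = intervened()  # draws x under do(z1 = 0)
print(x.shape)    # torch.Size([3])
```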
| {"golden_diff": "diff --git a/pyro/poutine/do_messenger.py b/pyro/poutine/do_messenger.py\n--- a/pyro/poutine/do_messenger.py\n+++ b/pyro/poutine/do_messenger.py\n@@ -66,6 +66,7 @@\n \n # split node, avoid reapplying self recursively to new node\n new_msg = msg.copy()\n+ new_msg[\"cond_indep_stack\"] = () # avoid entering plates twice\n apply_stack(new_msg)\n \n # apply intervention\n", "issue": "[bug] Runtime error during SVI inference when using poutine.do()\n### Issue Description\r\n\r\nSetting: a simple model with 2 latent Gaussians z1 and z2, giving rise to x ~ N( z1+z2, I).\r\n\r\nIn this setting p(z2 | x, z1) should be the same as p(z2 | x, do(z1)). \r\n\r\nI wanted to check whether the current Pyro interface reflects this and it seems it does not.\r\n\r\nMy initial thought is that there is a difference in how .do() and .condition() broadcast the constants across the plate context.\r\n\r\n### Environment\r\n\r\n - OS and python version: MacOS 10.14.6, Python: 3.8.6\r\n - PyTorch version: 1.9.0.dev20210502 (nightly version)\r\n - Pyro version: 1.6.0.\r\n\r\n### Code Snippet\r\n\r\nReplication code:\r\nhttps://pastebin.com/Ki2PYX7z\r\n\n", "before_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport numbers\nimport warnings\n\nimport torch\n\nfrom .messenger import Messenger\nfrom .runtime import apply_stack\n\n\nclass DoMessenger(Messenger):\n \"\"\"\n Given a stochastic function with some sample statements\n and a dictionary of values at names,\n set the return values of those sites equal to the values\n as if they were hard-coded to those values\n and introduce fresh sample sites with the same names\n whose values do not propagate.\n\n Composes freely with :func:`~pyro.poutine.handlers.condition`\n to represent counterfactual distributions over potential outcomes.\n See Single World Intervention Graphs [1] for additional details and theory.\n\n Consider the following Pyro program:\n\n >>> def model(x):\n ... s = pyro.param(\"s\", torch.tensor(0.5))\n ... z = pyro.sample(\"z\", dist.Normal(x, s))\n ... return z ** 2\n\n To intervene with a value for site `z`, we can write\n\n >>> intervened_model = pyro.poutine.do(model, data={\"z\": torch.tensor(1.)})\n\n This is equivalent to replacing `z = pyro.sample(\"z\", ...)` with\n `z = torch.tensor(1.)`\n and introducing a fresh sample site pyro.sample(\"z\", ...) 
whose value is not used elsewhere.\n\n References\n\n [1] `Single World Intervention Graphs: A Primer`,\n Thomas Richardson, James Robins\n\n :param fn: a stochastic function (callable containing Pyro primitive calls)\n :param data: a ``dict`` mapping sample site names to interventions\n :returns: stochastic function decorated with a :class:`~pyro.poutine.do_messenger.DoMessenger`\n \"\"\"\n def __init__(self, data):\n super().__init__()\n self.data = data\n self._intervener_id = str(id(self))\n\n def _pyro_sample(self, msg):\n if msg.get('_intervener_id', None) != self._intervener_id and \\\n self.data.get(msg['name']) is not None:\n\n if msg.get('_intervener_id', None) is not None:\n warnings.warn(\n \"Attempting to intervene on variable {} multiple times,\"\n \"this is almost certainly incorrect behavior\".format(msg['name']),\n RuntimeWarning)\n\n msg['_intervener_id'] = self._intervener_id\n\n # split node, avoid reapplying self recursively to new node\n new_msg = msg.copy()\n apply_stack(new_msg)\n\n # apply intervention\n intervention = self.data[msg['name']]\n msg['name'] = msg['name'] + \"__CF\" # mangle old name\n\n if isinstance(intervention, (numbers.Number, torch.Tensor)):\n msg['value'] = intervention\n msg['is_observed'] = True\n msg['stop'] = True\n else:\n raise NotImplementedError(\n \"Interventions of type {} not implemented (yet)\".format(type(intervention)))\n\n return None\n", "path": "pyro/poutine/do_messenger.py"}]} | 1,596 | 110 |
gh_patches_debug_1515 | rasdani/github-patches | git_diff | docker__docker-py-832 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
decode_auth function does not handle utf-8 logins or password
Hi,
I have found that the function **decode_auth** (line 96, [file](https://github.com/docker/docker-py/blob/master/docker/auth/auth.py)) fails when decoding UTF-8 passwords from the .dockercfg file, which leaves **load_config** returning an empty config.
I have checked that Docker Hub can handle UTF-8 passwords; this code proves it:
``` python
# coding=utf-8
from docker import Client
cred = { 'username': <user>, 'password': <utf-8 password> }
c = Client(base_url='unix://var/run/docker.sock')
res = c.pull(repository='<private container>', tag='latest', auth_config=cred)
print(res)
```
Thank you
</issue>
<code>
[start of docker/auth/auth.py]
1 # Copyright 2013 dotCloud inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import base64
16 import json
17 import logging
18 import os
19 import warnings
20
21 import six
22
23 from .. import constants
24 from .. import errors
25
26 INDEX_NAME = 'index.docker.io'
27 INDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)
28 DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
29 LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
30
31 log = logging.getLogger(__name__)
32
33
34 def resolve_repository_name(repo_name, insecure=False):
35 if insecure:
36 warnings.warn(
37 constants.INSECURE_REGISTRY_DEPRECATION_WARNING.format(
38 'resolve_repository_name()'
39 ), DeprecationWarning
40 )
41
42 if '://' in repo_name:
43 raise errors.InvalidRepository(
44 'Repository name cannot contain a scheme ({0})'.format(repo_name))
45 parts = repo_name.split('/', 1)
46 if '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost':
47 # This is a docker index repo (ex: foo/bar or ubuntu)
48 return INDEX_NAME, repo_name
49 if len(parts) < 2:
50 raise errors.InvalidRepository(
51 'Invalid repository name ({0})'.format(repo_name))
52
53 if 'index.docker.io' in parts[0]:
54 raise errors.InvalidRepository(
55 'Invalid repository name, try "{0}" instead'.format(parts[1])
56 )
57
58 return parts[0], parts[1]
59
60
61 def resolve_authconfig(authconfig, registry=None):
62 """
63 Returns the authentication data from the given auth configuration for a
64 specific registry. As with the Docker client, legacy entries in the config
65 with full URLs are stripped down to hostnames before checking for a match.
66 Returns None if no match was found.
67 """
68 # Default to the public index server
69 registry = convert_to_hostname(registry) if registry else INDEX_NAME
70 log.debug("Looking for auth entry for {0}".format(repr(registry)))
71
72 if registry in authconfig:
73 log.debug("Found {0}".format(repr(registry)))
74 return authconfig[registry]
75
76 for key, config in six.iteritems(authconfig):
77 if convert_to_hostname(key) == registry:
78 log.debug("Found {0}".format(repr(key)))
79 return config
80
81 log.debug("No entry found")
82 return None
83
84
85 def convert_to_hostname(url):
86 return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
87
88
89 def encode_auth(auth_info):
90 return base64.b64encode(auth_info.get('username', '') + b':' +
91 auth_info.get('password', ''))
92
93
94 def decode_auth(auth):
95 if isinstance(auth, six.string_types):
96 auth = auth.encode('ascii')
97 s = base64.b64decode(auth)
98 login, pwd = s.split(b':', 1)
99 return login.decode('ascii'), pwd.decode('ascii')
100
101
102 def encode_header(auth):
103 auth_json = json.dumps(auth).encode('ascii')
104 return base64.urlsafe_b64encode(auth_json)
105
106
107 def parse_auth(entries):
108 """
109 Parses authentication entries
110
111 Args:
112 entries: Dict of authentication entries.
113
114 Returns:
115 Authentication registry.
116 """
117
118 conf = {}
119 for registry, entry in six.iteritems(entries):
120 username, password = decode_auth(entry['auth'])
121 log.debug(
122 'Found entry (registry={0}, username={1})'
123 .format(repr(registry), repr(username))
124 )
125 conf[registry] = {
126 'username': username,
127 'password': password,
128 'email': entry['email'],
129 'serveraddress': registry,
130 }
131 return conf
132
133
134 def find_config_file(config_path=None):
135 environment_path = os.path.join(
136 os.environ.get('DOCKER_CONFIG'),
137 os.path.basename(DOCKER_CONFIG_FILENAME)
138 ) if os.environ.get('DOCKER_CONFIG') else None
139
140 paths = [
141 config_path, # 1
142 environment_path, # 2
143 os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME), # 3
144 os.path.join(
145 os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME
146 ) # 4
147 ]
148
149 for path in paths:
150 if path and os.path.exists(path):
151 return path
152 return None
153
154
155 def load_config(config_path=None):
156 """
157 Loads authentication data from a Docker configuration file in the given
158 root directory or if config_path is passed use given path.
159 Lookup priority:
160 explicit config_path parameter > DOCKER_CONFIG environment variable >
161 ~/.docker/config.json > ~/.dockercfg
162 """
163
164 config_file = find_config_file(config_path)
165
166 if not config_file:
167 log.debug("File doesn't exist")
168 return {}
169
170 try:
171 with open(config_file) as f:
172 data = json.load(f)
173 if data.get('auths'):
174 log.debug("Found 'auths' section")
175 return parse_auth(data['auths'])
176 else:
177 log.debug("Couldn't find 'auths' section")
178 f.seek(0)
179 return parse_auth(json.load(f))
180 except (IOError, KeyError, ValueError) as e:
181 # Likely missing new Docker config file or it's in an
182 # unknown format, continue to attempt to read old location
183 # and format.
184 log.debug(e)
185
186 log.debug("Attempting to parse legacy auth file format")
187 try:
188 data = []
189 with open(config_file) as f:
190 for line in f.readlines():
191 data.append(line.strip().split(' = ')[1])
192 if len(data) < 2:
193 # Not enough data
194 raise errors.InvalidConfigFile(
195 'Invalid or empty configuration file!'
196 )
197
198 username, password = decode_auth(data[0])
199 return {
200 INDEX_NAME: {
201 'username': username,
202 'password': password,
203 'email': data[1],
204 'serveraddress': INDEX_URL,
205 }
206 }
207 except Exception as e:
208 log.debug(e)
209 pass
210
211 log.debug("All parsing attempts failed - returning empty config")
212 return {}
213
[end of docker/auth/auth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/auth/auth.py b/docker/auth/auth.py
--- a/docker/auth/auth.py
+++ b/docker/auth/auth.py
@@ -96,7 +96,7 @@
auth = auth.encode('ascii')
s = base64.b64decode(auth)
login, pwd = s.split(b':', 1)
- return login.decode('ascii'), pwd.decode('ascii')
+ return login.decode('utf8'), pwd.decode('utf8')
def encode_header(auth):
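
A minimal round-trip check of the patched `decode_auth` with a non-ASCII password (illustrative only, Python 3, not part of the project's test suite):

```python
# Assumes the utf-8 fix above is applied.
import base64

from docker.auth.auth import decode_auth

entry = base64.b64encode("user:pässwörd".encode("utf-8"))
assert decode_auth(entry) == ("user", "pässwörd")
```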
| {"golden_diff": "diff --git a/docker/auth/auth.py b/docker/auth/auth.py\n--- a/docker/auth/auth.py\n+++ b/docker/auth/auth.py\n@@ -96,7 +96,7 @@\n auth = auth.encode('ascii')\n s = base64.b64decode(auth)\n login, pwd = s.split(b':', 1)\n- return login.decode('ascii'), pwd.decode('ascii')\n+ return login.decode('utf8'), pwd.decode('utf8')\n \n \n def encode_header(auth):\n", "issue": "decode_auth function does not handle utf-8 logins or password\nHI\n\nI have found that the function **decode_auth** (line 96, [file](https://github.com/docker/docker-py/blob/master/docker/auth/auth.py)) fails when decoding UTF-8 passwords from the .dockercfg file, and **load_config** returning an empty config.\n\nI have checked and docker hub can handle UTF-8 passwords, this code proves that:\n\n``` python\n# coding=utf-8\nfrom docker import Client\ncred = { 'username': <user>, 'password': <utf-8 password> }\nc = Client(base_url='unix://var/run/docker.sock')\nres = c.pull(repository='<private container>', tag='latest', auth_config=cred)\nprint(res)\n```\n\nThank you\n\n", "before_files": [{"content": "# Copyright 2013 dotCloud inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport json\nimport logging\nimport os\nimport warnings\n\nimport six\n\nfrom .. import constants\nfrom .. import errors\n\nINDEX_NAME = 'index.docker.io'\nINDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)\nDOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')\nLEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'\n\nlog = logging.getLogger(__name__)\n\n\ndef resolve_repository_name(repo_name, insecure=False):\n if insecure:\n warnings.warn(\n constants.INSECURE_REGISTRY_DEPRECATION_WARNING.format(\n 'resolve_repository_name()'\n ), DeprecationWarning\n )\n\n if '://' in repo_name:\n raise errors.InvalidRepository(\n 'Repository name cannot contain a scheme ({0})'.format(repo_name))\n parts = repo_name.split('/', 1)\n if '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost':\n # This is a docker index repo (ex: foo/bar or ubuntu)\n return INDEX_NAME, repo_name\n if len(parts) < 2:\n raise errors.InvalidRepository(\n 'Invalid repository name ({0})'.format(repo_name))\n\n if 'index.docker.io' in parts[0]:\n raise errors.InvalidRepository(\n 'Invalid repository name, try \"{0}\" instead'.format(parts[1])\n )\n\n return parts[0], parts[1]\n\n\ndef resolve_authconfig(authconfig, registry=None):\n \"\"\"\n Returns the authentication data from the given auth configuration for a\n specific registry. 
As with the Docker client, legacy entries in the config\n with full URLs are stripped down to hostnames before checking for a match.\n Returns None if no match was found.\n \"\"\"\n # Default to the public index server\n registry = convert_to_hostname(registry) if registry else INDEX_NAME\n log.debug(\"Looking for auth entry for {0}\".format(repr(registry)))\n\n if registry in authconfig:\n log.debug(\"Found {0}\".format(repr(registry)))\n return authconfig[registry]\n\n for key, config in six.iteritems(authconfig):\n if convert_to_hostname(key) == registry:\n log.debug(\"Found {0}\".format(repr(key)))\n return config\n\n log.debug(\"No entry found\")\n return None\n\n\ndef convert_to_hostname(url):\n return url.replace('http://', '').replace('https://', '').split('/', 1)[0]\n\n\ndef encode_auth(auth_info):\n return base64.b64encode(auth_info.get('username', '') + b':' +\n auth_info.get('password', ''))\n\n\ndef decode_auth(auth):\n if isinstance(auth, six.string_types):\n auth = auth.encode('ascii')\n s = base64.b64decode(auth)\n login, pwd = s.split(b':', 1)\n return login.decode('ascii'), pwd.decode('ascii')\n\n\ndef encode_header(auth):\n auth_json = json.dumps(auth).encode('ascii')\n return base64.urlsafe_b64encode(auth_json)\n\n\ndef parse_auth(entries):\n \"\"\"\n Parses authentication entries\n\n Args:\n entries: Dict of authentication entries.\n\n Returns:\n Authentication registry.\n \"\"\"\n\n conf = {}\n for registry, entry in six.iteritems(entries):\n username, password = decode_auth(entry['auth'])\n log.debug(\n 'Found entry (registry={0}, username={1})'\n .format(repr(registry), repr(username))\n )\n conf[registry] = {\n 'username': username,\n 'password': password,\n 'email': entry['email'],\n 'serveraddress': registry,\n }\n return conf\n\n\ndef find_config_file(config_path=None):\n environment_path = os.path.join(\n os.environ.get('DOCKER_CONFIG'),\n os.path.basename(DOCKER_CONFIG_FILENAME)\n ) if os.environ.get('DOCKER_CONFIG') else None\n\n paths = [\n config_path, # 1\n environment_path, # 2\n os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME), # 3\n os.path.join(\n os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME\n ) # 4\n ]\n\n for path in paths:\n if path and os.path.exists(path):\n return path\n return None\n\n\ndef load_config(config_path=None):\n \"\"\"\n Loads authentication data from a Docker configuration file in the given\n root directory or if config_path is passed use given path.\n Lookup priority:\n explicit config_path parameter > DOCKER_CONFIG environment variable >\n ~/.docker/config.json > ~/.dockercfg\n \"\"\"\n\n config_file = find_config_file(config_path)\n\n if not config_file:\n log.debug(\"File doesn't exist\")\n return {}\n\n try:\n with open(config_file) as f:\n data = json.load(f)\n if data.get('auths'):\n log.debug(\"Found 'auths' section\")\n return parse_auth(data['auths'])\n else:\n log.debug(\"Couldn't find 'auths' section\")\n f.seek(0)\n return parse_auth(json.load(f))\n except (IOError, KeyError, ValueError) as e:\n # Likely missing new Docker config file or it's in an\n # unknown format, continue to attempt to read old location\n # and format.\n log.debug(e)\n\n log.debug(\"Attempting to parse legacy auth file format\")\n try:\n data = []\n with open(config_file) as f:\n for line in f.readlines():\n data.append(line.strip().split(' = ')[1])\n if len(data) < 2:\n # Not enough data\n raise errors.InvalidConfigFile(\n 'Invalid or empty configuration file!'\n )\n\n username, password = decode_auth(data[0])\n return {\n 
INDEX_NAME: {\n 'username': username,\n 'password': password,\n 'email': data[1],\n 'serveraddress': INDEX_URL,\n }\n }\n except Exception as e:\n log.debug(e)\n pass\n\n log.debug(\"All parsing attempts failed - returning empty config\")\n return {}\n", "path": "docker/auth/auth.py"}]} | 2,697 | 107 |
gh_patches_debug_2456 | rasdani/github-patches | git_diff | biolab__orange3-text-358 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Guardian: Fix failing tests on Travis
<!--
This is an issue template. Please fill in the relevant details in the
sections below.
-->
##### Text version
<!-- From menu _Options→Add-ons→Orange3-Text_ or code `orangecontrib.text.version.full_version` -->
0.3.0
##### Orange version
<!-- From menu _Help→About→Version_ or code `Orange.version.full_version` -->
3.15.dev
##### Expected behavior
Tests pass.
##### Actual behavior
Guardian tests are failing.
##### Steps to reproduce the behavior
##### Additional info (worksheets, data, screenshots, ...)
Fix tests.
</issue>
<code>
[start of orangecontrib/text/guardian.py]
1 """ This module fetches data from The Guardian API.
2
3 To use first create :class:`TheGuardianCredentials`:
4
5 >>> from orangecontrib.text.guardian import TheGuardianCredentials
6 >>> credentials = TheGuardianCredentials('<your-api-key>')
7
8 Then create :class:`TheGuardianAPI` object and use it for searching:
9
10 >>> from orangecontrib.text.guardian import TheGuardianAPI
11 >>> api = TheGuardianAPI(credentials)
12 >>> corpus = api.search('Slovenia', max_documents=10)
13 >>> len(corpus)
14 10
15
16 """
17
18 import requests
19 import math
20 import json
21
22 from Orange import data
23
24 from orangecontrib.text.corpus import Corpus
25
26
27 BASE_URL = 'http://content.guardianapis.com/search'
28 ARTICLES_PER_PAGE = 10
29
30
31 class TheGuardianCredentials:
32 """ The Guardian API credentials. """
33 def __init__(self, key):
34 """
35 Args:
36 key (str): The Guardian API key. Use `test` for testing purposes.
37 """
38 self.key = key
39
40 @property
41 def valid(self):
42 """ Check if given API key is valid. """
43 response = requests.get(BASE_URL, {'api-key': self.key})
44 return response.status_code != 403 # 403 == Forbidden
45
46 def __eq__(self, other):
47 return self.key == other.key
48
49
50 class TheGuardianAPI:
51 attributes = []
52
53 class_vars = [
54 (data.DiscreteVariable('Section'), lambda doc: doc['sectionName']),
55 ]
56
57 tv = data.TimeVariable('Publication Date')
58 metas = [
59 (data.StringVariable('Headline'), lambda doc: doc['fields']['headline']),
60 (data.StringVariable('Content'), lambda doc: doc['fields']['bodyText']),
61 (data.StringVariable('Trail Text'), lambda doc: doc['fields']['trailText']),
62 (data.StringVariable('HTML'), lambda doc: doc['fields']['body']),
63 (tv, lambda doc: TheGuardianAPI.tv.parse(doc['webPublicationDate'])),
64 (data.DiscreteVariable('Type'), lambda doc: doc['type']),
65 (data.DiscreteVariable('Language'), lambda doc: doc['fields']['lang']),
66 (data.StringVariable('Tags'),
67 lambda doc: ', '.join(tag['webTitle'] for tag in doc['tags'])),
68 (data.StringVariable('URL'), lambda doc: doc['webUrl']),
69 (data.ContinuousVariable('Word Count', number_of_decimals=0),
70 lambda doc: doc['fields']['wordcount']),
71 ]
72
73 text_features = [metas[0][0], metas[1][0]] # Headline + Content
74 title_indices = [-1] # Headline
75
76 def __init__(self, credentials, on_progress=None, should_break=None):
77 """
78 Args:
79 credentials (:class:`TheGuardianCredentials`): The Guardian Creentials.
80 on_progress (callable): Function for progress reporting.
81 should_break (callable): Function for early stopping.
82 """
83 self.per_page = ARTICLES_PER_PAGE
84 self.pages = 0
85 self.credentials = credentials
86 self.on_progress = on_progress or (lambda x, y: None)
87 self.should_break = should_break or (lambda: False)
88
89 self.results = []
90
91 def _search(self, query, from_date, to_date, page=1):
92 data = self._build_query(query, from_date, to_date, page)
93
94 response = requests.get(BASE_URL, data)
95 parsed = json.loads(response.text)
96
97 if page == 1: # store number of pages
98 self.pages = parsed['response']['pages']
99
100 self.results.extend(parsed['response']['results'])
101
102 def _build_query(self, query, from_date=None, to_date=None, page=1):
103 data = {
104 'q': query,
105 'api-key': self.credentials.key,
106 'page': str(page),
107 'show-fields': 'headline,trailText,body,bodyText,lang,wordcount',
108 'show-tags': 'all',
109 }
110 if from_date is not None:
111 data['from-date'] = from_date
112 if to_date is not None:
113 data['to-date'] = to_date
114
115 return data
116
117 def search(self, query, from_date=None, to_date=None, max_documents=None,
118 accumulate=False):
119 """
120 Search The Guardian API for articles.
121
122 Args:
123 query (str): A query for searching the articles by
124 from_date (str): Search only articles newer than the date provided.
125 Date should be in ISO format; e.g. '2016-12-31'.
126 to_date (str): Search only articles older than the date provided.
127 Date should be in ISO format; e.g. '2016-12-31'.
128 max_documents (int): Maximum number of documents to retrieve.
129 When not given, retrieve all documents.
130 accumulate (bool): A flag indicating whether to accumulate results
131 of multiple consequent search calls.
132
133 Returns:
134 :ref:`Corpus`
135 """
136 if not accumulate:
137 self.results = []
138
139 self._search(query, from_date, to_date)
140
141 pages = math.ceil(max_documents/self.per_page) if max_documents else self.pages
142 self.on_progress(self.per_page, pages * self.per_page)
143
144 for p in range(2, pages+1): # to one based
145 if self.should_break():
146 break
147 self._search(query, from_date, to_date, p)
148 self.on_progress(p*self.per_page, pages * self.per_page)
149
150 c = Corpus.from_documents(
151 self.results, 'The Guardian', self.attributes, self.class_vars,
152 self.metas, title_indices=self.title_indices)
153 c.text_features = self.text_features
154 return c
155
156
157 if __name__ == '__main__':
158 credentials = TheGuardianCredentials('')
159 print(credentials.valid)
160 api = TheGuardianAPI(credentials=credentials)
161 c = api.search('refugees', max_documents=10)
162 print(c)
163
[end of orangecontrib/text/guardian.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/orangecontrib/text/guardian.py b/orangecontrib/text/guardian.py
--- a/orangecontrib/text/guardian.py
+++ b/orangecontrib/text/guardian.py
@@ -155,7 +155,7 @@
if __name__ == '__main__':
- credentials = TheGuardianCredentials('')
+ credentials = TheGuardianCredentials('test')
print(credentials.valid)
api = TheGuardianAPI(credentials=credentials)
c = api.search('refugees', max_documents=10)
| {"golden_diff": "diff --git a/orangecontrib/text/guardian.py b/orangecontrib/text/guardian.py\n--- a/orangecontrib/text/guardian.py\n+++ b/orangecontrib/text/guardian.py\n@@ -155,7 +155,7 @@\n \n \n if __name__ == '__main__':\n- credentials = TheGuardianCredentials('')\n+ credentials = TheGuardianCredentials('test')\n print(credentials.valid)\n api = TheGuardianAPI(credentials=credentials)\n c = api.search('refugees', max_documents=10)\n", "issue": "Guardian: Fix failing tests on Travis\n<!--\r\nThis is an issue template. Please fill in the relevant details in the\r\nsections below.\r\n-->\r\n\r\n##### Text version\r\n<!-- From menu _Options\u2192Add-ons\u2192Orange3-Text_ or code `orangecontrib.text.version.full_version` -->\r\n0.3.0\r\n\r\n##### Orange version\r\n<!-- From menu _Help\u2192About\u2192Version_ or code `Orange.version.full_version` -->\r\n3.15.dev\r\n\r\n##### Expected behavior\r\nTests pass.\r\n\r\n\r\n##### Actual behavior\r\nGuardian tests is failing.\r\n\r\n\r\n##### Steps to reproduce the behavior\r\n\r\n\r\n\r\n##### Additional info (worksheets, data, screenshots, ...)\r\nFix tests.\r\n\r\n\n", "before_files": [{"content": "\"\"\" This module fetches data from The Guardian API.\n\nTo use first create :class:`TheGuardianCredentials`:\n\n >>> from orangecontrib.text.guardian import TheGuardianCredentials\n >>> credentials = TheGuardianCredentials('<your-api-key>')\n\nThen create :class:`TheGuardianAPI` object and use it for searching:\n\n >>> from orangecontrib.text.guardian import TheGuardianAPI\n >>> api = TheGuardianAPI(credentials)\n >>> corpus = api.search('Slovenia', max_documents=10)\n >>> len(corpus)\n 10\n\n\"\"\"\n\nimport requests\nimport math\nimport json\n\nfrom Orange import data\n\nfrom orangecontrib.text.corpus import Corpus\n\n\nBASE_URL = 'http://content.guardianapis.com/search'\nARTICLES_PER_PAGE = 10\n\n\nclass TheGuardianCredentials:\n \"\"\" The Guardian API credentials. \"\"\"\n def __init__(self, key):\n \"\"\"\n Args:\n key (str): The Guardian API key. Use `test` for testing purposes.\n \"\"\"\n self.key = key\n\n @property\n def valid(self):\n \"\"\" Check if given API key is valid. 
\"\"\"\n response = requests.get(BASE_URL, {'api-key': self.key})\n return response.status_code != 403 # 403 == Forbidden\n\n def __eq__(self, other):\n return self.key == other.key\n\n\nclass TheGuardianAPI:\n attributes = []\n\n class_vars = [\n (data.DiscreteVariable('Section'), lambda doc: doc['sectionName']),\n ]\n\n tv = data.TimeVariable('Publication Date')\n metas = [\n (data.StringVariable('Headline'), lambda doc: doc['fields']['headline']),\n (data.StringVariable('Content'), lambda doc: doc['fields']['bodyText']),\n (data.StringVariable('Trail Text'), lambda doc: doc['fields']['trailText']),\n (data.StringVariable('HTML'), lambda doc: doc['fields']['body']),\n (tv, lambda doc: TheGuardianAPI.tv.parse(doc['webPublicationDate'])),\n (data.DiscreteVariable('Type'), lambda doc: doc['type']),\n (data.DiscreteVariable('Language'), lambda doc: doc['fields']['lang']),\n (data.StringVariable('Tags'),\n lambda doc: ', '.join(tag['webTitle'] for tag in doc['tags'])),\n (data.StringVariable('URL'), lambda doc: doc['webUrl']),\n (data.ContinuousVariable('Word Count', number_of_decimals=0),\n lambda doc: doc['fields']['wordcount']),\n ]\n\n text_features = [metas[0][0], metas[1][0]] # Headline + Content\n title_indices = [-1] # Headline\n\n def __init__(self, credentials, on_progress=None, should_break=None):\n \"\"\"\n Args:\n credentials (:class:`TheGuardianCredentials`): The Guardian Creentials.\n on_progress (callable): Function for progress reporting.\n should_break (callable): Function for early stopping.\n \"\"\"\n self.per_page = ARTICLES_PER_PAGE\n self.pages = 0\n self.credentials = credentials\n self.on_progress = on_progress or (lambda x, y: None)\n self.should_break = should_break or (lambda: False)\n\n self.results = []\n\n def _search(self, query, from_date, to_date, page=1):\n data = self._build_query(query, from_date, to_date, page)\n\n response = requests.get(BASE_URL, data)\n parsed = json.loads(response.text)\n\n if page == 1: # store number of pages\n self.pages = parsed['response']['pages']\n\n self.results.extend(parsed['response']['results'])\n\n def _build_query(self, query, from_date=None, to_date=None, page=1):\n data = {\n 'q': query,\n 'api-key': self.credentials.key,\n 'page': str(page),\n 'show-fields': 'headline,trailText,body,bodyText,lang,wordcount',\n 'show-tags': 'all',\n }\n if from_date is not None:\n data['from-date'] = from_date\n if to_date is not None:\n data['to-date'] = to_date\n\n return data\n\n def search(self, query, from_date=None, to_date=None, max_documents=None,\n accumulate=False):\n \"\"\"\n Search The Guardian API for articles.\n\n Args:\n query (str): A query for searching the articles by\n from_date (str): Search only articles newer than the date provided.\n Date should be in ISO format; e.g. '2016-12-31'.\n to_date (str): Search only articles older than the date provided.\n Date should be in ISO format; e.g. 
'2016-12-31'.\n max_documents (int): Maximum number of documents to retrieve.\n When not given, retrieve all documents.\n accumulate (bool): A flag indicating whether to accumulate results\n of multiple consequent search calls.\n\n Returns:\n :ref:`Corpus`\n \"\"\"\n if not accumulate:\n self.results = []\n\n self._search(query, from_date, to_date)\n\n pages = math.ceil(max_documents/self.per_page) if max_documents else self.pages\n self.on_progress(self.per_page, pages * self.per_page)\n\n for p in range(2, pages+1): # to one based\n if self.should_break():\n break\n self._search(query, from_date, to_date, p)\n self.on_progress(p*self.per_page, pages * self.per_page)\n\n c = Corpus.from_documents(\n self.results, 'The Guardian', self.attributes, self.class_vars,\n self.metas, title_indices=self.title_indices)\n c.text_features = self.text_features\n return c\n\n\nif __name__ == '__main__':\n credentials = TheGuardianCredentials('')\n print(credentials.valid)\n api = TheGuardianAPI(credentials=credentials)\n c = api.search('refugees', max_documents=10)\n print(c)\n", "path": "orangecontrib/text/guardian.py"}]} | 2,385 | 119 |
gh_patches_debug_58564 | rasdani/github-patches | git_diff | codespell-project__codespell-2626 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`python setup.py check` → `twine check`
Because `setup.py ...` is deprecated, we need an alternative to `setup.py check` such as `twine`.
</issue>
<code>
[start of setup.py]
1 #! /usr/bin/env python
2
3 from setuptools import setup
4
5 if __name__ == "__main__":
6 setup()
7
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
deleted file mode 100755
--- a/setup.py
+++ /dev/null
@@ -1,6 +0,0 @@
-#! /usr/bin/env python
-
-from setuptools import setup
-
-if __name__ == "__main__":
- setup()
| {"golden_diff": "diff --git a/setup.py b/setup.py\ndeleted file mode 100755\n--- a/setup.py\n+++ /dev/null\n@@ -1,6 +0,0 @@\n-#! /usr/bin/env python\n-\n-from setuptools import setup\n-\n-if __name__ == \"__main__\":\n- setup()\n", "issue": "`python setup.py check` \u2192 `twine check`\nBecause `setup.py ...` is deprecated, we need an alternative to `setup.py check` such as `twine`.\n", "before_files": [{"content": "#! /usr/bin/env python\n\nfrom setuptools import setup\n\nif __name__ == \"__main__\":\n setup()\n", "path": "setup.py"}]} | 599 | 68 |
gh_patches_debug_5766 | rasdani/github-patches | git_diff | napari__napari-4259 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Previously selected point deleted when deleting layer
## 🐛 Bug
Recently selected points are erroneously removed when deleting new layers with the delete key (reproduced with both the points and labels layers).
## To Reproduce
Steps to reproduce the behaviour:
1. Create a point on a points layer
2. Create a new points layer
3. Select the newly created points layer from the layer list (visually deselecting the point)
4. Delete newly created layer using the delete key, the last selected point will also be deleted
Please note that this issue does not occur when the layer is deleted using the bin icon, leading me to believe it is a keybinding issue (and the point must still be 'selected' in some capacity).
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
https://user-images.githubusercontent.com/95660545/156966137-b2a645a6-25ae-42b4-baf7-137e7506e20a.mp4
## Expected behaviour
It is expected that only the newly created points layer (with no points assigned to it) should be deleted, not the point as well.
<!-- A clear and concise description of what you expected to happen. -->
## Environment
napari: 0.4.15.dev68+gdd3a2afd
Platform: Windows-10-10.0.19044-SP0
Python: 3.9.7 (default, Sep 16 2021, 16:59:28) [MSC v.1916 64 bit (AMD64)]
Qt: 5.15.2
PyQt5: 5.15.6
NumPy: 1.21.5
SciPy: 1.7.3
Dask: 2022.01.0
VisPy: 0.9.6
OpenGL:
- GL version: 4.6.0 - Build 26.20.100.7372
- MAX_TEXTURE_SIZE: 16384
Screens:
- screen 1: resolution 1920x1080, scale 1.0
Plugins:
- console: 0.0.4
- scikit-image: 0.4.15.dev68+gdd3a2afd
- svg: 0.1.6
napari contributors (2019). napari: a multi-dimensional image viewer for python. doi:10.5281/zenodo.3555620
</issue>
<code>
[start of napari/_qt/containers/qt_layer_list.py]
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING
4
5 from qtpy.QtCore import QSortFilterProxyModel, Qt
6
7 from ...layers import Layer
8 from ...utils.translations import trans
9 from ._base_item_model import SortRole, _BaseEventedItemModel
10 from ._layer_delegate import LayerDelegate
11 from .qt_list_view import QtListView
12
13 if TYPE_CHECKING:
14 from qtpy.QtGui import QKeyEvent
15 from qtpy.QtWidgets import QWidget
16
17 from ...components.layerlist import LayerList
18
19
20 class ReverseProxyModel(QSortFilterProxyModel):
21 """Proxy Model that reverses the view order of a _BaseEventedItemModel."""
22
23 def __init__(self, model: _BaseEventedItemModel) -> None:
24 super().__init__()
25 self.setSourceModel(model)
26 self.setSortRole(SortRole)
27 self.sort(0, Qt.DescendingOrder)
28
29 def dropMimeData(self, data, action, destRow, col, parent):
30 """Handle destination row for dropping with reversed indices."""
31 row = 0 if destRow == -1 else self.sourceModel().rowCount() - destRow
32 return self.sourceModel().dropMimeData(data, action, row, col, parent)
33
34
35 class QtLayerList(QtListView[Layer]):
36 """QItemView subclass specialized for the LayerList.
37
38 This is as mostly for targetting with QSS, applying the delegate and
39 reversing the view with ReverseProxyModel.
40 """
41
42 def __init__(self, root: LayerList, parent: QWidget = None):
43 super().__init__(root, parent)
44 self.setItemDelegate(LayerDelegate())
45 self.setToolTip(trans._('Layer list'))
46 font = self.font()
47 font.setPointSize(12)
48 self.setFont(font)
49
50 # This reverses the order of the items in the view,
51 # so items at the end of the list are at the top.
52 self.setModel(ReverseProxyModel(self.model()))
53
54 def keyPressEvent(self, e: QKeyEvent) -> None:
55 """Override Qt event to pass events to the viewer."""
56 if e.key() != Qt.Key_Space:
57 super().keyPressEvent(e)
58
59 e.ignore() # pass key events up to viewer
60
[end of napari/_qt/containers/qt_layer_list.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/napari/_qt/containers/qt_layer_list.py b/napari/_qt/containers/qt_layer_list.py
--- a/napari/_qt/containers/qt_layer_list.py
+++ b/napari/_qt/containers/qt_layer_list.py
@@ -53,7 +53,7 @@
def keyPressEvent(self, e: QKeyEvent) -> None:
"""Override Qt event to pass events to the viewer."""
- if e.key() != Qt.Key_Space:
+ if e.key() != Qt.Key.Key_Space:
super().keyPressEvent(e)
-
- e.ignore() # pass key events up to viewer
+ if e.key() not in (Qt.Key.Key_Backspace, Qt.Key.Key_Delete):
+ e.ignore() # pass key events up to viewer
| {"golden_diff": "diff --git a/napari/_qt/containers/qt_layer_list.py b/napari/_qt/containers/qt_layer_list.py\n--- a/napari/_qt/containers/qt_layer_list.py\n+++ b/napari/_qt/containers/qt_layer_list.py\n@@ -53,7 +53,7 @@\n \n def keyPressEvent(self, e: QKeyEvent) -> None:\n \"\"\"Override Qt event to pass events to the viewer.\"\"\"\n- if e.key() != Qt.Key_Space:\n+ if e.key() != Qt.Key.Key_Space:\n super().keyPressEvent(e)\n-\n- e.ignore() # pass key events up to viewer\n+ if e.key() not in (Qt.Key.Key_Backspace, Qt.Key.Key_Delete):\n+ e.ignore() # pass key events up to viewer\n", "issue": "Previously selected point deleted when deleting layer\n## \ud83d\udc1b Bug\r\n\r\nRecently selected points are erroneously removed when deleting new layers with the delete key. (reproduced with points and labels layer)\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behaviour:\r\n\r\n1. Create a point on a points layer\r\n2. Create a new points layer\r\n3. Select the newly created points layer from the layer list (visually deselecting the point)\r\n4. Delete newly created layer using the delete key, the last selected point will also be deleted\r\n\r\nPlease note that this issue does not occur when the layer is deleted using the bin icon, leading me to believe it is a keybinding issue (and the point must still be 'selected' in come capacity)\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n\r\nhttps://user-images.githubusercontent.com/95660545/156966137-b2a645a6-25ae-42b4-baf7-137e7506e20a.mp4\r\n\r\n\r\n## Expected behaviour\r\nIt is expected that only the newly created points layer (with no points assigned to it) should be deleted, not the point as well.\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n## Environment\r\n\r\nnapari: 0.4.15.dev68+gdd3a2afd\r\nPlatform: Windows-10-10.0.19044-SP0\r\nPython: 3.9.7 (default, Sep 16 2021, 16:59:28) [MSC v.1916 64 bit (AMD64)]\r\nQt: 5.15.2\r\nPyQt5: 5.15.6\r\nNumPy: 1.21.5\r\nSciPy: 1.7.3\r\nDask: 2022.01.0\r\nVisPy: 0.9.6\r\n\r\nOpenGL:\r\n- GL version: 4.6.0 - Build 26.20.100.7372\r\n- MAX_TEXTURE_SIZE: 16384\r\n\r\nScreens:\r\n- screen 1: resolution 1920x1080, scale 1.0\r\n\r\nPlugins:\r\n- console: 0.0.4\r\n- scikit-image: 0.4.15.dev68+gdd3a2afd\r\n- svg: 0.1.6\r\n\r\nnapari contributors (2019). napari: a multi-dimensional image viewer for python. 
doi:10.5281/zenodo.3555620\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom qtpy.QtCore import QSortFilterProxyModel, Qt\n\nfrom ...layers import Layer\nfrom ...utils.translations import trans\nfrom ._base_item_model import SortRole, _BaseEventedItemModel\nfrom ._layer_delegate import LayerDelegate\nfrom .qt_list_view import QtListView\n\nif TYPE_CHECKING:\n from qtpy.QtGui import QKeyEvent\n from qtpy.QtWidgets import QWidget\n\n from ...components.layerlist import LayerList\n\n\nclass ReverseProxyModel(QSortFilterProxyModel):\n \"\"\"Proxy Model that reverses the view order of a _BaseEventedItemModel.\"\"\"\n\n def __init__(self, model: _BaseEventedItemModel) -> None:\n super().__init__()\n self.setSourceModel(model)\n self.setSortRole(SortRole)\n self.sort(0, Qt.DescendingOrder)\n\n def dropMimeData(self, data, action, destRow, col, parent):\n \"\"\"Handle destination row for dropping with reversed indices.\"\"\"\n row = 0 if destRow == -1 else self.sourceModel().rowCount() - destRow\n return self.sourceModel().dropMimeData(data, action, row, col, parent)\n\n\nclass QtLayerList(QtListView[Layer]):\n \"\"\"QItemView subclass specialized for the LayerList.\n\n This is as mostly for targetting with QSS, applying the delegate and\n reversing the view with ReverseProxyModel.\n \"\"\"\n\n def __init__(self, root: LayerList, parent: QWidget = None):\n super().__init__(root, parent)\n self.setItemDelegate(LayerDelegate())\n self.setToolTip(trans._('Layer list'))\n font = self.font()\n font.setPointSize(12)\n self.setFont(font)\n\n # This reverses the order of the items in the view,\n # so items at the end of the list are at the top.\n self.setModel(ReverseProxyModel(self.model()))\n\n def keyPressEvent(self, e: QKeyEvent) -> None:\n \"\"\"Override Qt event to pass events to the viewer.\"\"\"\n if e.key() != Qt.Key_Space:\n super().keyPressEvent(e)\n\n e.ignore() # pass key events up to viewer\n", "path": "napari/_qt/containers/qt_layer_list.py"}]} | 1,737 | 175 |
gh_patches_debug_16311 | rasdani/github-patches | git_diff | spotify__luigi-368 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LuigiConfigParser::add_config_path() raises if instance() hasn't been accessed
To add a path to the list of config paths, one currently has to do:
``` python
LuigiConfigParser.instance() # remove this and get an exception
LuigiConfigParser.add_config_path(my_path)
```
because `add_config_path` tries to reload `cls._instance` which is initialized with `None`. Wouldn't it be cleaner to do a check there and only reload a non-null instance?
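For illustration only, the guard suggested above might look something like this (a hypothetical sketch, not necessarily the fix the maintainers will choose):

``` python
@classmethod
def add_config_path(cls, path):
    cls._config_paths.append(path)
    # Only reload when the singleton has already been created; otherwise the
    # next call to instance() will read the new path anyway on first load.
    if cls._instance is not None:
        cls._instance.reload()
```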
</issue>
<code>
[start of luigi/configuration.py]
1
2 import os
3 import logging
4 from ConfigParser import ConfigParser, NoOptionError, NoSectionError
5
6
7 class LuigiConfigParser(ConfigParser):
8 NO_DEFAULT = object()
9 _instance = None
10 _config_paths = ['/etc/luigi/client.cfg', 'client.cfg']
11 if 'LUIGI_CONFIG_PATH' in os.environ:
12 _config_paths.append(os.environ['LUIGI_CONFIG_PATH'])
13
14 @classmethod
15 def add_config_path(cls, path):
16 cls._config_paths.append(path)
17 cls._instance.reload()
18
19 @classmethod
20 def instance(cls, *args, **kwargs):
21 """ Singleton getter """
22 if cls._instance is None:
23 cls._instance = cls(*args, **kwargs)
24 loaded = cls._instance.reload()
25 logging.getLogger('luigi-interface').info('Loaded %r', loaded)
26
27 return cls._instance
28
29 def reload(self):
30 return self._instance.read(self._config_paths)
31
32 def _get_with_default(self, method, section, option, default, expected_type=None):
33 """ Gets the value of the section/option using method. Returns default if value
34 is not found. Raises an exception if the default value is not None and doesn't match
35 the expected_type.
36 """
37 try:
38 return method(self, section, option)
39 except (NoOptionError, NoSectionError):
40 if default is LuigiConfigParser.NO_DEFAULT:
41 raise
42 if expected_type is not None and default is not None and \
43 not isinstance(default, expected_type):
44 raise
45 return default
46
47 def get(self, section, option, default=NO_DEFAULT):
48 return self._get_with_default(ConfigParser.get, section, option, default)
49
50 def getboolean(self, section, option, default=NO_DEFAULT):
51 return self._get_with_default(ConfigParser.getboolean, section, option, default, bool)
52
53 def getint(self, section, option, default=NO_DEFAULT):
54 return self._get_with_default(ConfigParser.getint, section, option, default, int)
55
56 def getfloat(self, section, option, default=NO_DEFAULT):
57 return self._get_with_default(ConfigParser.getfloat, section, option, default, float)
58
59 def set(self, section, option, value):
60 if not ConfigParser.has_section(self, section):
61 ConfigParser.add_section(self, section)
62
63 return ConfigParser.set(self, section, option, value)
64
65 def get_config():
66 """ Convenience method (for backwards compatibility) for accessing config singleton """
67 return LuigiConfigParser.instance()
68
[end of luigi/configuration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/luigi/configuration.py b/luigi/configuration.py
--- a/luigi/configuration.py
+++ b/luigi/configuration.py
@@ -14,7 +14,7 @@
@classmethod
def add_config_path(cls, path):
cls._config_paths.append(path)
- cls._instance.reload()
+ cls.reload()
@classmethod
def instance(cls, *args, **kwargs):
@@ -26,8 +26,9 @@
return cls._instance
- def reload(self):
- return self._instance.read(self._config_paths)
+ @classmethod
+ def reload(cls):
+ return cls.instance().read(cls._config_paths)
def _get_with_default(self, method, section, option, default, expected_type=None):
""" Gets the value of the section/option using method. Returns default if value
| {"golden_diff": "diff --git a/luigi/configuration.py b/luigi/configuration.py\n--- a/luigi/configuration.py\n+++ b/luigi/configuration.py\n@@ -14,7 +14,7 @@\n @classmethod\n def add_config_path(cls, path):\n cls._config_paths.append(path)\n- cls._instance.reload()\n+ cls.reload()\n \n @classmethod\n def instance(cls, *args, **kwargs):\n@@ -26,8 +26,9 @@\n \n return cls._instance\n \n- def reload(self):\n- return self._instance.read(self._config_paths)\n+ @classmethod\n+ def reload(cls):\n+ return cls.instance().read(cls._config_paths)\n \n def _get_with_default(self, method, section, option, default, expected_type=None):\n \"\"\" Gets the value of the section/option using method. Returns default if value\n", "issue": "LuigiConfigParser::add_config_path() raises if instance() hasn't been accessed\nTo add a path to the list of config paths, one currently has to do:\n\n``` python\nLuigiConfigParser.instance() # remove this and get an exception\nLuigiConfigParser.add_config_path(my_path)\n```\n\nbecause `add_config_path` tries to reload `cls._instance` which is initialized with `None`. Wouldn't it be cleaner to do a check there and only reload a non-null instance?\n\n", "before_files": [{"content": "\nimport os\nimport logging\nfrom ConfigParser import ConfigParser, NoOptionError, NoSectionError\n\n\nclass LuigiConfigParser(ConfigParser):\n NO_DEFAULT = object()\n _instance = None\n _config_paths = ['/etc/luigi/client.cfg', 'client.cfg']\n if 'LUIGI_CONFIG_PATH' in os.environ:\n _config_paths.append(os.environ['LUIGI_CONFIG_PATH'])\n\n @classmethod\n def add_config_path(cls, path):\n cls._config_paths.append(path)\n cls._instance.reload()\n\n @classmethod\n def instance(cls, *args, **kwargs):\n \"\"\" Singleton getter \"\"\"\n if cls._instance is None:\n cls._instance = cls(*args, **kwargs)\n loaded = cls._instance.reload()\n logging.getLogger('luigi-interface').info('Loaded %r', loaded)\n\n return cls._instance\n\n def reload(self):\n return self._instance.read(self._config_paths)\n\n def _get_with_default(self, method, section, option, default, expected_type=None):\n \"\"\" Gets the value of the section/option using method. Returns default if value\n is not found. Raises an exception if the default value is not None and doesn't match\n the expected_type.\n \"\"\"\n try:\n return method(self, section, option)\n except (NoOptionError, NoSectionError):\n if default is LuigiConfigParser.NO_DEFAULT:\n raise\n if expected_type is not None and default is not None and \\\n not isinstance(default, expected_type):\n raise\n return default\n\n def get(self, section, option, default=NO_DEFAULT):\n return self._get_with_default(ConfigParser.get, section, option, default)\n\n def getboolean(self, section, option, default=NO_DEFAULT):\n return self._get_with_default(ConfigParser.getboolean, section, option, default, bool)\n\n def getint(self, section, option, default=NO_DEFAULT):\n return self._get_with_default(ConfigParser.getint, section, option, default, int)\n\n def getfloat(self, section, option, default=NO_DEFAULT):\n return self._get_with_default(ConfigParser.getfloat, section, option, default, float)\n\n def set(self, section, option, value):\n if not ConfigParser.has_section(self, section):\n ConfigParser.add_section(self, section)\n\n return ConfigParser.set(self, section, option, value)\n\ndef get_config():\n \"\"\" Convenience method (for backwards compatibility) for accessing config singleton \"\"\"\n return LuigiConfigParser.instance()\n", "path": "luigi/configuration.py"}]} | 1,311 | 192 |
gh_patches_debug_8797 | rasdani/github-patches | git_diff | Kinto__kinto-1340 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`kinto create-user` doesn't override the password if the user already exists.
</issue>
<code>
[start of kinto/plugins/accounts/scripts.py]
1 import logging
2 import getpass
3 from pyramid.settings import asbool
4
5 from .utils import hash_password
6 from .views import AccountIdGenerator
7
8
9 logger = logging.getLogger(__name__)
10
11
12 def create_user(env, username=None, password=None):
13 """Administrative command to create a new user."""
14 registry = env['registry']
15 settings = registry.settings
16 readonly_mode = asbool(settings.get('readonly', False))
17 if readonly_mode:
18 message = 'Cannot create a user with a readonly server.'
19 logger.error(message)
20 return 51
21
22 if 'kinto.plugins.accounts' not in settings['includes']:
23 message = 'Cannot create a user when the accounts plugin is not installed.'
24 logger.error(message)
25 return 52
26
27 try:
28 validator = AccountIdGenerator()
29 if username is None:
30 username = input('Username: ')
31 while not validator.match(username):
32 print('{} is not a valid username.')
33 print('Username should match {0!r}, please try again.'.format(validator.regexp))
34 username = input('Username: ')
35
36 if password is None:
37 while True: # The user didn't entered twice the same password
38 password = getpass.getpass('Please enter a password for {}: '.format(username))
39 confirm = getpass.getpass('Please confirm the password: '.format(username))
40
41 if password != confirm:
42 print('Sorry, passwords do not match, please try again.')
43 else:
44 break
45 except EOFError:
46 print('User creation aborted')
47 return 53
48
49 print("Creating user '{}'".format(username))
50 record = {'id': username, 'password': hash_password(password)}
51 registry.storage.create(collection_id='account',
52 parent_id=username,
53 record=record,
54 ignore_conflict=True)
55 registry.permission.add_principal_to_ace('/accounts/{}'.format(username),
56 'write',
57 'account:{}'.format(username))
58
59 return 0
60
[end of kinto/plugins/accounts/scripts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/plugins/accounts/scripts.py b/kinto/plugins/accounts/scripts.py
--- a/kinto/plugins/accounts/scripts.py
+++ b/kinto/plugins/accounts/scripts.py
@@ -1,5 +1,7 @@
import logging
import getpass
+
+import transaction as current_transaction
from pyramid.settings import asbool
from .utils import hash_password
@@ -56,4 +58,6 @@
'write',
'account:{}'.format(username))
+ current_transaction.commit()
+
return 0
| {"golden_diff": "diff --git a/kinto/plugins/accounts/scripts.py b/kinto/plugins/accounts/scripts.py\n--- a/kinto/plugins/accounts/scripts.py\n+++ b/kinto/plugins/accounts/scripts.py\n@@ -1,5 +1,7 @@\n import logging\n import getpass\n+\n+import transaction as current_transaction\n from pyramid.settings import asbool\n \n from .utils import hash_password\n@@ -56,4 +58,6 @@\n 'write',\n 'account:{}'.format(username))\n \n+ current_transaction.commit()\n+\n return 0\n", "issue": "`kinto create-user` doesn't override the password if the user already exists.\n\n", "before_files": [{"content": "import logging\nimport getpass\nfrom pyramid.settings import asbool\n\nfrom .utils import hash_password\nfrom .views import AccountIdGenerator\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_user(env, username=None, password=None):\n \"\"\"Administrative command to create a new user.\"\"\"\n registry = env['registry']\n settings = registry.settings\n readonly_mode = asbool(settings.get('readonly', False))\n if readonly_mode:\n message = 'Cannot create a user with a readonly server.'\n logger.error(message)\n return 51\n\n if 'kinto.plugins.accounts' not in settings['includes']:\n message = 'Cannot create a user when the accounts plugin is not installed.'\n logger.error(message)\n return 52\n\n try:\n validator = AccountIdGenerator()\n if username is None:\n username = input('Username: ')\n while not validator.match(username):\n print('{} is not a valid username.')\n print('Username should match {0!r}, please try again.'.format(validator.regexp))\n username = input('Username: ')\n\n if password is None:\n while True: # The user didn't entered twice the same password\n password = getpass.getpass('Please enter a password for {}: '.format(username))\n confirm = getpass.getpass('Please confirm the password: '.format(username))\n\n if password != confirm:\n print('Sorry, passwords do not match, please try again.')\n else:\n break\n except EOFError:\n print('User creation aborted')\n return 53\n\n print(\"Creating user '{}'\".format(username))\n record = {'id': username, 'password': hash_password(password)}\n registry.storage.create(collection_id='account',\n parent_id=username,\n record=record,\n ignore_conflict=True)\n registry.permission.add_principal_to_ace('/accounts/{}'.format(username),\n 'write',\n 'account:{}'.format(username))\n\n return 0\n", "path": "kinto/plugins/accounts/scripts.py"}]} | 1,082 | 112 |
gh_patches_debug_15585 | rasdani/github-patches | git_diff | Parsl__parsl-1987 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
garbage collection vs usage tracking shutdown race condition
**Describe the bug**
I've seen this exception at least once in CI local tests. I think it comes from job-completion garbage collection racing with DFK shutdown usage reporting while the latter examines the task dictionary.
```
parsl/tests/test_staging/test_elaborate_noop_file.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
parsl/dataflow/dflow.py:1051: in cleanup
self.usage_tracker.send_message()
parsl/dataflow/usage_tracking/usage.py:224: in send_message
message = self.construct_end_message()
parsl/dataflow/usage_tracking/usage.py:182: in construct_end_message
app_fails = len([t for t in self.dfk.tasks if
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
.0 = <dict_keyiterator object at 0x7f86107ca040>
> app_fails = len([t for t in self.dfk.tasks if
self.dfk.tasks[t]['status'] in FINAL_FAILURE_STATES])
E RuntimeError: dictionary changed size during iteration
```
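
A common way to narrow this kind of race is to snapshot the task ids before iterating; shown here purely as an illustration of the failure mode, not necessarily how the project will fix it:

```python
# Hypothetical defensive variant of the failing comprehension:
task_ids = list(self.dfk.tasks)  # snapshot keys; narrows (but does not fully remove) the race
app_fails = len([t for t in task_ids
                 if self.dfk.tasks.get(t, {}).get('status') in FINAL_FAILURE_STATES])
```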
**To Reproduce**
non-deterministic in CI
**Environment**
CI, near master 02d3b93ad4fd97b2411c000cf0b8820c76e5dfef
</issue>
<code>
[start of parsl/dataflow/usage_tracking/usage.py]
1 import uuid
2 import time
3 import hashlib
4 import os
5 import getpass
6 import json
7 import logging
8 import socket
9 import sys
10 import platform
11 import multiprocessing as mp
12
13 from parsl.dataflow.states import FINAL_FAILURE_STATES
14 from parsl.version import VERSION as PARSL_VERSION
15
16 logger = logging.getLogger(__name__)
17
18
19 def async_process(fn):
20 """ Decorator function to launch a function as a separate process """
21
22 def run(*args, **kwargs):
23 proc = mp.Process(target=fn, args=args, kwargs=kwargs, name="Usage-Tracking")
24 proc.start()
25 return proc
26
27 return run
28
29
30 @async_process
31 def udp_messenger(domain_name, UDP_IP, UDP_PORT, sock_timeout, message):
32 """Send UDP messages to usage tracker asynchronously
33
34 This multiprocessing based messenger was written to overcome the limitations
35 of signalling/terminating a thread that is blocked on a system call. This
36 messenger is created as a separate process, and initialized with 2 queues,
37 to_send to receive messages to be sent to the internet.
38
39 Args:
40 - domain_name (str) : Domain name string
41 - UDP_IP (str) : IP address YYY.YYY.YYY.YYY
42 - UDP_PORT (int) : UDP port to send out on
43 - sock_timeout (int) : Socket timeout
44 - to_send (multiprocessing.Queue) : Queue of outgoing messages to internet
45 """
46 try:
47 if message is None:
48 raise ValueError("message was none")
49
50 encoded_message = bytes(message, "utf-8")
51
52 if encoded_message is None:
53 raise ValueError("utf-8 encoding of message failed")
54
55 if domain_name:
56 try:
57 UDP_IP = socket.gethostbyname(domain_name)
58 except Exception:
59 # (False, "Domain lookup failed, defaulting to {0}".format(UDP_IP))
60 pass
61
62 if UDP_IP is None:
63 raise Exception("UDP_IP is None")
64
65 if UDP_PORT is None:
66 raise Exception("UDP_PORT is None")
67
68 sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
69 sock.settimeout(sock_timeout)
70 sock.sendto(bytes(message, "utf-8"), (UDP_IP, UDP_PORT))
71 sock.close()
72
73 except socket.timeout:
74 logger.debug("Failed to send usage tracking data: socket timeout")
75 except OSError as e:
76 logger.debug("Failed to send usage tracking data: OSError: {}".format(e))
77 except Exception as e:
78 logger.debug("Failed to send usage tracking data: Exception: {}".format(e))
79
80
81 class UsageTracker (object):
82 """Anonymized Usage Tracking for Parsl.
83
84 Client for this is here : https://github.com/Parsl/parsl_tracking
85 This issue captures the discussion that went into functionality
86 implemented here : https://github.com/Parsl/parsl/issues/34
87
88 """
89
90 def __init__(self, dfk, ip='52.3.111.203', port=50077,
91 domain_name='tracking.parsl-project.org'):
92 """Initialize usage tracking unless the user has opted-out.
93
94 We will try to resolve the hostname specified in kwarg:domain_name
95 and if that fails attempt to use the kwarg:ip. Determining the
96 IP and sending message is threaded to avoid slowing down DFK
97 initialization.
98
99 Tracks usage stats by inspecting the internal state of the dfk.
100
101 Args:
102 - dfk (DFK object) : Data Flow Kernel object
103
104 KWargs:
105 - ip (string) : IP address
106 - port (int) : Port number, Default:50077
107 - domain_name (string) : Domain name, will override IP
108 Default: tracking.parsl-project.org
109 """
110
111 self.domain_name = domain_name
112 self.ip = ip
113 # The sock timeout will only apply to UDP send and not domain resolution
114 self.sock_timeout = 5
115 self.UDP_PORT = port
116 self.UDP_IP = None
117 self.procs = []
118 self.dfk = dfk
119 self.config = self.dfk.config
120 self.uuid = str(uuid.uuid4())
121 self.parsl_version = PARSL_VERSION
122 self.python_version = "{}.{}.{}".format(sys.version_info.major,
123 sys.version_info.minor,
124 sys.version_info.micro)
125 self.tracking_enabled = self.check_tracking_enabled()
126 logger.debug("Tracking status: {}".format(self.tracking_enabled))
127 self.initialized = False # Once first message is sent this will be True
128
129 def check_tracking_enabled(self):
130 """By default tracking is enabled.
131
132 Tracking is disabled if :
133 1. config["globals"]["usageTracking"] is set to False (Bool)
134 2. Environment variable PARSL_TRACKING is set to false (case insensitive)
135
136 """
137 track = True # By default we track usage
138
139 if not self.config.usage_tracking:
140 track = False
141
142 envvar = str(os.environ.get("PARSL_TRACKING", True)).lower()
143 if envvar == "false":
144 track = False
145
146 return track
147
148 def construct_start_message(self):
149 """Collect preliminary run info at the start of the DFK.
150
151 Returns :
152 - Message dict dumped as json string, ready for UDP
153 """
154 uname = getpass.getuser().encode('latin1')
155 hashed_username = hashlib.sha256(uname).hexdigest()[0:10]
156 hname = socket.gethostname().encode('latin1')
157 hashed_hostname = hashlib.sha256(hname).hexdigest()[0:10]
158 message = {'uuid': self.uuid,
159 'uname': hashed_username,
160 'hname': hashed_hostname,
161 'test': False, # this field previously indicated if parsl
162 # was being run in test mode, and is
163 # retained for protocol compatibility
164 'parsl_v': self.parsl_version,
165 'python_v': self.python_version,
166 'os': platform.system(),
167 'os_v': platform.release(),
168 'start': time.time()}
169
170 return json.dumps(message)
171
172 def construct_end_message(self):
173 """Collect the final run information at the time of DFK cleanup.
174
175 Returns:
176 - Message dict dumped as json string, ready for UDP
177 """
178 app_count = self.dfk.task_count
179
180 site_count = len([x for x in self.dfk.config.executors if x.managed])
181
182 app_fails = len([t for t in self.dfk.tasks if
183 self.dfk.tasks[t]['status'] in FINAL_FAILURE_STATES])
184
185 message = {'uuid': self.uuid,
186 'end': time.time(),
187 't_apps': app_count,
188 'sites': site_count,
189 'c_time': None,
190 'failed': app_fails,
191 'test': False, # see comment in construct_start_message
192 }
193
194 return json.dumps(message)
195
196 def send_UDP_message(self, message):
197 """Send UDP message."""
198 x = 0
199 if self.tracking_enabled:
200 try:
201 proc = udp_messenger(self.domain_name, self.UDP_IP, self.UDP_PORT, self.sock_timeout, message)
202 self.procs.append(proc)
203 except Exception as e:
204 logger.debug("Usage tracking failed: {}".format(e))
205 else:
206 x = -1
207
208 return x
209
210 def send_message(self):
211 """Send message over UDP.
212
213 If tracking is disables, the bytes_sent will always be set to -1
214
215 Returns:
216 (bytes_sent, time_taken)
217 """
218 start = time.time()
219 message = None
220 if not self.initialized:
221 message = self.construct_start_message()
222 self.initialized = True
223 else:
224 message = self.construct_end_message()
225
226 self.send_UDP_message(message)
227 end = time.time()
228
229 return end - start
230
231 def __del__(self):
232 return self.close()
233
234 def close(self):
235 """We terminate (SIGTERM) the processes added to the self.procs list """
236 for proc in self.procs:
237 proc.terminate()
238
[end of parsl/dataflow/usage_tracking/usage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsl/dataflow/usage_tracking/usage.py b/parsl/dataflow/usage_tracking/usage.py
--- a/parsl/dataflow/usage_tracking/usage.py
+++ b/parsl/dataflow/usage_tracking/usage.py
@@ -10,7 +10,6 @@
import platform
import multiprocessing as mp
-from parsl.dataflow.states import FINAL_FAILURE_STATES
from parsl.version import VERSION as PARSL_VERSION
logger = logging.getLogger(__name__)
@@ -179,8 +178,7 @@
site_count = len([x for x in self.dfk.config.executors if x.managed])
- app_fails = len([t for t in self.dfk.tasks if
- self.dfk.tasks[t]['status'] in FINAL_FAILURE_STATES])
+ app_fails = self.dfk.tasks_failed_count + self.dfk.tasks_dep_fail_count
message = {'uuid': self.uuid,
'end': time.time(),
| {"golden_diff": "diff --git a/parsl/dataflow/usage_tracking/usage.py b/parsl/dataflow/usage_tracking/usage.py\n--- a/parsl/dataflow/usage_tracking/usage.py\n+++ b/parsl/dataflow/usage_tracking/usage.py\n@@ -10,7 +10,6 @@\n import platform\n import multiprocessing as mp\n \n-from parsl.dataflow.states import FINAL_FAILURE_STATES\n from parsl.version import VERSION as PARSL_VERSION\n \n logger = logging.getLogger(__name__)\n@@ -179,8 +178,7 @@\n \n site_count = len([x for x in self.dfk.config.executors if x.managed])\n \n- app_fails = len([t for t in self.dfk.tasks if\n- self.dfk.tasks[t]['status'] in FINAL_FAILURE_STATES])\n+ app_fails = self.dfk.tasks_failed_count + self.dfk.tasks_dep_fail_count\n \n message = {'uuid': self.uuid,\n 'end': time.time(),\n", "issue": "garbage collection vs usage tracking shutdown race condition\n**Describe the bug**\r\nI've seen this exception at least once in CI local tests. I think it comes from job completion garbage collection racing with DFK shutdown usage reporting, in examining the task dictionary.\r\n\r\n```\r\nparsl/tests/test_staging/test_elaborate_noop_file.py:60: \r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\nparsl/dataflow/dflow.py:1051: in cleanup\r\n self.usage_tracker.send_message()\r\nparsl/dataflow/usage_tracking/usage.py:224: in send_message\r\n message = self.construct_end_message()\r\nparsl/dataflow/usage_tracking/usage.py:182: in construct_end_message\r\n app_fails = len([t for t in self.dfk.tasks if\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n.0 = <dict_keyiterator object at 0x7f86107ca040>\r\n> app_fails = len([t for t in self.dfk.tasks if\r\n self.dfk.tasks[t]['status'] in FINAL_FAILURE_STATES])\r\nE RuntimeError: dictionary changed size during iteration\r\n\r\n```\r\n\r\n\r\n**To Reproduce**\r\nnon-deterministic in CI\r\n\r\n**Environment**\r\nCI, near master 02d3b93ad4fd97b2411c000cf0b8820c76e5dfef\r\n\n", "before_files": [{"content": "import uuid\nimport time\nimport hashlib\nimport os\nimport getpass\nimport json\nimport logging\nimport socket\nimport sys\nimport platform\nimport multiprocessing as mp\n\nfrom parsl.dataflow.states import FINAL_FAILURE_STATES\nfrom parsl.version import VERSION as PARSL_VERSION\n\nlogger = logging.getLogger(__name__)\n\n\ndef async_process(fn):\n \"\"\" Decorator function to launch a function as a separate process \"\"\"\n\n def run(*args, **kwargs):\n proc = mp.Process(target=fn, args=args, kwargs=kwargs, name=\"Usage-Tracking\")\n proc.start()\n return proc\n\n return run\n\n\n@async_process\ndef udp_messenger(domain_name, UDP_IP, UDP_PORT, sock_timeout, message):\n \"\"\"Send UDP messages to usage tracker asynchronously\n\n This multiprocessing based messenger was written to overcome the limitations\n of signalling/terminating a thread that is blocked on a system call. 
This\n messenger is created as a separate process, and initialized with 2 queues,\n to_send to receive messages to be sent to the internet.\n\n Args:\n - domain_name (str) : Domain name string\n - UDP_IP (str) : IP address YYY.YYY.YYY.YYY\n - UDP_PORT (int) : UDP port to send out on\n - sock_timeout (int) : Socket timeout\n - to_send (multiprocessing.Queue) : Queue of outgoing messages to internet\n \"\"\"\n try:\n if message is None:\n raise ValueError(\"message was none\")\n\n encoded_message = bytes(message, \"utf-8\")\n\n if encoded_message is None:\n raise ValueError(\"utf-8 encoding of message failed\")\n\n if domain_name:\n try:\n UDP_IP = socket.gethostbyname(domain_name)\n except Exception:\n # (False, \"Domain lookup failed, defaulting to {0}\".format(UDP_IP))\n pass\n\n if UDP_IP is None:\n raise Exception(\"UDP_IP is None\")\n\n if UDP_PORT is None:\n raise Exception(\"UDP_PORT is None\")\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP\n sock.settimeout(sock_timeout)\n sock.sendto(bytes(message, \"utf-8\"), (UDP_IP, UDP_PORT))\n sock.close()\n\n except socket.timeout:\n logger.debug(\"Failed to send usage tracking data: socket timeout\")\n except OSError as e:\n logger.debug(\"Failed to send usage tracking data: OSError: {}\".format(e))\n except Exception as e:\n logger.debug(\"Failed to send usage tracking data: Exception: {}\".format(e))\n\n\nclass UsageTracker (object):\n \"\"\"Anonymized Usage Tracking for Parsl.\n\n Client for this is here : https://github.com/Parsl/parsl_tracking\n This issue captures the discussion that went into functionality\n implemented here : https://github.com/Parsl/parsl/issues/34\n\n \"\"\"\n\n def __init__(self, dfk, ip='52.3.111.203', port=50077,\n domain_name='tracking.parsl-project.org'):\n \"\"\"Initialize usage tracking unless the user has opted-out.\n\n We will try to resolve the hostname specified in kwarg:domain_name\n and if that fails attempt to use the kwarg:ip. Determining the\n IP and sending message is threaded to avoid slowing down DFK\n initialization.\n\n Tracks usage stats by inspecting the internal state of the dfk.\n\n Args:\n - dfk (DFK object) : Data Flow Kernel object\n\n KWargs:\n - ip (string) : IP address\n - port (int) : Port number, Default:50077\n - domain_name (string) : Domain name, will override IP\n Default: tracking.parsl-project.org\n \"\"\"\n\n self.domain_name = domain_name\n self.ip = ip\n # The sock timeout will only apply to UDP send and not domain resolution\n self.sock_timeout = 5\n self.UDP_PORT = port\n self.UDP_IP = None\n self.procs = []\n self.dfk = dfk\n self.config = self.dfk.config\n self.uuid = str(uuid.uuid4())\n self.parsl_version = PARSL_VERSION\n self.python_version = \"{}.{}.{}\".format(sys.version_info.major,\n sys.version_info.minor,\n sys.version_info.micro)\n self.tracking_enabled = self.check_tracking_enabled()\n logger.debug(\"Tracking status: {}\".format(self.tracking_enabled))\n self.initialized = False # Once first message is sent this will be True\n\n def check_tracking_enabled(self):\n \"\"\"By default tracking is enabled.\n\n Tracking is disabled if :\n 1. config[\"globals\"][\"usageTracking\"] is set to False (Bool)\n 2. 
Environment variable PARSL_TRACKING is set to false (case insensitive)\n\n \"\"\"\n track = True # By default we track usage\n\n if not self.config.usage_tracking:\n track = False\n\n envvar = str(os.environ.get(\"PARSL_TRACKING\", True)).lower()\n if envvar == \"false\":\n track = False\n\n return track\n\n def construct_start_message(self):\n \"\"\"Collect preliminary run info at the start of the DFK.\n\n Returns :\n - Message dict dumped as json string, ready for UDP\n \"\"\"\n uname = getpass.getuser().encode('latin1')\n hashed_username = hashlib.sha256(uname).hexdigest()[0:10]\n hname = socket.gethostname().encode('latin1')\n hashed_hostname = hashlib.sha256(hname).hexdigest()[0:10]\n message = {'uuid': self.uuid,\n 'uname': hashed_username,\n 'hname': hashed_hostname,\n 'test': False, # this field previously indicated if parsl\n # was being run in test mode, and is\n # retained for protocol compatibility\n 'parsl_v': self.parsl_version,\n 'python_v': self.python_version,\n 'os': platform.system(),\n 'os_v': platform.release(),\n 'start': time.time()}\n\n return json.dumps(message)\n\n def construct_end_message(self):\n \"\"\"Collect the final run information at the time of DFK cleanup.\n\n Returns:\n - Message dict dumped as json string, ready for UDP\n \"\"\"\n app_count = self.dfk.task_count\n\n site_count = len([x for x in self.dfk.config.executors if x.managed])\n\n app_fails = len([t for t in self.dfk.tasks if\n self.dfk.tasks[t]['status'] in FINAL_FAILURE_STATES])\n\n message = {'uuid': self.uuid,\n 'end': time.time(),\n 't_apps': app_count,\n 'sites': site_count,\n 'c_time': None,\n 'failed': app_fails,\n 'test': False, # see comment in construct_start_message\n }\n\n return json.dumps(message)\n\n def send_UDP_message(self, message):\n \"\"\"Send UDP message.\"\"\"\n x = 0\n if self.tracking_enabled:\n try:\n proc = udp_messenger(self.domain_name, self.UDP_IP, self.UDP_PORT, self.sock_timeout, message)\n self.procs.append(proc)\n except Exception as e:\n logger.debug(\"Usage tracking failed: {}\".format(e))\n else:\n x = -1\n\n return x\n\n def send_message(self):\n \"\"\"Send message over UDP.\n\n If tracking is disables, the bytes_sent will always be set to -1\n\n Returns:\n (bytes_sent, time_taken)\n \"\"\"\n start = time.time()\n message = None\n if not self.initialized:\n message = self.construct_start_message()\n self.initialized = True\n else:\n message = self.construct_end_message()\n\n self.send_UDP_message(message)\n end = time.time()\n\n return end - start\n\n def __del__(self):\n return self.close()\n\n def close(self):\n \"\"\"We terminate (SIGTERM) the processes added to the self.procs list \"\"\"\n for proc in self.procs:\n proc.terminate()\n", "path": "parsl/dataflow/usage_tracking/usage.py"}]} | 3,287 | 211 |
gh_patches_debug_43304 | rasdani/github-patches | git_diff | ManimCommunity__manim-1000 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove 'MovingCameraScene.camera_frame' and 'MovingCameraScene.setup'
## Enhancement proposal
[`MovingCameraScene.setup` method](https://github.com/ManimCommunity/manim/blob/2faa89e3d367c370fd101893f03efab63109ceb0/manim/scene/moving_camera_scene.py#L94-L104) only sets the attribute `camera_frame` into the `Scene` instance, pointing to `renderer.camera.frame`, but since the property [`camera` exists in `Scene` class](https://github.com/ManimCommunity/manim/blob/57f228b8eb96f9c99517a7fdb8756b0c4969d7ff/manim/scene/scene.py#L101-L103), the object `renderer.camera.frame` can be accesed using `camera.frame`.
Changing `camera_frame` by `camera.frame` and removing the useless `setup` method, the example `FollowingGraphCamera` could be rewritten as:
```python
class FollowingGraphCamera(GraphScene, MovingCameraScene):
def setup(self):
GraphScene.setup(self)
def construct(self):
self.camera.frame.save_state()
self.setup_axes(animate=False)
graph = self.get_graph(lambda x: np.sin(x),
color=BLUE,
x_min=0,
x_max=3 * PI
)
moving_dot = Dot().move_to(graph.points[0]).set_color(ORANGE)
dot_at_start_graph = Dot().move_to(graph.points[0])
dot_at_end_graph = Dot().move_to(graph.points[-1])
self.add(graph, dot_at_end_graph, dot_at_start_graph, moving_dot)
self.play(self.camera.frame.animate.scale(0.5).move_to(moving_dot))
def update_curve(mob):
mob.move_to(moving_dot.get_center())
self.camera.frame.add_updater(update_curve)
self.play(MoveAlongPath(moving_dot, graph, rate_func=linear))
self.camera.frame.remove_updater(update_curve)
self.play(Restore(self.camera.frame))
```
Studying the original example, I was confused as to why accessing `camera.frame` was done using` camera_frame`.
### Additional comments
The `setup` method [made sense in the original implementation](https://github.com/ManimCommunity/manim/blame/2981fa2b32f0b3827317efc75df56c0585c6c0f0/scene/moving_camera_scene.py#L11-L17) but with the changes that have happened since then it's just dead code now.
</issue>
<code>
[start of manim/scene/moving_camera_scene.py]
1 """A scene whose camera can be moved around.
2
3 .. SEEALSO::
4
5 :mod:`.moving_camera`
6
7
8 Examples
9 --------
10
11 .. manim:: ChangingCameraWidthAndRestore
12
13 class ChangingCameraWidthAndRestore(MovingCameraScene):
14 def construct(self):
15 text = Text("Hello World").set_color(BLUE)
16 self.add(text)
17 self.camera_frame.save_state()
18 self.play(self.camera_frame.animate.set(width=text.width * 1.2))
19 self.wait(0.3)
20 self.play(Restore(self.camera_frame))
21
22
23 .. manim:: MovingCameraCenter
24
25 class MovingCameraCenter(MovingCameraScene):
26 def construct(self):
27 s = Square(color=RED, fill_opacity=0.5).move_to(2 * LEFT)
28 t = Triangle(color=GREEN, fill_opacity=0.5).move_to(2 * RIGHT)
29 self.wait(0.3)
30 self.add(s, t)
31 self.play(self.camera_frame.animate.move_to(s))
32 self.wait(0.3)
33 self.play(self.camera_frame.animate.move_to(t))
34
35
36 .. manim:: MovingAndZoomingCamera
37
38 class MovingAndZoomingCamera(MovingCameraScene):
39 def construct(self):
40 s = Square(color=BLUE, fill_opacity=0.5).move_to(2 * LEFT)
41 t = Triangle(color=YELLOW, fill_opacity=0.5).move_to(2 * RIGHT)
42 self.add(s, t)
43 self.play(self.camera_frame.animate.move_to(s).set(width=s.width*2))
44 self.wait(0.3)
45 self.play(self.camera_frame.animate.move_to(t).set(width=t.width*2))
46
47 self.play(self.camera_frame.animate.move_to(ORIGIN).set(width=14))
48
49 .. manim:: MovingCameraOnGraph
50
51 class MovingCameraOnGraph(GraphScene, MovingCameraScene):
52 def setup(self):
53 GraphScene.setup(self)
54 MovingCameraScene.setup(self)
55 def construct(self):
56 self.camera_frame.save_state()
57 self.setup_axes(animate=False)
58 graph = self.get_graph(lambda x: np.sin(x),
59 color=WHITE,
60 x_min=0,
61 x_max=3 * PI
62 )
63 dot_at_start_graph = Dot().move_to(graph.points[0])
64 dot_at_end_graph = Dot().move_to(graph.points[-1])
65 self.add(graph, dot_at_end_graph, dot_at_start_graph)
66 self.play(self.camera_frame.animate.scale(0.5).move_to(dot_at_start_graph))
67 self.play(self.camera_frame.animate.move_to(dot_at_end_graph))
68 self.play(Restore(self.camera_frame))
69 self.wait()
70
71 """
72
73 __all__ = ["MovingCameraScene"]
74
75 from ..camera.moving_camera import MovingCamera
76 from ..scene.scene import Scene
77 from ..utils.iterables import list_update
78 from ..utils.family import extract_mobject_family_members
79
80
81 class MovingCameraScene(Scene):
82 """
83 This is a Scene, with special configurations and properties that
84 make it suitable for cases where the camera must be moved around.
85
86 .. SEEALSO::
87
88 :class:`.MovingCamera`
89 """
90
91 def __init__(self, camera_class=MovingCamera, **kwargs):
92 Scene.__init__(self, camera_class=camera_class, **kwargs)
93
94 def setup(self):
95 """
96 This method is used internally by Manim
97 to set up the scene for proper use.
98 """
99 Scene.setup(self)
100 assert isinstance(self.renderer.camera, MovingCamera)
101 self.camera_frame = self.renderer.camera.frame
102 # Hmm, this currently relies on the fact that MovingCamera
103 # willd default to a full-sized frame. Is that okay?
104 return self
105
106 def get_moving_mobjects(self, *animations):
107 """
108 This method returns a list of all of the Mobjects in the Scene that
109 are moving, that are also in the animations passed.
110
111 Parameters
112 ----------
113 *animations : Animation
114 The Animations whose mobjects will be checked.
115 """
116 moving_mobjects = Scene.get_moving_mobjects(self, *animations)
117 all_moving_mobjects = extract_mobject_family_members(moving_mobjects)
118 movement_indicators = self.renderer.camera.get_mobjects_indicating_movement()
119 for movement_indicator in movement_indicators:
120 if movement_indicator in all_moving_mobjects:
121 # When one of these is moving, the camera should
122 # consider all mobjects to be moving
123 return list_update(self.mobjects, moving_mobjects)
124 return moving_mobjects
125
[end of manim/scene/moving_camera_scene.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/manim/scene/moving_camera_scene.py b/manim/scene/moving_camera_scene.py
--- a/manim/scene/moving_camera_scene.py
+++ b/manim/scene/moving_camera_scene.py
@@ -14,10 +14,10 @@
def construct(self):
text = Text("Hello World").set_color(BLUE)
self.add(text)
- self.camera_frame.save_state()
- self.play(self.camera_frame.animate.set(width=text.width * 1.2))
+ self.camera.frame.save_state()
+ self.play(self.camera.frame.animate.set(width=text.width * 1.2))
self.wait(0.3)
- self.play(Restore(self.camera_frame))
+ self.play(Restore(self.camera.frame))
.. manim:: MovingCameraCenter
@@ -28,9 +28,9 @@
t = Triangle(color=GREEN, fill_opacity=0.5).move_to(2 * RIGHT)
self.wait(0.3)
self.add(s, t)
- self.play(self.camera_frame.animate.move_to(s))
+ self.play(self.camera.frame.animate.move_to(s))
self.wait(0.3)
- self.play(self.camera_frame.animate.move_to(t))
+ self.play(self.camera.frame.animate.move_to(t))
.. manim:: MovingAndZoomingCamera
@@ -40,20 +40,20 @@
s = Square(color=BLUE, fill_opacity=0.5).move_to(2 * LEFT)
t = Triangle(color=YELLOW, fill_opacity=0.5).move_to(2 * RIGHT)
self.add(s, t)
- self.play(self.camera_frame.animate.move_to(s).set(width=s.width*2))
+ self.play(self.camera.frame.animate.move_to(s).set(width=s.width*2))
self.wait(0.3)
- self.play(self.camera_frame.animate.move_to(t).set(width=t.width*2))
+ self.play(self.camera.frame.animate.move_to(t).set(width=t.width*2))
- self.play(self.camera_frame.animate.move_to(ORIGIN).set(width=14))
+ self.play(self.camera.frame.animate.move_to(ORIGIN).set(width=14))
.. manim:: MovingCameraOnGraph
class MovingCameraOnGraph(GraphScene, MovingCameraScene):
def setup(self):
GraphScene.setup(self)
- MovingCameraScene.setup(self)
+
def construct(self):
- self.camera_frame.save_state()
+ self.camera.frame.save_state()
self.setup_axes(animate=False)
graph = self.get_graph(lambda x: np.sin(x),
color=WHITE,
@@ -63,9 +63,9 @@
dot_at_start_graph = Dot().move_to(graph.points[0])
dot_at_end_graph = Dot().move_to(graph.points[-1])
self.add(graph, dot_at_end_graph, dot_at_start_graph)
- self.play(self.camera_frame.animate.scale(0.5).move_to(dot_at_start_graph))
- self.play(self.camera_frame.animate.move_to(dot_at_end_graph))
- self.play(Restore(self.camera_frame))
+ self.play(self.camera.frame.animate.scale(0.5).move_to(dot_at_start_graph))
+ self.play(self.camera.frame.animate.move_to(dot_at_end_graph))
+ self.play(Restore(self.camera.frame))
self.wait()
"""
@@ -91,18 +91,6 @@
def __init__(self, camera_class=MovingCamera, **kwargs):
Scene.__init__(self, camera_class=camera_class, **kwargs)
- def setup(self):
- """
- This method is used internally by Manim
- to set up the scene for proper use.
- """
- Scene.setup(self)
- assert isinstance(self.renderer.camera, MovingCamera)
- self.camera_frame = self.renderer.camera.frame
- # Hmm, this currently relies on the fact that MovingCamera
- # willd default to a full-sized frame. Is that okay?
- return self
-
def get_moving_mobjects(self, *animations):
"""
This method returns a list of all of the Mobjects in the Scene that
| {"golden_diff": "diff --git a/manim/scene/moving_camera_scene.py b/manim/scene/moving_camera_scene.py\n--- a/manim/scene/moving_camera_scene.py\n+++ b/manim/scene/moving_camera_scene.py\n@@ -14,10 +14,10 @@\n def construct(self):\n text = Text(\"Hello World\").set_color(BLUE)\n self.add(text)\n- self.camera_frame.save_state()\n- self.play(self.camera_frame.animate.set(width=text.width * 1.2))\n+ self.camera.frame.save_state()\n+ self.play(self.camera.frame.animate.set(width=text.width * 1.2))\n self.wait(0.3)\n- self.play(Restore(self.camera_frame))\n+ self.play(Restore(self.camera.frame))\n \n \n .. manim:: MovingCameraCenter\n@@ -28,9 +28,9 @@\n t = Triangle(color=GREEN, fill_opacity=0.5).move_to(2 * RIGHT)\n self.wait(0.3)\n self.add(s, t)\n- self.play(self.camera_frame.animate.move_to(s))\n+ self.play(self.camera.frame.animate.move_to(s))\n self.wait(0.3)\n- self.play(self.camera_frame.animate.move_to(t))\n+ self.play(self.camera.frame.animate.move_to(t))\n \n \n .. manim:: MovingAndZoomingCamera\n@@ -40,20 +40,20 @@\n s = Square(color=BLUE, fill_opacity=0.5).move_to(2 * LEFT)\n t = Triangle(color=YELLOW, fill_opacity=0.5).move_to(2 * RIGHT)\n self.add(s, t)\n- self.play(self.camera_frame.animate.move_to(s).set(width=s.width*2))\n+ self.play(self.camera.frame.animate.move_to(s).set(width=s.width*2))\n self.wait(0.3)\n- self.play(self.camera_frame.animate.move_to(t).set(width=t.width*2))\n+ self.play(self.camera.frame.animate.move_to(t).set(width=t.width*2))\n \n- self.play(self.camera_frame.animate.move_to(ORIGIN).set(width=14))\n+ self.play(self.camera.frame.animate.move_to(ORIGIN).set(width=14))\n \n .. manim:: MovingCameraOnGraph\n \n class MovingCameraOnGraph(GraphScene, MovingCameraScene):\n def setup(self):\n GraphScene.setup(self)\n- MovingCameraScene.setup(self)\n+\n def construct(self):\n- self.camera_frame.save_state()\n+ self.camera.frame.save_state()\n self.setup_axes(animate=False)\n graph = self.get_graph(lambda x: np.sin(x),\n color=WHITE,\n@@ -63,9 +63,9 @@\n dot_at_start_graph = Dot().move_to(graph.points[0])\n dot_at_end_graph = Dot().move_to(graph.points[-1])\n self.add(graph, dot_at_end_graph, dot_at_start_graph)\n- self.play(self.camera_frame.animate.scale(0.5).move_to(dot_at_start_graph))\n- self.play(self.camera_frame.animate.move_to(dot_at_end_graph))\n- self.play(Restore(self.camera_frame))\n+ self.play(self.camera.frame.animate.scale(0.5).move_to(dot_at_start_graph))\n+ self.play(self.camera.frame.animate.move_to(dot_at_end_graph))\n+ self.play(Restore(self.camera.frame))\n self.wait()\n \n \"\"\"\n@@ -91,18 +91,6 @@\n def __init__(self, camera_class=MovingCamera, **kwargs):\n Scene.__init__(self, camera_class=camera_class, **kwargs)\n \n- def setup(self):\n- \"\"\"\n- This method is used internally by Manim\n- to set up the scene for proper use.\n- \"\"\"\n- Scene.setup(self)\n- assert isinstance(self.renderer.camera, MovingCamera)\n- self.camera_frame = self.renderer.camera.frame\n- # Hmm, this currently relies on the fact that MovingCamera\n- # willd default to a full-sized frame. 
Is that okay?\n- return self\n-\n def get_moving_mobjects(self, *animations):\n \"\"\"\n This method returns a list of all of the Mobjects in the Scene that\n", "issue": "Remove 'MovingCameraScene.camera_frame' and 'MovingCameraScene.setup'\n## Enhancement proposal\r\n\r\n[`MovingCameraScene.setup` method](https://github.com/ManimCommunity/manim/blob/2faa89e3d367c370fd101893f03efab63109ceb0/manim/scene/moving_camera_scene.py#L94-L104) only sets the attribute `camera_frame` into the `Scene` instance, pointing to `renderer.camera.frame`, but since the property [`camera` exists in `Scene` class](https://github.com/ManimCommunity/manim/blob/57f228b8eb96f9c99517a7fdb8756b0c4969d7ff/manim/scene/scene.py#L101-L103), the object `renderer.camera.frame` can be accesed using `camera.frame`.\r\n\r\nChanging `camera_frame` by `camera.frame` and removing the useless `setup` method, the example `FollowingGraphCamera` could be rewritten as:\r\n\r\n```python\r\nclass FollowingGraphCamera(GraphScene, MovingCameraScene):\r\n def setup(self):\r\n GraphScene.setup(self)\r\n\r\n def construct(self):\r\n self.camera.frame.save_state()\r\n self.setup_axes(animate=False)\r\n graph = self.get_graph(lambda x: np.sin(x),\r\n color=BLUE,\r\n x_min=0,\r\n x_max=3 * PI\r\n )\r\n moving_dot = Dot().move_to(graph.points[0]).set_color(ORANGE)\r\n\r\n dot_at_start_graph = Dot().move_to(graph.points[0])\r\n dot_at_end_graph = Dot().move_to(graph.points[-1])\r\n self.add(graph, dot_at_end_graph, dot_at_start_graph, moving_dot)\r\n self.play(self.camera.frame.animate.scale(0.5).move_to(moving_dot))\r\n\r\n def update_curve(mob):\r\n mob.move_to(moving_dot.get_center())\r\n\r\n self.camera.frame.add_updater(update_curve)\r\n self.play(MoveAlongPath(moving_dot, graph, rate_func=linear))\r\n self.camera.frame.remove_updater(update_curve)\r\n\r\n self.play(Restore(self.camera.frame))\r\n```\r\n\r\nStudying the original example, I was confused as to why accessing `camera.frame` was done using` camera_frame`.\r\n\r\n### Additional comments\r\n\r\nThe `setup` method [made sense in the original implementation](https://github.com/ManimCommunity/manim/blame/2981fa2b32f0b3827317efc75df56c0585c6c0f0/scene/moving_camera_scene.py#L11-L17) but with the changes that have happened since then it's just dead code now.\n", "before_files": [{"content": "\"\"\"A scene whose camera can be moved around.\n\n.. SEEALSO::\n\n :mod:`.moving_camera`\n\n\nExamples\n--------\n\n.. manim:: ChangingCameraWidthAndRestore\n\n class ChangingCameraWidthAndRestore(MovingCameraScene):\n def construct(self):\n text = Text(\"Hello World\").set_color(BLUE)\n self.add(text)\n self.camera_frame.save_state()\n self.play(self.camera_frame.animate.set(width=text.width * 1.2))\n self.wait(0.3)\n self.play(Restore(self.camera_frame))\n\n\n.. manim:: MovingCameraCenter\n\n class MovingCameraCenter(MovingCameraScene):\n def construct(self):\n s = Square(color=RED, fill_opacity=0.5).move_to(2 * LEFT)\n t = Triangle(color=GREEN, fill_opacity=0.5).move_to(2 * RIGHT)\n self.wait(0.3)\n self.add(s, t)\n self.play(self.camera_frame.animate.move_to(s))\n self.wait(0.3)\n self.play(self.camera_frame.animate.move_to(t))\n\n\n.. 
manim:: MovingAndZoomingCamera\n\n class MovingAndZoomingCamera(MovingCameraScene):\n def construct(self):\n s = Square(color=BLUE, fill_opacity=0.5).move_to(2 * LEFT)\n t = Triangle(color=YELLOW, fill_opacity=0.5).move_to(2 * RIGHT)\n self.add(s, t)\n self.play(self.camera_frame.animate.move_to(s).set(width=s.width*2))\n self.wait(0.3)\n self.play(self.camera_frame.animate.move_to(t).set(width=t.width*2))\n\n self.play(self.camera_frame.animate.move_to(ORIGIN).set(width=14))\n\n.. manim:: MovingCameraOnGraph\n\n class MovingCameraOnGraph(GraphScene, MovingCameraScene):\n def setup(self):\n GraphScene.setup(self)\n MovingCameraScene.setup(self)\n def construct(self):\n self.camera_frame.save_state()\n self.setup_axes(animate=False)\n graph = self.get_graph(lambda x: np.sin(x),\n color=WHITE,\n x_min=0,\n x_max=3 * PI\n )\n dot_at_start_graph = Dot().move_to(graph.points[0])\n dot_at_end_graph = Dot().move_to(graph.points[-1])\n self.add(graph, dot_at_end_graph, dot_at_start_graph)\n self.play(self.camera_frame.animate.scale(0.5).move_to(dot_at_start_graph))\n self.play(self.camera_frame.animate.move_to(dot_at_end_graph))\n self.play(Restore(self.camera_frame))\n self.wait()\n\n\"\"\"\n\n__all__ = [\"MovingCameraScene\"]\n\nfrom ..camera.moving_camera import MovingCamera\nfrom ..scene.scene import Scene\nfrom ..utils.iterables import list_update\nfrom ..utils.family import extract_mobject_family_members\n\n\nclass MovingCameraScene(Scene):\n \"\"\"\n This is a Scene, with special configurations and properties that\n make it suitable for cases where the camera must be moved around.\n\n .. SEEALSO::\n\n :class:`.MovingCamera`\n \"\"\"\n\n def __init__(self, camera_class=MovingCamera, **kwargs):\n Scene.__init__(self, camera_class=camera_class, **kwargs)\n\n def setup(self):\n \"\"\"\n This method is used internally by Manim\n to set up the scene for proper use.\n \"\"\"\n Scene.setup(self)\n assert isinstance(self.renderer.camera, MovingCamera)\n self.camera_frame = self.renderer.camera.frame\n # Hmm, this currently relies on the fact that MovingCamera\n # willd default to a full-sized frame. Is that okay?\n return self\n\n def get_moving_mobjects(self, *animations):\n \"\"\"\n This method returns a list of all of the Mobjects in the Scene that\n are moving, that are also in the animations passed.\n\n Parameters\n ----------\n *animations : Animation\n The Animations whose mobjects will be checked.\n \"\"\"\n moving_mobjects = Scene.get_moving_mobjects(self, *animations)\n all_moving_mobjects = extract_mobject_family_members(moving_mobjects)\n movement_indicators = self.renderer.camera.get_mobjects_indicating_movement()\n for movement_indicator in movement_indicators:\n if movement_indicator in all_moving_mobjects:\n # When one of these is moving, the camera should\n # consider all mobjects to be moving\n return list_update(self.mobjects, moving_mobjects)\n return moving_mobjects\n", "path": "manim/scene/moving_camera_scene.py"}]} | 2,381 | 907 |
gh_patches_debug_31523 | rasdani/github-patches | git_diff | freedomofpress__securedrop-3884 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove unnecessary Ansible callback for profile_tasks
# Feature request
## Description
The file at `install_files/ansible-base/callback_plugins/profile_tasks.py` was added via #1196, to provide additional information on task performance, with the goal of aiding developers in improving the server config workflow. Since we moved to Ansible v2 in #1146, the hardcoded plugin is no longer necessary.
Instead, we can ansible add a lint to `ansible.cfg` under `[defaults]`:
```
callback_whitelist = profile_tasks
```
The simplification is possible because task profiling was [added to Ansible core as of v2](https://docs.ansible.com/ansible/devel/plugins/callback/profile_tasks.html).
## User Stories
As a maintainer, I want to delete redundant code wherever possible, and lean on upstream to handle core functionality when appropriate.
</issue>
<code>
[start of install_files/ansible-base/callback_plugins/profile_tasks.py]
1 # Source: https://github.com/jlafon/ansible-profile
2 # License: MIT
3 # More info: http://jlafon.io/ansible-profiling.html
4 # The profiling functionality will be provided by Ansible v2,
5 # since this callback_plugin has been merged into core,
6 # but we're including here to support older versions of Ansible.
7 import datetime
8 import os
9 import time
10
11
12 class CallbackModule(object):
13 """
14 A plugin for timing tasks
15 """
16 def __init__(self):
17 self.stats = {}
18 self.current = None
19
20 def playbook_on_task_start(self, name, is_conditional):
21 """
22 Logs the start of each task
23 """
24
25 if os.getenv("ANSIBLE_PROFILE_DISABLE") is not None:
26 return
27
28 if self.current is not None:
29 # Record the running time of the last executed task
30 self.stats[self.current] = time.time() - self.stats[self.current]
31
32 # Record the start time of the current task
33 self.current = name
34 self.stats[self.current] = time.time()
35
36 def playbook_on_stats(self, stats):
37 """
38 Prints the timings
39 """
40
41 if os.getenv("ANSIBLE_PROFILE_DISABLE") is not None:
42 return
43
44 # Record the timing of the very last task
45 if self.current is not None:
46 self.stats[self.current] = time.time() - self.stats[self.current]
47
48 # Sort the tasks by their running time
49 results = sorted(
50 self.stats.items(),
51 key=lambda value: value[1],
52 reverse=True,
53 )
54
55 # Just keep the top 10
56 results = results[:10]
57
58 # Print the timings
59 for name, elapsed in results:
60 print(
61 "{0:-<70}{1:->9}".format(
62 '{0} '.format(name),
63 ' {0:.02f}s'.format(elapsed),
64 )
65 )
66
67 total_seconds = sum([x[1] for x in self.stats.items()])
68 print("\nPlaybook finished: {0}, {1} total tasks."
69 " {2} elapsed. \n".format(
70 time.asctime(),
71 len(self.stats.items()),
72 datetime.timedelta(seconds=(int(total_seconds)))
73 )
74 )
75
[end of install_files/ansible-base/callback_plugins/profile_tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/install_files/ansible-base/callback_plugins/profile_tasks.py b/install_files/ansible-base/callback_plugins/profile_tasks.py
deleted file mode 100644
--- a/install_files/ansible-base/callback_plugins/profile_tasks.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Source: https://github.com/jlafon/ansible-profile
-# License: MIT
-# More info: http://jlafon.io/ansible-profiling.html
-# The profiling functionality will be provided by Ansible v2,
-# since this callback_plugin has been merged into core,
-# but we're including here to support older versions of Ansible.
-import datetime
-import os
-import time
-
-
-class CallbackModule(object):
- """
- A plugin for timing tasks
- """
- def __init__(self):
- self.stats = {}
- self.current = None
-
- def playbook_on_task_start(self, name, is_conditional):
- """
- Logs the start of each task
- """
-
- if os.getenv("ANSIBLE_PROFILE_DISABLE") is not None:
- return
-
- if self.current is not None:
- # Record the running time of the last executed task
- self.stats[self.current] = time.time() - self.stats[self.current]
-
- # Record the start time of the current task
- self.current = name
- self.stats[self.current] = time.time()
-
- def playbook_on_stats(self, stats):
- """
- Prints the timings
- """
-
- if os.getenv("ANSIBLE_PROFILE_DISABLE") is not None:
- return
-
- # Record the timing of the very last task
- if self.current is not None:
- self.stats[self.current] = time.time() - self.stats[self.current]
-
- # Sort the tasks by their running time
- results = sorted(
- self.stats.items(),
- key=lambda value: value[1],
- reverse=True,
- )
-
- # Just keep the top 10
- results = results[:10]
-
- # Print the timings
- for name, elapsed in results:
- print(
- "{0:-<70}{1:->9}".format(
- '{0} '.format(name),
- ' {0:.02f}s'.format(elapsed),
- )
- )
-
- total_seconds = sum([x[1] for x in self.stats.items()])
- print("\nPlaybook finished: {0}, {1} total tasks."
- " {2} elapsed. \n".format(
- time.asctime(),
- len(self.stats.items()),
- datetime.timedelta(seconds=(int(total_seconds)))
- )
- )
| {"golden_diff": "diff --git a/install_files/ansible-base/callback_plugins/profile_tasks.py b/install_files/ansible-base/callback_plugins/profile_tasks.py\ndeleted file mode 100644\n--- a/install_files/ansible-base/callback_plugins/profile_tasks.py\n+++ /dev/null\n@@ -1,74 +0,0 @@\n-# Source: https://github.com/jlafon/ansible-profile\n-# License: MIT\n-# More info: http://jlafon.io/ansible-profiling.html\n-# The profiling functionality will be provided by Ansible v2,\n-# since this callback_plugin has been merged into core,\n-# but we're including here to support older versions of Ansible.\n-import datetime\n-import os\n-import time\n-\n-\n-class CallbackModule(object):\n- \"\"\"\n- A plugin for timing tasks\n- \"\"\"\n- def __init__(self):\n- self.stats = {}\n- self.current = None\n-\n- def playbook_on_task_start(self, name, is_conditional):\n- \"\"\"\n- Logs the start of each task\n- \"\"\"\n-\n- if os.getenv(\"ANSIBLE_PROFILE_DISABLE\") is not None:\n- return\n-\n- if self.current is not None:\n- # Record the running time of the last executed task\n- self.stats[self.current] = time.time() - self.stats[self.current]\n-\n- # Record the start time of the current task\n- self.current = name\n- self.stats[self.current] = time.time()\n-\n- def playbook_on_stats(self, stats):\n- \"\"\"\n- Prints the timings\n- \"\"\"\n-\n- if os.getenv(\"ANSIBLE_PROFILE_DISABLE\") is not None:\n- return\n-\n- # Record the timing of the very last task\n- if self.current is not None:\n- self.stats[self.current] = time.time() - self.stats[self.current]\n-\n- # Sort the tasks by their running time\n- results = sorted(\n- self.stats.items(),\n- key=lambda value: value[1],\n- reverse=True,\n- )\n-\n- # Just keep the top 10\n- results = results[:10]\n-\n- # Print the timings\n- for name, elapsed in results:\n- print(\n- \"{0:-<70}{1:->9}\".format(\n- '{0} '.format(name),\n- ' {0:.02f}s'.format(elapsed),\n- )\n- )\n-\n- total_seconds = sum([x[1] for x in self.stats.items()])\n- print(\"\\nPlaybook finished: {0}, {1} total tasks.\"\n- \" {2} elapsed. \\n\".format(\n- time.asctime(),\n- len(self.stats.items()),\n- datetime.timedelta(seconds=(int(total_seconds)))\n- )\n- )\n", "issue": "Remove unnecessary Ansible callback for profile_tasks\n# Feature request\r\n\r\n## Description\r\n\r\nThe file at `install_files/ansible-base/callback_plugins/profile_tasks.py` was added via #1196, to provide additional information on task performance, with the goal of aiding developers in improving the server config workflow. 
Since we moved to Ansible v2 in #1146, the hardcoded plugin is no longer necessary.\r\n\r\nInstead, we can ansible add a lint to `ansible.cfg` under `[defaults]`:\r\n\r\n```\r\ncallback_whitelist = profile_tasks\r\n```\r\n\r\nThe simplification is possible because task profiling was [added to Ansible core as of v2](https://docs.ansible.com/ansible/devel/plugins/callback/profile_tasks.html).\r\n\r\n## User Stories\r\nAs a maintainer, I want to delete redundant code wherever possible, and lean on upstream to handle core functionality when appropriate.\r\n\n", "before_files": [{"content": "# Source: https://github.com/jlafon/ansible-profile\n# License: MIT\n# More info: http://jlafon.io/ansible-profiling.html\n# The profiling functionality will be provided by Ansible v2,\n# since this callback_plugin has been merged into core,\n# but we're including here to support older versions of Ansible.\nimport datetime\nimport os\nimport time\n\n\nclass CallbackModule(object):\n \"\"\"\n A plugin for timing tasks\n \"\"\"\n def __init__(self):\n self.stats = {}\n self.current = None\n\n def playbook_on_task_start(self, name, is_conditional):\n \"\"\"\n Logs the start of each task\n \"\"\"\n\n if os.getenv(\"ANSIBLE_PROFILE_DISABLE\") is not None:\n return\n\n if self.current is not None:\n # Record the running time of the last executed task\n self.stats[self.current] = time.time() - self.stats[self.current]\n\n # Record the start time of the current task\n self.current = name\n self.stats[self.current] = time.time()\n\n def playbook_on_stats(self, stats):\n \"\"\"\n Prints the timings\n \"\"\"\n\n if os.getenv(\"ANSIBLE_PROFILE_DISABLE\") is not None:\n return\n\n # Record the timing of the very last task\n if self.current is not None:\n self.stats[self.current] = time.time() - self.stats[self.current]\n\n # Sort the tasks by their running time\n results = sorted(\n self.stats.items(),\n key=lambda value: value[1],\n reverse=True,\n )\n\n # Just keep the top 10\n results = results[:10]\n\n # Print the timings\n for name, elapsed in results:\n print(\n \"{0:-<70}{1:->9}\".format(\n '{0} '.format(name),\n ' {0:.02f}s'.format(elapsed),\n )\n )\n\n total_seconds = sum([x[1] for x in self.stats.items()])\n print(\"\\nPlaybook finished: {0}, {1} total tasks.\"\n \" {2} elapsed. \\n\".format(\n time.asctime(),\n len(self.stats.items()),\n datetime.timedelta(seconds=(int(total_seconds)))\n )\n )\n", "path": "install_files/ansible-base/callback_plugins/profile_tasks.py"}]} | 1,355 | 610 |
gh_patches_debug_990 | rasdani/github-patches | git_diff | googleapis__python-bigquery-306 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Needs protobuf minimum version 3.12.0
If you are still having issues, please be sure to include as much information as possible:
#### Environment details
- OS type and version: MacOS
- Python version: `python --version` 3.8
- pip version: `pip --version` 20.2.3
- `google-cloud-bigquery` version: `pip show google-cloud-bigquery` 2.0.0
#### Steps to reproduce
1. Just install the package, import somewhere and run it.
#### Code example
```python
from google.cloud import bigquery
```
#### Stack trace
```
File "/.../venv/lib/python3.8/site-packages/google/protobuf/internal/python_message.py", line 570, in _GetFieldByName
return message_descriptor.fields_by_name[field_name]
KeyError: 'proto3_optional'
...
from google.cloud import bigquery
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery/__init__.py", line 35, in <module>
from google.cloud.bigquery.client import Client
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery/client.py", line 57, in <module>
from google.cloud.bigquery import _pandas_helpers
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery/_pandas_helpers.py", line 36, in <module>
from google.cloud.bigquery import schema
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery/schema.py", line 19, in <module>
from google.cloud.bigquery_v2 import types
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery_v2/__init__.py", line 19, in <module>
from .types.encryption_config import EncryptionConfiguration
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery_v2/types/__init__.py", line 18, in <module>
from .encryption_config import EncryptionConfiguration
File "/.../venv/lib/python3.8/site-packages/google/cloud/bigquery_v2/types/encryption_config.py", line 29, in <module>
class EncryptionConfiguration(proto.Message):
File "/.../venv/lib/python3.8/site-packages/proto/message.py", line 215, in __new__
field=[i.descriptor for i in fields],
File "/.../venv/lib/python3.8/site-packages/proto/message.py", line 215, in <listcomp>
field=[i.descriptor for i in fields],
File "/.../venv/lib/python3.8/site-packages/proto/fields.py", line 104, in descriptor
self._descriptor = descriptor_pb2.FieldDescriptorProto(
File "/.../venv/lib/python3.8/site-packages/google/protobuf/internal/python_message.py", line 509, in init
field = _GetFieldByName(message_descriptor, field_name)
File "/.../venv/lib/python3.8/site-packages/google/protobuf/internal/python_message.py", line 572, in _GetFieldByName
raise ValueError('Protocol message %s has no "%s" field.' %
ValueError: Protocol message FieldDescriptorProto has no "proto3_optional" field.
```
</issue>
<code>
[start of setup.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20
21 # Package metadata.
22
23 name = "google-cloud-bigquery"
24 description = "Google BigQuery API client library"
25
26 # Should be one of:
27 # 'Development Status :: 3 - Alpha'
28 # 'Development Status :: 4 - Beta'
29 # 'Development Status :: 5 - Production/Stable'
30 release_status = "Development Status :: 5 - Production/Stable"
31 dependencies = [
32 "google-api-core[grpc] >= 1.22.2, < 2.0.0dev",
33 "proto-plus >= 1.10.0",
34 "google-cloud-core >= 1.4.1, < 2.0dev",
35 "google-resumable-media >= 0.6.0, < 2.0dev",
36 "six >=1.13.0,< 2.0.0dev",
37 ]
38 extras = {
39 "bqstorage": [
40 "google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev",
41 # Due to an issue in pip's dependency resolver, the `grpc` extra is not
42 # installed, even though `google-cloud-bigquery-storage` specifies it
43 # as `google-api-core[grpc]`. We thus need to explicitly specify it here.
44 # See: https://github.com/googleapis/python-bigquery/issues/83 The
45 # grpc.Channel.close() method isn't added until 1.32.0.
46 # https://github.com/grpc/grpc/pull/15254
47 "grpcio >= 1.32.0, < 2.0dev",
48 "pyarrow >= 1.0.0, < 2.0dev",
49 ],
50 "pandas": [
51 "pandas>=0.23.0",
52 # pyarrow 1.0.0 is required for the use of timestamp_as_object keyword.
53 "pyarrow >= 1.0.0, < 2.0dev",
54 ],
55 "tqdm": ["tqdm >= 4.7.4, <5.0.0dev"],
56 "opentelemetry": [
57 "opentelemetry-api==0.9b0",
58 "opentelemetry-sdk==0.9b0",
59 "opentelemetry-instrumentation==0.9b0 ",
60 ],
61 }
62
63 all_extras = []
64
65 for extra in extras:
66 all_extras.extend(extras[extra])
67
68 extras["all"] = all_extras
69
70 # Setup boilerplate below this line.
71
72 package_root = os.path.abspath(os.path.dirname(__file__))
73
74 readme_filename = os.path.join(package_root, "README.rst")
75 with io.open(readme_filename, encoding="utf-8") as readme_file:
76 readme = readme_file.read()
77
78 version = {}
79 with open(os.path.join(package_root, "google/cloud/bigquery/version.py")) as fp:
80 exec(fp.read(), version)
81 version = version["__version__"]
82
83 # Only include packages under the 'google' namespace. Do not include tests,
84 # benchmarks, etc.
85 packages = [
86 package
87 for package in setuptools.PEP420PackageFinder.find()
88 if package.startswith("google")
89 ]
90
91 # Determine which namespaces are needed.
92 namespaces = ["google"]
93 if "google.cloud" in packages:
94 namespaces.append("google.cloud")
95
96
97 setuptools.setup(
98 name=name,
99 version=version,
100 description=description,
101 long_description=readme,
102 author="Google LLC",
103 author_email="[email protected]",
104 license="Apache 2.0",
105 url="https://github.com/googleapis/python-bigquery",
106 classifiers=[
107 release_status,
108 "Intended Audience :: Developers",
109 "License :: OSI Approved :: Apache Software License",
110 "Programming Language :: Python",
111 "Programming Language :: Python :: 3",
112 "Programming Language :: Python :: 3.6",
113 "Programming Language :: Python :: 3.7",
114 "Programming Language :: Python :: 3.8",
115 "Operating System :: OS Independent",
116 "Topic :: Internet",
117 ],
118 platforms="Posix; MacOS X; Windows",
119 packages=packages,
120 namespace_packages=namespaces,
121 install_requires=dependencies,
122 extras_require=extras,
123 python_requires=">=3.6",
124 include_package_data=True,
125 zip_safe=False,
126 )
127
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -34,6 +34,7 @@
"google-cloud-core >= 1.4.1, < 2.0dev",
"google-resumable-media >= 0.6.0, < 2.0dev",
"six >=1.13.0,< 2.0.0dev",
+ "protobuf >= 3.12.0",
]
extras = {
"bqstorage": [
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -34,6 +34,7 @@\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"six >=1.13.0,< 2.0.0dev\",\n+ \"protobuf >= 3.12.0\",\n ]\n extras = {\n \"bqstorage\": [\n", "issue": "Needs protobuf minimum version 3.12.0\nIf you are still having issues, please be sure to include as much information as possible:\r\n\r\n#### Environment details\r\n\r\n - OS type and version: MacOS\r\n - Python version: `python --version` 3.8\r\n - pip version: `pip --version` 20.2.3\r\n - `google-cloud-bigquery` version: `pip show google-cloud-bigquery` 2.0.0\r\n\r\n#### Steps to reproduce\r\n\r\n 1. Just install the package, import somewhere and run it.\r\n\r\n#### Code example\r\n\r\n```python\r\nfrom google.cloud import bigquery\r\n```\r\n\r\n#### Stack trace\r\n```\r\nFile \"/.../venv/lib/python3.8/site-packages/google/protobuf/internal/python_message.py\", line 570, in _GetFieldByName\r\n return message_descriptor.fields_by_name[field_name]\r\nKeyError: 'proto3_optional'\r\n\r\n...\r\n\r\n from google.cloud import bigquery\r\n File \"/.../venv/lib/python3.8/site-packages/google/cloud/bigquery/__init__.py\", line 35, in <module>\r\n from google.cloud.bigquery.client import Client\r\n File \"/.../venv/lib/python3.8/site-packages/google/cloud/bigquery/client.py\", line 57, in <module>\r\n from google.cloud.bigquery import _pandas_helpers\r\n File \"/.../venv/lib/python3.8/site-packages/google/cloud/bigquery/_pandas_helpers.py\", line 36, in <module>\r\n from google.cloud.bigquery import schema\r\n File \"/.../venv/lib/python3.8/site-packages/google/cloud/bigquery/schema.py\", line 19, in <module>\r\n from google.cloud.bigquery_v2 import types\r\n File \"/.../venv/lib/python3.8/site-packages/google/cloud/bigquery_v2/__init__.py\", line 19, in <module>\r\n from .types.encryption_config import EncryptionConfiguration\r\n File \"/.../venv/lib/python3.8/site-packages/google/cloud/bigquery_v2/types/__init__.py\", line 18, in <module>\r\n from .encryption_config import EncryptionConfiguration\r\n File \"/.../venv/lib/python3.8/site-packages/google/cloud/bigquery_v2/types/encryption_config.py\", line 29, in <module>\r\n class EncryptionConfiguration(proto.Message):\r\n File \"/.../venv/lib/python3.8/site-packages/proto/message.py\", line 215, in __new__\r\n field=[i.descriptor for i in fields],\r\n File \"/.../venv/lib/python3.8/site-packages/proto/message.py\", line 215, in <listcomp>\r\n field=[i.descriptor for i in fields],\r\n File \"/.../venv/lib/python3.8/site-packages/proto/fields.py\", line 104, in descriptor\r\n self._descriptor = descriptor_pb2.FieldDescriptorProto(\r\n File \"/.../venv/lib/python3.8/site-packages/google/protobuf/internal/python_message.py\", line 509, in init\r\n field = _GetFieldByName(message_descriptor, field_name)\r\n File \"/.../venv/lib/python3.8/site-packages/google/protobuf/internal/python_message.py\", line 572, in _GetFieldByName\r\n raise ValueError('Protocol message %s has no \"%s\" field.' 
%\r\nValueError: Protocol message FieldDescriptorProto has no \"proto3_optional\" field.\r\n```\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.22.2, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"six >=1.13.0,< 2.0.0dev\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 2.0dev\",\n ],\n \"pandas\": [\n \"pandas>=0.23.0\",\n # pyarrow 1.0.0 is required for the use of timestamp_as_object keyword.\n \"pyarrow >= 1.0.0, < 2.0dev\",\n ],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api==0.9b0\",\n \"opentelemetry-sdk==0.9b0\",\n \"opentelemetry-instrumentation==0.9b0 \",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 2,607 | 117 |
gh_patches_debug_30435 | rasdani/github-patches | git_diff | celery__celery-8982 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SecureSerializer fails on certain types and binary serializers
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [X] I have verified that the issue exists against the `main` branch of Celery.
- [ ] This has already been asked to the [discussions forum](https://github.com/celery/celery/discussions) first.
- [X] I have read the relevant section in the
[contribution guide](https://docs.celeryq.dev/en/main/contributing.html#other-bugs)
on reporting bugs.
- [X] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [X] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [X] I have checked the [commit log](https://github.com/celery/celery/commits/main)
to find out if the bug was already fixed in the main branch.
- [X] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
- [X] I have tried to reproduce the issue with [pytest-celery](https://docs.celeryq.dev/projects/pytest-celery/en/latest/userguide/celery-bug-report.html) and added the reproduction script below.
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [X] I have verified that the issue exists against the `main` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**:
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
- **Minimal Python Version**: N/A or Unknown
- **Minimal Celery Version**: N/A or Unknown
- **Minimal Kombu Version**: N/A or Unknown
- **Minimal Broker Version**: N/A or Unknown
- **Minimal Result Backend Version**: N/A or Unknown
- **Minimal OS and/or Kernel Version**: N/A or Unknown
- **Minimal Broker Client Version**: N/A or Unknown
- **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
Alternatively, the pytest-celery plugin can be used to create standalone reproduction scripts
that can be added to this report. See the pytest-celery documentation for more information at
pytest-celery.readthedocs.io
-->
<details>
<p>
```python
app.conf.update(
security_key='/private/keys/celery/private.key',
security_certificate='/private/keys/celery/public.pem',
security_cert_store='/private/keys/celery/*.pem')
app.setup_security()
@app.task
def serializer_test_task(arg: Any) -> Any:
return arg
def test_serialize(data):
res = serializer_test_task.delay(data)
deserialized_value = res.get()
assert deserialized_value == data
test_serialize(data=b"foo") # fails to validate signature
#############
app.setup_security(serializer="pickle")
test_serialize(data="foo") # fails to serialize any value using pickle serializer
```
</p>
</details>
# Expected Behavior
# Actual Behavior
There are several bugs in the SecureSerializer feature:
- When using the 'json' serializer (default), it will always fail to validate the signature if the passed value is of type 'bytes'
- When using a binary serializer (rather than a string-based serializer like 'json'), for example 'pickle', the serializer is completely broken.
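For illustration only (not Celery code), a minimal sketch of why a str round-trip cannot work for binary serializers: a pickled body is arbitrary bytes, so decoding it as UTF-8, which is what the `bytes_to_str(body)` calls in `deserialize`/`_unpack` imply, corrupts or rejects the very bytes that were signed.

```python
# Minimal sketch, independent of Celery: a pickled payload is not valid UTF-8,
# so it cannot survive a bytes -> str -> bytes round-trip.
import pickle

body = pickle.dumps({"arg": b"\x80\x81"})  # arbitrary binary payload
try:
    body.decode("utf-8")  # what coercing the body to str implies
except UnicodeDecodeError as exc:
    print("binary body cannot be decoded as UTF-8:", exc)
```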
</issue>
<code>
[start of celery/security/serialization.py]
1 """Secure serializer."""
2 from kombu.serialization import dumps, loads, registry
3 from kombu.utils.encoding import bytes_to_str, ensure_bytes, str_to_bytes
4
5 from celery.app.defaults import DEFAULT_SECURITY_DIGEST
6 from celery.utils.serialization import b64decode, b64encode
7
8 from .certificate import Certificate, FSCertStore
9 from .key import PrivateKey
10 from .utils import get_digest_algorithm, reraise_errors
11
12 __all__ = ('SecureSerializer', 'register_auth')
13
14
15 class SecureSerializer:
16 """Signed serializer."""
17
18 def __init__(self, key=None, cert=None, cert_store=None,
19 digest=DEFAULT_SECURITY_DIGEST, serializer='json'):
20 self._key = key
21 self._cert = cert
22 self._cert_store = cert_store
23 self._digest = get_digest_algorithm(digest)
24 self._serializer = serializer
25
26 def serialize(self, data):
27 """Serialize data structure into string."""
28 assert self._key is not None
29 assert self._cert is not None
30 with reraise_errors('Unable to serialize: {0!r}', (Exception,)):
31 content_type, content_encoding, body = dumps(
32 bytes_to_str(data), serializer=self._serializer)
33 # What we sign is the serialized body, not the body itself.
34 # this way the receiver doesn't have to decode the contents
35 # to verify the signature (and thus avoiding potential flaws
36 # in the decoding step).
37 body = ensure_bytes(body)
38 return self._pack(body, content_type, content_encoding,
39 signature=self._key.sign(body, self._digest),
40 signer=self._cert.get_id())
41
42 def deserialize(self, data):
43 """Deserialize data structure from string."""
44 assert self._cert_store is not None
45 with reraise_errors('Unable to deserialize: {0!r}', (Exception,)):
46 payload = self._unpack(data)
47 signature, signer, body = (payload['signature'],
48 payload['signer'],
49 payload['body'])
50 self._cert_store[signer].verify(body, signature, self._digest)
51 return loads(bytes_to_str(body), payload['content_type'],
52 payload['content_encoding'], force=True)
53
54 def _pack(self, body, content_type, content_encoding, signer, signature,
55 sep=str_to_bytes('\x00\x01')):
56 fields = sep.join(
57 ensure_bytes(s) for s in [signer, signature, content_type,
58 content_encoding, body]
59 )
60 return b64encode(fields)
61
62 def _unpack(self, payload, sep=str_to_bytes('\x00\x01')):
63 raw_payload = b64decode(ensure_bytes(payload))
64 first_sep = raw_payload.find(sep)
65
66 signer = raw_payload[:first_sep]
67 signer_cert = self._cert_store[signer]
68
69 # shift 3 bits right to get signature length
70 # 2048bit rsa key has a signature length of 256
71 # 4096bit rsa key has a signature length of 512
72 sig_len = signer_cert.get_pubkey().key_size >> 3
73 sep_len = len(sep)
74 signature_start_position = first_sep + sep_len
75 signature_end_position = signature_start_position + sig_len
76 signature = raw_payload[
77 signature_start_position:signature_end_position
78 ]
79
80 v = raw_payload[signature_end_position + sep_len:].split(sep)
81
82 return {
83 'signer': signer,
84 'signature': signature,
85 'content_type': bytes_to_str(v[0]),
86 'content_encoding': bytes_to_str(v[1]),
87 'body': bytes_to_str(v[2]),
88 }
89
90
91 def register_auth(key=None, key_password=None, cert=None, store=None,
92 digest=DEFAULT_SECURITY_DIGEST,
93 serializer='json'):
94 """Register security serializer."""
95 s = SecureSerializer(key and PrivateKey(key, password=key_password),
96 cert and Certificate(cert),
97 store and FSCertStore(store),
98 digest, serializer=serializer)
99 registry.register('auth', s.serialize, s.deserialize,
100 content_type='application/data',
101 content_encoding='utf-8')
102
[end of celery/security/serialization.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/celery/security/serialization.py b/celery/security/serialization.py
--- a/celery/security/serialization.py
+++ b/celery/security/serialization.py
@@ -29,7 +29,8 @@
assert self._cert is not None
with reraise_errors('Unable to serialize: {0!r}', (Exception,)):
content_type, content_encoding, body = dumps(
- bytes_to_str(data), serializer=self._serializer)
+ data, serializer=self._serializer)
+
# What we sign is the serialized body, not the body itself.
# this way the receiver doesn't have to decode the contents
# to verify the signature (and thus avoiding potential flaws
@@ -48,7 +49,7 @@
payload['signer'],
payload['body'])
self._cert_store[signer].verify(body, signature, self._digest)
- return loads(bytes_to_str(body), payload['content_type'],
+ return loads(body, payload['content_type'],
payload['content_encoding'], force=True)
def _pack(self, body, content_type, content_encoding, signer, signature,
@@ -84,7 +85,7 @@
'signature': signature,
'content_type': bytes_to_str(v[0]),
'content_encoding': bytes_to_str(v[1]),
- 'body': bytes_to_str(v[2]),
+ 'body': v[2],
}
| {"golden_diff": "diff --git a/celery/security/serialization.py b/celery/security/serialization.py\n--- a/celery/security/serialization.py\n+++ b/celery/security/serialization.py\n@@ -29,7 +29,8 @@\n assert self._cert is not None\n with reraise_errors('Unable to serialize: {0!r}', (Exception,)):\n content_type, content_encoding, body = dumps(\n- bytes_to_str(data), serializer=self._serializer)\n+ data, serializer=self._serializer)\n+\n # What we sign is the serialized body, not the body itself.\n # this way the receiver doesn't have to decode the contents\n # to verify the signature (and thus avoiding potential flaws\n@@ -48,7 +49,7 @@\n payload['signer'],\n payload['body'])\n self._cert_store[signer].verify(body, signature, self._digest)\n- return loads(bytes_to_str(body), payload['content_type'],\n+ return loads(body, payload['content_type'],\n payload['content_encoding'], force=True)\n \n def _pack(self, body, content_type, content_encoding, signer, signature,\n@@ -84,7 +85,7 @@\n 'signature': signature,\n 'content_type': bytes_to_str(v[0]),\n 'content_encoding': bytes_to_str(v[1]),\n- 'body': bytes_to_str(v[2]),\n+ 'body': v[2],\n }\n", "issue": "SecureSerializer fails on certain types and binary serializers\n<!--\r\nPlease fill this template entirely and do not erase parts of it.\r\nWe reserve the right to close without a response\r\nbug reports which are incomplete.\r\n-->\r\n# Checklist\r\n<!--\r\nTo check an item on the list replace [ ] with [x].\r\n-->\r\n- [X] I have verified that the issue exists against the `main` branch of Celery.\r\n- [ ] This has already been asked to the [discussions forum](https://github.com/celery/celery/discussions) first.\r\n- [X] I have read the relevant section in the\r\n [contribution guide](https://docs.celeryq.dev/en/main/contributing.html#other-bugs)\r\n on reporting bugs.\r\n- [X] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)\r\n for similar or identical bug reports.\r\n- [X] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)\r\n for existing proposed fixes.\r\n- [X] I have checked the [commit log](https://github.com/celery/celery/commits/main)\r\n to find out if the bug was already fixed in the main branch.\r\n- [X] I have included all related issues and possible duplicate issues\r\n in this issue (If there are none, check this box anyway).\r\n- [X] I have tried to reproduce the issue with [pytest-celery](https://docs.celeryq.dev/projects/pytest-celery/en/latest/userguide/celery-bug-report.html) and added the reproduction script below.\r\n\r\n## Mandatory Debugging Information\r\n\r\n- [ ] I have included the output of ``celery -A proj report`` in the issue.\r\n (if you are not able to do this, then at least specify the Celery\r\n version affected).\r\n- [X] I have verified that the issue exists against the `main` branch of Celery.\r\n- [ ] I have included the contents of ``pip freeze`` in the issue.\r\n- [ ] I have included all the versions of all the external dependencies required\r\n to reproduce this bug.\r\n\r\n## Optional Debugging Information\r\n<!--\r\nTry some of the below if you think they are relevant.\r\nIt will help us figure out the scope of the bug and how many users it affects.\r\n-->\r\n- [ ] I have tried reproducing the issue on more than one Python version\r\n and/or implementation.\r\n- [ ] I 
have tried reproducing the issue on more than one message broker and/or\r\n result backend.\r\n- [ ] I have tried reproducing the issue on more than one version of the message\r\n broker and/or result backend.\r\n- [ ] I have tried reproducing the issue on more than one operating system.\r\n- [ ] I have tried reproducing the issue on more than one workers pool.\r\n- [ ] I have tried reproducing the issue with autoscaling, retries,\r\n ETA/Countdown & rate limits disabled.\r\n- [ ] I have tried reproducing the issue after downgrading\r\n and/or upgrading Celery and its dependencies.\r\n\r\n## Related Issues and Possible Duplicates\r\n<!--\r\nPlease make sure to search and mention any related issues\r\nor possible duplicates to this issue as requested by the checklist above.\r\n\r\nThis may or may not include issues in other repositories that the Celery project\r\nmaintains or other repositories that are dependencies of Celery.\r\n\r\nIf you don't know how to mention issues, please refer to Github's documentation\r\non the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests\r\n-->\r\n\r\n#### Related Issues\r\n\r\n- None\r\n\r\n#### Possible Duplicates\r\n\r\n- None\r\n\r\n## Environment & Settings\r\n<!-- Include the contents of celery --version below -->\r\n**Celery version**:\r\n<!-- Include the output of celery -A proj report below -->\r\n<details>\r\n<summary><b><code>celery report</code> Output:</b></summary>\r\n<p>\r\n\r\n```\r\n```\r\n\r\n</p>\r\n</details>\r\n\r\n# Steps to Reproduce\r\n\r\n## Required Dependencies\r\n<!-- Please fill the required dependencies to reproduce this issue -->\r\n- **Minimal Python Version**: N/A or Unknown\r\n- **Minimal Celery Version**: N/A or Unknown\r\n- **Minimal Kombu Version**: N/A or Unknown\r\n- **Minimal Broker Version**: N/A or Unknown\r\n- **Minimal Result Backend Version**: N/A or Unknown\r\n- **Minimal OS and/or Kernel Version**: N/A or Unknown\r\n- **Minimal Broker Client Version**: N/A or Unknown\r\n- **Minimal Result Backend Client Version**: N/A or Unknown\r\n\r\n### Python Packages\r\n<!-- Please fill the contents of pip freeze below -->\r\n<details>\r\n<summary><b><code>pip freeze</code> Output:</b></summary>\r\n<p>\r\n\r\n```\r\n```\r\n\r\n</p>\r\n</details>\r\n\r\n### Other Dependencies\r\n<!--\r\nPlease provide system dependencies, configuration files\r\nand other dependency information if applicable\r\n-->\r\n<details>\r\n<p>\r\nN/A\r\n</p>\r\n</details>\r\n\r\n## Minimally Reproducible Test Case\r\n<!--\r\nPlease provide a reproducible test case.\r\nRefer to the Reporting Bugs section in our contribution guide.\r\n\r\nWe prefer submitting test cases in the form of a PR to our integration test suite.\r\nIf you can provide one, please mention the PR number below.\r\nIf not, please attach the most minimal code example required to reproduce the issue below.\r\nIf the test case is too large, please include a link to a gist or a repository below.\r\n\r\nAlternatively, the pytest-celery plugin can be used to create standalone reproduction scripts\r\nthat can be added to this report. 
See the pytest-celery documentation for more information at\r\npytest-celery.readthedocs.io\r\n-->\r\n\r\n<details>\r\n<p>\r\n\r\n```python\r\n\r\napp.conf.update(\r\n security_key='/private/keys/celery/private.key',\r\n security_certificate='/private/keys/celery/public.pem',\r\n security_cert_store='/private/keys/celery/*.pem')\r\n\r\napp.setup_security()\r\n\r\[email protected]\r\ndef serializer_test_task(arg: Any) -> Any:\r\n return arg\r\n\r\ndef test_serialize(data):\r\n res = serializer_test_task.delay(data)\r\n deserialized_value = res.get()\r\n assert deserialized_value == data\r\n\r\ntest_serialize(data=b\"foo\") # fails to validate signature\r\n\r\n#############\r\n\r\napp.setup_security(serializer=\"pickle\")\r\ntest_serialize(data=\"foo\") # fails to serialize any value using pickle serializer\r\n\r\n```\r\n\r\n</p>\r\n</details>\r\n\r\n# Expected Behavior\r\n\r\n\r\n# Actual Behavior\r\n\r\nThere are several bugs in the SecureSerializer feature:\r\n - When using the 'json' serializer (default), it will always fail to validate the signature if the passed value is of type 'bytes'\r\n- When using the other binary serializer (and not a string-based serializer like 'json'), for example 'pickle', the serializer is completely broken. \r\n\n", "before_files": [{"content": "\"\"\"Secure serializer.\"\"\"\nfrom kombu.serialization import dumps, loads, registry\nfrom kombu.utils.encoding import bytes_to_str, ensure_bytes, str_to_bytes\n\nfrom celery.app.defaults import DEFAULT_SECURITY_DIGEST\nfrom celery.utils.serialization import b64decode, b64encode\n\nfrom .certificate import Certificate, FSCertStore\nfrom .key import PrivateKey\nfrom .utils import get_digest_algorithm, reraise_errors\n\n__all__ = ('SecureSerializer', 'register_auth')\n\n\nclass SecureSerializer:\n \"\"\"Signed serializer.\"\"\"\n\n def __init__(self, key=None, cert=None, cert_store=None,\n digest=DEFAULT_SECURITY_DIGEST, serializer='json'):\n self._key = key\n self._cert = cert\n self._cert_store = cert_store\n self._digest = get_digest_algorithm(digest)\n self._serializer = serializer\n\n def serialize(self, data):\n \"\"\"Serialize data structure into string.\"\"\"\n assert self._key is not None\n assert self._cert is not None\n with reraise_errors('Unable to serialize: {0!r}', (Exception,)):\n content_type, content_encoding, body = dumps(\n bytes_to_str(data), serializer=self._serializer)\n # What we sign is the serialized body, not the body itself.\n # this way the receiver doesn't have to decode the contents\n # to verify the signature (and thus avoiding potential flaws\n # in the decoding step).\n body = ensure_bytes(body)\n return self._pack(body, content_type, content_encoding,\n signature=self._key.sign(body, self._digest),\n signer=self._cert.get_id())\n\n def deserialize(self, data):\n \"\"\"Deserialize data structure from string.\"\"\"\n assert self._cert_store is not None\n with reraise_errors('Unable to deserialize: {0!r}', (Exception,)):\n payload = self._unpack(data)\n signature, signer, body = (payload['signature'],\n payload['signer'],\n payload['body'])\n self._cert_store[signer].verify(body, signature, self._digest)\n return loads(bytes_to_str(body), payload['content_type'],\n payload['content_encoding'], force=True)\n\n def _pack(self, body, content_type, content_encoding, signer, signature,\n sep=str_to_bytes('\\x00\\x01')):\n fields = sep.join(\n ensure_bytes(s) for s in [signer, signature, content_type,\n content_encoding, body]\n )\n return b64encode(fields)\n\n def _unpack(self, payload, 
sep=str_to_bytes('\\x00\\x01')):\n raw_payload = b64decode(ensure_bytes(payload))\n first_sep = raw_payload.find(sep)\n\n signer = raw_payload[:first_sep]\n signer_cert = self._cert_store[signer]\n\n # shift 3 bits right to get signature length\n # 2048bit rsa key has a signature length of 256\n # 4096bit rsa key has a signature length of 512\n sig_len = signer_cert.get_pubkey().key_size >> 3\n sep_len = len(sep)\n signature_start_position = first_sep + sep_len\n signature_end_position = signature_start_position + sig_len\n signature = raw_payload[\n signature_start_position:signature_end_position\n ]\n\n v = raw_payload[signature_end_position + sep_len:].split(sep)\n\n return {\n 'signer': signer,\n 'signature': signature,\n 'content_type': bytes_to_str(v[0]),\n 'content_encoding': bytes_to_str(v[1]),\n 'body': bytes_to_str(v[2]),\n }\n\n\ndef register_auth(key=None, key_password=None, cert=None, store=None,\n digest=DEFAULT_SECURITY_DIGEST,\n serializer='json'):\n \"\"\"Register security serializer.\"\"\"\n s = SecureSerializer(key and PrivateKey(key, password=key_password),\n cert and Certificate(cert),\n store and FSCertStore(store),\n digest, serializer=serializer)\n registry.register('auth', s.serialize, s.deserialize,\n content_type='application/data',\n content_encoding='utf-8')\n", "path": "celery/security/serialization.py"}]} | 3,130 | 315 |
gh_patches_debug_14696 | rasdani/github-patches | git_diff | Health-Informatics-UoN__Carrot-Mapper-645 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
💡 Combine Github release workflows
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Is your proposal related to a problem or functionality gap?
Currently the release workflow for dev/test/production is 3 separate GitHub Actions workflows, working with Gitflow. So merging a new fix requires 3 separate PRs, artifacts, and deployments: https://github.com/Health-Informatics-UoN/CaRROT-Mapper/pulls?q=is%3Apr+is%3Aclosed
### Describe your proposal
I propose combining this into one workflow, utilising GitHub environments to deploy a single artifact from the main branch. An example here: https://github.com/Health-Informatics-UoN/rcc-monitor/actions/runs/7931015704
### Describe alternatives you've considered
_No response_
### I'm part of a Project Team
Yes - DRS
### Anything else?
It might not be done here, but I think it's also worth exploring when database migrations happen as part of CD; I'm not sure if this is currently a manual process.
### Are you willing to contribute to developing this feature?
✅ Yes, my team or I intend to do the development.
</issue>
<code>
[start of app/api/api/settings.py]
1 """
2 Django settings for api project.
3
4 Generated by 'django-admin startproject' using Django 3.1.5.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/3.1/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/3.1/ref/settings/
11 """
12
13 import os
14 from dotenv import load_dotenv
15
16 load_dotenv()
17
18 # Build paths inside the project like this: BASE_DIR / 'subdir'.
19 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
20 TEMPLATE_DIR = os.path.join(BASE_DIR, "templates")
21 STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
22 STATIC_DIR = os.path.join(BASE_DIR, "static")
23 STATICFILES_DIRS = [
24 STATIC_DIR,
25 ]
26 MEDIA_URL = "/media/"
27 MEDIA_ROOT = os.path.join(BASE_DIR, "media")
28
29 # Quick-start development settings - unsuitable for production
30 # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
31
32 # SECURITY WARNING: keep the secret key used in production secret!
33 SECRET_KEY = os.environ.get("SECRET_KEY")
34
35 # SECURITY WARNING: don't run with debug turned on in production!
36 # DEBUG will only evaluate to True if 'True' or 1 is supplied
37 DEBUG = os.getenv("DEBUG", "False") in ["True", 1]
38
39 # Here we need to manipulate a string containing a Python list into a list of strings
40 ALLOWED_HOSTS = [
41 x.strip()[1:-1] for x in os.environ.get("ALLOWED_HOSTS")[1:-1].split(",")
42 ]
43
44
45 # Application definition
46
47 INSTALLED_APPS = [
48 "django.contrib.admin",
49 "django.contrib.auth",
50 "django.contrib.contenttypes",
51 "django.contrib.sessions",
52 "django.contrib.messages",
53 "whitenoise.runserver_nostatic",
54 "django.contrib.staticfiles",
55 "extra_views",
56 "mapping",
57 "data",
58 "rest_framework",
59 "django_filters",
60 "rest_framework.authtoken",
61 "corsheaders",
62 ]
63
64 MIDDLEWARE = [
65 "corsheaders.middleware.CorsMiddleware",
66 "django.middleware.security.SecurityMiddleware",
67 "django.contrib.sessions.middleware.SessionMiddleware",
68 "django.middleware.common.CommonMiddleware",
69 "django.middleware.csrf.CsrfViewMiddleware",
70 "django.contrib.auth.middleware.AuthenticationMiddleware",
71 "django.contrib.messages.middleware.MessageMiddleware",
72 "django.middleware.clickjacking.XFrameOptionsMiddleware",
73 "whitenoise.middleware.WhiteNoiseMiddleware",
74 ]
75
76 STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
77
78 ROOT_URLCONF = "api.urls"
79
80 TEMPLATES = [
81 {
82 "BACKEND": "django.template.backends.django.DjangoTemplates",
83 "DIRS": [os.path.join(BASE_DIR, "templates")],
84 "APP_DIRS": True,
85 "OPTIONS": {
86 "context_processors": [
87 "django.template.context_processors.debug",
88 "django.template.context_processors.request",
89 "django.contrib.auth.context_processors.auth",
90 "django.contrib.messages.context_processors.messages",
91 "django.template.context_processors.media",
92 "api.react.react",
93 ],
94 },
95 },
96 ]
97
98 WSGI_APPLICATION = "api.wsgi.application"
99
100
101 # Database
102 # https://docs.djangoproject.com/en/3.1/ref/settings/#databases
103
104 DATABASES = {
105 "default": {
106 "ENGINE": os.getenv("COCONNECT_DB_ENGINE"),
107 "HOST": os.getenv("COCONNECT_DB_HOST"),
108 "PORT": os.getenv("COCONNECT_DB_PORT"),
109 "NAME": os.getenv("COCONNECT_DB_NAME"),
110 "USER": os.getenv("COCONNECT_DB_USER"),
111 "PASSWORD": os.getenv("COCONNECT_DB_PASSWORD"),
112 "TEST": {
113 "NAME": "throwaway-db",
114 "MIRROR": "default",
115 },
116 }
117 }
118
119
120 # Password validation
121 # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
122
123 AUTH_PASSWORD_VALIDATORS = [
124 {
125 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
126 },
127 {
128 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
129 },
130 {
131 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
132 },
133 {
134 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
135 },
136 ]
137
138
139 # Internationalization
140 # https://docs.djangoproject.com/en/3.1/topics/i18n/
141
142 LANGUAGE_CODE = "en-us"
143
144 TIME_ZONE = "UTC"
145
146 USE_I18N = True
147
148 USE_L10N = True
149
150 USE_TZ = True
151
152 CORS_ORIGIN_ALLOW_ALL = True
153
154 REST_FRAMEWORK = {
155 "DEFAULT_AUTHENTICATION_CLASSES": (
156 "rest_framework.authentication.TokenAuthentication",
157 "rest_framework.authentication.SessionAuthentication",
158 ),
159 "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
160 }
161 # Static files (CSS, JavaScript, Images)
162 # https://docs.djangoproject.com/en/3.1/howto/static-files/
163
164 STATIC_URL = "/static/"
165
166 LOGIN_REDIRECT_URL = "/"
167 EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
168
169 # NLP API KEY
170 NLP_API_KEY = os.getenv("NLP_API_KEY")
171
172 SESSION_COOKIE_AGE = 86400 # session length is 24 hours
173
[end of app/api/api/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/api/settings.py b/app/api/api/settings.py
--- a/app/api/api/settings.py
+++ b/app/api/api/settings.py
@@ -11,6 +11,7 @@
"""
import os
+
from dotenv import load_dotenv
load_dotenv()
@@ -59,6 +60,7 @@
"django_filters",
"rest_framework.authtoken",
"corsheaders",
+ "test",
]
MIDDLEWARE = [
@@ -110,8 +112,7 @@
"USER": os.getenv("COCONNECT_DB_USER"),
"PASSWORD": os.getenv("COCONNECT_DB_PASSWORD"),
"TEST": {
- "NAME": "throwaway-db",
- "MIRROR": "default",
+ "NAME": "throwawaydb",
},
}
}
| {"golden_diff": "diff --git a/app/api/api/settings.py b/app/api/api/settings.py\n--- a/app/api/api/settings.py\n+++ b/app/api/api/settings.py\n@@ -11,6 +11,7 @@\n \"\"\"\n \n import os\n+\n from dotenv import load_dotenv\n \n load_dotenv()\n@@ -59,6 +60,7 @@\n \"django_filters\",\n \"rest_framework.authtoken\",\n \"corsheaders\",\n+ \"test\",\n ]\n \n MIDDLEWARE = [\n@@ -110,8 +112,7 @@\n \"USER\": os.getenv(\"COCONNECT_DB_USER\"),\n \"PASSWORD\": os.getenv(\"COCONNECT_DB_PASSWORD\"),\n \"TEST\": {\n- \"NAME\": \"throwaway-db\",\n- \"MIRROR\": \"default\",\n+ \"NAME\": \"throwawaydb\",\n },\n }\n }\n", "issue": "\ud83d\udca1 Combine Github release workflows\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues\r\n\r\n### Is your proposal related to a problem or functionality gap?\r\n\r\nCurrently the release workflow for dev/test/production is 3 separate Github actions, working with Gitflow. So merging a new fix requires 3 separate PRs, artifacts, and deployments: https://github.com/Health-Informatics-UoN/CaRROT-Mapper/pulls?q=is%3Apr+is%3Aclosed\r\n\r\n\r\n\r\n### Describe your proposal\r\n\r\nI propose combining this into one workflow, utilising Github environments to deploy a single artifact from the main branch. An example here: https://github.com/Health-Informatics-UoN/rcc-monitor/actions/runs/7931015704\r\n\r\n\r\n### Describe alternatives you've considered\r\n\r\n_No response_\r\n\r\n### I'm part of a Project Team\r\n\r\nYes - DRS\r\n\r\n### Anything else?\r\n\r\nIt might not be done here - but I think it's worth also exploring when database migrations happen as part of CD, not sure if this currently a manual process?\r\n\r\n### Are you willing to contribute to developing this feature?\r\n\r\n\u2705 Yes, me or my team intend to do the development.\n", "before_files": [{"content": "\"\"\"\nDjango settings for api project.\n\nGenerated by 'django-admin startproject' using Django 3.1.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.1/ref/settings/\n\"\"\"\n\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nTEMPLATE_DIR = os.path.join(BASE_DIR, \"templates\")\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\nSTATIC_DIR = os.path.join(BASE_DIR, \"static\")\nSTATICFILES_DIRS = [\n STATIC_DIR,\n]\nMEDIA_URL = \"/media/\"\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\n# DEBUG will only evaluate to True if 'True' or 1 is supplied\nDEBUG = os.getenv(\"DEBUG\", \"False\") in [\"True\", 1]\n\n# Here we need to manipulate a string containing a Python list into a list of strings\nALLOWED_HOSTS = [\n x.strip()[1:-1] for x in os.environ.get(\"ALLOWED_HOSTS\")[1:-1].split(\",\")\n]\n\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"whitenoise.runserver_nostatic\",\n 
\"django.contrib.staticfiles\",\n \"extra_views\",\n \"mapping\",\n \"data\",\n \"rest_framework\",\n \"django_filters\",\n \"rest_framework.authtoken\",\n \"corsheaders\",\n]\n\nMIDDLEWARE = [\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n]\n\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\nROOT_URLCONF = \"api.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"django.template.context_processors.media\",\n \"api.react.react\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"api.wsgi.application\"\n\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": os.getenv(\"COCONNECT_DB_ENGINE\"),\n \"HOST\": os.getenv(\"COCONNECT_DB_HOST\"),\n \"PORT\": os.getenv(\"COCONNECT_DB_PORT\"),\n \"NAME\": os.getenv(\"COCONNECT_DB_NAME\"),\n \"USER\": os.getenv(\"COCONNECT_DB_USER\"),\n \"PASSWORD\": os.getenv(\"COCONNECT_DB_PASSWORD\"),\n \"TEST\": {\n \"NAME\": \"throwaway-db\",\n \"MIRROR\": \"default\",\n },\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nCORS_ORIGIN_ALLOW_ALL = True\n\nREST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.TokenAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n}\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\nLOGIN_REDIRECT_URL = \"/\"\nEMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n\n# NLP API KEY\nNLP_API_KEY = os.getenv(\"NLP_API_KEY\")\n\nSESSION_COOKIE_AGE = 86400 # session length is 24 hours\n", "path": "app/api/api/settings.py"}]} | 2,343 | 180 |
gh_patches_debug_3328 | rasdani/github-patches | git_diff | Mailu__Mailu-1944 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Letsencrypt Force Renewal
Is there a limit on the Subject Alt Name entries?
I have updated my /mailu/mailu.env "HOSTNAMES" variable, but when I restart Mailu it doesn't update the Subject Alt Names on the mailu cert.
Previously this worked, so I am guessing that I need to force Letsencrypt to refresh, as it isn't within the renewal window. But there is no guidance for the new letsencrypt certbot.
I am using the latest Mailu version (1.7), and this is the command I am using to restart Mailu: '/mailu/docker-compose -p mailu up -d'
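For what it's worth, my guess at a fix (I may be misreading the certbot docs): keep `--keep-until-expiring` but also pass `--renew-with-new-domains`, so certbot re-issues whenever the requested domain list no longer matches the existing "mailu" certificate. A rough sketch of the command list, mirroring core/nginx/letsencrypt.py:

```python
# Rough sketch only; the hostnames below are placeholders for my HOSTNAMES value.
command = [
    "certbot",
    "-n", "--agree-tos",
    "-d", "mail.example.com,alternative.example.com",
    "certonly", "--standalone",
    "--cert-name", "mailu",
    "--keep-until-expiring",
    "--renew-with-new-domains",  # re-issue when the SAN list changes
    "--config-dir", "/certs/letsencrypt",
]
```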
</issue>
<code>
[start of core/nginx/letsencrypt.py]
1 #!/usr/bin/python3
2
3 import os
4 import time
5 import subprocess
6
7
8 command = [
9 "certbot",
10 "-n", "--agree-tos", # non-interactive
11 "-d", os.environ["HOSTNAMES"],
12 "-m", "{}@{}".format(os.environ["POSTMASTER"], os.environ["DOMAIN"]),
13 "certonly", "--standalone",
14 "--cert-name", "mailu",
15 "--preferred-challenges", "http", "--http-01-port", "8008",
16 "--keep-until-expiring",
17 "--rsa-key-size", "4096",
18 "--config-dir", "/certs/letsencrypt",
19 "--post-hook", "/config.py"
20 ]
21
22 # Wait for nginx to start
23 time.sleep(5)
24
25 # Run certbot every hour
26 while True:
27 subprocess.call(command)
28 time.sleep(3600)
29
30
[end of core/nginx/letsencrypt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/nginx/letsencrypt.py b/core/nginx/letsencrypt.py
--- a/core/nginx/letsencrypt.py
+++ b/core/nginx/letsencrypt.py
@@ -14,8 +14,8 @@
"--cert-name", "mailu",
"--preferred-challenges", "http", "--http-01-port", "8008",
"--keep-until-expiring",
- "--rsa-key-size", "4096",
"--config-dir", "/certs/letsencrypt",
+ "--renew-with-new-domains",
"--post-hook", "/config.py"
]
| {"golden_diff": "diff --git a/core/nginx/letsencrypt.py b/core/nginx/letsencrypt.py\n--- a/core/nginx/letsencrypt.py\n+++ b/core/nginx/letsencrypt.py\n@@ -14,8 +14,8 @@\n \"--cert-name\", \"mailu\",\n \"--preferred-challenges\", \"http\", \"--http-01-port\", \"8008\",\n \"--keep-until-expiring\",\n- \"--rsa-key-size\", \"4096\",\n \"--config-dir\", \"/certs/letsencrypt\",\n+ \"--renew-with-new-domains\",\n \"--post-hook\", \"/config.py\"\n ]\n", "issue": "Letsencrypt Force Renewal\nIs there a limit on the Subject Alt Name entries?\r\n\r\nI have updated my /mailu/mailu.env \"HOSTNAMES\" variable, but when I restart Mailu it doesn't update the Subject Alt Names on the mailu cert.\r\n\r\nPreviously it has worked, so I am guessing that I need to force Letsencrypt to refresh as it isnt within the renewal window. But there is no guidance for the new letsencrypt certbot.\r\n\r\nI am using the latest Mailu version (1.7) and this is the command I am using to restart mailu '/mailu/docker-compose -p mailu up -d'\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport time\nimport subprocess\n\n\ncommand = [\n \"certbot\",\n \"-n\", \"--agree-tos\", # non-interactive\n \"-d\", os.environ[\"HOSTNAMES\"],\n \"-m\", \"{}@{}\".format(os.environ[\"POSTMASTER\"], os.environ[\"DOMAIN\"]),\n \"certonly\", \"--standalone\",\n \"--cert-name\", \"mailu\",\n \"--preferred-challenges\", \"http\", \"--http-01-port\", \"8008\",\n \"--keep-until-expiring\",\n \"--rsa-key-size\", \"4096\",\n \"--config-dir\", \"/certs/letsencrypt\",\n \"--post-hook\", \"/config.py\"\n]\n\n# Wait for nginx to start\ntime.sleep(5)\n\n# Run certbot every hour\nwhile True:\n subprocess.call(command)\n time.sleep(3600)\n\n", "path": "core/nginx/letsencrypt.py"}]} | 912 | 132 |
gh_patches_debug_29046 | rasdani/github-patches | git_diff | Mailu__Mailu-931 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Admin whitelist_webmail hard coded
Hi,
I'm trying to send transactional email with Mailu SMTP from a backend server and it takes more than 15s.
I tracked down one problem in core/admin/mailu/internal/__init__.py:
def whitelist_webmail() uses socket.gethostbyname("webmail")
In my Docker configuration there is no "webmail" host, so the socket.gethostbyname call returns nothing after a roughly 5-second timeout, which slows down the /internal/auth/email request a lot.
When I set "webmail" to a fake IP on the admin server, /internal/auth/email returns immediately.
Maybe it would be better to define a list of hostnames in the configuration file instead of using a hard coded "webmail" value. What do you think?
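Something along these lines is what I have in mind (just a sketch; it assumes the existing HOST_WEBMAIL setting is resolved to an address at startup, the way HOST_IMAP and the others already are):

```python
# Rough sketch: compare against the configured webmail address instead of
# doing a blocking DNS lookup on every authentication request.
import flask
from flask import current_app as app

from mailu import utils


@utils.limiter.request_filter
def whitelist_webmail():
    """Compare the client IP against the configured webmail host (sketch)."""
    try:
        return flask.request.headers["Client-Ip"] == app.config["HOST_WEBMAIL"]
    except Exception:
        return False
```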
Thanks Mailu for the great work!
JB
</issue>
<code>
[start of core/admin/mailu/internal/__init__.py]
1 from flask_limiter import RateLimitExceeded
2
3 from mailu import utils
4
5 import socket
6 import flask
7
8
9 internal = flask.Blueprint('internal', __name__, template_folder='templates')
10
11
12 @internal.app_errorhandler(RateLimitExceeded)
13 def rate_limit_handler(e):
14 response = flask.Response()
15 response.headers['Auth-Status'] = 'Authentication rate limit from one source exceeded'
16 response.headers['Auth-Error-Code'] = '451 4.3.2'
17 if int(flask.request.headers['Auth-Login-Attempt']) < 10:
18 response.headers['Auth-Wait'] = '3'
19 return response
20
21
22 @utils.limiter.request_filter
23 def whitelist_webmail():
24 try:
25 return flask.request.headers["Client-Ip"] ==\
26 socket.gethostbyname("webmail")
27 except:
28 return False
29
30
31 from mailu.internal.views import *
32
[end of core/admin/mailu/internal/__init__.py]
[start of core/admin/mailu/configuration.py]
1 import os
2 from mailustart import resolve
3
4 DEFAULT_CONFIG = {
5 # Specific to the admin UI
6 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',
7 'BABEL_DEFAULT_LOCALE': 'en',
8 'BABEL_DEFAULT_TIMEZONE': 'UTC',
9 'BOOTSTRAP_SERVE_LOCAL': True,
10 'RATELIMIT_STORAGE_URL': 'redis://redis/2',
11 'QUOTA_STORAGE_URL': 'redis://redis/1',
12 'DEBUG': False,
13 'DOMAIN_REGISTRATION': False,
14 'TEMPLATES_AUTO_RELOAD': True,
15 # Database settings
16 'DB_FLAVOR': None,
17 'DB_USER': 'mailu',
18 'DB_PW': None,
19 'DB_HOST': 'database',
20 'DB_NAME': 'mailu',
21 'SQLITE_DATABASE_FILE':'data/main.db',
22 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',
23 'SQLALCHEMY_TRACK_MODIFICATIONS': False,
24 # Statistics management
25 'INSTANCE_ID_PATH': '/data/instance',
26 'STATS_ENDPOINT': '0.{}.stats.mailu.io',
27 # Common configuration variables
28 'SECRET_KEY': 'changeMe',
29 'DOMAIN': 'mailu.io',
30 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',
31 'POSTMASTER': 'postmaster',
32 'TLS_FLAVOR': 'cert',
33 'AUTH_RATELIMIT': '10/minute;1000/hour',
34 'DISABLE_STATISTICS': False,
35 # Mail settings
36 'DMARC_RUA': None,
37 'DMARC_RUF': None,
38 'WELCOME': False,
39 'WELCOME_SUBJECT': 'Dummy welcome topic',
40 'WELCOME_BODY': 'Dummy welcome body',
41 'DKIM_SELECTOR': 'dkim',
42 'DKIM_PATH': '/dkim/{domain}.{selector}.key',
43 'DEFAULT_QUOTA': 1000000000,
44 # Web settings
45 'SITENAME': 'Mailu',
46 'WEBSITE': 'https://mailu.io',
47 'WEB_ADMIN': '/admin',
48 'WEB_WEBMAIL': '/webmail',
49 'RECAPTCHA_PUBLIC_KEY': '',
50 'RECAPTCHA_PRIVATE_KEY': '',
51 # Advanced settings
52 'PASSWORD_SCHEME': 'BLF-CRYPT',
53 # Host settings
54 'HOST_IMAP': 'imap',
55 'HOST_POP3': 'imap',
56 'HOST_SMTP': 'smtp',
57 'HOST_WEBMAIL': 'webmail',
58 'HOST_FRONT': 'front',
59 'HOST_AUTHSMTP': os.environ.get('HOST_SMTP', 'smtp'),
60 'SUBNET': '192.168.203.0/24',
61 'POD_ADDRESS_RANGE': None
62 }
63
64 class ConfigManager(dict):
65 """ Naive configuration manager that uses environment only
66 """
67
68 DB_TEMPLATES = {
69 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',
70 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',
71 'mysql': 'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'
72 }
73
74 def __init__(self):
75 self.config = dict()
76
77 def resolve_host(self):
78 self.config['HOST_IMAP'] = resolve(self.config['HOST_IMAP'])
79 self.config['HOST_POP3'] = resolve(self.config['HOST_POP3'])
80 self.config['HOST_AUTHSMTP'] = resolve(self.config['HOST_AUTHSMTP'])
81 self.config['HOST_SMTP'] = resolve(self.config['HOST_SMTP'])
82
83 def __coerce_value(self, value):
84 if isinstance(value, str) and value.lower() in ('true','yes'):
85 return True
86 elif isinstance(value, str) and value.lower() in ('false', 'no'):
87 return False
88 return value
89
90 def init_app(self, app):
91 self.config.update(app.config)
92 # get environment variables
93 self.config.update({
94 key: self.__coerce_value(os.environ.get(key, value))
95 for key, value in DEFAULT_CONFIG.items()
96 })
97 self.resolve_host()
98
99 # automatically set the sqlalchemy string
100 if self.config['DB_FLAVOR']:
101 template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]
102 self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)
103 # update the app config itself
104 app.config = self
105
106 def setdefault(self, key, value):
107 if key not in self.config:
108 self.config[key] = value
109 return self.config[key]
110
111 def get(self, *args):
112 return self.config.get(*args)
113
114 def keys(self):
115 return self.config.keys()
116
117 def __getitem__(self, key):
118 return self.config.get(key)
119
120 def __setitem__(self, key, value):
121 self.config[key] = value
122
123 def __contains__(self, key):
124 return key in self.config
125
[end of core/admin/mailu/configuration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -46,6 +46,7 @@
'WEBSITE': 'https://mailu.io',
'WEB_ADMIN': '/admin',
'WEB_WEBMAIL': '/webmail',
+ 'WEBMAIL': 'none',
'RECAPTCHA_PUBLIC_KEY': '',
'RECAPTCHA_PRIVATE_KEY': '',
# Advanced settings
@@ -79,6 +80,8 @@
self.config['HOST_POP3'] = resolve(self.config['HOST_POP3'])
self.config['HOST_AUTHSMTP'] = resolve(self.config['HOST_AUTHSMTP'])
self.config['HOST_SMTP'] = resolve(self.config['HOST_SMTP'])
+ if self.config['WEBMAIL'] != 'none':
+ self.config['HOST_WEBMAIL'] = resolve(self.config['HOST_WEBMAIL'])
def __coerce_value(self, value):
if isinstance(value, str) and value.lower() in ('true','yes'):
diff --git a/core/admin/mailu/internal/__init__.py b/core/admin/mailu/internal/__init__.py
--- a/core/admin/mailu/internal/__init__.py
+++ b/core/admin/mailu/internal/__init__.py
@@ -1,6 +1,7 @@
from flask_limiter import RateLimitExceeded
from mailu import utils
+from flask import current_app as app
import socket
import flask
@@ -23,7 +24,7 @@
def whitelist_webmail():
try:
return flask.request.headers["Client-Ip"] ==\
- socket.gethostbyname("webmail")
+ app.config["HOST_WEBMAIL"]
except:
return False
| {"golden_diff": "diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py\n--- a/core/admin/mailu/configuration.py\n+++ b/core/admin/mailu/configuration.py\n@@ -46,6 +46,7 @@\n 'WEBSITE': 'https://mailu.io',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n+ 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n # Advanced settings\n@@ -79,6 +80,8 @@\n self.config['HOST_POP3'] = resolve(self.config['HOST_POP3'])\n self.config['HOST_AUTHSMTP'] = resolve(self.config['HOST_AUTHSMTP'])\n self.config['HOST_SMTP'] = resolve(self.config['HOST_SMTP'])\n+ if self.config['WEBMAIL'] != 'none':\n+ self.config['HOST_WEBMAIL'] = resolve(self.config['HOST_WEBMAIL'])\n \n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\ndiff --git a/core/admin/mailu/internal/__init__.py b/core/admin/mailu/internal/__init__.py\n--- a/core/admin/mailu/internal/__init__.py\n+++ b/core/admin/mailu/internal/__init__.py\n@@ -1,6 +1,7 @@\n from flask_limiter import RateLimitExceeded\n \n from mailu import utils\n+from flask import current_app as app\n \n import socket\n import flask\n@@ -23,7 +24,7 @@\n def whitelist_webmail():\n try:\n return flask.request.headers[\"Client-Ip\"] ==\\\n- socket.gethostbyname(\"webmail\")\n+ app.config[\"HOST_WEBMAIL\"]\n except:\n return False\n", "issue": "Admin whitelist_webmail hard coded\nHi,\r\nI'm trying to send transactional email with Mailu SMTP from a backend server and it takes more than 15s.\r\nI tracked down one problem in core/admin/mailu/internal/__init__.py:\r\ndef whitelist_webmail() uses socket.gethostbyname(\"webmail\")\r\n\r\nIn my docker configuration there is no \"webmail\" host so the function socket.gethostbyname return nothing after 5s which slows down a lot the request /internal/auth/email\r\nWhen I set \"webmail\" to a fake ip on the admin server the /internal/auth/email returns immediately.\r\n\r\nMaybe it would be better to define a list of hostnames in the configuration file instead of using a hard coded \"webmail\" value. 
What do you think?\r\n\r\nThanks Mailu for the great work!\r\nJB\n", "before_files": [{"content": "from flask_limiter import RateLimitExceeded\n\nfrom mailu import utils\n\nimport socket\nimport flask\n\n\ninternal = flask.Blueprint('internal', __name__, template_folder='templates')\n\n\[email protected]_errorhandler(RateLimitExceeded)\ndef rate_limit_handler(e):\n response = flask.Response()\n response.headers['Auth-Status'] = 'Authentication rate limit from one source exceeded'\n response.headers['Auth-Error-Code'] = '451 4.3.2'\n if int(flask.request.headers['Auth-Login-Attempt']) < 10:\n response.headers['Auth-Wait'] = '3'\n return response\n\n\[email protected]_filter\ndef whitelist_webmail():\n try:\n return flask.request.headers[\"Client-Ip\"] ==\\\n socket.gethostbyname(\"webmail\")\n except:\n return False\n\n\nfrom mailu.internal.views import *\n", "path": "core/admin/mailu/internal/__init__.py"}, {"content": "import os\nfrom mailustart import resolve\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': 'redis://redis/2',\n 'QUOTA_STORAGE_URL': 'redis://redis/1',\n 'DEBUG': False,\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE':'data/main.db',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '0.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'TLS_FLAVOR': 'cert',\n 'AUTH_RATELIMIT': '10/minute;1000/hour',\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n # Advanced settings\n 'PASSWORD_SCHEME': 'BLF-CRYPT',\n # Host settings\n 'HOST_IMAP': 'imap',\n 'HOST_POP3': 'imap',\n 'HOST_SMTP': 'smtp',\n 'HOST_WEBMAIL': 'webmail',\n 'HOST_FRONT': 'front',\n 'HOST_AUTHSMTP': os.environ.get('HOST_SMTP', 'smtp'),\n 'SUBNET': '192.168.203.0/24',\n 'POD_ADDRESS_RANGE': None\n}\n\nclass ConfigManager(dict):\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'\n }\n\n def __init__(self):\n self.config = dict()\n\n def resolve_host(self):\n self.config['HOST_IMAP'] = resolve(self.config['HOST_IMAP'])\n self.config['HOST_POP3'] = resolve(self.config['HOST_POP3'])\n self.config['HOST_AUTHSMTP'] = resolve(self.config['HOST_AUTHSMTP'])\n self.config['HOST_SMTP'] = resolve(self.config['HOST_SMTP'])\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif 
isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n self.config.update(app.config)\n # get environment variables\n self.config.update({\n key: self.__coerce_value(os.environ.get(key, value))\n for key, value in DEFAULT_CONFIG.items()\n })\n self.resolve_host()\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n # update the app config itself\n app.config = self\n\n def setdefault(self, key, value):\n if key not in self.config:\n self.config[key] = value\n return self.config[key]\n\n def get(self, *args):\n return self.config.get(*args)\n\n def keys(self):\n return self.config.keys()\n\n def __getitem__(self, key):\n return self.config.get(key)\n\n def __setitem__(self, key, value):\n self.config[key] = value\n\n def __contains__(self, key):\n return key in self.config\n", "path": "core/admin/mailu/configuration.py"}]} | 2,320 | 379 |
gh_patches_debug_6932 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1172 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect import of book from Goodreads
**Describe the bug**
I attempted to import my Goodreads library (https://www.goodreads.com/user/show/986159-nikhil) into Bookwyrm. I noticed the book _Dark Fiber: Tracking Critical Internet Culture_ ([bookwyrm](https://bookwyrm.social/book/120984), [goodreads](https://www.goodreads.com/book/show/1371653.Dark_Fiber); ISBN 9780262122498) in my library, which was not a book on my Goodreads list. Based on the order the imported books began showing up in my library, I think (but cannot confirm) that this was an incorrect import of the book _Ganhadores: A greve negra de 1857 na Bahia_ ([goodreads](https://www.goodreads.com/book/show/49417547-ganhadores); ISBN 9788535932430).
**To Reproduce**
Steps to reproduce the behavior:
1. Export Goodreads CSV for account https://www.goodreads.com/user/show/986159-nikhil
2. Navigate to https://bookwyrm.social/import
3. Upload CSV with format `GoodReads (CSV)`.
4. Navigate to https://bookwyrm.social/user/nikhilarundesai/books and see error
**Expected behavior**
The book _Dark Fiber_ does not appear in my BookWyrm library.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: Mac OS
- Browser: Chrome
- Version: 90.0.4430.212 (Official Build) (x86_64)
**Additional context**
Add any other context about the problem here.
</issue>
<code>
[start of bookwyrm/models/import_job.py]
1 """ track progress of goodreads imports """
2 import re
3 import dateutil.parser
4
5 from django.apps import apps
6 from django.db import models
7 from django.utils import timezone
8
9 from bookwyrm.connectors import connector_manager
10 from bookwyrm.models import ReadThrough, User, Book
11 from .fields import PrivacyLevels
12
13
14 # Mapping goodreads -> bookwyrm shelf titles.
15 GOODREADS_SHELVES = {
16 "read": "read",
17 "currently-reading": "reading",
18 "to-read": "to-read",
19 }
20
21
22 def unquote_string(text):
23 """resolve csv quote weirdness"""
24 match = re.match(r'="([^"]*)"', text)
25 if match:
26 return match.group(1)
27 return text
28
29
30 def construct_search_term(title, author):
31 """formulate a query for the data connector"""
32 # Strip brackets (usually series title from search term)
33 title = re.sub(r"\s*\([^)]*\)\s*", "", title)
34 # Open library doesn't like including author initials in search term.
35 author = re.sub(r"(\w\.)+\s*", "", author)
36
37 return " ".join([title, author])
38
39
40 class ImportJob(models.Model):
41 """entry for a specific request for book data import"""
42
43 user = models.ForeignKey(User, on_delete=models.CASCADE)
44 created_date = models.DateTimeField(default=timezone.now)
45 task_id = models.CharField(max_length=100, null=True)
46 include_reviews = models.BooleanField(default=True)
47 complete = models.BooleanField(default=False)
48 privacy = models.CharField(
49 max_length=255, default="public", choices=PrivacyLevels.choices
50 )
51 retry = models.BooleanField(default=False)
52
53 def save(self, *args, **kwargs):
54 """save and notify"""
55 super().save(*args, **kwargs)
56 if self.complete:
57 notification_model = apps.get_model(
58 "bookwyrm.Notification", require_ready=True
59 )
60 notification_model.objects.create(
61 user=self.user,
62 notification_type="IMPORT",
63 related_import=self,
64 )
65
66
67 class ImportItem(models.Model):
68 """a single line of a csv being imported"""
69
70 job = models.ForeignKey(ImportJob, on_delete=models.CASCADE, related_name="items")
71 index = models.IntegerField()
72 data = models.JSONField()
73 book = models.ForeignKey(Book, on_delete=models.SET_NULL, null=True, blank=True)
74 fail_reason = models.TextField(null=True)
75
76 def resolve(self):
77 """try various ways to lookup a book"""
78 self.book = self.get_book_from_isbn() or self.get_book_from_title_author()
79
80 def get_book_from_isbn(self):
81 """search by isbn"""
82 search_result = connector_manager.first_search_result(
83 self.isbn, min_confidence=0.999
84 )
85 if search_result:
86 # raises ConnectorException
87 return search_result.connector.get_or_create_book(search_result.key)
88 return None
89
90 def get_book_from_title_author(self):
91 """search by title and author"""
92 search_term = construct_search_term(self.title, self.author)
93 search_result = connector_manager.first_search_result(
94 search_term, min_confidence=0.999
95 )
96 if search_result:
97 # raises ConnectorException
98 return search_result.connector.get_or_create_book(search_result.key)
99 return None
100
101 @property
102 def title(self):
103 """get the book title"""
104 return self.data["Title"]
105
106 @property
107 def author(self):
108 """get the book title"""
109 return self.data["Author"]
110
111 @property
112 def isbn(self):
113 """pulls out the isbn13 field from the csv line data"""
114 return unquote_string(self.data["ISBN13"])
115
116 @property
117 def shelf(self):
118 """the goodreads shelf field"""
119 if self.data["Exclusive Shelf"]:
120 return GOODREADS_SHELVES.get(self.data["Exclusive Shelf"])
121 return None
122
123 @property
124 def review(self):
125 """a user-written review, to be imported with the book data"""
126 return self.data["My Review"]
127
128 @property
129 def rating(self):
130 """x/5 star rating for a book"""
131 if self.data.get("My Rating", None):
132 return int(self.data["My Rating"])
133 return None
134
135 @property
136 def date_added(self):
137 """when the book was added to this dataset"""
138 if self.data["Date Added"]:
139 return timezone.make_aware(dateutil.parser.parse(self.data["Date Added"]))
140 return None
141
142 @property
143 def date_started(self):
144 """when the book was started"""
145 if "Date Started" in self.data and self.data["Date Started"]:
146 return timezone.make_aware(dateutil.parser.parse(self.data["Date Started"]))
147 return None
148
149 @property
150 def date_read(self):
151 """the date a book was completed"""
152 if self.data["Date Read"]:
153 return timezone.make_aware(dateutil.parser.parse(self.data["Date Read"]))
154 return None
155
156 @property
157 def reads(self):
158 """formats a read through dataset for the book in this line"""
159 start_date = self.date_started
160
161 # Goodreads special case (no 'date started' field)
162 if (
163 (self.shelf == "reading" or (self.shelf == "read" and self.date_read))
164 and self.date_added
165 and not start_date
166 ):
167 start_date = self.date_added
168
169 if start_date and start_date is not None and not self.date_read:
170 return [ReadThrough(start_date=start_date)]
171 if self.date_read:
172 return [
173 ReadThrough(
174 start_date=start_date,
175 finish_date=self.date_read,
176 )
177 ]
178 return []
179
180 def __repr__(self):
181 return "<{!r}Item {!r}>".format(self.data["import_source"], self.data["Title"])
182
183 def __str__(self):
184 return "{} by {}".format(self.data["Title"], self.data["Author"])
185
[end of bookwyrm/models/import_job.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/models/import_job.py b/bookwyrm/models/import_job.py
--- a/bookwyrm/models/import_job.py
+++ b/bookwyrm/models/import_job.py
@@ -75,7 +75,12 @@
def resolve(self):
"""try various ways to lookup a book"""
- self.book = self.get_book_from_isbn() or self.get_book_from_title_author()
+ if self.isbn:
+ self.book = self.get_book_from_isbn()
+ else:
+ # don't fall back on title/author search is isbn is present.
+ # you're too likely to mismatch
+ self.get_book_from_title_author()
def get_book_from_isbn(self):
"""search by isbn"""
| {"golden_diff": "diff --git a/bookwyrm/models/import_job.py b/bookwyrm/models/import_job.py\n--- a/bookwyrm/models/import_job.py\n+++ b/bookwyrm/models/import_job.py\n@@ -75,7 +75,12 @@\n \n def resolve(self):\n \"\"\"try various ways to lookup a book\"\"\"\n- self.book = self.get_book_from_isbn() or self.get_book_from_title_author()\n+ if self.isbn:\n+ self.book = self.get_book_from_isbn()\n+ else:\n+ # don't fall back on title/author search is isbn is present.\n+ # you're too likely to mismatch\n+ self.get_book_from_title_author()\n \n def get_book_from_isbn(self):\n \"\"\"search by isbn\"\"\"\n", "issue": "Incorrect import of book from Goodreads\n**Describe the bug**\r\nI attempted to import my Goodreads library (https://www.goodreads.com/user/show/986159-nikhil) into Bookwyrm. I noticed the book _Dark Fiber: Tracking Critical Internet Culture_ ([bookwyrm](https://bookwyrm.social/book/120984), [goodreads](https://www.goodreads.com/book/show/1371653.Dark_Fiber); ISBN 9780262122498) in my library, which was not a book on my Goodreads list. Based on the order the imported books began showing up in my library, I think (but cannot confirm) that this was an incorrect import of the book _Ganhadores: A greve negra de 1857 na Bahia_ ([goodreads](https://www.goodreads.com/book/show/49417547-ganhadores); ISBN 9788535932430).\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Export Goodreads CSV for account https://www.goodreads.com/user/show/986159-nikhil\r\n2. Navigate to https://bookwyrm.social/import\r\n3. Upload CSV with format `GoodReads (CSV)`.\r\n4. Navigate to https://bookwyrm.social/user/nikhilarundesai/books and see error\r\n\r\n**Expected behavior**\r\nThe book _Dark Fiber_ does not appear in my BookWyrm library.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Mac OS\r\n - Browser: Chrome\r\n - Version: 90.0.4430.212 (Official Build) (x86_64)\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "\"\"\" track progress of goodreads imports \"\"\"\nimport re\nimport dateutil.parser\n\nfrom django.apps import apps\nfrom django.db import models\nfrom django.utils import timezone\n\nfrom bookwyrm.connectors import connector_manager\nfrom bookwyrm.models import ReadThrough, User, Book\nfrom .fields import PrivacyLevels\n\n\n# Mapping goodreads -> bookwyrm shelf titles.\nGOODREADS_SHELVES = {\n \"read\": \"read\",\n \"currently-reading\": \"reading\",\n \"to-read\": \"to-read\",\n}\n\n\ndef unquote_string(text):\n \"\"\"resolve csv quote weirdness\"\"\"\n match = re.match(r'=\"([^\"]*)\"', text)\n if match:\n return match.group(1)\n return text\n\n\ndef construct_search_term(title, author):\n \"\"\"formulate a query for the data connector\"\"\"\n # Strip brackets (usually series title from search term)\n title = re.sub(r\"\\s*\\([^)]*\\)\\s*\", \"\", title)\n # Open library doesn't like including author initials in search term.\n author = re.sub(r\"(\\w\\.)+\\s*\", \"\", author)\n\n return \" \".join([title, author])\n\n\nclass ImportJob(models.Model):\n \"\"\"entry for a specific request for book data import\"\"\"\n\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n created_date = models.DateTimeField(default=timezone.now)\n task_id = models.CharField(max_length=100, null=True)\n include_reviews = models.BooleanField(default=True)\n complete = models.BooleanField(default=False)\n privacy = 
models.CharField(\n max_length=255, default=\"public\", choices=PrivacyLevels.choices\n )\n retry = models.BooleanField(default=False)\n\n def save(self, *args, **kwargs):\n \"\"\"save and notify\"\"\"\n super().save(*args, **kwargs)\n if self.complete:\n notification_model = apps.get_model(\n \"bookwyrm.Notification\", require_ready=True\n )\n notification_model.objects.create(\n user=self.user,\n notification_type=\"IMPORT\",\n related_import=self,\n )\n\n\nclass ImportItem(models.Model):\n \"\"\"a single line of a csv being imported\"\"\"\n\n job = models.ForeignKey(ImportJob, on_delete=models.CASCADE, related_name=\"items\")\n index = models.IntegerField()\n data = models.JSONField()\n book = models.ForeignKey(Book, on_delete=models.SET_NULL, null=True, blank=True)\n fail_reason = models.TextField(null=True)\n\n def resolve(self):\n \"\"\"try various ways to lookup a book\"\"\"\n self.book = self.get_book_from_isbn() or self.get_book_from_title_author()\n\n def get_book_from_isbn(self):\n \"\"\"search by isbn\"\"\"\n search_result = connector_manager.first_search_result(\n self.isbn, min_confidence=0.999\n )\n if search_result:\n # raises ConnectorException\n return search_result.connector.get_or_create_book(search_result.key)\n return None\n\n def get_book_from_title_author(self):\n \"\"\"search by title and author\"\"\"\n search_term = construct_search_term(self.title, self.author)\n search_result = connector_manager.first_search_result(\n search_term, min_confidence=0.999\n )\n if search_result:\n # raises ConnectorException\n return search_result.connector.get_or_create_book(search_result.key)\n return None\n\n @property\n def title(self):\n \"\"\"get the book title\"\"\"\n return self.data[\"Title\"]\n\n @property\n def author(self):\n \"\"\"get the book title\"\"\"\n return self.data[\"Author\"]\n\n @property\n def isbn(self):\n \"\"\"pulls out the isbn13 field from the csv line data\"\"\"\n return unquote_string(self.data[\"ISBN13\"])\n\n @property\n def shelf(self):\n \"\"\"the goodreads shelf field\"\"\"\n if self.data[\"Exclusive Shelf\"]:\n return GOODREADS_SHELVES.get(self.data[\"Exclusive Shelf\"])\n return None\n\n @property\n def review(self):\n \"\"\"a user-written review, to be imported with the book data\"\"\"\n return self.data[\"My Review\"]\n\n @property\n def rating(self):\n \"\"\"x/5 star rating for a book\"\"\"\n if self.data.get(\"My Rating\", None):\n return int(self.data[\"My Rating\"])\n return None\n\n @property\n def date_added(self):\n \"\"\"when the book was added to this dataset\"\"\"\n if self.data[\"Date Added\"]:\n return timezone.make_aware(dateutil.parser.parse(self.data[\"Date Added\"]))\n return None\n\n @property\n def date_started(self):\n \"\"\"when the book was started\"\"\"\n if \"Date Started\" in self.data and self.data[\"Date Started\"]:\n return timezone.make_aware(dateutil.parser.parse(self.data[\"Date Started\"]))\n return None\n\n @property\n def date_read(self):\n \"\"\"the date a book was completed\"\"\"\n if self.data[\"Date Read\"]:\n return timezone.make_aware(dateutil.parser.parse(self.data[\"Date Read\"]))\n return None\n\n @property\n def reads(self):\n \"\"\"formats a read through dataset for the book in this line\"\"\"\n start_date = self.date_started\n\n # Goodreads special case (no 'date started' field)\n if (\n (self.shelf == \"reading\" or (self.shelf == \"read\" and self.date_read))\n and self.date_added\n and not start_date\n ):\n start_date = self.date_added\n\n if start_date and start_date is not None and not 
self.date_read:\n return [ReadThrough(start_date=start_date)]\n if self.date_read:\n return [\n ReadThrough(\n start_date=start_date,\n finish_date=self.date_read,\n )\n ]\n return []\n\n def __repr__(self):\n return \"<{!r}Item {!r}>\".format(self.data[\"import_source\"], self.data[\"Title\"])\n\n def __str__(self):\n return \"{} by {}\".format(self.data[\"Title\"], self.data[\"Author\"])\n", "path": "bookwyrm/models/import_job.py"}]} | 2,687 | 164 |
gh_patches_debug_594 | rasdani/github-patches | git_diff | pex-tool__pex-1057 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.17
On the docket:
+ [x] TypeError when resolving local platforms. #1043
+ [x] No such file for interpreter's binary name #1009
+ [x] Pex resources leak while bootstrapping pants #1050
+ [x] Pex PEX perf regression #1054
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.16"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.16"
+__version__ = "2.1.17"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.16\"\n+__version__ = \"2.1.17\"\n", "issue": "Release 2.1.17\nOn the docket:\r\n+ [x] TypeError when resolving local platforms. #1043\r\n+ [x] No such file for interpreter's binary name #1009\r\n+ [x] Pex resources leak while bootstrapping pants #1050\r\n+ [x] Pex PEX perf regression #1054\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.16\"\n", "path": "pex/version.py"}]} | 666 | 96 |
gh_patches_debug_39575 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3586 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement `columns.delete` RPC method
Replaces `DELETE /api/db/v0/tables/{tableId}/columns/{columnId}`
</issue>
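For orientation, a minimal sketch of what the replacement RPC surface could look like. The method name, parameter names, and the `drop_columns_from_table` helper mirror the code and patch shown below; anything beyond that is an assumption, not a definitive implementation:

```python
# Hypothetical sketch of an RPC method replacing the REST delete endpoint.
@rpc_method(name="columns.delete")
@http_basic_auth_login_required
@handle_rpc_exceptions
def delete(*, column_attnums: list[int], table_oid: int, database_id: int, **kwargs) -> int:
    user = kwargs.get(REQUEST_KEY).user
    with connect(database_id, user) as conn:
        # Drop the given columns in the user's database and return how many were dropped.
        return drop_columns_from_table(table_oid, column_attnums, conn)
```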
<code>
[start of db/sql/install.py]
1 import os
2 from db.connection import load_file_with_engine
3
4 FILE_DIR = os.path.abspath(os.path.dirname(__file__))
5 MSAR_SQL = os.path.join(FILE_DIR, '0_msar.sql')
6 MSAR_AGGREGATE_SQL = os.path.join(FILE_DIR, '3_msar_custom_aggregates.sql')
7
8
9 def install(engine):
10 """Install SQL pieces using the given engine."""
11 with open(MSAR_SQL) as file_handle:
12 load_file_with_engine(engine, file_handle)
13 with open(MSAR_AGGREGATE_SQL) as custom_aggregates:
14 load_file_with_engine(engine, custom_aggregates)
15
[end of db/sql/install.py]
[start of mathesar/rpc/columns.py]
1 """
2 Classes and functions exposed to the RPC endpoint for managing table columns.
3 """
4 from typing import TypedDict
5
6 from modernrpc.core import rpc_method, REQUEST_KEY
7 from modernrpc.auth.basic import http_basic_auth_login_required
8
9 from db.columns.operations.select import get_column_info_for_table
10 from mathesar.rpc.exceptions.handlers import handle_rpc_exceptions
11 from mathesar.rpc.utils import connect
12 from mathesar.utils.columns import get_raw_display_options
13
14
15 class TypeOptions(TypedDict, total=False):
16 """
17 Options applied to a type. All attributes are optional.
18
19 Take special care with the difference between numeric and date/time
20 types w.r.t. precision. The attribute has a different meaning
21 depending on the type to which it's being applied.
22
23 Attributes:
24 precision: For numeric types, the number of significant digits.
25 For date/time types, the number of fractional digits.
26 scale: For numeric types, the number of fractional digits.
27 fields: Which time fields are stored. See Postgres docs.
28 length: The maximum length of a character-type field.
29 item_type: The member type for arrays.
30 """
31 precision: int
32 scale: int
33 fields: str
34 length: int
35 item_type: str
36
37 @classmethod
38 def from_dict(cls, type_options):
39 if type_options is None:
40 return
41 # All keys are optional, but we want to validate the keys we
42 # actually return.
43 all_keys = dict(
44 precision=type_options.get("precision"),
45 scale=type_options.get("scale"),
46 fields=type_options.get("fields"),
47 length=type_options.get("length"),
48 item_type=type_options.get("item_type"),
49 )
50 reduced_keys = {k: v for k, v in all_keys.items() if v is not None}
51 if reduced_keys != {}:
52 return cls(**reduced_keys)
53
54
55 class ColumnDefault(TypedDict):
56 """
57 A dictionary describing the default value for a column.
58
59 Attributes:
60 value: An SQL expression giving the default value.
61 is_dynamic: Whether the `value` is possibly dynamic.
62 """
63 value: str
64 is_dynamic: bool
65
66 @classmethod
67 def from_dict(cls, col_default):
68 if col_default is not None:
69 return cls(
70 value=col_default["value"],
71 is_dynamic=col_default["is_dynamic"],
72 )
73
74
75 class ColumnInfo(TypedDict):
76 """
77 Information about a column.
78
79 Attributes:
80 id: The `attnum` of the column in the table.
81 name: The name of the column.
82 type: The type of the column on the database.
83 type_options: The options applied to the column type.
84 nullable: Whether or not the column is nullable.
85 primary_key: Whether the column is in the primary key.
86 default: The default value and whether it's dynamic.
87 has_dependents: Whether the column has dependent objects.
88 description: The description of the column.
89 """
90 id: int
91 name: str
92 type: str
93 type_options: TypeOptions
94 nullable: bool
95 primary_key: bool
96 default: ColumnDefault
97 has_dependents: bool
98 description: str
99
100 @classmethod
101 def from_dict(cls, col_info):
102 return cls(
103 id=col_info["id"],
104 name=col_info["name"],
105 type=col_info["type"],
106 type_options=TypeOptions.from_dict(col_info.get("type_options")),
107 nullable=col_info["nullable"],
108 primary_key=col_info["primary_key"],
109 default=ColumnDefault.from_dict(col_info.get("default")),
110 has_dependents=col_info["has_dependents"],
111 description=col_info.get("description")
112 )
113
114
115 class ColumnListReturn(TypedDict):
116 """
117 Information about the columns of a table.
118
119 Attributes:
120 column_info: Column information from the user's database.
121 display_options: Display metadata managed by Mathesar.
122 """
123 column_info: list[ColumnInfo]
124 display_options: list[dict]
125
126
127 @rpc_method(name="columns.list")
128 @http_basic_auth_login_required
129 @handle_rpc_exceptions
130 def list_(*, table_oid: int, database_id: int, **kwargs) -> ColumnListReturn:
131 """
132 List information about columns for a table. Exposed as `list`.
133
134 Also return display options for each column, if they're defined.
135
136 Args:
137 table_oid: Identity of the table in the user's database.
138 database_id: The Django id of the database containing the table.
139
140 Returns:
141 A list of column details, and a separate list of display options.
142 """
143 user = kwargs.get(REQUEST_KEY).user
144 with connect(database_id, user) as conn:
145 raw_column_info = get_column_info_for_table(table_oid, conn)
146 column_info, attnums = tuple(
147 zip(
148 *[(ColumnInfo.from_dict(col), col['id']) for col in raw_column_info]
149 )
150 )
151 display_options = get_raw_display_options(
152 database_id, table_oid, attnums, user
153 )
154 return ColumnListReturn(
155 column_info=column_info,
156 display_options=display_options,
157 )
158
[end of mathesar/rpc/columns.py]
[start of db/columns/operations/drop.py]
1 """The function in this module wraps SQL functions that drop columns."""
2 from db import connection as db_conn
3
4
5 def drop_column(table_oid, column_attnum, engine):
6 """
7 Drop the given columns from the given table.
8
9 Args:
10 table_oid: OID of the table whose columns we'll drop.
11 column_attnum: The attnums of the columns to drop.
12 engine: SQLAlchemy engine object for connecting.
13
14 Returns:
15 Returns a string giving the command that was run.
16 """
17 return db_conn.execute_msar_func_with_engine(
18 engine, 'drop_columns', table_oid, column_attnum
19 ).fetchone()[0]
20
[end of db/columns/operations/drop.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/db/columns/operations/drop.py b/db/columns/operations/drop.py
--- a/db/columns/operations/drop.py
+++ b/db/columns/operations/drop.py
@@ -1,14 +1,14 @@
-"""The function in this module wraps SQL functions that drop columns."""
+"""The functions in this module wrap SQL functions that drop columns."""
from db import connection as db_conn
def drop_column(table_oid, column_attnum, engine):
"""
- Drop the given columns from the given table.
+ Drop the given column from the given table.
Args:
- table_oid: OID of the table whose columns we'll drop.
- column_attnum: The attnums of the columns to drop.
+ table_oid: OID of the table whose column we'll drop.
+ column_attnum: The attnum of the column to drop.
engine: SQLAlchemy engine object for connecting.
Returns:
@@ -17,3 +17,17 @@
return db_conn.execute_msar_func_with_engine(
engine, 'drop_columns', table_oid, column_attnum
).fetchone()[0]
+
+
+def drop_columns_from_table(table_oid, column_attnums, conn):
+ """
+ Drop the given columns from the given table.
+
+ Args:
+ table_oid: OID of the table whose columns we'll drop.
+ column_attnums: The attnums of the columns to drop.
+ conn: A psycopg connection to the relevant database.
+ """
+ return db_conn.exec_msar_func(
+ conn, 'drop_columns', table_oid, *column_attnums
+ ).fetchone()[0]
diff --git a/db/sql/install.py b/db/sql/install.py
--- a/db/sql/install.py
+++ b/db/sql/install.py
@@ -2,8 +2,8 @@
from db.connection import load_file_with_engine
FILE_DIR = os.path.abspath(os.path.dirname(__file__))
-MSAR_SQL = os.path.join(FILE_DIR, '0_msar.sql')
-MSAR_AGGREGATE_SQL = os.path.join(FILE_DIR, '3_msar_custom_aggregates.sql')
+MSAR_SQL = os.path.join(FILE_DIR, '00_msar.sql')
+MSAR_AGGREGATE_SQL = os.path.join(FILE_DIR, '30_msar_custom_aggregates.sql')
def install(engine):
diff --git a/mathesar/rpc/columns.py b/mathesar/rpc/columns.py
--- a/mathesar/rpc/columns.py
+++ b/mathesar/rpc/columns.py
@@ -7,6 +7,7 @@
from modernrpc.auth.basic import http_basic_auth_login_required
from db.columns.operations.select import get_column_info_for_table
+from db.columns.operations.drop import drop_columns_from_table
from mathesar.rpc.exceptions.handlers import handle_rpc_exceptions
from mathesar.rpc.utils import connect
from mathesar.utils.columns import get_raw_display_options
@@ -155,3 +156,25 @@
column_info=column_info,
display_options=display_options,
)
+
+
+@rpc_method(name="columns.delete")
+@http_basic_auth_login_required
+@handle_rpc_exceptions
+def delete(
+ *, column_attnums: list[int], table_oid: int, database_id: int, **kwargs
+) -> int:
+ """
+ Delete columns from a table.
+
+ Args:
+ column_attnums: A list of attnums of columns to delete.
+ table_oid: Identity of the table in the user's database.
+ database_id: The Django id of the database containing the table.
+
+ Returns:
+ The number of columns dropped.
+ """
+ user = kwargs.get(REQUEST_KEY).user
+ with connect(database_id, user) as conn:
+ return drop_columns_from_table(table_oid, column_attnums, conn)
| {"golden_diff": "diff --git a/db/columns/operations/drop.py b/db/columns/operations/drop.py\n--- a/db/columns/operations/drop.py\n+++ b/db/columns/operations/drop.py\n@@ -1,14 +1,14 @@\n-\"\"\"The function in this module wraps SQL functions that drop columns.\"\"\"\n+\"\"\"The functions in this module wrap SQL functions that drop columns.\"\"\"\n from db import connection as db_conn\n \n \n def drop_column(table_oid, column_attnum, engine):\n \"\"\"\n- Drop the given columns from the given table.\n+ Drop the given column from the given table.\n \n Args:\n- table_oid: OID of the table whose columns we'll drop.\n- column_attnum: The attnums of the columns to drop.\n+ table_oid: OID of the table whose column we'll drop.\n+ column_attnum: The attnum of the column to drop.\n engine: SQLAlchemy engine object for connecting.\n \n Returns:\n@@ -17,3 +17,17 @@\n return db_conn.execute_msar_func_with_engine(\n engine, 'drop_columns', table_oid, column_attnum\n ).fetchone()[0]\n+\n+\n+def drop_columns_from_table(table_oid, column_attnums, conn):\n+ \"\"\"\n+ Drop the given columns from the given table.\n+\n+ Args:\n+ table_oid: OID of the table whose columns we'll drop.\n+ column_attnums: The attnums of the columns to drop.\n+ conn: A psycopg connection to the relevant database.\n+ \"\"\"\n+ return db_conn.exec_msar_func(\n+ conn, 'drop_columns', table_oid, *column_attnums\n+ ).fetchone()[0]\ndiff --git a/db/sql/install.py b/db/sql/install.py\n--- a/db/sql/install.py\n+++ b/db/sql/install.py\n@@ -2,8 +2,8 @@\n from db.connection import load_file_with_engine\n \n FILE_DIR = os.path.abspath(os.path.dirname(__file__))\n-MSAR_SQL = os.path.join(FILE_DIR, '0_msar.sql')\n-MSAR_AGGREGATE_SQL = os.path.join(FILE_DIR, '3_msar_custom_aggregates.sql')\n+MSAR_SQL = os.path.join(FILE_DIR, '00_msar.sql')\n+MSAR_AGGREGATE_SQL = os.path.join(FILE_DIR, '30_msar_custom_aggregates.sql')\n \n \n def install(engine):\ndiff --git a/mathesar/rpc/columns.py b/mathesar/rpc/columns.py\n--- a/mathesar/rpc/columns.py\n+++ b/mathesar/rpc/columns.py\n@@ -7,6 +7,7 @@\n from modernrpc.auth.basic import http_basic_auth_login_required\n \n from db.columns.operations.select import get_column_info_for_table\n+from db.columns.operations.drop import drop_columns_from_table\n from mathesar.rpc.exceptions.handlers import handle_rpc_exceptions\n from mathesar.rpc.utils import connect\n from mathesar.utils.columns import get_raw_display_options\n@@ -155,3 +156,25 @@\n column_info=column_info,\n display_options=display_options,\n )\n+\n+\n+@rpc_method(name=\"columns.delete\")\n+@http_basic_auth_login_required\n+@handle_rpc_exceptions\n+def delete(\n+ *, column_attnums: list[int], table_oid: int, database_id: int, **kwargs\n+) -> int:\n+ \"\"\"\n+ Delete columns from a table.\n+\n+ Args:\n+ column_attnums: A list of attnums of columns to delete.\n+ table_oid: Identity of the table in the user's database.\n+ database_id: The Django id of the database containing the table.\n+\n+ Returns:\n+ The number of columns dropped.\n+ \"\"\"\n+ user = kwargs.get(REQUEST_KEY).user\n+ with connect(database_id, user) as conn:\n+ return drop_columns_from_table(table_oid, column_attnums, conn)\n", "issue": "Implement `columns.delete` RPC method\nReplaces `DELETE /api/db/v0/tables/{tableId}/columns/{columnId}`\n", "before_files": [{"content": "import os\nfrom db.connection import load_file_with_engine\n\nFILE_DIR = os.path.abspath(os.path.dirname(__file__))\nMSAR_SQL = os.path.join(FILE_DIR, '0_msar.sql')\nMSAR_AGGREGATE_SQL = os.path.join(FILE_DIR, 
'3_msar_custom_aggregates.sql')\n\n\ndef install(engine):\n \"\"\"Install SQL pieces using the given engine.\"\"\"\n with open(MSAR_SQL) as file_handle:\n load_file_with_engine(engine, file_handle)\n with open(MSAR_AGGREGATE_SQL) as custom_aggregates:\n load_file_with_engine(engine, custom_aggregates)\n", "path": "db/sql/install.py"}, {"content": "\"\"\"\nClasses and functions exposed to the RPC endpoint for managing table columns.\n\"\"\"\nfrom typing import TypedDict\n\nfrom modernrpc.core import rpc_method, REQUEST_KEY\nfrom modernrpc.auth.basic import http_basic_auth_login_required\n\nfrom db.columns.operations.select import get_column_info_for_table\nfrom mathesar.rpc.exceptions.handlers import handle_rpc_exceptions\nfrom mathesar.rpc.utils import connect\nfrom mathesar.utils.columns import get_raw_display_options\n\n\nclass TypeOptions(TypedDict, total=False):\n \"\"\"\n Options applied to a type. All attributes are optional.\n\n Take special care with the difference between numeric and date/time\n types w.r.t. precision. The attribute has a different meaning\n depending on the type to which it's being applied.\n\n Attributes:\n precision: For numeric types, the number of significant digits.\n For date/time types, the number of fractional digits.\n scale: For numeric types, the number of fractional digits.\n fields: Which time fields are stored. See Postgres docs.\n length: The maximum length of a character-type field.\n item_type: The member type for arrays.\n \"\"\"\n precision: int\n scale: int\n fields: str\n length: int\n item_type: str\n\n @classmethod\n def from_dict(cls, type_options):\n if type_options is None:\n return\n # All keys are optional, but we want to validate the keys we\n # actually return.\n all_keys = dict(\n precision=type_options.get(\"precision\"),\n scale=type_options.get(\"scale\"),\n fields=type_options.get(\"fields\"),\n length=type_options.get(\"length\"),\n item_type=type_options.get(\"item_type\"),\n )\n reduced_keys = {k: v for k, v in all_keys.items() if v is not None}\n if reduced_keys != {}:\n return cls(**reduced_keys)\n\n\nclass ColumnDefault(TypedDict):\n \"\"\"\n A dictionary describing the default value for a column.\n\n Attributes:\n value: An SQL expression giving the default value.\n is_dynamic: Whether the `value` is possibly dynamic.\n \"\"\"\n value: str\n is_dynamic: bool\n\n @classmethod\n def from_dict(cls, col_default):\n if col_default is not None:\n return cls(\n value=col_default[\"value\"],\n is_dynamic=col_default[\"is_dynamic\"],\n )\n\n\nclass ColumnInfo(TypedDict):\n \"\"\"\n Information about a column.\n\n Attributes:\n id: The `attnum` of the column in the table.\n name: The name of the column.\n type: The type of the column on the database.\n type_options: The options applied to the column type.\n nullable: Whether or not the column is nullable.\n primary_key: Whether the column is in the primary key.\n default: The default value and whether it's dynamic.\n has_dependents: Whether the column has dependent objects.\n description: The description of the column.\n \"\"\"\n id: int\n name: str\n type: str\n type_options: TypeOptions\n nullable: bool\n primary_key: bool\n default: ColumnDefault\n has_dependents: bool\n description: str\n\n @classmethod\n def from_dict(cls, col_info):\n return cls(\n id=col_info[\"id\"],\n name=col_info[\"name\"],\n type=col_info[\"type\"],\n type_options=TypeOptions.from_dict(col_info.get(\"type_options\")),\n nullable=col_info[\"nullable\"],\n primary_key=col_info[\"primary_key\"],\n 
default=ColumnDefault.from_dict(col_info.get(\"default\")),\n has_dependents=col_info[\"has_dependents\"],\n description=col_info.get(\"description\")\n )\n\n\nclass ColumnListReturn(TypedDict):\n \"\"\"\n Information about the columns of a table.\n\n Attributes:\n column_info: Column information from the user's database.\n display_options: Display metadata managed by Mathesar.\n \"\"\"\n column_info: list[ColumnInfo]\n display_options: list[dict]\n\n\n@rpc_method(name=\"columns.list\")\n@http_basic_auth_login_required\n@handle_rpc_exceptions\ndef list_(*, table_oid: int, database_id: int, **kwargs) -> ColumnListReturn:\n \"\"\"\n List information about columns for a table. Exposed as `list`.\n\n Also return display options for each column, if they're defined.\n\n Args:\n table_oid: Identity of the table in the user's database.\n database_id: The Django id of the database containing the table.\n\n Returns:\n A list of column details, and a separate list of display options.\n \"\"\"\n user = kwargs.get(REQUEST_KEY).user\n with connect(database_id, user) as conn:\n raw_column_info = get_column_info_for_table(table_oid, conn)\n column_info, attnums = tuple(\n zip(\n *[(ColumnInfo.from_dict(col), col['id']) for col in raw_column_info]\n )\n )\n display_options = get_raw_display_options(\n database_id, table_oid, attnums, user\n )\n return ColumnListReturn(\n column_info=column_info,\n display_options=display_options,\n )\n", "path": "mathesar/rpc/columns.py"}, {"content": "\"\"\"The function in this module wraps SQL functions that drop columns.\"\"\"\nfrom db import connection as db_conn\n\n\ndef drop_column(table_oid, column_attnum, engine):\n \"\"\"\n Drop the given columns from the given table.\n\n Args:\n table_oid: OID of the table whose columns we'll drop.\n column_attnum: The attnums of the columns to drop.\n engine: SQLAlchemy engine object for connecting.\n\n Returns:\n Returns a string giving the command that was run.\n \"\"\"\n return db_conn.execute_msar_func_with_engine(\n engine, 'drop_columns', table_oid, column_attnum\n ).fetchone()[0]\n", "path": "db/columns/operations/drop.py"}]} | 2,409 | 847 |
gh_patches_debug_31953 | rasdani/github-patches | git_diff | goauthentik__authentik-5927 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LDAP Source Bind from Federation & Social login is Broken Since Commit 1ca8feb
**Describe the bug**
LDAP Sources are broken since commit 1ca8feb. This is due to a double bind being attempted on the Connection object: the second bind consistently raises an LDAPInvalidCredentialsResult exception even if the first bind was successful. I found this bug after updating Authentik to 2023.5.3 and wondering why LDAP logins no longer worked. The LDAP backend consistently changes passwords due to TOTP, so a cached password is never hit, always forcing an LDAP bind, which is what made me find this issue.
**To Reproduce**
1. Set up an LDAP Source that works
2. Log in as any user using the LDAP password for that user that is NOT cached
3. The user always gets Invalid Credentials
**Expected behavior**
The user should log in successfully
**Logs**
None Provided
**Version and Deployment (please complete the following information):**
- authentik version: 2023.5.3
- Deployment: docker-compose
**Additional context**
I confirmed this bug on my own stack by adding multiple logging lines using LOGGER and checking the DEBUG output. For some reason a second bind always causes this exception in Authentik if the first was successful; I am not sure whether connection state that is not reset after a successful bind is causing this. By removing the extra bind performed in /authentik/sources/ldap/auth.py I no longer get erroneous invalid credentials exceptions.
</issue>
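To make the single-bind approach described above concrete, here is a minimal sketch, assuming `source.connection()` is made responsible for the one and only bind attempt; the exact return and exception handling here is illustrative, not the project's definitive fix:

```python
# Sketch: rely on source.connection() to bind exactly once.
def auth_user_by_bind(self, source, user, password):
    try:
        # source.connection() already performs the bind; do not call bind() again,
        # which is what triggered the spurious invalid-credentials result.
        source.connection(
            connection_kwargs={
                "user": user.attributes.get(LDAP_DISTINGUISHED_NAME),
                "password": password,
            }
        )
        return user
    except LDAPInvalidCredentialsResult:
        return None
```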
<code>
[start of authentik/sources/ldap/models.py]
1 """authentik LDAP Models"""
2 from ssl import CERT_REQUIRED
3 from typing import Optional
4
5 from django.db import models
6 from django.utils.translation import gettext_lazy as _
7 from ldap3 import ALL, NONE, RANDOM, Connection, Server, ServerPool, Tls
8 from ldap3.core.exceptions import LDAPSchemaError
9 from rest_framework.serializers import Serializer
10
11 from authentik.core.models import Group, PropertyMapping, Source
12 from authentik.crypto.models import CertificateKeyPair
13 from authentik.lib.config import CONFIG
14 from authentik.lib.models import DomainlessURLValidator
15
16 LDAP_TIMEOUT = 15
17
18
19 class MultiURLValidator(DomainlessURLValidator):
20 """Same as DomainlessURLValidator but supports multiple URLs separated with a comma."""
21
22 def __call__(self, value: str):
23 if "," in value:
24 for url in value.split(","):
25 super().__call__(url)
26 else:
27 super().__call__(value)
28
29
30 class LDAPSource(Source):
31 """Federate LDAP Directory with authentik, or create new accounts in LDAP."""
32
33 server_uri = models.TextField(
34 validators=[MultiURLValidator(schemes=["ldap", "ldaps"])],
35 verbose_name=_("Server URI"),
36 )
37 peer_certificate = models.ForeignKey(
38 CertificateKeyPair,
39 on_delete=models.SET_DEFAULT,
40 default=None,
41 null=True,
42 help_text=_(
43 "Optionally verify the LDAP Server's Certificate against the CA Chain in this keypair."
44 ),
45 )
46
47 bind_cn = models.TextField(verbose_name=_("Bind CN"), blank=True)
48 bind_password = models.TextField(blank=True)
49 start_tls = models.BooleanField(default=False, verbose_name=_("Enable Start TLS"))
50
51 base_dn = models.TextField(verbose_name=_("Base DN"))
52 additional_user_dn = models.TextField(
53 help_text=_("Prepended to Base DN for User-queries."),
54 verbose_name=_("Addition User DN"),
55 blank=True,
56 )
57 additional_group_dn = models.TextField(
58 help_text=_("Prepended to Base DN for Group-queries."),
59 verbose_name=_("Addition Group DN"),
60 blank=True,
61 )
62
63 user_object_filter = models.TextField(
64 default="(objectClass=person)",
65 help_text=_("Consider Objects matching this filter to be Users."),
66 )
67 group_membership_field = models.TextField(
68 default="member", help_text=_("Field which contains members of a group.")
69 )
70 group_object_filter = models.TextField(
71 default="(objectClass=group)",
72 help_text=_("Consider Objects matching this filter to be Groups."),
73 )
74 object_uniqueness_field = models.TextField(
75 default="objectSid", help_text=_("Field which contains a unique Identifier.")
76 )
77
78 property_mappings_group = models.ManyToManyField(
79 PropertyMapping,
80 default=None,
81 blank=True,
82 help_text=_("Property mappings used for group creation/updating."),
83 )
84
85 sync_users = models.BooleanField(default=True)
86 sync_users_password = models.BooleanField(
87 default=True,
88 help_text=_(
89 "When a user changes their password, sync it back to LDAP. "
90 "This can only be enabled on a single LDAP source."
91 ),
92 )
93 sync_groups = models.BooleanField(default=True)
94 sync_parent_group = models.ForeignKey(
95 Group, blank=True, null=True, default=None, on_delete=models.SET_DEFAULT
96 )
97
98 @property
99 def component(self) -> str:
100 return "ak-source-ldap-form"
101
102 @property
103 def serializer(self) -> type[Serializer]:
104 from authentik.sources.ldap.api import LDAPSourceSerializer
105
106 return LDAPSourceSerializer
107
108 def server(self, **kwargs) -> Server:
109 """Get LDAP Server/ServerPool"""
110 servers = []
111 tls_kwargs = {}
112 if self.peer_certificate:
113 tls_kwargs["ca_certs_data"] = self.peer_certificate.certificate_data
114 tls_kwargs["validate"] = CERT_REQUIRED
115 if ciphers := CONFIG.y("ldap.tls.ciphers", None):
116 tls_kwargs["ciphers"] = ciphers.strip()
117 server_kwargs = {
118 "get_info": ALL,
119 "connect_timeout": LDAP_TIMEOUT,
120 "tls": Tls(**tls_kwargs),
121 }
122 server_kwargs.update(kwargs)
123 if "," in self.server_uri:
124 for server in self.server_uri.split(","):
125 servers.append(Server(server, **server_kwargs))
126 else:
127 servers = [Server(self.server_uri, **server_kwargs)]
128 return ServerPool(servers, RANDOM, active=True, exhaust=True)
129
130 def connection(
131 self, server_kwargs: Optional[dict] = None, connection_kwargs: Optional[dict] = None
132 ) -> Connection:
133 """Get a fully connected and bound LDAP Connection"""
134 server_kwargs = server_kwargs or {}
135 connection_kwargs = connection_kwargs or {}
136 connection_kwargs.setdefault("user", self.bind_cn)
137 connection_kwargs.setdefault("password", self.bind_password)
138 connection = Connection(
139 self.server(**server_kwargs),
140 raise_exceptions=True,
141 receive_timeout=LDAP_TIMEOUT,
142 **connection_kwargs,
143 )
144
145 if self.start_tls:
146 connection.start_tls(read_server_info=False)
147 try:
148 connection.bind()
149 except LDAPSchemaError as exc:
150 # Schema error, so try connecting without schema info
151 # See https://github.com/goauthentik/authentik/issues/4590
152 if server_kwargs.get("get_info", ALL) == NONE:
153 raise exc
154 server_kwargs["get_info"] = NONE
155 return self.connection(server_kwargs, connection_kwargs)
156 return connection
157
158 class Meta:
159 verbose_name = _("LDAP Source")
160 verbose_name_plural = _("LDAP Sources")
161
162
163 class LDAPPropertyMapping(PropertyMapping):
164 """Map LDAP Property to User or Group object attribute"""
165
166 object_field = models.TextField()
167
168 @property
169 def component(self) -> str:
170 return "ak-property-mapping-ldap-form"
171
172 @property
173 def serializer(self) -> type[Serializer]:
174 from authentik.sources.ldap.api import LDAPPropertyMappingSerializer
175
176 return LDAPPropertyMappingSerializer
177
178 def __str__(self):
179 return str(self.name)
180
181 class Meta:
182 verbose_name = _("LDAP Property Mapping")
183 verbose_name_plural = _("LDAP Property Mappings")
184
[end of authentik/sources/ldap/models.py]
[start of authentik/sources/ldap/auth.py]
1 """authentik LDAP Authentication Backend"""
2 from typing import Optional
3
4 from django.http import HttpRequest
5 from ldap3.core.exceptions import LDAPException, LDAPInvalidCredentialsResult
6 from structlog.stdlib import get_logger
7
8 from authentik.core.auth import InbuiltBackend
9 from authentik.core.models import User
10 from authentik.sources.ldap.models import LDAPSource
11
12 LOGGER = get_logger()
13 LDAP_DISTINGUISHED_NAME = "distinguishedName"
14
15
16 class LDAPBackend(InbuiltBackend):
17 """Authenticate users against LDAP Server"""
18
19 def authenticate(self, request: HttpRequest, **kwargs):
20 """Try to authenticate a user via ldap"""
21 if "password" not in kwargs:
22 return None
23 for source in LDAPSource.objects.filter(enabled=True):
24 LOGGER.debug("LDAP Auth attempt", source=source)
25 user = self.auth_user(source, **kwargs)
26 if user:
27 self.set_method("ldap", request, source=source)
28 return user
29 return None
30
31 def auth_user(self, source: LDAPSource, password: str, **filters: str) -> Optional[User]:
32 """Try to bind as either user_dn or mail with password.
33 Returns True on success, otherwise False"""
34 users = User.objects.filter(**filters)
35 if not users.exists():
36 return None
37 user: User = users.first()
38 if LDAP_DISTINGUISHED_NAME not in user.attributes:
39 LOGGER.debug("User doesn't have DN set, assuming not LDAP imported.", user=user)
40 return None
41 # Either has unusable password,
42 # or has a password, but couldn't be authenticated by ModelBackend.
43 # This means we check with a bind to see if the LDAP password has changed
44 if self.auth_user_by_bind(source, user, password):
45 # Password given successfully binds to LDAP, so we save it in our Database
46 LOGGER.debug("Updating user's password in DB", user=user)
47 user.set_password(password, signal=False)
48 user.save()
49 return user
50 # Password doesn't match
51 LOGGER.debug("Failed to bind, password invalid")
52 return None
53
54 def auth_user_by_bind(self, source: LDAPSource, user: User, password: str) -> Optional[User]:
55 """Attempt authentication by binding to the LDAP server as `user`. This
56 method should be avoided as its slow to do the bind."""
57 # Try to bind as new user
58 LOGGER.debug("Attempting to bind as user", user=user)
59 try:
60 temp_connection = source.connection(
61 connection_kwargs={
62 "user": user.attributes.get(LDAP_DISTINGUISHED_NAME),
63 "password": password,
64 }
65 )
66 temp_connection.bind()
67 return user
68 except LDAPInvalidCredentialsResult as exc:
69 LOGGER.debug("invalid LDAP credentials", user=user, exc=exc)
70 except LDAPException as exc:
71 LOGGER.warning("failed to bind to LDAP", exc=exc)
72 return None
73
[end of authentik/sources/ldap/auth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/authentik/sources/ldap/auth.py b/authentik/sources/ldap/auth.py
--- a/authentik/sources/ldap/auth.py
+++ b/authentik/sources/ldap/auth.py
@@ -57,13 +57,13 @@
# Try to bind as new user
LOGGER.debug("Attempting to bind as user", user=user)
try:
- temp_connection = source.connection(
+ # source.connection also attempts to bind
+ source.connection(
connection_kwargs={
"user": user.attributes.get(LDAP_DISTINGUISHED_NAME),
"password": password,
}
)
- temp_connection.bind()
return user
except LDAPInvalidCredentialsResult as exc:
LOGGER.debug("invalid LDAP credentials", user=user, exc=exc)
diff --git a/authentik/sources/ldap/models.py b/authentik/sources/ldap/models.py
--- a/authentik/sources/ldap/models.py
+++ b/authentik/sources/ldap/models.py
@@ -145,7 +145,9 @@
if self.start_tls:
connection.start_tls(read_server_info=False)
try:
- connection.bind()
+ successful = connection.bind()
+ if successful:
+ return connection
except LDAPSchemaError as exc:
# Schema error, so try connecting without schema info
# See https://github.com/goauthentik/authentik/issues/4590
@@ -153,7 +155,7 @@
raise exc
server_kwargs["get_info"] = NONE
return self.connection(server_kwargs, connection_kwargs)
- return connection
+ return RuntimeError("Failed to bind")
class Meta:
verbose_name = _("LDAP Source")
| {"golden_diff": "diff --git a/authentik/sources/ldap/auth.py b/authentik/sources/ldap/auth.py\n--- a/authentik/sources/ldap/auth.py\n+++ b/authentik/sources/ldap/auth.py\n@@ -57,13 +57,13 @@\n # Try to bind as new user\n LOGGER.debug(\"Attempting to bind as user\", user=user)\n try:\n- temp_connection = source.connection(\n+ # source.connection also attempts to bind\n+ source.connection(\n connection_kwargs={\n \"user\": user.attributes.get(LDAP_DISTINGUISHED_NAME),\n \"password\": password,\n }\n )\n- temp_connection.bind()\n return user\n except LDAPInvalidCredentialsResult as exc:\n LOGGER.debug(\"invalid LDAP credentials\", user=user, exc=exc)\ndiff --git a/authentik/sources/ldap/models.py b/authentik/sources/ldap/models.py\n--- a/authentik/sources/ldap/models.py\n+++ b/authentik/sources/ldap/models.py\n@@ -145,7 +145,9 @@\n if self.start_tls:\n connection.start_tls(read_server_info=False)\n try:\n- connection.bind()\n+ successful = connection.bind()\n+ if successful:\n+ return connection\n except LDAPSchemaError as exc:\n # Schema error, so try connecting without schema info\n # See https://github.com/goauthentik/authentik/issues/4590\n@@ -153,7 +155,7 @@\n raise exc\n server_kwargs[\"get_info\"] = NONE\n return self.connection(server_kwargs, connection_kwargs)\n- return connection\n+ return RuntimeError(\"Failed to bind\")\n \n class Meta:\n verbose_name = _(\"LDAP Source\")\n", "issue": "LDAP Source Bind from Federation & Social login is Broken Since Commit 1ca8feb\n**Describe the bug**\r\nLDAP Sources are broken since commit 1ca8feb. This is due to a double bind that is attempted on the Connection object. The second bind consistently causes a LDAPInvalidCredentailResult exception even if it is successful the first bind. I found this bug after updating Authentik to 2023.5.3 wondering why LDAP logins no longer worked. The LDAP backend is consistently changing passwords due to TOTP so a cached password was never hit always forcing a LDAP bind. Which is what made me find this issue.\r\n\r\n**To Reproduce**\r\n1. Set up a LDAP Source that works\r\n2. Login in as any user using the LDAP Password for that users THATS NOT CACHED\r\n3. User Always gets Invalid Credentials\r\n\r\n**Expected behavior**\r\nUser should successfully login \r\n\r\n**Logs**\r\nNone Provided\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: 2023.5.3\r\n- Deployment: docker-compose\r\n\r\n\r\n**Additional context**\r\nI confirmed this bug on my own stack by creating multiple logging lines using LOGGER and checking DEBUG output. For some reason a second bind always causes this Exception with Authentik if the first was successful. I'm not sure if the state is reset per successful connection which is causing this. 
But by removing the extra bind performed in /authentik/sources/ldap/auth.py I no longer get erroneous invalid credential exceptions.\r\n\n", "before_files": [{"content": "\"\"\"authentik LDAP Models\"\"\"\nfrom ssl import CERT_REQUIRED\nfrom typing import Optional\n\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom ldap3 import ALL, NONE, RANDOM, Connection, Server, ServerPool, Tls\nfrom ldap3.core.exceptions import LDAPSchemaError\nfrom rest_framework.serializers import Serializer\n\nfrom authentik.core.models import Group, PropertyMapping, Source\nfrom authentik.crypto.models import CertificateKeyPair\nfrom authentik.lib.config import CONFIG\nfrom authentik.lib.models import DomainlessURLValidator\n\nLDAP_TIMEOUT = 15\n\n\nclass MultiURLValidator(DomainlessURLValidator):\n \"\"\"Same as DomainlessURLValidator but supports multiple URLs separated with a comma.\"\"\"\n\n def __call__(self, value: str):\n if \",\" in value:\n for url in value.split(\",\"):\n super().__call__(url)\n else:\n super().__call__(value)\n\n\nclass LDAPSource(Source):\n \"\"\"Federate LDAP Directory with authentik, or create new accounts in LDAP.\"\"\"\n\n server_uri = models.TextField(\n validators=[MultiURLValidator(schemes=[\"ldap\", \"ldaps\"])],\n verbose_name=_(\"Server URI\"),\n )\n peer_certificate = models.ForeignKey(\n CertificateKeyPair,\n on_delete=models.SET_DEFAULT,\n default=None,\n null=True,\n help_text=_(\n \"Optionally verify the LDAP Server's Certificate against the CA Chain in this keypair.\"\n ),\n )\n\n bind_cn = models.TextField(verbose_name=_(\"Bind CN\"), blank=True)\n bind_password = models.TextField(blank=True)\n start_tls = models.BooleanField(default=False, verbose_name=_(\"Enable Start TLS\"))\n\n base_dn = models.TextField(verbose_name=_(\"Base DN\"))\n additional_user_dn = models.TextField(\n help_text=_(\"Prepended to Base DN for User-queries.\"),\n verbose_name=_(\"Addition User DN\"),\n blank=True,\n )\n additional_group_dn = models.TextField(\n help_text=_(\"Prepended to Base DN for Group-queries.\"),\n verbose_name=_(\"Addition Group DN\"),\n blank=True,\n )\n\n user_object_filter = models.TextField(\n default=\"(objectClass=person)\",\n help_text=_(\"Consider Objects matching this filter to be Users.\"),\n )\n group_membership_field = models.TextField(\n default=\"member\", help_text=_(\"Field which contains members of a group.\")\n )\n group_object_filter = models.TextField(\n default=\"(objectClass=group)\",\n help_text=_(\"Consider Objects matching this filter to be Groups.\"),\n )\n object_uniqueness_field = models.TextField(\n default=\"objectSid\", help_text=_(\"Field which contains a unique Identifier.\")\n )\n\n property_mappings_group = models.ManyToManyField(\n PropertyMapping,\n default=None,\n blank=True,\n help_text=_(\"Property mappings used for group creation/updating.\"),\n )\n\n sync_users = models.BooleanField(default=True)\n sync_users_password = models.BooleanField(\n default=True,\n help_text=_(\n \"When a user changes their password, sync it back to LDAP. 
\"\n \"This can only be enabled on a single LDAP source.\"\n ),\n )\n sync_groups = models.BooleanField(default=True)\n sync_parent_group = models.ForeignKey(\n Group, blank=True, null=True, default=None, on_delete=models.SET_DEFAULT\n )\n\n @property\n def component(self) -> str:\n return \"ak-source-ldap-form\"\n\n @property\n def serializer(self) -> type[Serializer]:\n from authentik.sources.ldap.api import LDAPSourceSerializer\n\n return LDAPSourceSerializer\n\n def server(self, **kwargs) -> Server:\n \"\"\"Get LDAP Server/ServerPool\"\"\"\n servers = []\n tls_kwargs = {}\n if self.peer_certificate:\n tls_kwargs[\"ca_certs_data\"] = self.peer_certificate.certificate_data\n tls_kwargs[\"validate\"] = CERT_REQUIRED\n if ciphers := CONFIG.y(\"ldap.tls.ciphers\", None):\n tls_kwargs[\"ciphers\"] = ciphers.strip()\n server_kwargs = {\n \"get_info\": ALL,\n \"connect_timeout\": LDAP_TIMEOUT,\n \"tls\": Tls(**tls_kwargs),\n }\n server_kwargs.update(kwargs)\n if \",\" in self.server_uri:\n for server in self.server_uri.split(\",\"):\n servers.append(Server(server, **server_kwargs))\n else:\n servers = [Server(self.server_uri, **server_kwargs)]\n return ServerPool(servers, RANDOM, active=True, exhaust=True)\n\n def connection(\n self, server_kwargs: Optional[dict] = None, connection_kwargs: Optional[dict] = None\n ) -> Connection:\n \"\"\"Get a fully connected and bound LDAP Connection\"\"\"\n server_kwargs = server_kwargs or {}\n connection_kwargs = connection_kwargs or {}\n connection_kwargs.setdefault(\"user\", self.bind_cn)\n connection_kwargs.setdefault(\"password\", self.bind_password)\n connection = Connection(\n self.server(**server_kwargs),\n raise_exceptions=True,\n receive_timeout=LDAP_TIMEOUT,\n **connection_kwargs,\n )\n\n if self.start_tls:\n connection.start_tls(read_server_info=False)\n try:\n connection.bind()\n except LDAPSchemaError as exc:\n # Schema error, so try connecting without schema info\n # See https://github.com/goauthentik/authentik/issues/4590\n if server_kwargs.get(\"get_info\", ALL) == NONE:\n raise exc\n server_kwargs[\"get_info\"] = NONE\n return self.connection(server_kwargs, connection_kwargs)\n return connection\n\n class Meta:\n verbose_name = _(\"LDAP Source\")\n verbose_name_plural = _(\"LDAP Sources\")\n\n\nclass LDAPPropertyMapping(PropertyMapping):\n \"\"\"Map LDAP Property to User or Group object attribute\"\"\"\n\n object_field = models.TextField()\n\n @property\n def component(self) -> str:\n return \"ak-property-mapping-ldap-form\"\n\n @property\n def serializer(self) -> type[Serializer]:\n from authentik.sources.ldap.api import LDAPPropertyMappingSerializer\n\n return LDAPPropertyMappingSerializer\n\n def __str__(self):\n return str(self.name)\n\n class Meta:\n verbose_name = _(\"LDAP Property Mapping\")\n verbose_name_plural = _(\"LDAP Property Mappings\")\n", "path": "authentik/sources/ldap/models.py"}, {"content": "\"\"\"authentik LDAP Authentication Backend\"\"\"\nfrom typing import Optional\n\nfrom django.http import HttpRequest\nfrom ldap3.core.exceptions import LDAPException, LDAPInvalidCredentialsResult\nfrom structlog.stdlib import get_logger\n\nfrom authentik.core.auth import InbuiltBackend\nfrom authentik.core.models import User\nfrom authentik.sources.ldap.models import LDAPSource\n\nLOGGER = get_logger()\nLDAP_DISTINGUISHED_NAME = \"distinguishedName\"\n\n\nclass LDAPBackend(InbuiltBackend):\n \"\"\"Authenticate users against LDAP Server\"\"\"\n\n def authenticate(self, request: HttpRequest, **kwargs):\n \"\"\"Try to authenticate a 
user via ldap\"\"\"\n if \"password\" not in kwargs:\n return None\n for source in LDAPSource.objects.filter(enabled=True):\n LOGGER.debug(\"LDAP Auth attempt\", source=source)\n user = self.auth_user(source, **kwargs)\n if user:\n self.set_method(\"ldap\", request, source=source)\n return user\n return None\n\n def auth_user(self, source: LDAPSource, password: str, **filters: str) -> Optional[User]:\n \"\"\"Try to bind as either user_dn or mail with password.\n Returns True on success, otherwise False\"\"\"\n users = User.objects.filter(**filters)\n if not users.exists():\n return None\n user: User = users.first()\n if LDAP_DISTINGUISHED_NAME not in user.attributes:\n LOGGER.debug(\"User doesn't have DN set, assuming not LDAP imported.\", user=user)\n return None\n # Either has unusable password,\n # or has a password, but couldn't be authenticated by ModelBackend.\n # This means we check with a bind to see if the LDAP password has changed\n if self.auth_user_by_bind(source, user, password):\n # Password given successfully binds to LDAP, so we save it in our Database\n LOGGER.debug(\"Updating user's password in DB\", user=user)\n user.set_password(password, signal=False)\n user.save()\n return user\n # Password doesn't match\n LOGGER.debug(\"Failed to bind, password invalid\")\n return None\n\n def auth_user_by_bind(self, source: LDAPSource, user: User, password: str) -> Optional[User]:\n \"\"\"Attempt authentication by binding to the LDAP server as `user`. This\n method should be avoided as its slow to do the bind.\"\"\"\n # Try to bind as new user\n LOGGER.debug(\"Attempting to bind as user\", user=user)\n try:\n temp_connection = source.connection(\n connection_kwargs={\n \"user\": user.attributes.get(LDAP_DISTINGUISHED_NAME),\n \"password\": password,\n }\n )\n temp_connection.bind()\n return user\n except LDAPInvalidCredentialsResult as exc:\n LOGGER.debug(\"invalid LDAP credentials\", user=user, exc=exc)\n except LDAPException as exc:\n LOGGER.warning(\"failed to bind to LDAP\", exc=exc)\n return None\n", "path": "authentik/sources/ldap/auth.py"}]} | 3,392 | 374 |
gh_patches_debug_41083 | rasdani/github-patches | git_diff | iterative__dvc-925 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
S3 AWS: default profile is always used
Hi!
Defining a profile in the dvc config has no influence on which profile from the AWS credentials file actually gets used.
I took a quick look at the code and I never see boto3.session(profile=...) called. Or is this done using the AWS_PROFILE env variable?
(using dvc 0.14.0)
</issue>
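For reference, boto3 only honours a named profile when the session is created with `profile_name`; a bare `boto3.resource('s3')`, as in the listing below, always walks the default credential chain. A minimal sketch of profile-aware client creation (the helper name is illustrative, not dvc code):

```python
# Sketch only: build an S3 client bound to a named AWS profile.
import boto3

def make_s3_client(profile_name=None, endpoint_url=None):
    # profile_name=None falls back to the default profile / environment credentials.
    session = boto3.session.Session(profile_name=profile_name)
    return session.client("s3", endpoint_url=endpoint_url)
```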
<code>
[start of dvc/remote/s3.py]
1 import os
2 import math
3 import threading
4 import posixpath
5
6 try:
7 import boto3
8 except ImportError:
9 boto3 = None
10
11 try:
12 from urlparse import urlparse
13 except ImportError:
14 from urllib.parse import urlparse
15
16 from dvc.logger import Logger
17 from dvc.progress import progress
18 from dvc.config import Config
19 from dvc.remote.base import RemoteBase
20 from dvc.remote.local import RemoteLOCAL
21 from dvc.exceptions import DvcException
22
23
24 class Callback(object):
25 def __init__(self, name, total):
26 self.name = name
27 self.total = total
28 self.current = 0
29 self.lock = threading.Lock()
30
31 def __call__(self, byts):
32 with self.lock:
33 self.current += byts
34 progress.update_target(self.name, self.current, self.total)
35
36
37 class RemoteS3(RemoteBase):
38 scheme = 's3'
39 REGEX = r'^s3://(?P<path>.*)$'
40 REQUIRES = {'boto3': boto3}
41 PARAM_ETAG = 'etag'
42
43 def __init__(self, project, config):
44 self.project = project
45 storagepath = 's3://' + config.get(Config.SECTION_AWS_STORAGEPATH, '').lstrip('/')
46 self.url = config.get(Config.SECTION_REMOTE_URL, storagepath)
47 self.region = config.get(Config.SECTION_AWS_REGION, None)
48 self.profile = config.get(Config.SECTION_AWS_PROFILE, None)
49 self.credentialpath = config.get(Config.SECTION_AWS_CREDENTIALPATH, None)
50 self.endpoint_url = config.get(Config.SECTION_AWS_ENDPOINT_URL, None)
51
52 @property
53 def bucket(self):
54 return urlparse(self.url).netloc
55
56 @property
57 def prefix(self):
58 return urlparse(self.url).path.lstrip('/')
59
60 @property
61 def s3(self):
62 return boto3.resource('s3', endpoint_url=self.endpoint_url)
63
64 @property
65 def s3_session_client(self):
66 session = boto3.session.Session()
67 s3 = session.client('s3', endpoint_url=self.endpoint_url)
68 return s3
69
70 def get_etag(self, bucket, key):
71 try:
72 obj = self.s3.Object(bucket, key).get()
73 except Exception:
74 raise DvcException('s3://{}/{} does not exist'.format(bucket, key))
75
76 return obj['ETag'].strip('"')
77
78 def save_info(self, path_info):
79 if path_info['scheme'] != 's3':
80 raise NotImplementedError
81
82 return {self.PARAM_ETAG: self.get_etag(path_info['bucket'], path_info['key'])}
83
84 def _copy(self, from_info, to_info, s3=None):
85 s3 = s3 if s3 else self.s3
86
87 source = {'Bucket': from_info['bucket'],
88 'Key': from_info['key']}
89 self.s3.Bucket(to_info['bucket']).copy(source, to_info['key'])
90
91 def save(self, path_info):
92 if path_info['scheme'] != 's3':
93 raise NotImplementedError
94
95 etag = self.get_etag(path_info['bucket'], path_info['key'])
96 key = posixpath.join(self.prefix, etag[0:2], etag[2:])
97 to_info = {'scheme': 's3', 'bucket': self.bucket, 'key': key}
98
99 self._copy(path_info, to_info)
100
101 return {self.PARAM_ETAG: etag}
102
103 def checkout(self, path_info, checksum_info):
104 if path_info['scheme'] != 's3':
105 raise NotImplementedError
106
107 etag = checksum_info.get(self.PARAM_ETAG, None)
108 if not etag:
109 return
110
111 key = posixpath.join(self.prefix, etag[0:2], etag[2:])
112 from_info = {'scheme': 's3', 'bucket': self.bucket, 'key': key}
113
114 self._copy(from_info, path_info)
115
116 def remove(self, path_info):
117 if path_info['scheme'] != 's3':
118 raise NotImplementedError
119
120 Logger.debug('Removing s3://{}/{}'.format(path_info['bucket'],
121 path_info['key']))
122
123 obj = self.s3.Object(path_info['bucket'], path_info['key'])
124 try:
125 obj.get()
126 except Exception:
127 return
128
129 obj.delete()
130
131 def md5s_to_path_infos(self, md5s):
132 return [{'scheme': self.scheme,
133 'bucket': self.bucket,
134 'key': posixpath.join(self.prefix, md5[0:2], md5[2:])} for md5 in md5s]
135
136 def exists(self, path_infos):
137 # NOTE: We mostly use exists() method when filtering a bulk of cache
138 # files to decide if we need to download/upload them and in s3
139 # list_objects_v2() is much-much faster than trying to check keys
140 # one-by-one.
141 ret = []
142 s3 = self.s3_session_client
143
144 keys = []
145 kwargs = {'Bucket': self.bucket,
146 'Prefix': self.prefix}
147 while True:
148 resp = s3.list_objects_v2(**kwargs)
149 contents = resp.get('Contents', None)
150 if not contents:
151 break
152
153 for obj in contents:
154 keys.append(obj['Key'])
155
156 token = resp.get('NextContinuationToken', None)
157 if not token:
158 break
159
160 kwargs['ContinuationToken'] = token
161
162 for path_info in path_infos:
163 exists = False
164 if path_info['key'] in keys:
165 exists = True
166 ret.append(exists)
167
168 return ret
169
170 def upload(self, from_infos, to_infos, names=None):
171 names = self._verify_path_args(to_infos, from_infos, names)
172
173 s3 = self.s3_session_client
174
175 for from_info, to_info, name in zip(from_infos, to_infos, names):
176 if to_info['scheme'] != 's3':
177 raise NotImplementedError
178
179 if from_info['scheme'] != 'local':
180 raise NotImplementedError
181
182 Logger.debug("Uploading '{}' to '{}/{}'".format(from_info['path'],
183 to_info['bucket'],
184 to_info['key']))
185
186 if not name:
187 name = os.path.basename(from_info['path'])
188
189 total = os.path.getsize(from_info['path'])
190 cb = Callback(name, total)
191
192 try:
193 s3.upload_file(from_info['path'], to_info['bucket'], to_info['key'], Callback=cb)
194 except Exception as exc:
195 Logger.error("Failed to upload '{}'".format(from_info['path']), exc)
196 continue
197
198 progress.finish_target(name)
199
200 def download(self, from_infos, to_infos, no_progress_bar=False, names=None):
201 names = self._verify_path_args(from_infos, to_infos, names)
202
203 s3 = self.s3_session_client
204
205 for to_info, from_info, name in zip(to_infos, from_infos, names):
206 if from_info['scheme'] != 's3':
207 raise NotImplementedError
208
209 if to_info['scheme'] == 's3':
210 self._copy(from_info, to_info, s3=s3)
211 continue
212
213 if to_info['scheme'] != 'local':
214 raise NotImplementedError
215
216 Logger.debug("Downloading '{}/{}' to '{}'".format(from_info['bucket'],
217 from_info['key'],
218 to_info['path']))
219
220 tmp_file = self.tmp_file(to_info['path'])
221 if not name:
222 name = os.path.basename(to_info['path'])
223
224 if no_progress_bar:
225 cb = None
226 else:
227 total = s3.head_object(Bucket=from_info['bucket'],
228 Key=from_info['key'])['ContentLength']
229 cb = Callback(name, total)
230
231 self._makedirs(to_info['path'])
232
233 try:
234 s3.download_file(from_info['bucket'], from_info['key'], tmp_file, Callback=cb)
235 except Exception as exc:
236 Logger.error("Failed to download '{}/{}'".format(from_info['bucket'],
237 from_info['key']), exc)
238 return
239
240 os.rename(tmp_file, to_info['path'])
241
242 if not no_progress_bar:
243 progress.finish_target(name)
244
245 def _path_to_etag(self, path):
246 relpath = posixpath.relpath(path, self.prefix)
247 return posixpath.dirname(relpath) + posixpath.basename(relpath)
248
249 def _all(self):
250 objects = self.s3.Bucket(self.bucket).objects.filter(Prefix=self.prefix)
251 return [self._path_to_etag(obj.key) for obj in objects]
252
253 def gc(self, checksum_infos):
254 used_etags = [info[self.PARAM_ETAG] for info in checksum_infos['s3']]
255 used_etags += [info[RemoteLOCAL.PARAM_MD5] for info in checksum_infos['local']]
256
257 for etag in self._all():
258 if etag in used_etags:
259 continue
260 path_info = {'scheme': 's3',
261 'key': posixpath.join(self.prefix, etag[0:2], etag[2:]),
262 'bucket': self.bucket}
263 self.remove(path_info)
264
[end of dvc/remote/s3.py]
</code>
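Note that the listing above stores `credentialpath` but never reads the file. One way it could be honoured is to parse the AWS-style credentials file directly; a hedged sketch, assuming `configobj` is available and the standard AWS key names:

```python
# Sketch: pull region and keys for a named profile out of an AWS credentials file.
import configobj

def load_aws_profile(credentialpath, profile="default"):
    creds = configobj.ConfigObj(credentialpath).get(profile, {})
    return {
        "region_name": creds.get("region"),
        "aws_access_key_id": creds.get("aws_access_key_id"),
        "aws_secret_access_key": creds.get("aws_secret_access_key"),
    }
```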
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/remote/s3.py b/dvc/remote/s3.py
--- a/dvc/remote/s3.py
+++ b/dvc/remote/s3.py
@@ -41,14 +41,26 @@
PARAM_ETAG = 'etag'
def __init__(self, project, config):
+ import configobj
+
self.project = project
storagepath = 's3://' + config.get(Config.SECTION_AWS_STORAGEPATH, '').lstrip('/')
self.url = config.get(Config.SECTION_REMOTE_URL, storagepath)
self.region = config.get(Config.SECTION_AWS_REGION, None)
- self.profile = config.get(Config.SECTION_AWS_PROFILE, None)
- self.credentialpath = config.get(Config.SECTION_AWS_CREDENTIALPATH, None)
+ self.profile = config.get(Config.SECTION_AWS_PROFILE, 'default')
self.endpoint_url = config.get(Config.SECTION_AWS_ENDPOINT_URL, None)
+ credentialpath = config.get(Config.SECTION_AWS_CREDENTIALPATH, None)
+ if credentialpath:
+ creds_conf = configobj.ConfigObj(credentialpath)
+ creds = creds_conf.get(self.profile, {})
+ else:
+ creds = {}
+
+ self.region = creds.get('region', self.region)
+ self.aws_access_key_id = creds.get('aws_access_key_id', None)
+ self.aws_secret_access_key = creds.get('aws_secret_access_key', None)
+
@property
def bucket(self):
return urlparse(self.url).netloc
@@ -59,13 +71,12 @@
@property
def s3(self):
- return boto3.resource('s3', endpoint_url=self.endpoint_url)
-
- @property
- def s3_session_client(self):
- session = boto3.session.Session()
- s3 = session.client('s3', endpoint_url=self.endpoint_url)
- return s3
+ session = boto3.session.Session(profile_name=self.profile)
+ return session.client('s3',
+ aws_access_key_id=self.aws_access_key_id,
+ aws_secret_access_key=self.aws_secret_access_key,
+ region_name=self.region,
+ endpoint_url=self.endpoint_url)
def get_etag(self, bucket, key):
try:
@@ -139,7 +150,7 @@
# list_objects_v2() is much-much faster than trying to check keys
# one-by-one.
ret = []
- s3 = self.s3_session_client
+ s3 = self.s3
keys = []
kwargs = {'Bucket': self.bucket,
@@ -170,7 +181,7 @@
def upload(self, from_infos, to_infos, names=None):
names = self._verify_path_args(to_infos, from_infos, names)
- s3 = self.s3_session_client
+ s3 = self.s3
for from_info, to_info, name in zip(from_infos, to_infos, names):
if to_info['scheme'] != 's3':
@@ -200,7 +211,7 @@
def download(self, from_infos, to_infos, no_progress_bar=False, names=None):
names = self._verify_path_args(from_infos, to_infos, names)
- s3 = self.s3_session_client
+ s3 = self.s3
for to_info, from_info, name in zip(to_infos, from_infos, names):
if from_info['scheme'] != 's3':
| {"golden_diff": "diff --git a/dvc/remote/s3.py b/dvc/remote/s3.py\n--- a/dvc/remote/s3.py\n+++ b/dvc/remote/s3.py\n@@ -41,14 +41,26 @@\n PARAM_ETAG = 'etag'\n \n def __init__(self, project, config):\n+ import configobj\n+\n self.project = project\n storagepath = 's3://' + config.get(Config.SECTION_AWS_STORAGEPATH, '').lstrip('/')\n self.url = config.get(Config.SECTION_REMOTE_URL, storagepath)\n self.region = config.get(Config.SECTION_AWS_REGION, None)\n- self.profile = config.get(Config.SECTION_AWS_PROFILE, None)\n- self.credentialpath = config.get(Config.SECTION_AWS_CREDENTIALPATH, None)\n+ self.profile = config.get(Config.SECTION_AWS_PROFILE, 'default')\n self.endpoint_url = config.get(Config.SECTION_AWS_ENDPOINT_URL, None)\n \n+ credentialpath = config.get(Config.SECTION_AWS_CREDENTIALPATH, None)\n+ if credentialpath:\n+ creds_conf = configobj.ConfigObj(credentialpath)\n+ creds = creds_conf.get(self.profile, {})\n+ else:\n+ creds = {}\n+\n+ self.region = creds.get('region', self.region)\n+ self.aws_access_key_id = creds.get('aws_access_key_id', None)\n+ self.aws_secret_access_key = creds.get('aws_secret_access_key', None)\n+\n @property\n def bucket(self):\n return urlparse(self.url).netloc\n@@ -59,13 +71,12 @@\n \n @property\n def s3(self):\n- return boto3.resource('s3', endpoint_url=self.endpoint_url)\n-\n- @property\n- def s3_session_client(self):\n- session = boto3.session.Session()\n- s3 = session.client('s3', endpoint_url=self.endpoint_url)\n- return s3\n+ session = boto3.session.Session(profile_name=self.profile)\n+ return session.client('s3',\n+ aws_access_key_id=self.aws_access_key_id,\n+ aws_secret_access_key=self.aws_secret_access_key,\n+ region_name=self.region,\n+ endpoint_url=self.endpoint_url)\n \n def get_etag(self, bucket, key):\n try:\n@@ -139,7 +150,7 @@\n # list_objects_v2() is much-much faster than trying to check keys\n # one-by-one.\n ret = []\n- s3 = self.s3_session_client\n+ s3 = self.s3\n \n keys = []\n kwargs = {'Bucket': self.bucket,\n@@ -170,7 +181,7 @@\n def upload(self, from_infos, to_infos, names=None):\n names = self._verify_path_args(to_infos, from_infos, names)\n \n- s3 = self.s3_session_client\n+ s3 = self.s3\n \n for from_info, to_info, name in zip(from_infos, to_infos, names):\n if to_info['scheme'] != 's3':\n@@ -200,7 +211,7 @@\n def download(self, from_infos, to_infos, no_progress_bar=False, names=None):\n names = self._verify_path_args(from_infos, to_infos, names)\n \n- s3 = self.s3_session_client\n+ s3 = self.s3\n \n for to_info, from_info, name in zip(to_infos, from_infos, names):\n if from_info['scheme'] != 's3':\n", "issue": "S3 AWS: default profile is always used\nHi!\r\n\r\nDefining a profile in dvc-config has no influence on the used profile in AWS credentials file.\r\n\r\nI quickly took a look at the code and I never see boto3.session(profile=..) called. 
Or is this done using the AWS_PROFILE env variable?\r\n\r\n(using dvc 0.14.0)\n", "before_files": [{"content": "import os\nimport math\nimport threading\nimport posixpath\n\ntry:\n import boto3\nexcept ImportError:\n boto3 = None\n\ntry:\n from urlparse import urlparse\nexcept ImportError:\n from urllib.parse import urlparse\n\nfrom dvc.logger import Logger\nfrom dvc.progress import progress\nfrom dvc.config import Config\nfrom dvc.remote.base import RemoteBase\nfrom dvc.remote.local import RemoteLOCAL\nfrom dvc.exceptions import DvcException\n\n\nclass Callback(object):\n def __init__(self, name, total):\n self.name = name\n self.total = total\n self.current = 0\n self.lock = threading.Lock()\n\n def __call__(self, byts):\n with self.lock:\n self.current += byts\n progress.update_target(self.name, self.current, self.total)\n\n\nclass RemoteS3(RemoteBase):\n scheme = 's3'\n REGEX = r'^s3://(?P<path>.*)$'\n REQUIRES = {'boto3': boto3}\n PARAM_ETAG = 'etag'\n\n def __init__(self, project, config):\n self.project = project\n storagepath = 's3://' + config.get(Config.SECTION_AWS_STORAGEPATH, '').lstrip('/')\n self.url = config.get(Config.SECTION_REMOTE_URL, storagepath)\n self.region = config.get(Config.SECTION_AWS_REGION, None)\n self.profile = config.get(Config.SECTION_AWS_PROFILE, None)\n self.credentialpath = config.get(Config.SECTION_AWS_CREDENTIALPATH, None)\n self.endpoint_url = config.get(Config.SECTION_AWS_ENDPOINT_URL, None)\n\n @property\n def bucket(self):\n return urlparse(self.url).netloc\n\n @property\n def prefix(self):\n return urlparse(self.url).path.lstrip('/')\n\n @property\n def s3(self):\n return boto3.resource('s3', endpoint_url=self.endpoint_url)\n\n @property\n def s3_session_client(self):\n session = boto3.session.Session()\n s3 = session.client('s3', endpoint_url=self.endpoint_url)\n return s3\n\n def get_etag(self, bucket, key):\n try:\n obj = self.s3.Object(bucket, key).get()\n except Exception:\n raise DvcException('s3://{}/{} does not exist'.format(bucket, key))\n\n return obj['ETag'].strip('\"')\n\n def save_info(self, path_info):\n if path_info['scheme'] != 's3':\n raise NotImplementedError\n\n return {self.PARAM_ETAG: self.get_etag(path_info['bucket'], path_info['key'])}\n\n def _copy(self, from_info, to_info, s3=None):\n s3 = s3 if s3 else self.s3\n\n source = {'Bucket': from_info['bucket'],\n 'Key': from_info['key']}\n self.s3.Bucket(to_info['bucket']).copy(source, to_info['key'])\n\n def save(self, path_info):\n if path_info['scheme'] != 's3':\n raise NotImplementedError\n\n etag = self.get_etag(path_info['bucket'], path_info['key'])\n key = posixpath.join(self.prefix, etag[0:2], etag[2:])\n to_info = {'scheme': 's3', 'bucket': self.bucket, 'key': key}\n\n self._copy(path_info, to_info)\n\n return {self.PARAM_ETAG: etag}\n\n def checkout(self, path_info, checksum_info):\n if path_info['scheme'] != 's3':\n raise NotImplementedError\n\n etag = checksum_info.get(self.PARAM_ETAG, None)\n if not etag:\n return\n\n key = posixpath.join(self.prefix, etag[0:2], etag[2:])\n from_info = {'scheme': 's3', 'bucket': self.bucket, 'key': key}\n\n self._copy(from_info, path_info)\n\n def remove(self, path_info):\n if path_info['scheme'] != 's3':\n raise NotImplementedError\n\n Logger.debug('Removing s3://{}/{}'.format(path_info['bucket'],\n path_info['key']))\n\n obj = self.s3.Object(path_info['bucket'], path_info['key'])\n try:\n obj.get()\n except Exception:\n return\n\n obj.delete()\n\n def md5s_to_path_infos(self, md5s):\n return [{'scheme': self.scheme,\n 'bucket': 
self.bucket,\n 'key': posixpath.join(self.prefix, md5[0:2], md5[2:])} for md5 in md5s]\n\n def exists(self, path_infos):\n # NOTE: We mostly use exists() method when filtering a bulk of cache\n # files to decide if we need to download/upload them and in s3\n # list_objects_v2() is much-much faster than trying to check keys\n # one-by-one.\n ret = []\n s3 = self.s3_session_client\n\n keys = []\n kwargs = {'Bucket': self.bucket,\n 'Prefix': self.prefix}\n while True:\n resp = s3.list_objects_v2(**kwargs)\n contents = resp.get('Contents', None)\n if not contents:\n break\n\n for obj in contents:\n keys.append(obj['Key'])\n\n token = resp.get('NextContinuationToken', None)\n if not token:\n break\n\n kwargs['ContinuationToken'] = token\n\n for path_info in path_infos:\n exists = False\n if path_info['key'] in keys:\n exists = True\n ret.append(exists)\n\n return ret\n\n def upload(self, from_infos, to_infos, names=None):\n names = self._verify_path_args(to_infos, from_infos, names)\n\n s3 = self.s3_session_client\n\n for from_info, to_info, name in zip(from_infos, to_infos, names):\n if to_info['scheme'] != 's3':\n raise NotImplementedError\n\n if from_info['scheme'] != 'local':\n raise NotImplementedError\n\n Logger.debug(\"Uploading '{}' to '{}/{}'\".format(from_info['path'],\n to_info['bucket'],\n to_info['key']))\n\n if not name:\n name = os.path.basename(from_info['path'])\n\n total = os.path.getsize(from_info['path'])\n cb = Callback(name, total)\n\n try:\n s3.upload_file(from_info['path'], to_info['bucket'], to_info['key'], Callback=cb)\n except Exception as exc:\n Logger.error(\"Failed to upload '{}'\".format(from_info['path']), exc)\n continue\n\n progress.finish_target(name)\n\n def download(self, from_infos, to_infos, no_progress_bar=False, names=None):\n names = self._verify_path_args(from_infos, to_infos, names)\n\n s3 = self.s3_session_client\n\n for to_info, from_info, name in zip(to_infos, from_infos, names):\n if from_info['scheme'] != 's3':\n raise NotImplementedError\n\n if to_info['scheme'] == 's3':\n self._copy(from_info, to_info, s3=s3)\n continue\n\n if to_info['scheme'] != 'local':\n raise NotImplementedError\n\n Logger.debug(\"Downloading '{}/{}' to '{}'\".format(from_info['bucket'],\n from_info['key'],\n to_info['path']))\n\n tmp_file = self.tmp_file(to_info['path'])\n if not name:\n name = os.path.basename(to_info['path'])\n\n if no_progress_bar:\n cb = None\n else:\n total = s3.head_object(Bucket=from_info['bucket'],\n Key=from_info['key'])['ContentLength']\n cb = Callback(name, total)\n\n self._makedirs(to_info['path'])\n\n try:\n s3.download_file(from_info['bucket'], from_info['key'], tmp_file, Callback=cb)\n except Exception as exc:\n Logger.error(\"Failed to download '{}/{}'\".format(from_info['bucket'],\n from_info['key']), exc)\n return\n\n os.rename(tmp_file, to_info['path'])\n\n if not no_progress_bar:\n progress.finish_target(name)\n\n def _path_to_etag(self, path):\n relpath = posixpath.relpath(path, self.prefix)\n return posixpath.dirname(relpath) + posixpath.basename(relpath)\n\n def _all(self):\n objects = self.s3.Bucket(self.bucket).objects.filter(Prefix=self.prefix)\n return [self._path_to_etag(obj.key) for obj in objects]\n\n def gc(self, checksum_infos):\n used_etags = [info[self.PARAM_ETAG] for info in checksum_infos['s3']]\n used_etags += [info[RemoteLOCAL.PARAM_MD5] for info in checksum_infos['local']]\n\n for etag in self._all():\n if etag in used_etags:\n continue\n path_info = {'scheme': 's3',\n 'key': posixpath.join(self.prefix, etag[0:2], 
etag[2:]),\n 'bucket': self.bucket}\n self.remove(path_info)\n", "path": "dvc/remote/s3.py"}]} | 3,297 | 773 |
gh_patches_debug_30340 | rasdani/github-patches | git_diff | acl-org__acl-anthology-1422 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Adding Anthology ID 2021.naacl-srw.21
## Revision or erratum: please add the following information**
- [x] I have attached the revised PDF or erratum to this issue
Hi! I (one of the NAACL SRW chairs) mistakenly omitted this paper when compiling the proceedings. Would it be possible to add it as a new entry `2021.naacl-srw.21` ?
The paper is attached to this issue.
The title is: `Towards Multi-Modal Text-Image Retrieval to improve Human Reading`
The authors are: `Florian Schneider, Özge Alacam, Xintong Wang, Chris Biemann`
[37_Final_Paper_PDF.pdf](https://github.com/acl-org/acl-anthology/files/6704101/37_Final_Paper_PDF.pdf)
Thank you so much!
</issue>
<code>
[start of bin/likely_name_split.py]
1 #!/usr/bin/env python3
2 # Daniel Gildea, 2020
3
4 """Usage: likely_name_split.py [--importdir=DIR]
5
6 Counts first and last names in anthology.
7 Predicts best split into first and last.
8 Checks whether current names match our predictions.
9
10 Options:
11 --importdir=DIR Directory to import XML files from. [default: {scriptdir}/../data/]
12 -h, --help Display this helpful text.
13 """
14
15 from collections import defaultdict
16 from docopt import docopt
17 import pickle, json
18 import sys
19 import re
20 import os
21 from math import *
22
23 from anthology import Anthology
24 from anthology.people import PersonName
25
26
27 class NameSplitter:
28 def __init__(self, anthology=None, anthology_dir=None):
29 # counts of how often each name appears
30 self.first_count = defaultdict(lambda: 0) # "Maria" "Victoria"
31 self.first_full_count = defaultdict(lambda: 0) # "Maria Victoria"
32 self.last_count = defaultdict(lambda: 0) # "van" "den" "Bosch"
33 self.last_full_count = defaultdict(lambda: 0) # "van den Bosch"
34 self.first_total = 0
35 self.last_total = 0
36
37 if os.path.exists("names.cache"):
38 self.load_cache()
39 else:
40 if anthology is None and anthology_dir is not None:
41 anthology = Anthology(os.path.join(anthology_dir, "data"))
42 self.count_names(anthology)
43 self.dump_cache()
44
45 def load_cache(self):
46 with open("names.cache", "r") as cache:
47 p = json.load(cache)
48 self.first_count = defaultdict(int, p["first_count"])
49 self.first_full_count = defaultdict(int, p["first_full_count"])
50 self.first_total = p["first_total"]
51 self.last_count = defaultdict(int, p["last_count"])
52 self.last_full_count = defaultdict(int, p["last_full_count"])
53 self.last_total = p["last_total"]
54 print(f"Loaded cache from names.cache", file=sys.stderr)
55
56 def dump_cache(self):
57 with open("names.cache", "w") as cache:
58 p = {
59 "first_count": self.first_count,
60 "first_full_count": self.first_full_count,
61 "first_total": self.first_total,
62 "last_count": self.last_count,
63 "last_full_count": self.last_full_count,
64 "last_total": self.last_total,
65 }
66 print(json.dumps(p), file=cache)
67 print(f"Dumped counts to names.cache", file=sys.stderr)
68
69 # counts names in anthology database into global vars
70 # first_count last_count (dicts)
71 # first_full_count last_full_count (dicts)
72 # first_total last_total (floats)
73 def count_names(self, anthology):
74 for person in anthology.people.personids():
75 name = anthology.people.get_canonical_name(person)
76 num_papers = len(anthology.people.get_papers(person)) + 0.0
77 # print(name.last, ", ", name.first, num_papers)
78 for w in name.first.split(" "):
79 self.first_count[w] += num_papers
80 self.first_full_count[name.first] += num_papers
81 self.first_total += num_papers
82
83 for w in name.last.split(" "):
84 self.last_count[w] += num_papers
85 self.last_full_count[name.last] += num_papers
86 self.last_total += num_papers
87
88 # takes "Maria Victoria Lopez Gonzalez"
89 # returns ("Lopez Gonzalez", "Maria Victoria")
90 # uses counts of words in first and last names in current database
91 def best_split(self, name):
92 if "," in name and not "Jr." in name:
93 # Short-circuit names that are already split
94 # comma in "William Baumgartner, Jr." does not count as a split
95 surname, given_names = name.split(",")
96 return (surname.strip(), given_names.strip())
97
98 words = name.split(" ")
99 best_score = -inf
100 best = ("", "")
101 # loop over possible split points between first/last
102 for i in range(1, len(words)): # at least one word in each part
103 first = " ".join(words[0:i])
104 last = " ".join(words[i:])
105 # max of log prob of "Maria Victoria" and
106 # log prob of "Maria" + log prob of "Victoria"
107 first_probs = [
108 log((self.first_count[x] + 0.01) / self.first_total) for x in words[0:i]
109 ]
110 first_score = max(
111 log((self.first_full_count[first] + 0.000001) / self.first_total),
112 sum(first_probs),
113 )
114 last_probs = [
115 log((self.last_count[x] + 0.01) / self.last_total) for x in words[i:]
116 ]
117 last_score = max(
118 log((self.last_full_count[last] + 0.000001) / self.last_total),
119 sum(last_probs),
120 )
121
122 if first_score + last_score > best_score:
123 best_score = first_score + last_score
124 best = (last, first)
125 # end of loop over split points
126 return best
127
128
129 if __name__ == "__main__":
130 args = docopt(__doc__)
131 scriptdir = os.path.dirname(os.path.abspath(__file__))
132 if "{scriptdir}" in args["--importdir"]:
133 args["--importdir"] = os.path.abspath(
134 args["--importdir"].format(scriptdir=scriptdir)
135 )
136
137 anthology = Anthology(importdir=args["--importdir"])
138 splitter = NameSplitter(anthology)
139
140 # for all names currently in anthology,
141 # see if they match what we predict
142 for person in anthology.people.personids():
143 name = anthology.people.get_canonical_name(person)
144
145 # find our prediction of split
146 best = splitter.best_split(name.first + " " + name.last)
147
148 # if current split does not match our prediction
149 if not (best[0] == name.last and best[1] == name.first):
150 # print suggested replacement
151 print(name.last, ",", name.first, " ==> ", best[0], ",", best[1])
152
[end of bin/likely_name_split.py]
</code>
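The split scoring in `best_split` above takes, for each candidate split point, the better of the full-string log-probability and the sum of per-word log-probabilities. A toy illustration with invented counts:

```python
from math import log

# Invented counts standing in for the anthology statistics.
first_count = {"Maria": 300.0, "Victoria": 40.0}
first_full_count = {"Maria Victoria": 12.0}
first_total = 100000.0

first = "Maria Victoria"
per_word = sum(log((first_count[w] + 0.01) / first_total) for w in first.split(" "))
full = log((first_full_count.get(first, 0) + 0.000001) / first_total)
first_score = max(full, per_word)  # plausibility of "Maria Victoria" as a first name
```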
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bin/likely_name_split.py b/bin/likely_name_split.py
--- a/bin/likely_name_split.py
+++ b/bin/likely_name_split.py
@@ -24,6 +24,13 @@
from anthology.people import PersonName
+def log0(x):
+ if x == 0:
+ return -inf
+ else:
+ return log(x)
+
+
class NameSplitter:
def __init__(self, anthology=None, anthology_dir=None):
# counts of how often each name appears
@@ -105,18 +112,21 @@
# max of log prob of "Maria Victoria" and
# log prob of "Maria" + log prob of "Victoria"
first_probs = [
- log((self.first_count[x] + 0.01) / self.first_total) for x in words[0:i]
+ # more smoothing for first than last name,
+ # so that default is one-word last name when all counts are zero
+ log((self.first_count[x] + 0.1) / self.first_total)
+ for x in words[0:i]
]
first_score = max(
- log((self.first_full_count[first] + 0.000001) / self.first_total),
+ # no smoothing for multiword name: log(0) => -inf
+ log0((self.first_full_count[first]) / self.first_total),
sum(first_probs),
)
last_probs = [
log((self.last_count[x] + 0.01) / self.last_total) for x in words[i:]
]
last_score = max(
- log((self.last_full_count[last] + 0.000001) / self.last_total),
- sum(last_probs),
+ log0((self.last_full_count[last]) / self.last_total), sum(last_probs)
)
if first_score + last_score > best_score:
| {"golden_diff": "diff --git a/bin/likely_name_split.py b/bin/likely_name_split.py\n--- a/bin/likely_name_split.py\n+++ b/bin/likely_name_split.py\n@@ -24,6 +24,13 @@\n from anthology.people import PersonName\n \n \n+def log0(x):\n+ if x == 0:\n+ return -inf\n+ else:\n+ return log(x)\n+\n+\n class NameSplitter:\n def __init__(self, anthology=None, anthology_dir=None):\n # counts of how often each name appears\n@@ -105,18 +112,21 @@\n # max of log prob of \"Maria Victoria\" and\n # log prob of \"Maria\" + log prob of \"Victoria\"\n first_probs = [\n- log((self.first_count[x] + 0.01) / self.first_total) for x in words[0:i]\n+ # more smoothing for first than last name,\n+ # so that default is one-word last name when all counts are zero\n+ log((self.first_count[x] + 0.1) / self.first_total)\n+ for x in words[0:i]\n ]\n first_score = max(\n- log((self.first_full_count[first] + 0.000001) / self.first_total),\n+ # no smoothing for multiword name: log(0) => -inf\n+ log0((self.first_full_count[first]) / self.first_total),\n sum(first_probs),\n )\n last_probs = [\n log((self.last_count[x] + 0.01) / self.last_total) for x in words[i:]\n ]\n last_score = max(\n- log((self.last_full_count[last] + 0.000001) / self.last_total),\n- sum(last_probs),\n+ log0((self.last_full_count[last]) / self.last_total), sum(last_probs)\n )\n \n if first_score + last_score > best_score:\n", "issue": "Adding Anthology ID 2021.naacl-srw.21\n## Revision or erratum: please add the following information**\r\n\r\n- [x] I have attached the revised PDF or erratum to this issue\r\n\r\nHi! I (one of the NAACL SRW chairs) mistakenly omitted this paper when compiling the proceedings. Would it be possible to add it as a new entry `2021.naacl-srw.21` ?\r\n\r\nThe paper is attached to this issue.\r\nThe title is: `Towards Multi-Modal Text-Image Retrieval to improve Human Reading`\r\nThe authors are: `Florian Schneider, \u00d6zge Alacam, Xintong Wang, Chris Biemann`\r\n[37_Final_Paper_PDF.pdf](https://github.com/acl-org/acl-anthology/files/6704101/37_Final_Paper_PDF.pdf)\r\n\r\nThank you so much!\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Daniel Gildea, 2020\n\n\"\"\"Usage: likely_name_split.py [--importdir=DIR]\n\nCounts first and last names in anthology.\nPredicts best split into first and last.\nChecks whether current names match our predictions.\n\nOptions:\n --importdir=DIR Directory to import XML files from. 
[default: {scriptdir}/../data/]\n -h, --help Display this helpful text.\n\"\"\"\n\nfrom collections import defaultdict\nfrom docopt import docopt\nimport pickle, json\nimport sys\nimport re\nimport os\nfrom math import *\n\nfrom anthology import Anthology\nfrom anthology.people import PersonName\n\n\nclass NameSplitter:\n def __init__(self, anthology=None, anthology_dir=None):\n # counts of how often each name appears\n self.first_count = defaultdict(lambda: 0) # \"Maria\" \"Victoria\"\n self.first_full_count = defaultdict(lambda: 0) # \"Maria Victoria\"\n self.last_count = defaultdict(lambda: 0) # \"van\" \"den\" \"Bosch\"\n self.last_full_count = defaultdict(lambda: 0) # \"van den Bosch\"\n self.first_total = 0\n self.last_total = 0\n\n if os.path.exists(\"names.cache\"):\n self.load_cache()\n else:\n if anthology is None and anthology_dir is not None:\n anthology = Anthology(os.path.join(anthology_dir, \"data\"))\n self.count_names(anthology)\n self.dump_cache()\n\n def load_cache(self):\n with open(\"names.cache\", \"r\") as cache:\n p = json.load(cache)\n self.first_count = defaultdict(int, p[\"first_count\"])\n self.first_full_count = defaultdict(int, p[\"first_full_count\"])\n self.first_total = p[\"first_total\"]\n self.last_count = defaultdict(int, p[\"last_count\"])\n self.last_full_count = defaultdict(int, p[\"last_full_count\"])\n self.last_total = p[\"last_total\"]\n print(f\"Loaded cache from names.cache\", file=sys.stderr)\n\n def dump_cache(self):\n with open(\"names.cache\", \"w\") as cache:\n p = {\n \"first_count\": self.first_count,\n \"first_full_count\": self.first_full_count,\n \"first_total\": self.first_total,\n \"last_count\": self.last_count,\n \"last_full_count\": self.last_full_count,\n \"last_total\": self.last_total,\n }\n print(json.dumps(p), file=cache)\n print(f\"Dumped counts to names.cache\", file=sys.stderr)\n\n # counts names in anthology database into global vars\n # first_count last_count (dicts)\n # first_full_count last_full_count (dicts)\n # first_total last_total (floats)\n def count_names(self, anthology):\n for person in anthology.people.personids():\n name = anthology.people.get_canonical_name(person)\n num_papers = len(anthology.people.get_papers(person)) + 0.0\n # print(name.last, \", \", name.first, num_papers)\n for w in name.first.split(\" \"):\n self.first_count[w] += num_papers\n self.first_full_count[name.first] += num_papers\n self.first_total += num_papers\n\n for w in name.last.split(\" \"):\n self.last_count[w] += num_papers\n self.last_full_count[name.last] += num_papers\n self.last_total += num_papers\n\n # takes \"Maria Victoria Lopez Gonzalez\"\n # returns (\"Lopez Gonzalez\", \"Maria Victoria\")\n # uses counts of words in first and last names in current database\n def best_split(self, name):\n if \",\" in name and not \"Jr.\" in name:\n # Short-circuit names that are already split\n # comma in \"William Baumgartner, Jr.\" does not count as a split\n surname, given_names = name.split(\",\")\n return (surname.strip(), given_names.strip())\n\n words = name.split(\" \")\n best_score = -inf\n best = (\"\", \"\")\n # loop over possible split points between first/last\n for i in range(1, len(words)): # at least one word in each part\n first = \" \".join(words[0:i])\n last = \" \".join(words[i:])\n # max of log prob of \"Maria Victoria\" and\n # log prob of \"Maria\" + log prob of \"Victoria\"\n first_probs = [\n log((self.first_count[x] + 0.01) / self.first_total) for x in words[0:i]\n ]\n first_score = max(\n 
log((self.first_full_count[first] + 0.000001) / self.first_total),\n sum(first_probs),\n )\n last_probs = [\n log((self.last_count[x] + 0.01) / self.last_total) for x in words[i:]\n ]\n last_score = max(\n log((self.last_full_count[last] + 0.000001) / self.last_total),\n sum(last_probs),\n )\n\n if first_score + last_score > best_score:\n best_score = first_score + last_score\n best = (last, first)\n # end of loop over split points\n return best\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n scriptdir = os.path.dirname(os.path.abspath(__file__))\n if \"{scriptdir}\" in args[\"--importdir\"]:\n args[\"--importdir\"] = os.path.abspath(\n args[\"--importdir\"].format(scriptdir=scriptdir)\n )\n\n anthology = Anthology(importdir=args[\"--importdir\"])\n splitter = NameSplitter(anthology)\n\n # for all names currently in anthology,\n # see if they match what we predict\n for person in anthology.people.personids():\n name = anthology.people.get_canonical_name(person)\n\n # find our prediction of split\n best = splitter.best_split(name.first + \" \" + name.last)\n\n # if current split does not match our prediction\n if not (best[0] == name.last and best[1] == name.first):\n # print suggested replacement\n print(name.last, \",\", name.first, \" ==> \", best[0], \",\", best[1])\n", "path": "bin/likely_name_split.py"}]} | 2,448 | 434 |
gh_patches_debug_15957 | rasdani/github-patches | git_diff | ansible__ansible-16239 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Chroot doesn't work with busybox
With busybox, `/bin/sh` can be a relative symlink, and won't resolve correctly prior to `chroot()`.
```
PLAY ***************************************************************************
TASK [setup] *******************************************************************
An exception occurred during task execution. To see the full traceback, use -vvv. The error was: OSError: [Errno 2] No such file or directory: '/rootfs/bin/sh'
fatal: [/rootfs]: FAILED! => {"failed": true, "stdout": ""}
NO MORE HOSTS LEFT *************************************************************
PLAY RECAP *********************************************************************
/rootfs : ok=0 changed=0 unreachable=0 failed=1
```
```
# ls -la /rootfs/bin/sh
lrwxrwxrwx 1 root root 12 Jun 1 22:17 /rootfs/bin/sh -> /bin/busybox
```
A sensible solution would be an option to override [the hardcoded shell path](https://github.com/ansible/ansible/blob/v2.0.0.1-1/lib/ansible/plugins/connection/chroot.py#L67-L68), or something along those lines.
</issue>
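An alternative to a new option is to relax the check so a symlinked `/bin/sh` passes, since the link target only has to resolve once inside the chroot. A standalone sketch of such a check (not the plugin code itself):

```python
import os
from ansible.module_utils.basic import is_executable

def looks_chrootable(chroot):
    chrootsh = os.path.join(chroot, "bin/sh")
    # A link such as /bin/sh -> /bin/busybox may not resolve from outside the
    # chroot, so accept any symlink and let the chroot'ed shell sort it out.
    return is_executable(chrootsh) or (os.path.lexists(chrootsh) and os.path.islink(chrootsh))
```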
<code>
[start of lib/ansible/plugins/connection/chroot.py]
1 # Based on local.py (c) 2012, Michael DeHaan <[email protected]>
2 # (c) 2013, Maykel Moya <[email protected]>
3 # (c) 2015, Toshio Kuratomi <[email protected]>
4 #
5 # This file is part of Ansible
6 #
7 # Ansible is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # Ansible is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
19 from __future__ import (absolute_import, division, print_function)
20 __metaclass__ = type
21
22 import distutils.spawn
23 import os
24 import os.path
25 import pipes
26 import subprocess
27 import traceback
28
29 from ansible import constants as C
30 from ansible.errors import AnsibleError
31 from ansible.plugins.connection import ConnectionBase, BUFSIZE
32 from ansible.module_utils.basic import is_executable
33 from ansible.utils.unicode import to_bytes
34
35 try:
36 from __main__ import display
37 except ImportError:
38 from ansible.utils.display import Display
39 display = Display()
40
41
42 class Connection(ConnectionBase):
43 ''' Local chroot based connections '''
44
45 transport = 'chroot'
46 has_pipelining = True
47 # su currently has an undiagnosed issue with calculating the file
48 # checksums (so copy, for instance, doesn't work right)
49 # Have to look into that before re-enabling this
50 become_methods = frozenset(C.BECOME_METHODS).difference(('su',))
51
52 def __init__(self, play_context, new_stdin, *args, **kwargs):
53 super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
54
55 self.chroot = self._play_context.remote_addr
56
57 if os.geteuid() != 0:
58 raise AnsibleError("chroot connection requires running as root")
59
60 # we're running as root on the local system so do some
61 # trivial checks for ensuring 'host' is actually a chroot'able dir
62 if not os.path.isdir(self.chroot):
63 raise AnsibleError("%s is not a directory" % self.chroot)
64
65 chrootsh = os.path.join(self.chroot, 'bin/sh')
66 if not is_executable(chrootsh):
67 raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
68
69 self.chroot_cmd = distutils.spawn.find_executable('chroot')
70 if not self.chroot_cmd:
71 raise AnsibleError("chroot command not found in PATH")
72
73 def _connect(self):
74 ''' connect to the chroot; nothing to do here '''
75 super(Connection, self)._connect()
76 if not self._connected:
77 display.vvv("THIS IS A LOCAL CHROOT DIR", host=self.chroot)
78 self._connected = True
79
80 def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
81 ''' run a command on the chroot. This is only needed for implementing
82 put_file() get_file() so that we don't have to read the whole file
83 into memory.
84
85         compared to exec_command() it loses some niceties like being able to
86 return the process's exit code immediately.
87 '''
88 executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'
89 local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
90
91 display.vvv("EXEC %s" % (local_cmd), host=self.chroot)
92 local_cmd = [to_bytes(i, errors='strict') for i in local_cmd]
93 p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
94 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
95
96 return p
97
98 def exec_command(self, cmd, in_data=None, sudoable=False):
99 ''' run a command on the chroot '''
100 super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
101
102 p = self._buffered_exec_command(cmd)
103
104 stdout, stderr = p.communicate(in_data)
105 return (p.returncode, stdout, stderr)
106
107 def _prefix_login_path(self, remote_path):
108 ''' Make sure that we put files into a standard path
109
110 If a path is relative, then we need to choose where to put it.
111 ssh chooses $HOME but we aren't guaranteed that a home dir will
112 exist in any given chroot. So for now we're choosing "/" instead.
113 This also happens to be the former default.
114
115 Can revisit using $HOME instead if it's a problem
116 '''
117 if not remote_path.startswith(os.path.sep):
118 remote_path = os.path.join(os.path.sep, remote_path)
119 return os.path.normpath(remote_path)
120
121 def put_file(self, in_path, out_path):
122 ''' transfer a file from local to chroot '''
123 super(Connection, self).put_file(in_path, out_path)
124 display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)
125
126 out_path = pipes.quote(self._prefix_login_path(out_path))
127 try:
128 with open(to_bytes(in_path, errors='strict'), 'rb') as in_file:
129 try:
130 p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file)
131 except OSError:
132 raise AnsibleError("chroot connection requires dd command in the chroot")
133 try:
134 stdout, stderr = p.communicate()
135 except:
136 traceback.print_exc()
137 raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
138 if p.returncode != 0:
139 raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
140 except IOError:
141 raise AnsibleError("file or module does not exist at: %s" % in_path)
142
143 def fetch_file(self, in_path, out_path):
144 ''' fetch a file from chroot to local '''
145 super(Connection, self).fetch_file(in_path, out_path)
146 display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
147
148 in_path = pipes.quote(self._prefix_login_path(in_path))
149 try:
150 p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
151 except OSError:
152 raise AnsibleError("chroot connection requires dd command in the chroot")
153
154 with open(to_bytes(out_path, errors='strict'), 'wb+') as out_file:
155 try:
156 chunk = p.stdout.read(BUFSIZE)
157 while chunk:
158 out_file.write(chunk)
159 chunk = p.stdout.read(BUFSIZE)
160 except:
161 traceback.print_exc()
162 raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
163 stdout, stderr = p.communicate()
164 if p.returncode != 0:
165 raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
166
167 def close(self):
168 ''' terminate the connection; nothing to do here '''
169 super(Connection, self).close()
170 self._connected = False
171
[end of lib/ansible/plugins/connection/chroot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/ansible/plugins/connection/chroot.py b/lib/ansible/plugins/connection/chroot.py
--- a/lib/ansible/plugins/connection/chroot.py
+++ b/lib/ansible/plugins/connection/chroot.py
@@ -63,7 +63,11 @@
raise AnsibleError("%s is not a directory" % self.chroot)
chrootsh = os.path.join(self.chroot, 'bin/sh')
- if not is_executable(chrootsh):
+ # Want to check for a usable bourne shell inside the chroot.
+ # is_executable() == True is sufficient. For symlinks it
+ # gets really complicated really fast. So we punt on finding that
+ # out. As long as it's a symlink we assume that it will work
+ if not (is_executable(chrootsh) or (os.path.lexists(chrootsh) and os.path.islink(chrootsh))):
raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
self.chroot_cmd = distutils.spawn.find_executable('chroot')
| {"golden_diff": "diff --git a/lib/ansible/plugins/connection/chroot.py b/lib/ansible/plugins/connection/chroot.py\n--- a/lib/ansible/plugins/connection/chroot.py\n+++ b/lib/ansible/plugins/connection/chroot.py\n@@ -63,7 +63,11 @@\n raise AnsibleError(\"%s is not a directory\" % self.chroot)\n \n chrootsh = os.path.join(self.chroot, 'bin/sh')\n- if not is_executable(chrootsh):\n+ # Want to check for a usable bourne shell inside the chroot.\n+ # is_executable() == True is sufficient. For symlinks it\n+ # gets really complicated really fast. So we punt on finding that\n+ # out. As long as it's a symlink we assume that it will work\n+ if not (is_executable(chrootsh) or (os.path.lexists(chrootsh) and os.path.islink(chrootsh))):\n raise AnsibleError(\"%s does not look like a chrootable dir (/bin/sh missing)\" % self.chroot)\n \n self.chroot_cmd = distutils.spawn.find_executable('chroot')\n", "issue": "Chroot doesn't work with busybox\nWith busybox, `/bin/sh` can be a relative symlink, and won't resolve correctly prior to `chroot()`.\n\n```\nPLAY ***************************************************************************\n\nTASK [setup] *******************************************************************\nAn exception occurred during task execution. To see the full traceback, use -vvv. The error was: OSError: [Errno 2] No such file or directory: '/rootfs/bin/sh'\nfatal: [/rootfs]: FAILED! => {\"failed\": true, \"stdout\": \"\"}\n\nNO MORE HOSTS LEFT *************************************************************\n\nPLAY RECAP *********************************************************************\n/rootfs : ok=0 changed=0 unreachable=0 failed=1 \n```\n\n```\n# ls -la /rootfs/bin/sh\nlrwxrwxrwx 1 root root 12 Jun 1 22:17 /rootfs/bin/sh -> /bin/busybox\n```\n\nA sensible solution for this would be through an option to override [the hardcoded shell path](https://github.com/ansible/ansible/blob/v2.0.0.1-1/lib/ansible/plugins/connection/chroot.py#L67-L68), or something like that.\n\n", "before_files": [{"content": "# Based on local.py (c) 2012, Michael DeHaan <[email protected]>\n# (c) 2013, Maykel Moya <[email protected]>\n# (c) 2015, Toshio Kuratomi <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport distutils.spawn\nimport os\nimport os.path\nimport pipes\nimport subprocess\nimport traceback\n\nfrom ansible import constants as C\nfrom ansible.errors import AnsibleError\nfrom ansible.plugins.connection import ConnectionBase, BUFSIZE\nfrom ansible.module_utils.basic import is_executable\nfrom ansible.utils.unicode import to_bytes\n\ntry:\n from __main__ import display\nexcept ImportError:\n from ansible.utils.display import Display\n display = Display()\n\n\nclass Connection(ConnectionBase):\n ''' Local chroot based connections '''\n\n transport = 'chroot'\n has_pipelining = True\n # su currently has an undiagnosed issue with calculating the file\n # checksums (so copy, for instance, doesn't work right)\n # Have to look into that before re-enabling this\n become_methods = frozenset(C.BECOME_METHODS).difference(('su',))\n\n def __init__(self, play_context, new_stdin, *args, **kwargs):\n super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)\n\n self.chroot = self._play_context.remote_addr\n\n if os.geteuid() != 0:\n raise AnsibleError(\"chroot connection requires running as root\")\n\n # we're running as root on the local system so do some\n # trivial checks for ensuring 'host' is actually a chroot'able dir\n if not os.path.isdir(self.chroot):\n raise AnsibleError(\"%s is not a directory\" % self.chroot)\n\n chrootsh = os.path.join(self.chroot, 'bin/sh')\n if not is_executable(chrootsh):\n raise AnsibleError(\"%s does not look like a chrootable dir (/bin/sh missing)\" % self.chroot)\n\n self.chroot_cmd = distutils.spawn.find_executable('chroot')\n if not self.chroot_cmd:\n raise AnsibleError(\"chroot command not found in PATH\")\n\n def _connect(self):\n ''' connect to the chroot; nothing to do here '''\n super(Connection, self)._connect()\n if not self._connected:\n display.vvv(\"THIS IS A LOCAL CHROOT DIR\", host=self.chroot)\n self._connected = True\n\n def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):\n ''' run a command on the chroot. This is only needed for implementing\n put_file() get_file() so that we don't have to read the whole file\n into memory.\n\n compared to exec_command() it looses some niceties like being able to\n return the process's exit code immediately.\n '''\n executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'\n local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]\n\n display.vvv(\"EXEC %s\" % (local_cmd), host=self.chroot)\n local_cmd = [to_bytes(i, errors='strict') for i in local_cmd]\n p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n return p\n\n def exec_command(self, cmd, in_data=None, sudoable=False):\n ''' run a command on the chroot '''\n super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)\n\n p = self._buffered_exec_command(cmd)\n\n stdout, stderr = p.communicate(in_data)\n return (p.returncode, stdout, stderr)\n\n def _prefix_login_path(self, remote_path):\n ''' Make sure that we put files into a standard path\n\n If a path is relative, then we need to choose where to put it.\n ssh chooses $HOME but we aren't guaranteed that a home dir will\n exist in any given chroot. 
So for now we're choosing \"/\" instead.\n This also happens to be the former default.\n\n Can revisit using $HOME instead if it's a problem\n '''\n if not remote_path.startswith(os.path.sep):\n remote_path = os.path.join(os.path.sep, remote_path)\n return os.path.normpath(remote_path)\n\n def put_file(self, in_path, out_path):\n ''' transfer a file from local to chroot '''\n super(Connection, self).put_file(in_path, out_path)\n display.vvv(\"PUT %s TO %s\" % (in_path, out_path), host=self.chroot)\n\n out_path = pipes.quote(self._prefix_login_path(out_path))\n try:\n with open(to_bytes(in_path, errors='strict'), 'rb') as in_file:\n try:\n p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file)\n except OSError:\n raise AnsibleError(\"chroot connection requires dd command in the chroot\")\n try:\n stdout, stderr = p.communicate()\n except:\n traceback.print_exc()\n raise AnsibleError(\"failed to transfer file %s to %s\" % (in_path, out_path))\n if p.returncode != 0:\n raise AnsibleError(\"failed to transfer file %s to %s:\\n%s\\n%s\" % (in_path, out_path, stdout, stderr))\n except IOError:\n raise AnsibleError(\"file or module does not exist at: %s\" % in_path)\n\n def fetch_file(self, in_path, out_path):\n ''' fetch a file from chroot to local '''\n super(Connection, self).fetch_file(in_path, out_path)\n display.vvv(\"FETCH %s TO %s\" % (in_path, out_path), host=self.chroot)\n\n in_path = pipes.quote(self._prefix_login_path(in_path))\n try:\n p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))\n except OSError:\n raise AnsibleError(\"chroot connection requires dd command in the chroot\")\n\n with open(to_bytes(out_path, errors='strict'), 'wb+') as out_file:\n try:\n chunk = p.stdout.read(BUFSIZE)\n while chunk:\n out_file.write(chunk)\n chunk = p.stdout.read(BUFSIZE)\n except:\n traceback.print_exc()\n raise AnsibleError(\"failed to transfer file %s to %s\" % (in_path, out_path))\n stdout, stderr = p.communicate()\n if p.returncode != 0:\n raise AnsibleError(\"failed to transfer file %s to %s:\\n%s\\n%s\" % (in_path, out_path, stdout, stderr))\n\n def close(self):\n ''' terminate the connection; nothing to do here '''\n super(Connection, self).close()\n self._connected = False\n", "path": "lib/ansible/plugins/connection/chroot.py"}]} | 2,902 | 249 |
gh_patches_debug_3456 | rasdani/github-patches | git_diff | CTFd__CTFd-1827 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Set plugin migration version in between each migration
https://github.com/CTFd/CTFd/blob/e1991e16963b10302baa7cc50d52071a5053bf2f/CTFd/plugins/migrations.py#L72-L77
This code should probably set the plugin version in between each migration, so that if a migration fails it doesn't need to be started from the beginning again.
</issue>
<code>
[start of CTFd/plugins/migrations.py]
1 import inspect
2 import os
3
4 from alembic.config import Config
5 from alembic.migration import MigrationContext
6 from alembic.operations import Operations
7 from alembic.script import ScriptDirectory
8 from flask import current_app
9 from sqlalchemy import create_engine, pool
10
11 from CTFd.utils import get_config, set_config
12
13
14 def current(plugin_name=None):
15 if plugin_name is None:
16 # Get the directory name of the plugin if unspecified
17 # Doing it this way doesn't waste the rest of the inspect.stack call
18 frame = inspect.currentframe()
19 caller_info = inspect.getframeinfo(frame.f_back)
20 caller_path = caller_info[0]
21 plugin_name = os.path.basename(os.path.dirname(caller_path))
22
23 return get_config(plugin_name + "_alembic_version")
24
25
26 def upgrade(plugin_name=None, revision=None, lower="current"):
27 database_url = current_app.config.get("SQLALCHEMY_DATABASE_URI")
28 if database_url.startswith("sqlite"):
29 current_app.db.create_all()
30 return
31
32 if plugin_name is None:
33 # Get the directory name of the plugin if unspecified
34 # Doing it this way doesn't waste the rest of the inspect.stack call
35 frame = inspect.currentframe()
36 caller_info = inspect.getframeinfo(frame.f_back)
37 caller_path = caller_info[0]
38 plugin_name = os.path.basename(os.path.dirname(caller_path))
39
40 # Check if the plugin has migraitons
41 migrations_path = os.path.join(current_app.plugins_dir, plugin_name, "migrations")
42 if os.path.isdir(migrations_path) is False:
43 return
44
45 engine = create_engine(database_url, poolclass=pool.NullPool)
46 conn = engine.connect()
47 context = MigrationContext.configure(conn)
48 op = Operations(context)
49
50 # Find the list of migrations to run
51 config = Config()
52 config.set_main_option("script_location", migrations_path)
53 config.set_main_option("version_locations", migrations_path)
54 script = ScriptDirectory.from_config(config)
55
56 # Choose base revision for plugin upgrade
57 # "current" points to the current plugin version stored in config
58 # None represents the absolute base layer (e.g. first installation)
59 if lower == "current":
60 lower = get_config(plugin_name + "_alembic_version")
61
62 # Do we upgrade to head or to a specific revision
63 if revision is None:
64 upper = script.get_current_head()
65 else:
66 upper = revision
67
68 # Apply from lower to upper
69 revs = list(script.iterate_revisions(lower=lower, upper=upper))
70 revs.reverse()
71
72 try:
73 for r in revs:
74 with context.begin_transaction():
75 r.module.upgrade(op=op)
76 finally:
77 conn.close()
78
79 # Set the new latest revision
80 set_config(plugin_name + "_alembic_version", upper)
81
[end of CTFd/plugins/migrations.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/CTFd/plugins/migrations.py b/CTFd/plugins/migrations.py
--- a/CTFd/plugins/migrations.py
+++ b/CTFd/plugins/migrations.py
@@ -73,6 +73,9 @@
for r in revs:
with context.begin_transaction():
r.module.upgrade(op=op)
+ # Set revision that succeeded so we don't need
+ # to start from the beginning on failure
+ set_config(plugin_name + "_alembic_version", r.revision)
finally:
conn.close()
| {"golden_diff": "diff --git a/CTFd/plugins/migrations.py b/CTFd/plugins/migrations.py\n--- a/CTFd/plugins/migrations.py\n+++ b/CTFd/plugins/migrations.py\n@@ -73,6 +73,9 @@\n for r in revs:\n with context.begin_transaction():\n r.module.upgrade(op=op)\n+ # Set revision that succeeded so we don't need\n+ # to start from the beginning on failure\n+ set_config(plugin_name + \"_alembic_version\", r.revision)\n finally:\n conn.close()\n", "issue": "Set plugin migration version in between each migration\nhttps://github.com/CTFd/CTFd/blob/e1991e16963b10302baa7cc50d52071a5053bf2f/CTFd/plugins/migrations.py#L72-L77\r\n\r\nThis code here probably should be setting the plugin version in between each migration so that if a migration fails it doesn't need to be started from the beginning again. \n", "before_files": [{"content": "import inspect\nimport os\n\nfrom alembic.config import Config\nfrom alembic.migration import MigrationContext\nfrom alembic.operations import Operations\nfrom alembic.script import ScriptDirectory\nfrom flask import current_app\nfrom sqlalchemy import create_engine, pool\n\nfrom CTFd.utils import get_config, set_config\n\n\ndef current(plugin_name=None):\n if plugin_name is None:\n # Get the directory name of the plugin if unspecified\n # Doing it this way doesn't waste the rest of the inspect.stack call\n frame = inspect.currentframe()\n caller_info = inspect.getframeinfo(frame.f_back)\n caller_path = caller_info[0]\n plugin_name = os.path.basename(os.path.dirname(caller_path))\n\n return get_config(plugin_name + \"_alembic_version\")\n\n\ndef upgrade(plugin_name=None, revision=None, lower=\"current\"):\n database_url = current_app.config.get(\"SQLALCHEMY_DATABASE_URI\")\n if database_url.startswith(\"sqlite\"):\n current_app.db.create_all()\n return\n\n if plugin_name is None:\n # Get the directory name of the plugin if unspecified\n # Doing it this way doesn't waste the rest of the inspect.stack call\n frame = inspect.currentframe()\n caller_info = inspect.getframeinfo(frame.f_back)\n caller_path = caller_info[0]\n plugin_name = os.path.basename(os.path.dirname(caller_path))\n\n # Check if the plugin has migraitons\n migrations_path = os.path.join(current_app.plugins_dir, plugin_name, \"migrations\")\n if os.path.isdir(migrations_path) is False:\n return\n\n engine = create_engine(database_url, poolclass=pool.NullPool)\n conn = engine.connect()\n context = MigrationContext.configure(conn)\n op = Operations(context)\n\n # Find the list of migrations to run\n config = Config()\n config.set_main_option(\"script_location\", migrations_path)\n config.set_main_option(\"version_locations\", migrations_path)\n script = ScriptDirectory.from_config(config)\n\n # Choose base revision for plugin upgrade\n # \"current\" points to the current plugin version stored in config\n # None represents the absolute base layer (e.g. first installation)\n if lower == \"current\":\n lower = get_config(plugin_name + \"_alembic_version\")\n\n # Do we upgrade to head or to a specific revision\n if revision is None:\n upper = script.get_current_head()\n else:\n upper = revision\n\n # Apply from lower to upper\n revs = list(script.iterate_revisions(lower=lower, upper=upper))\n revs.reverse()\n\n try:\n for r in revs:\n with context.begin_transaction():\n r.module.upgrade(op=op)\n finally:\n conn.close()\n\n # Set the new latest revision\n set_config(plugin_name + \"_alembic_version\", upper)\n", "path": "CTFd/plugins/migrations.py"}]} | 1,408 | 122 |
gh_patches_debug_23213 | rasdani/github-patches | git_diff | microsoft__lisa-1567 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Command not found (PATH does not contain /usr/sbin)
Getting errors when using LISAv3 to deploy and test CentOS 7_9 on Azure
`[ERROR] lisa.env[generated_0].node[0].cmd[7289] not found command: Command not found: modinfo. Check that modinfo is installed and on $PATH`
`[ERROR] lisa.env[generated_0].node[0].cmd[1038] not found command: Command not found: waagent. Check that waagent is installed and on $PATH`
`[ERROR] lisa.env[generated_0].node[0].cmd[8629] not found command: Command not found: lsmod. Check that lsmod is installed and on $PATH`
SSHing into the node confirms that all three of these commands are present and runnable on the node.
The error about modinfo missing appears to occur before any tests start running. These errors do not occur when deploying and testing Ubuntu 18.04-LTS.
</issue>
<code>
[start of lisa/tools/modinfo.py]
1 # Copyright (c) Microsoft Corporation.
2 # Licensed under the MIT license.
3
4 import re
5 from typing import Any
6
7 from lisa.executable import Tool
8 from lisa.util import find_patterns_in_lines
9
10
11 class Modinfo(Tool):
12 __version_pattern = re.compile(r"^version:[ \t]*([^ \n]*)")
13
14 @property
15 def command(self) -> str:
16 return self._command
17
18 def _check_exists(self) -> bool:
19 return True
20
21 def _initialize(self, *args: Any, **kwargs: Any) -> None:
22 self._command = "modinfo"
23
24 def get_info(
25 self,
26 mod_name: str,
27 force_run: bool = False,
28 no_info_log: bool = True,
29 no_error_log: bool = True,
30 ) -> str:
31 result = self.run(
32 mod_name,
33 force_run=force_run,
34 no_info_log=no_info_log,
35 no_error_log=no_error_log,
36 )
37 if result.exit_code != 0:
38 # CentOS may not include the path when started,
39 # specify path and try again.
40 self._command = "/usr/sbin/modinfo"
41 result = self.run(
42 mod_name,
43 force_run=force_run,
44 no_info_log=no_info_log,
45 no_error_log=no_error_log,
46 )
47 return result.stdout
48
49 def get_version(
50 self,
51 mod_name: str,
52 force_run: bool = False,
53 no_info_log: bool = True,
54 no_error_log: bool = True,
55 ) -> str:
56 output = self.get_info(
57 mod_name=mod_name,
58 force_run=force_run,
59 no_info_log=no_info_log,
60 no_error_log=no_error_log,
61 )
62 found_version = find_patterns_in_lines(output, [self.__version_pattern])
63 return found_version[0][0] if found_version[0] else ""
64
[end of lisa/tools/modinfo.py]
[start of lisa/tools/lsmod.py]
1 # Copyright (c) Microsoft Corporation.
2 # Licensed under the MIT license.
3
4 import re
5 from typing import Any
6
7 from lisa.executable import Tool
8 from lisa.util import LisaException, find_patterns_in_lines
9
10
11 class Lsmod(Tool):
12 # The output of lsmod command is in below format -
13 # Module Size Used by
14 # fuse 52176 3
15 # cryptd 14125 0
16 # aes_generic 32970 1 aes_i586
17 __output_pattern = re.compile(
18 r"^(?P<name>[^\s]+)\s+(?P<size>[^\s]+)\s+(?P<usedby>.*)?$", re.MULTILINE
19 )
20
21 @property
22 def command(self) -> str:
23 return self._command
24
25 def _check_exists(self) -> bool:
26 return True
27
28 def _initialize(self, *args: Any, **kwargs: Any) -> None:
29 self._command = "lsmod"
30
31 def module_exists(
32 self,
33 mod_name: str = "",
34 force_run: bool = False,
35 no_info_log: bool = True,
36 no_error_log: bool = True,
37 ) -> bool:
38 result = self.run(
39 force_run=force_run,
40 no_info_log=no_info_log,
41 no_error_log=no_error_log,
42 )
43 if result.exit_code != 0:
44 raise LisaException(
45 f"{self._command} command got non-zero exit code: {result.exit_code}"
46 )
47
48 module_info = find_patterns_in_lines(result.stdout, [self.__output_pattern])
49 if any(mod_name in info for sublist in module_info for info in sublist):
50 return True
51
52 return False
53
[end of lisa/tools/lsmod.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lisa/tools/lsmod.py b/lisa/tools/lsmod.py
--- a/lisa/tools/lsmod.py
+++ b/lisa/tools/lsmod.py
@@ -36,6 +36,7 @@
no_error_log: bool = True,
) -> bool:
result = self.run(
+ sudo=True,
force_run=force_run,
no_info_log=no_info_log,
no_error_log=no_error_log,
diff --git a/lisa/tools/modinfo.py b/lisa/tools/modinfo.py
--- a/lisa/tools/modinfo.py
+++ b/lisa/tools/modinfo.py
@@ -30,20 +30,13 @@
) -> str:
result = self.run(
mod_name,
+ sudo=True,
force_run=force_run,
no_info_log=no_info_log,
no_error_log=no_error_log,
+ expected_exit_code=0,
+ expected_exit_code_failure_message=f"Modinfo failed for module {mod_name}",
)
- if result.exit_code != 0:
- # CentOS may not include the path when started,
- # specify path and try again.
- self._command = "/usr/sbin/modinfo"
- result = self.run(
- mod_name,
- force_run=force_run,
- no_info_log=no_info_log,
- no_error_log=no_error_log,
- )
return result.stdout
def get_version(
| {"golden_diff": "diff --git a/lisa/tools/lsmod.py b/lisa/tools/lsmod.py\n--- a/lisa/tools/lsmod.py\n+++ b/lisa/tools/lsmod.py\n@@ -36,6 +36,7 @@\n no_error_log: bool = True,\n ) -> bool:\n result = self.run(\n+ sudo=True,\n force_run=force_run,\n no_info_log=no_info_log,\n no_error_log=no_error_log,\ndiff --git a/lisa/tools/modinfo.py b/lisa/tools/modinfo.py\n--- a/lisa/tools/modinfo.py\n+++ b/lisa/tools/modinfo.py\n@@ -30,20 +30,13 @@\n ) -> str:\n result = self.run(\n mod_name,\n+ sudo=True,\n force_run=force_run,\n no_info_log=no_info_log,\n no_error_log=no_error_log,\n+ expected_exit_code=0,\n+ expected_exit_code_failure_message=f\"Modinfo failed for module {mod_name}\",\n )\n- if result.exit_code != 0:\n- # CentOS may not include the path when started,\n- # specify path and try again.\n- self._command = \"/usr/sbin/modinfo\"\n- result = self.run(\n- mod_name,\n- force_run=force_run,\n- no_info_log=no_info_log,\n- no_error_log=no_error_log,\n- )\n return result.stdout\n \n def get_version(\n", "issue": "Command not found (PATH does not contain /usr/sbin)\nGetting errors when using LISAv3 to deploy and test CentOS 7_9 on Azure\r\n\r\n`[ERROR] lisa.env[generated_0].node[0].cmd[7289] not found command: Command not found: modinfo. Check that modinfo is installed and on $PATH`\r\n\r\n`[ERROR] lisa.env[generated_0].node[0].cmd[1038] not found command: Command not found: waagent. Check that waagent is installed and on $PATH`\r\n\r\n`[ERROR] lisa.env[generated_0].node[0].cmd[8629] not found command: Command not found: lsmod. Check that lsmod is installed and on $PATH`\r\n\r\nSSHing into the node confirms that all three of these commands are present and runnable on the node.\r\n\r\nThe error about modinfo missing appears to occur before any tests start running. 
These errors do not occur when deploying and testing Ubuntu 18.04-LTS.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport re\nfrom typing import Any\n\nfrom lisa.executable import Tool\nfrom lisa.util import find_patterns_in_lines\n\n\nclass Modinfo(Tool):\n __version_pattern = re.compile(r\"^version:[ \\t]*([^ \\n]*)\")\n\n @property\n def command(self) -> str:\n return self._command\n\n def _check_exists(self) -> bool:\n return True\n\n def _initialize(self, *args: Any, **kwargs: Any) -> None:\n self._command = \"modinfo\"\n\n def get_info(\n self,\n mod_name: str,\n force_run: bool = False,\n no_info_log: bool = True,\n no_error_log: bool = True,\n ) -> str:\n result = self.run(\n mod_name,\n force_run=force_run,\n no_info_log=no_info_log,\n no_error_log=no_error_log,\n )\n if result.exit_code != 0:\n # CentOS may not include the path when started,\n # specify path and try again.\n self._command = \"/usr/sbin/modinfo\"\n result = self.run(\n mod_name,\n force_run=force_run,\n no_info_log=no_info_log,\n no_error_log=no_error_log,\n )\n return result.stdout\n\n def get_version(\n self,\n mod_name: str,\n force_run: bool = False,\n no_info_log: bool = True,\n no_error_log: bool = True,\n ) -> str:\n output = self.get_info(\n mod_name=mod_name,\n force_run=force_run,\n no_info_log=no_info_log,\n no_error_log=no_error_log,\n )\n found_version = find_patterns_in_lines(output, [self.__version_pattern])\n return found_version[0][0] if found_version[0] else \"\"\n", "path": "lisa/tools/modinfo.py"}, {"content": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport re\nfrom typing import Any\n\nfrom lisa.executable import Tool\nfrom lisa.util import LisaException, find_patterns_in_lines\n\n\nclass Lsmod(Tool):\n # The output of lsmod command is in below format -\n # Module Size Used by\n # fuse 52176 3\n # cryptd 14125 0\n # aes_generic 32970 1 aes_i586\n __output_pattern = re.compile(\n r\"^(?P<name>[^\\s]+)\\s+(?P<size>[^\\s]+)\\s+(?P<usedby>.*)?$\", re.MULTILINE\n )\n\n @property\n def command(self) -> str:\n return self._command\n\n def _check_exists(self) -> bool:\n return True\n\n def _initialize(self, *args: Any, **kwargs: Any) -> None:\n self._command = \"lsmod\"\n\n def module_exists(\n self,\n mod_name: str = \"\",\n force_run: bool = False,\n no_info_log: bool = True,\n no_error_log: bool = True,\n ) -> bool:\n result = self.run(\n force_run=force_run,\n no_info_log=no_info_log,\n no_error_log=no_error_log,\n )\n if result.exit_code != 0:\n raise LisaException(\n f\"{self._command} command got non-zero exit code: {result.exit_code}\"\n )\n\n module_info = find_patterns_in_lines(result.stdout, [self.__output_pattern])\n if any(mod_name in info for sublist in module_info for info in sublist):\n return True\n\n return False\n", "path": "lisa/tools/lsmod.py"}]} | 1,806 | 315 |
gh_patches_debug_13297 | rasdani/github-patches | git_diff | celery__celery-1206 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MongoDB and BROKER_USE_SSL=True
I've recently started with MongoDB and BROKER_USE_SSL=True, and this doesn't seem to work. Celery keeps trying to reconnect with a "Re-establishing connection" message. BROKER_USE_SSL=False works well.
``` python
[2013-02-21 14:57:45,708: DEBUG/MainProcess] consumer: Re-establishing connection to the broker...
[2013-02-21 14:57:45,710: INFO/MainProcess] consumer: Connected to mongodb://localhost%3A27017%2Fdata/data.
[2013-02-21 14:57:45,714: ERROR/MainProcess] consumer: Connection to broker lost. Trying to re-establish the connection...
Traceback (most recent call last):
File "/stuff/eggs/celery-3.0.13-py2.7.egg/celery/worker/consumer.py", line 392, in start
self.reset_connection()
File "/stuff/eggs/celery-3.0.13-py2.7.egg/celery/worker/consumer.py", line 741, in reset_connection
self.connection, on_decode_error=self.on_decode_error,
File "/stuff/eggs/celery-3.0.13-py2.7.egg/celery/app/amqp.py", line 291, in __init__
queues or self.app.amqp.queues.consume_from.values(), **kw
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py", line 338, in __init__
self.revive(self.channel)
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py", line 350, in revive
self.declare()
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py", line 360, in declare
queue.declare()
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/entity.py", line 471, in declare
self.queue_declare(nowait, passive=False)
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/entity.py", line 497, in queue_declare
nowait=nowait)
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/virtual/__init__.py", line 398, in queue_declare
return queue, self._size(queue), 0
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py", line 76, in _size
return self.client.messages.find({'queue': queue}).count()
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py", line 204, in client
self._client = self._open()
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py", line 133, in _open
mongoconn = Connection(host=hostname)
File "/stuff/eggs/pymongo-2.4.2-py2.7-linux-x86_64.egg/pymongo/connection.py", line 180, in __init__
max_pool_size, document_class, tz_aware, _connect, **kwargs)
File "/stuff/eggs/pymongo-2.4.2-py2.7-linux-x86_64.egg/pymongo/mongo_client.py", line 269, in __init__
raise ConnectionFailure(str(e))
ConnectionFailure: [Errno 104] Connection reset by peer
[2013-02-21 14:57:45,716: DEBUG/MainProcess] consumer: Re-establishing connection to the broker...
[2013-02-21 14:57:45,718: INFO/MainProcess] consumer: Connected to mongodb://localhost%3A27017%2Fdata/data.
[2013-02-21 14:57:45,721: ERROR/MainProcess] consumer: Connection to broker lost. Trying to re-establish the connection...
```
The problem seems to be generated by this line:
https://github.com/celery/kombu/blob/master/kombu/transport/mongodb.py#L135
which should take an ssl=True parameter for SSL connections.
I know this is a kombu component, but the setting (BROKER_USE_SSL) that leads to this problem is part of the celery library.
</issue>
<code>
[start of celery/backends/mongodb.py]
1 # -*- coding: utf-8 -*-
2 """
3 celery.backends.mongodb
4 ~~~~~~~~~~~~~~~~~~~~~~~
5
6 MongoDB result store backend.
7
8 """
9 from __future__ import absolute_import
10
11 from datetime import datetime
12
13 try:
14 import pymongo
15 except ImportError: # pragma: no cover
16 pymongo = None # noqa
17
18 if pymongo:
19 try:
20 from bson.binary import Binary
21 except ImportError: # pragma: no cover
22 from pymongo.binary import Binary # noqa
23 else: # pragma: no cover
24 Binary = None # noqa
25
26 from kombu.utils import cached_property
27
28 from celery import states
29 from celery.exceptions import ImproperlyConfigured
30 from celery.five import string_t
31 from celery.utils.timeutils import maybe_timedelta
32
33 from .base import BaseBackend
34
35
36 class Bunch(object):
37
38 def __init__(self, **kw):
39 self.__dict__.update(kw)
40
41
42 class MongoBackend(BaseBackend):
43 mongodb_host = 'localhost'
44 mongodb_port = 27017
45 mongodb_user = None
46 mongodb_password = None
47 mongodb_database = 'celery'
48 mongodb_taskmeta_collection = 'celery_taskmeta'
49 mongodb_max_pool_size = 10
50
51 def __init__(self, *args, **kwargs):
52 """Initialize MongoDB backend instance.
53
54 :raises celery.exceptions.ImproperlyConfigured: if
55 module :mod:`pymongo` is not available.
56
57 """
58 super(MongoBackend, self).__init__(*args, **kwargs)
59 self.expires = kwargs.get('expires') or maybe_timedelta(
60 self.app.conf.CELERY_TASK_RESULT_EXPIRES)
61
62 if not pymongo:
63 raise ImproperlyConfigured(
64 'You need to install the pymongo library to use the '
65 'MongoDB backend.')
66
67 config = self.app.conf.get('CELERY_MONGODB_BACKEND_SETTINGS', None)
68 if config is not None:
69 if not isinstance(config, dict):
70 raise ImproperlyConfigured(
71 'MongoDB backend settings should be grouped in a dict')
72
73 self.mongodb_host = config.get('host', self.mongodb_host)
74 self.mongodb_port = int(config.get('port', self.mongodb_port))
75 self.mongodb_user = config.get('user', self.mongodb_user)
76 self.mongodb_password = config.get(
77 'password', self.mongodb_password)
78 self.mongodb_database = config.get(
79 'database', self.mongodb_database)
80 self.mongodb_taskmeta_collection = config.get(
81 'taskmeta_collection', self.mongodb_taskmeta_collection)
82 self.mongodb_max_pool_size = config.get(
83 'max_pool_size', self.mongodb_max_pool_size)
84
85 self._connection = None
86
87 def _get_connection(self):
88 """Connect to the MongoDB server."""
89 if self._connection is None:
90 from pymongo.connection import Connection
91
92 # The first pymongo.Connection() argument (host) can be
93 # a list of ['host:port'] elements or a mongodb connection
94 # URI. If this is the case, don't use self.mongodb_port
95 # but let pymongo get the port(s) from the URI instead.
96 # This enables the use of replica sets and sharding.
97 # See pymongo.Connection() for more info.
98 args = [self.mongodb_host]
99 kwargs = {'max_pool_size': self.mongodb_max_pool_size}
100 if isinstance(self.mongodb_host, string_t) \
101 and not self.mongodb_host.startswith('mongodb://'):
102 args.append(self.mongodb_port)
103
104 self._connection = Connection(*args, **kwargs)
105
106 return self._connection
107
108 def process_cleanup(self):
109 if self._connection is not None:
110 # MongoDB connection will be closed automatically when object
111 # goes out of scope
112 self._connection = None
113
114 def _store_result(self, task_id, result, status, traceback=None):
115 """Store return value and status of an executed task."""
116 meta = {'_id': task_id,
117 'status': status,
118 'result': Binary(self.encode(result)),
119 'date_done': datetime.utcnow(),
120 'traceback': Binary(self.encode(traceback)),
121 'children': Binary(self.encode(self.current_task_children()))}
122 self.collection.save(meta, safe=True)
123
124 return result
125
126 def _get_task_meta_for(self, task_id):
127 """Get task metadata for a task by id."""
128
129 obj = self.collection.find_one({'_id': task_id})
130 if not obj:
131 return {'status': states.PENDING, 'result': None}
132
133 meta = {
134 'task_id': obj['_id'],
135 'status': obj['status'],
136 'result': self.decode(obj['result']),
137 'date_done': obj['date_done'],
138 'traceback': self.decode(obj['traceback']),
139 'children': self.decode(obj['children']),
140 }
141
142 return meta
143
144 def _save_group(self, group_id, result):
145 """Save the group result."""
146 meta = {'_id': group_id,
147 'result': Binary(self.encode(result)),
148 'date_done': datetime.utcnow()}
149 self.collection.save(meta, safe=True)
150
151 return result
152
153 def _restore_group(self, group_id):
154 """Get the result for a group by id."""
155 obj = self.collection.find_one({'_id': group_id})
156 if not obj:
157 return
158
159 meta = {
160 'task_id': obj['_id'],
161 'result': self.decode(obj['result']),
162 'date_done': obj['date_done'],
163 }
164
165 return meta
166
167 def _delete_group(self, group_id):
168 """Delete a group by id."""
169 self.collection.remove({'_id': group_id})
170
171 def _forget(self, task_id):
172 """
173 Remove result from MongoDB.
174
175 :raises celery.exceptions.OperationsError: if the task_id could not be
176 removed.
177 """
178 # By using safe=True, this will wait until it receives a response from
179 # the server. Likewise, it will raise an OperationsError if the
180 # response was unable to be completed.
181 self.collection.remove({'_id': task_id}, safe=True)
182
183 def cleanup(self):
184 """Delete expired metadata."""
185 self.collection.remove(
186 {'date_done': {'$lt': self.app.now() - self.expires}},
187 )
188
189 def __reduce__(self, args=(), kwargs={}):
190 kwargs.update(
191 dict(expires=self.expires))
192 return super(MongoBackend, self).__reduce__(args, kwargs)
193
194 def _get_database(self):
195 conn = self._get_connection()
196 db = conn[self.mongodb_database]
197 if self.mongodb_user and self.mongodb_password:
198 if not db.authenticate(self.mongodb_user,
199 self.mongodb_password):
200 raise ImproperlyConfigured(
201 'Invalid MongoDB username or password.')
202 return db
203
204 @cached_property
205 def database(self):
206 """Get database from MongoDB connection and perform authentication
207 if necessary."""
208 return self._get_database()
209
210 @cached_property
211 def collection(self):
212 """Get the metadata task collection."""
213 collection = self.database[self.mongodb_taskmeta_collection]
214
215 # Ensure an index on date_done is there, if not process the index
216 # in the background. Once completed cleanup will be much faster
217 collection.ensure_index('date_done', background='true')
218 return collection
219
[end of celery/backends/mongodb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/celery/backends/mongodb.py b/celery/backends/mongodb.py
--- a/celery/backends/mongodb.py
+++ b/celery/backends/mongodb.py
@@ -96,7 +96,10 @@
# This enables the use of replica sets and sharding.
# See pymongo.Connection() for more info.
args = [self.mongodb_host]
- kwargs = {'max_pool_size': self.mongodb_max_pool_size}
+ kwargs = {
+ 'max_pool_size': self.mongodb_max_pool_size,
+ 'ssl': self.app.conf.BROKER_USE_SSL
+ }
if isinstance(self.mongodb_host, string_t) \
and not self.mongodb_host.startswith('mongodb://'):
args.append(self.mongodb_port)
| {"golden_diff": "diff --git a/celery/backends/mongodb.py b/celery/backends/mongodb.py\n--- a/celery/backends/mongodb.py\n+++ b/celery/backends/mongodb.py\n@@ -96,7 +96,10 @@\n # This enables the use of replica sets and sharding.\n # See pymongo.Connection() for more info.\n args = [self.mongodb_host]\n- kwargs = {'max_pool_size': self.mongodb_max_pool_size}\n+ kwargs = {\n+ 'max_pool_size': self.mongodb_max_pool_size,\n+ 'ssl': self.app.conf.BROKER_USE_SSL\n+ }\n if isinstance(self.mongodb_host, string_t) \\\n and not self.mongodb_host.startswith('mongodb://'):\n args.append(self.mongodb_port)\n", "issue": "MongoDB and BROKER_USE_SSL=True\nI've recently started with mongodb and BROKER_USE_SSL=True, this doesn't seem to work. Celery is trying to reconnect with Re-establishing connection message. BROKER_USE_SSL=False works well. \n\n``` python\n[2013-02-21 14:57:45,708: DEBUG/MainProcess] consumer: Re-establishing connection to the broker...\n[2013-02-21 14:57:45,710: INFO/MainProcess] consumer: Connected to mongodb://localhost%3A27017%2Fdata/data.\n[2013-02-21 14:57:45,714: ERROR/MainProcess] consumer: Connection to broker lost. Trying to re-establish the connection...\nTraceback (most recent call last):\n File \"/stuff/eggs/celery-3.0.13-py2.7.egg/celery/worker/consumer.py\", line 392, in start\n self.reset_connection()\n File \"/stuff/eggs/celery-3.0.13-py2.7.egg/celery/worker/consumer.py\", line 741, in reset_connection\n self.connection, on_decode_error=self.on_decode_error,\n File \"/stuff/eggs/celery-3.0.13-py2.7.egg/celery/app/amqp.py\", line 291, in __init__\n queues or self.app.amqp.queues.consume_from.values(), **kw\n File \"/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py\", line 338, in __init__\n self.revive(self.channel)\n File \"/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py\", line 350, in revive\n self.declare()\n File \"/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py\", line 360, in declare\n queue.declare()\n File \"/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/entity.py\", line 471, in declare\n self.queue_declare(nowait, passive=False)\n File \"/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/entity.py\", line 497, in queue_declare\n nowait=nowait)\n File \"/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/virtual/__init__.py\", line 398, in queue_declare\n return queue, self._size(queue), 0\n File \"/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py\", line 76, in _size\n return self.client.messages.find({'queue': queue}).count()\n File \"/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py\", line 204, in client\n self._client = self._open()\n File \"/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py\", line 133, in _open\n mongoconn = Connection(host=hostname)\n File \"/stuff/eggs/pymongo-2.4.2-py2.7-linux-x86_64.egg/pymongo/connection.py\", line 180, in __init__\n max_pool_size, document_class, tz_aware, _connect, **kwargs)\n File \"/stuff/eggs/pymongo-2.4.2-py2.7-linux-x86_64.egg/pymongo/mongo_client.py\", line 269, in __init__\n raise ConnectionFailure(str(e))\nConnectionFailure: [Errno 104] Connection reset by peer\n[2013-02-21 14:57:45,716: DEBUG/MainProcess] consumer: Re-establishing connection to the broker...\n[2013-02-21 14:57:45,718: INFO/MainProcess] consumer: Connected to mongodb://localhost%3A27017%2Fdata/data.\n[2013-02-21 14:57:45,721: ERROR/MainProcess] consumer: Connection to broker lost. 
Trying to re-establish the connection...\n```\n\nProblem seems to be generated by this line \nhttps://github.com/celery/kombu/blob/master/kombu/transport/mongodb.py#L135\nwhich should take ssl=True parameter for SSL connections.\n\nI know it's kombu component, but setting (BROKER_USE_SSL) which is leading to this problem is part of celery library. \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n celery.backends.mongodb\n ~~~~~~~~~~~~~~~~~~~~~~~\n\n MongoDB result store backend.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom datetime import datetime\n\ntry:\n import pymongo\nexcept ImportError: # pragma: no cover\n pymongo = None # noqa\n\nif pymongo:\n try:\n from bson.binary import Binary\n except ImportError: # pragma: no cover\n from pymongo.binary import Binary # noqa\nelse: # pragma: no cover\n Binary = None # noqa\n\nfrom kombu.utils import cached_property\n\nfrom celery import states\nfrom celery.exceptions import ImproperlyConfigured\nfrom celery.five import string_t\nfrom celery.utils.timeutils import maybe_timedelta\n\nfrom .base import BaseBackend\n\n\nclass Bunch(object):\n\n def __init__(self, **kw):\n self.__dict__.update(kw)\n\n\nclass MongoBackend(BaseBackend):\n mongodb_host = 'localhost'\n mongodb_port = 27017\n mongodb_user = None\n mongodb_password = None\n mongodb_database = 'celery'\n mongodb_taskmeta_collection = 'celery_taskmeta'\n mongodb_max_pool_size = 10\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize MongoDB backend instance.\n\n :raises celery.exceptions.ImproperlyConfigured: if\n module :mod:`pymongo` is not available.\n\n \"\"\"\n super(MongoBackend, self).__init__(*args, **kwargs)\n self.expires = kwargs.get('expires') or maybe_timedelta(\n self.app.conf.CELERY_TASK_RESULT_EXPIRES)\n\n if not pymongo:\n raise ImproperlyConfigured(\n 'You need to install the pymongo library to use the '\n 'MongoDB backend.')\n\n config = self.app.conf.get('CELERY_MONGODB_BACKEND_SETTINGS', None)\n if config is not None:\n if not isinstance(config, dict):\n raise ImproperlyConfigured(\n 'MongoDB backend settings should be grouped in a dict')\n\n self.mongodb_host = config.get('host', self.mongodb_host)\n self.mongodb_port = int(config.get('port', self.mongodb_port))\n self.mongodb_user = config.get('user', self.mongodb_user)\n self.mongodb_password = config.get(\n 'password', self.mongodb_password)\n self.mongodb_database = config.get(\n 'database', self.mongodb_database)\n self.mongodb_taskmeta_collection = config.get(\n 'taskmeta_collection', self.mongodb_taskmeta_collection)\n self.mongodb_max_pool_size = config.get(\n 'max_pool_size', self.mongodb_max_pool_size)\n\n self._connection = None\n\n def _get_connection(self):\n \"\"\"Connect to the MongoDB server.\"\"\"\n if self._connection is None:\n from pymongo.connection import Connection\n\n # The first pymongo.Connection() argument (host) can be\n # a list of ['host:port'] elements or a mongodb connection\n # URI. 
If this is the case, don't use self.mongodb_port\n # but let pymongo get the port(s) from the URI instead.\n # This enables the use of replica sets and sharding.\n # See pymongo.Connection() for more info.\n args = [self.mongodb_host]\n kwargs = {'max_pool_size': self.mongodb_max_pool_size}\n if isinstance(self.mongodb_host, string_t) \\\n and not self.mongodb_host.startswith('mongodb://'):\n args.append(self.mongodb_port)\n\n self._connection = Connection(*args, **kwargs)\n\n return self._connection\n\n def process_cleanup(self):\n if self._connection is not None:\n # MongoDB connection will be closed automatically when object\n # goes out of scope\n self._connection = None\n\n def _store_result(self, task_id, result, status, traceback=None):\n \"\"\"Store return value and status of an executed task.\"\"\"\n meta = {'_id': task_id,\n 'status': status,\n 'result': Binary(self.encode(result)),\n 'date_done': datetime.utcnow(),\n 'traceback': Binary(self.encode(traceback)),\n 'children': Binary(self.encode(self.current_task_children()))}\n self.collection.save(meta, safe=True)\n\n return result\n\n def _get_task_meta_for(self, task_id):\n \"\"\"Get task metadata for a task by id.\"\"\"\n\n obj = self.collection.find_one({'_id': task_id})\n if not obj:\n return {'status': states.PENDING, 'result': None}\n\n meta = {\n 'task_id': obj['_id'],\n 'status': obj['status'],\n 'result': self.decode(obj['result']),\n 'date_done': obj['date_done'],\n 'traceback': self.decode(obj['traceback']),\n 'children': self.decode(obj['children']),\n }\n\n return meta\n\n def _save_group(self, group_id, result):\n \"\"\"Save the group result.\"\"\"\n meta = {'_id': group_id,\n 'result': Binary(self.encode(result)),\n 'date_done': datetime.utcnow()}\n self.collection.save(meta, safe=True)\n\n return result\n\n def _restore_group(self, group_id):\n \"\"\"Get the result for a group by id.\"\"\"\n obj = self.collection.find_one({'_id': group_id})\n if not obj:\n return\n\n meta = {\n 'task_id': obj['_id'],\n 'result': self.decode(obj['result']),\n 'date_done': obj['date_done'],\n }\n\n return meta\n\n def _delete_group(self, group_id):\n \"\"\"Delete a group by id.\"\"\"\n self.collection.remove({'_id': group_id})\n\n def _forget(self, task_id):\n \"\"\"\n Remove result from MongoDB.\n\n :raises celery.exceptions.OperationsError: if the task_id could not be\n removed.\n \"\"\"\n # By using safe=True, this will wait until it receives a response from\n # the server. 
Likewise, it will raise an OperationsError if the\n # response was unable to be completed.\n self.collection.remove({'_id': task_id}, safe=True)\n\n def cleanup(self):\n \"\"\"Delete expired metadata.\"\"\"\n self.collection.remove(\n {'date_done': {'$lt': self.app.now() - self.expires}},\n )\n\n def __reduce__(self, args=(), kwargs={}):\n kwargs.update(\n dict(expires=self.expires))\n return super(MongoBackend, self).__reduce__(args, kwargs)\n\n def _get_database(self):\n conn = self._get_connection()\n db = conn[self.mongodb_database]\n if self.mongodb_user and self.mongodb_password:\n if not db.authenticate(self.mongodb_user,\n self.mongodb_password):\n raise ImproperlyConfigured(\n 'Invalid MongoDB username or password.')\n return db\n\n @cached_property\n def database(self):\n \"\"\"Get database from MongoDB connection and perform authentication\n if necessary.\"\"\"\n return self._get_database()\n\n @cached_property\n def collection(self):\n \"\"\"Get the metadata task collection.\"\"\"\n collection = self.database[self.mongodb_taskmeta_collection]\n\n # Ensure an index on date_done is there, if not process the index\n # in the background. Once completed cleanup will be much faster\n collection.ensure_index('date_done', background='true')\n return collection\n", "path": "celery/backends/mongodb.py"}]} | 3,789 | 167 |
gh_patches_debug_39443 | rasdani/github-patches | git_diff | DataDog__dd-agent-1263 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
system.mem.pct_usable is missing on Windows
We don't compute this very useful metric on Windows. We should, to have more consistency with Unix.
We need to hack around here:
https://github.com/DataDog/dd-agent/blob/master/checks/system/win32.py#L166
</issue>
<code>
[start of checks/system/win32.py]
1 # project
2 from checks import Check
3
4 # 3rd party
5 try:
6 import psutil
7 except ImportError:
8 psutil = None
9
10 try:
11 import wmi
12 w = wmi.WMI()
13 except Exception:
14 wmi, w = None, None
15
16 # Device WMI drive types
17 class DriveType(object):
18 UNKNOWN, NOROOT, REMOVEABLE, LOCAL, NETWORK, CD, RAM = (0, 1, 2, 3, 4, 5, 6)
19 B2MB = float(1048576)
20 KB2MB = B2KB = float(1024)
21
22 def should_ignore_disk(name, blacklist_re):
23 # blacklist_re is a compiled regex, compilation done at config loading time
24 return name =='_total' or blacklist_re is not None and blacklist_re.match(name)
25
26 class Processes(Check):
27 def __init__(self, logger):
28 Check.__init__(self, logger)
29 self.gauge('system.proc.queue_length')
30 self.gauge('system.proc.count')
31
32 def check(self, agentConfig):
33 try:
34 os = w.Win32_PerfFormattedData_PerfOS_System()[0]
35 except AttributeError:
36 self.logger.info('Missing Win32_PerfFormattedData_PerfOS_System WMI class.' \
37 ' No process metrics will be returned.')
38 return
39
40 try:
41 cpu = w.Win32_PerfFormattedData_PerfOS_Processor(name="_Total")[0]
42 except AttributeError:
43 self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.' \
44 ' No process metrics will be returned.')
45 return
46 if os.ProcessorQueueLength is not None:
47 self.save_sample('system.proc.queue_length', os.ProcessorQueueLength)
48 if os.Processes is not None:
49 self.save_sample('system.proc.count', os.Processes)
50
51 return self.get_metrics()
52
53 class Memory(Check):
54 def __init__(self, logger):
55 Check.__init__(self, logger)
56 self.logger = logger
57 self.gauge('system.mem.free')
58 self.gauge('system.mem.used')
59 self.gauge('system.mem.total')
60 self.gauge('system.mem.cached')
61 self.gauge('system.mem.committed')
62 self.gauge('system.mem.paged')
63 self.gauge('system.mem.nonpaged')
64
65 def check(self, agentConfig):
66 try:
67 os = w.Win32_OperatingSystem()[0]
68 except AttributeError:
69 self.logger.info('Missing Win32_OperatingSystem. No memory metrics will be returned.')
70 return
71
72 if os.TotalVisibleMemorySize is not None and os.FreePhysicalMemory is not None:
73 total = int(os.TotalVisibleMemorySize) / KB2MB
74 free = int(os.FreePhysicalMemory) / KB2MB
75 self.save_sample('system.mem.total', total)
76 self.save_sample('system.mem.free', free)
77 self.save_sample('system.mem.used', total - free)
78
79 mem = w.Win32_PerfFormattedData_PerfOS_Memory()[0]
80 if mem.CacheBytes is not None:
81 self.save_sample('system.mem.cached', int(mem.CacheBytes) / B2MB)
82 if mem.CommittedBytes is not None:
83 self.save_sample('system.mem.committed', int(mem.CommittedBytes) / B2MB)
84 if mem.PoolPagedBytes is not None:
85 self.save_sample('system.mem.paged', int(mem.PoolPagedBytes) / B2MB)
86 if mem.PoolNonpagedBytes is not None:
87 self.save_sample('system.mem.nonpaged', int(mem.PoolNonpagedBytes) / B2MB)
88
89 return self.get_metrics()
90
91 class Cpu(Check):
92 def __init__(self, logger):
93 Check.__init__(self, logger)
94 self.logger = logger
95 self.counter('system.cpu.user')
96 self.counter('system.cpu.idle')
97 self.gauge('system.cpu.interrupt')
98 self.counter('system.cpu.system')
99
100 def check(self, agentConfig):
101 try:
102 cpu = w.Win32_PerfFormattedData_PerfOS_Processor()
103 except AttributeError:
104 self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.' \
105 ' No CPU metrics will be returned.')
106 return
107
108 cpu_interrupt = self._average_metric(cpu, 'PercentInterruptTime')
109 if cpu_interrupt is not None:
110 self.save_sample('system.cpu.interrupt', cpu_interrupt)
111
112 cpu_percent = psutil.cpu_times()
113
114 self.save_sample('system.cpu.user', 100 * cpu_percent.user / psutil.NUM_CPUS)
115 self.save_sample('system.cpu.idle', 100 * cpu_percent.idle / psutil.NUM_CPUS)
116 self.save_sample('system.cpu.system', 100 * cpu_percent.system/ psutil.NUM_CPUS)
117
118 return self.get_metrics()
119
120 def _average_metric(self, wmi_class, wmi_prop):
121 ''' Sum all of the values of a metric from a WMI class object, excluding
122 the value for "_Total"
123 '''
124 val = 0
125 counter = 0
126 for wmi_object in wmi_class:
127 if wmi_object.Name == '_Total':
128 # Skip the _Total value
129 continue
130
131 if getattr(wmi_object, wmi_prop) is not None:
132 counter += 1
133 val += float(getattr(wmi_object, wmi_prop))
134
135 if counter > 0:
136 return val / counter
137
138 return val
139
140
141 class Network(Check):
142 def __init__(self, logger):
143 Check.__init__(self, logger)
144 self.logger = logger
145 self.gauge('system.net.bytes_rcvd')
146 self.gauge('system.net.bytes_sent')
147
148 def check(self, agentConfig):
149 try:
150 net = w.Win32_PerfFormattedData_Tcpip_NetworkInterface()
151 except AttributeError:
152 self.logger.info('Missing Win32_PerfFormattedData_Tcpip_NetworkInterface WMI class.' \
153 ' No network metrics will be returned')
154 return
155
156 for iface in net:
157 name = self.normalize_device_name(iface.name)
158 if iface.BytesReceivedPerSec is not None:
159 self.save_sample('system.net.bytes_rcvd', iface.BytesReceivedPerSec,
160 device_name=name)
161 if iface.BytesSentPerSec is not None:
162 self.save_sample('system.net.bytes_sent', iface.BytesSentPerSec,
163 device_name=name)
164 return self.get_metrics()
165
166 class Disk(Check):
167 def __init__(self, logger):
168 Check.__init__(self, logger)
169 self.logger = logger
170 self.gauge('system.disk.free')
171 self.gauge('system.disk.total')
172 self.gauge('system.disk.in_use')
173 self.gauge('system.disk.used')
174
175 def check(self, agentConfig):
176 try:
177 disk = w.Win32_LogicalDisk()
178 except AttributeError:
179 self.logger.info('Missing Win32_LogicalDisk WMI class.' \
180 ' No disk metrics will be returned.')
181 return
182
183 blacklist_re = agentConfig.get('device_blacklist_re', None)
184 for device in disk:
185 name = self.normalize_device_name(device.name)
186 if device.DriveType in (DriveType.CD, DriveType.UNKNOWN) or should_ignore_disk(name, blacklist_re):
187 continue
188 if device.FreeSpace is not None and device.Size is not None:
189 free = float(device.FreeSpace) / B2KB
190 total = float(device.Size) / B2KB
191 used = total - free
192 self.save_sample('system.disk.free', free, device_name=name)
193 self.save_sample('system.disk.total', total, device_name=name)
194 self.save_sample('system.disk.used', used, device_name=name)
195 self.save_sample('system.disk.in_use', (used / total),
196 device_name=name)
197 return self.get_metrics()
198
199 class IO(Check):
200 def __init__(self, logger):
201 Check.__init__(self, logger)
202 self.logger = logger
203 self.gauge('system.io.wkb_s')
204 self.gauge('system.io.w_s')
205 self.gauge('system.io.rkb_s')
206 self.gauge('system.io.r_s')
207 self.gauge('system.io.avg_q_sz')
208
209 def check(self, agentConfig):
210 try:
211 disk = w.Win32_PerfFormattedData_PerfDisk_LogicalDisk()
212 except AttributeError:
213 self.logger.info('Missing Win32_PerfFormattedData_PerfDisk_LogicalDiskUnable WMI class.' \
214 ' No I/O metrics will be returned.')
215 return
216 blacklist_re = agentConfig.get('device_blacklist_re', None)
217 for device in disk:
218 name = self.normalize_device_name(device.name)
219 if should_ignore_disk(name, blacklist_re):
220 continue
221 if device.DiskWriteBytesPerSec is not None:
222 self.save_sample('system.io.wkb_s', int(device.DiskWriteBytesPerSec) / B2KB,
223 device_name=name)
224 if device.DiskWritesPerSec is not None:
225 self.save_sample('system.io.w_s', int(device.DiskWritesPerSec),
226 device_name=name)
227 if device.DiskReadBytesPerSec is not None:
228 self.save_sample('system.io.rkb_s', int(device.DiskReadBytesPerSec) / B2KB,
229 device_name=name)
230 if device.DiskReadsPerSec is not None:
231 self.save_sample('system.io.r_s', int(device.DiskReadsPerSec),
232 device_name=name)
233 if device.CurrentDiskQueueLength is not None:
234 self.save_sample('system.io.avg_q_sz', device.CurrentDiskQueueLength,
235 device_name=name)
236 return self.get_metrics()
237
[end of checks/system/win32.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checks/system/win32.py b/checks/system/win32.py
--- a/checks/system/win32.py
+++ b/checks/system/win32.py
@@ -57,10 +57,23 @@
self.gauge('system.mem.free')
self.gauge('system.mem.used')
self.gauge('system.mem.total')
+ # area of physical memory that stores recently used pages of data
+ # for applications
self.gauge('system.mem.cached')
+ # Committed memory is physical memory for which space has been
+ # reserved on the disk paging file in case it must be written
+ # back to disk
self.gauge('system.mem.committed')
+ # physical memory used by the operating system, for objects
+ # that can be written to disk when they are not being used
self.gauge('system.mem.paged')
+ # physical memory used by the operating system for objects that
+ # cannot be written to disk, but must remain in physical memory
+ # as long as they are allocated.
self.gauge('system.mem.nonpaged')
+ # usable = free + cached
+ self.gauge('system.mem.usable')
+ self.gauge('system.mem.pct_usable')
def check(self, agentConfig):
try:
@@ -69,6 +82,10 @@
self.logger.info('Missing Win32_OperatingSystem. No memory metrics will be returned.')
return
+ total = 0
+ free = 0
+ cached = 0
+
if os.TotalVisibleMemorySize is not None and os.FreePhysicalMemory is not None:
total = int(os.TotalVisibleMemorySize) / KB2MB
free = int(os.FreePhysicalMemory) / KB2MB
@@ -76,9 +93,11 @@
self.save_sample('system.mem.free', free)
self.save_sample('system.mem.used', total - free)
+
mem = w.Win32_PerfFormattedData_PerfOS_Memory()[0]
if mem.CacheBytes is not None:
- self.save_sample('system.mem.cached', int(mem.CacheBytes) / B2MB)
+ cached = int(mem.CacheBytes) / B2MB
+ self.save_sample('system.mem.cached', cached)
if mem.CommittedBytes is not None:
self.save_sample('system.mem.committed', int(mem.CommittedBytes) / B2MB)
if mem.PoolPagedBytes is not None:
@@ -86,6 +105,12 @@
if mem.PoolNonpagedBytes is not None:
self.save_sample('system.mem.nonpaged', int(mem.PoolNonpagedBytes) / B2MB)
+ usable = free + cached
+ self.save_sample('system.mem.usable', usable)
+ if total > 0:
+ pct_usable = float(usable) / total
+ self.save_sample('system.mem.pct_usable', pct_usable)
+
return self.get_metrics()
class Cpu(Check):
| {"golden_diff": "diff --git a/checks/system/win32.py b/checks/system/win32.py\n--- a/checks/system/win32.py\n+++ b/checks/system/win32.py\n@@ -57,10 +57,23 @@\n self.gauge('system.mem.free')\n self.gauge('system.mem.used')\n self.gauge('system.mem.total')\n+ # area of physical memory that stores recently used pages of data\n+ # for applications\n self.gauge('system.mem.cached')\n+ # Committed memory is physical memory for which space has been\n+ # reserved on the disk paging file in case it must be written\n+ # back to disk\n self.gauge('system.mem.committed')\n+ # physical memory used by the operating system, for objects\n+ # that can be written to disk when they are not being used\n self.gauge('system.mem.paged')\n+ # physical memory used by the operating system for objects that\n+ # cannot be written to disk, but must remain in physical memory\n+ # as long as they are allocated.\n self.gauge('system.mem.nonpaged')\n+ # usable = free + cached\n+ self.gauge('system.mem.usable')\n+ self.gauge('system.mem.pct_usable')\n \n def check(self, agentConfig):\n try:\n@@ -69,6 +82,10 @@\n self.logger.info('Missing Win32_OperatingSystem. No memory metrics will be returned.')\n return\n \n+ total = 0\n+ free = 0\n+ cached = 0\n+\n if os.TotalVisibleMemorySize is not None and os.FreePhysicalMemory is not None:\n total = int(os.TotalVisibleMemorySize) / KB2MB\n free = int(os.FreePhysicalMemory) / KB2MB\n@@ -76,9 +93,11 @@\n self.save_sample('system.mem.free', free)\n self.save_sample('system.mem.used', total - free)\n \n+\n mem = w.Win32_PerfFormattedData_PerfOS_Memory()[0]\n if mem.CacheBytes is not None:\n- self.save_sample('system.mem.cached', int(mem.CacheBytes) / B2MB)\n+ cached = int(mem.CacheBytes) / B2MB\n+ self.save_sample('system.mem.cached', cached)\n if mem.CommittedBytes is not None:\n self.save_sample('system.mem.committed', int(mem.CommittedBytes) / B2MB)\n if mem.PoolPagedBytes is not None:\n@@ -86,6 +105,12 @@\n if mem.PoolNonpagedBytes is not None:\n self.save_sample('system.mem.nonpaged', int(mem.PoolNonpagedBytes) / B2MB)\n \n+ usable = free + cached\n+ self.save_sample('system.mem.usable', usable)\n+ if total > 0:\n+ pct_usable = float(usable) / total\n+ self.save_sample('system.mem.pct_usable', pct_usable)\n+\n return self.get_metrics()\n \n class Cpu(Check):\n", "issue": "system.mem.pct_usable is missing on Windows\nWe don't compute this very useful metric on Windows. We should to have more consistency with Unix.\nNeed to hack around there: \nhttps://github.com/DataDog/dd-agent/blob/master/checks/system/win32.py#L166\n\n", "before_files": [{"content": "# project\nfrom checks import Check\n\n# 3rd party\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\ntry:\n import wmi\n w = wmi.WMI()\nexcept Exception:\n wmi, w = None, None\n\n# Device WMI drive types\nclass DriveType(object):\n UNKNOWN, NOROOT, REMOVEABLE, LOCAL, NETWORK, CD, RAM = (0, 1, 2, 3, 4, 5, 6)\nB2MB = float(1048576)\nKB2MB = B2KB = float(1024)\n\ndef should_ignore_disk(name, blacklist_re):\n # blacklist_re is a compiled regex, compilation done at config loading time\n return name =='_total' or blacklist_re is not None and blacklist_re.match(name)\n\nclass Processes(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.gauge('system.proc.queue_length')\n self.gauge('system.proc.count')\n\n def check(self, agentConfig):\n try:\n os = w.Win32_PerfFormattedData_PerfOS_System()[0]\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfOS_System WMI class.' 
\\\n ' No process metrics will be returned.')\n return\n\n try:\n cpu = w.Win32_PerfFormattedData_PerfOS_Processor(name=\"_Total\")[0]\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.' \\\n ' No process metrics will be returned.')\n return\n if os.ProcessorQueueLength is not None:\n self.save_sample('system.proc.queue_length', os.ProcessorQueueLength)\n if os.Processes is not None:\n self.save_sample('system.proc.count', os.Processes)\n\n return self.get_metrics()\n\nclass Memory(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.mem.free')\n self.gauge('system.mem.used')\n self.gauge('system.mem.total')\n self.gauge('system.mem.cached')\n self.gauge('system.mem.committed')\n self.gauge('system.mem.paged')\n self.gauge('system.mem.nonpaged')\n\n def check(self, agentConfig):\n try:\n os = w.Win32_OperatingSystem()[0]\n except AttributeError:\n self.logger.info('Missing Win32_OperatingSystem. No memory metrics will be returned.')\n return\n\n if os.TotalVisibleMemorySize is not None and os.FreePhysicalMemory is not None:\n total = int(os.TotalVisibleMemorySize) / KB2MB\n free = int(os.FreePhysicalMemory) / KB2MB\n self.save_sample('system.mem.total', total)\n self.save_sample('system.mem.free', free)\n self.save_sample('system.mem.used', total - free)\n\n mem = w.Win32_PerfFormattedData_PerfOS_Memory()[0]\n if mem.CacheBytes is not None:\n self.save_sample('system.mem.cached', int(mem.CacheBytes) / B2MB)\n if mem.CommittedBytes is not None:\n self.save_sample('system.mem.committed', int(mem.CommittedBytes) / B2MB)\n if mem.PoolPagedBytes is not None:\n self.save_sample('system.mem.paged', int(mem.PoolPagedBytes) / B2MB)\n if mem.PoolNonpagedBytes is not None:\n self.save_sample('system.mem.nonpaged', int(mem.PoolNonpagedBytes) / B2MB)\n\n return self.get_metrics()\n\nclass Cpu(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.counter('system.cpu.user')\n self.counter('system.cpu.idle')\n self.gauge('system.cpu.interrupt')\n self.counter('system.cpu.system')\n\n def check(self, agentConfig):\n try:\n cpu = w.Win32_PerfFormattedData_PerfOS_Processor()\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.' 
\\\n ' No CPU metrics will be returned.')\n return\n\n cpu_interrupt = self._average_metric(cpu, 'PercentInterruptTime')\n if cpu_interrupt is not None:\n self.save_sample('system.cpu.interrupt', cpu_interrupt)\n\n cpu_percent = psutil.cpu_times()\n\n self.save_sample('system.cpu.user', 100 * cpu_percent.user / psutil.NUM_CPUS)\n self.save_sample('system.cpu.idle', 100 * cpu_percent.idle / psutil.NUM_CPUS)\n self.save_sample('system.cpu.system', 100 * cpu_percent.system/ psutil.NUM_CPUS)\n\n return self.get_metrics()\n\n def _average_metric(self, wmi_class, wmi_prop):\n ''' Sum all of the values of a metric from a WMI class object, excluding\n the value for \"_Total\"\n '''\n val = 0\n counter = 0\n for wmi_object in wmi_class:\n if wmi_object.Name == '_Total':\n # Skip the _Total value\n continue\n\n if getattr(wmi_object, wmi_prop) is not None:\n counter += 1\n val += float(getattr(wmi_object, wmi_prop))\n\n if counter > 0:\n return val / counter\n\n return val\n\n\nclass Network(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.net.bytes_rcvd')\n self.gauge('system.net.bytes_sent')\n\n def check(self, agentConfig):\n try:\n net = w.Win32_PerfFormattedData_Tcpip_NetworkInterface()\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_Tcpip_NetworkInterface WMI class.' \\\n ' No network metrics will be returned')\n return\n\n for iface in net:\n name = self.normalize_device_name(iface.name)\n if iface.BytesReceivedPerSec is not None:\n self.save_sample('system.net.bytes_rcvd', iface.BytesReceivedPerSec,\n device_name=name)\n if iface.BytesSentPerSec is not None:\n self.save_sample('system.net.bytes_sent', iface.BytesSentPerSec,\n device_name=name)\n return self.get_metrics()\n\nclass Disk(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.disk.free')\n self.gauge('system.disk.total')\n self.gauge('system.disk.in_use')\n self.gauge('system.disk.used')\n\n def check(self, agentConfig):\n try:\n disk = w.Win32_LogicalDisk()\n except AttributeError:\n self.logger.info('Missing Win32_LogicalDisk WMI class.' \\\n ' No disk metrics will be returned.')\n return\n\n blacklist_re = agentConfig.get('device_blacklist_re', None)\n for device in disk:\n name = self.normalize_device_name(device.name)\n if device.DriveType in (DriveType.CD, DriveType.UNKNOWN) or should_ignore_disk(name, blacklist_re):\n continue\n if device.FreeSpace is not None and device.Size is not None:\n free = float(device.FreeSpace) / B2KB\n total = float(device.Size) / B2KB\n used = total - free\n self.save_sample('system.disk.free', free, device_name=name)\n self.save_sample('system.disk.total', total, device_name=name)\n self.save_sample('system.disk.used', used, device_name=name)\n self.save_sample('system.disk.in_use', (used / total),\n device_name=name)\n return self.get_metrics()\n\nclass IO(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.io.wkb_s')\n self.gauge('system.io.w_s')\n self.gauge('system.io.rkb_s')\n self.gauge('system.io.r_s')\n self.gauge('system.io.avg_q_sz')\n\n def check(self, agentConfig):\n try:\n disk = w.Win32_PerfFormattedData_PerfDisk_LogicalDisk()\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfDisk_LogicalDiskUnable WMI class.' 
\\\n ' No I/O metrics will be returned.')\n return\n blacklist_re = agentConfig.get('device_blacklist_re', None)\n for device in disk:\n name = self.normalize_device_name(device.name)\n if should_ignore_disk(name, blacklist_re):\n continue\n if device.DiskWriteBytesPerSec is not None:\n self.save_sample('system.io.wkb_s', int(device.DiskWriteBytesPerSec) / B2KB,\n device_name=name)\n if device.DiskWritesPerSec is not None:\n self.save_sample('system.io.w_s', int(device.DiskWritesPerSec),\n device_name=name)\n if device.DiskReadBytesPerSec is not None:\n self.save_sample('system.io.rkb_s', int(device.DiskReadBytesPerSec) / B2KB,\n device_name=name)\n if device.DiskReadsPerSec is not None:\n self.save_sample('system.io.r_s', int(device.DiskReadsPerSec),\n device_name=name)\n if device.CurrentDiskQueueLength is not None:\n self.save_sample('system.io.avg_q_sz', device.CurrentDiskQueueLength,\n device_name=name)\n return self.get_metrics()\n", "path": "checks/system/win32.py"}]} | 3,322 | 680 |
gh_patches_debug_36985 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-332 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve tastypie support
Currently some [Django Tastypie](https://django-tastypie.readthedocs.io/en/latest/) views are named simply `tastypie.resources` in their operation name. We can probably find better names, and maybe support some Tastypie-specific tags!
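As a rough, non-authoritative sketch of what "better names" could look like, one option is to peek into the closure of the wrapper that Tastypie's decorators produce and pull out the resource class and the dispatched method. Everything below (the `__wrapped__` attribute, the closure layout, the `_meta` duck-typing check, and the helper name itself) is an assumption about Tastypie's internals and would need to be verified against the installed version:

```
# Hypothetical helper -- not part of the current middleware code.
def guess_tastypie_operation_name(view_func):
    # Assumes Tastypie's wrap_view/dispatch decorators expose the wrapped
    # callable via __wrapped__ and keep the Resource instance plus the
    # dispatched method name in its closure cells.
    wrapper = getattr(view_func, "__wrapped__", None)
    if wrapper is None or not getattr(wrapper, "__closure__", None):
        return None
    cells = [cell.cell_contents for cell in wrapper.__closure__]
    resource = next((c for c in cells if hasattr(c, "_meta")), None)
    method_name = next((c for c in cells if isinstance(c, str)), None)
    if resource is None or method_name is None:
        return None
    return "Controller/{}.{}.{}".format(
        resource.__module__, resource.__class__.__name__, method_name
    )
```

With something like this, a view that currently shows up as `tastypie.resources` could instead be reported as, say, `Controller/myapp.api.BookResource.get_list` (the module, resource, and method names here are made up for illustration).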
</issue>
<code>
[start of src/scout_apm/compat.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import datetime as dt
5 import inspect
6 import sys
7 from functools import wraps
8
9 string_type = str if sys.version_info[0] >= 3 else basestring # noqa: F821
10 text_type = str if sys.version_info[0] >= 3 else unicode # noqa: F821
11
12 # Python 2 (and very early 3.x) didn't have ContextDecorator, so define it for ourselves
13 if sys.version_info < (3, 2):
14 import functools
15
16 class ContextDecorator(object):
17 def __call__(self, f):
18 @functools.wraps(f)
19 def decorated(*args, **kwds):
20 with self:
21 return f(*args, **kwds)
22
23 return decorated
24
25
26 else:
27 from contextlib import ContextDecorator
28
29 try:
30 # Python 3.x
31 import queue
32 except ImportError:
33 # Python 2.x
34 import Queue as queue
35
36 # datetime_to_timestamp converts a naive UTC datetime to a unix timestamp
37 if sys.version_info >= (3, 3):
38
39 def datetime_to_timestamp(datetime_obj):
40 return datetime_obj.replace(tzinfo=dt.timezone.utc).timestamp()
41
42
43 else:
44 _EPOCH = dt.datetime(1970, 1, 1)
45
46 def datetime_to_timestamp(datetime_obj):
47 return (datetime_obj - _EPOCH).total_seconds()
48
49
50 def text(value, encoding="utf-8", errors="strict"):
51 """
52 Convert a value to str on Python 3 and unicode on Python 2.
53 """
54 if isinstance(value, text_type):
55 return value
56 elif isinstance(value, bytes):
57 return text_type(value, encoding, errors)
58 else:
59 return text_type(value)
60
61
62 try:
63 from urllib.parse import urlencode
64 except ImportError:
65 from urllib import urlencode
66
67
68 def kwargs_only(func):
69 """
70 Make a function only accept keyword arguments.
71 This can be dropped in Python 3 in lieu of:
72 def foo(*, bar=default):
73 Source: https://pypi.org/project/kwargs-only/
74 """
75 if hasattr(inspect, "signature"): # pragma: no cover
76 # Python 3
77 signature = inspect.signature(func)
78 first_arg_name = list(signature.parameters.keys())[0]
79 else: # pragma: no cover
80 # Python 2
81 signature = inspect.getargspec(func)
82 first_arg_name = signature.args[0]
83
84 if first_arg_name in ("self", "cls"):
85 allowable_args = 1
86 else:
87 allowable_args = 0
88
89 @wraps(func)
90 def wrapper(*args, **kwargs):
91 if len(args) > allowable_args:
92 raise TypeError(
93 "{} should only be called with keyword args".format(func.__name__)
94 )
95 return func(*args, **kwargs)
96
97 return wrapper
98
99
100 __all__ = [
101 "ContextDecorator",
102 "datetime_to_timestamp",
103 "kwargs_only",
104 "queue",
105 "string_type",
106 "text",
107 "text_type",
108 "urlencode",
109 ]
110
[end of src/scout_apm/compat.py]
[start of src/scout_apm/django/middleware.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import django
5 from django.conf import settings
6
7 from scout_apm.core.config import scout_config
8 from scout_apm.core.tracked_request import TrackedRequest
9 from scout_apm.core.web_requests import (
10 create_filtered_path,
11 ignore_path,
12 track_amazon_request_queue_time,
13 track_request_queue_time,
14 )
15
16 if django.VERSION >= (2, 0):
17 from django.urls import get_urlconf
18 else:
19 from django.core.urlresolvers import get_urlconf
20
21
22 def get_operation_name(request):
23 view_func = request.resolver_match.func
24 view_name = request.resolver_match._func_path
25
26 if hasattr(view_func, "model_admin"):
27 # Seems to comes from Django admin (attribute only set on Django 1.9+)
28 admin_class = view_func.model_admin.__class__
29 view_name = (
30 admin_class.__module__
31 + "."
32 + admin_class.__name__
33 + "."
34 + view_func.__name__
35 )
36
37 return "Controller/" + view_name
38
39
40 def track_request_view_data(request, tracked_request):
41 path = request.path
42 tracked_request.tag(
43 "path",
44 create_filtered_path(
45 path, [(k, v) for k, vs in request.GET.lists() for v in vs]
46 ),
47 )
48 if ignore_path(path):
49 tracked_request.tag("ignore_transaction", True)
50
51 try:
52 # Determine a remote IP to associate with the request. The value is
53 # spoofable by the requester so this is not suitable to use in any
54 # security sensitive context.
55 user_ip = (
56 request.META.get("HTTP_X_FORWARDED_FOR", "").split(",")[0]
57 or request.META.get("HTTP_CLIENT_IP", "").split(",")[0]
58 or request.META.get("REMOTE_ADDR", None)
59 )
60 tracked_request.tag("user_ip", user_ip)
61 except Exception:
62 pass
63
64 user = getattr(request, "user", None)
65 if user is not None:
66 try:
67 tracked_request.tag("username", user.get_username())
68 except Exception:
69 pass
70
71 tracked_request.tag("urlconf", get_urlconf(settings.ROOT_URLCONF))
72
73
74 class MiddlewareTimingMiddleware(object):
75 """
76 Insert as early into the Middleware stack as possible (outermost layers),
77 so that other middlewares called after can be timed.
78 """
79
80 def __init__(self, get_response):
81 self.get_response = get_response
82
83 def __call__(self, request):
84 if not scout_config.value("monitor"):
85 return self.get_response(request)
86
87 tracked_request = TrackedRequest.instance()
88
89 tracked_request.start_span(
90 operation="Middleware", should_capture_backtrace=False
91 )
92 queue_time = request.META.get("HTTP_X_QUEUE_START") or request.META.get(
93 "HTTP_X_REQUEST_START", ""
94 )
95 queue_time_tracked = track_request_queue_time(queue_time, tracked_request)
96 if not queue_time_tracked:
97 track_amazon_request_queue_time(
98 request.META.get("HTTP_X_AMZN_TRACE_ID", ""), tracked_request
99 )
100
101 try:
102 return self.get_response(request)
103 finally:
104 tracked_request.stop_span()
105
106
107 class ViewTimingMiddleware(object):
108 """
109 Insert as deep into the middleware stack as possible, ideally wrapping no
110 other middleware. Designed to time the View itself
111 """
112
113 def __init__(self, get_response):
114 self.get_response = get_response
115
116 def __call__(self, request):
117 """
118 Wrap a single incoming request with start and stop calls.
119 This will start timing, but relies on the process_view callback to
120 capture more details about what view was really called, and other
121 similar info.
122
123 If process_view isn't called, then the request will not
124 be recorded. This can happen if a middleware further along the stack
125 doesn't call onward, and instead returns a response directly.
126 """
127 if not scout_config.value("monitor"):
128 return self.get_response(request)
129
130 tracked_request = TrackedRequest.instance()
131
132 # This operation name won't be recorded unless changed later in
133 # process_view
134 tracked_request.start_span(operation="Unknown", should_capture_backtrace=False)
135 try:
136 return self.get_response(request)
137 finally:
138 tracked_request.stop_span()
139
140 def process_view(self, request, view_func, view_args, view_kwargs):
141 """
142 Capture details about the view_func that is about to execute
143 """
144 if not scout_config.value("monitor"):
145 return
146 tracked_request = TrackedRequest.instance()
147 tracked_request.mark_real_request()
148
149 track_request_view_data(request, tracked_request)
150
151 span = tracked_request.current_span()
152 if span is not None:
153 span.operation = get_operation_name(request)
154
155 def process_exception(self, request, exception):
156 """
157 Mark this request as having errored out
158
159 Does not modify or catch or otherwise change the exception thrown
160 """
161 if not scout_config.value("monitor"):
162 return
163 TrackedRequest.instance().tag("error", "true")
164
165
166 class OldStyleMiddlewareTimingMiddleware(object):
167 """
168 Insert as early into the Middleware stack as possible (outermost layers),
169 so that other middlewares called after can be timed.
170 """
171
172 def process_request(self, request):
173 if not scout_config.value("monitor"):
174 return
175 tracked_request = TrackedRequest.instance()
176 request._scout_tracked_request = tracked_request
177
178 queue_time = request.META.get("HTTP_X_QUEUE_START") or request.META.get(
179 "HTTP_X_REQUEST_START", ""
180 )
181 queue_time_tracked = track_request_queue_time(queue_time, tracked_request)
182 if not queue_time_tracked:
183 track_amazon_request_queue_time(
184 request.META.get("HTTP_X_AMZN_TRACE_ID", ""), tracked_request
185 )
186
187 tracked_request.start_span(
188 operation="Middleware", should_capture_backtrace=False
189 )
190
191 def process_response(self, request, response):
192 # Only stop span if there's a request, but presume we are balanced,
193 # i.e. that custom instrumentation within the application is not
194 # causing errors
195 tracked_request = getattr(request, "_scout_tracked_request", None)
196 if tracked_request is not None:
197 tracked_request.stop_span()
198 return response
199
200
201 class OldStyleViewMiddleware(object):
202 def process_view(self, request, view_func, view_func_args, view_func_kwargs):
203 tracked_request = getattr(request, "_scout_tracked_request", None)
204 if tracked_request is None:
205 # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so
206 # don't do anything
207 return
208
209 tracked_request.mark_real_request()
210
211 track_request_view_data(request, tracked_request)
212
213 span = tracked_request.start_span(
214 operation=get_operation_name(request), should_capture_backtrace=False
215 )
216 # Save the span into the request, so we can check
217 # if we're matched up when stopping
218 request._scout_view_span = span
219
220 def process_response(self, request, response):
221 tracked_request = getattr(request, "_scout_tracked_request", None)
222 if tracked_request is None:
223 # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so
224 # don't do anything
225 return response
226
227 # Only stop span if we started, but presume we are balanced, i.e. that
228 # custom instrumentation within the application is not causing errors
229 span = getattr(request, "_scout_view_span", None)
230 if span is not None:
231 tracked_request.stop_span()
232 return response
233
234 def process_exception(self, request, exception):
235 tracked_request = getattr(request, "_scout_tracked_request", None)
236 if tracked_request is None:
237 # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so
238 # don't do anything
239 return
240
241 tracked_request.tag("error", "true")
242
[end of src/scout_apm/django/middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/scout_apm/compat.py b/src/scout_apm/compat.py
--- a/src/scout_apm/compat.py
+++ b/src/scout_apm/compat.py
@@ -8,6 +8,7 @@
string_type = str if sys.version_info[0] >= 3 else basestring # noqa: F821
text_type = str if sys.version_info[0] >= 3 else unicode # noqa: F821
+string_types = tuple({string_type, text_type})
# Python 2 (and very early 3.x) didn't have ContextDecorator, so define it for ourselves
if sys.version_info < (3, 2):
diff --git a/src/scout_apm/django/middleware.py b/src/scout_apm/django/middleware.py
--- a/src/scout_apm/django/middleware.py
+++ b/src/scout_apm/django/middleware.py
@@ -1,9 +1,12 @@
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
+import sys
+
import django
from django.conf import settings
+from scout_apm.compat import string_types
from scout_apm.core.config import scout_config
from scout_apm.core.tracked_request import TrackedRequest
from scout_apm.core.web_requests import (
@@ -34,9 +37,52 @@
+ view_func.__name__
)
+ # Seems to be a Tastypie Resource. Need to resort to some stack inspection
+ # to find a better name since its decorators don't wrap very well
+ if view_name == "tastypie.resources.wrapper":
+ tastypie_name = _get_tastypie_operation_name(request, view_func)
+ if tastypie_name is not None:
+ return tastypie_name
+
return "Controller/" + view_name
+def _get_tastypie_operation_name(request, view_func):
+ try:
+ from tastypie.resources import Resource
+ except ImportError:
+ return None
+
+ if sys.version_info[0] == 2: # pragma: no cover
+ try:
+ wrapper = view_func.__closure__[0].cell_contents
+ except (AttributeError, IndexError):
+ return None
+ elif sys.version_info[0] == 3:
+ try:
+ wrapper = view_func.__wrapped__
+ except AttributeError:
+ return None
+
+ if not hasattr(wrapper, "__closure__") or len(wrapper.__closure__) != 2:
+ return None
+
+ instance = wrapper.__closure__[0].cell_contents
+ if not isinstance(instance, Resource): # pragma: no cover
+ return None
+
+ method_name = wrapper.__closure__[1].cell_contents
+ if not isinstance(method_name, string_types): # pragma: no cover
+ return None
+
+ if method_name.startswith("dispatch_"): # pragma: no cover
+ method_name = request.method.lower() + method_name.split("dispatch", 1)[1]
+
+ return "Controller/{}.{}.{}".format(
+ instance.__module__, instance.__class__.__name__, method_name
+ )
+
+
def track_request_view_data(request, tracked_request):
path = request.path
tracked_request.tag(
| {"golden_diff": "diff --git a/src/scout_apm/compat.py b/src/scout_apm/compat.py\n--- a/src/scout_apm/compat.py\n+++ b/src/scout_apm/compat.py\n@@ -8,6 +8,7 @@\n \n string_type = str if sys.version_info[0] >= 3 else basestring # noqa: F821\n text_type = str if sys.version_info[0] >= 3 else unicode # noqa: F821\n+string_types = tuple({string_type, text_type})\n \n # Python 2 (and very early 3.x) didn't have ContextDecorator, so define it for ourselves\n if sys.version_info < (3, 2):\ndiff --git a/src/scout_apm/django/middleware.py b/src/scout_apm/django/middleware.py\n--- a/src/scout_apm/django/middleware.py\n+++ b/src/scout_apm/django/middleware.py\n@@ -1,9 +1,12 @@\n # coding=utf-8\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n+import sys\n+\n import django\n from django.conf import settings\n \n+from scout_apm.compat import string_types\n from scout_apm.core.config import scout_config\n from scout_apm.core.tracked_request import TrackedRequest\n from scout_apm.core.web_requests import (\n@@ -34,9 +37,52 @@\n + view_func.__name__\n )\n \n+ # Seems to be a Tastypie Resource. Need to resort to some stack inspection\n+ # to find a better name since its decorators don't wrap very well\n+ if view_name == \"tastypie.resources.wrapper\":\n+ tastypie_name = _get_tastypie_operation_name(request, view_func)\n+ if tastypie_name is not None:\n+ return tastypie_name\n+\n return \"Controller/\" + view_name\n \n \n+def _get_tastypie_operation_name(request, view_func):\n+ try:\n+ from tastypie.resources import Resource\n+ except ImportError:\n+ return None\n+\n+ if sys.version_info[0] == 2: # pragma: no cover\n+ try:\n+ wrapper = view_func.__closure__[0].cell_contents\n+ except (AttributeError, IndexError):\n+ return None\n+ elif sys.version_info[0] == 3:\n+ try:\n+ wrapper = view_func.__wrapped__\n+ except AttributeError:\n+ return None\n+\n+ if not hasattr(wrapper, \"__closure__\") or len(wrapper.__closure__) != 2:\n+ return None\n+\n+ instance = wrapper.__closure__[0].cell_contents\n+ if not isinstance(instance, Resource): # pragma: no cover\n+ return None\n+\n+ method_name = wrapper.__closure__[1].cell_contents\n+ if not isinstance(method_name, string_types): # pragma: no cover\n+ return None\n+\n+ if method_name.startswith(\"dispatch_\"): # pragma: no cover\n+ method_name = request.method.lower() + method_name.split(\"dispatch\", 1)[1]\n+\n+ return \"Controller/{}.{}.{}\".format(\n+ instance.__module__, instance.__class__.__name__, method_name\n+ )\n+\n+\n def track_request_view_data(request, tracked_request):\n path = request.path\n tracked_request.tag(\n", "issue": "Improve tastypie support\nCurrently some [Django Tastypie](https://django-tastypie.readthedocs.io/en/latest/) views are named simply `tastypie.resources` in their operation name. 
We can probably find better names, and maybe support some Tastypie specific tags!\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\nimport inspect\nimport sys\nfrom functools import wraps\n\nstring_type = str if sys.version_info[0] >= 3 else basestring # noqa: F821\ntext_type = str if sys.version_info[0] >= 3 else unicode # noqa: F821\n\n# Python 2 (and very early 3.x) didn't have ContextDecorator, so define it for ourselves\nif sys.version_info < (3, 2):\n import functools\n\n class ContextDecorator(object):\n def __call__(self, f):\n @functools.wraps(f)\n def decorated(*args, **kwds):\n with self:\n return f(*args, **kwds)\n\n return decorated\n\n\nelse:\n from contextlib import ContextDecorator\n\ntry:\n # Python 3.x\n import queue\nexcept ImportError:\n # Python 2.x\n import Queue as queue\n\n# datetime_to_timestamp converts a naive UTC datetime to a unix timestamp\nif sys.version_info >= (3, 3):\n\n def datetime_to_timestamp(datetime_obj):\n return datetime_obj.replace(tzinfo=dt.timezone.utc).timestamp()\n\n\nelse:\n _EPOCH = dt.datetime(1970, 1, 1)\n\n def datetime_to_timestamp(datetime_obj):\n return (datetime_obj - _EPOCH).total_seconds()\n\n\ndef text(value, encoding=\"utf-8\", errors=\"strict\"):\n \"\"\"\n Convert a value to str on Python 3 and unicode on Python 2.\n \"\"\"\n if isinstance(value, text_type):\n return value\n elif isinstance(value, bytes):\n return text_type(value, encoding, errors)\n else:\n return text_type(value)\n\n\ntry:\n from urllib.parse import urlencode\nexcept ImportError:\n from urllib import urlencode\n\n\ndef kwargs_only(func):\n \"\"\"\n Make a function only accept keyword arguments.\n This can be dropped in Python 3 in lieu of:\n def foo(*, bar=default):\n Source: https://pypi.org/project/kwargs-only/\n \"\"\"\n if hasattr(inspect, \"signature\"): # pragma: no cover\n # Python 3\n signature = inspect.signature(func)\n first_arg_name = list(signature.parameters.keys())[0]\n else: # pragma: no cover\n # Python 2\n signature = inspect.getargspec(func)\n first_arg_name = signature.args[0]\n\n if first_arg_name in (\"self\", \"cls\"):\n allowable_args = 1\n else:\n allowable_args = 0\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if len(args) > allowable_args:\n raise TypeError(\n \"{} should only be called with keyword args\".format(func.__name__)\n )\n return func(*args, **kwargs)\n\n return wrapper\n\n\n__all__ = [\n \"ContextDecorator\",\n \"datetime_to_timestamp\",\n \"kwargs_only\",\n \"queue\",\n \"string_type\",\n \"text\",\n \"text_type\",\n \"urlencode\",\n]\n", "path": "src/scout_apm/compat.py"}, {"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport django\nfrom django.conf import settings\n\nfrom scout_apm.core.config import scout_config\nfrom scout_apm.core.tracked_request import TrackedRequest\nfrom scout_apm.core.web_requests import (\n create_filtered_path,\n ignore_path,\n track_amazon_request_queue_time,\n track_request_queue_time,\n)\n\nif django.VERSION >= (2, 0):\n from django.urls import get_urlconf\nelse:\n from django.core.urlresolvers import get_urlconf\n\n\ndef get_operation_name(request):\n view_func = request.resolver_match.func\n view_name = request.resolver_match._func_path\n\n if hasattr(view_func, \"model_admin\"):\n # Seems to comes from Django admin (attribute only set on Django 1.9+)\n admin_class = view_func.model_admin.__class__\n 
view_name = (\n admin_class.__module__\n + \".\"\n + admin_class.__name__\n + \".\"\n + view_func.__name__\n )\n\n return \"Controller/\" + view_name\n\n\ndef track_request_view_data(request, tracked_request):\n path = request.path\n tracked_request.tag(\n \"path\",\n create_filtered_path(\n path, [(k, v) for k, vs in request.GET.lists() for v in vs]\n ),\n )\n if ignore_path(path):\n tracked_request.tag(\"ignore_transaction\", True)\n\n try:\n # Determine a remote IP to associate with the request. The value is\n # spoofable by the requester so this is not suitable to use in any\n # security sensitive context.\n user_ip = (\n request.META.get(\"HTTP_X_FORWARDED_FOR\", \"\").split(\",\")[0]\n or request.META.get(\"HTTP_CLIENT_IP\", \"\").split(\",\")[0]\n or request.META.get(\"REMOTE_ADDR\", None)\n )\n tracked_request.tag(\"user_ip\", user_ip)\n except Exception:\n pass\n\n user = getattr(request, \"user\", None)\n if user is not None:\n try:\n tracked_request.tag(\"username\", user.get_username())\n except Exception:\n pass\n\n tracked_request.tag(\"urlconf\", get_urlconf(settings.ROOT_URLCONF))\n\n\nclass MiddlewareTimingMiddleware(object):\n \"\"\"\n Insert as early into the Middleware stack as possible (outermost layers),\n so that other middlewares called after can be timed.\n \"\"\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n if not scout_config.value(\"monitor\"):\n return self.get_response(request)\n\n tracked_request = TrackedRequest.instance()\n\n tracked_request.start_span(\n operation=\"Middleware\", should_capture_backtrace=False\n )\n queue_time = request.META.get(\"HTTP_X_QUEUE_START\") or request.META.get(\n \"HTTP_X_REQUEST_START\", \"\"\n )\n queue_time_tracked = track_request_queue_time(queue_time, tracked_request)\n if not queue_time_tracked:\n track_amazon_request_queue_time(\n request.META.get(\"HTTP_X_AMZN_TRACE_ID\", \"\"), tracked_request\n )\n\n try:\n return self.get_response(request)\n finally:\n tracked_request.stop_span()\n\n\nclass ViewTimingMiddleware(object):\n \"\"\"\n Insert as deep into the middleware stack as possible, ideally wrapping no\n other middleware. Designed to time the View itself\n \"\"\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n \"\"\"\n Wrap a single incoming request with start and stop calls.\n This will start timing, but relies on the process_view callback to\n capture more details about what view was really called, and other\n similar info.\n\n If process_view isn't called, then the request will not\n be recorded. 
This can happen if a middleware further along the stack\n doesn't call onward, and instead returns a response directly.\n \"\"\"\n if not scout_config.value(\"monitor\"):\n return self.get_response(request)\n\n tracked_request = TrackedRequest.instance()\n\n # This operation name won't be recorded unless changed later in\n # process_view\n tracked_request.start_span(operation=\"Unknown\", should_capture_backtrace=False)\n try:\n return self.get_response(request)\n finally:\n tracked_request.stop_span()\n\n def process_view(self, request, view_func, view_args, view_kwargs):\n \"\"\"\n Capture details about the view_func that is about to execute\n \"\"\"\n if not scout_config.value(\"monitor\"):\n return\n tracked_request = TrackedRequest.instance()\n tracked_request.mark_real_request()\n\n track_request_view_data(request, tracked_request)\n\n span = tracked_request.current_span()\n if span is not None:\n span.operation = get_operation_name(request)\n\n def process_exception(self, request, exception):\n \"\"\"\n Mark this request as having errored out\n\n Does not modify or catch or otherwise change the exception thrown\n \"\"\"\n if not scout_config.value(\"monitor\"):\n return\n TrackedRequest.instance().tag(\"error\", \"true\")\n\n\nclass OldStyleMiddlewareTimingMiddleware(object):\n \"\"\"\n Insert as early into the Middleware stack as possible (outermost layers),\n so that other middlewares called after can be timed.\n \"\"\"\n\n def process_request(self, request):\n if not scout_config.value(\"monitor\"):\n return\n tracked_request = TrackedRequest.instance()\n request._scout_tracked_request = tracked_request\n\n queue_time = request.META.get(\"HTTP_X_QUEUE_START\") or request.META.get(\n \"HTTP_X_REQUEST_START\", \"\"\n )\n queue_time_tracked = track_request_queue_time(queue_time, tracked_request)\n if not queue_time_tracked:\n track_amazon_request_queue_time(\n request.META.get(\"HTTP_X_AMZN_TRACE_ID\", \"\"), tracked_request\n )\n\n tracked_request.start_span(\n operation=\"Middleware\", should_capture_backtrace=False\n )\n\n def process_response(self, request, response):\n # Only stop span if there's a request, but presume we are balanced,\n # i.e. that custom instrumentation within the application is not\n # causing errors\n tracked_request = getattr(request, \"_scout_tracked_request\", None)\n if tracked_request is not None:\n tracked_request.stop_span()\n return response\n\n\nclass OldStyleViewMiddleware(object):\n def process_view(self, request, view_func, view_func_args, view_func_kwargs):\n tracked_request = getattr(request, \"_scout_tracked_request\", None)\n if tracked_request is None:\n # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so\n # don't do anything\n return\n\n tracked_request.mark_real_request()\n\n track_request_view_data(request, tracked_request)\n\n span = tracked_request.start_span(\n operation=get_operation_name(request), should_capture_backtrace=False\n )\n # Save the span into the request, so we can check\n # if we're matched up when stopping\n request._scout_view_span = span\n\n def process_response(self, request, response):\n tracked_request = getattr(request, \"_scout_tracked_request\", None)\n if tracked_request is None:\n # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so\n # don't do anything\n return response\n\n # Only stop span if we started, but presume we are balanced, i.e. 
that\n # custom instrumentation within the application is not causing errors\n span = getattr(request, \"_scout_view_span\", None)\n if span is not None:\n tracked_request.stop_span()\n return response\n\n def process_exception(self, request, exception):\n tracked_request = getattr(request, \"_scout_tracked_request\", None)\n if tracked_request is None:\n # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so\n # don't do anything\n return\n\n tracked_request.tag(\"error\", \"true\")\n", "path": "src/scout_apm/django/middleware.py"}]} | 3,874 | 740 |
gh_patches_debug_30147 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-5856 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Data for French Guyana (up to today)
Hourly production data for French Guyana: https://opendata-guyane.edf.fr/explore/dataset/production-d-electricite-par-filiere-en-temps-reel/
This one is up to date as of today!
Also mentioned in #1912, but that issue was closed due to inactivity after some of the data sources were implemented; the URLs have also changed since then.
Edit: looks like the FR_O parser supports a 'historical dataset', but not this real-time dataset.
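As a purely illustrative sketch, supporting the real-time feed would presumably start by registering the dataset slug from the URL above alongside the existing live datasets; whether its column names line up with the parser's existing production mappings is an assumption that still has to be checked:

```
# Sketch only -- the "GF" slug is taken from the opendata-guyane URL above;
# the field names that dataset returns still need to be verified/mapped.
LIVE_DATASETS = {
    "FR-COR": "production-delectricite-par-filiere-en-temps-reel",
    "GP": "mix-temps-reel-guadeloupe",
    "RE": "prod-electricite-temps-reel",
    "GF": "production-d-electricite-par-filiere-en-temps-reel",
}
```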
</issue>
<code>
[start of parsers/FR_O.py]
1 from datetime import datetime
2 from logging import getLogger
3 from typing import Dict, Optional, Tuple, Union
4
5 from requests import Response, Session
6
7 from electricitymap.contrib.lib.models.event_lists import (
8 PriceList,
9 ProductionBreakdownList,
10 )
11 from electricitymap.contrib.lib.models.events import (
12 EventSourceType,
13 ProductionMix,
14 StorageMix,
15 )
16 from electricitymap.contrib.lib.types import ZoneKey
17
18 from .lib.exceptions import ParserException
19
20 DOMAIN_MAPPING = {
21 "FR-COR": "https://opendata-corse.edf.fr",
22 "RE": "https://opendata-reunion.edf.fr",
23 "GF": "https://opendata-guyane.edf.fr",
24 "MQ": "https://opendata-martinique.edf.fr",
25 "GP": "https://opendata-guadeloupe.edf.fr",
26 }
27
28 LIVE_DATASETS = {
29 "FR-COR": "production-delectricite-par-filiere-en-temps-reel",
30 "GP": "mix-temps-reel-guadeloupe",
31 "RE": "prod-electricite-temps-reel",
32 }
33
34 HISTORICAL_DATASETS = {
35 "FR-COR": "production-delectricite-par-filiere",
36 "RE": "courbe-de-charge-de-la-production-delectricite-par-filiere",
37 "GF": "courbe-de-charge-de-la-production-delectricite-par-filiere",
38 "MQ": "courbe-de-charge-de-la-production-delectricite-par-filiere",
39 "GP": "courbe-de-charge-de-la-production-delectricite-par-filiere",
40 }
41
42 API_PARAMETER_GROUPS = {
43 "production": {
44 "biomass": [
45 "biomasse",
46 "biomasse_mw",
47 "biomasse_mwh",
48 "bioenergies",
49 "bioenergies_mw",
50 "bioenergies_mwh",
51 ],
52 "coal": [
53 "charbon",
54 ],
55 "gas": [
56 "thermique_mw",
57 "thermique_mwh",
58 "turbines_a_combustion",
59 ],
60 "geothermal": [
61 "geothermie",
62 "geothermie_mw",
63 ],
64 "hydro": [
65 "hydraulique",
66 "hydraulique_mw",
67 "hydraulique_mwh",
68 "micro_hydro",
69 "micro_hydraulique_mw",
70 ],
71 "oil": ["diesel", "moteur_diesel"],
72 "solar": [
73 "photovoltaique",
74 "photovoltaique0",
75 "photovoltaique_mw",
76 "photovoltaique_mwh",
77 "solaire_mw",
78 ],
79 "wind": [
80 "eolien",
81 "eolien_mw",
82 "eolien_mwh",
83 ],
84 "unknown": ["bagasse_charbon_mwh", "charbon_bagasse_mw"],
85 },
86 "storage": {"battery": ["solde_stockage", "stockage"]},
87 "price": {
88 "price": ["cout_moyen_de_production_eur_mwh"],
89 },
90 }
91
92 PRODUCTION_MAPPING = {
93 API_TYPE: type
94 for key in ["production"]
95 for type, groups in API_PARAMETER_GROUPS[key].items()
96 for API_TYPE in groups
97 }
98
99 STORAGE_MAPPING = {
100 API_TYPE: type
101 for key in ["storage"]
102 for type, groups in API_PARAMETER_GROUPS[key].items()
103 for API_TYPE in groups
104 }
105
106 PRICE_MAPPING = {
107 API_TYPE: type
108 for key in ["price"]
109 for type, groups in API_PARAMETER_GROUPS[key].items()
110 for API_TYPE in groups
111 }
112
113 IGNORED_VALUES = ["jour", "total", "statut", "date", "heure", "liaisons", "tac"]
114
115
116 def generate_url(zone_key, target_datetime):
117 return f"{DOMAIN_MAPPING[zone_key]}/api/v2/catalog/datasets/{HISTORICAL_DATASETS[zone_key] if target_datetime else LIVE_DATASETS[zone_key]}/exports/json"
118
119
120 def generate_source(zone_key: ZoneKey):
121 # Return the domain name of the source without the protocol
122 return DOMAIN_MAPPING[zone_key].split("//")[1]
123
124
125 def fetch_data(
126 zone_key: ZoneKey,
127 session: Optional[Session] = None,
128 target_datetime: Optional[datetime] = None,
129 ) -> Tuple[list, str]:
130 ses = session or Session()
131
132 DATE_STRING_MAPPING = {
133 "FR-COR": "date_heure" if target_datetime else "date",
134 "RE": "date_heure" if target_datetime else "date",
135 "GF": "date",
136 "MQ": "date_heure",
137 "GP": "date",
138 }
139
140 if target_datetime and zone_key not in HISTORICAL_DATASETS.keys():
141 raise ParserException(
142 "FR_O.py",
143 f"Historical data not implemented for {zone_key} in this parser.",
144 zone_key,
145 )
146 elif target_datetime is None and zone_key not in LIVE_DATASETS.keys():
147 raise ParserException(
148 "FR_O.py",
149 f"Live data not implemented for {zone_key} in this parser.",
150 zone_key,
151 )
152
153 URL_QUERIES: Dict[str, Union[str, None]] = {
154 # "refine": "statut:Validé" if target_datetime else None,
155 "timezone": "UTC",
156 "order_by": f"{DATE_STRING_MAPPING[zone_key]} desc",
157 "refine": f"{DATE_STRING_MAPPING[zone_key]}:{target_datetime.strftime('%Y')}"
158 if target_datetime
159 else None,
160 }
161
162 url = generate_url(zone_key, target_datetime)
163 response: Response = ses.get(url, params=URL_QUERIES)
164 data: Union[dict, list, None] = response.json()
165 if data == []:
166 raise ParserException(
167 "FR_O.py",
168 f"No data available for {zone_key} for {target_datetime.strftime('%Y')}"
169 if target_datetime
170 else f"No live data available for {zone_key}.",
171 zone_key,
172 )
173 elif isinstance(data, dict):
174 if data.get("errorcode") == "10002":
175 raise ParserException(
176 "FR_O.py",
177 f"Rate limit exceeded. Please try again later after: {data.get('reset_time')}",
178 )
179 elif data.get("error_code") == "ODSQLError":
180 raise ParserException(
181 "FR_O.py",
182 "Query malformed. Please check the parameters. If this was previously working there has likely been a change in the API.",
183 )
184 if not isinstance(data, list):
185 raise ParserException(
186 "FR_O.py",
187 f"Unexpected data format for {zone_key} for {target_datetime.strftime('%Y')}"
188 if target_datetime
189 else f"Unexpected data format for {zone_key}.",
190 zone_key,
191 )
192 return data, DATE_STRING_MAPPING[zone_key]
193
194
195 def fetch_production(
196 zone_key: ZoneKey,
197 session: Optional[Session] = None,
198 target_datetime: Optional[datetime] = None,
199 logger=getLogger(__name__),
200 ):
201 production_objects, date_string = fetch_data(zone_key, session, target_datetime)
202
203 production_breakdown_list = ProductionBreakdownList(logger=logger)
204 for production_object in production_objects:
205 production = ProductionMix()
206 storage = StorageMix()
207 for mode_key in production_object:
208 if mode_key in PRODUCTION_MAPPING:
209 production.add_value(
210 PRODUCTION_MAPPING[mode_key],
211 production_object[mode_key],
212 correct_negative_with_zero=True,
213 )
214 elif mode_key in STORAGE_MAPPING:
215 storage.add_value(
216 STORAGE_MAPPING[mode_key], -production_object[mode_key]
217 )
218 elif mode_key in IGNORED_VALUES:
219 pass
220 else:
221 logger.warning(
222 f"Unknown mode_key: '{mode_key}' encountered for {zone_key}."
223 )
224
225 production_breakdown_list.append(
226 zoneKey=zone_key,
227 datetime=datetime.fromisoformat(production_object[date_string]),
228 production=production,
229 storage=storage,
230 source=generate_source(zone_key),
231 sourceType=EventSourceType.estimated
232 if production_object["statut"] == "Estimé"
233 else EventSourceType.measured,
234 )
235 return production_breakdown_list.to_list()
236
237
238 def fetch_price(
239 zone_key: ZoneKey,
240 session: Optional[Session] = None,
241 target_datetime: Optional[datetime] = None,
242 logger=getLogger(__name__),
243 ):
244 data_objects, date_string = fetch_data(zone_key, session, target_datetime)
245
246 price_list = PriceList(logger=logger)
247 for data_object in data_objects:
248 price: Union[float, int, None] = None
249 for mode_key in data_object:
250 if mode_key in PRICE_MAPPING:
251 price = data_object[mode_key]
252 break
253 if price is not None:
254 price_list.append(
255 zoneKey=zone_key,
256 currency="EUR",
257 datetime=datetime.fromisoformat(data_object[date_string]),
258 source=generate_source(zone_key),
259 price=price,
260 )
261 return price_list.to_list()
262
[end of parsers/FR_O.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsers/FR_O.py b/parsers/FR_O.py
--- a/parsers/FR_O.py
+++ b/parsers/FR_O.py
@@ -29,6 +29,8 @@
"FR-COR": "production-delectricite-par-filiere-en-temps-reel",
"GP": "mix-temps-reel-guadeloupe",
"RE": "prod-electricite-temps-reel",
+ "GF": "production-d-electricite-par-filiere-en-temps-reel",
+ "MQ": "production-delectricite-par-filiere-en-temps-reel",
}
HISTORICAL_DATASETS = {
@@ -68,7 +70,7 @@
"micro_hydro",
"micro_hydraulique_mw",
],
- "oil": ["diesel", "moteur_diesel"],
+ "oil": ["diesel", "moteur_diesel", "centrale_au_fioul", "moteurs_diesels"],
"solar": [
"photovoltaique",
"photovoltaique0",
@@ -133,7 +135,7 @@
"FR-COR": "date_heure" if target_datetime else "date",
"RE": "date_heure" if target_datetime else "date",
"GF": "date",
- "MQ": "date_heure",
+ "MQ": "date_heure" if target_datetime else "date",
"GP": "date",
}
@@ -229,7 +231,7 @@
storage=storage,
source=generate_source(zone_key),
sourceType=EventSourceType.estimated
- if production_object["statut"] == "Estimé"
+ if production_object.get("statut") == "Estimé"
else EventSourceType.measured,
)
return production_breakdown_list.to_list()
| {"golden_diff": "diff --git a/parsers/FR_O.py b/parsers/FR_O.py\n--- a/parsers/FR_O.py\n+++ b/parsers/FR_O.py\n@@ -29,6 +29,8 @@\n \"FR-COR\": \"production-delectricite-par-filiere-en-temps-reel\",\n \"GP\": \"mix-temps-reel-guadeloupe\",\n \"RE\": \"prod-electricite-temps-reel\",\n+ \"GF\": \"production-d-electricite-par-filiere-en-temps-reel\",\n+ \"MQ\": \"production-delectricite-par-filiere-en-temps-reel\",\n }\n \n HISTORICAL_DATASETS = {\n@@ -68,7 +70,7 @@\n \"micro_hydro\",\n \"micro_hydraulique_mw\",\n ],\n- \"oil\": [\"diesel\", \"moteur_diesel\"],\n+ \"oil\": [\"diesel\", \"moteur_diesel\", \"centrale_au_fioul\", \"moteurs_diesels\"],\n \"solar\": [\n \"photovoltaique\",\n \"photovoltaique0\",\n@@ -133,7 +135,7 @@\n \"FR-COR\": \"date_heure\" if target_datetime else \"date\",\n \"RE\": \"date_heure\" if target_datetime else \"date\",\n \"GF\": \"date\",\n- \"MQ\": \"date_heure\",\n+ \"MQ\": \"date_heure\" if target_datetime else \"date\",\n \"GP\": \"date\",\n }\n \n@@ -229,7 +231,7 @@\n storage=storage,\n source=generate_source(zone_key),\n sourceType=EventSourceType.estimated\n- if production_object[\"statut\"] == \"Estim\u00e9\"\n+ if production_object.get(\"statut\") == \"Estim\u00e9\"\n else EventSourceType.measured,\n )\n return production_breakdown_list.to_list()\n", "issue": "Data for French Guyana (up to today)\nHourly production data for French Guyana: https://opendata-guyane.edf.fr/explore/dataset/production-d-electricite-par-filiere-en-temps-reel/\r\n\r\nThis one is up-to-date to today!\r\n\r\nAlso mentioned in #1912, but that was closed due to inactivity after some of the data sources were implemented. Also, the URLs have changed.\r\n\r\nEdit: looks like the FR_O parser supports a 'historical dataset', but not this real-time dataset.\n", "before_files": [{"content": "from datetime import datetime\nfrom logging import getLogger\nfrom typing import Dict, Optional, Tuple, Union\n\nfrom requests import Response, Session\n\nfrom electricitymap.contrib.lib.models.event_lists import (\n PriceList,\n ProductionBreakdownList,\n)\nfrom electricitymap.contrib.lib.models.events import (\n EventSourceType,\n ProductionMix,\n StorageMix,\n)\nfrom electricitymap.contrib.lib.types import ZoneKey\n\nfrom .lib.exceptions import ParserException\n\nDOMAIN_MAPPING = {\n \"FR-COR\": \"https://opendata-corse.edf.fr\",\n \"RE\": \"https://opendata-reunion.edf.fr\",\n \"GF\": \"https://opendata-guyane.edf.fr\",\n \"MQ\": \"https://opendata-martinique.edf.fr\",\n \"GP\": \"https://opendata-guadeloupe.edf.fr\",\n}\n\nLIVE_DATASETS = {\n \"FR-COR\": \"production-delectricite-par-filiere-en-temps-reel\",\n \"GP\": \"mix-temps-reel-guadeloupe\",\n \"RE\": \"prod-electricite-temps-reel\",\n}\n\nHISTORICAL_DATASETS = {\n \"FR-COR\": \"production-delectricite-par-filiere\",\n \"RE\": \"courbe-de-charge-de-la-production-delectricite-par-filiere\",\n \"GF\": \"courbe-de-charge-de-la-production-delectricite-par-filiere\",\n \"MQ\": \"courbe-de-charge-de-la-production-delectricite-par-filiere\",\n \"GP\": \"courbe-de-charge-de-la-production-delectricite-par-filiere\",\n}\n\nAPI_PARAMETER_GROUPS = {\n \"production\": {\n \"biomass\": [\n \"biomasse\",\n \"biomasse_mw\",\n \"biomasse_mwh\",\n \"bioenergies\",\n \"bioenergies_mw\",\n \"bioenergies_mwh\",\n ],\n \"coal\": [\n \"charbon\",\n ],\n \"gas\": [\n \"thermique_mw\",\n \"thermique_mwh\",\n \"turbines_a_combustion\",\n ],\n \"geothermal\": [\n \"geothermie\",\n \"geothermie_mw\",\n ],\n \"hydro\": [\n \"hydraulique\",\n \"hydraulique_mw\",\n 
\"hydraulique_mwh\",\n \"micro_hydro\",\n \"micro_hydraulique_mw\",\n ],\n \"oil\": [\"diesel\", \"moteur_diesel\"],\n \"solar\": [\n \"photovoltaique\",\n \"photovoltaique0\",\n \"photovoltaique_mw\",\n \"photovoltaique_mwh\",\n \"solaire_mw\",\n ],\n \"wind\": [\n \"eolien\",\n \"eolien_mw\",\n \"eolien_mwh\",\n ],\n \"unknown\": [\"bagasse_charbon_mwh\", \"charbon_bagasse_mw\"],\n },\n \"storage\": {\"battery\": [\"solde_stockage\", \"stockage\"]},\n \"price\": {\n \"price\": [\"cout_moyen_de_production_eur_mwh\"],\n },\n}\n\nPRODUCTION_MAPPING = {\n API_TYPE: type\n for key in [\"production\"]\n for type, groups in API_PARAMETER_GROUPS[key].items()\n for API_TYPE in groups\n}\n\nSTORAGE_MAPPING = {\n API_TYPE: type\n for key in [\"storage\"]\n for type, groups in API_PARAMETER_GROUPS[key].items()\n for API_TYPE in groups\n}\n\nPRICE_MAPPING = {\n API_TYPE: type\n for key in [\"price\"]\n for type, groups in API_PARAMETER_GROUPS[key].items()\n for API_TYPE in groups\n}\n\nIGNORED_VALUES = [\"jour\", \"total\", \"statut\", \"date\", \"heure\", \"liaisons\", \"tac\"]\n\n\ndef generate_url(zone_key, target_datetime):\n return f\"{DOMAIN_MAPPING[zone_key]}/api/v2/catalog/datasets/{HISTORICAL_DATASETS[zone_key] if target_datetime else LIVE_DATASETS[zone_key]}/exports/json\"\n\n\ndef generate_source(zone_key: ZoneKey):\n # Return the domain name of the source without the protocol\n return DOMAIN_MAPPING[zone_key].split(\"//\")[1]\n\n\ndef fetch_data(\n zone_key: ZoneKey,\n session: Optional[Session] = None,\n target_datetime: Optional[datetime] = None,\n) -> Tuple[list, str]:\n ses = session or Session()\n\n DATE_STRING_MAPPING = {\n \"FR-COR\": \"date_heure\" if target_datetime else \"date\",\n \"RE\": \"date_heure\" if target_datetime else \"date\",\n \"GF\": \"date\",\n \"MQ\": \"date_heure\",\n \"GP\": \"date\",\n }\n\n if target_datetime and zone_key not in HISTORICAL_DATASETS.keys():\n raise ParserException(\n \"FR_O.py\",\n f\"Historical data not implemented for {zone_key} in this parser.\",\n zone_key,\n )\n elif target_datetime is None and zone_key not in LIVE_DATASETS.keys():\n raise ParserException(\n \"FR_O.py\",\n f\"Live data not implemented for {zone_key} in this parser.\",\n zone_key,\n )\n\n URL_QUERIES: Dict[str, Union[str, None]] = {\n # \"refine\": \"statut:Valid\u00e9\" if target_datetime else None,\n \"timezone\": \"UTC\",\n \"order_by\": f\"{DATE_STRING_MAPPING[zone_key]} desc\",\n \"refine\": f\"{DATE_STRING_MAPPING[zone_key]}:{target_datetime.strftime('%Y')}\"\n if target_datetime\n else None,\n }\n\n url = generate_url(zone_key, target_datetime)\n response: Response = ses.get(url, params=URL_QUERIES)\n data: Union[dict, list, None] = response.json()\n if data == []:\n raise ParserException(\n \"FR_O.py\",\n f\"No data available for {zone_key} for {target_datetime.strftime('%Y')}\"\n if target_datetime\n else f\"No live data available for {zone_key}.\",\n zone_key,\n )\n elif isinstance(data, dict):\n if data.get(\"errorcode\") == \"10002\":\n raise ParserException(\n \"FR_O.py\",\n f\"Rate limit exceeded. Please try again later after: {data.get('reset_time')}\",\n )\n elif data.get(\"error_code\") == \"ODSQLError\":\n raise ParserException(\n \"FR_O.py\",\n \"Query malformed. Please check the parameters. 
If this was previously working there has likely been a change in the API.\",\n )\n if not isinstance(data, list):\n raise ParserException(\n \"FR_O.py\",\n f\"Unexpected data format for {zone_key} for {target_datetime.strftime('%Y')}\"\n if target_datetime\n else f\"Unexpected data format for {zone_key}.\",\n zone_key,\n )\n return data, DATE_STRING_MAPPING[zone_key]\n\n\ndef fetch_production(\n zone_key: ZoneKey,\n session: Optional[Session] = None,\n target_datetime: Optional[datetime] = None,\n logger=getLogger(__name__),\n):\n production_objects, date_string = fetch_data(zone_key, session, target_datetime)\n\n production_breakdown_list = ProductionBreakdownList(logger=logger)\n for production_object in production_objects:\n production = ProductionMix()\n storage = StorageMix()\n for mode_key in production_object:\n if mode_key in PRODUCTION_MAPPING:\n production.add_value(\n PRODUCTION_MAPPING[mode_key],\n production_object[mode_key],\n correct_negative_with_zero=True,\n )\n elif mode_key in STORAGE_MAPPING:\n storage.add_value(\n STORAGE_MAPPING[mode_key], -production_object[mode_key]\n )\n elif mode_key in IGNORED_VALUES:\n pass\n else:\n logger.warning(\n f\"Unknown mode_key: '{mode_key}' encountered for {zone_key}.\"\n )\n\n production_breakdown_list.append(\n zoneKey=zone_key,\n datetime=datetime.fromisoformat(production_object[date_string]),\n production=production,\n storage=storage,\n source=generate_source(zone_key),\n sourceType=EventSourceType.estimated\n if production_object[\"statut\"] == \"Estim\u00e9\"\n else EventSourceType.measured,\n )\n return production_breakdown_list.to_list()\n\n\ndef fetch_price(\n zone_key: ZoneKey,\n session: Optional[Session] = None,\n target_datetime: Optional[datetime] = None,\n logger=getLogger(__name__),\n):\n data_objects, date_string = fetch_data(zone_key, session, target_datetime)\n\n price_list = PriceList(logger=logger)\n for data_object in data_objects:\n price: Union[float, int, None] = None\n for mode_key in data_object:\n if mode_key in PRICE_MAPPING:\n price = data_object[mode_key]\n break\n if price is not None:\n price_list.append(\n zoneKey=zone_key,\n currency=\"EUR\",\n datetime=datetime.fromisoformat(data_object[date_string]),\n source=generate_source(zone_key),\n price=price,\n )\n return price_list.to_list()\n", "path": "parsers/FR_O.py"}]} | 3,326 | 430 |
gh_patches_debug_26655 | rasdani/github-patches | git_diff | scikit-image__scikit-image-6989 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Existing "inpainting" gallery example could use a better (more specific) title.
Creating this issue so we don't lose track of what's been discussed in the conversation. _Originally posted by @lagru in https://github.com/scikit-image/scikit-image/pull/6853#discussion_r1149741067_
> @mkcor, just wondering how this relates to [our existing inpainting example ](https://scikit-image.org/docs/dev/auto_examples/filters/plot_inpaint.html#sphx-glr-auto-examples-filters-plot-inpaint-py). I am assuming that the main benefit here is that it's a real world use case?
[...]
> Which prompts the idea that we should update the title of the existing example, so it's less generic than just "inpainting."
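One possible direction (the wording here is only a suggestion, not something settled in that discussion) is to make the example's reST title state what the example does rather than the bare technique name, e.g.:

```
===============================
Fill in defects with inpainting
===============================
```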
</issue>
<code>
[start of doc/examples/filters/plot_inpaint.py]
1 """
2 ===========
3 Inpainting
4 ===========
5 Inpainting [1]_ is the process of reconstructing lost or deteriorated
6 parts of images and videos.
7
8 The reconstruction is supposed to be performed in fully automatic way by
9 exploiting the information presented in non-damaged regions.
10
11 In this example, we show how the masked pixels get inpainted by
12 inpainting algorithm based on 'biharmonic equation'-assumption [2]_ [3]_ [4]_.
13
14 .. [1] Wikipedia. Inpainting
15 https://en.wikipedia.org/wiki/Inpainting
16 .. [2] Wikipedia. Biharmonic equation
17 https://en.wikipedia.org/wiki/Biharmonic_equation
18 .. [3] S.B.Damelin and N.S.Hoang. "On Surface Completion and Image
19 Inpainting by Biharmonic Functions: Numerical Aspects",
20 International Journal of Mathematics and Mathematical Sciences,
21 Vol. 2018, Article ID 3950312
22 :DOI:`10.1155/2018/3950312`
23 .. [4] C. K. Chui and H. N. Mhaskar, MRA Contextual-Recovery Extension of
24 Smooth Functions on Manifolds, Appl. and Comp. Harmonic Anal.,
25 28 (2010), 104-113,
26 :DOI:`10.1016/j.acha.2009.04.004`
27 """
28
29 import numpy as np
30 import matplotlib.pyplot as plt
31
32 from skimage import data
33 from skimage.morphology import disk, binary_dilation
34 from skimage.restoration import inpaint
35
36 image_orig = data.astronaut()
37
38 # Create mask with six block defect regions
39 mask = np.zeros(image_orig.shape[:-1], dtype=bool)
40 mask[20:60, 0:20] = 1
41 mask[160:180, 70:155] = 1
42 mask[30:60, 170:195] = 1
43 mask[-60:-30, 170:195] = 1
44 mask[-180:-160, 70:155] = 1
45 mask[-60:-20, 0:20] = 1
46
47 # add a few long, narrow defects
48 mask[200:205, -200:] = 1
49 mask[150:255, 20:23] = 1
50 mask[365:368, 60:130] = 1
51
52 # add randomly positioned small point-like defects
53 rstate = np.random.default_rng(0)
54 for radius in [0, 2, 4]:
55 # larger defects are less common
56 thresh = 3 + 0.25 * radius # make larger defects less common
57 tmp_mask = rstate.standard_normal(image_orig.shape[:-1]) > thresh
58 if radius > 0:
59 tmp_mask = binary_dilation(tmp_mask, disk(radius, dtype=bool))
60 mask[tmp_mask] = 1
61
62 # Apply defect mask to the image over the same region in each color channel
63 image_defect = image_orig * ~mask[..., np.newaxis]
64
65 image_result = inpaint.inpaint_biharmonic(image_defect, mask, channel_axis=-1)
66
67 fig, axes = plt.subplots(ncols=2, nrows=2)
68 ax = axes.ravel()
69
70 ax[0].set_title('Original image')
71 ax[0].imshow(image_orig)
72
73 ax[1].set_title('Mask')
74 ax[1].imshow(mask, cmap=plt.cm.gray)
75
76 ax[2].set_title('Defected image')
77 ax[2].imshow(image_defect)
78
79 ax[3].set_title('Inpainted image')
80 ax[3].imshow(image_result)
81
82 for a in ax:
83 a.axis('off')
84
85 fig.tight_layout()
86 plt.show()
87
[end of doc/examples/filters/plot_inpaint.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/doc/examples/filters/plot_inpaint.py b/doc/examples/filters/plot_inpaint.py
--- a/doc/examples/filters/plot_inpaint.py
+++ b/doc/examples/filters/plot_inpaint.py
@@ -1,15 +1,16 @@
"""
-===========
-Inpainting
-===========
+===============================
+Fill in defects with inpainting
+===============================
+
Inpainting [1]_ is the process of reconstructing lost or deteriorated
parts of images and videos.
-The reconstruction is supposed to be performed in fully automatic way by
-exploiting the information presented in non-damaged regions.
+The reconstruction (restoration) is performed in an automatic way by
+exploiting the information present in non-damaged regions.
-In this example, we show how the masked pixels get inpainted by
-inpainting algorithm based on 'biharmonic equation'-assumption [2]_ [3]_ [4]_.
+In this example, we show how the masked pixels get inpainted using an
+inpainting algorithm based on the biharmonic equation [2]_ [3]_ [4]_.
.. [1] Wikipedia. Inpainting
https://en.wikipedia.org/wiki/Inpainting
@@ -44,12 +45,12 @@
mask[-180:-160, 70:155] = 1
mask[-60:-20, 0:20] = 1
-# add a few long, narrow defects
+# Add a few long, narrow defects
mask[200:205, -200:] = 1
mask[150:255, 20:23] = 1
mask[365:368, 60:130] = 1
-# add randomly positioned small point-like defects
+# Add randomly positioned small point-like defects
rstate = np.random.default_rng(0)
for radius in [0, 2, 4]:
# larger defects are less common
| {"golden_diff": "diff --git a/doc/examples/filters/plot_inpaint.py b/doc/examples/filters/plot_inpaint.py\n--- a/doc/examples/filters/plot_inpaint.py\n+++ b/doc/examples/filters/plot_inpaint.py\n@@ -1,15 +1,16 @@\n \"\"\"\n-===========\n-Inpainting\n-===========\n+===============================\n+Fill in defects with inpainting\n+===============================\n+\n Inpainting [1]_ is the process of reconstructing lost or deteriorated\n parts of images and videos.\n \n-The reconstruction is supposed to be performed in fully automatic way by\n-exploiting the information presented in non-damaged regions.\n+The reconstruction (restoration) is performed in an automatic way by\n+exploiting the information present in non-damaged regions.\n \n-In this example, we show how the masked pixels get inpainted by\n-inpainting algorithm based on 'biharmonic equation'-assumption [2]_ [3]_ [4]_.\n+In this example, we show how the masked pixels get inpainted using an\n+inpainting algorithm based on the biharmonic equation [2]_ [3]_ [4]_.\n \n .. [1] Wikipedia. Inpainting\n https://en.wikipedia.org/wiki/Inpainting\n@@ -44,12 +45,12 @@\n mask[-180:-160, 70:155] = 1\n mask[-60:-20, 0:20] = 1\n \n-# add a few long, narrow defects\n+# Add a few long, narrow defects\n mask[200:205, -200:] = 1\n mask[150:255, 20:23] = 1\n mask[365:368, 60:130] = 1\n \n-# add randomly positioned small point-like defects\n+# Add randomly positioned small point-like defects\n rstate = np.random.default_rng(0)\n for radius in [0, 2, 4]:\n # larger defects are less common\n", "issue": "Existing \"inpainting\" gallery example could use a better (more specific) title.\nCreating this issue so we don't lose track of what's been discussed in the conversation _Originally posted by @lagru in https://github.com/scikit-image/scikit-image/pull/6853#discussion_r1149741067_\r\n\r\n> @mkcor, just wondering how this relates to [our existing inpainting example ](https://scikit-image.org/docs/dev/auto_examples/filters/plot_inpaint.html#sphx-glr-auto-examples-filters-plot-inpaint-py). I am assuming that the main benefit here is that it's a real world use case?\r\n\r\n[...]\r\n\r\n> Which prompts the idea that we should update the title of the existing example, so it's less generic than just \"inpainting.\"\n", "before_files": [{"content": "\"\"\"\n===========\nInpainting\n===========\nInpainting [1]_ is the process of reconstructing lost or deteriorated\nparts of images and videos.\n\nThe reconstruction is supposed to be performed in fully automatic way by\nexploiting the information presented in non-damaged regions.\n\nIn this example, we show how the masked pixels get inpainted by\ninpainting algorithm based on 'biharmonic equation'-assumption [2]_ [3]_ [4]_.\n\n.. [1] Wikipedia. Inpainting\n https://en.wikipedia.org/wiki/Inpainting\n.. [2] Wikipedia. Biharmonic equation\n https://en.wikipedia.org/wiki/Biharmonic_equation\n.. [3] S.B.Damelin and N.S.Hoang. \"On Surface Completion and Image\n Inpainting by Biharmonic Functions: Numerical Aspects\",\n International Journal of Mathematics and Mathematical Sciences,\n Vol. 2018, Article ID 3950312\n :DOI:`10.1155/2018/3950312`\n.. [4] C. K. Chui and H. N. Mhaskar, MRA Contextual-Recovery Extension of\n Smooth Functions on Manifolds, Appl. and Comp. 
Harmonic Anal.,\n 28 (2010), 104-113,\n :DOI:`10.1016/j.acha.2009.04.004`\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom skimage import data\nfrom skimage.morphology import disk, binary_dilation\nfrom skimage.restoration import inpaint\n\nimage_orig = data.astronaut()\n\n# Create mask with six block defect regions\nmask = np.zeros(image_orig.shape[:-1], dtype=bool)\nmask[20:60, 0:20] = 1\nmask[160:180, 70:155] = 1\nmask[30:60, 170:195] = 1\nmask[-60:-30, 170:195] = 1\nmask[-180:-160, 70:155] = 1\nmask[-60:-20, 0:20] = 1\n\n# add a few long, narrow defects\nmask[200:205, -200:] = 1\nmask[150:255, 20:23] = 1\nmask[365:368, 60:130] = 1\n\n# add randomly positioned small point-like defects\nrstate = np.random.default_rng(0)\nfor radius in [0, 2, 4]:\n # larger defects are less common\n thresh = 3 + 0.25 * radius # make larger defects less common\n tmp_mask = rstate.standard_normal(image_orig.shape[:-1]) > thresh\n if radius > 0:\n tmp_mask = binary_dilation(tmp_mask, disk(radius, dtype=bool))\n mask[tmp_mask] = 1\n\n# Apply defect mask to the image over the same region in each color channel\nimage_defect = image_orig * ~mask[..., np.newaxis]\n\nimage_result = inpaint.inpaint_biharmonic(image_defect, mask, channel_axis=-1)\n\nfig, axes = plt.subplots(ncols=2, nrows=2)\nax = axes.ravel()\n\nax[0].set_title('Original image')\nax[0].imshow(image_orig)\n\nax[1].set_title('Mask')\nax[1].imshow(mask, cmap=plt.cm.gray)\n\nax[2].set_title('Defected image')\nax[2].imshow(image_defect)\n\nax[3].set_title('Inpainted image')\nax[3].imshow(image_result)\n\nfor a in ax:\n a.axis('off')\n\nfig.tight_layout()\nplt.show()\n", "path": "doc/examples/filters/plot_inpaint.py"}]} | 1,769 | 454 |
gh_patches_debug_21673 | rasdani/github-patches | git_diff | ivy-llc__ivy-13280 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
unwrap
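The request is terse; judging from the NumPy-frontend module listed below, it appears to ask for an implementation of `numpy.unwrap` in Ivy's NumPy frontend. For reference, a small sketch of the standard NumPy behaviour being mirrored (values taken from the NumPy docs example, rounded in the comments):

```python
import numpy as np

phase = np.linspace(0, np.pi, num=5)
phase[3:] += np.pi               # introduce a jump larger than pi
print(phase)                     # approx [0.  0.7854  1.5708  5.4978  6.2832]
print(np.unwrap(phase))          # approx [0.  0.7854  1.5708 -0.7854  0.    ]
```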
</issue>
<code>
[start of ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py]
[end of ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py b/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py
--- a/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py
+++ b/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py
@@ -0,0 +1,48 @@
+# global
+import ivy
+
+# local
+from ivy.functional.frontends.numpy.func_wrapper import (
+ to_ivy_arrays_and_back,
+ handle_numpy_dtype,
+ from_zero_dim_arrays_to_scalar,
+ handle_numpy_out,
+)
+
+
+
+@handle_numpy_out
+@handle_numpy_dtype
+@to_ivy_arrays_and_back
+@from_zero_dim_arrays_to_scalar
+def unwrap(p, discont=None, axis=-1, *, period=2*pi):
+ p = ivy.Array.asarray(p)
+ nd = p.ndim
+ dd = ivy.diff(p, axis=axis)
+ if discont is None:
+ discont = period/2
+ slice1 = [ivy.slice(None, None)]*nd # full slices
+ slice1[axis] = ivy.slice(1, None)
+ slice1 = ivy.tuple(slice1)
+ dtype = ivy.result_type(dd, period)
+ if ivy.issubdtype(dtype, ivy.integer):
+ interval_high, rem = ivy.divmod(period, 2)
+ boundary_ambiguous = rem == 0
+ else:
+ interval_high = period / 2
+ boundary_ambiguous = True
+ interval_low = -interval_high
+ ddmod = ivy.mod(dd - interval_low, period) + interval_low
+ if boundary_ambiguous:
+ ivy.copyto(ddmod, interval_high,
+ where=(ddmod == interval_low) & (dd > 0))
+ ph_correct = ddmod - dd
+ ivy.copyto(ph_correct, 0, where=ivy.abs(dd) < discont)
+ up = ivy.array(p, copy=True, dtype=dtype)
+ up[slice1] = p[slice1] + ph_correct.cumsum(axis)
+ return up
+
+my_list = [24,8,3,4,34,8]
+ans = unwrap(my_list)
+print("After the np.unwrap()")
+print(ans)
\ No newline at end of file
| {"golden_diff": "diff --git a/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py b/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py\n--- a/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py\n+++ b/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py\n@@ -0,0 +1,48 @@\n+# global\n+import ivy\n+\n+# local\n+from ivy.functional.frontends.numpy.func_wrapper import (\n+ to_ivy_arrays_and_back,\n+ handle_numpy_dtype,\n+ from_zero_dim_arrays_to_scalar,\n+ handle_numpy_out,\n+)\n+\n+\n+\n+@handle_numpy_out\n+@handle_numpy_dtype\n+@to_ivy_arrays_and_back\n+@from_zero_dim_arrays_to_scalar\n+def unwrap(p, discont=None, axis=-1, *, period=2*pi):\n+ p = ivy.Array.asarray(p)\n+ nd = p.ndim\n+ dd = ivy.diff(p, axis=axis)\n+ if discont is None:\n+ discont = period/2\n+ slice1 = [ivy.slice(None, None)]*nd # full slices\n+ slice1[axis] = ivy.slice(1, None)\n+ slice1 = ivy.tuple(slice1)\n+ dtype = ivy.result_type(dd, period)\n+ if ivy.issubdtype(dtype, ivy.integer):\n+ interval_high, rem = ivy.divmod(period, 2)\n+ boundary_ambiguous = rem == 0\n+ else:\n+ interval_high = period / 2\n+ boundary_ambiguous = True\n+ interval_low = -interval_high\n+ ddmod = ivy.mod(dd - interval_low, period) + interval_low\n+ if boundary_ambiguous:\n+ ivy.copyto(ddmod, interval_high,\n+ where=(ddmod == interval_low) & (dd > 0))\n+ ph_correct = ddmod - dd\n+ ivy.copyto(ph_correct, 0, where=ivy.abs(dd) < discont)\n+ up = ivy.array(p, copy=True, dtype=dtype)\n+ up[slice1] = p[slice1] + ph_correct.cumsum(axis)\n+ return up\n+\n+my_list = [24,8,3,4,34,8]\n+ans = unwrap(my_list)\n+print(\"After the np.unwrap()\")\n+print(ans)\n\\ No newline at end of file\n", "issue": "unwrap\n\n", "before_files": [{"content": "", "path": "ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py"}]} | 560 | 552 |
gh_patches_debug_5853 | rasdani/github-patches | git_diff | vllm-project__vllm-147 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Write README
</issue>
<code>
[start of setup.py]
1 import io
2 import os
3 import re
4 import subprocess
5 from typing import List, Set
6
7 from packaging.version import parse, Version
8 import setuptools
9 import torch
10 from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME
11
12 ROOT_DIR = os.path.dirname(__file__)
13
14 # Compiler flags.
15 CXX_FLAGS = ["-g", "-O2", "-std=c++17"]
16 # TODO(woosuk): Should we use -O3?
17 NVCC_FLAGS = ["-O2", "-std=c++17"]
18
19 ABI = 1 if torch._C._GLIBCXX_USE_CXX11_ABI else 0
20 CXX_FLAGS += [f"-D_GLIBCXX_USE_CXX11_ABI={ABI}"]
21 NVCC_FLAGS += [f"-D_GLIBCXX_USE_CXX11_ABI={ABI}"]
22
23 if not torch.cuda.is_available():
24 raise RuntimeError(
25 f"Cannot find CUDA at CUDA_HOME: {CUDA_HOME}. "
26 "CUDA must be available in order to build the package.")
27
28
29 def get_nvcc_cuda_version(cuda_dir: str) -> Version:
30 """Get the CUDA version from nvcc.
31
32 Adapted from https://github.com/NVIDIA/apex/blob/8b7a1ff183741dd8f9b87e7bafd04cfde99cea28/setup.py
33 """
34 nvcc_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"],
35 universal_newlines=True)
36 output = nvcc_output.split()
37 release_idx = output.index("release") + 1
38 nvcc_cuda_version = parse(output[release_idx].split(",")[0])
39 return nvcc_cuda_version
40
41
42 # Collect the compute capabilities of all available GPUs.
43 device_count = torch.cuda.device_count()
44 compute_capabilities: Set[int] = set()
45 for i in range(device_count):
46 major, minor = torch.cuda.get_device_capability(i)
47 if major < 7:
48 raise RuntimeError(
49 "GPUs with compute capability less than 7.0 are not supported.")
50 compute_capabilities.add(major * 10 + minor)
51 # If no GPU is available, add all supported compute capabilities.
52 if not compute_capabilities:
53 compute_capabilities = {70, 75, 80, 86, 90}
54 # Add target compute capabilities to NVCC flags.
55 for capability in compute_capabilities:
56 NVCC_FLAGS += ["-gencode", f"arch=compute_{capability},code=sm_{capability}"]
57
58 # Validate the NVCC CUDA version.
59 nvcc_cuda_version = get_nvcc_cuda_version(CUDA_HOME)
60 if nvcc_cuda_version < Version("11.0"):
61 raise RuntimeError("CUDA 11.0 or higher is required to build the package.")
62 if 86 in compute_capabilities and nvcc_cuda_version < Version("11.1"):
63 raise RuntimeError(
64 "CUDA 11.1 or higher is required for GPUs with compute capability 8.6.")
65 if 90 in compute_capabilities and nvcc_cuda_version < Version("11.8"):
66 raise RuntimeError(
67 "CUDA 11.8 or higher is required for GPUs with compute capability 9.0.")
68
69 # Use NVCC threads to parallelize the build.
70 if nvcc_cuda_version >= Version("11.2"):
71 num_threads = min(os.cpu_count(), 8)
72 NVCC_FLAGS += ["--threads", str(num_threads)]
73
74 ext_modules = []
75
76 # Cache operations.
77 cache_extension = CUDAExtension(
78 name="vllm.cache_ops",
79 sources=["csrc/cache.cpp", "csrc/cache_kernels.cu"],
80 extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS},
81 )
82 ext_modules.append(cache_extension)
83
84 # Attention kernels.
85 attention_extension = CUDAExtension(
86 name="vllm.attention_ops",
87 sources=["csrc/attention.cpp", "csrc/attention/attention_kernels.cu"],
88 extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS},
89 )
90 ext_modules.append(attention_extension)
91
92 # Positional encoding kernels.
93 positional_encoding_extension = CUDAExtension(
94 name="vllm.pos_encoding_ops",
95 sources=["csrc/pos_encoding.cpp", "csrc/pos_encoding_kernels.cu"],
96 extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS},
97 )
98 ext_modules.append(positional_encoding_extension)
99
100 # Layer normalization kernels.
101 layernorm_extension = CUDAExtension(
102 name="vllm.layernorm_ops",
103 sources=["csrc/layernorm.cpp", "csrc/layernorm_kernels.cu"],
104 extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS},
105 )
106 ext_modules.append(layernorm_extension)
107
108 # Activation kernels.
109 activation_extension = CUDAExtension(
110 name="vllm.activation_ops",
111 sources=["csrc/activation.cpp", "csrc/activation_kernels.cu"],
112 extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS},
113 )
114 ext_modules.append(activation_extension)
115
116
117 def get_path(*filepath) -> str:
118 return os.path.join(ROOT_DIR, *filepath)
119
120
121 def find_version(filepath: str):
122 """Extract version information from the given filepath.
123
124 Adapted from https://github.com/ray-project/ray/blob/0b190ee1160eeca9796bc091e07eaebf4c85b511/python/setup.py
125 """
126 with open(filepath) as fp:
127 version_match = re.search(
128 r"^__version__ = ['\"]([^'\"]*)['\"]", fp.read(), re.M)
129 if version_match:
130 return version_match.group(1)
131 raise RuntimeError("Unable to find version string.")
132
133
134 def read_readme() -> str:
135 """Read the README file."""
136 return io.open(get_path("README.md"), "r", encoding="utf-8").read()
137
138
139 def get_requirements() -> List[str]:
140 """Get Python package dependencies from requirements.txt."""
141 with open(get_path("requirements.txt")) as f:
142 requirements = f.read().strip().split("\n")
143 return requirements
144
145
146 setuptools.setup(
147 name="vllm",
148 version=find_version(get_path("vllm", "__init__.py")),
149 author="vLLM Team",
150 author_email="[email protected]", # FIXME
151 license="Apache 2.0",
152 description="vLLM: Easy, Fast, and Cheap LLM Serving with PagedAttention", # FIXME
153 long_description=read_readme(),
154 long_description_content_type="text/markdown",
155 url="https://github.com/WoosukKwon/vllm",
156 project_urls={
157 "Homepage": "https://github.com/WoosukKwon/vllm",
158 "Documentation": "https://vllm.readthedocs.io/en/latest/", # FIXME
159 },
160 classifiers=[
161 "Programming Language :: Python :: 3.8",
162 "Programming Language :: Python :: 3.9",
163 "Programming Language :: Python :: 3.10",
164 "License :: OSI Approved :: Apache Software License",
165 "Topic :: Scientific/Engineering :: Artificial Intelligence",
166 ],
167 packages=setuptools.find_packages(
168 exclude=("benchmarks", "csrc", "docs", "examples", "tests")),
169 python_requires=">=3.8",
170 install_requires=get_requirements(),
171 ext_modules=ext_modules,
172 cmdclass={"build_ext": BuildExtension},
173 )
174
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -165,7 +165,7 @@
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
packages=setuptools.find_packages(
- exclude=("benchmarks", "csrc", "docs", "examples", "tests")),
+ exclude=("assets", "benchmarks", "csrc", "docs", "examples", "tests")),
python_requires=">=3.8",
install_requires=get_requirements(),
ext_modules=ext_modules,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -165,7 +165,7 @@\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n packages=setuptools.find_packages(\n- exclude=(\"benchmarks\", \"csrc\", \"docs\", \"examples\", \"tests\")),\n+ exclude=(\"assets\", \"benchmarks\", \"csrc\", \"docs\", \"examples\", \"tests\")),\n python_requires=\">=3.8\",\n install_requires=get_requirements(),\n ext_modules=ext_modules,\n", "issue": "Write README\n\n", "before_files": [{"content": "import io\nimport os\nimport re\nimport subprocess\nfrom typing import List, Set\n\nfrom packaging.version import parse, Version\nimport setuptools\nimport torch\nfrom torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME\n\nROOT_DIR = os.path.dirname(__file__)\n\n# Compiler flags.\nCXX_FLAGS = [\"-g\", \"-O2\", \"-std=c++17\"]\n# TODO(woosuk): Should we use -O3?\nNVCC_FLAGS = [\"-O2\", \"-std=c++17\"]\n\nABI = 1 if torch._C._GLIBCXX_USE_CXX11_ABI else 0\nCXX_FLAGS += [f\"-D_GLIBCXX_USE_CXX11_ABI={ABI}\"]\nNVCC_FLAGS += [f\"-D_GLIBCXX_USE_CXX11_ABI={ABI}\"]\n\nif not torch.cuda.is_available():\n raise RuntimeError(\n f\"Cannot find CUDA at CUDA_HOME: {CUDA_HOME}. \"\n \"CUDA must be available in order to build the package.\")\n\n\ndef get_nvcc_cuda_version(cuda_dir: str) -> Version:\n \"\"\"Get the CUDA version from nvcc.\n\n Adapted from https://github.com/NVIDIA/apex/blob/8b7a1ff183741dd8f9b87e7bafd04cfde99cea28/setup.py\n \"\"\"\n nvcc_output = subprocess.check_output([cuda_dir + \"/bin/nvcc\", \"-V\"],\n universal_newlines=True)\n output = nvcc_output.split()\n release_idx = output.index(\"release\") + 1\n nvcc_cuda_version = parse(output[release_idx].split(\",\")[0])\n return nvcc_cuda_version\n\n\n# Collect the compute capabilities of all available GPUs.\ndevice_count = torch.cuda.device_count()\ncompute_capabilities: Set[int] = set()\nfor i in range(device_count):\n major, minor = torch.cuda.get_device_capability(i)\n if major < 7:\n raise RuntimeError(\n \"GPUs with compute capability less than 7.0 are not supported.\")\n compute_capabilities.add(major * 10 + minor)\n# If no GPU is available, add all supported compute capabilities.\nif not compute_capabilities:\n compute_capabilities = {70, 75, 80, 86, 90}\n# Add target compute capabilities to NVCC flags.\nfor capability in compute_capabilities:\n NVCC_FLAGS += [\"-gencode\", f\"arch=compute_{capability},code=sm_{capability}\"]\n\n# Validate the NVCC CUDA version.\nnvcc_cuda_version = get_nvcc_cuda_version(CUDA_HOME)\nif nvcc_cuda_version < Version(\"11.0\"):\n raise RuntimeError(\"CUDA 11.0 or higher is required to build the package.\")\nif 86 in compute_capabilities and nvcc_cuda_version < Version(\"11.1\"):\n raise RuntimeError(\n \"CUDA 11.1 or higher is required for GPUs with compute capability 8.6.\")\nif 90 in compute_capabilities and nvcc_cuda_version < Version(\"11.8\"):\n raise RuntimeError(\n \"CUDA 11.8 or higher is required for GPUs with compute capability 9.0.\")\n\n# Use NVCC threads to parallelize the build.\nif nvcc_cuda_version >= Version(\"11.2\"):\n num_threads = min(os.cpu_count(), 8)\n NVCC_FLAGS += [\"--threads\", str(num_threads)]\n\next_modules = []\n\n# Cache operations.\ncache_extension = CUDAExtension(\n name=\"vllm.cache_ops\",\n sources=[\"csrc/cache.cpp\", \"csrc/cache_kernels.cu\"],\n extra_compile_args={\"cxx\": CXX_FLAGS, \"nvcc\": NVCC_FLAGS},\n)\next_modules.append(cache_extension)\n\n# Attention kernels.\nattention_extension = CUDAExtension(\n 
name=\"vllm.attention_ops\",\n sources=[\"csrc/attention.cpp\", \"csrc/attention/attention_kernels.cu\"],\n extra_compile_args={\"cxx\": CXX_FLAGS, \"nvcc\": NVCC_FLAGS},\n)\next_modules.append(attention_extension)\n\n# Positional encoding kernels.\npositional_encoding_extension = CUDAExtension(\n name=\"vllm.pos_encoding_ops\",\n sources=[\"csrc/pos_encoding.cpp\", \"csrc/pos_encoding_kernels.cu\"],\n extra_compile_args={\"cxx\": CXX_FLAGS, \"nvcc\": NVCC_FLAGS},\n)\next_modules.append(positional_encoding_extension)\n\n# Layer normalization kernels.\nlayernorm_extension = CUDAExtension(\n name=\"vllm.layernorm_ops\",\n sources=[\"csrc/layernorm.cpp\", \"csrc/layernorm_kernels.cu\"],\n extra_compile_args={\"cxx\": CXX_FLAGS, \"nvcc\": NVCC_FLAGS},\n)\next_modules.append(layernorm_extension)\n\n# Activation kernels.\nactivation_extension = CUDAExtension(\n name=\"vllm.activation_ops\",\n sources=[\"csrc/activation.cpp\", \"csrc/activation_kernels.cu\"],\n extra_compile_args={\"cxx\": CXX_FLAGS, \"nvcc\": NVCC_FLAGS},\n)\next_modules.append(activation_extension)\n\n\ndef get_path(*filepath) -> str:\n return os.path.join(ROOT_DIR, *filepath)\n\n\ndef find_version(filepath: str):\n \"\"\"Extract version information from the given filepath.\n\n Adapted from https://github.com/ray-project/ray/blob/0b190ee1160eeca9796bc091e07eaebf4c85b511/python/setup.py\n \"\"\"\n with open(filepath) as fp:\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", fp.read(), re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\ndef read_readme() -> str:\n \"\"\"Read the README file.\"\"\"\n return io.open(get_path(\"README.md\"), \"r\", encoding=\"utf-8\").read()\n\n\ndef get_requirements() -> List[str]:\n \"\"\"Get Python package dependencies from requirements.txt.\"\"\"\n with open(get_path(\"requirements.txt\")) as f:\n requirements = f.read().strip().split(\"\\n\")\n return requirements\n\n\nsetuptools.setup(\n name=\"vllm\",\n version=find_version(get_path(\"vllm\", \"__init__.py\")),\n author=\"vLLM Team\",\n author_email=\"[email protected]\", # FIXME\n license=\"Apache 2.0\",\n description=\"vLLM: Easy, Fast, and Cheap LLM Serving with PagedAttention\", # FIXME\n long_description=read_readme(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/WoosukKwon/vllm\",\n project_urls={\n \"Homepage\": \"https://github.com/WoosukKwon/vllm\",\n \"Documentation\": \"https://vllm.readthedocs.io/en/latest/\", # FIXME\n },\n classifiers=[\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n packages=setuptools.find_packages(\n exclude=(\"benchmarks\", \"csrc\", \"docs\", \"examples\", \"tests\")),\n python_requires=\">=3.8\",\n install_requires=get_requirements(),\n ext_modules=ext_modules,\n cmdclass={\"build_ext\": BuildExtension},\n)\n", "path": "setup.py"}]} | 2,569 | 119 |
gh_patches_debug_18052 | rasdani/github-patches | git_diff | scikit-image__scikit-image-4064 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
travis has random failures on rank filters
## Description
See for ex: https://travis-ci.org/scikit-image/scikit-image/jobs/563363217
## Way to reproduce
```python
# Place the full code we need to recreate your issue here
# upload all necessary images to github too!
```
## Version information
```python
# Paste the output of the following python commands
from __future__ import print_function
import sys; print(sys.version)
import platform; print(platform.platform())
import skimage; print("scikit-image version: {}".format(skimage.__version__))
import numpy; print("numpy version: {}".format(numpy.__version__))
```
```python
# your output here
```
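For context, the `expected_warnings` helper quoted in the listing below already supports marking a pattern as optional via a trailing `|\A\Z`, which is one way a warning-dependent test can be kept from failing intermittently. A hedged sketch (array and pattern chosen for illustration, not taken from the failing Travis test):

```python
import numpy as np
from skimage import filters
from skimage._shared._warnings import expected_warnings

image = np.random.randint(0, 2**16, size=(100, 100), dtype=np.uint16)

# The trailing "|\A\Z" marks the pattern as optional, so the block passes
# whether or not the performance warning is emitted on this particular run.
with expected_warnings([r'Bad rank filter performance|\A\Z']):
    median_filtered = filters.rank.median(image)
```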
</issue>
<code>
[start of skimage/_shared/_warnings.py]
1 from contextlib import contextmanager
2 import sys
3 import warnings
4 import re
5 import os
6
7 __all__ = ['all_warnings', 'expected_warnings', 'warn']
8
9
10 def warn(message, category=None, stacklevel=2):
11 """A version of `warnings.warn` with a default stacklevel of 2.
12 """
13 if category is not None:
14 warnings.warn(message, category=category, stacklevel=stacklevel)
15 else:
16 warnings.warn(message, stacklevel=stacklevel)
17
18
19 @contextmanager
20 def all_warnings():
21 """
22 Context for use in testing to ensure that all warnings are raised.
23
24 Examples
25 --------
26 >>> import warnings
27 >>> def foo():
28 ... warnings.warn(RuntimeWarning("bar"))
29
30 We raise the warning once, while the warning filter is set to "once".
31 Hereafter, the warning is invisible, even with custom filters:
32
33 >>> with warnings.catch_warnings():
34 ... warnings.simplefilter('once')
35 ... foo()
36
37 We can now run ``foo()`` without a warning being raised:
38
39 >>> from numpy.testing import assert_warns
40 >>> foo()
41
42 To catch the warning, we call in the help of ``all_warnings``:
43
44 >>> with all_warnings():
45 ... assert_warns(RuntimeWarning, foo)
46 """
47 # _warnings.py is on the critical import path.
48 # Since this is a testing only function, we lazy import inspect.
49 import inspect
50 # Whenever a warning is triggered, Python adds a __warningregistry__
51 # member to the *calling* module. The exercize here is to find
52 # and eradicate all those breadcrumbs that were left lying around.
53 #
54 # We proceed by first searching all parent calling frames and explicitly
55 # clearing their warning registries (necessary for the doctests above to
56 # pass). Then, we search for all submodules of skimage and clear theirs
57 # as well (necessary for the skimage test suite to pass).
58
59 frame = inspect.currentframe()
60 if frame:
61 for f in inspect.getouterframes(frame):
62 f[0].f_locals['__warningregistry__'] = {}
63 del frame
64
65 for mod_name, mod in list(sys.modules.items()):
66 try:
67 mod.__warningregistry__.clear()
68 except AttributeError:
69 pass
70
71 with warnings.catch_warnings(record=True) as w:
72 warnings.simplefilter("always")
73 yield w
74
75
76 @contextmanager
77 def expected_warnings(matching):
78 r"""Context for use in testing to catch known warnings matching regexes
79
80 Parameters
81 ----------
82 matching : list of strings or compiled regexes
83 Regexes for the desired warning to catch
84
85 Examples
86 --------
87 >>> import numpy as np
88 >>> image = np.random.randint(0, 2**16, size=(100, 100), dtype=np.uint16)
89 >>> # rank filters are slow when bit-depth exceeds 10 bits
90 >>> from skimage import filters
91 >>> with expected_warnings(['Bad rank filter performance']):
92 ... median_filtered = filters.rank.median(image)
93
94 Notes
95 -----
96 Uses `all_warnings` to ensure all warnings are raised.
97 Upon exiting, it checks the recorded warnings for the desired matching
98 pattern(s).
99 Raises a ValueError if any match was not found or an unexpected
100 warning was raised.
101 Allows for three types of behaviors: `and`, `or`, and `optional` matches.
102 This is done to accommodate different build environments or loop conditions
103 that may produce different warnings. The behaviors can be combined.
104 If you pass multiple patterns, you get an orderless `and`, where all of the
105 warnings must be raised.
106 If you use the `|` operator in a pattern, you can catch one of several
107 warnings.
108 Finally, you can use `|\A\Z` in a pattern to signify it as optional.
109
110 """
111 if isinstance(matching, str):
112 raise ValueError('``matching`` should be a list of strings and not '
113 'a string itself.')
114
115 strict_warnings = os.environ.get('SKIMAGE_TEST_STRICT_WARNINGS', '1')
116 if strict_warnings.lower() == 'true':
117 strict_warnings = True
118 elif strict_warnings.lower() == 'false':
119 strict_warnings = False
120 else:
121 strict_warnings = bool(int(strict_warnings))
122
123 with all_warnings() as w:
124 # enter context
125 yield w
126 # exited user context, check the recorded warnings
127 # Allow users to provide None
128 while None in matching:
129 matching.remove(None)
130 remaining = [m for m in matching if r'\A\Z' not in m.split('|')]
131 for warn in w:
132 found = False
133 for match in matching:
134 if re.search(match, str(warn.message)) is not None:
135 found = True
136 if match in remaining:
137 remaining.remove(match)
138 if strict_warnings and not found:
139 raise ValueError('Unexpected warning: %s' % str(warn.message))
140 if strict_warnings and (len(remaining) > 0):
141 msg = 'No warning raised matching:\n%s' % '\n'.join(remaining)
142 raise ValueError(msg)
143
[end of skimage/_shared/_warnings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/_shared/_warnings.py b/skimage/_shared/_warnings.py
--- a/skimage/_shared/_warnings.py
+++ b/skimage/_shared/_warnings.py
@@ -79,8 +79,9 @@
Parameters
----------
- matching : list of strings or compiled regexes
+ matching : None or a list of strings or compiled regexes
Regexes for the desired warning to catch
+ If matching is None, this behaves as a no-op.
Examples
--------
@@ -112,6 +113,11 @@
raise ValueError('``matching`` should be a list of strings and not '
'a string itself.')
+ # Special case for disabling the context manager
+ if matching is None:
+ yield None
+ return
+
strict_warnings = os.environ.get('SKIMAGE_TEST_STRICT_WARNINGS', '1')
if strict_warnings.lower() == 'true':
strict_warnings = True
| {"golden_diff": "diff --git a/skimage/_shared/_warnings.py b/skimage/_shared/_warnings.py\n--- a/skimage/_shared/_warnings.py\n+++ b/skimage/_shared/_warnings.py\n@@ -79,8 +79,9 @@\n \n Parameters\n ----------\n- matching : list of strings or compiled regexes\n+ matching : None or a list of strings or compiled regexes\n Regexes for the desired warning to catch\n+ If matching is None, this behaves as a no-op.\n \n Examples\n --------\n@@ -112,6 +113,11 @@\n raise ValueError('``matching`` should be a list of strings and not '\n 'a string itself.')\n \n+ # Special case for disabling the context manager\n+ if matching is None:\n+ yield None\n+ return\n+\n strict_warnings = os.environ.get('SKIMAGE_TEST_STRICT_WARNINGS', '1')\n if strict_warnings.lower() == 'true':\n strict_warnings = True\n", "issue": "travis has random failures on rank filters\n## Description\r\n\r\nSee for ex: https://travis-ci.org/scikit-image/scikit-image/jobs/563363217\r\n\r\n\r\n## Way to reproduce\r\n```python\r\n# Place the full code we need to recreate your issue here\r\n# upload all necessary images to github too!\r\n```\r\n\r\n\r\n## Version information\r\n```python\r\n# Paste the output of the following python commands\r\nfrom __future__ import print_function\r\nimport sys; print(sys.version)\r\nimport platform; print(platform.platform())\r\nimport skimage; print(\"scikit-image version: {}\".format(skimage.__version__))\r\nimport numpy; print(\"numpy version: {}\".format(numpy.__version__))\r\n```\r\n\r\n```python\r\n# your output here\r\n\r\n```\r\n\r\n\n", "before_files": [{"content": "from contextlib import contextmanager\nimport sys\nimport warnings\nimport re\nimport os\n\n__all__ = ['all_warnings', 'expected_warnings', 'warn']\n\n\ndef warn(message, category=None, stacklevel=2):\n \"\"\"A version of `warnings.warn` with a default stacklevel of 2.\n \"\"\"\n if category is not None:\n warnings.warn(message, category=category, stacklevel=stacklevel)\n else:\n warnings.warn(message, stacklevel=stacklevel)\n\n\n@contextmanager\ndef all_warnings():\n \"\"\"\n Context for use in testing to ensure that all warnings are raised.\n\n Examples\n --------\n >>> import warnings\n >>> def foo():\n ... warnings.warn(RuntimeWarning(\"bar\"))\n\n We raise the warning once, while the warning filter is set to \"once\".\n Hereafter, the warning is invisible, even with custom filters:\n\n >>> with warnings.catch_warnings():\n ... warnings.simplefilter('once')\n ... foo()\n\n We can now run ``foo()`` without a warning being raised:\n\n >>> from numpy.testing import assert_warns\n >>> foo()\n\n To catch the warning, we call in the help of ``all_warnings``:\n\n >>> with all_warnings():\n ... assert_warns(RuntimeWarning, foo)\n \"\"\"\n # _warnings.py is on the critical import path.\n # Since this is a testing only function, we lazy import inspect.\n import inspect\n # Whenever a warning is triggered, Python adds a __warningregistry__\n # member to the *calling* module. The exercize here is to find\n # and eradicate all those breadcrumbs that were left lying around.\n #\n # We proceed by first searching all parent calling frames and explicitly\n # clearing their warning registries (necessary for the doctests above to\n # pass). 
Then, we search for all submodules of skimage and clear theirs\n # as well (necessary for the skimage test suite to pass).\n\n frame = inspect.currentframe()\n if frame:\n for f in inspect.getouterframes(frame):\n f[0].f_locals['__warningregistry__'] = {}\n del frame\n\n for mod_name, mod in list(sys.modules.items()):\n try:\n mod.__warningregistry__.clear()\n except AttributeError:\n pass\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n yield w\n\n\n@contextmanager\ndef expected_warnings(matching):\n r\"\"\"Context for use in testing to catch known warnings matching regexes\n\n Parameters\n ----------\n matching : list of strings or compiled regexes\n Regexes for the desired warning to catch\n\n Examples\n --------\n >>> import numpy as np\n >>> image = np.random.randint(0, 2**16, size=(100, 100), dtype=np.uint16)\n >>> # rank filters are slow when bit-depth exceeds 10 bits\n >>> from skimage import filters\n >>> with expected_warnings(['Bad rank filter performance']):\n ... median_filtered = filters.rank.median(image)\n\n Notes\n -----\n Uses `all_warnings` to ensure all warnings are raised.\n Upon exiting, it checks the recorded warnings for the desired matching\n pattern(s).\n Raises a ValueError if any match was not found or an unexpected\n warning was raised.\n Allows for three types of behaviors: `and`, `or`, and `optional` matches.\n This is done to accommodate different build environments or loop conditions\n that may produce different warnings. The behaviors can be combined.\n If you pass multiple patterns, you get an orderless `and`, where all of the\n warnings must be raised.\n If you use the `|` operator in a pattern, you can catch one of several\n warnings.\n Finally, you can use `|\\A\\Z` in a pattern to signify it as optional.\n\n \"\"\"\n if isinstance(matching, str):\n raise ValueError('``matching`` should be a list of strings and not '\n 'a string itself.')\n\n strict_warnings = os.environ.get('SKIMAGE_TEST_STRICT_WARNINGS', '1')\n if strict_warnings.lower() == 'true':\n strict_warnings = True\n elif strict_warnings.lower() == 'false':\n strict_warnings = False\n else:\n strict_warnings = bool(int(strict_warnings))\n\n with all_warnings() as w:\n # enter context\n yield w\n # exited user context, check the recorded warnings\n # Allow users to provide None\n while None in matching:\n matching.remove(None)\n remaining = [m for m in matching if r'\\A\\Z' not in m.split('|')]\n for warn in w:\n found = False\n for match in matching:\n if re.search(match, str(warn.message)) is not None:\n found = True\n if match in remaining:\n remaining.remove(match)\n if strict_warnings and not found:\n raise ValueError('Unexpected warning: %s' % str(warn.message))\n if strict_warnings and (len(remaining) > 0):\n msg = 'No warning raised matching:\\n%s' % '\\n'.join(remaining)\n raise ValueError(msg)\n", "path": "skimage/_shared/_warnings.py"}]} | 2,131 | 219 |
gh_patches_debug_30379 | rasdani/github-patches | git_diff | GPflow__GPflow-1654 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
A multioutput Kuf function throws an error
<!-- Lines like this are comments and will be invisible -->
# Bug
<!-- A clear and concise description of what the bug is. -->
The multioutput covariance function `Kuf` has several functions corresponding to different combinations of parameters. The function which accepts `FallbackSeparateIndependentInducingVariables` or `FallbackSharedIndependentInducingVariables` along with the `LinearCoregionalization` kernel throws an error.
## To reproduce
**Minimal, reproducible example**
<!-- We need to be able to reproduce the bug by simply copy and pasting your code -->
```python
import numpy as np
import gpflow
import gpflow.inducing_variables.multioutput as mf
import gpflow.kernels.multioutput as mk
from gpflow.covariances.multioutput import kufs as mo_kufs
inducing_variables = mf.FallbackSharedIndependentInducingVariables(
gpflow.inducing_variables.InducingPoints(np.random.rand(1, 1)))
kernel = mk.LinearCoregionalization(make_kernels(Datum.L), Datum.W)
Kuf = mo_kufs.Kuf(inducing_variables, kernel, Datum.Xnew)
```
**Stack trace, or error message**
```
@Kuf.register(
(FallbackSeparateIndependentInducingVariables, FallbackSharedIndependentInducingVariables),
LinearCoregionalization,
object,
)
def _Kuf(
inducing_variable: Union[
SeparateIndependentInducingVariables, SharedIndependentInducingVariables
],
kernel: LinearCoregionalization,
Xnew: tf.Tensor,
):
kuf_impl = Kuf.dispatch(type(inducing_variable), SeparateIndependent, object)
> K = tf.transpose(kuf_impl(inducing_variable, kernel, Xnew), [1, 0, 2]) # [M, L, N]
E TypeError: 'NoneType' object is not callable
../../../gpflow/covariances/multioutput/kufs.py:96: TypeError
```
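For context, a minimal stand-alone sketch (not GPflow code) of why the lookup in the trace comes back as `None`, assuming the `Kuf` dispatcher follows standard `multipledispatch` semantics: only the concrete inducing-variable types are registered against `SeparateIndependent`, so a query keyed on the more general `Fallback*` parent type matches nothing.

```python
from multipledispatch import Dispatcher

class FallbackIV:              # stand-in for FallbackSharedIndependentInducingVariables
    pass

class ConcreteIV(FallbackIV):  # stand-in for SharedIndependentInducingVariables
    pass

Kuf_demo = Dispatcher("Kuf_demo")
Kuf_demo.register(ConcreteIV, object)(lambda iv, x: "concrete implementation")

print(Kuf_demo.dispatch(ConcreteIV, object))  # matches the registration
print(Kuf_demo.dispatch(FallbackIV, object))  # None: the parent type is not covered
# Calling that None is what raises "'NoneType' object is not callable".
```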
## Expected behavior
The covariance should be computed.
## System information
* GPflow version: 2.1.4
* GPflow installed from: git commit 405eb97dd30cb43f02501b58b581f2608eb6e43e
* TensorFlow version: 2.3.0
* Python version: 3.7.5
* Operating system: Ubuntu 18.04.5 LTS
## Additional context
<!-- Add any other context about the problem here. -->
</issue>
<code>
[start of gpflow/covariances/multioutput/kufs.py]
1 # Copyright 2017-2020 The GPflow Contributors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Union
16
17 import tensorflow as tf
18
19 from ...inducing_variables import (
20 FallbackSeparateIndependentInducingVariables,
21 FallbackSharedIndependentInducingVariables,
22 InducingPoints,
23 SeparateIndependentInducingVariables,
24 SharedIndependentInducingVariables,
25 )
26 from ...kernels import (
27 LinearCoregionalization,
28 MultioutputKernel,
29 SeparateIndependent,
30 SharedIndependent,
31 )
32 from ..dispatch import Kuf
33
34
35 @Kuf.register(InducingPoints, MultioutputKernel, object)
36 def _Kuf(inducing_variable: InducingPoints, kernel: MultioutputKernel, Xnew: tf.Tensor):
37 return kernel(inducing_variable.Z, Xnew, full_cov=True, full_output_cov=True) # [M, P, N, P]
38
39
40 @Kuf.register(SharedIndependentInducingVariables, SharedIndependent, object)
41 def _Kuf(
42 inducing_variable: SharedIndependentInducingVariables,
43 kernel: SharedIndependent,
44 Xnew: tf.Tensor,
45 ):
46 return Kuf(inducing_variable.inducing_variable, kernel.kernel, Xnew) # [M, N]
47
48
49 @Kuf.register(SeparateIndependentInducingVariables, SharedIndependent, object)
50 def _Kuf(
51 inducing_variable: SeparateIndependentInducingVariables,
52 kernel: SharedIndependent,
53 Xnew: tf.Tensor,
54 ):
55 return tf.stack(
56 [Kuf(f, kernel.kernel, Xnew) for f in inducing_variable.inducing_variable_list], axis=0
57 ) # [L, M, N]
58
59
60 @Kuf.register(SharedIndependentInducingVariables, SeparateIndependent, object)
61 def _Kuf(
62 inducing_variable: SharedIndependentInducingVariables,
63 kernel: SeparateIndependent,
64 Xnew: tf.Tensor,
65 ):
66 return tf.stack(
67 [Kuf(inducing_variable.inducing_variable, k, Xnew) for k in kernel.kernels], axis=0
68 ) # [L, M, N]
69
70
71 @Kuf.register(SeparateIndependentInducingVariables, SeparateIndependent, object)
72 def _Kuf(
73 inducing_variable: SeparateIndependentInducingVariables,
74 kernel: SeparateIndependent,
75 Xnew: tf.Tensor,
76 ):
77 Kufs = [
78 Kuf(f, k, Xnew) for f, k in zip(inducing_variable.inducing_variable_list, kernel.kernels)
79 ]
80 return tf.stack(Kufs, axis=0) # [L, M, N]
81
82
83 @Kuf.register(
84 (FallbackSeparateIndependentInducingVariables, FallbackSharedIndependentInducingVariables),
85 LinearCoregionalization,
86 object,
87 )
88 def _Kuf(
89 inducing_variable: Union[
90 SeparateIndependentInducingVariables, SharedIndependentInducingVariables
91 ],
92 kernel: LinearCoregionalization,
93 Xnew: tf.Tensor,
94 ):
95 kuf_impl = Kuf.dispatch(type(inducing_variable), SeparateIndependent, object)
96 K = tf.transpose(kuf_impl(inducing_variable, kernel, Xnew), [1, 0, 2]) # [M, L, N]
97 return K[:, :, :, None] * tf.transpose(kernel.W)[None, :, None, :] # [M, L, N, P]
98
99
100 @Kuf.register(SharedIndependentInducingVariables, LinearCoregionalization, object)
101 def _Kuf(
102 inducing_variable: SharedIndependentInducingVariables,
103 kernel: SeparateIndependent,
104 Xnew: tf.Tensor,
105 ):
106 return tf.stack(
107 [Kuf(inducing_variable.inducing_variable, k, Xnew) for k in kernel.kernels], axis=0
108 ) # [L, M, N]
109
110
111 @Kuf.register(SeparateIndependentInducingVariables, LinearCoregionalization, object)
112 def _Kuf(inducing_variable, kernel, Xnew):
113 return tf.stack(
114 [Kuf(f, k, Xnew) for f, k in zip(inducing_variable.inducing_variable_list, kernel.kernels)],
115 axis=0,
116 ) # [L, M, N]
117
[end of gpflow/covariances/multioutput/kufs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gpflow/covariances/multioutput/kufs.py b/gpflow/covariances/multioutput/kufs.py
--- a/gpflow/covariances/multioutput/kufs.py
+++ b/gpflow/covariances/multioutput/kufs.py
@@ -80,23 +80,42 @@
return tf.stack(Kufs, axis=0) # [L, M, N]
[email protected](
- (FallbackSeparateIndependentInducingVariables, FallbackSharedIndependentInducingVariables),
- LinearCoregionalization,
- object,
-)
-def _Kuf(
+def _fallback_Kuf(
+ kuf_impl,
inducing_variable: Union[
SeparateIndependentInducingVariables, SharedIndependentInducingVariables
],
kernel: LinearCoregionalization,
Xnew: tf.Tensor,
):
- kuf_impl = Kuf.dispatch(type(inducing_variable), SeparateIndependent, object)
K = tf.transpose(kuf_impl(inducing_variable, kernel, Xnew), [1, 0, 2]) # [M, L, N]
return K[:, :, :, None] * tf.transpose(kernel.W)[None, :, None, :] # [M, L, N, P]
[email protected](
+ FallbackSeparateIndependentInducingVariables, LinearCoregionalization, object,
+)
+def _Kuf(
+ inducing_variable: FallbackSeparateIndependentInducingVariables,
+ kernel: LinearCoregionalization,
+ Xnew: tf.Tensor,
+):
+ kuf_impl = Kuf.dispatch(SeparateIndependentInducingVariables, SeparateIndependent, object)
+ return _fallback_Kuf(kuf_impl, inducing_variable, kernel, Xnew)
+
+
[email protected](
+ FallbackSharedIndependentInducingVariables, LinearCoregionalization, object,
+)
+def _Kuf(
+ inducing_variable: FallbackSharedIndependentInducingVariables,
+ kernel: LinearCoregionalization,
+ Xnew: tf.Tensor,
+):
+ kuf_impl = Kuf.dispatch(SharedIndependentInducingVariables, SeparateIndependent, object)
+ return _fallback_Kuf(kuf_impl, inducing_variable, kernel, Xnew)
+
+
@Kuf.register(SharedIndependentInducingVariables, LinearCoregionalization, object)
def _Kuf(
inducing_variable: SharedIndependentInducingVariables,
| {"golden_diff": "diff --git a/gpflow/covariances/multioutput/kufs.py b/gpflow/covariances/multioutput/kufs.py\n--- a/gpflow/covariances/multioutput/kufs.py\n+++ b/gpflow/covariances/multioutput/kufs.py\n@@ -80,23 +80,42 @@\n return tf.stack(Kufs, axis=0) # [L, M, N]\n \n \[email protected](\n- (FallbackSeparateIndependentInducingVariables, FallbackSharedIndependentInducingVariables),\n- LinearCoregionalization,\n- object,\n-)\n-def _Kuf(\n+def _fallback_Kuf(\n+ kuf_impl,\n inducing_variable: Union[\n SeparateIndependentInducingVariables, SharedIndependentInducingVariables\n ],\n kernel: LinearCoregionalization,\n Xnew: tf.Tensor,\n ):\n- kuf_impl = Kuf.dispatch(type(inducing_variable), SeparateIndependent, object)\n K = tf.transpose(kuf_impl(inducing_variable, kernel, Xnew), [1, 0, 2]) # [M, L, N]\n return K[:, :, :, None] * tf.transpose(kernel.W)[None, :, None, :] # [M, L, N, P]\n \n \[email protected](\n+ FallbackSeparateIndependentInducingVariables, LinearCoregionalization, object,\n+)\n+def _Kuf(\n+ inducing_variable: FallbackSeparateIndependentInducingVariables,\n+ kernel: LinearCoregionalization,\n+ Xnew: tf.Tensor,\n+):\n+ kuf_impl = Kuf.dispatch(SeparateIndependentInducingVariables, SeparateIndependent, object)\n+ return _fallback_Kuf(kuf_impl, inducing_variable, kernel, Xnew)\n+\n+\[email protected](\n+ FallbackSharedIndependentInducingVariables, LinearCoregionalization, object,\n+)\n+def _Kuf(\n+ inducing_variable: FallbackSharedIndependentInducingVariables,\n+ kernel: LinearCoregionalization,\n+ Xnew: tf.Tensor,\n+):\n+ kuf_impl = Kuf.dispatch(SharedIndependentInducingVariables, SeparateIndependent, object)\n+ return _fallback_Kuf(kuf_impl, inducing_variable, kernel, Xnew)\n+\n+\n @Kuf.register(SharedIndependentInducingVariables, LinearCoregionalization, object)\n def _Kuf(\n inducing_variable: SharedIndependentInducingVariables,\n", "issue": "A multioutput Kuf function throws an error\n<!-- Lines like this are comments and will be invisible -->\r\n\r\n# Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nThe multioutput covariance function `Kuf` has several functions corresponding to different combinations of parameters. 
The function which accepts `FallbackSeparateIndependentInducingVariables` or `FallbackSharedIndependentInducingVariables` along with the `LinearCoregionalization` kernel throws an error.\r\n\r\n## To reproduce\r\n\r\n**Minimal, reproducible example**\r\n<!-- We need to be able to reproduce the bug by simply copy and pasting your code -->\r\n```python\r\nimport numpy as np\r\nimport gpflow\r\nimport gpflow.inducing_variables.multioutput as mf\r\nimport gpflow.kernels.multioutput as mk\r\nfrom gpflow.covariances.multioutput import kufs as mo_kufs\r\n\r\ninducing_variables = mf.FallbackSharedIndependentInducingVariables(\r\n gpflow.inducing_variables.InducingPoints(np.random.rand(1, 1)))\r\nkernel = mk.LinearCoregionalization(make_kernels(Datum.L), Datum.W)\r\nKuf = mo_kufs.Kuf(inducing_variables, kernel, Datum.Xnew)\r\n```\r\n\r\n**Stack trace, or error message**\r\n```\r\n @Kuf.register(\r\n (FallbackSeparateIndependentInducingVariables, FallbackSharedIndependentInducingVariables),\r\n LinearCoregionalization,\r\n object,\r\n )\r\n def _Kuf(\r\n inducing_variable: Union[\r\n SeparateIndependentInducingVariables, SharedIndependentInducingVariables\r\n ],\r\n kernel: LinearCoregionalization,\r\n Xnew: tf.Tensor,\r\n ):\r\n kuf_impl = Kuf.dispatch(type(inducing_variable), SeparateIndependent, object)\r\n> K = tf.transpose(kuf_impl(inducing_variable, kernel, Xnew), [1, 0, 2]) # [M, L, N]\r\nE TypeError: 'NoneType' object is not callable\r\n\r\n../../../gpflow/covariances/multioutput/kufs.py:96: TypeError\r\n\r\n```\r\n\r\n## Expected behavior\r\n\r\nThe covariance should be computed.\r\n\r\n## System information\r\n\r\n* GPflow version: 2.1.4\r\n* GPflow installed from: git commit 405eb97dd30cb43f02501b58b581f2608eb6e43e\r\n* TensorFlow version: 2.3.0\r\n* Python version: 3.7.5\r\n* Operating system: Ubuntu 18.04.5 LTS\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "# Copyright 2017-2020 The GPflow Contributors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Union\n\nimport tensorflow as tf\n\nfrom ...inducing_variables import (\n FallbackSeparateIndependentInducingVariables,\n FallbackSharedIndependentInducingVariables,\n InducingPoints,\n SeparateIndependentInducingVariables,\n SharedIndependentInducingVariables,\n)\nfrom ...kernels import (\n LinearCoregionalization,\n MultioutputKernel,\n SeparateIndependent,\n SharedIndependent,\n)\nfrom ..dispatch import Kuf\n\n\[email protected](InducingPoints, MultioutputKernel, object)\ndef _Kuf(inducing_variable: InducingPoints, kernel: MultioutputKernel, Xnew: tf.Tensor):\n return kernel(inducing_variable.Z, Xnew, full_cov=True, full_output_cov=True) # [M, P, N, P]\n\n\[email protected](SharedIndependentInducingVariables, SharedIndependent, object)\ndef _Kuf(\n inducing_variable: SharedIndependentInducingVariables,\n kernel: SharedIndependent,\n Xnew: tf.Tensor,\n):\n return Kuf(inducing_variable.inducing_variable, kernel.kernel, Xnew) # [M, N]\n\n\[email protected](SeparateIndependentInducingVariables, SharedIndependent, object)\ndef _Kuf(\n inducing_variable: SeparateIndependentInducingVariables,\n kernel: SharedIndependent,\n Xnew: tf.Tensor,\n):\n return tf.stack(\n [Kuf(f, kernel.kernel, Xnew) for f in inducing_variable.inducing_variable_list], axis=0\n ) # [L, M, N]\n\n\[email protected](SharedIndependentInducingVariables, SeparateIndependent, object)\ndef _Kuf(\n inducing_variable: SharedIndependentInducingVariables,\n kernel: SeparateIndependent,\n Xnew: tf.Tensor,\n):\n return tf.stack(\n [Kuf(inducing_variable.inducing_variable, k, Xnew) for k in kernel.kernels], axis=0\n ) # [L, M, N]\n\n\[email protected](SeparateIndependentInducingVariables, SeparateIndependent, object)\ndef _Kuf(\n inducing_variable: SeparateIndependentInducingVariables,\n kernel: SeparateIndependent,\n Xnew: tf.Tensor,\n):\n Kufs = [\n Kuf(f, k, Xnew) for f, k in zip(inducing_variable.inducing_variable_list, kernel.kernels)\n ]\n return tf.stack(Kufs, axis=0) # [L, M, N]\n\n\[email protected](\n (FallbackSeparateIndependentInducingVariables, FallbackSharedIndependentInducingVariables),\n LinearCoregionalization,\n object,\n)\ndef _Kuf(\n inducing_variable: Union[\n SeparateIndependentInducingVariables, SharedIndependentInducingVariables\n ],\n kernel: LinearCoregionalization,\n Xnew: tf.Tensor,\n):\n kuf_impl = Kuf.dispatch(type(inducing_variable), SeparateIndependent, object)\n K = tf.transpose(kuf_impl(inducing_variable, kernel, Xnew), [1, 0, 2]) # [M, L, N]\n return K[:, :, :, None] * tf.transpose(kernel.W)[None, :, None, :] # [M, L, N, P]\n\n\[email protected](SharedIndependentInducingVariables, LinearCoregionalization, object)\ndef _Kuf(\n inducing_variable: SharedIndependentInducingVariables,\n kernel: SeparateIndependent,\n Xnew: tf.Tensor,\n):\n return tf.stack(\n [Kuf(inducing_variable.inducing_variable, k, Xnew) for k in kernel.kernels], axis=0\n ) # [L, M, N]\n\n\[email protected](SeparateIndependentInducingVariables, 
LinearCoregionalization, object)\ndef _Kuf(inducing_variable, kernel, Xnew):\n return tf.stack(\n [Kuf(f, k, Xnew) for f, k in zip(inducing_variable.inducing_variable_list, kernel.kernels)],\n axis=0,\n ) # [L, M, N]\n", "path": "gpflow/covariances/multioutput/kufs.py"}]} | 2,342 | 531 |
gh_patches_debug_25191 | rasdani/github-patches | git_diff | scipy__scipy-6119 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DeprecationWarnings in stats on python 3.5
```
/home/br/repos/scipy/build/testenv/lib/python3.5/site-packages/scipy/stats/tests/test_stats.py:101: DeprecationWarning: Please use assertRaisesRegex instead.
```
Apparently, `assertRaisesRegexp` was renamed to `assertRaisesRegex`: https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertRaisesRegexp
</issue>
<code>
[start of scipy/_lib/_numpy_compat.py]
1 """Functions copypasted from newer versions of numpy.
2
3 """
4 from __future__ import division, print_function, absolute_import
5
6 import warnings
7
8 import numpy as np
9
10 from scipy._lib._version import NumpyVersion
11
12 if NumpyVersion(np.__version__) > '1.7.0.dev':
13 _assert_warns = np.testing.assert_warns
14 else:
15 def _assert_warns(warning_class, func, *args, **kw):
16 r"""
17 Fail unless the given callable throws the specified warning.
18
19 This definition is copypasted from numpy 1.9.0.dev.
20 The version in earlier numpy returns None.
21
22 Parameters
23 ----------
24 warning_class : class
25 The class defining the warning that `func` is expected to throw.
26 func : callable
27 The callable to test.
28 *args : Arguments
29 Arguments passed to `func`.
30 **kwargs : Kwargs
31 Keyword arguments passed to `func`.
32
33 Returns
34 -------
35 The value returned by `func`.
36
37 """
38 with warnings.catch_warnings(record=True) as l:
39 warnings.simplefilter('always')
40 result = func(*args, **kw)
41 if not len(l) > 0:
42 raise AssertionError("No warning raised when calling %s"
43 % func.__name__)
44 if not l[0].category is warning_class:
45 raise AssertionError("First warning for %s is not a "
46 "%s( is %s)" % (func.__name__, warning_class, l[0]))
47 return result
48
[end of scipy/_lib/_numpy_compat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scipy/_lib/_numpy_compat.py b/scipy/_lib/_numpy_compat.py
--- a/scipy/_lib/_numpy_compat.py
+++ b/scipy/_lib/_numpy_compat.py
@@ -4,8 +4,10 @@
from __future__ import division, print_function, absolute_import
import warnings
+import sys
import numpy as np
+from numpy.testing.nosetester import import_nose
from scipy._lib._version import NumpyVersion
@@ -45,3 +47,28 @@
raise AssertionError("First warning for %s is not a "
"%s( is %s)" % (func.__name__, warning_class, l[0]))
return result
+
+
+def assert_raises_regex(exception_class, expected_regexp,
+ callable_obj=None, *args, **kwargs):
+ """
+ Fail unless an exception of class exception_class and with message that
+ matches expected_regexp is thrown by callable when invoked with arguments
+ args and keyword arguments kwargs.
+ Name of this function adheres to Python 3.2+ reference, but should work in
+ all versions down to 2.6.
+ Notes
+ -----
+ .. versionadded:: 1.8.0
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ nose = import_nose()
+
+ if sys.version_info.major >= 3:
+ funcname = nose.tools.assert_raises_regex
+ else:
+ # Only present in Python 2.7, missing from unittest in 2.6
+ funcname = nose.tools.assert_raises_regexp
+
+ return funcname(exception_class, expected_regexp, callable_obj,
+ *args, **kwargs)
| {"golden_diff": "diff --git a/scipy/_lib/_numpy_compat.py b/scipy/_lib/_numpy_compat.py\n--- a/scipy/_lib/_numpy_compat.py\n+++ b/scipy/_lib/_numpy_compat.py\n@@ -4,8 +4,10 @@\n from __future__ import division, print_function, absolute_import\n \n import warnings\n+import sys\n \n import numpy as np\n+from numpy.testing.nosetester import import_nose\n \n from scipy._lib._version import NumpyVersion\n \n@@ -45,3 +47,28 @@\n raise AssertionError(\"First warning for %s is not a \"\n \"%s( is %s)\" % (func.__name__, warning_class, l[0]))\n return result\n+\n+\n+def assert_raises_regex(exception_class, expected_regexp,\n+ callable_obj=None, *args, **kwargs):\n+ \"\"\"\n+ Fail unless an exception of class exception_class and with message that\n+ matches expected_regexp is thrown by callable when invoked with arguments\n+ args and keyword arguments kwargs.\n+ Name of this function adheres to Python 3.2+ reference, but should work in\n+ all versions down to 2.6.\n+ Notes\n+ -----\n+ .. versionadded:: 1.8.0\n+ \"\"\"\n+ __tracebackhide__ = True # Hide traceback for py.test\n+ nose = import_nose()\n+\n+ if sys.version_info.major >= 3:\n+ funcname = nose.tools.assert_raises_regex\n+ else:\n+ # Only present in Python 2.7, missing from unittest in 2.6\n+ funcname = nose.tools.assert_raises_regexp\n+\n+ return funcname(exception_class, expected_regexp, callable_obj,\n+ *args, **kwargs)\n", "issue": "DeprecationWarnings in stats on python 3.5\n```\n/home/br/repos/scipy/build/testenv/lib/python3.5/site-packages/scipy/stats/tests/test_stats.py:101: DeprecationWarning: Please use assertRaisesRegex instead.\n```\n\nApparently, `assertRaisesRegexp` was renamed to `assertRaisesRegex`: https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertRaisesRegexp\n\n", "before_files": [{"content": "\"\"\"Functions copypasted from newer versions of numpy.\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport warnings\n\nimport numpy as np\n\nfrom scipy._lib._version import NumpyVersion\n\nif NumpyVersion(np.__version__) > '1.7.0.dev':\n _assert_warns = np.testing.assert_warns\nelse:\n def _assert_warns(warning_class, func, *args, **kw):\n r\"\"\"\n Fail unless the given callable throws the specified warning.\n\n This definition is copypasted from numpy 1.9.0.dev.\n The version in earlier numpy returns None.\n\n Parameters\n ----------\n warning_class : class\n The class defining the warning that `func` is expected to throw.\n func : callable\n The callable to test.\n *args : Arguments\n Arguments passed to `func`.\n **kwargs : Kwargs\n Keyword arguments passed to `func`.\n\n Returns\n -------\n The value returned by `func`.\n\n \"\"\"\n with warnings.catch_warnings(record=True) as l:\n warnings.simplefilter('always')\n result = func(*args, **kw)\n if not len(l) > 0:\n raise AssertionError(\"No warning raised when calling %s\"\n % func.__name__)\n if not l[0].category is warning_class:\n raise AssertionError(\"First warning for %s is not a \"\n \"%s( is %s)\" % (func.__name__, warning_class, l[0]))\n return result\n", "path": "scipy/_lib/_numpy_compat.py"}]} | 1,046 | 387 |
gh_patches_debug_34963 | rasdani/github-patches | git_diff | adfinis__timed-backend-925 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug(auth): requests to the api with an invalid token receive a response status 500 instead of 401
</issue>
<code>
[start of timed/authentication.py]
1 import base64
2 import functools
3 import hashlib
4
5 import requests
6 from django.conf import settings
7 from django.core.cache import cache
8 from django.core.exceptions import SuspiciousOperation
9 from django.utils.encoding import force_bytes
10 from mozilla_django_oidc.auth import LOGGER, OIDCAuthenticationBackend
11
12
13 class TimedOIDCAuthenticationBackend(OIDCAuthenticationBackend):
14 def get_introspection(self, access_token, id_token, payload):
15 """Return user details dictionary."""
16
17 basic = base64.b64encode(
18 f"{settings.OIDC_RP_INTROSPECT_CLIENT_ID}:{settings.OIDC_RP_INTROSPECT_CLIENT_SECRET}".encode(
19 "utf-8"
20 )
21 ).decode()
22 headers = {
23 "Authorization": f"Basic {basic}",
24 "Content-Type": "application/x-www-form-urlencoded",
25 }
26 response = requests.post(
27 settings.OIDC_OP_INTROSPECT_ENDPOINT,
28 verify=settings.OIDC_VERIFY_SSL,
29 headers=headers,
30 data={"token": access_token},
31 )
32 response.raise_for_status()
33 return response.json()
34
35 def get_userinfo_or_introspection(self, access_token):
36 try:
37 claims = self.cached_request(
38 self.get_userinfo, access_token, "auth.userinfo"
39 )
40 except requests.HTTPError as e:
41 if not (
42 e.response.status_code in [401, 403] and settings.OIDC_CHECK_INTROSPECT
43 ):
44 raise e
45
46 # check introspection if userinfo fails (confidental client)
47 claims = self.cached_request(
48 self.get_introspection, access_token, "auth.introspection"
49 )
50 if "client_id" not in claims:
51 raise SuspiciousOperation("client_id not present in introspection")
52
53 return claims
54
55 def get_or_create_user(self, access_token, id_token, payload):
56 """Verify claims and return user, otherwise raise an Exception."""
57
58 claims = self.get_userinfo_or_introspection(access_token)
59
60 users = self.filter_users_by_claims(claims)
61
62 if len(users) == 1:
63 user = users.get()
64 self.update_user_from_claims(user, claims)
65 return user
66 elif settings.OIDC_CREATE_USER:
67 return self.create_user(claims)
68 else:
69 LOGGER.debug(
70 "Login failed: No user with username %s found, and "
71 "OIDC_CREATE_USER is False",
72 self.get_username(claims),
73 )
74 return None
75
76 def update_user_from_claims(self, user, claims):
77 user.email = claims.get(settings.OIDC_EMAIL_CLAIM, "")
78 user.first_name = claims.get(settings.OIDC_FIRSTNAME_CLAIM, "")
79 user.last_name = claims.get(settings.OIDC_LASTNAME_CLAIM, "")
80 user.save()
81
82 def filter_users_by_claims(self, claims):
83 username = self.get_username(claims)
84 return self.UserModel.objects.filter(username__iexact=username)
85
86 def cached_request(self, method, token, cache_prefix):
87 token_hash = hashlib.sha256(force_bytes(token)).hexdigest()
88
89 func = functools.partial(method, token, None, None)
90
91 return cache.get_or_set(
92 f"{cache_prefix}.{token_hash}",
93 func,
94 timeout=settings.OIDC_BEARER_TOKEN_REVALIDATION_TIME,
95 )
96
97 def create_user(self, claims):
98 """Return object for a newly created user account."""
99
100 username = self.get_username(claims)
101 email = claims.get(settings.OIDC_EMAIL_CLAIM, "")
102 first_name = claims.get(settings.OIDC_FIRSTNAME_CLAIM, "")
103 last_name = claims.get(settings.OIDC_LASTNAME_CLAIM, "")
104
105 return self.UserModel.objects.create(
106 username=username, email=email, first_name=first_name, last_name=last_name
107 )
108
109 def get_username(self, claims):
110 try:
111 return claims[settings.OIDC_USERNAME_CLAIM]
112 except KeyError:
113 raise SuspiciousOperation("Couldn't find username claim")
114
[end of timed/authentication.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/timed/authentication.py b/timed/authentication.py
--- a/timed/authentication.py
+++ b/timed/authentication.py
@@ -8,6 +8,7 @@
from django.core.exceptions import SuspiciousOperation
from django.utils.encoding import force_bytes
from mozilla_django_oidc.auth import LOGGER, OIDCAuthenticationBackend
+from rest_framework.exceptions import AuthenticationFailed
class TimedOIDCAuthenticationBackend(OIDCAuthenticationBackend):
@@ -37,20 +38,29 @@
claims = self.cached_request(
self.get_userinfo, access_token, "auth.userinfo"
)
+ return claims
except requests.HTTPError as e:
- if not (
- e.response.status_code in [401, 403] and settings.OIDC_CHECK_INTROSPECT
- ):
+ if e.response.status_code not in [401, 403]:
raise e
-
- # check introspection if userinfo fails (confidental client)
- claims = self.cached_request(
- self.get_introspection, access_token, "auth.introspection"
- )
- if "client_id" not in claims:
- raise SuspiciousOperation("client_id not present in introspection")
-
- return claims
+ if settings.OIDC_CHECK_INTROSPECT:
+ try:
+ # check introspection if userinfo fails (confidential client)
+ claims = self.cached_request(
+ self.get_introspection, access_token, "auth.introspection"
+ )
+ if "client_id" not in claims:
+ raise SuspiciousOperation(
+ "client_id not present in introspection"
+ )
+ return claims
+ except requests.HTTPError as e:
+ # if the authorization fails it's not a valid client or
+ # the token is expired and permission is denied.
+ # Handing on the 401 Client Error would be transformed into
+ # a 500 by Django's exception handling. But that's not what we want.
+ if e.response.status_code not in [401, 403]: # pragma: no cover
+ raise e
+ raise AuthenticationFailed()
def get_or_create_user(self, access_token, id_token, payload):
"""Verify claims and return user, otherwise raise an Exception."""
| {"golden_diff": "diff --git a/timed/authentication.py b/timed/authentication.py\n--- a/timed/authentication.py\n+++ b/timed/authentication.py\n@@ -8,6 +8,7 @@\n from django.core.exceptions import SuspiciousOperation\n from django.utils.encoding import force_bytes\n from mozilla_django_oidc.auth import LOGGER, OIDCAuthenticationBackend\n+from rest_framework.exceptions import AuthenticationFailed\n \n \n class TimedOIDCAuthenticationBackend(OIDCAuthenticationBackend):\n@@ -37,20 +38,29 @@\n claims = self.cached_request(\n self.get_userinfo, access_token, \"auth.userinfo\"\n )\n+ return claims\n except requests.HTTPError as e:\n- if not (\n- e.response.status_code in [401, 403] and settings.OIDC_CHECK_INTROSPECT\n- ):\n+ if e.response.status_code not in [401, 403]:\n raise e\n-\n- # check introspection if userinfo fails (confidental client)\n- claims = self.cached_request(\n- self.get_introspection, access_token, \"auth.introspection\"\n- )\n- if \"client_id\" not in claims:\n- raise SuspiciousOperation(\"client_id not present in introspection\")\n-\n- return claims\n+ if settings.OIDC_CHECK_INTROSPECT:\n+ try:\n+ # check introspection if userinfo fails (confidential client)\n+ claims = self.cached_request(\n+ self.get_introspection, access_token, \"auth.introspection\"\n+ )\n+ if \"client_id\" not in claims:\n+ raise SuspiciousOperation(\n+ \"client_id not present in introspection\"\n+ )\n+ return claims\n+ except requests.HTTPError as e:\n+ # if the authorization fails it's not a valid client or\n+ # the token is expired and permission is denied.\n+ # Handing on the 401 Client Error would be transformed into\n+ # a 500 by Django's exception handling. But that's not what we want.\n+ if e.response.status_code not in [401, 403]: # pragma: no cover\n+ raise e\n+ raise AuthenticationFailed()\n \n def get_or_create_user(self, access_token, id_token, payload):\n \"\"\"Verify claims and return user, otherwise raise an Exception.\"\"\"\n", "issue": "bug(auth): requests to the api with an invalid token receive a response status 500 instead of 401\n\n", "before_files": [{"content": "import base64\nimport functools\nimport hashlib\n\nimport requests\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.core.exceptions import SuspiciousOperation\nfrom django.utils.encoding import force_bytes\nfrom mozilla_django_oidc.auth import LOGGER, OIDCAuthenticationBackend\n\n\nclass TimedOIDCAuthenticationBackend(OIDCAuthenticationBackend):\n def get_introspection(self, access_token, id_token, payload):\n \"\"\"Return user details dictionary.\"\"\"\n\n basic = base64.b64encode(\n f\"{settings.OIDC_RP_INTROSPECT_CLIENT_ID}:{settings.OIDC_RP_INTROSPECT_CLIENT_SECRET}\".encode(\n \"utf-8\"\n )\n ).decode()\n headers = {\n \"Authorization\": f\"Basic {basic}\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n }\n response = requests.post(\n settings.OIDC_OP_INTROSPECT_ENDPOINT,\n verify=settings.OIDC_VERIFY_SSL,\n headers=headers,\n data={\"token\": access_token},\n )\n response.raise_for_status()\n return response.json()\n\n def get_userinfo_or_introspection(self, access_token):\n try:\n claims = self.cached_request(\n self.get_userinfo, access_token, \"auth.userinfo\"\n )\n except requests.HTTPError as e:\n if not (\n e.response.status_code in [401, 403] and settings.OIDC_CHECK_INTROSPECT\n ):\n raise e\n\n # check introspection if userinfo fails (confidental client)\n claims = self.cached_request(\n self.get_introspection, access_token, \"auth.introspection\"\n )\n if 
\"client_id\" not in claims:\n raise SuspiciousOperation(\"client_id not present in introspection\")\n\n return claims\n\n def get_or_create_user(self, access_token, id_token, payload):\n \"\"\"Verify claims and return user, otherwise raise an Exception.\"\"\"\n\n claims = self.get_userinfo_or_introspection(access_token)\n\n users = self.filter_users_by_claims(claims)\n\n if len(users) == 1:\n user = users.get()\n self.update_user_from_claims(user, claims)\n return user\n elif settings.OIDC_CREATE_USER:\n return self.create_user(claims)\n else:\n LOGGER.debug(\n \"Login failed: No user with username %s found, and \"\n \"OIDC_CREATE_USER is False\",\n self.get_username(claims),\n )\n return None\n\n def update_user_from_claims(self, user, claims):\n user.email = claims.get(settings.OIDC_EMAIL_CLAIM, \"\")\n user.first_name = claims.get(settings.OIDC_FIRSTNAME_CLAIM, \"\")\n user.last_name = claims.get(settings.OIDC_LASTNAME_CLAIM, \"\")\n user.save()\n\n def filter_users_by_claims(self, claims):\n username = self.get_username(claims)\n return self.UserModel.objects.filter(username__iexact=username)\n\n def cached_request(self, method, token, cache_prefix):\n token_hash = hashlib.sha256(force_bytes(token)).hexdigest()\n\n func = functools.partial(method, token, None, None)\n\n return cache.get_or_set(\n f\"{cache_prefix}.{token_hash}\",\n func,\n timeout=settings.OIDC_BEARER_TOKEN_REVALIDATION_TIME,\n )\n\n def create_user(self, claims):\n \"\"\"Return object for a newly created user account.\"\"\"\n\n username = self.get_username(claims)\n email = claims.get(settings.OIDC_EMAIL_CLAIM, \"\")\n first_name = claims.get(settings.OIDC_FIRSTNAME_CLAIM, \"\")\n last_name = claims.get(settings.OIDC_LASTNAME_CLAIM, \"\")\n\n return self.UserModel.objects.create(\n username=username, email=email, first_name=first_name, last_name=last_name\n )\n\n def get_username(self, claims):\n try:\n return claims[settings.OIDC_USERNAME_CLAIM]\n except KeyError:\n raise SuspiciousOperation(\"Couldn't find username claim\")\n", "path": "timed/authentication.py"}]} | 1,656 | 520 |
gh_patches_debug_20812 | rasdani/github-patches | git_diff | ipython__ipython-5202 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
node != nodejs within Debian packages
As part of resolving https://github.com/ipython/nbviewer/issues/196, (and https://github.com/ipython/nbviewer/pull/194), @ahmadia and I ended up finding out that Debian based Linux Distributions build the `node` binary as `nodejs`.
IPython nbconvert defaults to using `node`, which is actually `ax25-node` on Debian based systems. [See relevant posting on the Debian mailing list for more](https://lists.debian.org/debian-devel-announce/2012/07/msg00002.html).
This won't affect users of nvm (who provide `node`) or those who build from source. This will affect certain strains of Ubuntu (Saucy Salamander was what I used to test).
</issue>
<code>
[start of IPython/nbconvert/filters/markdown.py]
1 """Markdown filters
2 This file contains a collection of utility filters for dealing with
3 markdown within Jinja templates.
4 """
5 #-----------------------------------------------------------------------------
6 # Copyright (c) 2013, the IPython Development Team.
7 #
8 # Distributed under the terms of the Modified BSD License.
9 #
10 # The full license is in the file COPYING.txt, distributed with this software.
11 #-----------------------------------------------------------------------------
12
13 #-----------------------------------------------------------------------------
14 # Imports
15 #-----------------------------------------------------------------------------
16 from __future__ import print_function
17
18 # Stdlib imports
19 import os
20 import subprocess
21 from io import TextIOWrapper, BytesIO
22
23 # IPython imports
24 from IPython.nbconvert.utils.pandoc import pandoc
25 from IPython.nbconvert.utils.exceptions import ConversionException
26 from IPython.utils.process import find_cmd, FindCmdError
27 from IPython.utils.py3compat import cast_bytes
28
29 #-----------------------------------------------------------------------------
30 # Functions
31 #-----------------------------------------------------------------------------
32 marked = os.path.join(os.path.dirname(__file__), "marked.js")
33
34 __all__ = [
35 'markdown2html',
36 'markdown2html_pandoc',
37 'markdown2html_marked',
38 'markdown2latex',
39 'markdown2rst',
40 ]
41
42 class NodeJSMissing(ConversionException):
43 """Exception raised when node.js is missing."""
44 pass
45
46 def markdown2latex(source):
47 """Convert a markdown string to LaTeX via pandoc.
48
49 This function will raise an error if pandoc is not installed.
50 Any error messages generated by pandoc are printed to stderr.
51
52 Parameters
53 ----------
54 source : string
55 Input string, assumed to be valid markdown.
56
57 Returns
58 -------
59 out : string
60 Output as returned by pandoc.
61 """
62 return pandoc(source, 'markdown', 'latex')
63
64 def markdown2html_pandoc(source):
65 """Convert a markdown string to HTML via pandoc"""
66 return pandoc(source, 'markdown', 'html', extra_args=['--mathjax'])
67
68 def markdown2html_marked(source, encoding='utf-8'):
69 """Convert a markdown string to HTML via marked"""
70 command = ['node', marked]
71 try:
72 p = subprocess.Popen(command,
73 stdin=subprocess.PIPE, stdout=subprocess.PIPE
74 )
75 except OSError as e:
76 raise NodeJSMissing(
77 "The command '%s' returned an error: %s.\n" % (" ".join(command), e) +
78 "Please check that Node.js is installed."
79 )
80 out, _ = p.communicate(cast_bytes(source, encoding))
81 out = TextIOWrapper(BytesIO(out), encoding, 'replace').read()
82 return out.rstrip('\n')
83
84 def markdown2rst(source):
85 """Convert a markdown string to LaTeX via pandoc.
86
87 This function will raise an error if pandoc is not installed.
88 Any error messages generated by pandoc are printed to stderr.
89
90 Parameters
91 ----------
92 source : string
93 Input string, assumed to be valid markdown.
94
95 Returns
96 -------
97 out : string
98 Output as returned by pandoc.
99 """
100 return pandoc(source, 'markdown', 'rst')
101
102 try:
103 find_cmd('node')
104 except FindCmdError:
105 markdown2html = markdown2html_pandoc
106 else:
107 markdown2html = markdown2html_marked
108
[end of IPython/nbconvert/filters/markdown.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/IPython/nbconvert/filters/markdown.py b/IPython/nbconvert/filters/markdown.py
--- a/IPython/nbconvert/filters/markdown.py
+++ b/IPython/nbconvert/filters/markdown.py
@@ -67,7 +67,7 @@
def markdown2html_marked(source, encoding='utf-8'):
"""Convert a markdown string to HTML via marked"""
- command = ['node', marked]
+ command = [node_cmd, marked]
try:
p = subprocess.Popen(command,
stdin=subprocess.PIPE, stdout=subprocess.PIPE
@@ -99,9 +99,18 @@
"""
return pandoc(source, 'markdown', 'rst')
+# prefer md2html via marked if node.js is available
+# node is called nodejs on debian, so try that first
+node_cmd = 'nodejs'
try:
- find_cmd('node')
+ find_cmd(node_cmd)
except FindCmdError:
- markdown2html = markdown2html_pandoc
+ node_cmd = 'node'
+ try:
+ find_cmd(node_cmd)
+ except FindCmdError:
+ markdown2html = markdown2html_pandoc
+ else:
+ markdown2html = markdown2html_marked
else:
markdown2html = markdown2html_marked
| {"golden_diff": "diff --git a/IPython/nbconvert/filters/markdown.py b/IPython/nbconvert/filters/markdown.py\n--- a/IPython/nbconvert/filters/markdown.py\n+++ b/IPython/nbconvert/filters/markdown.py\n@@ -67,7 +67,7 @@\n \n def markdown2html_marked(source, encoding='utf-8'):\n \"\"\"Convert a markdown string to HTML via marked\"\"\"\n- command = ['node', marked]\n+ command = [node_cmd, marked]\n try:\n p = subprocess.Popen(command,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE\n@@ -99,9 +99,18 @@\n \"\"\"\n return pandoc(source, 'markdown', 'rst')\n \n+# prefer md2html via marked if node.js is available\n+# node is called nodejs on debian, so try that first\n+node_cmd = 'nodejs'\n try:\n- find_cmd('node')\n+ find_cmd(node_cmd)\n except FindCmdError:\n- markdown2html = markdown2html_pandoc\n+ node_cmd = 'node'\n+ try:\n+ find_cmd(node_cmd)\n+ except FindCmdError:\n+ markdown2html = markdown2html_pandoc\n+ else:\n+ markdown2html = markdown2html_marked\n else:\n markdown2html = markdown2html_marked\n", "issue": "node != nodejs within Debian packages\nAs part of resolving https://github.com/ipython/nbviewer/issues/196, (and https://github.com/ipython/nbviewer/pull/194), @ahmadia and I ended up finding out that Debian based Linux Distributions build the `node` binary as `nodejs`.\n\nIPython nbconvert defaults to using `node`, which is actually `ax25-node` on Debian based systems. [See relevant posting on the Debian mailing list for more](https://lists.debian.org/debian-devel-announce/2012/07/msg00002.html).\n\nThis won't affect users of nvm (who provide `node`) or those who build from source. This will affect certain strains of Ubuntu (Saucy Salamander was what I used to test).\n\n", "before_files": [{"content": "\"\"\"Markdown filters\nThis file contains a collection of utility filters for dealing with \nmarkdown within Jinja templates.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, the IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\n# Stdlib imports\nimport os\nimport subprocess\nfrom io import TextIOWrapper, BytesIO\n\n# IPython imports\nfrom IPython.nbconvert.utils.pandoc import pandoc\nfrom IPython.nbconvert.utils.exceptions import ConversionException\nfrom IPython.utils.process import find_cmd, FindCmdError\nfrom IPython.utils.py3compat import cast_bytes\n\n#-----------------------------------------------------------------------------\n# Functions\n#-----------------------------------------------------------------------------\nmarked = os.path.join(os.path.dirname(__file__), \"marked.js\")\n\n__all__ = [\n 'markdown2html',\n 'markdown2html_pandoc',\n 'markdown2html_marked',\n 'markdown2latex',\n 'markdown2rst',\n]\n\nclass NodeJSMissing(ConversionException):\n \"\"\"Exception raised when node.js is missing.\"\"\"\n pass\n\ndef markdown2latex(source):\n \"\"\"Convert a markdown string to LaTeX via pandoc.\n\n This function will raise an error if pandoc is not installed.\n Any error messages generated by pandoc are printed to stderr.\n\n Parameters\n ----------\n source : string\n Input string, assumed to be valid 
markdown.\n\n Returns\n -------\n out : string\n Output as returned by pandoc.\n \"\"\"\n return pandoc(source, 'markdown', 'latex')\n\ndef markdown2html_pandoc(source):\n \"\"\"Convert a markdown string to HTML via pandoc\"\"\"\n return pandoc(source, 'markdown', 'html', extra_args=['--mathjax'])\n\ndef markdown2html_marked(source, encoding='utf-8'):\n \"\"\"Convert a markdown string to HTML via marked\"\"\"\n command = ['node', marked]\n try:\n p = subprocess.Popen(command,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE\n )\n except OSError as e:\n raise NodeJSMissing(\n \"The command '%s' returned an error: %s.\\n\" % (\" \".join(command), e) +\n \"Please check that Node.js is installed.\"\n )\n out, _ = p.communicate(cast_bytes(source, encoding))\n out = TextIOWrapper(BytesIO(out), encoding, 'replace').read()\n return out.rstrip('\\n')\n\ndef markdown2rst(source):\n \"\"\"Convert a markdown string to LaTeX via pandoc.\n\n This function will raise an error if pandoc is not installed.\n Any error messages generated by pandoc are printed to stderr.\n\n Parameters\n ----------\n source : string\n Input string, assumed to be valid markdown.\n\n Returns\n -------\n out : string\n Output as returned by pandoc.\n \"\"\"\n return pandoc(source, 'markdown', 'rst')\n\ntry:\n find_cmd('node')\nexcept FindCmdError:\n markdown2html = markdown2html_pandoc\nelse:\n markdown2html = markdown2html_marked\n", "path": "IPython/nbconvert/filters/markdown.py"}]} | 1,626 | 293 |
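The IPython patch above probes for `nodejs` first and falls back to `node`, since Debian ships the binary under the former name. With only the standard library the same probing order can be written with `shutil.which` instead of IPython's `find_cmd`; the function name below is illustrative.

```python
# Stdlib-only sketch of the nodejs/node probing order (assumed helper name).
import shutil
import subprocess


def find_node_command():
    """Return the Node.js executable name, preferring Debian's 'nodejs'."""
    for candidate in ("nodejs", "node"):
        if shutil.which(candidate):
            return candidate
    return None  # caller would fall back to pandoc in this case


if __name__ == "__main__":
    node_cmd = find_node_command()
    if node_cmd is None:
        print("Node.js not found; markdown2html would fall back to pandoc")
    else:
        version = subprocess.check_output([node_cmd, "--version"], text=True)
        print("using", node_cmd, version.strip())
```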
gh_patches_debug_1512 | rasdani/github-patches | git_diff | Mailu__Mailu-2034 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Timezone in containers
Hello,
I have seen that timezones are not set for containers. This causes logs to have wrong timestamps and email "Received" headers to have wrong timezones.
It should be possible to set the timezone for all containers. The setting should be exposed and described in the .env file.
</issue>
<code>
[start of core/admin/mailu/configuration.py]
1 import os
2
3 from datetime import timedelta
4 from socrate import system
5 import ipaddress
6
7 DEFAULT_CONFIG = {
8 # Specific to the admin UI
9 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',
10 'BABEL_DEFAULT_LOCALE': 'en',
11 'BABEL_DEFAULT_TIMEZONE': 'UTC',
12 'BOOTSTRAP_SERVE_LOCAL': True,
13 'RATELIMIT_STORAGE_URL': '',
14 'QUOTA_STORAGE_URL': '',
15 'DEBUG': False,
16 'DOMAIN_REGISTRATION': False,
17 'TEMPLATES_AUTO_RELOAD': True,
18 'MEMORY_SESSIONS': False,
19 # Database settings
20 'DB_FLAVOR': None,
21 'DB_USER': 'mailu',
22 'DB_PW': None,
23 'DB_HOST': 'database',
24 'DB_NAME': 'mailu',
25 'SQLITE_DATABASE_FILE':'data/main.db',
26 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',
27 'SQLALCHEMY_TRACK_MODIFICATIONS': False,
28 # Statistics management
29 'INSTANCE_ID_PATH': '/data/instance',
30 'STATS_ENDPOINT': '18.{}.stats.mailu.io',
31 # Common configuration variables
32 'SECRET_KEY': 'changeMe',
33 'DOMAIN': 'mailu.io',
34 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',
35 'POSTMASTER': 'postmaster',
36 'WILDCARD_SENDERS': '',
37 'TLS_FLAVOR': 'cert',
38 'INBOUND_TLS_ENFORCE': False,
39 'DEFER_ON_TLS_ERROR': True,
40 'AUTH_RATELIMIT_IP': '60/hour',
41 'AUTH_RATELIMIT_IP_V4_MASK': 24,
42 'AUTH_RATELIMIT_IP_V6_MASK': 56,
43 'AUTH_RATELIMIT_USER': '100/day',
44 'AUTH_RATELIMIT_EXEMPTION': '',
45 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,
46 'DISABLE_STATISTICS': False,
47 # Mail settings
48 'DMARC_RUA': None,
49 'DMARC_RUF': None,
50 'WELCOME': False,
51 'WELCOME_SUBJECT': 'Dummy welcome topic',
52 'WELCOME_BODY': 'Dummy welcome body',
53 'DKIM_SELECTOR': 'dkim',
54 'DKIM_PATH': '/dkim/{domain}.{selector}.key',
55 'DEFAULT_QUOTA': 1000000000,
56 'MESSAGE_RATELIMIT': '200/day',
57 'RECIPIENT_DELIMITER': '',
58 # Web settings
59 'SITENAME': 'Mailu',
60 'WEBSITE': 'https://mailu.io',
61 'ADMIN' : 'none',
62 'WEB_ADMIN': '/admin',
63 'WEB_WEBMAIL': '/webmail',
64 'WEBMAIL': 'none',
65 'RECAPTCHA_PUBLIC_KEY': '',
66 'RECAPTCHA_PRIVATE_KEY': '',
67 'LOGO_URL': None,
68 'LOGO_BACKGROUND': None,
69 # Advanced settings
70 'LOG_LEVEL': 'WARNING',
71 'SESSION_KEY_BITS': 128,
72 'SESSION_LIFETIME': 24,
73 'SESSION_COOKIE_SECURE': True,
74 'CREDENTIAL_ROUNDS': 12,
75 # Host settings
76 'HOST_IMAP': 'imap',
77 'HOST_LMTP': 'imap:2525',
78 'HOST_POP3': 'imap',
79 'HOST_SMTP': 'smtp',
80 'HOST_AUTHSMTP': 'smtp',
81 'HOST_ADMIN': 'admin',
82 'HOST_WEBMAIL': 'webmail',
83 'HOST_WEBDAV': 'webdav:5232',
84 'HOST_REDIS': 'redis',
85 'HOST_FRONT': 'front',
86 'SUBNET': '192.168.203.0/24',
87 'SUBNET6': None,
88 'POD_ADDRESS_RANGE': None
89 }
90
91 class ConfigManager(dict):
92 """ Naive configuration manager that uses environment only
93 """
94
95 DB_TEMPLATES = {
96 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',
97 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',
98 'mysql': 'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'
99 }
100
101 def __init__(self):
102 self.config = dict()
103
104 def get_host_address(self, name):
105 # if MYSERVICE_ADDRESS is defined, use this
106 if '{}_ADDRESS'.format(name) in os.environ:
107 return os.environ.get('{}_ADDRESS'.format(name))
108 # otherwise use the host name and resolve it
109 return system.resolve_address(self.config['HOST_{}'.format(name)])
110
111 def resolve_hosts(self):
112 self.config["IMAP_ADDRESS"] = self.get_host_address("IMAP")
113 self.config["POP3_ADDRESS"] = self.get_host_address("POP3")
114 self.config["AUTHSMTP_ADDRESS"] = self.get_host_address("AUTHSMTP")
115 self.config["SMTP_ADDRESS"] = self.get_host_address("SMTP")
116 self.config["REDIS_ADDRESS"] = self.get_host_address("REDIS")
117 if self.config["WEBMAIL"] != "none":
118 self.config["WEBMAIL_ADDRESS"] = self.get_host_address("WEBMAIL")
119
120 def __get_env(self, key, value):
121 key_file = key + "_FILE"
122 if key_file in os.environ:
123 with open(os.environ.get(key_file)) as file:
124 value_from_file = file.read()
125 return value_from_file.strip()
126 else:
127 return os.environ.get(key, value)
128
129 def __coerce_value(self, value):
130 if isinstance(value, str) and value.lower() in ('true','yes'):
131 return True
132 elif isinstance(value, str) and value.lower() in ('false', 'no'):
133 return False
134 return value
135
136 def init_app(self, app):
137 self.config.update(app.config)
138 # get environment variables
139 self.config.update({
140 key: self.__coerce_value(self.__get_env(key, value))
141 for key, value in DEFAULT_CONFIG.items()
142 })
143 self.resolve_hosts()
144
145 # automatically set the sqlalchemy string
146 if self.config['DB_FLAVOR']:
147 template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]
148 self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)
149
150 self.config['RATELIMIT_STORAGE_URL'] = 'redis://{0}/2'.format(self.config['REDIS_ADDRESS'])
151 self.config['QUOTA_STORAGE_URL'] = 'redis://{0}/1'.format(self.config['REDIS_ADDRESS'])
152 self.config['SESSION_STORAGE_URL'] = 'redis://{0}/3'.format(self.config['REDIS_ADDRESS'])
153 self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'
154 self.config['SESSION_COOKIE_HTTPONLY'] = True
155 self.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=int(self.config['SESSION_LIFETIME']))
156 hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]
157 self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)
158 self.config['HOSTNAMES'] = ','.join(hostnames)
159 self.config['HOSTNAME'] = hostnames[0]
160 # update the app config itself
161 app.config = self
162
163 def setdefault(self, key, value):
164 if key not in self.config:
165 self.config[key] = value
166 return self.config[key]
167
168 def get(self, *args):
169 return self.config.get(*args)
170
171 def keys(self):
172 return self.config.keys()
173
174 def __getitem__(self, key):
175 return self.config.get(key)
176
177 def __setitem__(self, key, value):
178 self.config[key] = value
179
180 def __contains__(self, key):
181 return key in self.config
182
[end of core/admin/mailu/configuration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -72,6 +72,7 @@
'SESSION_LIFETIME': 24,
'SESSION_COOKIE_SECURE': True,
'CREDENTIAL_ROUNDS': 12,
+ 'TZ': 'Etc/UTC',
# Host settings
'HOST_IMAP': 'imap',
'HOST_LMTP': 'imap:2525',
| {"golden_diff": "diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py\n--- a/core/admin/mailu/configuration.py\n+++ b/core/admin/mailu/configuration.py\n@@ -72,6 +72,7 @@\n 'SESSION_LIFETIME': 24,\n 'SESSION_COOKIE_SECURE': True,\n 'CREDENTIAL_ROUNDS': 12,\n+ 'TZ': 'Etc/UTC',\n # Host settings\n 'HOST_IMAP': 'imap',\n 'HOST_LMTP': 'imap:2525',\n", "issue": "Timezone in containers\nHello,\r\n\r\nI have seen that timezones are not set for containers. This causes logs to have wrong timestamps and email \"received\" headers have wrong timezones.\r\n\r\nIt should be possible to set the timezone for all containers. The setting should be exposed and described in the .env file.\n", "before_files": [{"content": "import os\n\nfrom datetime import timedelta\nfrom socrate import system\nimport ipaddress\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': '',\n 'QUOTA_STORAGE_URL': '',\n 'DEBUG': False,\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n 'MEMORY_SESSIONS': False,\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE':'data/main.db',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '18.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'WILDCARD_SENDERS': '',\n 'TLS_FLAVOR': 'cert',\n 'INBOUND_TLS_ENFORCE': False,\n 'DEFER_ON_TLS_ERROR': True,\n 'AUTH_RATELIMIT_IP': '60/hour',\n 'AUTH_RATELIMIT_IP_V4_MASK': 24,\n 'AUTH_RATELIMIT_IP_V6_MASK': 56,\n 'AUTH_RATELIMIT_USER': '100/day',\n 'AUTH_RATELIMIT_EXEMPTION': '',\n 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n 'MESSAGE_RATELIMIT': '200/day',\n 'RECIPIENT_DELIMITER': '',\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'ADMIN' : 'none',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n 'LOGO_URL': None,\n 'LOGO_BACKGROUND': None,\n # Advanced settings\n 'LOG_LEVEL': 'WARNING',\n 'SESSION_KEY_BITS': 128,\n 'SESSION_LIFETIME': 24,\n 'SESSION_COOKIE_SECURE': True,\n 'CREDENTIAL_ROUNDS': 12,\n # Host settings\n 'HOST_IMAP': 'imap',\n 'HOST_LMTP': 'imap:2525',\n 'HOST_POP3': 'imap',\n 'HOST_SMTP': 'smtp',\n 'HOST_AUTHSMTP': 'smtp',\n 'HOST_ADMIN': 'admin',\n 'HOST_WEBMAIL': 'webmail',\n 'HOST_WEBDAV': 'webdav:5232',\n 'HOST_REDIS': 'redis',\n 'HOST_FRONT': 'front',\n 'SUBNET': '192.168.203.0/24',\n 'SUBNET6': None,\n 'POD_ADDRESS_RANGE': None\n}\n\nclass ConfigManager(dict):\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 
'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'\n }\n\n def __init__(self):\n self.config = dict()\n\n def get_host_address(self, name):\n # if MYSERVICE_ADDRESS is defined, use this\n if '{}_ADDRESS'.format(name) in os.environ:\n return os.environ.get('{}_ADDRESS'.format(name))\n # otherwise use the host name and resolve it\n return system.resolve_address(self.config['HOST_{}'.format(name)])\n\n def resolve_hosts(self):\n self.config[\"IMAP_ADDRESS\"] = self.get_host_address(\"IMAP\")\n self.config[\"POP3_ADDRESS\"] = self.get_host_address(\"POP3\")\n self.config[\"AUTHSMTP_ADDRESS\"] = self.get_host_address(\"AUTHSMTP\")\n self.config[\"SMTP_ADDRESS\"] = self.get_host_address(\"SMTP\")\n self.config[\"REDIS_ADDRESS\"] = self.get_host_address(\"REDIS\")\n if self.config[\"WEBMAIL\"] != \"none\":\n self.config[\"WEBMAIL_ADDRESS\"] = self.get_host_address(\"WEBMAIL\")\n\n def __get_env(self, key, value):\n key_file = key + \"_FILE\"\n if key_file in os.environ:\n with open(os.environ.get(key_file)) as file:\n value_from_file = file.read()\n return value_from_file.strip()\n else:\n return os.environ.get(key, value)\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n self.config.update(app.config)\n # get environment variables\n self.config.update({\n key: self.__coerce_value(self.__get_env(key, value))\n for key, value in DEFAULT_CONFIG.items()\n })\n self.resolve_hosts()\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n\n self.config['RATELIMIT_STORAGE_URL'] = 'redis://{0}/2'.format(self.config['REDIS_ADDRESS'])\n self.config['QUOTA_STORAGE_URL'] = 'redis://{0}/1'.format(self.config['REDIS_ADDRESS'])\n self.config['SESSION_STORAGE_URL'] = 'redis://{0}/3'.format(self.config['REDIS_ADDRESS'])\n self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'\n self.config['SESSION_COOKIE_HTTPONLY'] = True\n self.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=int(self.config['SESSION_LIFETIME']))\n hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]\n self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)\n self.config['HOSTNAMES'] = ','.join(hostnames)\n self.config['HOSTNAME'] = hostnames[0]\n # update the app config itself\n app.config = self\n\n def setdefault(self, key, value):\n if key not in self.config:\n self.config[key] = value\n return self.config[key]\n\n def get(self, *args):\n return self.config.get(*args)\n\n def keys(self):\n return self.config.keys()\n\n def __getitem__(self, key):\n return self.config.get(key)\n\n def __setitem__(self, key, value):\n self.config[key] = value\n\n def __contains__(self, key):\n return key in self.config\n", "path": "core/admin/mailu/configuration.py"}]} | 2,754 | 119 |
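The Mailu change above only adds a `TZ` default of `Etc/UTC` to the admin configuration; actually honouring it inside a container comes down to exporting `TZ` and letting libc pick it up. A minimal POSIX-only sketch of that step follows; the helper name is an assumption, not Mailu code.

```python
# POSIX-only sketch: apply $TZ (default Etc/UTC, as in the patch) to the process.
import os
import time


def apply_timezone(default="Etc/UTC"):
    tz = os.environ.get("TZ", default)
    os.environ["TZ"] = tz   # visible to libc and to any child processes
    time.tzset()            # re-read TZ so localtime() and %Z reflect it
    return tz


if __name__ == "__main__":
    zone = apply_timezone()
    print("TZ=" + zone, time.strftime("%Y-%m-%d %H:%M:%S %Z"))
```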
gh_patches_debug_21153 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-5100 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
aws.cloudtrail - tag filter
Objective: I want to write a policy to identify all cloud trails with missing tags. Currently the aws.cloudtrail resource does not support tag filters.
</issue>
<code>
[start of c7n/resources/cloudtrail.py]
1 # Copyright 2017-2019 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from __future__ import absolute_import, division, print_function, unicode_literals
15
16 import logging
17
18 from c7n.actions import Action, BaseAction
19 from c7n.exceptions import PolicyValidationError
20 from c7n.filters import ValueFilter, Filter
21 from c7n.manager import resources
22 from c7n.query import QueryResourceManager, TypeInfo
23 from c7n.utils import local_session, type_schema
24
25 from .aws import shape_validate, Arn
26
27 log = logging.getLogger('c7n.resources.cloudtrail')
28
29
30 @resources.register('cloudtrail')
31 class CloudTrail(QueryResourceManager):
32
33 class resource_type(TypeInfo):
34 service = 'cloudtrail'
35 enum_spec = ('describe_trails', 'trailList', None)
36 filter_name = 'trailNameList'
37 filter_type = 'list'
38 arn = id = 'TrailARN'
39 name = 'Name'
40 config_type = "AWS::CloudTrail::Trail"
41
42
43 @CloudTrail.filter_registry.register('is-shadow')
44 class IsShadow(Filter):
45 """Identify shadow trails (secondary copies), shadow trails
46 can't be modified directly, the origin trail needs to be modified.
47
48 Shadow trails are created for multi-region trails as well for
49 organizational trails.
50 """
51 schema = type_schema('is-shadow', state={'type': 'boolean'})
52 permissions = ('cloudtrail:DescribeTrails',)
53 embedded = False
54
55 def process(self, resources, event=None):
56 rcount = len(resources)
57 trails = [t for t in resources if (self.is_shadow(t) == self.data.get('state', True))]
58 if len(trails) != rcount and self.embedded:
59 self.log.info("implicitly filtering shadow trails %d -> %d",
60 rcount, len(trails))
61 return trails
62
63 def is_shadow(self, t):
64 if t.get('IsOrganizationTrail') and self.manager.config.account_id not in t['TrailARN']:
65 return True
66 if t.get('IsMultiRegionTrail') and t['HomeRegion'] != self.manager.config.region:
67 return True
68 return False
69
70
71 @CloudTrail.filter_registry.register('status')
72 class Status(ValueFilter):
73 """Filter a cloudtrail by its status.
74
75 :Example:
76
77 .. code-block:: yaml
78
79 policies:
80 - name: cloudtrail-check-status
81 resource: aws.cloudtrail
82 filters:
83 - type: status
84 key: IsLogging
85 value: False
86 """
87
88 schema = type_schema('status', rinherit=ValueFilter.schema)
89 schema_alias = False
90 permissions = ('cloudtrail:GetTrailStatus',)
91 annotation_key = 'c7n:TrailStatus'
92
93 def process(self, resources, event=None):
94 for r in resources:
95 region = self.manager.config.region
96 trail_arn = Arn.parse(r['TrailARN'])
97
98 if (r.get('IsOrganizationTrail') and
99 self.manager.config.account_id != trail_arn.account_id):
100 continue
101 if r.get('HomeRegion') and r['HomeRegion'] != region:
102 region = trail_arn.region
103 if self.annotation_key in r:
104 continue
105 client = local_session(self.manager.session_factory).client(
106 'cloudtrail', region_name=region)
107 status = client.get_trail_status(Name=r['Name'])
108 status.pop('ResponseMetadata')
109 r[self.annotation_key] = status
110
111 return super(Status, self).process(resources)
112
113 def __call__(self, r):
114 return self.match(r['c7n:TrailStatus'])
115
116
117 @CloudTrail.action_registry.register('update-trail')
118 class UpdateTrail(Action):
119 """Update trail attributes.
120
121 :Example:
122
123 .. code-block:: yaml
124
125 policies:
126 - name: cloudtrail-set-log
127 resource: aws.cloudtrail
128 filters:
129 - or:
130 - KmsKeyId: empty
131 - LogFileValidationEnabled: false
132 actions:
133 - type: update-trail
134 attributes:
135 KmsKeyId: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef
136 EnableLogFileValidation: true
137 """
138 schema = type_schema(
139 'update-trail',
140 attributes={'type': 'object'},
141 required=('attributes',))
142 shape = 'UpdateTrailRequest'
143 permissions = ('cloudtrail:UpdateTrail',)
144
145 def validate(self):
146 attrs = dict(self.data['attributes'])
147 if 'Name' in attrs:
148 raise PolicyValidationError(
149 "Can't include Name in update-trail action")
150 attrs['Name'] = 'PolicyValidation'
151 return shape_validate(
152 attrs,
153 self.shape,
154 self.manager.resource_type.service)
155
156 def process(self, resources):
157 client = local_session(self.manager.session_factory).client('cloudtrail')
158 shadow_check = IsShadow({'state': False}, self.manager)
159 shadow_check.embedded = True
160 resources = shadow_check.process(resources)
161
162 for r in resources:
163 client.update_trail(
164 Name=r['Name'],
165 **self.data['attributes'])
166
167
168 @CloudTrail.action_registry.register('set-logging')
169 class SetLogging(Action):
170 """Set the logging state of a trail
171
172 :Example:
173
174 .. code-block:: yaml
175
176 policies:
177 - name: cloudtrail-set-active
178 resource: aws.cloudtrail
179 filters:
180 - type: status
181 key: IsLogging
182 value: False
183 actions:
184 - type: set-logging
185 enabled: True
186 """
187 schema = type_schema(
188 'set-logging', enabled={'type': 'boolean'})
189
190 def get_permissions(self):
191 enable = self.data.get('enabled', True)
192 if enable is True:
193 return ('cloudtrail:StartLogging',)
194 else:
195 return ('cloudtrail:StopLogging',)
196
197 def process(self, resources):
198 client = local_session(self.manager.session_factory).client('cloudtrail')
199 shadow_check = IsShadow({'state': False}, self.manager)
200 shadow_check.embedded = True
201 resources = shadow_check.process(resources)
202 enable = self.data.get('enabled', True)
203
204 for r in resources:
205 if enable:
206 client.start_logging(Name=r['Name'])
207 else:
208 client.stop_logging(Name=r['Name'])
209
210
211 @CloudTrail.action_registry.register('delete')
212 class DeleteTrail(BaseAction):
213 """ Delete a cloud trail
214
215 :example:
216
217 .. code-block:: yaml
218
219 policies:
220 - name: delete-cloudtrail
221 resource: aws.cloudtrail
222 filters:
223 - type: value
224 key: Name
225 value: delete-me
226 op: eq
227 actions:
228 - type: delete
229 """
230
231 schema = type_schema('delete')
232 permissions = ('cloudtrail:DeleteTrail',)
233
234 def process(self, resources):
235 client = local_session(self.manager.session_factory).client('cloudtrail')
236 shadow_check = IsShadow({'state': False}, self.manager)
237 shadow_check.embedded = True
238 resources = shadow_check.process(resources)
239 for r in resources:
240 try:
241 client.delete_trail(Name=r['Name'])
242 except client.exceptions.TrailNotFoundException:
243 continue
244
[end of c7n/resources/cloudtrail.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/c7n/resources/cloudtrail.py b/c7n/resources/cloudtrail.py
--- a/c7n/resources/cloudtrail.py
+++ b/c7n/resources/cloudtrail.py
@@ -19,7 +19,8 @@
from c7n.exceptions import PolicyValidationError
from c7n.filters import ValueFilter, Filter
from c7n.manager import resources
-from c7n.query import QueryResourceManager, TypeInfo
+from c7n.tags import universal_augment
+from c7n.query import DescribeSource, QueryResourceManager, TypeInfo
from c7n.utils import local_session, type_schema
from .aws import shape_validate, Arn
@@ -38,6 +39,18 @@
arn = id = 'TrailARN'
name = 'Name'
config_type = "AWS::CloudTrail::Trail"
+ universal_taggable = object()
+
+ def get_source(self, source_type):
+ if source_type == 'describe':
+ return DescribeTrail(self)
+ return super(CloudTrail, self).get_source(source_type)
+
+
+class DescribeTrail(DescribeSource):
+
+ def augment(self, resources):
+ return universal_augment(self.manager, resources)
@CloudTrail.filter_registry.register('is-shadow')
| {"golden_diff": "diff --git a/c7n/resources/cloudtrail.py b/c7n/resources/cloudtrail.py\n--- a/c7n/resources/cloudtrail.py\n+++ b/c7n/resources/cloudtrail.py\n@@ -19,7 +19,8 @@\n from c7n.exceptions import PolicyValidationError\n from c7n.filters import ValueFilter, Filter\n from c7n.manager import resources\n-from c7n.query import QueryResourceManager, TypeInfo\n+from c7n.tags import universal_augment\n+from c7n.query import DescribeSource, QueryResourceManager, TypeInfo\n from c7n.utils import local_session, type_schema\n \n from .aws import shape_validate, Arn\n@@ -38,6 +39,18 @@\n arn = id = 'TrailARN'\n name = 'Name'\n config_type = \"AWS::CloudTrail::Trail\"\n+ universal_taggable = object()\n+\n+ def get_source(self, source_type):\n+ if source_type == 'describe':\n+ return DescribeTrail(self)\n+ return super(CloudTrail, self).get_source(source_type)\n+\n+\n+class DescribeTrail(DescribeSource):\n+\n+ def augment(self, resources):\n+ return universal_augment(self.manager, resources)\n \n \n @CloudTrail.filter_registry.register('is-shadow')\n", "issue": "aws.cloudtrail - tag filter\nObjective: I want to write a policy to identify all cloud trails with missing tags. Currently it does not support this.\n", "before_files": [{"content": "# Copyright 2017-2019 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\n\nfrom c7n.actions import Action, BaseAction\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters import ValueFilter, Filter\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager, TypeInfo\nfrom c7n.utils import local_session, type_schema\n\nfrom .aws import shape_validate, Arn\n\nlog = logging.getLogger('c7n.resources.cloudtrail')\n\n\[email protected]('cloudtrail')\nclass CloudTrail(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'cloudtrail'\n enum_spec = ('describe_trails', 'trailList', None)\n filter_name = 'trailNameList'\n filter_type = 'list'\n arn = id = 'TrailARN'\n name = 'Name'\n config_type = \"AWS::CloudTrail::Trail\"\n\n\[email protected]_registry.register('is-shadow')\nclass IsShadow(Filter):\n \"\"\"Identify shadow trails (secondary copies), shadow trails\n can't be modified directly, the origin trail needs to be modified.\n\n Shadow trails are created for multi-region trails as well for\n organizational trails.\n \"\"\"\n schema = type_schema('is-shadow', state={'type': 'boolean'})\n permissions = ('cloudtrail:DescribeTrails',)\n embedded = False\n\n def process(self, resources, event=None):\n rcount = len(resources)\n trails = [t for t in resources if (self.is_shadow(t) == self.data.get('state', True))]\n if len(trails) != rcount and self.embedded:\n self.log.info(\"implicitly filtering shadow trails %d -> %d\",\n rcount, len(trails))\n return trails\n\n def is_shadow(self, t):\n if t.get('IsOrganizationTrail') and self.manager.config.account_id not in t['TrailARN']:\n return 
True\n if t.get('IsMultiRegionTrail') and t['HomeRegion'] != self.manager.config.region:\n return True\n return False\n\n\[email protected]_registry.register('status')\nclass Status(ValueFilter):\n \"\"\"Filter a cloudtrail by its status.\n\n :Example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudtrail-check-status\n resource: aws.cloudtrail\n filters:\n - type: status\n key: IsLogging\n value: False\n \"\"\"\n\n schema = type_schema('status', rinherit=ValueFilter.schema)\n schema_alias = False\n permissions = ('cloudtrail:GetTrailStatus',)\n annotation_key = 'c7n:TrailStatus'\n\n def process(self, resources, event=None):\n for r in resources:\n region = self.manager.config.region\n trail_arn = Arn.parse(r['TrailARN'])\n\n if (r.get('IsOrganizationTrail') and\n self.manager.config.account_id != trail_arn.account_id):\n continue\n if r.get('HomeRegion') and r['HomeRegion'] != region:\n region = trail_arn.region\n if self.annotation_key in r:\n continue\n client = local_session(self.manager.session_factory).client(\n 'cloudtrail', region_name=region)\n status = client.get_trail_status(Name=r['Name'])\n status.pop('ResponseMetadata')\n r[self.annotation_key] = status\n\n return super(Status, self).process(resources)\n\n def __call__(self, r):\n return self.match(r['c7n:TrailStatus'])\n\n\[email protected]_registry.register('update-trail')\nclass UpdateTrail(Action):\n \"\"\"Update trail attributes.\n\n :Example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudtrail-set-log\n resource: aws.cloudtrail\n filters:\n - or:\n - KmsKeyId: empty\n - LogFileValidationEnabled: false\n actions:\n - type: update-trail\n attributes:\n KmsKeyId: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef\n EnableLogFileValidation: true\n \"\"\"\n schema = type_schema(\n 'update-trail',\n attributes={'type': 'object'},\n required=('attributes',))\n shape = 'UpdateTrailRequest'\n permissions = ('cloudtrail:UpdateTrail',)\n\n def validate(self):\n attrs = dict(self.data['attributes'])\n if 'Name' in attrs:\n raise PolicyValidationError(\n \"Can't include Name in update-trail action\")\n attrs['Name'] = 'PolicyValidation'\n return shape_validate(\n attrs,\n self.shape,\n self.manager.resource_type.service)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n\n for r in resources:\n client.update_trail(\n Name=r['Name'],\n **self.data['attributes'])\n\n\[email protected]_registry.register('set-logging')\nclass SetLogging(Action):\n \"\"\"Set the logging state of a trail\n\n :Example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: cloudtrail-set-active\n resource: aws.cloudtrail\n filters:\n - type: status\n key: IsLogging\n value: False\n actions:\n - type: set-logging\n enabled: True\n \"\"\"\n schema = type_schema(\n 'set-logging', enabled={'type': 'boolean'})\n\n def get_permissions(self):\n enable = self.data.get('enabled', True)\n if enable is True:\n return ('cloudtrail:StartLogging',)\n else:\n return ('cloudtrail:StopLogging',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n enable = self.data.get('enabled', True)\n\n for r in resources:\n if enable:\n client.start_logging(Name=r['Name'])\n else:\n client.stop_logging(Name=r['Name'])\n\n\[email protected]_registry.register('delete')\nclass DeleteTrail(BaseAction):\n \"\"\" Delete a cloud trail\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: delete-cloudtrail\n resource: aws.cloudtrail\n filters:\n - type: value\n key: Name\n value: delete-me\n op: eq\n actions:\n - type: delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = ('cloudtrail:DeleteTrail',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n for r in resources:\n try:\n client.delete_trail(Name=r['Name'])\n except client.exceptions.TrailNotFoundException:\n continue\n", "path": "c7n/resources/cloudtrail.py"}]} | 2,881 | 270 |
gh_patches_debug_8504 | rasdani/github-patches | git_diff | Gallopsled__pwntools-218 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SyntaxWarning in pwnlib.util.web
This line generates a `SyntaxWarning`: https://github.com/Gallopsled/pwntools/blob/master/pwnlib/util/web.py#L27
Either we should use qualified names or only import the names that we need. My vote goes toward the former.
</issue>
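For illustration, a minimal sketch of the two import styles the report weighs (the wrapper functions below are hypothetical, not part of pwntools):

```python
# On Python 2, a wildcard import inside a function body emits
# "SyntaxWarning: import * only allowed at module level".
def fetch_star(url, **kwargs):
    from requests import *            # triggers the SyntaxWarning
    return get(url, **kwargs)         # names are injected into the local scope

def fetch_qualified(url, **kwargs):
    import requests                   # qualified names, no warning
    return requests.get(url, **kwargs)
```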
<code>
[start of pwnlib/util/web.py]
1 # -*- coding: utf-8 -*-
2 import os, tempfile, logging
3 from .misc import size
4 log = logging.getLogger(__name__)
5
6 def wget(url, save=None, timeout=5, **kwargs):
7 """wget(url, save=None, timeout=5) -> str
8
9 Downloads a file via HTTP/HTTPS.
10
11 Args:
12 url (str): URL to download
13 save (str or bool): Name to save as. Any truthy value
14 will auto-generate a name based on the URL.
15 timeout (int): Timeout, in seconds
16
17 Example:
18
19 >>> url = 'http://httpbin.org/robots.txt'
20 >>> with context.local(log_level='ERROR'): result = wget(url)
21 >>> result
22 'User-agent: *\nDisallow: /deny\n'
23 >>> with context.local(log_level='ERROR'): wget(url, True)
24 >>> result == file('robots.txt').read()
25 True
26 """
27 from requests import *
28
29 with log.progress("Downloading '%s'" % url) as w:
30 w.status("Making request...")
31
32 response = get(url, stream=True, **kwargs)
33
34 if not response.ok:
35 w.failure("Got code %s" % response.status_code)
36 return
37
38 total_size = int(response.headers.get('content-length',0))
39
40 w.status('0 / %s' % size(total_size))
41
42 # Find out the next largest size we can represent as
43 chunk_size = 1
44 while chunk_size < (total_size/10):
45 chunk_size *= 1000
46
47 # Count chunks as they're received
48 total_data = ''
49
50 # Loop until we have all of the data
51 for chunk in response.iter_content(chunk_size = 2**10):
52 total_data += chunk
53 if total_size:
54 w.status('%s / %s' % (size(total_data), size(total_size)))
55 else:
56 w.status('%s' % size(total_data))
57
58 # Save to the target file if provided
59 if save:
60 if not isinstance(save, (str, unicode)):
61 save = os.path.basename(url)
62 save = save or tempfile.NamedTemporaryFile(dir='.', delete=False).name
63 with file(save,'wb+') as f:
64 f.write(total_data)
65 w.success('Saved %r (%s)' % (f.name, size(total_data)))
66 else:
67 w.success('%s' % size(total_data))
68
69 return total_data
70
71
[end of pwnlib/util/web.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwnlib/util/web.py b/pwnlib/util/web.py
--- a/pwnlib/util/web.py
+++ b/pwnlib/util/web.py
@@ -24,12 +24,12 @@
>>> result == file('robots.txt').read()
True
"""
- from requests import *
+ import requests
with log.progress("Downloading '%s'" % url) as w:
w.status("Making request...")
- response = get(url, stream=True, **kwargs)
+ response = requests.get(url, stream=True, **kwargs)
if not response.ok:
w.failure("Got code %s" % response.status_code)
| {"golden_diff": "diff --git a/pwnlib/util/web.py b/pwnlib/util/web.py\n--- a/pwnlib/util/web.py\n+++ b/pwnlib/util/web.py\n@@ -24,12 +24,12 @@\n >>> result == file('robots.txt').read()\n True\n \"\"\"\n- from requests import *\n+ import requests\n \n with log.progress(\"Downloading '%s'\" % url) as w:\n w.status(\"Making request...\")\n \n- response = get(url, stream=True, **kwargs)\n+ response = requests.get(url, stream=True, **kwargs)\n \n if not response.ok:\n w.failure(\"Got code %s\" % response.status_code)\n", "issue": "SyntaxWarning in pwnlib.util.web\nThis line generates a `SyntaxWarning`: https://github.com/Gallopsled/pwntools/blob/master/pwnlib/util/web.py#L27\n\nEither we should use qualified names or only import the names that we need. My votes goes toward the former.\n\nSyntaxWarning in pwnlib.util.web\nThis line generates a `SyntaxWarning`: https://github.com/Gallopsled/pwntools/blob/master/pwnlib/util/web.py#L27\n\nEither we should use qualified names or only import the names that we need. My votes goes toward the former.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os, tempfile, logging\nfrom .misc import size\nlog = logging.getLogger(__name__)\n\ndef wget(url, save=None, timeout=5, **kwargs):\n \"\"\"wget(url, save=None, timeout=5) -> str\n\n Downloads a file via HTTP/HTTPS.\n\n Args:\n url (str): URL to download\n save (str or bool): Name to save as. Any truthy value\n will auto-generate a name based on the URL.\n timeout (int): Timeout, in seconds\n\n Example:\n\n >>> url = 'http://httpbin.org/robots.txt'\n >>> with context.local(log_level='ERROR'): result = wget(url)\n >>> result\n 'User-agent: *\\nDisallow: /deny\\n'\n >>> with context.local(log_level='ERROR'): wget(url, True)\n >>> result == file('robots.txt').read()\n True\n \"\"\"\n from requests import *\n\n with log.progress(\"Downloading '%s'\" % url) as w:\n w.status(\"Making request...\")\n\n response = get(url, stream=True, **kwargs)\n\n if not response.ok:\n w.failure(\"Got code %s\" % response.status_code)\n return\n\n total_size = int(response.headers.get('content-length',0))\n\n w.status('0 / %s' % size(total_size))\n\n # Find out the next largest size we can represent as\n chunk_size = 1\n while chunk_size < (total_size/10):\n chunk_size *= 1000\n\n # Count chunks as they're received\n total_data = ''\n\n # Loop until we have all of the data\n for chunk in response.iter_content(chunk_size = 2**10):\n total_data += chunk\n if total_size:\n w.status('%s / %s' % (size(total_data), size(total_size)))\n else:\n w.status('%s' % size(total_data))\n\n # Save to the target file if provided\n if save:\n if not isinstance(save, (str, unicode)):\n save = os.path.basename(url)\n save = save or tempfile.NamedTemporaryFile(dir='.', delete=False).name\n with file(save,'wb+') as f:\n f.write(total_data)\n w.success('Saved %r (%s)' % (f.name, size(total_data)))\n else:\n w.success('%s' % size(total_data))\n\n return total_data\n\n", "path": "pwnlib/util/web.py"}]} | 1,338 | 148 |
gh_patches_debug_14960 | rasdani/github-patches | git_diff | flairNLP__flair-422 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Don't pin package dependencies in setup.py
To be removed, once it is done: Please add the appropriate label to this ticket, e.g. feature or enhancement.
**Is your feature/enhancement request related to a problem? Please describe.**
It is not considered good practice to pin package dependencies in setup.py (see additional context).
For instance, I'm forced to downgrade certain packages by installing flair.
**Describe the solution you'd like**
Just list the abstract requirements in setup.py with less restrictive version bounds.
**Additional context**
See https://packaging.python.org/discussions/install-requires-vs-requirements/
</issue>
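As a rough sketch of the suggested direction (package names mirror the setup.py below; the exact lower bounds are placeholders, not a tested recommendation):

```python
from setuptools import setup, find_packages

setup(
    name="flair",
    packages=find_packages(exclude="test"),
    install_requires=[
        "torch>=1.0.0",    # instead of torch==1.0.0
        "gensim>=3.4.0",   # instead of gensim==3.4.0
        "tqdm>=4.26.0",    # instead of tqdm==4.26.0
    ],
    python_requires=">=3.6",
)
```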
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2
3 setup(
4 name='flair',
5 version='0.4.0',
6 description='A very simple framework for state-of-the-art NLP',
7 long_description=open("README.md", encoding='utf-8').read(),
8 long_description_content_type="text/markdown",
9 author='Alan Akbik',
10 author_email='[email protected]',
11 url='https://github.com/zalandoresearch/flair',
12 packages=find_packages(exclude='test'), # same as name
13 license='MIT',
14 install_requires=[
15 'torch==1.0.0',
16 'gensim==3.4.0',
17 'typing==3.6.4',
18 'tqdm==4.26.0',
19 'segtok==1.5.7',
20 'matplotlib==3.0.0',
21 'mpld3==0.3',
22 'sklearn',
23 'sqlitedict==1.6.0',
24 'deprecated==1.2.4',
25 'hyperopt==0.1.1',
26 'pytorch-pretrained-bert==0.3.0'
27 ],
28 include_package_data=True,
29 python_requires='>=3.6',
30 )
31
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,18 +12,17 @@
packages=find_packages(exclude='test'), # same as name
license='MIT',
install_requires=[
- 'torch==1.0.0',
- 'gensim==3.4.0',
- 'typing==3.6.4',
- 'tqdm==4.26.0',
- 'segtok==1.5.7',
- 'matplotlib==3.0.0',
- 'mpld3==0.3',
+ 'torch>=1.0.0',
+ 'gensim>=3.4.0',
+ 'tqdm>=4.26.0',
+ 'segtok>=1.5.7',
+ 'matplotlib>=3.0.0',
+ 'mpld3>=0.3',
'sklearn',
- 'sqlitedict==1.6.0',
- 'deprecated==1.2.4',
- 'hyperopt==0.1.1',
- 'pytorch-pretrained-bert==0.3.0'
+ 'sqlitedict>=1.6.0',
+ 'deprecated>=1.2.4',
+ 'hyperopt>=0.1.1',
+ 'pytorch-pretrained-bert>=0.3.0'
],
include_package_data=True,
python_requires='>=3.6',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,18 +12,17 @@\n packages=find_packages(exclude='test'), # same as name\n license='MIT',\n install_requires=[\n- 'torch==1.0.0',\n- 'gensim==3.4.0',\n- 'typing==3.6.4',\n- 'tqdm==4.26.0',\n- 'segtok==1.5.7',\n- 'matplotlib==3.0.0',\n- 'mpld3==0.3',\n+ 'torch>=1.0.0',\n+ 'gensim>=3.4.0',\n+ 'tqdm>=4.26.0',\n+ 'segtok>=1.5.7',\n+ 'matplotlib>=3.0.0',\n+ 'mpld3>=0.3',\n 'sklearn',\n- 'sqlitedict==1.6.0',\n- 'deprecated==1.2.4',\n- 'hyperopt==0.1.1',\n- 'pytorch-pretrained-bert==0.3.0'\n+ 'sqlitedict>=1.6.0',\n+ 'deprecated>=1.2.4',\n+ 'hyperopt>=0.1.1',\n+ 'pytorch-pretrained-bert>=0.3.0'\n ],\n include_package_data=True,\n python_requires='>=3.6',\n", "issue": "Don't pin package dependencies in setup.py\nTo be removed, once it is done: Please add the appropriate label to this ticket, e.g. feature or enhancement.\r\n\r\n**Is your feature/enhancement request related to a problem? Please describe.**\r\n\r\nIt is not considered good practice to pin package dependencies in setup.py (see additional context).\r\n\r\nFor instance, I'm forced to downgrade certain packages by installing flair.\r\n\r\n**Describe the solution you'd like**\r\n\r\nJust list the abstract requirements in setup.py with less restrictive version bounds.\r\n\r\n**Additional context**\r\n\r\nSee https://packaging.python.org/discussions/install-requires-vs-requirements/\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nsetup(\n name='flair',\n version='0.4.0',\n description='A very simple framework for state-of-the-art NLP',\n long_description=open(\"README.md\", encoding='utf-8').read(),\n long_description_content_type=\"text/markdown\",\n author='Alan Akbik',\n author_email='[email protected]',\n url='https://github.com/zalandoresearch/flair',\n packages=find_packages(exclude='test'), # same as name\n license='MIT',\n install_requires=[\n 'torch==1.0.0',\n 'gensim==3.4.0',\n 'typing==3.6.4',\n 'tqdm==4.26.0',\n 'segtok==1.5.7',\n 'matplotlib==3.0.0',\n 'mpld3==0.3',\n 'sklearn',\n 'sqlitedict==1.6.0',\n 'deprecated==1.2.4',\n 'hyperopt==0.1.1',\n 'pytorch-pretrained-bert==0.3.0'\n ],\n include_package_data=True,\n python_requires='>=3.6',\n)\n", "path": "setup.py"}]} | 985 | 339 |
gh_patches_debug_5563 | rasdani/github-patches | git_diff | mlflow__mlflow-9536 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] basic-auth init on remote database
### Describe the problem
Same issue #9399 happened when trying to initialize database which invokes this function [migrate_if_needed](https://github.com/mlflow/mlflow/blob/master/mlflow/server/auth/db/utils.py#L30)
Suggestion: Apply the same fix #9410 to force SqlAlchemy to render unobfuscated url
### Suggestion
```
alembic_cfg = _get_alembic_config(engine.url.render_as_string(hide_password=False))
```
### What component(s) does this bug affect?
- [ ] `area/artifacts`: Artifact stores and artifact logging
- [ ] `area/build`: Build and test infrastructure for MLflow
- [ ] `area/docs`: MLflow documentation pages
- [ ] `area/examples`: Example code
- [ ] `area/gateway`: AI Gateway service, Gateway client APIs, third-party Gateway integrations
- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry
- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors
- [ ] `area/recipes`: Recipes, Recipe APIs, Recipe configs, Recipe Templates
- [ ] `area/projects`: MLproject format, project running backends
- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs
- [X] `area/server-infra`: MLflow Tracking server backend
- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging
### What interface(s) does this bug affect?
- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server
- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models
- [X] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry
- [ ] `area/windows`: Windows support
### What language(s) does this bug affect?
- [ ] `language/r`: R APIs and clients
- [ ] `language/java`: Java APIs and clients
- [ ] `language/new`: Proposals for new client languages
### What integration(s) does this bug affect?
- [ ] `integrations/azure`: Azure and Azure ML integrations
- [ ] `integrations/sagemaker`: SageMaker integrations
- [ ] `integrations/databricks`: Databricks integrations
</issue>
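A minimal sketch of the behavioural difference behind the suggestion (assumes SQLAlchemy 1.4+, where `URL.render_as_string` exists; `make_url` stands in for `engine.url`):

```python
from sqlalchemy.engine import make_url

url = make_url("postgresql://user:secret@db:5432/mlflow_auth")

str(url)                                    # 'postgresql://user:***@db:5432/mlflow_auth' -- password obfuscated
url.render_as_string(hide_password=False)   # 'postgresql://user:secret@db:5432/mlflow_auth' -- usable by Alembic
```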
<code>
[start of mlflow/server/auth/db/utils.py]
1 from pathlib import Path
2
3 from alembic.command import upgrade
4 from alembic.config import Config
5 from alembic.migration import MigrationContext
6 from alembic.script import ScriptDirectory
7 from sqlalchemy.engine.base import Engine
8
9
10 def _get_alembic_dir() -> str:
11 return Path(__file__).parent / "migrations"
12
13
14 def _get_alembic_config(url: str) -> Config:
15 alembic_dir = _get_alembic_dir()
16 alembic_ini_path = alembic_dir / "alembic.ini"
17 alembic_cfg = Config(alembic_ini_path)
18 alembic_cfg.set_main_option("script_location", str(alembic_dir))
19 alembic_cfg.set_main_option("sqlalchemy.url", url)
20 return alembic_cfg
21
22
23 def migrate(engine: Engine, revision: str) -> None:
24 alembic_cfg = _get_alembic_config(engine.url.render_as_string(hide_password=False))
25 with engine.begin() as conn:
26 alembic_cfg.attributes["connection"] = conn
27 upgrade(alembic_cfg, revision)
28
29
30 def migrate_if_needed(engine: Engine, revision: str) -> None:
31 alembic_cfg = _get_alembic_config(str(engine.url))
32 script_dir = ScriptDirectory.from_config(alembic_cfg)
33 with engine.begin() as conn:
34 context = MigrationContext.configure(conn)
35 if context.get_current_revision() != script_dir.get_current_head():
36 upgrade(alembic_cfg, revision)
37
[end of mlflow/server/auth/db/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mlflow/server/auth/db/utils.py b/mlflow/server/auth/db/utils.py
--- a/mlflow/server/auth/db/utils.py
+++ b/mlflow/server/auth/db/utils.py
@@ -28,7 +28,7 @@
def migrate_if_needed(engine: Engine, revision: str) -> None:
- alembic_cfg = _get_alembic_config(str(engine.url))
+ alembic_cfg = _get_alembic_config(engine.url.render_as_string(hide_password=False))
script_dir = ScriptDirectory.from_config(alembic_cfg)
with engine.begin() as conn:
context = MigrationContext.configure(conn)
| {"golden_diff": "diff --git a/mlflow/server/auth/db/utils.py b/mlflow/server/auth/db/utils.py\n--- a/mlflow/server/auth/db/utils.py\n+++ b/mlflow/server/auth/db/utils.py\n@@ -28,7 +28,7 @@\n \n \n def migrate_if_needed(engine: Engine, revision: str) -> None:\n- alembic_cfg = _get_alembic_config(str(engine.url))\n+ alembic_cfg = _get_alembic_config(engine.url.render_as_string(hide_password=False))\n script_dir = ScriptDirectory.from_config(alembic_cfg)\n with engine.begin() as conn:\n context = MigrationContext.configure(conn)\n", "issue": "[BUG] basic-auth init on remote database\n### Describe the problem\r\n\r\nSame issue #9399 happened when trying to initialize database which invokes this function [migrate_if_needed](https://github.com/mlflow/mlflow/blob/master/mlflow/server/auth/db/utils.py#L30)\r\n\r\nSuggestion: Apply the same fix #9410 to force SqlAlchemy to render unobfuscated url\r\n\r\n### Suggestion\r\n```\r\nalembic_cfg = _get_alembic_config(engine.url.render_as_string(hide_password=False))\r\n```\r\n\r\n### What component(s) does this bug affect?\r\n\r\n- [ ] `area/artifacts`: Artifact stores and artifact logging\r\n- [ ] `area/build`: Build and test infrastructure for MLflow\r\n- [ ] `area/docs`: MLflow documentation pages\r\n- [ ] `area/examples`: Example code\r\n- [ ] `area/gateway`: AI Gateway service, Gateway client APIs, third-party Gateway integrations\r\n- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\r\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\r\n- [ ] `area/recipes`: Recipes, Recipe APIs, Recipe configs, Recipe Templates\r\n- [ ] `area/projects`: MLproject format, project running backends\r\n- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs\r\n- [X] `area/server-infra`: MLflow Tracking server backend\r\n- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging\r\n\r\n### What interface(s) does this bug affect?\r\n\r\n- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server\r\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\r\n- [X] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\r\n- [ ] `area/windows`: Windows support\r\n\r\n### What language(s) does this bug affect?\r\n\r\n- [ ] `language/r`: R APIs and clients\r\n- [ ] `language/java`: Java APIs and clients\r\n- [ ] `language/new`: Proposals for new client languages\r\n\r\n### What integration(s) does this bug affect?\r\n\r\n- [ ] `integrations/azure`: Azure and Azure ML integrations\r\n- [ ] `integrations/sagemaker`: SageMaker integrations\r\n- [ ] `integrations/databricks`: Databricks integrations\n", "before_files": [{"content": "from pathlib import Path\n\nfrom alembic.command import upgrade\nfrom alembic.config import Config\nfrom alembic.migration import MigrationContext\nfrom alembic.script import ScriptDirectory\nfrom sqlalchemy.engine.base import Engine\n\n\ndef _get_alembic_dir() -> str:\n return Path(__file__).parent / \"migrations\"\n\n\ndef _get_alembic_config(url: str) -> Config:\n alembic_dir = _get_alembic_dir()\n alembic_ini_path = alembic_dir / \"alembic.ini\"\n alembic_cfg = Config(alembic_ini_path)\n alembic_cfg.set_main_option(\"script_location\", str(alembic_dir))\n alembic_cfg.set_main_option(\"sqlalchemy.url\", url)\n return alembic_cfg\n\n\ndef migrate(engine: Engine, revision: str) -> None:\n alembic_cfg = 
_get_alembic_config(engine.url.render_as_string(hide_password=False))\n with engine.begin() as conn:\n alembic_cfg.attributes[\"connection\"] = conn\n upgrade(alembic_cfg, revision)\n\n\ndef migrate_if_needed(engine: Engine, revision: str) -> None:\n alembic_cfg = _get_alembic_config(str(engine.url))\n script_dir = ScriptDirectory.from_config(alembic_cfg)\n with engine.begin() as conn:\n context = MigrationContext.configure(conn)\n if context.get_current_revision() != script_dir.get_current_head():\n upgrade(alembic_cfg, revision)\n", "path": "mlflow/server/auth/db/utils.py"}]} | 1,465 | 140 |
gh_patches_debug_61226 | rasdani/github-patches | git_diff | searxng__searxng-2862 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: bilibili engine is broken
<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->
Something has changed, and some fixes are now needed to use the API successfully.
**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**
Repository: https://github.com/searxng/searxng
Branch: master
Version: 2023.9.27+1a66d7467+dirty
<!-- If you are running on master branch using git execute this command
in order to fetch the latest commit ID:
```
git log -1
```
If you are using searxng-docker then look at the bottom of the SearXNG page
and check for the version after "Powered by SearXNG"
Please also stipulate if you are using a forked version of SearXNG and
include a link to the fork source code.
-->
**How did you install SearXNG?**
make run
<!-- Did you install SearXNG using the official wiki or using searxng-docker
or manually by executing the searx/webapp.py file? -->
**What happened?**
<!-- A clear and concise description of what the bug is. -->
**How To Reproduce**
<!-- How can we reproduce this issue? (as minimally and as precisely as possible) -->
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
**Screenshots & Logs**
<!-- If applicable, add screenshots, logs to help explain your problem. -->
**Additional context**
<!-- Add any other context about the problem here. -->
</issue>
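A hedged sketch of the kind of change involved (the actual fix is in the patch further below); the plain search endpoint is used because the `wbi/` variant appears to expect additional signed query parameters:

```python
# before: requests against the wbi/ endpoint stopped returning usable results
base_url = "https://api.bilibili.com/x/web-interface/wbi/search/type"

# after: non-WBI endpoint, same query parameters as before
base_url = "https://api.bilibili.com/x/web-interface/search/type"
```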
<code>
[start of searx/engines/bilibili.py]
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 # lint: pylint
3 """Bilibili is a Chinese video sharing website.
4
5 .. _Bilibili: https://www.bilibili.com
6 """
7
8 import random
9 import string
10 from urllib.parse import urlencode
11 from datetime import datetime, timedelta
12
13 # Engine metadata
14 about = {
15 "website": "https://www.bilibili.com",
16 "wikidata_id": "Q3077586",
17 "official_api_documentation": None,
18 "use_official_api": False,
19 "require_api_key": False,
20 "results": "JSON",
21 }
22
23 # Engine configuration
24 paging = True
25 results_per_page = 20
26 categories = ["videos"]
27
28 # Search URL
29 base_url = "https://api.bilibili.com/x/web-interface/wbi/search/type"
30
31 cookie = {
32 "innersign": "0",
33 "buvid3": "".join(random.choice(string.hexdigits) for _ in range(16)) + "infoc",
34 "i-wanna-go-back": "-1",
35 "b_ut": "7",
36 "FEED_LIVE_VERSION": "V8",
37 "header_theme_version": "undefined",
38 "home_feed_column": "4",
39 }
40
41
42 def request(query, params):
43 query_params = {
44 "__refresh__": "true",
45 "page": params["pageno"],
46 "page_size": results_per_page,
47 "single_column": "0",
48 "keyword": query,
49 "search_type": "video",
50 }
51
52 params["url"] = f"{base_url}?{urlencode(query_params)}"
53 params["cookies"] = cookie
54
55 return params
56
57
58 # Format the video duration
59 def format_duration(duration):
60 minutes, seconds = map(int, duration.split(":"))
61 total_seconds = minutes * 60 + seconds
62
63 formatted_duration = str(timedelta(seconds=total_seconds))[2:] if 0 <= total_seconds < 3600 else ""
64
65 return formatted_duration
66
67
68 def response(resp):
69 search_res = resp.json()
70
71 results = []
72
73 for item in search_res.get("data", {}).get("result", []):
74 title = item["title"]
75 url = item["arcurl"]
76 thumbnail = item["pic"]
77 description = item["description"]
78 author = item["author"]
79 video_id = item["aid"]
80 unix_date = item["pubdate"]
81
82 formatted_date = datetime.utcfromtimestamp(unix_date)
83 formatted_duration = format_duration(item["duration"])
84 iframe_url = f"https://player.bilibili.com/player.html?aid={video_id}&high_quality=1&autoplay=false&danmaku=0"
85
86 results.append(
87 {
88 "title": title,
89 "url": url,
90 "content": description,
91 "author": author,
92 "publishedDate": formatted_date,
93 "length": formatted_duration,
94 "thumbnail": thumbnail,
95 "iframe_src": iframe_url,
96 "template": "videos.html",
97 }
98 )
99
100 return results
101
[end of searx/engines/bilibili.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/engines/bilibili.py b/searx/engines/bilibili.py
--- a/searx/engines/bilibili.py
+++ b/searx/engines/bilibili.py
@@ -26,7 +26,7 @@
categories = ["videos"]
# Search URL
-base_url = "https://api.bilibili.com/x/web-interface/wbi/search/type"
+base_url = "https://api.bilibili.com/x/web-interface/search/type"
cookie = {
"innersign": "0",
| {"golden_diff": "diff --git a/searx/engines/bilibili.py b/searx/engines/bilibili.py\n--- a/searx/engines/bilibili.py\n+++ b/searx/engines/bilibili.py\n@@ -26,7 +26,7 @@\n categories = [\"videos\"]\n \n # Search URL\n-base_url = \"https://api.bilibili.com/x/web-interface/wbi/search/type\"\n+base_url = \"https://api.bilibili.com/x/web-interface/search/type\"\n \n cookie = {\n \"innersign\": \"0\",\n", "issue": "Bug: bilibili engine is broken\n<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->\r\n\r\nSomething has changed, and now some fixes are needed to use the api successfully.\r\n\r\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\nRepository: https://github.com/searxng/searxng\r\nBranch: master\r\nVersion: 2023.9.27+1a66d7467+dirty\r\n<!-- If you are running on master branch using git execute this command\r\nin order to fetch the latest commit ID:\r\n```\r\ngit log -1\r\n``` \r\nIf you are using searxng-docker then look at the bottom of the SearXNG page\r\nand check for the version after \"Powered by SearXNG\"\r\n\r\nPlease also stipulate if you are using a forked version of SearXNG and\r\ninclude a link to the fork source code.\r\n-->\r\n**How did you install SearXNG?**\r\nmake run\r\n<!-- Did you install SearXNG using the official wiki or using searxng-docker\r\nor manually by executing the searx/webapp.py file? -->\r\n**What happened?**\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n**How To Reproduce**\r\n<!-- How can we reproduce this issue? (as minimally and as precisely as possible) -->\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n**Screenshots & Logs**\r\n<!-- If applicable, add screenshots, logs to help explain your problem. -->\r\n\r\n**Additional context**\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Bilibili is a Chinese video sharing website.\n\n.. 
_Bilibili: https://www.bilibili.com\n\"\"\"\n\nimport random\nimport string\nfrom urllib.parse import urlencode\nfrom datetime import datetime, timedelta\n\n# Engine metadata\nabout = {\n \"website\": \"https://www.bilibili.com\",\n \"wikidata_id\": \"Q3077586\",\n \"official_api_documentation\": None,\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": \"JSON\",\n}\n\n# Engine configuration\npaging = True\nresults_per_page = 20\ncategories = [\"videos\"]\n\n# Search URL\nbase_url = \"https://api.bilibili.com/x/web-interface/wbi/search/type\"\n\ncookie = {\n \"innersign\": \"0\",\n \"buvid3\": \"\".join(random.choice(string.hexdigits) for _ in range(16)) + \"infoc\",\n \"i-wanna-go-back\": \"-1\",\n \"b_ut\": \"7\",\n \"FEED_LIVE_VERSION\": \"V8\",\n \"header_theme_version\": \"undefined\",\n \"home_feed_column\": \"4\",\n}\n\n\ndef request(query, params):\n query_params = {\n \"__refresh__\": \"true\",\n \"page\": params[\"pageno\"],\n \"page_size\": results_per_page,\n \"single_column\": \"0\",\n \"keyword\": query,\n \"search_type\": \"video\",\n }\n\n params[\"url\"] = f\"{base_url}?{urlencode(query_params)}\"\n params[\"cookies\"] = cookie\n\n return params\n\n\n# Format the video duration\ndef format_duration(duration):\n minutes, seconds = map(int, duration.split(\":\"))\n total_seconds = minutes * 60 + seconds\n\n formatted_duration = str(timedelta(seconds=total_seconds))[2:] if 0 <= total_seconds < 3600 else \"\"\n\n return formatted_duration\n\n\ndef response(resp):\n search_res = resp.json()\n\n results = []\n\n for item in search_res.get(\"data\", {}).get(\"result\", []):\n title = item[\"title\"]\n url = item[\"arcurl\"]\n thumbnail = item[\"pic\"]\n description = item[\"description\"]\n author = item[\"author\"]\n video_id = item[\"aid\"]\n unix_date = item[\"pubdate\"]\n\n formatted_date = datetime.utcfromtimestamp(unix_date)\n formatted_duration = format_duration(item[\"duration\"])\n iframe_url = f\"https://player.bilibili.com/player.html?aid={video_id}&high_quality=1&autoplay=false&danmaku=0\"\n\n results.append(\n {\n \"title\": title,\n \"url\": url,\n \"content\": description,\n \"author\": author,\n \"publishedDate\": formatted_date,\n \"length\": formatted_duration,\n \"thumbnail\": thumbnail,\n \"iframe_src\": iframe_url,\n \"template\": \"videos.html\",\n }\n )\n\n return results\n", "path": "searx/engines/bilibili.py"}]} | 1,769 | 124 |
gh_patches_debug_57128 | rasdani/github-patches | git_diff | liqd__adhocracy4-58 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Extend linting to javascript and jsx files
</issue>
<code>
[start of adhocracy4/reports/emails.py]
1 from django.contrib.auth import get_user_model
2 from django.core import urlresolvers
3
4 from adhocracy4 import emails
5
6 User = get_user_model()
7
8
9 class ReportModeratorEmail(emails.ModeratorNotification):
10 template_name = 'a4reports/emails/report_moderators'
11
12
13 class ReportCreatorEmail(emails.Email):
14 template_name = 'a4reports/emails/report_creator'
15
16 def get_receivers(self):
17 return [self.object.content_object.creator]
18
[end of adhocracy4/reports/emails.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/adhocracy4/reports/emails.py b/adhocracy4/reports/emails.py
--- a/adhocracy4/reports/emails.py
+++ b/adhocracy4/reports/emails.py
@@ -1,5 +1,4 @@
from django.contrib.auth import get_user_model
-from django.core import urlresolvers
from adhocracy4 import emails
| {"golden_diff": "diff --git a/adhocracy4/reports/emails.py b/adhocracy4/reports/emails.py\n--- a/adhocracy4/reports/emails.py\n+++ b/adhocracy4/reports/emails.py\n@@ -1,5 +1,4 @@\n from django.contrib.auth import get_user_model\n-from django.core import urlresolvers\n \n from adhocracy4 import emails\n", "issue": "Extend linting to javascript and jsx files\n\n", "before_files": [{"content": "from django.contrib.auth import get_user_model\nfrom django.core import urlresolvers\n\nfrom adhocracy4 import emails\n\nUser = get_user_model()\n\n\nclass ReportModeratorEmail(emails.ModeratorNotification):\n template_name = 'a4reports/emails/report_moderators'\n\n\nclass ReportCreatorEmail(emails.Email):\n template_name = 'a4reports/emails/report_creator'\n\n def get_receivers(self):\n return [self.object.content_object.creator]\n", "path": "adhocracy4/reports/emails.py"}]} | 682 | 83 |
gh_patches_debug_24071 | rasdani/github-patches | git_diff | open-mmlab__mmdetection-5654 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error get params DETR/ Deformable DETR
Despite my attempts to modify the config, the error also occurs when just testing with the basic DETR config file.
Maybe this issue has already been raised?
mmdet==2.13.0
mmcv==1.3.3
```python
python tools/analysis_tools/get_flops.py configs/detr/detr_r50_8x2_150e_coco.py
```
```python
/home/bluav/mmdetection/mmdet/models/backbones/resnet.py:400: UserWarning: DeprecationWarning: pretrained is a deprecated, please use "init_cfg" instead
warnings.warn('DeprecationWarning: pretrained is a deprecated, '
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Warning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!
Traceback (most recent call last):
File "tools/analysis_tools/get_flops.py", line 81, in <module>
main()
File "tools/analysis_tools/get_flops.py", line 71, in main
flops, params = get_model_complexity_info(model, input_shape)
File "/home/bluav/.conda/envs/open-mmlab/lib/python3.7/site-packages/mmcv/cnn/utils/flops_counter.py", line 104, in get_model_complexity_info
_ = flops_model(batch)
File "/home/bluav/.conda/envs/open-mmlab/lib/python3.7/site-packages/torch/nn/modules/module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
File "/home/bluav/mmdetection/mmdet/models/detectors/single_stage.py", line 48, in forward_dummy
outs = self.bbox_head(x)
File "/home/bluav/.conda/envs/open-mmlab/lib/python3.7/site-packages/torch/nn/modules/module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
TypeError: forward() missing 1 required positional argument: 'img_metas'
```
</issue>
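The traceback comes down to `DETRHead.forward` requiring `img_metas`, which the generic `forward_dummy` used by `get_flops.py` never passes. The patch further below overrides `forward_dummy` on the DETR detector to fabricate minimal metas, roughly:

```python
def forward_dummy(self, img):
    """Used only for FLOPs computation; the metas are dummies."""
    batch_size, _, height, width = img.shape
    dummy_img_metas = [
        dict(batch_input_shape=(height, width), img_shape=(height, width, 3))
        for _ in range(batch_size)
    ]
    x = self.extract_feat(img)
    return self.bbox_head(x, dummy_img_metas)
```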
<code>
[start of mmdet/models/detectors/detr.py]
1 import torch
2
3 from ..builder import DETECTORS
4 from .single_stage import SingleStageDetector
5
6
7 @DETECTORS.register_module()
8 class DETR(SingleStageDetector):
9 r"""Implementation of `DETR: End-to-End Object Detection with
10 Transformers <https://arxiv.org/pdf/2005.12872>`_"""
11
12 def __init__(self,
13 backbone,
14 bbox_head,
15 train_cfg=None,
16 test_cfg=None,
17 pretrained=None,
18 init_cfg=None):
19 super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,
20 test_cfg, pretrained, init_cfg)
21
22 # over-write `onnx_export` because:
23 # (1) the forward of bbox_head requires img_metas
24 # (2) the different behavior (e.g. construction of `masks`) between
25 # torch and ONNX model, during the forward of bbox_head
26 def onnx_export(self, img, img_metas):
27 """Test function for exporting to ONNX, without test time augmentation.
28
29 Args:
30 img (torch.Tensor): input images.
31 img_metas (list[dict]): List of image information.
32
33 Returns:
34 tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
35 and class labels of shape [N, num_det].
36 """
37 x = self.extract_feat(img)
38 # forward of this head requires img_metas
39 outs = self.bbox_head.forward_onnx(x, img_metas)
40 # get shape as tensor
41 img_shape = torch._shape_as_tensor(img)[2:]
42 img_metas[0]['img_shape_for_onnx'] = img_shape
43
44 det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)
45
46 return det_bboxes, det_labels
47
[end of mmdet/models/detectors/detr.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mmdet/models/detectors/detr.py b/mmdet/models/detectors/detr.py
--- a/mmdet/models/detectors/detr.py
+++ b/mmdet/models/detectors/detr.py
@@ -1,3 +1,5 @@
+import warnings
+
import torch
from ..builder import DETECTORS
@@ -19,6 +21,27 @@
super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
+ # over-write `forward_dummy` because:
+ # the forward of bbox_head requires img_metas
+ def forward_dummy(self, img):
+ """Used for computing network flops.
+
+ See `mmdetection/tools/analysis_tools/get_flops.py`
+ """
+ warnings.warn('Warning! MultiheadAttention in DETR does not '
+ 'support flops computation! Do not use the '
+ 'results in your papers!')
+
+ batch_size, _, height, width = img.shape
+ dummy_img_metas = [
+ dict(
+ batch_input_shape=(height, width),
+ img_shape=(height, width, 3)) for _ in range(batch_size)
+ ]
+ x = self.extract_feat(img)
+ outs = self.bbox_head(x, dummy_img_metas)
+ return outs
+
# over-write `onnx_export` because:
# (1) the forward of bbox_head requires img_metas
# (2) the different behavior (e.g. construction of `masks`) between
| {"golden_diff": "diff --git a/mmdet/models/detectors/detr.py b/mmdet/models/detectors/detr.py\n--- a/mmdet/models/detectors/detr.py\n+++ b/mmdet/models/detectors/detr.py\n@@ -1,3 +1,5 @@\n+import warnings\n+\n import torch\n \n from ..builder import DETECTORS\n@@ -19,6 +21,27 @@\n super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,\n test_cfg, pretrained, init_cfg)\n \n+ # over-write `forward_dummy` because:\n+ # the forward of bbox_head requires img_metas\n+ def forward_dummy(self, img):\n+ \"\"\"Used for computing network flops.\n+\n+ See `mmdetection/tools/analysis_tools/get_flops.py`\n+ \"\"\"\n+ warnings.warn('Warning! MultiheadAttention in DETR does not '\n+ 'support flops computation! Do not use the '\n+ 'results in your papers!')\n+\n+ batch_size, _, height, width = img.shape\n+ dummy_img_metas = [\n+ dict(\n+ batch_input_shape=(height, width),\n+ img_shape=(height, width, 3)) for _ in range(batch_size)\n+ ]\n+ x = self.extract_feat(img)\n+ outs = self.bbox_head(x, dummy_img_metas)\n+ return outs\n+\n # over-write `onnx_export` because:\n # (1) the forward of bbox_head requires img_metas\n # (2) the different behavior (e.g. construction of `masks`) between\n", "issue": "Error get params DETR/ Deformable DETR\nDespite my attempts to modify, also just testing with the basic config detr file. \r\nMaybe this issue has already been raised?\r\nmmdet==2.13.0\r\nmmcv=1.3.3\r\n\r\n```python\r\npython tools/analysis_tools/get_flops.py configs/detr/detr_r50_8x2_150e_coco.py\r\n```\r\n\r\n```python\r\n/home/bluav/mmdetection/mmdet/models/backbones/resnet.py:400: UserWarning: DeprecationWarning: pretrained is a deprecated, please use \"init_cfg\" instead\r\n warnings.warn('DeprecationWarning: pretrained is a deprecated, '\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nWarning: variables __flops__ or __params__ are already defined for the moduleReLU ptflops can affect your code!\r\nTraceback (most recent call last):\r\n File \"tools/analysis_tools/get_flops.py\", line 81, in <module>\r\n main()\r\n File \"tools/analysis_tools/get_flops.py\", line 71, in main\r\n flops, params = get_model_complexity_info(model, input_shape)\r\n File 
\"/home/bluav/.conda/envs/open-mmlab/lib/python3.7/site-packages/mmcv/cnn/utils/flops_counter.py\", line 104, in get_model_complexity_info\r\n _ = flops_model(batch)\r\n File \"/home/bluav/.conda/envs/open-mmlab/lib/python3.7/site-packages/torch/nn/modules/module.py\", line 889, in _call_impl\r\n result = self.forward(*input, **kwargs)\r\n File \"/home/bluav/mmdetection/mmdet/models/detectors/single_stage.py\", line 48, in forward_dummy\r\n outs = self.bbox_head(x)\r\n File \"/home/bluav/.conda/envs/open-mmlab/lib/python3.7/site-packages/torch/nn/modules/module.py\", line 889, in _call_impl\r\n result = self.forward(*input, **kwargs)\r\nTypeError: forward() missing 1 required positional argument: 'img_metas'\r\n```\r\n\n", "before_files": [{"content": "import torch\n\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\[email protected]_module()\nclass DETR(SingleStageDetector):\n r\"\"\"Implementation of `DETR: End-to-End Object Detection with\n Transformers <https://arxiv.org/pdf/2005.12872>`_\"\"\"\n\n def __init__(self,\n backbone,\n bbox_head,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n init_cfg=None):\n super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,\n test_cfg, pretrained, init_cfg)\n\n # over-write `onnx_export` because:\n # (1) the forward of bbox_head requires img_metas\n # (2) the different behavior (e.g. construction of `masks`) between\n # torch and ONNX model, during the forward of bbox_head\n def onnx_export(self, img, img_metas):\n \"\"\"Test function for exporting to ONNX, without test time augmentation.\n\n Args:\n img (torch.Tensor): input images.\n img_metas (list[dict]): List of image information.\n\n Returns:\n tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]\n and class labels of shape [N, num_det].\n \"\"\"\n x = self.extract_feat(img)\n # forward of this head requires img_metas\n outs = self.bbox_head.forward_onnx(x, img_metas)\n # get shape as tensor\n img_shape = torch._shape_as_tensor(img)[2:]\n img_metas[0]['img_shape_for_onnx'] = img_shape\n\n det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)\n\n return det_bboxes, det_labels\n", "path": "mmdet/models/detectors/detr.py"}]} | 1,805 | 355 |
gh_patches_debug_12016 | rasdani/github-patches | git_diff | celery__celery-450 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
os.kill is not available in windows before python 2.7
As per the topic, the current Celery implementation (>=2.3.0) crashes on Windows with Python 2.5 and 2.6, because it uses os.kill, which is not available on Windows before Python 2.7.
</issue>
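The fix further below simply makes the `os.kill` import conditional so the Windows branch never touches it; the core pattern is:

```python
import platform

if platform.system() == "Windows":
    # On Windows the process-tree helper is used instead (and os.kill may not
    # even exist there before Python 2.7).
    from celery.concurrency.processes import _win
    _kill = _win.kill_processtree
else:
    from os import kill as _kill
```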
<code>
[start of celery/concurrency/processes/__init__.py]
1 """
2
3 Process Pools.
4
5 """
6 import platform
7 import signal as _signal
8
9 from os import kill as _kill
10
11 from celery.concurrency.base import BasePool
12 from celery.concurrency.processes.pool import Pool, RUN
13
14 if platform.system() == "Windows": # pragma: no cover
15 # On Windows os.kill calls TerminateProcess which cannot be
16 # handled by # any process, so this is needed to terminate the task
17 # *and its children* (if any).
18 from celery.concurrency.processes import _win
19 _kill = _win.kill_processtree # noqa
20
21
22 class TaskPool(BasePool):
23 """Process Pool for processing tasks in parallel.
24
25 :param processes: see :attr:`processes`.
26 :param logger: see :attr:`logger`.
27
28
29 .. attribute:: limit
30
31 The number of processes that can run simultaneously.
32
33 .. attribute:: logger
34
35 The logger used for debugging.
36
37 """
38 Pool = Pool
39
40 def on_start(self):
41 """Run the task pool.
42
43 Will pre-fork all workers so they're ready to accept tasks.
44
45 """
46 self._pool = self.Pool(processes=self.limit, **self.options)
47 self.on_apply = self._pool.apply_async
48
49 def on_stop(self):
50 """Gracefully stop the pool."""
51 if self._pool is not None and self._pool._state == RUN:
52 self._pool.close()
53 self._pool.join()
54 self._pool = None
55
56 def on_terminate(self):
57 """Force terminate the pool."""
58 if self._pool is not None:
59 self._pool.terminate()
60 self._pool = None
61
62 def terminate_job(self, pid, signal=None):
63 _kill(pid, signal or _signal.SIGTERM)
64
65 def grow(self, n=1):
66 return self._pool.grow(n)
67
68 def shrink(self, n=1):
69 return self._pool.shrink(n)
70
71 def _get_info(self):
72 return {"max-concurrency": self.limit,
73 "processes": [p.pid for p in self._pool._pool],
74 "max-tasks-per-child": self._pool._maxtasksperchild,
75 "put-guarded-by-semaphore": self.putlocks,
76 "timeouts": (self._pool.soft_timeout, self._pool.timeout)}
77
78 @property
79 def num_processes(self):
80 return self._pool._processes
81
[end of celery/concurrency/processes/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/celery/concurrency/processes/__init__.py b/celery/concurrency/processes/__init__.py
--- a/celery/concurrency/processes/__init__.py
+++ b/celery/concurrency/processes/__init__.py
@@ -6,8 +6,6 @@
import platform
import signal as _signal
-from os import kill as _kill
-
from celery.concurrency.base import BasePool
from celery.concurrency.processes.pool import Pool, RUN
@@ -17,6 +15,8 @@
# *and its children* (if any).
from celery.concurrency.processes import _win
_kill = _win.kill_processtree # noqa
+else:
+ from os import kill as _kill
class TaskPool(BasePool):
| {"golden_diff": "diff --git a/celery/concurrency/processes/__init__.py b/celery/concurrency/processes/__init__.py\n--- a/celery/concurrency/processes/__init__.py\n+++ b/celery/concurrency/processes/__init__.py\n@@ -6,8 +6,6 @@\n import platform\n import signal as _signal\n \n-from os import kill as _kill\n-\n from celery.concurrency.base import BasePool\n from celery.concurrency.processes.pool import Pool, RUN\n \n@@ -17,6 +15,8 @@\n # *and its children* (if any).\n from celery.concurrency.processes import _win\n _kill = _win.kill_processtree # noqa\n+else:\n+ from os import kill as _kill\n \n \n class TaskPool(BasePool):\n", "issue": "os.kill is not available in windows before python 2.7\nAs per the topic, the current celery implementation (>=2.3.0) crashes on windows using python 2.5 and 2.6, because it uses os.kill which is not available in windows before python 2.7\n\n", "before_files": [{"content": "\"\"\"\n\nProcess Pools.\n\n\"\"\"\nimport platform\nimport signal as _signal\n\nfrom os import kill as _kill\n\nfrom celery.concurrency.base import BasePool\nfrom celery.concurrency.processes.pool import Pool, RUN\n\nif platform.system() == \"Windows\": # pragma: no cover\n # On Windows os.kill calls TerminateProcess which cannot be\n # handled by # any process, so this is needed to terminate the task\n # *and its children* (if any).\n from celery.concurrency.processes import _win\n _kill = _win.kill_processtree # noqa\n\n\nclass TaskPool(BasePool):\n \"\"\"Process Pool for processing tasks in parallel.\n\n :param processes: see :attr:`processes`.\n :param logger: see :attr:`logger`.\n\n\n .. attribute:: limit\n\n The number of processes that can run simultaneously.\n\n .. attribute:: logger\n\n The logger used for debugging.\n\n \"\"\"\n Pool = Pool\n\n def on_start(self):\n \"\"\"Run the task pool.\n\n Will pre-fork all workers so they're ready to accept tasks.\n\n \"\"\"\n self._pool = self.Pool(processes=self.limit, **self.options)\n self.on_apply = self._pool.apply_async\n\n def on_stop(self):\n \"\"\"Gracefully stop the pool.\"\"\"\n if self._pool is not None and self._pool._state == RUN:\n self._pool.close()\n self._pool.join()\n self._pool = None\n\n def on_terminate(self):\n \"\"\"Force terminate the pool.\"\"\"\n if self._pool is not None:\n self._pool.terminate()\n self._pool = None\n\n def terminate_job(self, pid, signal=None):\n _kill(pid, signal or _signal.SIGTERM)\n\n def grow(self, n=1):\n return self._pool.grow(n)\n\n def shrink(self, n=1):\n return self._pool.shrink(n)\n\n def _get_info(self):\n return {\"max-concurrency\": self.limit,\n \"processes\": [p.pid for p in self._pool._pool],\n \"max-tasks-per-child\": self._pool._maxtasksperchild,\n \"put-guarded-by-semaphore\": self.putlocks,\n \"timeouts\": (self._pool.soft_timeout, self._pool.timeout)}\n\n @property\n def num_processes(self):\n return self._pool._processes\n", "path": "celery/concurrency/processes/__init__.py"}]} | 1,288 | 174 |
gh_patches_debug_3985 | rasdani/github-patches | git_diff | facebookresearch__hydra-2694 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] Failing lint: Need type annotation for "matches"
See `lint` CI job failure on PR #2689.
```
hydra/plugins/completion_plugin.py:110: error: Need type annotation for "matches" (hint: "matches: List[<type>] = ...") [var-annotated]
```
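The hint in the error message already shows the shape of the change mypy wants: the empty list needs an explicit element type at the point where it is first assigned. A minimal sketch of such an annotation (the `str` element type is an assumption based on how `matches` is populated, not something the error message itself states):
```python
from typing import List

matches: List[str] = []  # annotating the empty list tells mypy the element type
```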
</issue>
<code>
[start of hydra/plugins/completion_plugin.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
3 # TODO: Test with /miniconda3/envs/hydra36/bin/python , seems to be running python for some reason.
4 # TODO: Add tests for completion with +prefix (should suggest config groups that are not listed)
5 # TODO: Test completion when defaults has a missing mandatory item
6
7
8 import os
9 import re
10 import sys
11 from abc import abstractmethod
12
13 from hydra.errors import ConfigCompositionException
14 from omegaconf import (
15 Container,
16 DictConfig,
17 MissingMandatoryValue,
18 OmegaConf,
19 ListConfig,
20 )
21 from typing import Any, List, Optional, Tuple
22
23 from hydra.core.config_loader import ConfigLoader
24 from hydra.core.object_type import ObjectType
25 from hydra.plugins.plugin import Plugin
26 from hydra.types import RunMode
27
28
29 class CompletionPlugin(Plugin):
30 def __init__(self, config_loader: ConfigLoader) -> None:
31 self.config_loader = config_loader
32
33 @abstractmethod
34 def install(self) -> None:
35 ...
36
37 @abstractmethod
38 def uninstall(self) -> None:
39 ...
40
41 @staticmethod
42 @abstractmethod
43 def provides() -> str:
44 """
45 :return: the name of the shell this plugin provides completion for
46 """
47 ...
48
49 @abstractmethod
50 def query(self, config_name: Optional[str]) -> None:
51 ...
52
53 @staticmethod
54 @abstractmethod
55 def help(command: str) -> str:
56 """
57 :param command: "install" or "uninstall"
58 :return: command the user can run to install or uninstall this shell completion on the appropriate shell
59 """
60 ...
61
62 @staticmethod
63 def _get_filename(filename: str) -> Tuple[Optional[str], Optional[str]]:
64 last = filename.rfind("=")
65 if last != -1:
66 key_eq = filename[0 : last + 1]
67 filename = filename[last + 1 :]
68 prefixes = [".", "/", "\\", "./", ".\\"]
69 if sys.platform.startswith("win"):
70 for drive in range(ord("a"), ord("z")):
71 prefixes.append(f"{chr(drive)}:")
72
73 if not filename:
74 return None, None
75 for prefix in prefixes:
76 if filename.lower().startswith(prefix):
77 return key_eq, filename
78 return None, None
79
80 @staticmethod
81 def complete_files(word: str) -> List[str]:
82 if os.path.isdir(word):
83 dirname = word
84 files = os.listdir(word)
85 file_prefix = ""
86 else:
87 dirname = os.path.dirname(word)
88 if os.path.isdir(dirname):
89 files = os.listdir(dirname)
90 else:
91 files = []
92 file_prefix = os.path.basename(word)
93 ret = []
94 for file in files:
95 if file.startswith(file_prefix):
96 ret.append(os.path.join(dirname, file))
97 return ret
98
99 @staticmethod
100 def _get_matches(config: Container, word: str) -> List[str]:
101 def str_rep(in_key: Any, in_value: Any) -> str:
102 if OmegaConf.is_config(in_value):
103 return f"{in_key}."
104 else:
105 return f"{in_key}="
106
107 if config is None:
108 return []
109 elif OmegaConf.is_config(config):
110 matches = []
111 if word.endswith(".") or word.endswith("="):
112 exact_key = word[0:-1]
113 try:
114 conf_node = OmegaConf.select(
115 config, exact_key, throw_on_missing=True
116 )
117 except MissingMandatoryValue:
118 conf_node = ""
119 if conf_node is not None:
120 if OmegaConf.is_config(conf_node):
121 key_matches = CompletionPlugin._get_matches(conf_node, "")
122 else:
123 # primitive
124 if isinstance(conf_node, bool):
125 conf_node = str(conf_node).lower()
126 key_matches = [conf_node]
127 else:
128 key_matches = []
129
130 matches.extend([f"{word}{match}" for match in key_matches])
131 else:
132 last_dot = word.rfind(".")
133 if last_dot != -1:
134 base_key = word[0:last_dot]
135 partial_key = word[last_dot + 1 :]
136 conf_node = OmegaConf.select(config, base_key)
137 key_matches = CompletionPlugin._get_matches(conf_node, partial_key)
138 matches.extend([f"{base_key}.{match}" for match in key_matches])
139 else:
140 if isinstance(config, DictConfig):
141 for key, value in config.items_ex(resolve=False):
142 str_key = str(key)
143 if str_key.startswith(word):
144 matches.append(str_rep(key, value))
145 elif OmegaConf.is_list(config):
146 assert isinstance(config, ListConfig)
147 for idx in range(len(config)):
148 try:
149 value = config[idx]
150 if str(idx).startswith(word):
151 matches.append(str_rep(idx, value))
152 except MissingMandatoryValue:
153 matches.append(str_rep(idx, ""))
154
155 else:
156 assert False, f"Object is not an instance of config : {type(config)}"
157
158 return matches
159
160 def _query_config_groups(
161 self, word: str, config_name: Optional[str], words: List[str]
162 ) -> Tuple[List[str], bool]:
163 is_addition = word.startswith("+")
164 is_deletion = word.startswith("~")
165 if is_addition or is_deletion:
166 prefix, word = word[0], word[1:]
167 else:
168 prefix = ""
169 last_eq_index = word.rfind("=")
170 last_slash_index = word.rfind("/")
171 exact_match: bool = False
172 if last_eq_index != -1:
173 parent_group = word[0:last_eq_index]
174 results_filter = ObjectType.CONFIG
175 else:
176 results_filter = ObjectType.GROUP
177 if last_slash_index == -1:
178 parent_group = ""
179 else:
180 parent_group = word[0:last_slash_index]
181
182 all_matched_groups = self.config_loader.get_group_options(
183 group_name=parent_group,
184 results_filter=results_filter,
185 config_name=config_name,
186 overrides=words,
187 )
188 matched_groups: List[str] = []
189 if results_filter == ObjectType.CONFIG:
190 for match in all_matched_groups:
191 name = f"{parent_group}={match}" if parent_group != "" else match
192 if name.startswith(word):
193 matched_groups.append(name)
194 exact_match = True
195 elif results_filter == ObjectType.GROUP:
196 for match in all_matched_groups:
197 name = f"{parent_group}/{match}" if parent_group != "" else match
198 if name.startswith(word):
199 files = self.config_loader.get_group_options(
200 group_name=name,
201 results_filter=ObjectType.CONFIG,
202 config_name=config_name,
203 overrides=words,
204 )
205 dirs = self.config_loader.get_group_options(
206 group_name=name,
207 results_filter=ObjectType.GROUP,
208 config_name=config_name,
209 overrides=words,
210 )
211 if len(dirs) == 0 and len(files) > 0 and not is_deletion:
212 name = name + "="
213 elif len(dirs) > 0 and len(files) == 0:
214 name = name + "/"
215 matched_groups.append(name)
216
217 matched_groups = [f"{prefix}{group}" for group in matched_groups]
218 return matched_groups, exact_match
219
220 def _query(self, config_name: Optional[str], line: str) -> List[str]:
221 from .._internal.utils import get_args
222
223 new_word = len(line) == 0 or line[-1] == " "
224 parsed_args = get_args(line.split())
225 words = parsed_args.overrides
226 if new_word or len(words) == 0:
227 word = ""
228 else:
229 word = words[-1]
230 words = words[0:-1]
231
232 fname_prefix, filename = CompletionPlugin._get_filename(word)
233 if filename is not None:
234 assert fname_prefix is not None
235 result = CompletionPlugin.complete_files(filename)
236 result = [fname_prefix + file for file in result]
237 else:
238 matched_groups, exact_match = self._query_config_groups(
239 word, config_name=config_name, words=words
240 )
241 config_matches: List[str] = []
242 if not exact_match:
243 run_mode = RunMode.MULTIRUN if parsed_args.multirun else RunMode.RUN
244 config_matches = []
245 try:
246 config = self.config_loader.load_configuration(
247 config_name=config_name, overrides=words, run_mode=run_mode
248 )
249 config_matches = CompletionPlugin._get_matches(config, word)
250 except ConfigCompositionException:
251 # if config fails to load for whatever reason, do not provide config matches.
252 # possible reasons:
253 # - missing entry in defaults list (- group: ???) and not populated in command line
254 # - a config file is not found
255 # etc.
256 pass
257
258 result = list(set(matched_groups + config_matches))
259
260 return sorted(result)
261
262 @staticmethod
263 def strip_python_or_app_name(line: str) -> str:
264 """
265 Take the command line received from shell completion, and strip the app name from it
266 which could be at the form of python script.py or some_app.
267 it also corrects the key (COMP_INDEX) to reflect the same location in the striped command line.
268 :param line: input line, may contain python file.py followed=by_args..
269 :return: tuple(args line, key of cursor in args line)
270 """
271 python_args = r"^\s*[\w\/]*python[3]?\s*[\w/\.]*\s*(.*)"
272 app_args = r"^\s*[\w_\-=\./]+\s*(.*)"
273 match = re.match(python_args, line)
274 if match:
275 return match.group(1)
276 else:
277 match = re.match(app_args, line)
278 if match:
279 return match.group(1)
280 else:
281 raise RuntimeError(f"Error parsing line '{line}'")
282
283
284 class DefaultCompletionPlugin(CompletionPlugin):
285 """
286 A concrete instance of CompletionPlugin that is used for testing.
287 """
288
289 def install(self) -> None:
290 raise NotImplementedError
291
292 def uninstall(self) -> None:
293 raise NotImplementedError
294
295 @staticmethod
296 def provides() -> str:
297 raise NotImplementedError
298
299 def query(self, config_name: Optional[str]) -> None:
300 raise NotImplementedError
301
302 @staticmethod
303 def help(command: str) -> str:
304 raise NotImplementedError
305
[end of hydra/plugins/completion_plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hydra/plugins/completion_plugin.py b/hydra/plugins/completion_plugin.py
--- a/hydra/plugins/completion_plugin.py
+++ b/hydra/plugins/completion_plugin.py
@@ -107,7 +107,7 @@
if config is None:
return []
elif OmegaConf.is_config(config):
- matches = []
+ matches: List[str] = []
if word.endswith(".") or word.endswith("="):
exact_key = word[0:-1]
try:
| {"golden_diff": "diff --git a/hydra/plugins/completion_plugin.py b/hydra/plugins/completion_plugin.py\n--- a/hydra/plugins/completion_plugin.py\n+++ b/hydra/plugins/completion_plugin.py\n@@ -107,7 +107,7 @@\n if config is None:\n return []\n elif OmegaConf.is_config(config):\n- matches = []\n+ matches: List[str] = []\n if word.endswith(\".\") or word.endswith(\"=\"):\n exact_key = word[0:-1]\n try:\n", "issue": "[Bug] Failing lint: Need type annotation for \"matches\"\nSee `lint` CI job failure on PR #2689.\r\n\r\n```\r\nhydra/plugins/completion_plugin.py:110: error: Need type annotation for \"matches\" (hint: \"matches: List[<type>] = ...\") [var-annotated]\r\n```\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n# TODO: Test with /miniconda3/envs/hydra36/bin/python , seems to be running python for some reason.\n# TODO: Add tests for completion with +prefix (should suggest config groups that are not listed)\n# TODO: Test completion when defaults has a missing mandatory item\n\n\nimport os\nimport re\nimport sys\nfrom abc import abstractmethod\n\nfrom hydra.errors import ConfigCompositionException\nfrom omegaconf import (\n Container,\n DictConfig,\n MissingMandatoryValue,\n OmegaConf,\n ListConfig,\n)\nfrom typing import Any, List, Optional, Tuple\n\nfrom hydra.core.config_loader import ConfigLoader\nfrom hydra.core.object_type import ObjectType\nfrom hydra.plugins.plugin import Plugin\nfrom hydra.types import RunMode\n\n\nclass CompletionPlugin(Plugin):\n def __init__(self, config_loader: ConfigLoader) -> None:\n self.config_loader = config_loader\n\n @abstractmethod\n def install(self) -> None:\n ...\n\n @abstractmethod\n def uninstall(self) -> None:\n ...\n\n @staticmethod\n @abstractmethod\n def provides() -> str:\n \"\"\"\n :return: the name of the shell this plugin provides completion for\n \"\"\"\n ...\n\n @abstractmethod\n def query(self, config_name: Optional[str]) -> None:\n ...\n\n @staticmethod\n @abstractmethod\n def help(command: str) -> str:\n \"\"\"\n :param command: \"install\" or \"uninstall\"\n :return: command the user can run to install or uninstall this shell completion on the appropriate shell\n \"\"\"\n ...\n\n @staticmethod\n def _get_filename(filename: str) -> Tuple[Optional[str], Optional[str]]:\n last = filename.rfind(\"=\")\n if last != -1:\n key_eq = filename[0 : last + 1]\n filename = filename[last + 1 :]\n prefixes = [\".\", \"/\", \"\\\\\", \"./\", \".\\\\\"]\n if sys.platform.startswith(\"win\"):\n for drive in range(ord(\"a\"), ord(\"z\")):\n prefixes.append(f\"{chr(drive)}:\")\n\n if not filename:\n return None, None\n for prefix in prefixes:\n if filename.lower().startswith(prefix):\n return key_eq, filename\n return None, None\n\n @staticmethod\n def complete_files(word: str) -> List[str]:\n if os.path.isdir(word):\n dirname = word\n files = os.listdir(word)\n file_prefix = \"\"\n else:\n dirname = os.path.dirname(word)\n if os.path.isdir(dirname):\n files = os.listdir(dirname)\n else:\n files = []\n file_prefix = os.path.basename(word)\n ret = []\n for file in files:\n if file.startswith(file_prefix):\n ret.append(os.path.join(dirname, file))\n return ret\n\n @staticmethod\n def _get_matches(config: Container, word: str) -> List[str]:\n def str_rep(in_key: Any, in_value: Any) -> str:\n if OmegaConf.is_config(in_value):\n return f\"{in_key}.\"\n else:\n return f\"{in_key}=\"\n\n if config is None:\n return []\n elif OmegaConf.is_config(config):\n matches = []\n if word.endswith(\".\") or 
word.endswith(\"=\"):\n exact_key = word[0:-1]\n try:\n conf_node = OmegaConf.select(\n config, exact_key, throw_on_missing=True\n )\n except MissingMandatoryValue:\n conf_node = \"\"\n if conf_node is not None:\n if OmegaConf.is_config(conf_node):\n key_matches = CompletionPlugin._get_matches(conf_node, \"\")\n else:\n # primitive\n if isinstance(conf_node, bool):\n conf_node = str(conf_node).lower()\n key_matches = [conf_node]\n else:\n key_matches = []\n\n matches.extend([f\"{word}{match}\" for match in key_matches])\n else:\n last_dot = word.rfind(\".\")\n if last_dot != -1:\n base_key = word[0:last_dot]\n partial_key = word[last_dot + 1 :]\n conf_node = OmegaConf.select(config, base_key)\n key_matches = CompletionPlugin._get_matches(conf_node, partial_key)\n matches.extend([f\"{base_key}.{match}\" for match in key_matches])\n else:\n if isinstance(config, DictConfig):\n for key, value in config.items_ex(resolve=False):\n str_key = str(key)\n if str_key.startswith(word):\n matches.append(str_rep(key, value))\n elif OmegaConf.is_list(config):\n assert isinstance(config, ListConfig)\n for idx in range(len(config)):\n try:\n value = config[idx]\n if str(idx).startswith(word):\n matches.append(str_rep(idx, value))\n except MissingMandatoryValue:\n matches.append(str_rep(idx, \"\"))\n\n else:\n assert False, f\"Object is not an instance of config : {type(config)}\"\n\n return matches\n\n def _query_config_groups(\n self, word: str, config_name: Optional[str], words: List[str]\n ) -> Tuple[List[str], bool]:\n is_addition = word.startswith(\"+\")\n is_deletion = word.startswith(\"~\")\n if is_addition or is_deletion:\n prefix, word = word[0], word[1:]\n else:\n prefix = \"\"\n last_eq_index = word.rfind(\"=\")\n last_slash_index = word.rfind(\"/\")\n exact_match: bool = False\n if last_eq_index != -1:\n parent_group = word[0:last_eq_index]\n results_filter = ObjectType.CONFIG\n else:\n results_filter = ObjectType.GROUP\n if last_slash_index == -1:\n parent_group = \"\"\n else:\n parent_group = word[0:last_slash_index]\n\n all_matched_groups = self.config_loader.get_group_options(\n group_name=parent_group,\n results_filter=results_filter,\n config_name=config_name,\n overrides=words,\n )\n matched_groups: List[str] = []\n if results_filter == ObjectType.CONFIG:\n for match in all_matched_groups:\n name = f\"{parent_group}={match}\" if parent_group != \"\" else match\n if name.startswith(word):\n matched_groups.append(name)\n exact_match = True\n elif results_filter == ObjectType.GROUP:\n for match in all_matched_groups:\n name = f\"{parent_group}/{match}\" if parent_group != \"\" else match\n if name.startswith(word):\n files = self.config_loader.get_group_options(\n group_name=name,\n results_filter=ObjectType.CONFIG,\n config_name=config_name,\n overrides=words,\n )\n dirs = self.config_loader.get_group_options(\n group_name=name,\n results_filter=ObjectType.GROUP,\n config_name=config_name,\n overrides=words,\n )\n if len(dirs) == 0 and len(files) > 0 and not is_deletion:\n name = name + \"=\"\n elif len(dirs) > 0 and len(files) == 0:\n name = name + \"/\"\n matched_groups.append(name)\n\n matched_groups = [f\"{prefix}{group}\" for group in matched_groups]\n return matched_groups, exact_match\n\n def _query(self, config_name: Optional[str], line: str) -> List[str]:\n from .._internal.utils import get_args\n\n new_word = len(line) == 0 or line[-1] == \" \"\n parsed_args = get_args(line.split())\n words = parsed_args.overrides\n if new_word or len(words) == 0:\n word = \"\"\n else:\n word = 
words[-1]\n words = words[0:-1]\n\n fname_prefix, filename = CompletionPlugin._get_filename(word)\n if filename is not None:\n assert fname_prefix is not None\n result = CompletionPlugin.complete_files(filename)\n result = [fname_prefix + file for file in result]\n else:\n matched_groups, exact_match = self._query_config_groups(\n word, config_name=config_name, words=words\n )\n config_matches: List[str] = []\n if not exact_match:\n run_mode = RunMode.MULTIRUN if parsed_args.multirun else RunMode.RUN\n config_matches = []\n try:\n config = self.config_loader.load_configuration(\n config_name=config_name, overrides=words, run_mode=run_mode\n )\n config_matches = CompletionPlugin._get_matches(config, word)\n except ConfigCompositionException:\n # if config fails to load for whatever reason, do not provide config matches.\n # possible reasons:\n # - missing entry in defaults list (- group: ???) and not populated in command line\n # - a config file is not found\n # etc.\n pass\n\n result = list(set(matched_groups + config_matches))\n\n return sorted(result)\n\n @staticmethod\n def strip_python_or_app_name(line: str) -> str:\n \"\"\"\n Take the command line received from shell completion, and strip the app name from it\n which could be at the form of python script.py or some_app.\n it also corrects the key (COMP_INDEX) to reflect the same location in the striped command line.\n :param line: input line, may contain python file.py followed=by_args..\n :return: tuple(args line, key of cursor in args line)\n \"\"\"\n python_args = r\"^\\s*[\\w\\/]*python[3]?\\s*[\\w/\\.]*\\s*(.*)\"\n app_args = r\"^\\s*[\\w_\\-=\\./]+\\s*(.*)\"\n match = re.match(python_args, line)\n if match:\n return match.group(1)\n else:\n match = re.match(app_args, line)\n if match:\n return match.group(1)\n else:\n raise RuntimeError(f\"Error parsing line '{line}'\")\n\n\nclass DefaultCompletionPlugin(CompletionPlugin):\n \"\"\"\n A concrete instance of CompletionPlugin that is used for testing.\n \"\"\"\n\n def install(self) -> None:\n raise NotImplementedError\n\n def uninstall(self) -> None:\n raise NotImplementedError\n\n @staticmethod\n def provides() -> str:\n raise NotImplementedError\n\n def query(self, config_name: Optional[str]) -> None:\n raise NotImplementedError\n\n @staticmethod\n def help(command: str) -> str:\n raise NotImplementedError\n", "path": "hydra/plugins/completion_plugin.py"}]} | 3,651 | 112 |
gh_patches_debug_27317 | rasdani/github-patches | git_diff | bridgecrewio__checkov-3126 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
How to Check a Data Source connected to a Resource
**Describe the issue**
I want to check whether the attribute "group" of the resource "azuredevops_group_membership" refers to a data source "azuredevops_group" with the attribute "name" = "Build Administrators", for example.
**Examples**
Snippet from [terraform registry](https://registry.terraform.io/providers/microsoft/azuredevops/latest/docs/resources/group_membership)
```terraform
data "azuredevops_group" "example" {
project_id = azuredevops_project.example.id
name = "Build Administrators"
}
resource "azuredevops_group_membership" "example" {
group = data.azuredevops_group.example.descriptor
members = [
azuredevops_user_entitlement.example.descriptor
]
}
```
I tried creating a custom policy in Python, but I didn't understand how I could make this work; I was only able to create a policy to check if the attribute name of the data azuredevops_group was equal to "Build Administrators":
```python
from typing import Dict, List, Any
from checkov.terraform.checks.data.base_check import BaseDataCheck
from checkov.common.models.enums import CheckResult, CheckCategories
class NoBuildAdministratorCreated(BaseDataCheck):
def __init__(self) -> None:
name = 'Ensure no build administrator is created on file'
id = "CKV_ADO_9000"
supported_data = ["azuredevops_group"]
categories = [CheckCategories.GENERAL_SECURITY]
super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)
def scan_data_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
if (conf.get("name", "Build Administrators")):
return CheckResult.FAILED
return CheckResult.PASSED
check = NoBuildAdministratorCreated()
```
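For reference, `scan_data_conf` only ever sees the parsed attributes of a single `data` block, and — as the `Dict[str, List[Any]]` hint above suggests — each attribute value arrives wrapped in a list. A sketch of a drop-in variant of the method above that compares just this one attribute (a hypothetical illustration; it still does not express the resource-to-data-source connection asked about here):
```python
def scan_data_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
    # conf maps attribute names to lists of parsed values,
    # e.g. {"project_id": [...], "name": ["Build Administrators"]}
    name_values = conf.get("name", [])
    if "Build Administrators" in name_values:
        return CheckResult.FAILED
    return CheckResult.PASSED
```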
**Version (please complete the following information):**
- Checkov Version 2.0.1223
**Additional context**
My goal is to check if people are creating admin groups inside of a terraform file. I'm kinda new to reading the documentation and code libraries of open source projects, so I'm having a bit of a hard time understanding how to use the checkov python scan functions to create custom policies. So any advice or code example to help me understand better how it works and what this **conf** is would be much appreciated, thanks!
</issue>
<code>
[start of checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py]
1 import concurrent.futures
2 import re
3 from typing import List, Tuple, Dict, Any, Optional, Pattern
4
5 from networkx import DiGraph
6
7 from checkov.common.graph.checks_infra.enums import SolverType
8 from checkov.common.graph.checks_infra.solvers.base_solver import BaseSolver
9
10 from concurrent.futures import ThreadPoolExecutor
11
12 from checkov.common.graph.graph_builder import CustomAttributes
13 from checkov.common.graph.graph_builder.graph_components.block_types import BlockType
14 from checkov.common.util.var_utils import is_terraform_variable_dependent, is_cloudformation_variable_dependent
15
16 WILDCARD_PATTERN = re.compile(r"(\S+[.][*][.]*)+")
17
18
19 class BaseAttributeSolver(BaseSolver):
20 operator = ""
21
22 def __init__(self, resource_types: List[str], attribute: Optional[str], value: Any) -> None:
23 super().__init__(SolverType.ATTRIBUTE)
24 self.resource_types = resource_types
25 self.attribute = attribute
26 self.value = value
27
28 def run(self, graph_connector: DiGraph) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
29 executer = ThreadPoolExecutor()
30 jobs = []
31 passed_vertices: List[Dict[str, Any]] = []
32 failed_vertices: List[Dict[str, Any]] = []
33 for _, data in graph_connector.nodes(data=True):
34 if (not self.resource_types or data.get(CustomAttributes.RESOURCE_TYPE) in self.resource_types) \
35 and data.get(CustomAttributes.BLOCK_TYPE) == BlockType.RESOURCE:
36 jobs.append(executer.submit(self._process_node, data, passed_vertices, failed_vertices))
37
38 concurrent.futures.wait(jobs)
39 return passed_vertices, failed_vertices
40
41 def get_operation(self, vertex: Dict[str, Any]) -> bool:
42 if self.attribute and re.match(WILDCARD_PATTERN, self.attribute):
43 attribute_patterns = self.get_attribute_patterns(self.attribute)
44 attribute_matches = [
45 attr
46 for attr in vertex
47 if any(re.match(re.compile(attribute_pattern), attr) for attribute_pattern in attribute_patterns)
48 ]
49 if attribute_matches:
50 return self.resource_type_pred(vertex, self.resource_types) and any(
51 self._get_operation(vertex=vertex, attribute=attr) for attr in attribute_matches
52 )
53 return self.resource_type_pred(vertex, self.resource_types) and self._get_operation(
54 vertex=vertex, attribute=self.attribute
55 )
56
57 def _get_operation(self, vertex: Dict[str, Any], attribute: Optional[str]) -> bool:
58 raise NotImplementedError
59
60 def _process_node(
61 self, data: Dict[str, Any], passed_vartices: List[Dict[str, Any]], failed_vertices: List[Dict[str, Any]]
62 ) -> None:
63 if not self.resource_type_pred(data, self.resource_types):
64 return
65 if self.get_operation(vertex=data):
66 passed_vartices.append(data)
67 else:
68 failed_vertices.append(data)
69
70 @staticmethod
71 def get_attribute_patterns(attribute: str) -> Tuple[Pattern[str], Pattern[str]]:
72 index_pattern = r"[\d]+"
73 split_by_dots = attribute.split(".")
74
75 pattern_parts = []
76 pattern_parts_without_index = []
77 for attr_part in split_by_dots:
78 if attr_part == "*":
79 pattern_parts.append(index_pattern)
80 else:
81 attr_part_pattern = f"({attr_part})"
82 pattern_parts.append(attr_part_pattern)
83 pattern_parts_without_index.append(attr_part_pattern)
84
85 pattern = "[.]".join(pattern_parts)
86 pattern_with_index = re.compile(pattern)
87
88 pattern = "[.]".join(pattern_parts_without_index)
89 pattern_without_index = re.compile(pattern)
90
91 return pattern_with_index, pattern_without_index
92
93 @staticmethod
94 def _is_variable_dependant(value: Any, source: str) -> bool:
95 if source == 'Terraform' and is_terraform_variable_dependent(value):
96 return True
97 # TODO add logic for CloudFormation
98 # elif source == 'CloudFormation' and is_cloudformation_variable_dependent(value):
99 # return True
100
101 return False
102
[end of checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py b/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py
--- a/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py
+++ b/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py
@@ -12,7 +12,9 @@
from checkov.common.graph.graph_builder import CustomAttributes
from checkov.common.graph.graph_builder.graph_components.block_types import BlockType
from checkov.common.util.var_utils import is_terraform_variable_dependent, is_cloudformation_variable_dependent
+from checkov.terraform.graph_builder.graph_components.block_types import BlockType as TerraformBlockType
+SUPPORTED_BLOCK_TYPES = {BlockType.RESOURCE, TerraformBlockType.DATA}
WILDCARD_PATTERN = re.compile(r"(\S+[.][*][.]*)+")
@@ -32,7 +34,7 @@
failed_vertices: List[Dict[str, Any]] = []
for _, data in graph_connector.nodes(data=True):
if (not self.resource_types or data.get(CustomAttributes.RESOURCE_TYPE) in self.resource_types) \
- and data.get(CustomAttributes.BLOCK_TYPE) == BlockType.RESOURCE:
+ and data.get(CustomAttributes.BLOCK_TYPE) in SUPPORTED_BLOCK_TYPES:
jobs.append(executer.submit(self._process_node, data, passed_vertices, failed_vertices))
concurrent.futures.wait(jobs)
| {"golden_diff": "diff --git a/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py b/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py\n--- a/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py\n+++ b/checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py\n@@ -12,7 +12,9 @@\n from checkov.common.graph.graph_builder import CustomAttributes\n from checkov.common.graph.graph_builder.graph_components.block_types import BlockType\n from checkov.common.util.var_utils import is_terraform_variable_dependent, is_cloudformation_variable_dependent\n+from checkov.terraform.graph_builder.graph_components.block_types import BlockType as TerraformBlockType\n \n+SUPPORTED_BLOCK_TYPES = {BlockType.RESOURCE, TerraformBlockType.DATA}\n WILDCARD_PATTERN = re.compile(r\"(\\S+[.][*][.]*)+\")\n \n \n@@ -32,7 +34,7 @@\n failed_vertices: List[Dict[str, Any]] = []\n for _, data in graph_connector.nodes(data=True):\n if (not self.resource_types or data.get(CustomAttributes.RESOURCE_TYPE) in self.resource_types) \\\n- and data.get(CustomAttributes.BLOCK_TYPE) == BlockType.RESOURCE:\n+ and data.get(CustomAttributes.BLOCK_TYPE) in SUPPORTED_BLOCK_TYPES:\n jobs.append(executer.submit(self._process_node, data, passed_vertices, failed_vertices))\n \n concurrent.futures.wait(jobs)\n", "issue": "How to Check a Data Source connected to a Resource\n**Describe the issue**\r\nI want to check if the attribute \"group\" of the resource \"azuredevops_group_membership\" refers to a data source \"azuredevops_group\" with the attribute \"name\" = \"Build Administrators\" for example.\r\n\r\n**Examples**\r\nSnippet from [terraform registry](https://registry.terraform.io/providers/microsoft/azuredevops/latest/docs/resources/group_membership)\r\n```terraform\r\ndata \"azuredevops_group\" \"example\" {\r\n project_id = azuredevops_project.example.id\r\n name = \"Build Administrators\"\r\n}\r\n\r\nresource \"azuredevops_group_membership\" \"example\" {\r\n group = data.azuredevops_group.example.descriptor\r\n members = [\r\n azuredevops_user_entitlement.example.descriptor\r\n ]\r\n}\r\n```\r\nI tryed creating a custom policy in python but I didn't understand how I could make this work, I was only able to create a policy to check if the attribute name of the data azuredevops_group was equal to \"Build Administrators\":\r\n\r\n```python\r\nfrom typing import Dict, List, Any\r\n\r\nfrom checkov.terraform.checks.data.base_check import BaseDataCheck\r\nfrom checkov.common.models.enums import CheckResult, CheckCategories\r\n\r\nclass NoBuildAdministratorCreated(BaseDataCheck):\r\n def __init__(self) -> None:\r\n name = 'Ensure no build administrator is created on file'\r\n id = \"CKV_ADO_9000\"\r\n supported_data = [\"azuredevops_group\"]\r\n categories = [CheckCategories.GENERAL_SECURITY]\r\n super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)\r\n\r\n def scan_data_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\r\n\r\n if (conf.get(\"name\", \"Build Administrators\")):\r\n return CheckResult.FAILED\r\n \r\n return CheckResult.PASSED\r\n\r\ncheck = NoBuildAdministratorCreated()\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.0.1223\r\n\r\n**Additional context**\r\nMy goal is to check if people are creating admin groups inside of a terraform file. 
I'm kinda new to reading documentations and code libraries of open source projects so I'm having a bit of a hard time understanding how to use the checkov python scan functions to create custom policies. So any advice or code example to help me understand better how it works and what is this **conf** would be much appreciated, thanks!\r\n\n", "before_files": [{"content": "import concurrent.futures\nimport re\nfrom typing import List, Tuple, Dict, Any, Optional, Pattern\n\nfrom networkx import DiGraph\n\nfrom checkov.common.graph.checks_infra.enums import SolverType\nfrom checkov.common.graph.checks_infra.solvers.base_solver import BaseSolver\n\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom checkov.common.graph.graph_builder import CustomAttributes\nfrom checkov.common.graph.graph_builder.graph_components.block_types import BlockType\nfrom checkov.common.util.var_utils import is_terraform_variable_dependent, is_cloudformation_variable_dependent\n\nWILDCARD_PATTERN = re.compile(r\"(\\S+[.][*][.]*)+\")\n\n\nclass BaseAttributeSolver(BaseSolver):\n operator = \"\"\n\n def __init__(self, resource_types: List[str], attribute: Optional[str], value: Any) -> None:\n super().__init__(SolverType.ATTRIBUTE)\n self.resource_types = resource_types\n self.attribute = attribute\n self.value = value\n\n def run(self, graph_connector: DiGraph) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:\n executer = ThreadPoolExecutor()\n jobs = []\n passed_vertices: List[Dict[str, Any]] = []\n failed_vertices: List[Dict[str, Any]] = []\n for _, data in graph_connector.nodes(data=True):\n if (not self.resource_types or data.get(CustomAttributes.RESOURCE_TYPE) in self.resource_types) \\\n and data.get(CustomAttributes.BLOCK_TYPE) == BlockType.RESOURCE:\n jobs.append(executer.submit(self._process_node, data, passed_vertices, failed_vertices))\n\n concurrent.futures.wait(jobs)\n return passed_vertices, failed_vertices\n\n def get_operation(self, vertex: Dict[str, Any]) -> bool:\n if self.attribute and re.match(WILDCARD_PATTERN, self.attribute):\n attribute_patterns = self.get_attribute_patterns(self.attribute)\n attribute_matches = [\n attr\n for attr in vertex\n if any(re.match(re.compile(attribute_pattern), attr) for attribute_pattern in attribute_patterns)\n ]\n if attribute_matches:\n return self.resource_type_pred(vertex, self.resource_types) and any(\n self._get_operation(vertex=vertex, attribute=attr) for attr in attribute_matches\n )\n return self.resource_type_pred(vertex, self.resource_types) and self._get_operation(\n vertex=vertex, attribute=self.attribute\n )\n\n def _get_operation(self, vertex: Dict[str, Any], attribute: Optional[str]) -> bool:\n raise NotImplementedError\n\n def _process_node(\n self, data: Dict[str, Any], passed_vartices: List[Dict[str, Any]], failed_vertices: List[Dict[str, Any]]\n ) -> None:\n if not self.resource_type_pred(data, self.resource_types):\n return\n if self.get_operation(vertex=data):\n passed_vartices.append(data)\n else:\n failed_vertices.append(data)\n\n @staticmethod\n def get_attribute_patterns(attribute: str) -> Tuple[Pattern[str], Pattern[str]]:\n index_pattern = r\"[\\d]+\"\n split_by_dots = attribute.split(\".\")\n\n pattern_parts = []\n pattern_parts_without_index = []\n for attr_part in split_by_dots:\n if attr_part == \"*\":\n pattern_parts.append(index_pattern)\n else:\n attr_part_pattern = f\"({attr_part})\"\n pattern_parts.append(attr_part_pattern)\n pattern_parts_without_index.append(attr_part_pattern)\n\n pattern = 
\"[.]\".join(pattern_parts)\n pattern_with_index = re.compile(pattern)\n\n pattern = \"[.]\".join(pattern_parts_without_index)\n pattern_without_index = re.compile(pattern)\n\n return pattern_with_index, pattern_without_index\n\n @staticmethod\n def _is_variable_dependant(value: Any, source: str) -> bool:\n if source == 'Terraform' and is_terraform_variable_dependent(value):\n return True\n # TODO add logic for CloudFormation\n # elif source == 'CloudFormation' and is_cloudformation_variable_dependent(value):\n # return True\n\n return False\n", "path": "checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py"}]} | 2,151 | 323 |
gh_patches_debug_35281 | rasdani/github-patches | git_diff | piskvorky__gensim-1819 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`datatype` parameter in `load_word2vec_format` doesn't work as expected
#### Description
Using `datatype=np.float64` in a `KeyedVectors.load_word2vec_format` call doesn't work as expected; the loaded floats seem to lose precision. The datatype for `syn0` is still `float64`, though, so it seems that they are cast to float32 first while loading, then cast to float64 when creating the array.
#### Steps/Code/Corpus to Reproduce
Using this file -
[test.kv.txt](https://github.com/RaRe-Technologies/gensim/files/1434953/test.kv.txt)
```python
from gensim.models.keyedvectors import KeyedVectors
import numpy as np
kv = KeyedVectors.load_word2vec_format('test.kv.txt', datatype=np.float64)
print(kv['horse.n.01'][0] == -0.0008546282343595379)
# False
print(kv['horse.n.01'].dtype)
# float64
```
#### Expected Results
```python
print(kv['horse.n.01'][0] == -0.0008546282343595379)
# True
```
#### Actual Results
```python
print(kv['horse.n.01'][0] == -0.0008546282343595379)
# False
```
Looking at the code and making a quick hack [here](https://github.com/RaRe-Technologies/gensim/blob/develop/gensim/models/keyedvectors.py#L253), changing..
```
word, weights = parts[0], [REAL(x) for x in parts[1:]]
```
to..
```
word, weights = parts[0], [datatype(x) for x in parts[1:]]
```
..leads to the correct result. However, I imagine there are other cases to be covered as well.
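One such case is the binary code path, which builds each row with a hard-coded `float32` (`REAL`) dtype; the analogous change there is only a cast (a binary file written as float32 cannot recover float64 precision), roughly:
```python
# text format: parse each value directly into the requested dtype
word, weights = parts[0], [datatype(x) for x in parts[1:]]

# binary format: the stored values are float32, so this can only cast the resulting array
weights = fromstring(fin.read(binary_len), dtype=REAL).astype(datatype)
```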
</issue>
<code>
[start of gensim/models/utils_any2vec.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Author: Shiva Manne <[email protected]>
5 # Copyright (C) 2018 RaRe Technologies s.r.o.
6
7 """This module contains various general functions useful for any2vec models."""
8
9 import logging
10 import numpy as np
11 from gensim import utils
12
13 from numpy import zeros, dtype, float32 as REAL, ascontiguousarray, fromstring
14
15 from six.moves import xrange
16 from six import iteritems
17
18 logger = logging.getLogger(__name__)
19
20
21 def _compute_ngrams(word, min_n, max_n):
22 """Returns the list of all possible ngrams for a given word.
23
24 Parameters
25 ----------
26 word : str
27 The word whose ngrams need to be computed
28 min_n : int
29 minimum character length of the ngrams
30 max_n : int
31 maximum character length of the ngrams
32
33 Returns
34 -------
35 :obj:`list` of :obj:`str`
36 List of character ngrams
37
38 """
39 BOW, EOW = ('<', '>') # Used by FastText to attach to all words as prefix and suffix
40 extended_word = BOW + word + EOW
41 ngrams = []
42 for ngram_length in range(min_n, min(len(extended_word), max_n) + 1):
43 for i in range(0, len(extended_word) - ngram_length + 1):
44 ngrams.append(extended_word[i:i + ngram_length])
45 return ngrams
46
47
48 def _ft_hash(string):
49 """Reproduces [hash method](https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc)
50 used in [1]_.
51
52 Parameter
53 ---------
54 string : str
55 The string whose hash needs to be calculated
56
57 Returns
58 -------
59 int
60 The hash of the string
61
62 """
63 # Runtime warnings for integer overflow are raised, this is expected behaviour. These warnings are suppressed.
64 old_settings = np.seterr(all='ignore')
65 h = np.uint32(2166136261)
66 for c in string:
67 h = h ^ np.uint32(ord(c))
68 h = h * np.uint32(16777619)
69 np.seterr(**old_settings)
70 return h
71
72
73 def _save_word2vec_format(fname, vocab, vectors, fvocab=None, binary=False, total_vec=None):
74 """Store the input-hidden weight matrix in the same format used by the original
75 C word2vec-tool, for compatibility.
76
77 Parameters
78 ----------
79 fname : str
80 The file path used to save the vectors in
81 vocab : dict
82 The vocabulary of words
83 vectors : numpy.array
84 The vectors to be stored
85 fvocab : str
86 Optional file path used to save the vocabulary
87 binary : bool
88 If True, the data wil be saved in binary word2vec format, else it will be saved in plain text.
89 total_vec : int
90 Optional parameter to explicitly specify total no. of vectors
91 (in case word vectors are appended with document vectors afterwards)
92
93 """
94 if not (vocab or vectors):
95 raise RuntimeError("no input")
96 if total_vec is None:
97 total_vec = len(vocab)
98 vector_size = vectors.shape[1]
99 if fvocab is not None:
100 logger.info("storing vocabulary in %s", fvocab)
101 with utils.smart_open(fvocab, 'wb') as vout:
102 for word, vocab_ in sorted(iteritems(vocab), key=lambda item: -item[1].count):
103 vout.write(utils.to_utf8("%s %s\n" % (word, vocab_.count)))
104 logger.info("storing %sx%s projection weights into %s", total_vec, vector_size, fname)
105 assert (len(vocab), vector_size) == vectors.shape
106 with utils.smart_open(fname, 'wb') as fout:
107 fout.write(utils.to_utf8("%s %s\n" % (total_vec, vector_size)))
108 # store in sorted order: most frequent words at the top
109 for word, vocab_ in sorted(iteritems(vocab), key=lambda item: -item[1].count):
110 row = vectors[vocab_.index]
111 if binary:
112 fout.write(utils.to_utf8(word) + b" " + row.tostring())
113 else:
114 fout.write(utils.to_utf8("%s %s\n" % (word, ' '.join("%f" % val for val in row))))
115
116
117 def _load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
118 limit=None, datatype=REAL):
119 """Load the input-hidden weight matrix from the original C word2vec-tool format.
120
121 Note that the information stored in the file is incomplete (the binary tree is missing),
122 so while you can query for word similarity etc., you cannot continue training
123 with a model loaded this way.
124
125 Parameters
126 ----------
127 fname : str
128 The file path to the saved word2vec-format file.
129 fvocab : str
130 Optional file path to the vocabulary.Word counts are read from `fvocab` filename,
131 if set (this is the file generated by `-save-vocab` flag of the original C tool).
132 binary : bool
133 If True, indicates whether the data is in binary word2vec format.
134 encoding : str
135 If you trained the C model using non-utf8 encoding for words, specify that
136 encoding in `encoding`.
137 unicode_errors : str
138 default 'strict', is a string suitable to be passed as the `errors`
139 argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source
140 file may include word tokens truncated in the middle of a multibyte unicode character
141 (as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.
142 limit : int
143 Sets a maximum number of word-vectors to read from the file. The default,
144 None, means read all.
145 datatype : :class: `numpy.float*`
146 (Experimental) Can coerce dimensions to a non-default float type (such
147 as np.float16) to save memory. (Such types may result in much slower bulk operations
148 or incompatibility with optimized routines.)
149
150 Returns
151 -------
152 :obj: `cls`
153 Returns the loaded model as an instance of :class: `cls`.
154
155 """
156 from gensim.models.keyedvectors import Vocab
157 counts = None
158 if fvocab is not None:
159 logger.info("loading word counts from %s", fvocab)
160 counts = {}
161 with utils.smart_open(fvocab) as fin:
162 for line in fin:
163 word, count = utils.to_unicode(line).strip().split()
164 counts[word] = int(count)
165
166 logger.info("loading projection weights from %s", fname)
167 with utils.smart_open(fname) as fin:
168 header = utils.to_unicode(fin.readline(), encoding=encoding)
169 vocab_size, vector_size = (int(x) for x in header.split()) # throws for invalid file format
170 if limit:
171 vocab_size = min(vocab_size, limit)
172 result = cls(vector_size)
173 result.vector_size = vector_size
174 result.vectors = zeros((vocab_size, vector_size), dtype=datatype)
175
176 def add_word(word, weights):
177 word_id = len(result.vocab)
178 if word in result.vocab:
179 logger.warning("duplicate word '%s' in %s, ignoring all but first", word, fname)
180 return
181 if counts is None:
182 # most common scenario: no vocab file given. just make up some bogus counts, in descending order
183 result.vocab[word] = Vocab(index=word_id, count=vocab_size - word_id)
184 elif word in counts:
185 # use count from the vocab file
186 result.vocab[word] = Vocab(index=word_id, count=counts[word])
187 else:
188 # vocab file given, but word is missing -- set count to None (TODO: or raise?)
189 logger.warning("vocabulary file is incomplete: '%s' is missing", word)
190 result.vocab[word] = Vocab(index=word_id, count=None)
191 result.vectors[word_id] = weights
192 result.index2word.append(word)
193
194 if binary:
195 binary_len = dtype(REAL).itemsize * vector_size
196 for _ in xrange(vocab_size):
197 # mixed text and binary: read text first, then binary
198 word = []
199 while True:
200 ch = fin.read(1)
201 if ch == b' ':
202 break
203 if ch == b'':
204 raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
205 if ch != b'\n': # ignore newlines in front of words (some binary files have)
206 word.append(ch)
207 word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)
208 weights = fromstring(fin.read(binary_len), dtype=REAL)
209 add_word(word, weights)
210 else:
211 for line_no in xrange(vocab_size):
212 line = fin.readline()
213 if line == b'':
214 raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
215 parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
216 if len(parts) != vector_size + 1:
217 raise ValueError("invalid vector on line %s (is this really the text format?)" % line_no)
218 word, weights = parts[0], [REAL(x) for x in parts[1:]]
219 add_word(word, weights)
220 if result.vectors.shape[0] != len(result.vocab):
221 logger.info(
222 "duplicate words detected, shrinking matrix size from %i to %i",
223 result.vectors.shape[0], len(result.vocab)
224 )
225 result.vectors = ascontiguousarray(result.vectors[: len(result.vocab)])
226 assert (len(result.vocab), vector_size) == result.vectors.shape
227
228 logger.info("loaded %s matrix from %s", result.vectors.shape, fname)
229 return result
230
[end of gensim/models/utils_any2vec.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gensim/models/utils_any2vec.py b/gensim/models/utils_any2vec.py
--- a/gensim/models/utils_any2vec.py
+++ b/gensim/models/utils_any2vec.py
@@ -109,9 +109,10 @@
for word, vocab_ in sorted(iteritems(vocab), key=lambda item: -item[1].count):
row = vectors[vocab_.index]
if binary:
+ row = row.astype(REAL)
fout.write(utils.to_utf8(word) + b" " + row.tostring())
else:
- fout.write(utils.to_utf8("%s %s\n" % (word, ' '.join("%f" % val for val in row))))
+ fout.write(utils.to_utf8("%s %s\n" % (word, ' '.join(repr(val) for val in row))))
def _load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
@@ -205,7 +206,7 @@
if ch != b'\n': # ignore newlines in front of words (some binary files have)
word.append(ch)
word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)
- weights = fromstring(fin.read(binary_len), dtype=REAL)
+ weights = fromstring(fin.read(binary_len), dtype=REAL).astype(datatype)
add_word(word, weights)
else:
for line_no in xrange(vocab_size):
@@ -215,7 +216,7 @@
parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
if len(parts) != vector_size + 1:
raise ValueError("invalid vector on line %s (is this really the text format?)" % line_no)
- word, weights = parts[0], [REAL(x) for x in parts[1:]]
+ word, weights = parts[0], [datatype(x) for x in parts[1:]]
add_word(word, weights)
if result.vectors.shape[0] != len(result.vocab):
logger.info(
| {"golden_diff": "diff --git a/gensim/models/utils_any2vec.py b/gensim/models/utils_any2vec.py\n--- a/gensim/models/utils_any2vec.py\n+++ b/gensim/models/utils_any2vec.py\n@@ -109,9 +109,10 @@\n for word, vocab_ in sorted(iteritems(vocab), key=lambda item: -item[1].count):\n row = vectors[vocab_.index]\n if binary:\n+ row = row.astype(REAL)\n fout.write(utils.to_utf8(word) + b\" \" + row.tostring())\n else:\n- fout.write(utils.to_utf8(\"%s %s\\n\" % (word, ' '.join(\"%f\" % val for val in row))))\n+ fout.write(utils.to_utf8(\"%s %s\\n\" % (word, ' '.join(repr(val) for val in row))))\n \n \n def _load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',\n@@ -205,7 +206,7 @@\n if ch != b'\\n': # ignore newlines in front of words (some binary files have)\n word.append(ch)\n word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)\n- weights = fromstring(fin.read(binary_len), dtype=REAL)\n+ weights = fromstring(fin.read(binary_len), dtype=REAL).astype(datatype)\n add_word(word, weights)\n else:\n for line_no in xrange(vocab_size):\n@@ -215,7 +216,7 @@\n parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(\" \")\n if len(parts) != vector_size + 1:\n raise ValueError(\"invalid vector on line %s (is this really the text format?)\" % line_no)\n- word, weights = parts[0], [REAL(x) for x in parts[1:]]\n+ word, weights = parts[0], [datatype(x) for x in parts[1:]]\n add_word(word, weights)\n if result.vectors.shape[0] != len(result.vocab):\n logger.info(\n", "issue": "`datatype` parameter in `load_word2vec_format` doesn't work as expected\n#### Description\r\nUsing `datatype=np.float64` in a `KeyedVectors.load_word2vec_call` doesn't work as expected, the loaded floats seem to lose precision. The datatype for `syn0` is still `float64` though, so it seems that they are cast to float32 first while loading, then cast to float64 when creating the array.\r\n\r\n#### Steps/Code/Corpus to Reproduce\r\nUsing this file - \r\n[test.kv.txt](https://github.com/RaRe-Technologies/gensim/files/1434953/test.kv.txt)\r\n\r\n```python\r\nfrom gensim.models.keyedvectors import KeyedVectors\r\nimport numpy as np\r\n\r\nkv = KeyedVectors.load_word2vec_format('test.kv.txt', datatype=np.float64)\r\nprint(kv['horse.n.01'][0] == -0.0008546282343595379)\r\n# False\r\nprint(kv['horse.n.01'].dtype)\r\n# float64\r\n```\r\n#### Expected Results\r\n```python\r\nprint(kv['horse.n.01'][0] == -0.0008546282343595379)\r\n# True\r\n```\r\n#### Actual Results\r\n```python\r\nprint(kv['horse.n.01'][0] == -0.0008546282343595379)\r\n# False\r\n```\r\n\r\nLooking at the code and making a quick hack [here](https://github.com/RaRe-Technologies/gensim/blob/develop/gensim/models/keyedvectors.py#L253), changing..\r\n```\r\nword, weights = parts[0], [REAL(x) for x in parts[1:]]\r\n```\r\nto..\r\n```\r\nword, weights = parts[0], [datatype(x) for x in parts[1:]]\r\n```\r\n..leads to the correct result. 
However, I imagine there are other cases to be covered as well.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Author: Shiva Manne <[email protected]>\n# Copyright (C) 2018 RaRe Technologies s.r.o.\n\n\"\"\"This module contains various general functions useful for any2vec models.\"\"\"\n\nimport logging\nimport numpy as np\nfrom gensim import utils\n\nfrom numpy import zeros, dtype, float32 as REAL, ascontiguousarray, fromstring\n\nfrom six.moves import xrange\nfrom six import iteritems\n\nlogger = logging.getLogger(__name__)\n\n\ndef _compute_ngrams(word, min_n, max_n):\n \"\"\"Returns the list of all possible ngrams for a given word.\n\n Parameters\n ----------\n word : str\n The word whose ngrams need to be computed\n min_n : int\n minimum character length of the ngrams\n max_n : int\n maximum character length of the ngrams\n\n Returns\n -------\n :obj:`list` of :obj:`str`\n List of character ngrams\n\n \"\"\"\n BOW, EOW = ('<', '>') # Used by FastText to attach to all words as prefix and suffix\n extended_word = BOW + word + EOW\n ngrams = []\n for ngram_length in range(min_n, min(len(extended_word), max_n) + 1):\n for i in range(0, len(extended_word) - ngram_length + 1):\n ngrams.append(extended_word[i:i + ngram_length])\n return ngrams\n\n\ndef _ft_hash(string):\n \"\"\"Reproduces [hash method](https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc)\n used in [1]_.\n\n Parameter\n ---------\n string : str\n The string whose hash needs to be calculated\n\n Returns\n -------\n int\n The hash of the string\n\n \"\"\"\n # Runtime warnings for integer overflow are raised, this is expected behaviour. These warnings are suppressed.\n old_settings = np.seterr(all='ignore')\n h = np.uint32(2166136261)\n for c in string:\n h = h ^ np.uint32(ord(c))\n h = h * np.uint32(16777619)\n np.seterr(**old_settings)\n return h\n\n\ndef _save_word2vec_format(fname, vocab, vectors, fvocab=None, binary=False, total_vec=None):\n \"\"\"Store the input-hidden weight matrix in the same format used by the original\n C word2vec-tool, for compatibility.\n\n Parameters\n ----------\n fname : str\n The file path used to save the vectors in\n vocab : dict\n The vocabulary of words\n vectors : numpy.array\n The vectors to be stored\n fvocab : str\n Optional file path used to save the vocabulary\n binary : bool\n If True, the data wil be saved in binary word2vec format, else it will be saved in plain text.\n total_vec : int\n Optional parameter to explicitly specify total no. 
of vectors\n (in case word vectors are appended with document vectors afterwards)\n\n \"\"\"\n if not (vocab or vectors):\n raise RuntimeError(\"no input\")\n if total_vec is None:\n total_vec = len(vocab)\n vector_size = vectors.shape[1]\n if fvocab is not None:\n logger.info(\"storing vocabulary in %s\", fvocab)\n with utils.smart_open(fvocab, 'wb') as vout:\n for word, vocab_ in sorted(iteritems(vocab), key=lambda item: -item[1].count):\n vout.write(utils.to_utf8(\"%s %s\\n\" % (word, vocab_.count)))\n logger.info(\"storing %sx%s projection weights into %s\", total_vec, vector_size, fname)\n assert (len(vocab), vector_size) == vectors.shape\n with utils.smart_open(fname, 'wb') as fout:\n fout.write(utils.to_utf8(\"%s %s\\n\" % (total_vec, vector_size)))\n # store in sorted order: most frequent words at the top\n for word, vocab_ in sorted(iteritems(vocab), key=lambda item: -item[1].count):\n row = vectors[vocab_.index]\n if binary:\n fout.write(utils.to_utf8(word) + b\" \" + row.tostring())\n else:\n fout.write(utils.to_utf8(\"%s %s\\n\" % (word, ' '.join(\"%f\" % val for val in row))))\n\n\ndef _load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',\n limit=None, datatype=REAL):\n \"\"\"Load the input-hidden weight matrix from the original C word2vec-tool format.\n\n Note that the information stored in the file is incomplete (the binary tree is missing),\n so while you can query for word similarity etc., you cannot continue training\n with a model loaded this way.\n\n Parameters\n ----------\n fname : str\n The file path to the saved word2vec-format file.\n fvocab : str\n Optional file path to the vocabulary.Word counts are read from `fvocab` filename,\n if set (this is the file generated by `-save-vocab` flag of the original C tool).\n binary : bool\n If True, indicates whether the data is in binary word2vec format.\n encoding : str\n If you trained the C model using non-utf8 encoding for words, specify that\n encoding in `encoding`.\n unicode_errors : str\n default 'strict', is a string suitable to be passed as the `errors`\n argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source\n file may include word tokens truncated in the middle of a multibyte unicode character\n (as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.\n limit : int\n Sets a maximum number of word-vectors to read from the file. The default,\n None, means read all.\n datatype : :class: `numpy.float*`\n (Experimental) Can coerce dimensions to a non-default float type (such\n as np.float16) to save memory. 
(Such types may result in much slower bulk operations\n or incompatibility with optimized routines.)\n\n Returns\n -------\n :obj: `cls`\n Returns the loaded model as an instance of :class: `cls`.\n\n \"\"\"\n from gensim.models.keyedvectors import Vocab\n counts = None\n if fvocab is not None:\n logger.info(\"loading word counts from %s\", fvocab)\n counts = {}\n with utils.smart_open(fvocab) as fin:\n for line in fin:\n word, count = utils.to_unicode(line).strip().split()\n counts[word] = int(count)\n\n logger.info(\"loading projection weights from %s\", fname)\n with utils.smart_open(fname) as fin:\n header = utils.to_unicode(fin.readline(), encoding=encoding)\n vocab_size, vector_size = (int(x) for x in header.split()) # throws for invalid file format\n if limit:\n vocab_size = min(vocab_size, limit)\n result = cls(vector_size)\n result.vector_size = vector_size\n result.vectors = zeros((vocab_size, vector_size), dtype=datatype)\n\n def add_word(word, weights):\n word_id = len(result.vocab)\n if word in result.vocab:\n logger.warning(\"duplicate word '%s' in %s, ignoring all but first\", word, fname)\n return\n if counts is None:\n # most common scenario: no vocab file given. just make up some bogus counts, in descending order\n result.vocab[word] = Vocab(index=word_id, count=vocab_size - word_id)\n elif word in counts:\n # use count from the vocab file\n result.vocab[word] = Vocab(index=word_id, count=counts[word])\n else:\n # vocab file given, but word is missing -- set count to None (TODO: or raise?)\n logger.warning(\"vocabulary file is incomplete: '%s' is missing\", word)\n result.vocab[word] = Vocab(index=word_id, count=None)\n result.vectors[word_id] = weights\n result.index2word.append(word)\n\n if binary:\n binary_len = dtype(REAL).itemsize * vector_size\n for _ in xrange(vocab_size):\n # mixed text and binary: read text first, then binary\n word = []\n while True:\n ch = fin.read(1)\n if ch == b' ':\n break\n if ch == b'':\n raise EOFError(\"unexpected end of input; is count incorrect or file otherwise damaged?\")\n if ch != b'\\n': # ignore newlines in front of words (some binary files have)\n word.append(ch)\n word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)\n weights = fromstring(fin.read(binary_len), dtype=REAL)\n add_word(word, weights)\n else:\n for line_no in xrange(vocab_size):\n line = fin.readline()\n if line == b'':\n raise EOFError(\"unexpected end of input; is count incorrect or file otherwise damaged?\")\n parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(\" \")\n if len(parts) != vector_size + 1:\n raise ValueError(\"invalid vector on line %s (is this really the text format?)\" % line_no)\n word, weights = parts[0], [REAL(x) for x in parts[1:]]\n add_word(word, weights)\n if result.vectors.shape[0] != len(result.vocab):\n logger.info(\n \"duplicate words detected, shrinking matrix size from %i to %i\",\n result.vectors.shape[0], len(result.vocab)\n )\n result.vectors = ascontiguousarray(result.vectors[: len(result.vocab)])\n assert (len(result.vocab), vector_size) == result.vectors.shape\n\n logger.info(\"loaded %s matrix from %s\", result.vectors.shape, fname)\n return result\n", "path": "gensim/models/utils_any2vec.py"}]} | 3,803 | 475 |
gh_patches_debug_24819 | rasdani/github-patches | git_diff | svthalia__concrexit-3092 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix reference face upload UI
The reference face UI should display better buttons for deleting a reference face, and clearer indicators of whether a reference face is processing, accepted, or rejected.
</issue>
<code>
[start of website/facedetection/views.py]
1 from django.conf import settings
2 from django.contrib import messages
3 from django.contrib.auth.mixins import LoginRequiredMixin
4 from django.core.exceptions import ValidationError
5 from django.http import HttpResponseRedirect
6 from django.shortcuts import redirect
7 from django.urls import reverse_lazy
8 from django.utils import timezone
9 from django.utils.translation import gettext_lazy as _
10 from django.views.generic import DeleteView, FormView, ListView
11
12 from photos.models import Photo
13 from thaliawebsite.views import PagedView
14
15 from .forms import ReferenceFaceUploadForm
16 from .models import ReferenceFace
17
18
19 class YourPhotosView(LoginRequiredMixin, PagedView):
20 model = Photo
21 paginate_by = 16
22 template_name = "facedetection/your-photos.html"
23 context_object_name = "photos"
24
25 def get(self, request, *args, **kwargs):
26 if not request.member or request.member.current_membership is None:
27 messages.error(request, _("You need to be a member to use this feature."))
28 return redirect("index")
29
30 return super().get(request, *args, **kwargs)
31
32 def get_queryset(self):
33 member = self.request.member
34 photos = (
35 Photo.objects.select_related("album")
36 .filter(album__hidden=False, hidden=False)
37 .filter(
38 facedetectionphoto__encodings__matches__reference__user=self.request.member
39 )
40 )
41
42 # Filter out matches from long before the member's first membership.
43 albums_since = member.earliest_membership.since - timezone.timedelta(days=31)
44 photos.filter(album__date__gte=albums_since)
45
46 # Filter out matches from after the member's last membership.
47 if member.latest_membership.until is not None:
48 photos = photos.filter(album__date__lte=member.latest_membership.until)
49
50 return photos.select_properties("num_likes").order_by("-album__date")
51
52 def get_context_data(self, **kwargs):
53 context = super().get_context_data(**kwargs)
54
55 context[
56 "has_processing_reference_faces"
57 ] = self.request.member.reference_faces.filter(
58 status=ReferenceFace.Status.PROCESSING,
59 marked_for_deletion_at__isnull=True,
60 ).exists()
61
62 context[
63 "has_rejected_reference_faces"
64 ] = self.request.member.reference_faces.filter(
65 status=ReferenceFace.Status.REJECTED,
66 marked_for_deletion_at__isnull=True,
67 ).exists()
68
69 context["has_reference_faces"] = self.request.member.reference_faces.filter(
70 marked_for_deletion_at__isnull=True
71 ).exists()
72
73 return context
74
75
76 class ReferenceFaceView(LoginRequiredMixin, ListView):
77 template_name = "facedetection/reference-faces.html"
78 context_object_name = "reference_faces"
79
80 def get_context_data(self, **kwargs):
81 context = super().get_context_data(**kwargs)
82 context[
83 "reference_faces_limit"
84 ] = settings.FACEDETECTION_MAX_NUM_REFERENCE_FACES
85 context[
86 "storage_period_after_delete"
87 ] = settings.FACEDETECTION_REFERENCE_FACE_STORAGE_PERIOD_AFTER_DELETE_DAYS
88 context["reference_faces_limit_reached"] = bool(
89 self.request.member.reference_faces.filter(
90 marked_for_deletion_at__isnull=True
91 ).count()
92 >= settings.FACEDETECTION_MAX_NUM_REFERENCE_FACES
93 )
94 return context
95
96 def get_queryset(self):
97 return self.request.member.reference_faces.filter(
98 marked_for_deletion_at__isnull=True
99 ).all()
100
101
102 class ReferenceFaceUploadView(LoginRequiredMixin, FormView):
103 template_name = "facedetection/reference-face-upload.html"
104 form_class = ReferenceFaceUploadForm
105 success_url = reverse_lazy("facedetection:reference-faces")
106
107 def dispatch(self, request, *args, **kwargs):
108 if not request.member or request.member.current_membership is None:
109 messages.error(request, "You need to be a member to use this feature.")
110 return redirect("index")
111 return super().dispatch(request, *args, **kwargs)
112
113 def form_valid(self, form):
114 try:
115 form.save(user=self.request.member)
116 except ValidationError as e:
117 for error in e:
118 messages.error(self.request, error)
119 return self.form_invalid(form)
120 messages.success(self.request, "Your reference face has been uploaded.")
121 return super().form_valid(form)
122
123
124 class ReferenceFaceDeleteView(LoginRequiredMixin, DeleteView):
125 model = ReferenceFace
126 success_url = reverse_lazy("facedetection:reference-faces")
127
128 def get_context_data(self, **kwargs):
129 context = super().get_context_data(**kwargs)
130 context[
131 "storage_period_after_delete"
132 ] = settings.FACEDETECTION_REFERENCE_FACE_STORAGE_PERIOD_AFTER_DELETE_DAYS
133 return context
134
135 def get_queryset(self):
136 return self.request.member.reference_faces.filter(
137 marked_for_deletion_at__isnull=True
138 ).all()
139
140 def form_valid(self, form):
141 success_url = self.get_success_url()
142
143 instance = self.get_object()
144 instance.marked_for_deletion_at = timezone.now()
145 instance.save()
146 messages.success(self.request, "Your reference face has been deleted.")
147
148 return HttpResponseRedirect(success_url)
149
[end of website/facedetection/views.py]
[start of website/facedetection/services.py]
1 import json
2 import logging
3 from typing import Union
4
5 from django.conf import settings
6 from django.db.models import Q
7 from django.utils import timezone
8
9 import boto3
10 from sentry_sdk import capture_exception
11
12 from photos.models import Photo
13 from utils.media.services import get_thumbnail_url
14
15 from .models import FaceDetectionPhoto, ReferenceFace
16
17 logger = logging.getLogger(__name__)
18
19
20 def execute_data_minimisation(dry_run=False):
21 """Delete old reference faces.
22
23 This deletes reference faces that have been marked for deletion by the user for
24 some time, as well as reference faces of users that have not logged in for a year.
25 """
26 delete_period_inactive_member = timezone.now() - timezone.timedelta(days=365)
27 delete_period_marked_for_deletion = timezone.now() - timezone.timedelta(
28 days=settings.FACEDETECTION_REFERENCE_FACE_STORAGE_PERIOD_AFTER_DELETE_DAYS
29 )
30
31 queryset = ReferenceFace.objects.filter(
32 Q(marked_for_deletion_at__lte=delete_period_marked_for_deletion)
33 | Q(member__last_login__lte=delete_period_inactive_member)
34 )
35
36 if not dry_run:
37 for reference_face in queryset:
38 reference_face.delete() # Don't run the queryset method, this will also delete the file
39
40 return queryset
41
42
43 def _serialize_lambda_source(source: Union[ReferenceFace, FaceDetectionPhoto]):
44 """Serialize a source object to be sent to the lambda function."""
45 if isinstance(source, ReferenceFace):
46 return {
47 "type": "reference",
48 "pk": source.pk,
49 "token": source.token,
50 "photo_url": get_thumbnail_url(
51 source.file,
52 "medium",
53 absolute_url=True,
54 # Lambda calls can be queued for up to 6 hours by default, so
55 # we make sure the url it uses is valid for at least that long.
56 expire_seconds=60 * 60 * 7,
57 ),
58 }
59 if isinstance(source, FaceDetectionPhoto):
60 return {
61 "type": "photo",
62 "pk": source.pk,
63 "token": source.token,
64 "photo_url": get_thumbnail_url(
65 source.photo.file,
66 "large",
67 absolute_url=True,
68 expire_seconds=60 * 60 * 7,
69 ),
70 }
71 raise ValueError("source must be a ReferenceFace or FaceDetectionPhoto")
72
73
74 def _trigger_facedetection_lambda_batch(
75 sources: list[Union[ReferenceFace, FaceDetectionPhoto]]
76 ):
77 """Submit a batch of sources to the facedetection lambda function.
78
79 If submitting the sources fails, this is logged and
80 reported to Sentry, but no exception is raised.
81 """
82 payload = {
83 "api_url": settings.BASE_URL,
84 "sources": [_serialize_lambda_source(source) for source in sources],
85 }
86
87 for source in sources:
88 source.submitted_at = timezone.now()
89 source.save()
90
91 try:
92 lambda_client = boto3.client(
93 service_name="lambda",
94 aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
95 aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
96 )
97
98 response = lambda_client.invoke(
99 FunctionName=settings.FACEDETECTION_LAMBDA_ARN,
100 InvocationType="Event",
101 Payload=json.dumps(payload),
102 )
103
104 if response["StatusCode"] != 202:
105 # pylint: disable=broad-exception-raised
106 raise Exception("Lambda response was not 202.")
107
108 # pylint: disable=broad-exception-caught
109 except Exception as e:
110 logger.error(
111 "Submitting sources to lambda failed. Reason: %s", str(e), exc_info=True
112 )
113 capture_exception(e)
114
115
116 def trigger_facedetection_lambda(
117 sources: list[Union[ReferenceFace, FaceDetectionPhoto]]
118 ):
119 """Submit a sources to the facedetection lambda function for processing.
120
121 This function will check if the sources are valid and, if a lambda function has
122 been configured, try to submit the sources to the lambda function in batches.
123
124 If no lambda function has been configured, or submitting (a batch of) sources fails,
125 this is ignored. The sources can be submitted again later.
126 """
127 if len(sources) == 0:
128 raise ValueError("No sources to process.")
129
130 if any(source.status != source.Status.PROCESSING for source in sources):
131 raise ValueError("A source has already been processed.")
132
133 if settings.FACEDETECTION_LAMBDA_ARN is None:
134 logger.warning(
135 "No Lambda ARN has been configured. Sources will not be processed."
136 )
137 return
138
139 batch_size = settings.FACEDETECTION_LAMBDA_BATCH_SIZE
140 for batch in [
141 sources[i : i + batch_size] for i in range(0, len(sources), batch_size)
142 ]:
143 _trigger_facedetection_lambda_batch(batch)
144
145
146 def resubmit_reference_faces() -> list[ReferenceFace]:
147 """Resubmit reference faces that (should) have already been submitted but aren't done.
148
149 Returns a list of reference faces that have been resubmitted.
150 """
151 submitted_before = timezone.now() - timezone.timedelta(hours=7)
152 references = list(
153 ReferenceFace.objects.filter(
154 status=ReferenceFace.Status.PROCESSING,
155 ).filter(Q(submitted_at__lte=submitted_before) | Q(submitted_at__isnull=True))
156 )
157 if references:
158 trigger_facedetection_lambda(references)
159 return references
160
161
162 def resubmit_photos() -> list[FaceDetectionPhoto]:
163 """Resubmit photos that (should) have already been submitted but aren't done.
164
165 Returns a list of photos that have been resubmitted.
166 """
167 submitted_before = timezone.now() - timezone.timedelta(hours=7)
168 photos = list(
169 FaceDetectionPhoto.objects.filter(
170 status=FaceDetectionPhoto.Status.PROCESSING,
171 )
172 .filter(Q(submitted_at__lte=submitted_before) | Q(submitted_at__isnull=True))
173 .select_related("photo")
174 )
175 if photos:
176 trigger_facedetection_lambda(photos)
177 return photos
178
179
180 def submit_new_photos() -> int:
181 """Submit photos for which no FaceDetectionPhoto exists yet.
182
183 Returns the number of new photos that have been submitted.
184 """
185 count = 0
186 if not Photo.objects.filter(facedetectionphoto__isnull=True).exists():
187 return count
188
189 # We have another level of batching (outside of trigger_facedetection_lambda)
190 # for performance and responsive output when there are thousands of photos.
191 while Photo.objects.filter(facedetectionphoto__isnull=True).exists():
192 photos = FaceDetectionPhoto.objects.bulk_create(
193 [
194 FaceDetectionPhoto(photo=photo)
195 for photo in Photo.objects.filter(facedetectionphoto__isnull=True)[:400]
196 ]
197 )
198
199 trigger_facedetection_lambda(photos)
200 count += len(photos)
201
202 return count
203
[end of website/facedetection/services.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/facedetection/services.py b/website/facedetection/services.py
--- a/website/facedetection/services.py
+++ b/website/facedetection/services.py
@@ -30,7 +30,7 @@
queryset = ReferenceFace.objects.filter(
Q(marked_for_deletion_at__lte=delete_period_marked_for_deletion)
- | Q(member__last_login__lte=delete_period_inactive_member)
+ | Q(user__last_login__lte=delete_period_inactive_member)
)
if not dry_run:
diff --git a/website/facedetection/views.py b/website/facedetection/views.py
--- a/website/facedetection/views.py
+++ b/website/facedetection/views.py
@@ -75,7 +75,6 @@
class ReferenceFaceView(LoginRequiredMixin, ListView):
template_name = "facedetection/reference-faces.html"
- context_object_name = "reference_faces"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
@@ -91,6 +90,12 @@
).count()
>= settings.FACEDETECTION_MAX_NUM_REFERENCE_FACES
)
+ context[
+ "has_rejected_reference_faces"
+ ] = self.request.member.reference_faces.filter(
+ status=ReferenceFace.Status.REJECTED,
+ marked_for_deletion_at__isnull=True,
+ ).exists()
return context
def get_queryset(self):
| {"golden_diff": "diff --git a/website/facedetection/services.py b/website/facedetection/services.py\n--- a/website/facedetection/services.py\n+++ b/website/facedetection/services.py\n@@ -30,7 +30,7 @@\n \n queryset = ReferenceFace.objects.filter(\n Q(marked_for_deletion_at__lte=delete_period_marked_for_deletion)\n- | Q(member__last_login__lte=delete_period_inactive_member)\n+ | Q(user__last_login__lte=delete_period_inactive_member)\n )\n \n if not dry_run:\ndiff --git a/website/facedetection/views.py b/website/facedetection/views.py\n--- a/website/facedetection/views.py\n+++ b/website/facedetection/views.py\n@@ -75,7 +75,6 @@\n \n class ReferenceFaceView(LoginRequiredMixin, ListView):\n template_name = \"facedetection/reference-faces.html\"\n- context_object_name = \"reference_faces\"\n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n@@ -91,6 +90,12 @@\n ).count()\n >= settings.FACEDETECTION_MAX_NUM_REFERENCE_FACES\n )\n+ context[\n+ \"has_rejected_reference_faces\"\n+ ] = self.request.member.reference_faces.filter(\n+ status=ReferenceFace.Status.REJECTED,\n+ marked_for_deletion_at__isnull=True,\n+ ).exists()\n return context\n \n def get_queryset(self):\n", "issue": "Fix reference face upload UI\nThe reference face UI should display better buttons for deleting a reference face, and for displaying whether a reference face is processing or accepted / rejected.\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.exceptions import ValidationError\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import DeleteView, FormView, ListView\n\nfrom photos.models import Photo\nfrom thaliawebsite.views import PagedView\n\nfrom .forms import ReferenceFaceUploadForm\nfrom .models import ReferenceFace\n\n\nclass YourPhotosView(LoginRequiredMixin, PagedView):\n model = Photo\n paginate_by = 16\n template_name = \"facedetection/your-photos.html\"\n context_object_name = \"photos\"\n\n def get(self, request, *args, **kwargs):\n if not request.member or request.member.current_membership is None:\n messages.error(request, _(\"You need to be a member to use this feature.\"))\n return redirect(\"index\")\n\n return super().get(request, *args, **kwargs)\n\n def get_queryset(self):\n member = self.request.member\n photos = (\n Photo.objects.select_related(\"album\")\n .filter(album__hidden=False, hidden=False)\n .filter(\n facedetectionphoto__encodings__matches__reference__user=self.request.member\n )\n )\n\n # Filter out matches from long before the member's first membership.\n albums_since = member.earliest_membership.since - timezone.timedelta(days=31)\n photos.filter(album__date__gte=albums_since)\n\n # Filter out matches from after the member's last membership.\n if member.latest_membership.until is not None:\n photos = photos.filter(album__date__lte=member.latest_membership.until)\n\n return photos.select_properties(\"num_likes\").order_by(\"-album__date\")\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context[\n \"has_processing_reference_faces\"\n ] = self.request.member.reference_faces.filter(\n status=ReferenceFace.Status.PROCESSING,\n marked_for_deletion_at__isnull=True,\n ).exists()\n\n context[\n 
\"has_rejected_reference_faces\"\n ] = self.request.member.reference_faces.filter(\n status=ReferenceFace.Status.REJECTED,\n marked_for_deletion_at__isnull=True,\n ).exists()\n\n context[\"has_reference_faces\"] = self.request.member.reference_faces.filter(\n marked_for_deletion_at__isnull=True\n ).exists()\n\n return context\n\n\nclass ReferenceFaceView(LoginRequiredMixin, ListView):\n template_name = \"facedetection/reference-faces.html\"\n context_object_name = \"reference_faces\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\n \"reference_faces_limit\"\n ] = settings.FACEDETECTION_MAX_NUM_REFERENCE_FACES\n context[\n \"storage_period_after_delete\"\n ] = settings.FACEDETECTION_REFERENCE_FACE_STORAGE_PERIOD_AFTER_DELETE_DAYS\n context[\"reference_faces_limit_reached\"] = bool(\n self.request.member.reference_faces.filter(\n marked_for_deletion_at__isnull=True\n ).count()\n >= settings.FACEDETECTION_MAX_NUM_REFERENCE_FACES\n )\n return context\n\n def get_queryset(self):\n return self.request.member.reference_faces.filter(\n marked_for_deletion_at__isnull=True\n ).all()\n\n\nclass ReferenceFaceUploadView(LoginRequiredMixin, FormView):\n template_name = \"facedetection/reference-face-upload.html\"\n form_class = ReferenceFaceUploadForm\n success_url = reverse_lazy(\"facedetection:reference-faces\")\n\n def dispatch(self, request, *args, **kwargs):\n if not request.member or request.member.current_membership is None:\n messages.error(request, \"You need to be a member to use this feature.\")\n return redirect(\"index\")\n return super().dispatch(request, *args, **kwargs)\n\n def form_valid(self, form):\n try:\n form.save(user=self.request.member)\n except ValidationError as e:\n for error in e:\n messages.error(self.request, error)\n return self.form_invalid(form)\n messages.success(self.request, \"Your reference face has been uploaded.\")\n return super().form_valid(form)\n\n\nclass ReferenceFaceDeleteView(LoginRequiredMixin, DeleteView):\n model = ReferenceFace\n success_url = reverse_lazy(\"facedetection:reference-faces\")\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\n \"storage_period_after_delete\"\n ] = settings.FACEDETECTION_REFERENCE_FACE_STORAGE_PERIOD_AFTER_DELETE_DAYS\n return context\n\n def get_queryset(self):\n return self.request.member.reference_faces.filter(\n marked_for_deletion_at__isnull=True\n ).all()\n\n def form_valid(self, form):\n success_url = self.get_success_url()\n\n instance = self.get_object()\n instance.marked_for_deletion_at = timezone.now()\n instance.save()\n messages.success(self.request, \"Your reference face has been deleted.\")\n\n return HttpResponseRedirect(success_url)\n", "path": "website/facedetection/views.py"}, {"content": "import json\nimport logging\nfrom typing import Union\n\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.utils import timezone\n\nimport boto3\nfrom sentry_sdk import capture_exception\n\nfrom photos.models import Photo\nfrom utils.media.services import get_thumbnail_url\n\nfrom .models import FaceDetectionPhoto, ReferenceFace\n\nlogger = logging.getLogger(__name__)\n\n\ndef execute_data_minimisation(dry_run=False):\n \"\"\"Delete old reference faces.\n\n This deletes reference faces that have been marked for deletion by the user for\n some time, as well as reference faces of users that have not logged in for a year.\n \"\"\"\n delete_period_inactive_member = timezone.now() - 
timezone.timedelta(days=365)\n delete_period_marked_for_deletion = timezone.now() - timezone.timedelta(\n days=settings.FACEDETECTION_REFERENCE_FACE_STORAGE_PERIOD_AFTER_DELETE_DAYS\n )\n\n queryset = ReferenceFace.objects.filter(\n Q(marked_for_deletion_at__lte=delete_period_marked_for_deletion)\n | Q(member__last_login__lte=delete_period_inactive_member)\n )\n\n if not dry_run:\n for reference_face in queryset:\n reference_face.delete() # Don't run the queryset method, this will also delete the file\n\n return queryset\n\n\ndef _serialize_lambda_source(source: Union[ReferenceFace, FaceDetectionPhoto]):\n \"\"\"Serialize a source object to be sent to the lambda function.\"\"\"\n if isinstance(source, ReferenceFace):\n return {\n \"type\": \"reference\",\n \"pk\": source.pk,\n \"token\": source.token,\n \"photo_url\": get_thumbnail_url(\n source.file,\n \"medium\",\n absolute_url=True,\n # Lambda calls can be queued for up to 6 hours by default, so\n # we make sure the url it uses is valid for at least that long.\n expire_seconds=60 * 60 * 7,\n ),\n }\n if isinstance(source, FaceDetectionPhoto):\n return {\n \"type\": \"photo\",\n \"pk\": source.pk,\n \"token\": source.token,\n \"photo_url\": get_thumbnail_url(\n source.photo.file,\n \"large\",\n absolute_url=True,\n expire_seconds=60 * 60 * 7,\n ),\n }\n raise ValueError(\"source must be a ReferenceFace or FaceDetectionPhoto\")\n\n\ndef _trigger_facedetection_lambda_batch(\n sources: list[Union[ReferenceFace, FaceDetectionPhoto]]\n):\n \"\"\"Submit a batch of sources to the facedetection lambda function.\n\n If submitting the sources fails, this is logged and\n reported to Sentry, but no exception is raised.\n \"\"\"\n payload = {\n \"api_url\": settings.BASE_URL,\n \"sources\": [_serialize_lambda_source(source) for source in sources],\n }\n\n for source in sources:\n source.submitted_at = timezone.now()\n source.save()\n\n try:\n lambda_client = boto3.client(\n service_name=\"lambda\",\n aws_access_key_id=settings.AWS_ACCESS_KEY_ID,\n aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,\n )\n\n response = lambda_client.invoke(\n FunctionName=settings.FACEDETECTION_LAMBDA_ARN,\n InvocationType=\"Event\",\n Payload=json.dumps(payload),\n )\n\n if response[\"StatusCode\"] != 202:\n # pylint: disable=broad-exception-raised\n raise Exception(\"Lambda response was not 202.\")\n\n # pylint: disable=broad-exception-caught\n except Exception as e:\n logger.error(\n \"Submitting sources to lambda failed. Reason: %s\", str(e), exc_info=True\n )\n capture_exception(e)\n\n\ndef trigger_facedetection_lambda(\n sources: list[Union[ReferenceFace, FaceDetectionPhoto]]\n):\n \"\"\"Submit a sources to the facedetection lambda function for processing.\n\n This function will check if the sources are valid and, if a lambda function has\n been configured, try to submit the sources to the lambda function in batches.\n\n If no lambda function has been configured, or submitting (a batch of) sources fails,\n this is ignored. The sources can be submitted again later.\n \"\"\"\n if len(sources) == 0:\n raise ValueError(\"No sources to process.\")\n\n if any(source.status != source.Status.PROCESSING for source in sources):\n raise ValueError(\"A source has already been processed.\")\n\n if settings.FACEDETECTION_LAMBDA_ARN is None:\n logger.warning(\n \"No Lambda ARN has been configured. 
Sources will not be processed.\"\n )\n return\n\n batch_size = settings.FACEDETECTION_LAMBDA_BATCH_SIZE\n for batch in [\n sources[i : i + batch_size] for i in range(0, len(sources), batch_size)\n ]:\n _trigger_facedetection_lambda_batch(batch)\n\n\ndef resubmit_reference_faces() -> list[ReferenceFace]:\n \"\"\"Resubmit reference faces that (should) have already been submitted but aren't done.\n\n Returns a list of reference faces that have been resubmitted.\n \"\"\"\n submitted_before = timezone.now() - timezone.timedelta(hours=7)\n references = list(\n ReferenceFace.objects.filter(\n status=ReferenceFace.Status.PROCESSING,\n ).filter(Q(submitted_at__lte=submitted_before) | Q(submitted_at__isnull=True))\n )\n if references:\n trigger_facedetection_lambda(references)\n return references\n\n\ndef resubmit_photos() -> list[FaceDetectionPhoto]:\n \"\"\"Resubmit photos that (should) have already been submitted but aren't done.\n\n Returns a list of photos that have been resubmitted.\n \"\"\"\n submitted_before = timezone.now() - timezone.timedelta(hours=7)\n photos = list(\n FaceDetectionPhoto.objects.filter(\n status=FaceDetectionPhoto.Status.PROCESSING,\n )\n .filter(Q(submitted_at__lte=submitted_before) | Q(submitted_at__isnull=True))\n .select_related(\"photo\")\n )\n if photos:\n trigger_facedetection_lambda(photos)\n return photos\n\n\ndef submit_new_photos() -> int:\n \"\"\"Submit photos for which no FaceDetectionPhoto exists yet.\n\n Returns the number of new photos that have been submitted.\n \"\"\"\n count = 0\n if not Photo.objects.filter(facedetectionphoto__isnull=True).exists():\n return count\n\n # We have another level of batching (outside of trigger_facedetection_lambda)\n # for performance and responsive output when there are thousands of photos.\n while Photo.objects.filter(facedetectionphoto__isnull=True).exists():\n photos = FaceDetectionPhoto.objects.bulk_create(\n [\n FaceDetectionPhoto(photo=photo)\n for photo in Photo.objects.filter(facedetectionphoto__isnull=True)[:400]\n ]\n )\n\n trigger_facedetection_lambda(photos)\n count += len(photos)\n\n return count\n", "path": "website/facedetection/services.py"}]} | 4,004 | 321 |
gh_patches_debug_863 | rasdani/github-patches | git_diff | vacanza__python-holidays-451 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't un-pickle a `HolidayBase`
It seems that once a holidays class instance, e.g. `holidays.UnitedStates()`, has been used, it can no longer be un-pickled.
For example, this snippet:
```python
import holidays
import pickle
from datetime import datetime
# Works:
us_holidays = holidays.UnitedStates()
us_holidays_ = pickle.loads(pickle.dumps(us_holidays))
b = datetime.fromisoformat("2020-01-01") in us_holidays_
# Fails:
us_holidays = holidays.UnitedStates()
b = datetime.fromisoformat("2020-01-01") in us_holidays
dump = pickle.dumps(us_holidays)
pickle.loads(dump) # <- exception
```
Raises the following exception from the last line:
```
~/.local/share/virtualenvs/sibylla-v2-LxBhzJgn/lib/python3.8/site-packages/holidays/holiday_base.py in __setitem__(self, key, value)
116
117 def __setitem__(self, key, value):
--> 118 if key in self:
119 if self.get(key).find(value) < 0 \
120 and value.find(self.get(key)) < 0:
~/.local/share/virtualenvs/sibylla-v2-LxBhzJgn/lib/python3.8/site-packages/holidays/holiday_base.py in __contains__(self, key)
73
74 def __contains__(self, key):
---> 75 return dict.__contains__(self, self.__keytransform__(key))
76
77 def __getitem__(self, key):
~/.local/share/virtualenvs/sibylla-v2-LxBhzJgn/lib/python3.8/site-packages/holidays/holiday_base.py in __keytransform__(self, key)
67 raise TypeError("Cannot convert type '%s' to date." % type(key))
68
---> 69 if self.expand and key.year not in self.years:
70 self.years.add(key.year)
71 self._populate(key.year)
```
The `expand` attribute is set by `__init__`, but it's not there during deserialization via unpickling.
I think it's because `HolidayBase` inherits from `dict` and pickling treats dict subclasses specially: during unpickling the dict items are populated in the deserialized object first, and only then are the attributes restored from the state. But since `HolidayBase` overrides `__setitem__`, and that override uses state attributes that haven't been set on the object yet, the `expand` attribute is missing.
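The same failure mode can be reproduced with a minimal `dict` subclass that has nothing to do with holidays. A rough sketch of the pitfall (class and attribute names here are made up purely for illustration):

```python
import pickle


class AttrDict(dict):
    def __init__(self, expand=True):
        self.expand = expand  # instance attribute used inside __setitem__

    def __setitem__(self, key, value):
        # During unpickling, stored items are replayed through __setitem__
        # *before* the instance __dict__ is restored, so self.expand does
        # not exist yet at that point.
        if self.expand:
            value = value.upper()
        dict.__setitem__(self, key, value)


d = AttrDict()
pickle.loads(pickle.dumps(d))  # works: no items to replay

d["x"] = "holiday"
pickle.loads(pickle.dumps(d))  # AttributeError: 'AttrDict' object has no attribute 'expand'
```

That also matches the snippet above: the freshly constructed, still-empty instance round-trips fine, and the failure only appears once a lookup has populated the dict.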
Tested with `holidays=='0.10.4'`.
</issue>
<code>
[start of holidays/holiday_base.py]
1 # -*- coding: utf-8 -*-
2
3 # python-holidays
4 # ---------------
5 # A fast, efficient Python library for generating country, province and state
6 # specific sets of holidays on the fly. It aims to make determining whether a
7 # specific date is a holiday as fast and flexible as possible.
8 #
9 # Author: ryanss <[email protected]> (c) 2014-2017
10 # dr-prodigy <[email protected]> (c) 2017-2021
11 # Website: https://github.com/dr-prodigy/python-holidays
12 # License: MIT (see LICENSE file)
13
14 from datetime import timedelta, datetime, date
15
16 import six
17 from dateutil.parser import parse
18
19
20 class HolidayBase(dict):
21 PROVINCES = []
22
23 def __init__(
24 self, years=[], expand=True, observed=True, prov=None, state=None
25 ):
26 self.observed = observed
27 self.expand = expand
28 if isinstance(years, int):
29 years = [
30 years,
31 ]
32 self.years = set(years)
33 if not getattr(self, "prov", False):
34 self.prov = prov
35 self.state = state
36 for year in list(self.years):
37 self._populate(year)
38
39 def __setattr__(self, key, value):
40 if key == "observed" and len(self) > 0:
41 dict.__setattr__(self, key, value)
42 if value is True:
43 # Add (Observed) dates
44 years = list(self.years)
45 self.years = set()
46 self.clear()
47 for year in years:
48 self._populate(year)
49 else:
50 # Remove (Observed) dates
51 for k, v in list(self.items()):
52 if v.find("Observed") >= 0:
53 del self[k]
54 else:
55 return dict.__setattr__(self, key, value)
56
57 def __keytransform__(self, key):
58 if isinstance(key, datetime):
59 key = key.date()
60 elif isinstance(key, date):
61 key = key
62 elif isinstance(key, int) or isinstance(key, float):
63 key = datetime.utcfromtimestamp(key).date()
64 elif isinstance(key, six.string_types):
65 try:
66 key = parse(key).date()
67 except (ValueError, OverflowError):
68 raise ValueError("Cannot parse date from string '%s'" % key)
69 else:
70 raise TypeError("Cannot convert type '%s' to date." % type(key))
71
72 if self.expand and key.year not in self.years:
73 self.years.add(key.year)
74 self._populate(key.year)
75 return key
76
77 def __contains__(self, key):
78 return dict.__contains__(self, self.__keytransform__(key))
79
80 def __getitem__(self, key):
81 if isinstance(key, slice):
82 if not key.start or not key.stop:
83 raise ValueError("Both start and stop must be given.")
84
85 start = self.__keytransform__(key.start)
86 stop = self.__keytransform__(key.stop)
87
88 if key.step is None:
89 step = 1
90 elif isinstance(key.step, timedelta):
91 step = key.step.days
92 elif isinstance(key.step, int):
93 step = key.step
94 else:
95 raise TypeError(
96 "Cannot convert type '%s' to int." % type(key.step)
97 )
98
99 if step == 0:
100 raise ValueError("Step value must not be zero.")
101
102 date_diff = stop - start
103 if date_diff.days < 0 <= step or date_diff.days >= 0 > step:
104 step *= -1
105
106 days_in_range = []
107 for delta_days in range(0, date_diff.days, step):
108 day = start + timedelta(days=delta_days)
109 try:
110 dict.__getitem__(self, day)
111 days_in_range.append(day)
112 except KeyError:
113 pass
114 return days_in_range
115 return dict.__getitem__(self, self.__keytransform__(key))
116
117 def __setitem__(self, key, value):
118 if key in self:
119 if self.get(key).find(value) < 0 and value.find(self.get(key)) < 0:
120 value = "%s, %s" % (value, self.get(key))
121 else:
122 value = self.get(key)
123 return dict.__setitem__(self, self.__keytransform__(key), value)
124
125 def update(self, *args):
126 args = list(args)
127 for arg in args:
128 if isinstance(arg, dict):
129 for key, value in list(arg.items()):
130 self[key] = value
131 elif isinstance(arg, list):
132 for item in arg:
133 self[item] = "Holiday"
134 else:
135 self[arg] = "Holiday"
136
137 def append(self, *args):
138 return self.update(*args)
139
140 def get(self, key, default=None):
141 return dict.get(self, self.__keytransform__(key), default)
142
143 def get_list(self, key):
144 return [h for h in self.get(key, "").split(", ") if h]
145
146 def get_named(self, name):
147 # find all dates matching provided name (accepting partial
148 # strings too, case insensitive), returning them in a list
149 original_expand = self.expand
150 self.expand = False
151 matches = [key for key in self if name.lower() in self[key].lower()]
152 self.expand = original_expand
153 return matches
154
155 def pop(self, key, default=None):
156 if default is None:
157 return dict.pop(self, self.__keytransform__(key))
158 return dict.pop(self, self.__keytransform__(key), default)
159
160 def pop_named(self, name):
161 to_pop = self.get_named(name)
162 if not to_pop:
163 raise KeyError(name)
164 for key in to_pop:
165 self.pop(key)
166 return to_pop
167
168 def __eq__(self, other):
169 return dict.__eq__(self, other) and self.__dict__ == other.__dict__
170
171 def __ne__(self, other):
172 return dict.__ne__(self, other) or self.__dict__ != other.__dict__
173
174 def __add__(self, other):
175 if isinstance(other, int) and other == 0:
176 # Required to sum() list of holidays
177 # sum([h1, h2]) is equivalent to (0 + h1 + h2)
178 return self
179 elif not isinstance(other, HolidayBase):
180 raise TypeError()
181 HolidaySum = createHolidaySum(self, other)
182 country = getattr(self, "country", None) or getattr(
183 other, "country", None
184 )
185 if self.country and other.country and self.country != other.country:
186 c1 = self.country
187 if not isinstance(c1, list):
188 c1 = [c1]
189 c2 = other.country
190 if not isinstance(c2, list):
191 c2 = [c2]
192 country = c1 + c2
193 prov = getattr(self, "prov", None) or getattr(other, "prov", None)
194 if self.prov and other.prov and self.prov != other.prov:
195 p1 = self.prov if isinstance(self.prov, list) else [self.prov]
196 p2 = other.prov if isinstance(other.prov, list) else [other.prov]
197 prov = p1 + p2
198 return HolidaySum(
199 years=(self.years | other.years),
200 expand=(self.expand or other.expand),
201 observed=(self.observed or other.observed),
202 country=country,
203 prov=prov,
204 )
205
206 def __radd__(self, other):
207 return self.__add__(other)
208
209 def _populate(self, year):
210 pass
211
212
213 def createHolidaySum(h1, h2):
214 class HolidaySum(HolidayBase):
215 def __init__(self, country, **kwargs):
216 self.country = country
217 self.holidays = []
218 if getattr(h1, "holidays", False):
219 for h in h1.holidays:
220 self.holidays.append(h)
221 else:
222 self.holidays.append(h1)
223 if getattr(h2, "holidays", False):
224 for h in h2.holidays:
225 self.holidays.append(h)
226 else:
227 self.holidays.append(h2)
228 HolidayBase.__init__(self, **kwargs)
229
230 def _populate(self, year):
231 for h in self.holidays[::-1]:
232 h._populate(year)
233 self.update(h)
234
235 return HolidaySum
236
[end of holidays/holiday_base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/holidays/holiday_base.py b/holidays/holiday_base.py
--- a/holidays/holiday_base.py
+++ b/holidays/holiday_base.py
@@ -209,6 +209,9 @@
def _populate(self, year):
pass
+ def __reduce__(self):
+ return super(HolidayBase, self).__reduce__()
+
def createHolidaySum(h1, h2):
class HolidaySum(HolidayBase):
| {"golden_diff": "diff --git a/holidays/holiday_base.py b/holidays/holiday_base.py\n--- a/holidays/holiday_base.py\n+++ b/holidays/holiday_base.py\n@@ -209,6 +209,9 @@\n def _populate(self, year):\n pass\n \n+ def __reduce__(self):\n+ return super(HolidayBase, self).__reduce__()\n+\n \n def createHolidaySum(h1, h2):\n class HolidaySum(HolidayBase):\n", "issue": "Can't un-pickle a `HolidayBase`\nSeems that after a holidays class, e.g. `holidays.UnitedStates()` is used once, it can't be un-pickled.\r\n\r\nFor example, this snippet:\r\n\r\n```python\r\nimport holidays\r\nimport pickle\r\nfrom datetime import datetime\r\n\r\n# Works:\r\nus_holidays = holidays.UnitedStates()\r\nus_holidays_ = pickle.loads(pickle.dumps(us_holidays))\r\nb = datetime.fromisoformat(\"2020-01-01\") in us_holidays_\r\n\r\n# Fails:\r\nus_holidays = holidays.UnitedStates()\r\nb = datetime.fromisoformat(\"2020-01-01\") in us_holidays\r\ndump = pickle.dumps(us_holidays)\r\npickle.loads(dump) # <- exception\r\n```\r\n\r\nRaises the following exception from the last line:\r\n```\r\n~/.local/share/virtualenvs/sibylla-v2-LxBhzJgn/lib/python3.8/site-packages/holidays/holiday_base.py in __setitem__(self, key, value)\r\n 116\r\n 117 def __setitem__(self, key, value):\r\n--> 118 if key in self:\r\n 119 if self.get(key).find(value) < 0 \\\r\n 120 and value.find(self.get(key)) < 0:\r\n\r\n~/.local/share/virtualenvs/sibylla-v2-LxBhzJgn/lib/python3.8/site-packages/holidays/holiday_base.py in __contains__(self, key)\r\n 73\r\n 74 def __contains__(self, key):\r\n---> 75 return dict.__contains__(self, self.__keytransform__(key))\r\n 76\r\n 77 def __getitem__(self, key):\r\n\r\n~/.local/share/virtualenvs/sibylla-v2-LxBhzJgn/lib/python3.8/site-packages/holidays/holiday_base.py in __keytransform__(self, key)\r\n 67 raise TypeError(\"Cannot convert type '%s' to date.\" % type(key))\r\n 68\r\n---> 69 if self.expand and key.year not in self.years:\r\n 70 self.years.add(key.year)\r\n 71 self._populate(key.year)\r\n```\r\n\r\nThe `expand` attribute is set by `__init__`, but it's not there during deserialization via unpickling.\r\nI think it's because the `HolidayBase` inherits from dict and there's some weirdness there - it seems to first populate the dict in the deserialized object and only then sets the attributes from the state. But since `HolidayBase` overrides `__setitem__` and in this override it's using state attributes that weren't yet set on the object, the `expand` attribute is missing.\r\n\r\nTested with `holidays=='0.10.4'`.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Author: ryanss <[email protected]> (c) 2014-2017\n# dr-prodigy <[email protected]> (c) 2017-2021\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import timedelta, datetime, date\n\nimport six\nfrom dateutil.parser import parse\n\n\nclass HolidayBase(dict):\n PROVINCES = []\n\n def __init__(\n self, years=[], expand=True, observed=True, prov=None, state=None\n ):\n self.observed = observed\n self.expand = expand\n if isinstance(years, int):\n years = [\n years,\n ]\n self.years = set(years)\n if not getattr(self, \"prov\", False):\n self.prov = prov\n self.state = state\n for year in list(self.years):\n self._populate(year)\n\n def __setattr__(self, key, value):\n if key == \"observed\" and len(self) > 0:\n dict.__setattr__(self, key, value)\n if value is True:\n # Add (Observed) dates\n years = list(self.years)\n self.years = set()\n self.clear()\n for year in years:\n self._populate(year)\n else:\n # Remove (Observed) dates\n for k, v in list(self.items()):\n if v.find(\"Observed\") >= 0:\n del self[k]\n else:\n return dict.__setattr__(self, key, value)\n\n def __keytransform__(self, key):\n if isinstance(key, datetime):\n key = key.date()\n elif isinstance(key, date):\n key = key\n elif isinstance(key, int) or isinstance(key, float):\n key = datetime.utcfromtimestamp(key).date()\n elif isinstance(key, six.string_types):\n try:\n key = parse(key).date()\n except (ValueError, OverflowError):\n raise ValueError(\"Cannot parse date from string '%s'\" % key)\n else:\n raise TypeError(\"Cannot convert type '%s' to date.\" % type(key))\n\n if self.expand and key.year not in self.years:\n self.years.add(key.year)\n self._populate(key.year)\n return key\n\n def __contains__(self, key):\n return dict.__contains__(self, self.__keytransform__(key))\n\n def __getitem__(self, key):\n if isinstance(key, slice):\n if not key.start or not key.stop:\n raise ValueError(\"Both start and stop must be given.\")\n\n start = self.__keytransform__(key.start)\n stop = self.__keytransform__(key.stop)\n\n if key.step is None:\n step = 1\n elif isinstance(key.step, timedelta):\n step = key.step.days\n elif isinstance(key.step, int):\n step = key.step\n else:\n raise TypeError(\n \"Cannot convert type '%s' to int.\" % type(key.step)\n )\n\n if step == 0:\n raise ValueError(\"Step value must not be zero.\")\n\n date_diff = stop - start\n if date_diff.days < 0 <= step or date_diff.days >= 0 > step:\n step *= -1\n\n days_in_range = []\n for delta_days in range(0, date_diff.days, step):\n day = start + timedelta(days=delta_days)\n try:\n dict.__getitem__(self, day)\n days_in_range.append(day)\n except KeyError:\n pass\n return days_in_range\n return dict.__getitem__(self, self.__keytransform__(key))\n\n def __setitem__(self, key, value):\n if key in self:\n if self.get(key).find(value) < 0 and value.find(self.get(key)) < 0:\n value = \"%s, %s\" % (value, self.get(key))\n else:\n value = self.get(key)\n return dict.__setitem__(self, self.__keytransform__(key), value)\n\n def update(self, *args):\n args = list(args)\n for arg in args:\n if isinstance(arg, dict):\n for key, value in list(arg.items()):\n self[key] = value\n elif isinstance(arg, list):\n for item in arg:\n self[item] = \"Holiday\"\n else:\n self[arg] = \"Holiday\"\n\n def append(self, *args):\n return self.update(*args)\n\n def get(self, key, default=None):\n return dict.get(self, 
self.__keytransform__(key), default)\n\n def get_list(self, key):\n return [h for h in self.get(key, \"\").split(\", \") if h]\n\n def get_named(self, name):\n # find all dates matching provided name (accepting partial\n # strings too, case insensitive), returning them in a list\n original_expand = self.expand\n self.expand = False\n matches = [key for key in self if name.lower() in self[key].lower()]\n self.expand = original_expand\n return matches\n\n def pop(self, key, default=None):\n if default is None:\n return dict.pop(self, self.__keytransform__(key))\n return dict.pop(self, self.__keytransform__(key), default)\n\n def pop_named(self, name):\n to_pop = self.get_named(name)\n if not to_pop:\n raise KeyError(name)\n for key in to_pop:\n self.pop(key)\n return to_pop\n\n def __eq__(self, other):\n return dict.__eq__(self, other) and self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return dict.__ne__(self, other) or self.__dict__ != other.__dict__\n\n def __add__(self, other):\n if isinstance(other, int) and other == 0:\n # Required to sum() list of holidays\n # sum([h1, h2]) is equivalent to (0 + h1 + h2)\n return self\n elif not isinstance(other, HolidayBase):\n raise TypeError()\n HolidaySum = createHolidaySum(self, other)\n country = getattr(self, \"country\", None) or getattr(\n other, \"country\", None\n )\n if self.country and other.country and self.country != other.country:\n c1 = self.country\n if not isinstance(c1, list):\n c1 = [c1]\n c2 = other.country\n if not isinstance(c2, list):\n c2 = [c2]\n country = c1 + c2\n prov = getattr(self, \"prov\", None) or getattr(other, \"prov\", None)\n if self.prov and other.prov and self.prov != other.prov:\n p1 = self.prov if isinstance(self.prov, list) else [self.prov]\n p2 = other.prov if isinstance(other.prov, list) else [other.prov]\n prov = p1 + p2\n return HolidaySum(\n years=(self.years | other.years),\n expand=(self.expand or other.expand),\n observed=(self.observed or other.observed),\n country=country,\n prov=prov,\n )\n\n def __radd__(self, other):\n return self.__add__(other)\n\n def _populate(self, year):\n pass\n\n\ndef createHolidaySum(h1, h2):\n class HolidaySum(HolidayBase):\n def __init__(self, country, **kwargs):\n self.country = country\n self.holidays = []\n if getattr(h1, \"holidays\", False):\n for h in h1.holidays:\n self.holidays.append(h)\n else:\n self.holidays.append(h1)\n if getattr(h2, \"holidays\", False):\n for h in h2.holidays:\n self.holidays.append(h)\n else:\n self.holidays.append(h2)\n HolidayBase.__init__(self, **kwargs)\n\n def _populate(self, year):\n for h in self.holidays[::-1]:\n h._populate(year)\n self.update(h)\n\n return HolidaySum\n", "path": "holidays/holiday_base.py"}]} | 3,611 | 101 |
gh_patches_debug_33192 | rasdani/github-patches | git_diff | conda__conda-build-1471 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
conda-build no longer generates .exe files for my scripts on Windows
I notice that conda-build version 1.21.7 used to generate .exe files to invoke my scripts, but 2.0.6 doesn't seem to do that. I've got Python scripts, goodstuff, goodstuff-data, and goodstuff-library, which are marked as scripts in my setup.py file. 1.21.7 turns them into goodstuff-script.py and goodstuff.exe, goodstuff-data-script.py and goodstuff-data.exe, etc., which I found very useful.
Is there any way to get that 1.21.7 functionality back again?
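For reference, a rough sketch of the kind of setup.py being described; the script names are the ones above, everything else is assumed for illustration. The `*-script.py` plus `*.exe` pair is the wrapper pattern setuptools has historically generated for console scripts on Windows:

```python
# illustrative only: names taken from the report, other details assumed
from setuptools import setup

setup(
    name="goodstuff",
    version="1.0",
    packages=["goodstuff"],
    # plain script files, as described above
    scripts=["goodstuff", "goodstuff-data", "goodstuff-library"],
    # entry-point equivalent; on Windows setuptools wraps each of these as
    # <name>-script.py plus <name>.exe
    # entry_points={"console_scripts": ["goodstuff = goodstuff.cli:main"]},
)
```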
</issue>
<code>
[start of conda_build/windows.py]
1 from __future__ import absolute_import, division, print_function
2
3 import os
4 import sys
5 from os.path import isdir, join
6
7 # importing setuptools patches distutils so that it knows how to find VC for python 2.7
8 import setuptools # noqa
9 # Leverage the hard work done by setuptools/distutils to find vcvarsall using
10 # either the registry or the VS**COMNTOOLS environment variable
11 from distutils.msvc9compiler import find_vcvarsall as distutils_find_vcvarsall
12 from distutils.msvc9compiler import Reg, WINSDK_BASE
13
14 from .conda_interface import bits
15
16 from conda_build import environ
17 from conda_build.utils import _check_call, root_script_dir, path_prepended
18
19
20 assert sys.platform == 'win32'
21
22
23 VS_VERSION_STRING = {
24 '8.0': 'Visual Studio 8 2005',
25 '9.0': 'Visual Studio 9 2008',
26 '10.0': 'Visual Studio 10 2010',
27 '11.0': 'Visual Studio 11 2012',
28 '12.0': 'Visual Studio 12 2013',
29 '14.0': 'Visual Studio 14 2015'
30 }
31
32
33 def build_vcvarsall_vs_path(version):
34 """
35 Given the Visual Studio version, returns the default path to the
36 Microsoft Visual Studio vcvarsall.bat file.
37 Expected versions are of the form {9.0, 10.0, 12.0, 14.0}
38 """
39 # Set up a load of paths that can be imported from the tests
40 if 'ProgramFiles(x86)' in os.environ:
41 PROGRAM_FILES_PATH = os.environ['ProgramFiles(x86)']
42 else:
43 PROGRAM_FILES_PATH = os.environ['ProgramFiles']
44
45 flatversion = str(version).replace('.', '')
46 vstools = "VS{0}COMNTOOLS".format(flatversion)
47
48 if vstools in os.environ:
49 return os.path.join(os.environ[vstools], '..\\..\\VC\\vcvarsall.bat')
50 else:
51 # prefer looking at env var; fall back to program files defaults
52 return os.path.join(PROGRAM_FILES_PATH,
53 'Microsoft Visual Studio {}'.format(version), 'VC',
54 'vcvarsall.bat')
55
56
57 def msvc_env_cmd(bits, config, override=None):
58 arch_selector = 'x86' if bits == 32 else 'amd64'
59
60 msvc_env_lines = []
61
62 version = None
63 if override is not None:
64 version = override
65
66 # The DISTUTILS_USE_SDK variable tells distutils to not try and validate
67 # the MSVC compiler. For < 3.5 this still forcibly looks for 'cl.exe'.
68 # For > 3.5 it literally just skips the validation logic.
69 # See distutils _msvccompiler.py and msvc9compiler.py / msvccompiler.py
70 # for more information.
71 msvc_env_lines.append('set DISTUTILS_USE_SDK=1')
72 # This is also required to hit the 'don't validate' logic on < 3.5.
73 # For > 3.5 this is ignored.
74 msvc_env_lines.append('set MSSdk=1')
75
76 if not version:
77 if config.PY3K and config.use_MSVC2015:
78 version = '14.0'
79 elif config.PY3K:
80 version = '10.0'
81 else:
82 version = '9.0'
83
84 if float(version) >= 14.0:
85 # For Python 3.5+, ensure that we link with the dynamic runtime. See
86 # http://stevedower.id.au/blog/building-for-python-3-5-part-two/ for more info
87 msvc_env_lines.append('set PY_VCRUNTIME_REDIST=%LIBRARY_BIN%\\vcruntime{0}.dll'.format(
88 version.replace('.', '')))
89
90 vcvarsall_vs_path = build_vcvarsall_vs_path(version)
91
92 def build_vcvarsall_cmd(cmd, arch=arch_selector):
93 # Default argument `arch_selector` is defined above
94 return 'call "{cmd}" {arch}'.format(cmd=cmd, arch=arch)
95
96 msvc_env_lines.append('set "VS_VERSION={}"'.format(version))
97 msvc_env_lines.append('set "VS_MAJOR={}"'.format(version.split('.')[0]))
98 msvc_env_lines.append('set "VS_YEAR={}"'.format(VS_VERSION_STRING[version][-4:]))
99 msvc_env_lines.append('set "CMAKE_GENERATOR={}"'.format(VS_VERSION_STRING[version] +
100 {64: ' Win64', 32: ''}[bits]))
101 # tell msys2 to ignore path conversions for issue-causing windows-style flags in build
102 # See https://github.com/conda-forge/icu-feedstock/pull/5
103 msvc_env_lines.append('set "MSYS2_ARG_CONV_EXCL=/AI;/AL;/OUT;/out;%MSYS2_ARG_CONV_EXCL%"')
104 msvc_env_lines.append('set "MSYS2_ENV_CONV_EXCL=CL"')
105 if version == '10.0':
106 try:
107 WIN_SDK_71_PATH = Reg.get_value(os.path.join(WINSDK_BASE, 'v7.1'),
108 'installationfolder')
109 WIN_SDK_71_BAT_PATH = os.path.join(WIN_SDK_71_PATH, 'Bin', 'SetEnv.cmd')
110
111 win_sdk_arch = '/Release /x86' if bits == 32 else '/Release /x64'
112 win_sdk_cmd = build_vcvarsall_cmd(WIN_SDK_71_BAT_PATH, arch=win_sdk_arch)
113
114 # There are two methods of building Python 3.3 and 3.4 extensions (both
115 # of which required Visual Studio 2010 - as explained in the Python wiki
116 # https://wiki.python.org/moin/WindowsCompilers)
117 # 1) Use the Windows SDK 7.1
118 # 2) Use Visual Studio 2010 (any edition)
119 # However, VS2010 never shipped with a 64-bit compiler, so in this case
120 # **only** option (1) applies. For this reason, we always try and
121 # activate the Windows SDK first. Unfortunately, unsuccessfully setting
122 # up the environment does **not EXIT 1** and therefore we must fall
123 # back to attempting to set up VS2010.
124 # DelayedExpansion is required for the SetEnv.cmd
125 msvc_env_lines.append('Setlocal EnableDelayedExpansion')
126 msvc_env_lines.append(win_sdk_cmd)
127 # If the WindowsSDKDir environment variable has not been successfully
128 # set then try activating VS2010
129 msvc_env_lines.append('if not "%WindowsSDKDir%" == "{}" ( {} )'.format(
130 WIN_SDK_71_PATH, build_vcvarsall_cmd(vcvarsall_vs_path)))
131 # sdk is not installed. Fall back to only trying VS 2010
132 except KeyError:
133 msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))
134 elif version == '9.0':
135 # Get the Visual Studio 2008 path (not the Visual C++ for Python path)
136 # and get the 'vcvars64.bat' from inside the bin (in the directory above
137 # that returned by distutils_find_vcvarsall)
138 try:
139 VCVARS64_VS9_BAT_PATH = os.path.join(os.path.dirname(distutils_find_vcvarsall(9)),
140 'bin', 'vcvars64.bat')
141 # there's an exception if VS or the VC compiler for python are not actually installed.
142 except (KeyError, TypeError):
143 VCVARS64_VS9_BAT_PATH = None
144
145 error1 = 'if errorlevel 1 {}'
146
147 # Prefer VS9 proper over Microsoft Visual C++ Compiler for Python 2.7
148 msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))
149 # The Visual Studio 2008 Express edition does not properly contain
150 # the amd64 build files, so we call the vcvars64.bat manually,
151 # rather than using the vcvarsall.bat which would try and call the
152 # missing bat file.
153 if arch_selector == 'amd64' and VCVARS64_VS9_BAT_PATH:
154 msvc_env_lines.append(error1.format(
155 build_vcvarsall_cmd(VCVARS64_VS9_BAT_PATH)))
156 # Otherwise, fall back to icrosoft Visual C++ Compiler for Python 2.7+
157 # by using the logic provided by setuptools
158 msvc_env_lines.append(error1.format(
159 build_vcvarsall_cmd(distutils_find_vcvarsall(9))))
160 else:
161 # Visual Studio 14 or otherwise
162 msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))
163
164 return '\n'.join(msvc_env_lines) + '\n'
165
166
167 def build(m, bld_bat, config):
168 with path_prepended(config.build_prefix):
169 env = environ.get_dict(config=config, m=m)
170 env["CONDA_BUILD_STATE"] = "BUILD"
171
172 for name in 'BIN', 'INC', 'LIB':
173 path = env['LIBRARY_' + name]
174 if not isdir(path):
175 os.makedirs(path)
176
177 src_dir = config.work_dir
178 if os.path.isfile(bld_bat):
179 with open(bld_bat) as fi:
180 data = fi.read()
181 with open(join(src_dir, 'bld.bat'), 'w') as fo:
182 # more debuggable with echo on
183 fo.write('@echo on\n')
184 for key, value in env.items():
185 fo.write('set "{key}={value}"\n'.format(key=key, value=value))
186 fo.write(msvc_env_cmd(bits=bits, config=config,
187 override=m.get_value('build/msvc_compiler', None)))
188 # Reset echo on, because MSVC scripts might have turned it off
189 fo.write('@echo on\n')
190 fo.write('set "INCLUDE={};%INCLUDE%"\n'.format(env["LIBRARY_INC"]))
191 fo.write('set "LIB={};%LIB%"\n'.format(env["LIBRARY_LIB"]))
192 if config.activate:
193 fo.write('call "{conda_root}\\activate.bat" "{prefix}"\n'.format(
194 conda_root=root_script_dir,
195 prefix=config.build_prefix))
196 fo.write("REM ===== end generated header =====\n")
197 fo.write(data)
198
199 cmd = ['cmd.exe', '/c', 'bld.bat']
200 _check_call(cmd, cwd=src_dir)
201
[end of conda_build/windows.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda_build/windows.py b/conda_build/windows.py
--- a/conda_build/windows.py
+++ b/conda_build/windows.py
@@ -2,7 +2,7 @@
import os
import sys
-from os.path import isdir, join
+from os.path import isdir, join, dirname, isfile
# importing setuptools patches distutils so that it knows how to find VC for python 2.7
import setuptools # noqa
@@ -14,7 +14,7 @@
from .conda_interface import bits
from conda_build import environ
-from conda_build.utils import _check_call, root_script_dir, path_prepended
+from conda_build.utils import _check_call, root_script_dir, path_prepended, copy_into
assert sys.platform == 'win32'
@@ -30,6 +30,36 @@
}
+def fix_staged_scripts(scripts_dir):
+ """
+ Fixes scripts which have been installed unix-style to have a .bat
+ helper
+ """
+ if not isdir(scripts_dir):
+ return
+ for fn in os.listdir(scripts_dir):
+ # process all the extensionless files
+ if not isfile(join(scripts_dir, fn)) or '.' in fn:
+ continue
+
+ with open(join(scripts_dir, fn)) as f:
+ line = f.readline().lower()
+ # If it's a #!python script
+ if not (line.startswith('#!') and 'python' in line.lower()):
+ continue
+ print('Adjusting unix-style #! script %s, '
+ 'and adding a .bat file for it' % fn)
+ # copy it with a .py extension (skipping that first #! line)
+ with open(join(scripts_dir, fn + '-script.py'), 'w') as fo:
+ fo.write(f.read())
+ # now create the .exe file
+ copy_into(join(dirname(__file__), 'cli-%d.exe' % bits),
+ join(scripts_dir, fn + '.exe'))
+
+ # remove the original script
+ os.remove(join(scripts_dir, fn))
+
+
def build_vcvarsall_vs_path(version):
"""
Given the Visual Studio version, returns the default path to the
@@ -198,3 +228,5 @@
cmd = ['cmd.exe', '/c', 'bld.bat']
_check_call(cmd, cwd=src_dir)
+
+ fix_staged_scripts(join(config.build_prefix, 'Scripts'))
| {"golden_diff": "diff --git a/conda_build/windows.py b/conda_build/windows.py\n--- a/conda_build/windows.py\n+++ b/conda_build/windows.py\n@@ -2,7 +2,7 @@\n \n import os\n import sys\n-from os.path import isdir, join\n+from os.path import isdir, join, dirname, isfile\n \n # importing setuptools patches distutils so that it knows how to find VC for python 2.7\n import setuptools # noqa\n@@ -14,7 +14,7 @@\n from .conda_interface import bits\n \n from conda_build import environ\n-from conda_build.utils import _check_call, root_script_dir, path_prepended\n+from conda_build.utils import _check_call, root_script_dir, path_prepended, copy_into\n \n \n assert sys.platform == 'win32'\n@@ -30,6 +30,36 @@\n }\n \n \n+def fix_staged_scripts(scripts_dir):\n+ \"\"\"\n+ Fixes scripts which have been installed unix-style to have a .bat\n+ helper\n+ \"\"\"\n+ if not isdir(scripts_dir):\n+ return\n+ for fn in os.listdir(scripts_dir):\n+ # process all the extensionless files\n+ if not isfile(join(scripts_dir, fn)) or '.' in fn:\n+ continue\n+\n+ with open(join(scripts_dir, fn)) as f:\n+ line = f.readline().lower()\n+ # If it's a #!python script\n+ if not (line.startswith('#!') and 'python' in line.lower()):\n+ continue\n+ print('Adjusting unix-style #! script %s, '\n+ 'and adding a .bat file for it' % fn)\n+ # copy it with a .py extension (skipping that first #! line)\n+ with open(join(scripts_dir, fn + '-script.py'), 'w') as fo:\n+ fo.write(f.read())\n+ # now create the .exe file\n+ copy_into(join(dirname(__file__), 'cli-%d.exe' % bits),\n+ join(scripts_dir, fn + '.exe'))\n+\n+ # remove the original script\n+ os.remove(join(scripts_dir, fn))\n+\n+\n def build_vcvarsall_vs_path(version):\n \"\"\"\n Given the Visual Studio version, returns the default path to the\n@@ -198,3 +228,5 @@\n \n cmd = ['cmd.exe', '/c', 'bld.bat']\n _check_call(cmd, cwd=src_dir)\n+\n+ fix_staged_scripts(join(config.build_prefix, 'Scripts'))\n", "issue": "conda-build no longer generates .exe files for my scripts on Windows\nI notice that conda-build, verson 1.21.7, used to generate .exe files to invoke my scripts. But 2.0.6 doesn't seem to do that. I've got Python scripts, goodstuff, goodstuff-data, and goodstuff-library, which are marked as scripts in my setup.py file. 1.21.7 turns them into goodstuff-script.py and goodstuff.exe, goodstuff-data-script.py and goodstuff-data.exe, etc. 
Which I found very useful.\n\nIs there any way to get that 1.21.functionality back again?\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nfrom os.path import isdir, join\n\n# importing setuptools patches distutils so that it knows how to find VC for python 2.7\nimport setuptools # noqa\n# Leverage the hard work done by setuptools/distutils to find vcvarsall using\n# either the registry or the VS**COMNTOOLS environment variable\nfrom distutils.msvc9compiler import find_vcvarsall as distutils_find_vcvarsall\nfrom distutils.msvc9compiler import Reg, WINSDK_BASE\n\nfrom .conda_interface import bits\n\nfrom conda_build import environ\nfrom conda_build.utils import _check_call, root_script_dir, path_prepended\n\n\nassert sys.platform == 'win32'\n\n\nVS_VERSION_STRING = {\n '8.0': 'Visual Studio 8 2005',\n '9.0': 'Visual Studio 9 2008',\n '10.0': 'Visual Studio 10 2010',\n '11.0': 'Visual Studio 11 2012',\n '12.0': 'Visual Studio 12 2013',\n '14.0': 'Visual Studio 14 2015'\n}\n\n\ndef build_vcvarsall_vs_path(version):\n \"\"\"\n Given the Visual Studio version, returns the default path to the\n Microsoft Visual Studio vcvarsall.bat file.\n Expected versions are of the form {9.0, 10.0, 12.0, 14.0}\n \"\"\"\n # Set up a load of paths that can be imported from the tests\n if 'ProgramFiles(x86)' in os.environ:\n PROGRAM_FILES_PATH = os.environ['ProgramFiles(x86)']\n else:\n PROGRAM_FILES_PATH = os.environ['ProgramFiles']\n\n flatversion = str(version).replace('.', '')\n vstools = \"VS{0}COMNTOOLS\".format(flatversion)\n\n if vstools in os.environ:\n return os.path.join(os.environ[vstools], '..\\\\..\\\\VC\\\\vcvarsall.bat')\n else:\n # prefer looking at env var; fall back to program files defaults\n return os.path.join(PROGRAM_FILES_PATH,\n 'Microsoft Visual Studio {}'.format(version), 'VC',\n 'vcvarsall.bat')\n\n\ndef msvc_env_cmd(bits, config, override=None):\n arch_selector = 'x86' if bits == 32 else 'amd64'\n\n msvc_env_lines = []\n\n version = None\n if override is not None:\n version = override\n\n # The DISTUTILS_USE_SDK variable tells distutils to not try and validate\n # the MSVC compiler. For < 3.5 this still forcibly looks for 'cl.exe'.\n # For > 3.5 it literally just skips the validation logic.\n # See distutils _msvccompiler.py and msvc9compiler.py / msvccompiler.py\n # for more information.\n msvc_env_lines.append('set DISTUTILS_USE_SDK=1')\n # This is also required to hit the 'don't validate' logic on < 3.5.\n # For > 3.5 this is ignored.\n msvc_env_lines.append('set MSSdk=1')\n\n if not version:\n if config.PY3K and config.use_MSVC2015:\n version = '14.0'\n elif config.PY3K:\n version = '10.0'\n else:\n version = '9.0'\n\n if float(version) >= 14.0:\n # For Python 3.5+, ensure that we link with the dynamic runtime. 
See\n # http://stevedower.id.au/blog/building-for-python-3-5-part-two/ for more info\n msvc_env_lines.append('set PY_VCRUNTIME_REDIST=%LIBRARY_BIN%\\\\vcruntime{0}.dll'.format(\n version.replace('.', '')))\n\n vcvarsall_vs_path = build_vcvarsall_vs_path(version)\n\n def build_vcvarsall_cmd(cmd, arch=arch_selector):\n # Default argument `arch_selector` is defined above\n return 'call \"{cmd}\" {arch}'.format(cmd=cmd, arch=arch)\n\n msvc_env_lines.append('set \"VS_VERSION={}\"'.format(version))\n msvc_env_lines.append('set \"VS_MAJOR={}\"'.format(version.split('.')[0]))\n msvc_env_lines.append('set \"VS_YEAR={}\"'.format(VS_VERSION_STRING[version][-4:]))\n msvc_env_lines.append('set \"CMAKE_GENERATOR={}\"'.format(VS_VERSION_STRING[version] +\n {64: ' Win64', 32: ''}[bits]))\n # tell msys2 to ignore path conversions for issue-causing windows-style flags in build\n # See https://github.com/conda-forge/icu-feedstock/pull/5\n msvc_env_lines.append('set \"MSYS2_ARG_CONV_EXCL=/AI;/AL;/OUT;/out;%MSYS2_ARG_CONV_EXCL%\"')\n msvc_env_lines.append('set \"MSYS2_ENV_CONV_EXCL=CL\"')\n if version == '10.0':\n try:\n WIN_SDK_71_PATH = Reg.get_value(os.path.join(WINSDK_BASE, 'v7.1'),\n 'installationfolder')\n WIN_SDK_71_BAT_PATH = os.path.join(WIN_SDK_71_PATH, 'Bin', 'SetEnv.cmd')\n\n win_sdk_arch = '/Release /x86' if bits == 32 else '/Release /x64'\n win_sdk_cmd = build_vcvarsall_cmd(WIN_SDK_71_BAT_PATH, arch=win_sdk_arch)\n\n # There are two methods of building Python 3.3 and 3.4 extensions (both\n # of which required Visual Studio 2010 - as explained in the Python wiki\n # https://wiki.python.org/moin/WindowsCompilers)\n # 1) Use the Windows SDK 7.1\n # 2) Use Visual Studio 2010 (any edition)\n # However, VS2010 never shipped with a 64-bit compiler, so in this case\n # **only** option (1) applies. For this reason, we always try and\n # activate the Windows SDK first. Unfortunately, unsuccessfully setting\n # up the environment does **not EXIT 1** and therefore we must fall\n # back to attempting to set up VS2010.\n # DelayedExpansion is required for the SetEnv.cmd\n msvc_env_lines.append('Setlocal EnableDelayedExpansion')\n msvc_env_lines.append(win_sdk_cmd)\n # If the WindowsSDKDir environment variable has not been successfully\n # set then try activating VS2010\n msvc_env_lines.append('if not \"%WindowsSDKDir%\" == \"{}\" ( {} )'.format(\n WIN_SDK_71_PATH, build_vcvarsall_cmd(vcvarsall_vs_path)))\n # sdk is not installed. 
Fall back to only trying VS 2010\n except KeyError:\n msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))\n elif version == '9.0':\n # Get the Visual Studio 2008 path (not the Visual C++ for Python path)\n # and get the 'vcvars64.bat' from inside the bin (in the directory above\n # that returned by distutils_find_vcvarsall)\n try:\n VCVARS64_VS9_BAT_PATH = os.path.join(os.path.dirname(distutils_find_vcvarsall(9)),\n 'bin', 'vcvars64.bat')\n # there's an exception if VS or the VC compiler for python are not actually installed.\n except (KeyError, TypeError):\n VCVARS64_VS9_BAT_PATH = None\n\n error1 = 'if errorlevel 1 {}'\n\n # Prefer VS9 proper over Microsoft Visual C++ Compiler for Python 2.7\n msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))\n # The Visual Studio 2008 Express edition does not properly contain\n # the amd64 build files, so we call the vcvars64.bat manually,\n # rather than using the vcvarsall.bat which would try and call the\n # missing bat file.\n if arch_selector == 'amd64' and VCVARS64_VS9_BAT_PATH:\n msvc_env_lines.append(error1.format(\n build_vcvarsall_cmd(VCVARS64_VS9_BAT_PATH)))\n # Otherwise, fall back to icrosoft Visual C++ Compiler for Python 2.7+\n # by using the logic provided by setuptools\n msvc_env_lines.append(error1.format(\n build_vcvarsall_cmd(distutils_find_vcvarsall(9))))\n else:\n # Visual Studio 14 or otherwise\n msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))\n\n return '\\n'.join(msvc_env_lines) + '\\n'\n\n\ndef build(m, bld_bat, config):\n with path_prepended(config.build_prefix):\n env = environ.get_dict(config=config, m=m)\n env[\"CONDA_BUILD_STATE\"] = \"BUILD\"\n\n for name in 'BIN', 'INC', 'LIB':\n path = env['LIBRARY_' + name]\n if not isdir(path):\n os.makedirs(path)\n\n src_dir = config.work_dir\n if os.path.isfile(bld_bat):\n with open(bld_bat) as fi:\n data = fi.read()\n with open(join(src_dir, 'bld.bat'), 'w') as fo:\n # more debuggable with echo on\n fo.write('@echo on\\n')\n for key, value in env.items():\n fo.write('set \"{key}={value}\"\\n'.format(key=key, value=value))\n fo.write(msvc_env_cmd(bits=bits, config=config,\n override=m.get_value('build/msvc_compiler', None)))\n # Reset echo on, because MSVC scripts might have turned it off\n fo.write('@echo on\\n')\n fo.write('set \"INCLUDE={};%INCLUDE%\"\\n'.format(env[\"LIBRARY_INC\"]))\n fo.write('set \"LIB={};%LIB%\"\\n'.format(env[\"LIBRARY_LIB\"]))\n if config.activate:\n fo.write('call \"{conda_root}\\\\activate.bat\" \"{prefix}\"\\n'.format(\n conda_root=root_script_dir,\n prefix=config.build_prefix))\n fo.write(\"REM ===== end generated header =====\\n\")\n fo.write(data)\n\n cmd = ['cmd.exe', '/c', 'bld.bat']\n _check_call(cmd, cwd=src_dir)\n", "path": "conda_build/windows.py"}]} | 3,545 | 562 |
gh_patches_debug_36797 | rasdani/github-patches | git_diff | docker__docker-py-1812 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
client.networks.containers() is always empty
```
>>> import docker
>>> d = docker.from_env()
>>> [n.name for n in d.networks.list()]
['pedlintegrationtests_default', 'bridge', 'host', 'none']
>>> [n.containers for n in d.networks.list()]
[[], [], [], []]
```
```
$ docker network inspect pedlintegrationtests_default | jq '.[0].Containers | length'
4
```
```
$ pip freeze | grep docker && python --version && docker version
docker==2.5.1
docker-compose==1.16.1
docker-pycreds==0.2.1
dockerpty==0.4.1
nvidia-docker-compose==0.1.4
Python 3.5.2
Client:
Version: 17.09.0-ce
API version: 1.32
Go version: go1.8.3
Git commit: afdb6d4
Built: Tue Sep 26 22:42:18 2017
OS/Arch: linux/amd64
Server:
Version: 17.09.0-ce
API version: 1.32 (minimum version 1.12)
Go version: go1.8.3
Git commit: afdb6d4
Built: Tue Sep 26 22:40:56 2017
OS/Arch: linux/amd64
Experimental: false
```
</issue>
<code>
[start of docker/models/networks.py]
1 from ..api import APIClient
2 from .containers import Container
3 from .resource import Model, Collection
4
5
6 class Network(Model):
7 """
8 A Docker network.
9 """
10 @property
11 def name(self):
12 """
13 The name of the network.
14 """
15 return self.attrs.get('Name')
16
17 @property
18 def containers(self):
19 """
20 The containers that are connected to the network, as a list of
21 :py:class:`~docker.models.containers.Container` objects.
22 """
23 return [
24 self.client.containers.get(cid) for cid in
25 (self.attrs.get('Containers') or {}).keys()
26 ]
27
28 def connect(self, container, *args, **kwargs):
29 """
30 Connect a container to this network.
31
32 Args:
33 container (str): Container to connect to this network, as either
34 an ID, name, or :py:class:`~docker.models.containers.Container`
35 object.
36 aliases (:py:class:`list`): A list of aliases for this endpoint.
37 Names in that list can be used within the network to reach the
38 container. Defaults to ``None``.
39 links (:py:class:`list`): A list of links for this endpoint.
40 Containers declared in this list will be linkedto this
41 container. Defaults to ``None``.
42 ipv4_address (str): The IP address of this container on the
43 network, using the IPv4 protocol. Defaults to ``None``.
44 ipv6_address (str): The IP address of this container on the
45 network, using the IPv6 protocol. Defaults to ``None``.
46 link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
47 addresses.
48
49 Raises:
50 :py:class:`docker.errors.APIError`
51 If the server returns an error.
52 """
53 if isinstance(container, Container):
54 container = container.id
55 return self.client.api.connect_container_to_network(
56 container, self.id, *args, **kwargs
57 )
58
59 def disconnect(self, container, *args, **kwargs):
60 """
61 Disconnect a container from this network.
62
63 Args:
64 container (str): Container to disconnect from this network, as
65 either an ID, name, or
66 :py:class:`~docker.models.containers.Container` object.
67 force (bool): Force the container to disconnect from a network.
68 Default: ``False``
69
70 Raises:
71 :py:class:`docker.errors.APIError`
72 If the server returns an error.
73 """
74 if isinstance(container, Container):
75 container = container.id
76 return self.client.api.disconnect_container_from_network(
77 container, self.id, *args, **kwargs
78 )
79
80 def remove(self):
81 """
82 Remove this network.
83
84 Raises:
85 :py:class:`docker.errors.APIError`
86 If the server returns an error.
87 """
88 return self.client.api.remove_network(self.id)
89
90
91 class NetworkCollection(Collection):
92 """
93 Networks on the Docker server.
94 """
95 model = Network
96
97 def create(self, name, *args, **kwargs):
98 """
99 Create a network. Similar to the ``docker network create``.
100
101 Args:
102 name (str): Name of the network
103 driver (str): Name of the driver used to create the network
104 options (dict): Driver options as a key-value dictionary
105 ipam (IPAMConfig): Optional custom IP scheme for the network.
106 check_duplicate (bool): Request daemon to check for networks with
107 same name. Default: ``None``.
108 internal (bool): Restrict external access to the network. Default
109 ``False``.
110 labels (dict): Map of labels to set on the network. Default
111 ``None``.
112 enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
113 attachable (bool): If enabled, and the network is in the global
114 scope, non-service containers on worker nodes will be able to
115 connect to the network.
116 scope (str): Specify the network's scope (``local``, ``global`` or
117 ``swarm``)
118 ingress (bool): If set, create an ingress network which provides
119 the routing-mesh in swarm mode.
120
121 Returns:
122 (:py:class:`Network`): The network that was created.
123
124 Raises:
125 :py:class:`docker.errors.APIError`
126 If the server returns an error.
127
128 Example:
129 A network using the bridge driver:
130
131 >>> client.networks.create("network1", driver="bridge")
132
133 You can also create more advanced networks with custom IPAM
134 configurations. For example, setting the subnet to
135 ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
136
137 .. code-block:: python
138
139 >>> ipam_pool = docker.types.IPAMPool(
140 subnet='192.168.52.0/24',
141 gateway='192.168.52.254'
142 )
143 >>> ipam_config = docker.types.IPAMConfig(
144 pool_configs=[ipam_pool]
145 )
146 >>> client.networks.create(
147 "network1",
148 driver="bridge",
149 ipam=ipam_config
150 )
151
152 """
153 resp = self.client.api.create_network(name, *args, **kwargs)
154 return self.get(resp['Id'])
155
156 def get(self, network_id):
157 """
158 Get a network by its ID.
159
160 Args:
161 network_id (str): The ID of the network.
162 verbose (bool): Retrieve the service details across the cluster in
163 swarm mode.
164 scope (str): Filter the network by scope (``swarm``, ``global``
165 or ``local``).
166
167 Returns:
168 (:py:class:`Network`) The network.
169
170 Raises:
171 :py:class:`docker.errors.NotFound`
172 If the network does not exist.
173
174 :py:class:`docker.errors.APIError`
175 If the server returns an error.
176
177 """
178 return self.prepare_model(self.client.api.inspect_network(network_id))
179
180 def list(self, *args, **kwargs):
181 """
182 List networks. Similar to the ``docker networks ls`` command.
183
184 Args:
185 names (:py:class:`list`): List of names to filter by.
186 ids (:py:class:`list`): List of ids to filter by.
187
188 Returns:
189 (list of :py:class:`Network`) The networks on the server.
190
191 Raises:
192 :py:class:`docker.errors.APIError`
193 If the server returns an error.
194 """
195 resp = self.client.api.networks(*args, **kwargs)
196 return [self.prepare_model(item) for item in resp]
197
198 def prune(self, filters=None):
199 self.client.api.prune_networks(filters=filters)
200 prune.__doc__ = APIClient.prune_networks.__doc__
201
[end of docker/models/networks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/models/networks.py b/docker/models/networks.py
--- a/docker/models/networks.py
+++ b/docker/models/networks.py
@@ -1,4 +1,5 @@
from ..api import APIClient
+from ..utils import version_gte
from .containers import Container
from .resource import Model, Collection
@@ -153,7 +154,7 @@
resp = self.client.api.create_network(name, *args, **kwargs)
return self.get(resp['Id'])
- def get(self, network_id):
+ def get(self, network_id, *args, **kwargs):
"""
Get a network by its ID.
@@ -175,7 +176,9 @@
If the server returns an error.
"""
- return self.prepare_model(self.client.api.inspect_network(network_id))
+ return self.prepare_model(
+ self.client.api.inspect_network(network_id, *args, **kwargs)
+ )
def list(self, *args, **kwargs):
"""
@@ -184,6 +187,13 @@
Args:
names (:py:class:`list`): List of names to filter by.
ids (:py:class:`list`): List of ids to filter by.
+ filters (dict): Filters to be processed on the network list.
+ Available filters:
+ - ``driver=[<driver-name>]`` Matches a network's driver.
+ - ``label=[<key>]`` or ``label=[<key>=<value>]``.
+ - ``type=["custom"|"builtin"]`` Filters networks by type.
+ greedy (bool): Fetch more details for each network individually.
+ You might want this to get the containers attached to them.
Returns:
(list of :py:class:`Network`) The networks on the server.
@@ -192,8 +202,13 @@
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
+ greedy = kwargs.pop('greedy', False)
resp = self.client.api.networks(*args, **kwargs)
- return [self.prepare_model(item) for item in resp]
+ networks = [self.prepare_model(item) for item in resp]
+ if greedy and version_gte(self.client.api._version, '1.28'):
+ for net in networks:
+ net.reload()
+ return networks
def prune(self, filters=None):
self.client.api.prune_networks(filters=filters)
| {"golden_diff": "diff --git a/docker/models/networks.py b/docker/models/networks.py\n--- a/docker/models/networks.py\n+++ b/docker/models/networks.py\n@@ -1,4 +1,5 @@\n from ..api import APIClient\n+from ..utils import version_gte\n from .containers import Container\n from .resource import Model, Collection\n \n@@ -153,7 +154,7 @@\n resp = self.client.api.create_network(name, *args, **kwargs)\n return self.get(resp['Id'])\n \n- def get(self, network_id):\n+ def get(self, network_id, *args, **kwargs):\n \"\"\"\n Get a network by its ID.\n \n@@ -175,7 +176,9 @@\n If the server returns an error.\n \n \"\"\"\n- return self.prepare_model(self.client.api.inspect_network(network_id))\n+ return self.prepare_model(\n+ self.client.api.inspect_network(network_id, *args, **kwargs)\n+ )\n \n def list(self, *args, **kwargs):\n \"\"\"\n@@ -184,6 +187,13 @@\n Args:\n names (:py:class:`list`): List of names to filter by.\n ids (:py:class:`list`): List of ids to filter by.\n+ filters (dict): Filters to be processed on the network list.\n+ Available filters:\n+ - ``driver=[<driver-name>]`` Matches a network's driver.\n+ - ``label=[<key>]`` or ``label=[<key>=<value>]``.\n+ - ``type=[\"custom\"|\"builtin\"]`` Filters networks by type.\n+ greedy (bool): Fetch more details for each network individually.\n+ You might want this to get the containers attached to them.\n \n Returns:\n (list of :py:class:`Network`) The networks on the server.\n@@ -192,8 +202,13 @@\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n+ greedy = kwargs.pop('greedy', False)\n resp = self.client.api.networks(*args, **kwargs)\n- return [self.prepare_model(item) for item in resp]\n+ networks = [self.prepare_model(item) for item in resp]\n+ if greedy and version_gte(self.client.api._version, '1.28'):\n+ for net in networks:\n+ net.reload()\n+ return networks\n \n def prune(self, filters=None):\n self.client.api.prune_networks(filters=filters)\n", "issue": "client.networks.containers() is always empty\n```\r\n>>> import docker\r\n>>> d = docker.from_env()\r\n>>> [n.name for n in d.networks.list()]\r\n['pedlintegrationtests_default', 'bridge', 'host', 'none']\r\n>>> [n.containers for n in d.networks.list()]\r\n[[], [], [], []]\r\n```\r\n\r\n```\r\n$ docker network inspect pedlintegrationtests_default | jq '.[0].Containers | length'\r\n4\r\n```\r\n\r\n```\r\n$ pip freeze | grep docker && python --version && docker version\r\ndocker==2.5.1\r\ndocker-compose==1.16.1\r\ndocker-pycreds==0.2.1\r\ndockerpty==0.4.1\r\nnvidia-docker-compose==0.1.4\r\nPython 3.5.2\r\nClient:\r\n Version: 17.09.0-ce\r\n API version: 1.32\r\n Go version: go1.8.3\r\n Git commit: afdb6d4\r\n Built: Tue Sep 26 22:42:18 2017\r\n OS/Arch: linux/amd64\r\n\r\nServer:\r\n Version: 17.09.0-ce\r\n API version: 1.32 (minimum version 1.12)\r\n Go version: go1.8.3\r\n Git commit: afdb6d4\r\n Built: Tue Sep 26 22:40:56 2017\r\n OS/Arch: linux/amd64\r\n Experimental: false\r\n```\n", "before_files": [{"content": "from ..api import APIClient\nfrom .containers import Container\nfrom .resource import Model, Collection\n\n\nclass Network(Model):\n \"\"\"\n A Docker network.\n \"\"\"\n @property\n def name(self):\n \"\"\"\n The name of the network.\n \"\"\"\n return self.attrs.get('Name')\n\n @property\n def containers(self):\n \"\"\"\n The containers that are connected to the network, as a list of\n :py:class:`~docker.models.containers.Container` objects.\n \"\"\"\n return [\n self.client.containers.get(cid) for cid in\n (self.attrs.get('Containers') or 
{}).keys()\n ]\n\n def connect(self, container, *args, **kwargs):\n \"\"\"\n Connect a container to this network.\n\n Args:\n container (str): Container to connect to this network, as either\n an ID, name, or :py:class:`~docker.models.containers.Container`\n object.\n aliases (:py:class:`list`): A list of aliases for this endpoint.\n Names in that list can be used within the network to reach the\n container. Defaults to ``None``.\n links (:py:class:`list`): A list of links for this endpoint.\n Containers declared in this list will be linkedto this\n container. Defaults to ``None``.\n ipv4_address (str): The IP address of this container on the\n network, using the IPv4 protocol. Defaults to ``None``.\n ipv6_address (str): The IP address of this container on the\n network, using the IPv6 protocol. Defaults to ``None``.\n link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)\n addresses.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n if isinstance(container, Container):\n container = container.id\n return self.client.api.connect_container_to_network(\n container, self.id, *args, **kwargs\n )\n\n def disconnect(self, container, *args, **kwargs):\n \"\"\"\n Disconnect a container from this network.\n\n Args:\n container (str): Container to disconnect from this network, as\n either an ID, name, or\n :py:class:`~docker.models.containers.Container` object.\n force (bool): Force the container to disconnect from a network.\n Default: ``False``\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n if isinstance(container, Container):\n container = container.id\n return self.client.api.disconnect_container_from_network(\n container, self.id, *args, **kwargs\n )\n\n def remove(self):\n \"\"\"\n Remove this network.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.client.api.remove_network(self.id)\n\n\nclass NetworkCollection(Collection):\n \"\"\"\n Networks on the Docker server.\n \"\"\"\n model = Network\n\n def create(self, name, *args, **kwargs):\n \"\"\"\n Create a network. Similar to the ``docker network create``.\n\n Args:\n name (str): Name of the network\n driver (str): Name of the driver used to create the network\n options (dict): Driver options as a key-value dictionary\n ipam (IPAMConfig): Optional custom IP scheme for the network.\n check_duplicate (bool): Request daemon to check for networks with\n same name. Default: ``None``.\n internal (bool): Restrict external access to the network. Default\n ``False``.\n labels (dict): Map of labels to set on the network. Default\n ``None``.\n enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.\n attachable (bool): If enabled, and the network is in the global\n scope, non-service containers on worker nodes will be able to\n connect to the network.\n scope (str): Specify the network's scope (``local``, ``global`` or\n ``swarm``)\n ingress (bool): If set, create an ingress network which provides\n the routing-mesh in swarm mode.\n\n Returns:\n (:py:class:`Network`): The network that was created.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n A network using the bridge driver:\n\n >>> client.networks.create(\"network1\", driver=\"bridge\")\n\n You can also create more advanced networks with custom IPAM\n configurations. For example, setting the subnet to\n ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.\n\n .. 
code-block:: python\n\n >>> ipam_pool = docker.types.IPAMPool(\n subnet='192.168.52.0/24',\n gateway='192.168.52.254'\n )\n >>> ipam_config = docker.types.IPAMConfig(\n pool_configs=[ipam_pool]\n )\n >>> client.networks.create(\n \"network1\",\n driver=\"bridge\",\n ipam=ipam_config\n )\n\n \"\"\"\n resp = self.client.api.create_network(name, *args, **kwargs)\n return self.get(resp['Id'])\n\n def get(self, network_id):\n \"\"\"\n Get a network by its ID.\n\n Args:\n network_id (str): The ID of the network.\n verbose (bool): Retrieve the service details across the cluster in\n swarm mode.\n scope (str): Filter the network by scope (``swarm``, ``global``\n or ``local``).\n\n Returns:\n (:py:class:`Network`) The network.\n\n Raises:\n :py:class:`docker.errors.NotFound`\n If the network does not exist.\n\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n \"\"\"\n return self.prepare_model(self.client.api.inspect_network(network_id))\n\n def list(self, *args, **kwargs):\n \"\"\"\n List networks. Similar to the ``docker networks ls`` command.\n\n Args:\n names (:py:class:`list`): List of names to filter by.\n ids (:py:class:`list`): List of ids to filter by.\n\n Returns:\n (list of :py:class:`Network`) The networks on the server.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n resp = self.client.api.networks(*args, **kwargs)\n return [self.prepare_model(item) for item in resp]\n\n def prune(self, filters=None):\n self.client.api.prune_networks(filters=filters)\n prune.__doc__ = APIClient.prune_networks.__doc__\n", "path": "docker/models/networks.py"}]} | 2,862 | 546 |
gh_patches_debug_18388 | rasdani/github-patches | git_diff | pypa__pip-3399 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RuntimeWarning: Config variable 'Py_DEBUG' is unset
When I run pip (any argument) I get the following message:
```
c:\Program Files\Python34\Scripts>pip --version
c:\progra~1\python34\lib\site-packages\pip\pep425tags.py:89: RuntimeWarning: Config variable 'Py_DEBUG' is unset, Python ABI tag may be incorrect
warn=(impl == 'cp')):
c:\progra~1\python34\lib\site-packages\pip\pep425tags.py:93: RuntimeWarning: Config variable 'WITH_PYMALLOC' is unset, Python ABI tag may be incorrect
warn=(impl == 'cp')):
pip 8.0.0 from c:\progra~1\python34\lib\site-packages (python 3.4)
```
I think something is wrong.
</issue>
<code>
[start of pip/pep425tags.py]
1 """Generate and work with PEP 425 Compatibility Tags."""
2 from __future__ import absolute_import
3
4 import re
5 import sys
6 import warnings
7 import platform
8
9 try:
10 import sysconfig
11 except ImportError: # pragma nocover
12 # Python < 2.7
13 import distutils.sysconfig as sysconfig
14 import distutils.util
15
16 _osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)')
17
18
19 def get_config_var(var):
20 try:
21 return sysconfig.get_config_var(var)
22 except IOError as e: # Issue #1074
23 warnings.warn("{0}".format(e), RuntimeWarning)
24 return None
25
26
27 def get_abbr_impl():
28 """Return abbreviated implementation name."""
29 if hasattr(sys, 'pypy_version_info'):
30 pyimpl = 'pp'
31 elif sys.platform.startswith('java'):
32 pyimpl = 'jy'
33 elif sys.platform == 'cli':
34 pyimpl = 'ip'
35 else:
36 pyimpl = 'cp'
37 return pyimpl
38
39
40 def get_impl_ver():
41 """Return implementation version."""
42 impl_ver = get_config_var("py_version_nodot")
43 if not impl_ver or get_abbr_impl() == 'pp':
44 impl_ver = ''.join(map(str, get_impl_version_info()))
45 return impl_ver
46
47
48 def get_impl_version_info():
49 """Return sys.version_info-like tuple for use in decrementing the minor
50 version."""
51 if get_abbr_impl() == 'pp':
52 # as per https://github.com/pypa/pip/issues/2882
53 return (sys.version_info[0], sys.pypy_version_info.major,
54 sys.pypy_version_info.minor)
55 else:
56 return sys.version_info[0], sys.version_info[1]
57
58
59 def get_impl_tag():
60 """
61 Returns the Tag for this specific implementation.
62 """
63 return "{0}{1}".format(get_abbr_impl(), get_impl_ver())
64
65
66 def get_flag(var, fallback, expected=True, warn=True):
67 """Use a fallback method for determining SOABI flags if the needed config
68 var is unset or unavailable."""
69 val = get_config_var(var)
70 if val is None:
71 if warn:
72 warnings.warn("Config variable '{0}' is unset, Python ABI tag may "
73 "be incorrect".format(var), RuntimeWarning, 2)
74 return fallback()
75 return val == expected
76
77
78 def get_abi_tag():
79 """Return the ABI tag based on SOABI (if available) or emulate SOABI
80 (CPython 2, PyPy)."""
81 soabi = get_config_var('SOABI')
82 impl = get_abbr_impl()
83 if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'):
84 d = ''
85 m = ''
86 u = ''
87 if get_flag('Py_DEBUG',
88 lambda: hasattr(sys, 'gettotalrefcount'),
89 warn=(impl == 'cp')):
90 d = 'd'
91 if get_flag('WITH_PYMALLOC',
92 lambda: impl == 'cp',
93 warn=(impl == 'cp')):
94 m = 'm'
95 if get_flag('Py_UNICODE_SIZE',
96 lambda: sys.maxunicode == 0x10ffff,
97 expected=4,
98 warn=(impl == 'cp' and
99 sys.version_info < (3, 3))) \
100 and sys.version_info < (3, 3):
101 u = 'u'
102 abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u)
103 elif soabi and soabi.startswith('cpython-'):
104 abi = 'cp' + soabi.split('-')[1]
105 elif soabi:
106 abi = soabi.replace('.', '_').replace('-', '_')
107 else:
108 abi = None
109 return abi
110
111
112 def get_platform():
113 """Return our platform name 'win32', 'linux_x86_64'"""
114 if sys.platform == 'darwin':
115 # distutils.util.get_platform() returns the release based on the value
116 # of MACOSX_DEPLOYMENT_TARGET on which Python was built, which may
117 # be signficantly older than the user's current machine.
118 release, _, machine = platform.mac_ver()
119 split_ver = release.split('.')
120 return 'macosx_{0}_{1}_{2}'.format(split_ver[0], split_ver[1], machine)
121 # XXX remove distutils dependency
122 return distutils.util.get_platform().replace('.', '_').replace('-', '_')
123
124
125 def get_supported(versions=None, noarch=False):
126 """Return a list of supported tags for each version specified in
127 `versions`.
128
129 :param versions: a list of string versions, of the form ["33", "32"],
130 or None. The first version will be assumed to support our ABI.
131 """
132 supported = []
133
134 # Versions must be given with respect to the preference
135 if versions is None:
136 versions = []
137 version_info = get_impl_version_info()
138 major = version_info[:-1]
139 # Support all previous minor Python versions.
140 for minor in range(version_info[-1], -1, -1):
141 versions.append(''.join(map(str, major + (minor,))))
142
143 impl = get_abbr_impl()
144
145 abis = []
146
147 abi = get_abi_tag()
148 if abi:
149 abis[0:0] = [abi]
150
151 abi3s = set()
152 import imp
153 for suffix in imp.get_suffixes():
154 if suffix[0].startswith('.abi'):
155 abi3s.add(suffix[0].split('.', 2)[1])
156
157 abis.extend(sorted(list(abi3s)))
158
159 abis.append('none')
160
161 if not noarch:
162 arch = get_platform()
163 if sys.platform == 'darwin':
164 # support macosx-10.6-intel on macosx-10.9-x86_64
165 match = _osx_arch_pat.match(arch)
166 if match:
167 name, major, minor, actual_arch = match.groups()
168 actual_arches = [actual_arch]
169 if actual_arch in ('i386', 'ppc'):
170 actual_arches.append('fat')
171 if actual_arch in ('i386', 'x86_64'):
172 actual_arches.append('intel')
173 if actual_arch in ('ppc64', 'x86_64'):
174 actual_arches.append('fat64')
175 if actual_arch in ('i386', 'ppc', 'x86_64'):
176 actual_arches.append('fat32')
177 if actual_arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
178 actual_arches.append('universal')
179 tpl = '{0}_{1}_%i_%s'.format(name, major)
180 arches = []
181 for m in reversed(range(int(minor) + 1)):
182 for a in actual_arches:
183 arches.append(tpl % (m, a))
184 else:
185 # arch pattern didn't match (?!)
186 arches = [arch]
187 else:
188 arches = [arch]
189
190 # Current version, current API (built specifically for our Python):
191 for abi in abis:
192 for arch in arches:
193 supported.append(('%s%s' % (impl, versions[0]), abi, arch))
194
195 # Has binaries, does not use the Python API:
196 supported.append(('py%s' % (versions[0][0]), 'none', arch))
197
198 # No abi / arch, but requires our implementation:
199 for i, version in enumerate(versions):
200 supported.append(('%s%s' % (impl, version), 'none', 'any'))
201 if i == 0:
202 # Tagged specifically as being cross-version compatible
203 # (with just the major version specified)
204 supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
205
206 # No abi / arch, generic Python
207 for i, version in enumerate(versions):
208 supported.append(('py%s' % (version,), 'none', 'any'))
209 if i == 0:
210 supported.append(('py%s' % (version[0]), 'none', 'any'))
211
212 return supported
213
214 supported_tags = get_supported()
215 supported_tags_noarch = get_supported(noarch=True)
216
217 implementation_tag = get_impl_tag()
218
[end of pip/pep425tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pip/pep425tags.py b/pip/pep425tags.py
--- a/pip/pep425tags.py
+++ b/pip/pep425tags.py
@@ -5,6 +5,7 @@
import sys
import warnings
import platform
+import logging
try:
import sysconfig
@@ -13,6 +14,10 @@
import distutils.sysconfig as sysconfig
import distutils.util
+
+logger = logging.getLogger(__name__)
+
+
_osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)')
@@ -69,8 +74,8 @@
val = get_config_var(var)
if val is None:
if warn:
- warnings.warn("Config variable '{0}' is unset, Python ABI tag may "
- "be incorrect".format(var), RuntimeWarning, 2)
+ logger.debug("Config variable '%s' is unset, Python ABI tag may "
+ "be incorrect", var)
return fallback()
return val == expected
| {"golden_diff": "diff --git a/pip/pep425tags.py b/pip/pep425tags.py\n--- a/pip/pep425tags.py\n+++ b/pip/pep425tags.py\n@@ -5,6 +5,7 @@\n import sys\n import warnings\n import platform\n+import logging\n \n try:\n import sysconfig\n@@ -13,6 +14,10 @@\n import distutils.sysconfig as sysconfig\n import distutils.util\n \n+\n+logger = logging.getLogger(__name__)\n+\n+\n _osx_arch_pat = re.compile(r'(.+)_(\\d+)_(\\d+)_(.+)')\n \n \n@@ -69,8 +74,8 @@\n val = get_config_var(var)\n if val is None:\n if warn:\n- warnings.warn(\"Config variable '{0}' is unset, Python ABI tag may \"\n- \"be incorrect\".format(var), RuntimeWarning, 2)\n+ logger.debug(\"Config variable '%s' is unset, Python ABI tag may \"\n+ \"be incorrect\", var)\n return fallback()\n return val == expected\n", "issue": "RuntimeWarning: Config variable 'Py_DEBUG' is unset\nWhen I run pip (any argument) I get the following message:\n\n```\nc:\\Program Files\\Python34\\Scripts>pip --version\nc:\\progra~1\\python34\\lib\\site-packages\\pip\\pep425tags.py:89: RuntimeWarning: Config variable 'Py_DEBUG' is unset, Python ABI tag may be incorrect\n warn=(impl == 'cp')):\nc:\\progra~1\\python34\\lib\\site-packages\\pip\\pep425tags.py:93: RuntimeWarning: Config variable 'WITH_PYMALLOC' is unset, Python ABI tag may be incorrect\n warn=(impl == 'cp')):\npip 8.0.0 from c:\\progra~1\\python34\\lib\\site-packages (python 3.4)\n```\n\nI think something is wrong.\n\n", "before_files": [{"content": "\"\"\"Generate and work with PEP 425 Compatibility Tags.\"\"\"\nfrom __future__ import absolute_import\n\nimport re\nimport sys\nimport warnings\nimport platform\n\ntry:\n import sysconfig\nexcept ImportError: # pragma nocover\n # Python < 2.7\n import distutils.sysconfig as sysconfig\nimport distutils.util\n\n_osx_arch_pat = re.compile(r'(.+)_(\\d+)_(\\d+)_(.+)')\n\n\ndef get_config_var(var):\n try:\n return sysconfig.get_config_var(var)\n except IOError as e: # Issue #1074\n warnings.warn(\"{0}\".format(e), RuntimeWarning)\n return None\n\n\ndef get_abbr_impl():\n \"\"\"Return abbreviated implementation name.\"\"\"\n if hasattr(sys, 'pypy_version_info'):\n pyimpl = 'pp'\n elif sys.platform.startswith('java'):\n pyimpl = 'jy'\n elif sys.platform == 'cli':\n pyimpl = 'ip'\n else:\n pyimpl = 'cp'\n return pyimpl\n\n\ndef get_impl_ver():\n \"\"\"Return implementation version.\"\"\"\n impl_ver = get_config_var(\"py_version_nodot\")\n if not impl_ver or get_abbr_impl() == 'pp':\n impl_ver = ''.join(map(str, get_impl_version_info()))\n return impl_ver\n\n\ndef get_impl_version_info():\n \"\"\"Return sys.version_info-like tuple for use in decrementing the minor\n version.\"\"\"\n if get_abbr_impl() == 'pp':\n # as per https://github.com/pypa/pip/issues/2882\n return (sys.version_info[0], sys.pypy_version_info.major,\n sys.pypy_version_info.minor)\n else:\n return sys.version_info[0], sys.version_info[1]\n\n\ndef get_impl_tag():\n \"\"\"\n Returns the Tag for this specific implementation.\n \"\"\"\n return \"{0}{1}\".format(get_abbr_impl(), get_impl_ver())\n\n\ndef get_flag(var, fallback, expected=True, warn=True):\n \"\"\"Use a fallback method for determining SOABI flags if the needed config\n var is unset or unavailable.\"\"\"\n val = get_config_var(var)\n if val is None:\n if warn:\n warnings.warn(\"Config variable '{0}' is unset, Python ABI tag may \"\n \"be incorrect\".format(var), RuntimeWarning, 2)\n return fallback()\n return val == expected\n\n\ndef get_abi_tag():\n \"\"\"Return the ABI tag based on SOABI (if available) or emulate SOABI\n (CPython 2, 
PyPy).\"\"\"\n soabi = get_config_var('SOABI')\n impl = get_abbr_impl()\n if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'):\n d = ''\n m = ''\n u = ''\n if get_flag('Py_DEBUG',\n lambda: hasattr(sys, 'gettotalrefcount'),\n warn=(impl == 'cp')):\n d = 'd'\n if get_flag('WITH_PYMALLOC',\n lambda: impl == 'cp',\n warn=(impl == 'cp')):\n m = 'm'\n if get_flag('Py_UNICODE_SIZE',\n lambda: sys.maxunicode == 0x10ffff,\n expected=4,\n warn=(impl == 'cp' and\n sys.version_info < (3, 3))) \\\n and sys.version_info < (3, 3):\n u = 'u'\n abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u)\n elif soabi and soabi.startswith('cpython-'):\n abi = 'cp' + soabi.split('-')[1]\n elif soabi:\n abi = soabi.replace('.', '_').replace('-', '_')\n else:\n abi = None\n return abi\n\n\ndef get_platform():\n \"\"\"Return our platform name 'win32', 'linux_x86_64'\"\"\"\n if sys.platform == 'darwin':\n # distutils.util.get_platform() returns the release based on the value\n # of MACOSX_DEPLOYMENT_TARGET on which Python was built, which may\n # be signficantly older than the user's current machine.\n release, _, machine = platform.mac_ver()\n split_ver = release.split('.')\n return 'macosx_{0}_{1}_{2}'.format(split_ver[0], split_ver[1], machine)\n # XXX remove distutils dependency\n return distutils.util.get_platform().replace('.', '_').replace('-', '_')\n\n\ndef get_supported(versions=None, noarch=False):\n \"\"\"Return a list of supported tags for each version specified in\n `versions`.\n\n :param versions: a list of string versions, of the form [\"33\", \"32\"],\n or None. The first version will be assumed to support our ABI.\n \"\"\"\n supported = []\n\n # Versions must be given with respect to the preference\n if versions is None:\n versions = []\n version_info = get_impl_version_info()\n major = version_info[:-1]\n # Support all previous minor Python versions.\n for minor in range(version_info[-1], -1, -1):\n versions.append(''.join(map(str, major + (minor,))))\n\n impl = get_abbr_impl()\n\n abis = []\n\n abi = get_abi_tag()\n if abi:\n abis[0:0] = [abi]\n\n abi3s = set()\n import imp\n for suffix in imp.get_suffixes():\n if suffix[0].startswith('.abi'):\n abi3s.add(suffix[0].split('.', 2)[1])\n\n abis.extend(sorted(list(abi3s)))\n\n abis.append('none')\n\n if not noarch:\n arch = get_platform()\n if sys.platform == 'darwin':\n # support macosx-10.6-intel on macosx-10.9-x86_64\n match = _osx_arch_pat.match(arch)\n if match:\n name, major, minor, actual_arch = match.groups()\n actual_arches = [actual_arch]\n if actual_arch in ('i386', 'ppc'):\n actual_arches.append('fat')\n if actual_arch in ('i386', 'x86_64'):\n actual_arches.append('intel')\n if actual_arch in ('ppc64', 'x86_64'):\n actual_arches.append('fat64')\n if actual_arch in ('i386', 'ppc', 'x86_64'):\n actual_arches.append('fat32')\n if actual_arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):\n actual_arches.append('universal')\n tpl = '{0}_{1}_%i_%s'.format(name, major)\n arches = []\n for m in reversed(range(int(minor) + 1)):\n for a in actual_arches:\n arches.append(tpl % (m, a))\n else:\n # arch pattern didn't match (?!)\n arches = [arch]\n else:\n arches = [arch]\n\n # Current version, current API (built specifically for our Python):\n for abi in abis:\n for arch in arches:\n supported.append(('%s%s' % (impl, versions[0]), abi, arch))\n\n # Has binaries, does not use the Python API:\n supported.append(('py%s' % (versions[0][0]), 'none', arch))\n\n # No abi / arch, but requires our implementation:\n for i, version in 
enumerate(versions):\n supported.append(('%s%s' % (impl, version), 'none', 'any'))\n if i == 0:\n # Tagged specifically as being cross-version compatible\n # (with just the major version specified)\n supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))\n\n # No abi / arch, generic Python\n for i, version in enumerate(versions):\n supported.append(('py%s' % (version,), 'none', 'any'))\n if i == 0:\n supported.append(('py%s' % (version[0]), 'none', 'any'))\n\n return supported\n\nsupported_tags = get_supported()\nsupported_tags_noarch = get_supported(noarch=True)\n\nimplementation_tag = get_impl_tag()\n", "path": "pip/pep425tags.py"}]} | 3,127 | 244 |
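For the pip row above, a standalone sketch of the behaviour the patch changes. On official Windows CPython builds, `sysconfig` has no `Py_DEBUG` or `WITH_PYMALLOC` keys, so the fallback probes decide the ABI flags either way; the patch simply downgrades the noisy `RuntimeWarning` to a debug-level log line. The function body mirrors the patched code, while the logger name and `basicConfig` call are illustrative scaffolding.

```
# Sketch: get_flag() as it behaves after the patch, logging instead of warning.
import logging
import sys
import sysconfig

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("pep425tags_sketch")

def get_flag(var, fallback, expected=True, warn=True):
    val = sysconfig.get_config_var(var)
    if val is None:
        if warn:
            logger.debug("Config variable '%s' is unset, Python ABI tag may "
                         "be incorrect", var)
        return fallback()
    return val == expected

# Where the config vars are unset (as on the reporter's Windows build),
# the fallbacks decide; elsewhere the configured value is compared directly.
print(get_flag("Py_DEBUG", lambda: hasattr(sys, "gettotalrefcount")))
print(get_flag("WITH_PYMALLOC", lambda: True))
```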
gh_patches_debug_16894 | rasdani/github-patches | git_diff | svthalia__concrexit-2820 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Admin site doesnt show organizers
### Describe the bug
Organizers are not shown in the site admin
### How to reproduce
Steps to reproduce the behaviour:
1. Go to any event
2. See that the organizers field is empty
### Expected behaviour
there should be at least one organizer
### Additional context
multiple organizers broke things again
</issue>
<code>
[start of website/events/emails.py]
1 """The emails defined by the events package."""
2 from django.conf import settings
3 from django.core.mail import EmailMessage
4 from django.template.loader import get_template
5 from django.utils.translation import gettext_lazy as _
6
7
8 def notify_first_waiting(event):
9 """Send an email to the first person on the waiting list when someone cancels their registration.
10
11 :param event: the event
12 """
13 if (
14 event.max_participants is not None
15 and event.eventregistration_set.filter(date_cancelled=None).count()
16 > event.max_participants
17 ):
18 # Prepare email to send to the first person on the waiting list
19 first_waiting = event.eventregistration_set.filter(
20 date_cancelled=None
21 ).order_by("date")[event.max_participants]
22
23 text_template = get_template("events/member_email.txt")
24
25 subject = _("[THALIA] Notification about your registration for '{}'").format(
26 event.title
27 )
28
29 organiser_emails = [
30 organiser.contact_address
31 for organiser in event.organisers.all()
32 if organiser.contact_address is not None
33 ]
34 text_message = text_template.render(
35 {
36 "event": event,
37 "registration": first_waiting,
38 "name": first_waiting.name or first_waiting.member.first_name,
39 "base_url": settings.BASE_URL,
40 "organisers": organiser_emails,
41 }
42 )
43
44 EmailMessage(subject, text_message, to=[first_waiting.email]).send()
45
46
47 def notify_organiser(event, registration):
48 """Send an email to the organiser of the event if someone cancels their registration.
49
50 :param event: the event
51 :param registration: the registration that was cancelled
52 """
53 if not event.organisers.exists():
54 return
55
56 text_template = get_template("events/organiser_email.txt")
57 subject = f"Registration for {event.title} cancelled by member"
58 text_message = text_template.render({"event": event, "registration": registration})
59
60 EmailMessage(
61 subject,
62 text_message,
63 to=[
64 organiser.contact_mailinglist.name + "@" + settings.SITE_DOMAIN
65 for organiser in event.organisers.all()
66 ],
67 ).send()
68
69
70 def notify_waiting(event, registration):
71 text_template = get_template("events/more_places_email.txt")
72 subject = _("[THALIA] Notification about your registration for '{}'").format(
73 event.title
74 )
75 text_message = text_template.render(
76 {
77 "event": event,
78 "registration": registration,
79 "name": registration.name or registration.member.first_name,
80 "base_url": settings.BASE_URL,
81 }
82 )
83 EmailMessage(subject, text_message, to=[registration.email]).send()
84
[end of website/events/emails.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/events/emails.py b/website/events/emails.py
--- a/website/events/emails.py
+++ b/website/events/emails.py
@@ -72,12 +72,21 @@
subject = _("[THALIA] Notification about your registration for '{}'").format(
event.title
)
+
+ organiser_emails = [
+ organiser.contact_address
+ for organiser in event.organisers.all()
+ if organiser.contact_address is not None
+ ]
+
text_message = text_template.render(
{
"event": event,
"registration": registration,
"name": registration.name or registration.member.first_name,
"base_url": settings.BASE_URL,
+ "organisers": organiser_emails,
}
)
+
EmailMessage(subject, text_message, to=[registration.email]).send()
| {"golden_diff": "diff --git a/website/events/emails.py b/website/events/emails.py\n--- a/website/events/emails.py\n+++ b/website/events/emails.py\n@@ -72,12 +72,21 @@\n subject = _(\"[THALIA] Notification about your registration for '{}'\").format(\n event.title\n )\n+\n+ organiser_emails = [\n+ organiser.contact_address\n+ for organiser in event.organisers.all()\n+ if organiser.contact_address is not None\n+ ]\n+\n text_message = text_template.render(\n {\n \"event\": event,\n \"registration\": registration,\n \"name\": registration.name or registration.member.first_name,\n \"base_url\": settings.BASE_URL,\n+ \"organisers\": organiser_emails,\n }\n )\n+\n EmailMessage(subject, text_message, to=[registration.email]).send()\n", "issue": "Admin site doesnt show organizers\n### Describe the bug\r\nOrganizers are not shown in the site admin\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Go to any event\r\n2. See that the organizers field is empty\r\n\r\n### Expected behaviour\r\nthere should be at least one organizer\r\n\r\n### Additional context\r\nmultiple organizers broke things again\r\n\nAdmin site doesnt show organizers\n### Describe the bug\r\nOrganizers are not shown in the site admin\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Go to any event\r\n2. See that the organizers field is empty\r\n\r\n### Expected behaviour\r\nthere should be at least one organizer\r\n\r\n### Additional context\r\nmultiple organizers broke things again\r\n\n", "before_files": [{"content": "\"\"\"The emails defined by the events package.\"\"\"\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import get_template\nfrom django.utils.translation import gettext_lazy as _\n\n\ndef notify_first_waiting(event):\n \"\"\"Send an email to the first person on the waiting list when someone cancels their registration.\n\n :param event: the event\n \"\"\"\n if (\n event.max_participants is not None\n and event.eventregistration_set.filter(date_cancelled=None).count()\n > event.max_participants\n ):\n # Prepare email to send to the first person on the waiting list\n first_waiting = event.eventregistration_set.filter(\n date_cancelled=None\n ).order_by(\"date\")[event.max_participants]\n\n text_template = get_template(\"events/member_email.txt\")\n\n subject = _(\"[THALIA] Notification about your registration for '{}'\").format(\n event.title\n )\n\n organiser_emails = [\n organiser.contact_address\n for organiser in event.organisers.all()\n if organiser.contact_address is not None\n ]\n text_message = text_template.render(\n {\n \"event\": event,\n \"registration\": first_waiting,\n \"name\": first_waiting.name or first_waiting.member.first_name,\n \"base_url\": settings.BASE_URL,\n \"organisers\": organiser_emails,\n }\n )\n\n EmailMessage(subject, text_message, to=[first_waiting.email]).send()\n\n\ndef notify_organiser(event, registration):\n \"\"\"Send an email to the organiser of the event if someone cancels their registration.\n\n :param event: the event\n :param registration: the registration that was cancelled\n \"\"\"\n if not event.organisers.exists():\n return\n\n text_template = get_template(\"events/organiser_email.txt\")\n subject = f\"Registration for {event.title} cancelled by member\"\n text_message = text_template.render({\"event\": event, \"registration\": registration})\n\n EmailMessage(\n subject,\n text_message,\n to=[\n organiser.contact_mailinglist.name + \"@\" + settings.SITE_DOMAIN\n for 
organiser in event.organisers.all()\n ],\n ).send()\n\n\ndef notify_waiting(event, registration):\n text_template = get_template(\"events/more_places_email.txt\")\n subject = _(\"[THALIA] Notification about your registration for '{}'\").format(\n event.title\n )\n text_message = text_template.render(\n {\n \"event\": event,\n \"registration\": registration,\n \"name\": registration.name or registration.member.first_name,\n \"base_url\": settings.BASE_URL,\n }\n )\n EmailMessage(subject, text_message, to=[registration.email]).send()\n", "path": "website/events/emails.py"}]} | 1,409 | 188 |
gh_patches_debug_12514 | rasdani/github-patches | git_diff | flairNLP__flair-587 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
One redundant code line
in the `TextClassifier` class:

line 34: `self.document_embeddings = document_embeddings`

line 38: `self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings`
</issue>
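The duplication is visible in the constructor below: line 34 assigns `document_embeddings` without a type hint and line 38 immediately re-assigns it with one. A sketch of how the start of `__init__` would read with the redundant line dropped (fragment only, not standalone code; everything else in the method is unchanged):

```python
super(TextClassifier, self).__init__()

# keep only the annotated assignment; the earlier bare assignment is redundant
self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings
self.label_dictionary: Dictionary = label_dictionary
self.multi_label = multi_label
```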
<code>
[start of flair/models/text_classification_model.py]
1 import warnings
2 import logging
3 from pathlib import Path
4 from typing import List, Union
5
6 import torch
7 import torch.nn as nn
8
9 import flair.nn
10 import flair.embeddings
11 from flair.data import Dictionary, Sentence, Label
12 from flair.file_utils import cached_path
13 from flair.training_utils import convert_labels_to_one_hot, clear_embeddings
14
15
16 log = logging.getLogger('flair')
17
18
19 class TextClassifier(flair.nn.Model):
20 """
21 Text Classification Model
22 The model takes word embeddings, puts them into an LSTM to obtain a text representation, and puts the
23 text representation in the end into a linear layer to get the actual class label.
24 The model can handle single and multi class data sets.
25 """
26
27 def __init__(self,
28 document_embeddings: flair.embeddings.DocumentEmbeddings,
29 label_dictionary: Dictionary,
30 multi_label: bool):
31
32 super(TextClassifier, self).__init__()
33
34 self.document_embeddings = document_embeddings
35 self.label_dictionary: Dictionary = label_dictionary
36 self.multi_label = multi_label
37
38 self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings
39
40 self.decoder = nn.Linear(self.document_embeddings.embedding_length, len(self.label_dictionary))
41
42 self._init_weights()
43
44 if multi_label:
45 self.loss_function = nn.BCELoss()
46 else:
47 self.loss_function = nn.CrossEntropyLoss()
48
49 # auto-spawn on GPU if available
50 self.to(flair.device)
51
52 def _init_weights(self):
53 nn.init.xavier_uniform_(self.decoder.weight)
54
55 def forward(self, sentences) -> List[List[float]]:
56 self.document_embeddings.embed(sentences)
57
58 text_embedding_list = [sentence.get_embedding().unsqueeze(0) for sentence in sentences]
59 text_embedding_tensor = torch.cat(text_embedding_list, 0).to(flair.device)
60
61 label_scores = self.decoder(text_embedding_tensor)
62
63 return label_scores
64
65 def save(self, model_file: Union[str, Path]):
66 """
67 Saves the current model to the provided file.
68 :param model_file: the model file
69 """
70 model_state = {
71 'state_dict': self.state_dict(),
72 'document_embeddings': self.document_embeddings,
73 'label_dictionary': self.label_dictionary,
74 'multi_label': self.multi_label,
75 }
76 torch.save(model_state, str(model_file), pickle_protocol=4)
77
78 def save_checkpoint(self, model_file: Union[str, Path], optimizer_state: dict, scheduler_state: dict, epoch: int, loss: float):
79 """
80 Saves the current model to the provided file.
81 :param model_file: the model file
82 """
83 model_state = {
84 'state_dict': self.state_dict(),
85 'document_embeddings': self.document_embeddings,
86 'label_dictionary': self.label_dictionary,
87 'multi_label': self.multi_label,
88 'optimizer_state_dict': optimizer_state,
89 'scheduler_state_dict': scheduler_state,
90 'epoch': epoch,
91 'loss': loss
92 }
93 torch.save(model_state, str(model_file), pickle_protocol=4)
94
95 @classmethod
96 def load_from_file(cls, model_file: Union[str, Path]):
97 """
98 Loads the model from the given file.
99 :param model_file: the model file
100 :return: the loaded text classifier model
101 """
102 state = TextClassifier._load_state(model_file)
103
104 model = TextClassifier(
105 document_embeddings=state['document_embeddings'],
106 label_dictionary=state['label_dictionary'],
107 multi_label=state['multi_label']
108 )
109 model.load_state_dict(state['state_dict'])
110 model.eval()
111 model.to(flair.device)
112
113 return model
114
115 @classmethod
116 def load_checkpoint(cls, model_file: Union[str, Path]):
117 state = TextClassifier._load_state(model_file)
118 model = TextClassifier.load_from_file(model_file)
119
120 epoch = state['epoch'] if 'epoch' in state else None
121 loss = state['loss'] if 'loss' in state else None
122 optimizer_state_dict = state['optimizer_state_dict'] if 'optimizer_state_dict' in state else None
123 scheduler_state_dict = state['scheduler_state_dict'] if 'scheduler_state_dict' in state else None
124
125 return {
126 'model': model, 'epoch': epoch, 'loss': loss,
127 'optimizer_state_dict': optimizer_state_dict, 'scheduler_state_dict': scheduler_state_dict
128 }
129
130 @classmethod
131 def _load_state(cls, model_file: Union[str, Path]):
132 # ATTENTION: suppressing torch serialization warnings. This needs to be taken out once we sort out recursive
133 # serialization of torch objects
134 # https://docs.python.org/3/library/warnings.html#temporarily-suppressing-warnings
135 with warnings.catch_warnings():
136 warnings.filterwarnings("ignore")
137 # load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups
138 # see https://github.com/zalandoresearch/flair/issues/351
139 f = flair.file_utils.load_big_file(str(model_file))
140 state = torch.load(f, map_location=flair.device)
141 return state
142
143 def forward_loss(self, sentences: Union[List[Sentence], Sentence]) -> torch.tensor:
144 scores = self.forward(sentences)
145 return self._calculate_loss(scores, sentences)
146
147 def forward_labels_and_loss(self, sentences: Union[Sentence, List[Sentence]]) -> (List[List[Label]], torch.tensor):
148 scores = self.forward(sentences)
149 labels = self._obtain_labels(scores)
150 loss = self._calculate_loss(scores, sentences)
151 return labels, loss
152
153 def predict(self, sentences: Union[Sentence, List[Sentence]], mini_batch_size: int = 32) -> List[Sentence]:
154 """
155 Predicts the class labels for the given sentences. The labels are directly added to the sentences.
156 :param sentences: list of sentences
157 :param mini_batch_size: mini batch size to use
158 :return: the list of sentences containing the labels
159 """
160 with torch.no_grad():
161 if type(sentences) is Sentence:
162 sentences = [sentences]
163
164 filtered_sentences = self._filter_empty_sentences(sentences)
165
166 batches = [filtered_sentences[x:x + mini_batch_size] for x in range(0, len(filtered_sentences), mini_batch_size)]
167
168 for batch in batches:
169 scores = self.forward(batch)
170 predicted_labels = self._obtain_labels(scores)
171
172 for (sentence, labels) in zip(batch, predicted_labels):
173 sentence.labels = labels
174
175 clear_embeddings(batch)
176
177 return sentences
178
179 @staticmethod
180 def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]:
181 filtered_sentences = [sentence for sentence in sentences if sentence.tokens]
182 if len(sentences) != len(filtered_sentences):
183 log.warning('Ignore {} sentence(s) with no tokens.'.format(len(sentences) - len(filtered_sentences)))
184 return filtered_sentences
185
186 def _calculate_loss(self, scores: List[List[float]], sentences: List[Sentence]) -> float:
187 """
188 Calculates the loss.
189 :param scores: the prediction scores from the model
190 :param sentences: list of sentences
191 :return: loss value
192 """
193 if self.multi_label:
194 return self._calculate_multi_label_loss(scores, sentences)
195
196 return self._calculate_single_label_loss(scores, sentences)
197
198 def _obtain_labels(self, scores: List[List[float]]) -> List[List[Label]]:
199 """
200 Predicts the labels of sentences.
201 :param scores: the prediction scores from the model
202 :return: list of predicted labels
203 """
204
205 if self.multi_label:
206 return [self._get_multi_label(s) for s in scores]
207
208 return [self._get_single_label(s) for s in scores]
209
210 def _get_multi_label(self, label_scores) -> List[Label]:
211 labels = []
212
213 sigmoid = torch.nn.Sigmoid()
214
215 results = list(map(lambda x: sigmoid(x), label_scores))
216 for idx, conf in enumerate(results):
217 if conf > 0.5:
218 label = self.label_dictionary.get_item_for_index(idx)
219 labels.append(Label(label, conf.item()))
220
221 return labels
222
223 def _get_single_label(self, label_scores) -> List[Label]:
224 conf, idx = torch.max(label_scores, 0)
225 label = self.label_dictionary.get_item_for_index(idx.item())
226
227 return [Label(label, conf.item())]
228
229 def _calculate_multi_label_loss(self, label_scores, sentences: List[Sentence]) -> float:
230 sigmoid = nn.Sigmoid()
231 return self.loss_function(sigmoid(label_scores), self._labels_to_one_hot(sentences))
232
233 def _calculate_single_label_loss(self, label_scores, sentences: List[Sentence]) -> float:
234 return self.loss_function(label_scores, self._labels_to_indices(sentences))
235
236 def _labels_to_one_hot(self, sentences: List[Sentence]):
237 label_list = [sentence.get_label_names() for sentence in sentences]
238 one_hot = convert_labels_to_one_hot(label_list, self.label_dictionary)
239 one_hot = [torch.FloatTensor(l).unsqueeze(0) for l in one_hot]
240 one_hot = torch.cat(one_hot, 0).to(flair.device)
241 return one_hot
242
243 def _labels_to_indices(self, sentences: List[Sentence]):
244 indices = [
245 torch.LongTensor([self.label_dictionary.get_idx_for_item(label.value) for label in sentence.labels])
246 for sentence in sentences
247 ]
248
249 vec = torch.cat(indices, 0).to(flair.device)
250
251 return vec
252
253 @staticmethod
254 def load(model: str):
255 model_file = None
256 aws_resource_path = 'https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/models-v0.4'
257 cache_dir = Path('models')
258
259 if model.lower() == 'de-offensive-language':
260 base_path = '/'.join([aws_resource_path, 'TEXT-CLASSIFICATION_germ-eval-2018_task-1',
261 'germ-eval-2018-task-1.pt'])
262 model_file = cached_path(base_path, cache_dir=cache_dir)
263
264 elif model.lower() == 'en-sentiment':
265 base_path = '/'.join([aws_resource_path, 'TEXT-CLASSIFICATION_imdb', 'imdb.pt'])
266 model_file = cached_path(base_path, cache_dir=cache_dir)
267
268 if model_file is not None:
269 return TextClassifier.load_from_file(model_file)
270
[end of flair/models/text_classification_model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flair/models/text_classification_model.py b/flair/models/text_classification_model.py
--- a/flair/models/text_classification_model.py
+++ b/flair/models/text_classification_model.py
@@ -31,12 +31,10 @@
super(TextClassifier, self).__init__()
- self.document_embeddings = document_embeddings
+ self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings
self.label_dictionary: Dictionary = label_dictionary
self.multi_label = multi_label
- self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings
-
self.decoder = nn.Linear(self.document_embeddings.embedding_length, len(self.label_dictionary))
self._init_weights()
| {"golden_diff": "diff --git a/flair/models/text_classification_model.py b/flair/models/text_classification_model.py\n--- a/flair/models/text_classification_model.py\n+++ b/flair/models/text_classification_model.py\n@@ -31,12 +31,10 @@\n \n super(TextClassifier, self).__init__()\n \n- self.document_embeddings = document_embeddings\n+ self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings\n self.label_dictionary: Dictionary = label_dictionary\n self.multi_label = multi_label\n \n- self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings\n-\n self.decoder = nn.Linear(self.document_embeddings.embedding_length, len(self.label_dictionary))\n \n self._init_weights()\n", "issue": "one redundant code line\nin TextClassifier class\r\n`\r\n34 self.document_embeddings = document_embeddings\r\n`\r\n`\r\n38 self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings\r\n`\n", "before_files": [{"content": "import warnings\nimport logging\nfrom pathlib import Path\nfrom typing import List, Union\n\nimport torch\nimport torch.nn as nn\n\nimport flair.nn\nimport flair.embeddings\nfrom flair.data import Dictionary, Sentence, Label\nfrom flair.file_utils import cached_path\nfrom flair.training_utils import convert_labels_to_one_hot, clear_embeddings\n\n\nlog = logging.getLogger('flair')\n\n\nclass TextClassifier(flair.nn.Model):\n \"\"\"\n Text Classification Model\n The model takes word embeddings, puts them into an LSTM to obtain a text representation, and puts the\n text representation in the end into a linear layer to get the actual class label.\n The model can handle single and multi class data sets.\n \"\"\"\n\n def __init__(self,\n document_embeddings: flair.embeddings.DocumentEmbeddings,\n label_dictionary: Dictionary,\n multi_label: bool):\n\n super(TextClassifier, self).__init__()\n\n self.document_embeddings = document_embeddings\n self.label_dictionary: Dictionary = label_dictionary\n self.multi_label = multi_label\n\n self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings\n\n self.decoder = nn.Linear(self.document_embeddings.embedding_length, len(self.label_dictionary))\n\n self._init_weights()\n\n if multi_label:\n self.loss_function = nn.BCELoss()\n else:\n self.loss_function = nn.CrossEntropyLoss()\n\n # auto-spawn on GPU if available\n self.to(flair.device)\n\n def _init_weights(self):\n nn.init.xavier_uniform_(self.decoder.weight)\n\n def forward(self, sentences) -> List[List[float]]:\n self.document_embeddings.embed(sentences)\n\n text_embedding_list = [sentence.get_embedding().unsqueeze(0) for sentence in sentences]\n text_embedding_tensor = torch.cat(text_embedding_list, 0).to(flair.device)\n\n label_scores = self.decoder(text_embedding_tensor)\n\n return label_scores\n\n def save(self, model_file: Union[str, Path]):\n \"\"\"\n Saves the current model to the provided file.\n :param model_file: the model file\n \"\"\"\n model_state = {\n 'state_dict': self.state_dict(),\n 'document_embeddings': self.document_embeddings,\n 'label_dictionary': self.label_dictionary,\n 'multi_label': self.multi_label,\n }\n torch.save(model_state, str(model_file), pickle_protocol=4)\n\n def save_checkpoint(self, model_file: Union[str, Path], optimizer_state: dict, scheduler_state: dict, epoch: int, loss: float):\n \"\"\"\n Saves the current model to the provided file.\n :param model_file: the model file\n \"\"\"\n model_state = {\n 'state_dict': self.state_dict(),\n 
'document_embeddings': self.document_embeddings,\n 'label_dictionary': self.label_dictionary,\n 'multi_label': self.multi_label,\n 'optimizer_state_dict': optimizer_state,\n 'scheduler_state_dict': scheduler_state,\n 'epoch': epoch,\n 'loss': loss\n }\n torch.save(model_state, str(model_file), pickle_protocol=4)\n\n @classmethod\n def load_from_file(cls, model_file: Union[str, Path]):\n \"\"\"\n Loads the model from the given file.\n :param model_file: the model file\n :return: the loaded text classifier model\n \"\"\"\n state = TextClassifier._load_state(model_file)\n\n model = TextClassifier(\n document_embeddings=state['document_embeddings'],\n label_dictionary=state['label_dictionary'],\n multi_label=state['multi_label']\n )\n model.load_state_dict(state['state_dict'])\n model.eval()\n model.to(flair.device)\n\n return model\n\n @classmethod\n def load_checkpoint(cls, model_file: Union[str, Path]):\n state = TextClassifier._load_state(model_file)\n model = TextClassifier.load_from_file(model_file)\n\n epoch = state['epoch'] if 'epoch' in state else None\n loss = state['loss'] if 'loss' in state else None\n optimizer_state_dict = state['optimizer_state_dict'] if 'optimizer_state_dict' in state else None\n scheduler_state_dict = state['scheduler_state_dict'] if 'scheduler_state_dict' in state else None\n\n return {\n 'model': model, 'epoch': epoch, 'loss': loss,\n 'optimizer_state_dict': optimizer_state_dict, 'scheduler_state_dict': scheduler_state_dict\n }\n\n @classmethod\n def _load_state(cls, model_file: Union[str, Path]):\n # ATTENTION: suppressing torch serialization warnings. This needs to be taken out once we sort out recursive\n # serialization of torch objects\n # https://docs.python.org/3/library/warnings.html#temporarily-suppressing-warnings\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n # load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups\n # see https://github.com/zalandoresearch/flair/issues/351\n f = flair.file_utils.load_big_file(str(model_file))\n state = torch.load(f, map_location=flair.device)\n return state\n\n def forward_loss(self, sentences: Union[List[Sentence], Sentence]) -> torch.tensor:\n scores = self.forward(sentences)\n return self._calculate_loss(scores, sentences)\n\n def forward_labels_and_loss(self, sentences: Union[Sentence, List[Sentence]]) -> (List[List[Label]], torch.tensor):\n scores = self.forward(sentences)\n labels = self._obtain_labels(scores)\n loss = self._calculate_loss(scores, sentences)\n return labels, loss\n\n def predict(self, sentences: Union[Sentence, List[Sentence]], mini_batch_size: int = 32) -> List[Sentence]:\n \"\"\"\n Predicts the class labels for the given sentences. 
The labels are directly added to the sentences.\n :param sentences: list of sentences\n :param mini_batch_size: mini batch size to use\n :return: the list of sentences containing the labels\n \"\"\"\n with torch.no_grad():\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n filtered_sentences = self._filter_empty_sentences(sentences)\n\n batches = [filtered_sentences[x:x + mini_batch_size] for x in range(0, len(filtered_sentences), mini_batch_size)]\n\n for batch in batches:\n scores = self.forward(batch)\n predicted_labels = self._obtain_labels(scores)\n\n for (sentence, labels) in zip(batch, predicted_labels):\n sentence.labels = labels\n\n clear_embeddings(batch)\n\n return sentences\n\n @staticmethod\n def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]:\n filtered_sentences = [sentence for sentence in sentences if sentence.tokens]\n if len(sentences) != len(filtered_sentences):\n log.warning('Ignore {} sentence(s) with no tokens.'.format(len(sentences) - len(filtered_sentences)))\n return filtered_sentences\n\n def _calculate_loss(self, scores: List[List[float]], sentences: List[Sentence]) -> float:\n \"\"\"\n Calculates the loss.\n :param scores: the prediction scores from the model\n :param sentences: list of sentences\n :return: loss value\n \"\"\"\n if self.multi_label:\n return self._calculate_multi_label_loss(scores, sentences)\n\n return self._calculate_single_label_loss(scores, sentences)\n\n def _obtain_labels(self, scores: List[List[float]]) -> List[List[Label]]:\n \"\"\"\n Predicts the labels of sentences.\n :param scores: the prediction scores from the model\n :return: list of predicted labels\n \"\"\"\n\n if self.multi_label:\n return [self._get_multi_label(s) for s in scores]\n\n return [self._get_single_label(s) for s in scores]\n\n def _get_multi_label(self, label_scores) -> List[Label]:\n labels = []\n\n sigmoid = torch.nn.Sigmoid()\n\n results = list(map(lambda x: sigmoid(x), label_scores))\n for idx, conf in enumerate(results):\n if conf > 0.5:\n label = self.label_dictionary.get_item_for_index(idx)\n labels.append(Label(label, conf.item()))\n\n return labels\n\n def _get_single_label(self, label_scores) -> List[Label]:\n conf, idx = torch.max(label_scores, 0)\n label = self.label_dictionary.get_item_for_index(idx.item())\n\n return [Label(label, conf.item())]\n\n def _calculate_multi_label_loss(self, label_scores, sentences: List[Sentence]) -> float:\n sigmoid = nn.Sigmoid()\n return self.loss_function(sigmoid(label_scores), self._labels_to_one_hot(sentences))\n\n def _calculate_single_label_loss(self, label_scores, sentences: List[Sentence]) -> float:\n return self.loss_function(label_scores, self._labels_to_indices(sentences))\n\n def _labels_to_one_hot(self, sentences: List[Sentence]):\n label_list = [sentence.get_label_names() for sentence in sentences]\n one_hot = convert_labels_to_one_hot(label_list, self.label_dictionary)\n one_hot = [torch.FloatTensor(l).unsqueeze(0) for l in one_hot]\n one_hot = torch.cat(one_hot, 0).to(flair.device)\n return one_hot\n\n def _labels_to_indices(self, sentences: List[Sentence]):\n indices = [\n torch.LongTensor([self.label_dictionary.get_idx_for_item(label.value) for label in sentence.labels])\n for sentence in sentences\n ]\n\n vec = torch.cat(indices, 0).to(flair.device)\n\n return vec\n\n @staticmethod\n def load(model: str):\n model_file = None\n aws_resource_path = 'https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/models-v0.4'\n cache_dir = Path('models')\n\n if model.lower() == 
'de-offensive-language':\n base_path = '/'.join([aws_resource_path, 'TEXT-CLASSIFICATION_germ-eval-2018_task-1',\n 'germ-eval-2018-task-1.pt'])\n model_file = cached_path(base_path, cache_dir=cache_dir)\n\n elif model.lower() == 'en-sentiment':\n base_path = '/'.join([aws_resource_path, 'TEXT-CLASSIFICATION_imdb', 'imdb.pt'])\n model_file = cached_path(base_path, cache_dir=cache_dir)\n\n if model_file is not None:\n return TextClassifier.load_from_file(model_file)\n", "path": "flair/models/text_classification_model.py"}]} | 3,529 | 153 |
gh_patches_debug_28771 | rasdani/github-patches | git_diff | opsdroid__opsdroid-182 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add ssl to the web server
It should be possible to enable ssl on the web server and pass in paths to the ssl keys in the config.
</issue>
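To make the request concrete, the parsed `web` section of the configuration could end up looking something like the dict below once SSL support exists — the `ssl`, `cert` and `key` names are assumptions for illustration, not an existing opsdroid option:

```python
# hypothetical shape of self.config for the web server, with assumed key names
web_config = {
    "host": "0.0.0.0",
    "port": 8443,
    "ssl": {
        "cert": "/path/to/fullchain.pem",  # assumed key name, placeholder path
        "key": "/path/to/privkey.pem",     # assumed key name, placeholder path
    },
}
```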
<code>
[start of opsdroid/web.py]
1 """Submodule to handle web requests in opsdroid."""
2
3 import json
4 import logging
5
6 from aiohttp import web
7
8 from opsdroid.const import __version__
9
10
11 _LOGGER = logging.getLogger(__name__)
12
13
14 class Web:
15 """Web server for opsdroid."""
16
17 def __init__(self, opsdroid):
18 """Create web object."""
19 self.opsdroid = opsdroid
20 try:
21 self.config = self.opsdroid.config["web"]
22 except KeyError:
23 self.config = {}
24 self.web_app = web.Application(loop=self.opsdroid.eventloop)
25 self.web_app.router.add_get('/', self.web_index_handler)
26 self.web_app.router.add_get('', self.web_index_handler)
27 self.web_app.router.add_get('/stats', self.web_stats_handler)
28 self.web_app.router.add_get('/stats/', self.web_stats_handler)
29
30 @property
31 def get_port(self):
32 """Return port from config or the default."""
33 try:
34 port = self.config["port"]
35 except KeyError:
36 port = 8080
37 return port
38
39 @property
40 def get_host(self):
41 """Return host from config or the default."""
42 try:
43 host = self.config["host"]
44 except KeyError:
45 host = '127.0.0.1'
46 return host
47
48 def start(self):
49 """Start web servers."""
50 _LOGGER.debug(
51 "Starting web server with host %s and port %s",
52 self.get_host, self.get_port)
53 web.run_app(self.web_app, host=self.get_host,
54 port=self.get_port, print=_LOGGER.info)
55
56 @staticmethod
57 def build_response(status, result):
58 """Build a json response object."""
59 return web.Response(text=json.dumps(result), status=status)
60
61 def web_index_handler(self, request):
62 """Handle root web request."""
63 return self.build_response(200, {
64 "message": "Welcome to the opsdroid API"})
65
66 def web_stats_handler(self, request):
67 """Handle stats request."""
68 stats = self.opsdroid.stats
69 try:
70 stats["average_response_time"] = \
71 stats["total_response_time"] / stats["total_responses"]
72 except ZeroDivisionError:
73 stats["average_response_time"] = 0
74
75 return self.build_response(200, {
76 "version": __version__,
77 "messages": {
78 "total_parsed": stats["messages_parsed"],
79 "webhooks_called": stats["webhooks_called"],
80 "total_response_time": stats["total_response_time"],
81 "total_responses": stats["total_responses"],
82 "average_response_time": stats["average_response_time"]
83 },
84 "modules": {
85 "skills": len(self.opsdroid.skills),
86 "connectors": len(self.opsdroid.connectors),
87 "databases": len(self.opsdroid.memory.databases)
88 }
89 })
90
[end of opsdroid/web.py]
</code>
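For reference, `aiohttp` already supports TLS by passing an `ssl.SSLContext` to `web.run_app`, so the change is mostly about building that context from the config and handing it over. A small standalone sketch (certificate paths are placeholders):

```python
import ssl

from aiohttp import web

app = web.Application()

# build a TLS context from certificate and key files
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext.load_cert_chain("/path/to/cert.pem", "/path/to/key.pem")  # placeholder paths

web.run_app(app, host="127.0.0.1", port=8443, ssl_context=sslcontext)
```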
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opsdroid/web.py b/opsdroid/web.py
--- a/opsdroid/web.py
+++ b/opsdroid/web.py
@@ -2,6 +2,7 @@
import json
import logging
+import ssl
from aiohttp import web
@@ -33,7 +34,10 @@
try:
port = self.config["port"]
except KeyError:
- port = 8080
+ if self.get_ssl_context is not None:
+ port = 8443
+ else:
+ port = 8080
return port
@property
@@ -45,13 +49,28 @@
host = '127.0.0.1'
return host
+ @property
+ def get_ssl_context(self):
+ """Return the ssl context or None."""
+ try:
+ ssl_config = self.config["ssl"]
+ sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ sslcontext.load_cert_chain(ssl_config["cert"], ssl_config["key"])
+ return sslcontext
+ except FileNotFoundError:
+ _LOGGER.error("Cannot find ssl cert or key.")
+ return None
+ except KeyError:
+ return None
+
def start(self):
"""Start web servers."""
_LOGGER.debug(
"Starting web server with host %s and port %s",
self.get_host, self.get_port)
web.run_app(self.web_app, host=self.get_host,
- port=self.get_port, print=_LOGGER.info)
+ port=self.get_port, print=_LOGGER.info,
+ ssl_context=self.get_ssl_context)
@staticmethod
def build_response(status, result):
| {"golden_diff": "diff --git a/opsdroid/web.py b/opsdroid/web.py\n--- a/opsdroid/web.py\n+++ b/opsdroid/web.py\n@@ -2,6 +2,7 @@\n \n import json\n import logging\n+import ssl\n \n from aiohttp import web\n \n@@ -33,7 +34,10 @@\n try:\n port = self.config[\"port\"]\n except KeyError:\n- port = 8080\n+ if self.get_ssl_context is not None:\n+ port = 8443\n+ else:\n+ port = 8080\n return port\n \n @property\n@@ -45,13 +49,28 @@\n host = '127.0.0.1'\n return host\n \n+ @property\n+ def get_ssl_context(self):\n+ \"\"\"Return the ssl context or None.\"\"\"\n+ try:\n+ ssl_config = self.config[\"ssl\"]\n+ sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)\n+ sslcontext.load_cert_chain(ssl_config[\"cert\"], ssl_config[\"key\"])\n+ return sslcontext\n+ except FileNotFoundError:\n+ _LOGGER.error(\"Cannot find ssl cert or key.\")\n+ return None\n+ except KeyError:\n+ return None\n+\n def start(self):\n \"\"\"Start web servers.\"\"\"\n _LOGGER.debug(\n \"Starting web server with host %s and port %s\",\n self.get_host, self.get_port)\n web.run_app(self.web_app, host=self.get_host,\n- port=self.get_port, print=_LOGGER.info)\n+ port=self.get_port, print=_LOGGER.info,\n+ ssl_context=self.get_ssl_context)\n \n @staticmethod\n def build_response(status, result):\n", "issue": "Add ssl to the web server\nIt should be possible to enable ssl on the web server and pass in paths to the ssl keys in the config.\n", "before_files": [{"content": "\"\"\"Submodule to handle web requests in opsdroid.\"\"\"\n\nimport json\nimport logging\n\nfrom aiohttp import web\n\nfrom opsdroid.const import __version__\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Web:\n \"\"\"Web server for opsdroid.\"\"\"\n\n def __init__(self, opsdroid):\n \"\"\"Create web object.\"\"\"\n self.opsdroid = opsdroid\n try:\n self.config = self.opsdroid.config[\"web\"]\n except KeyError:\n self.config = {}\n self.web_app = web.Application(loop=self.opsdroid.eventloop)\n self.web_app.router.add_get('/', self.web_index_handler)\n self.web_app.router.add_get('', self.web_index_handler)\n self.web_app.router.add_get('/stats', self.web_stats_handler)\n self.web_app.router.add_get('/stats/', self.web_stats_handler)\n\n @property\n def get_port(self):\n \"\"\"Return port from config or the default.\"\"\"\n try:\n port = self.config[\"port\"]\n except KeyError:\n port = 8080\n return port\n\n @property\n def get_host(self):\n \"\"\"Return host from config or the default.\"\"\"\n try:\n host = self.config[\"host\"]\n except KeyError:\n host = '127.0.0.1'\n return host\n\n def start(self):\n \"\"\"Start web servers.\"\"\"\n _LOGGER.debug(\n \"Starting web server with host %s and port %s\",\n self.get_host, self.get_port)\n web.run_app(self.web_app, host=self.get_host,\n port=self.get_port, print=_LOGGER.info)\n\n @staticmethod\n def build_response(status, result):\n \"\"\"Build a json response object.\"\"\"\n return web.Response(text=json.dumps(result), status=status)\n\n def web_index_handler(self, request):\n \"\"\"Handle root web request.\"\"\"\n return self.build_response(200, {\n \"message\": \"Welcome to the opsdroid API\"})\n\n def web_stats_handler(self, request):\n \"\"\"Handle stats request.\"\"\"\n stats = self.opsdroid.stats\n try:\n stats[\"average_response_time\"] = \\\n stats[\"total_response_time\"] / stats[\"total_responses\"]\n except ZeroDivisionError:\n stats[\"average_response_time\"] = 0\n\n return self.build_response(200, {\n \"version\": __version__,\n \"messages\": {\n \"total_parsed\": stats[\"messages_parsed\"],\n 
\"webhooks_called\": stats[\"webhooks_called\"],\n \"total_response_time\": stats[\"total_response_time\"],\n \"total_responses\": stats[\"total_responses\"],\n \"average_response_time\": stats[\"average_response_time\"]\n },\n \"modules\": {\n \"skills\": len(self.opsdroid.skills),\n \"connectors\": len(self.opsdroid.connectors),\n \"databases\": len(self.opsdroid.memory.databases)\n }\n })\n", "path": "opsdroid/web.py"}]} | 1,356 | 387 |
gh_patches_debug_66421 | rasdani/github-patches | git_diff | Textualize__textual-1837 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[CSS] Descendant type selectors can't have a numeric in their name
Consider the following code:
```python
from textual.app import App, ComposeResult
from textual.containers import Vertical
from textual.widgets import Header, Footer, Label
class LabelH1( Label ):
...
class CSSOddnessApp( App[ None ] ):
CSS = """
Vertical LabelH1 {
background: red;
}
"""
def compose( self ) -> ComposeResult:
yield Header()
yield Vertical(
Label( "Label" ),
LabelH1( "LabelH1" ),
)
yield Footer()
if __name__ == "__main__":
CSSOddnessApp().run()
```
When run we get the following error:
```
Error in stylesheet:
/Users/davep/develop/python/textual-sandbox/css_oddness.py:CSSOddnessApp:1:19
╭────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
│ ❱ 1 │ │
│ 2 │ Vertical LabelH1 { │
│ 3 │ │ background: red; │
╰────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
• Expected one of 'combinator child', 'comment start', 'declaration set start', 'new selector', 'pseudo class', 'selector', 'selector class', 'selector id',
'selector universal', or 'whitespace'.
• Did you forget a semicolon at the end of a line?
```
The same thing happens with `Vertical > LabelH1`. On the other hand, if I remove the number from the inherited label widget:
```python
from textual.app import App, ComposeResult
from textual.containers import Vertical
from textual.widgets import Header, Footer, Label
class LabelHOne( Label ):
...
class CSSOddnessApp( App[ None ] ):
CSS = """
Vertical LabelHOne {
background: red;
}
"""
def compose( self ) -> ComposeResult:
yield Header()
yield Vertical(
Label( "Label" ),
LabelHOne( "LabelHOne" ),
)
yield Footer()
if __name__ == "__main__":
CSSOddnessApp().run()
```
this works fine. Likewise, if I retain the name but *don't* use combination:
```python
from textual.app import App, ComposeResult
from textual.containers import Vertical
from textual.widgets import Header, Footer, Label
class LabelH1( Label ):
...
class CSSOddnessApp( App[ None ] ):
CSS = """
LabelH1 {
background: red;
}
"""
def compose( self ) -> ComposeResult:
yield Header()
yield Vertical(
Label( "Label" ),
LabelH1( "LabelH1" ),
)
yield Footer()
if __name__ == "__main__":
CSSOddnessApp().run()
```
that also works fine.
I would suspect a variation on #1253.
[CSS] Descendant type selectors can't have a numeric in their name
Consider the following code:
```python
from textual.app import App, ComposeResult
from textual.containers import Vertical
from textual.widgets import Header, Footer, Label
class LabelH1( Label ):
...
class CSSOddnessApp( App[ None ] ):
CSS = """
Vertical LabelH1 {
background: red;
}
"""
def compose( self ) -> ComposeResult:
yield Header()
yield Vertical(
Label( "Label" ),
LabelH1( "LabelH1" ),
)
yield Footer()
if __name__ == "__main__":
CSSOddnessApp().run()
```
When run we get the following error:
```
Error in stylesheet:
/Users/davep/develop/python/textual-sandbox/css_oddness.py:CSSOddnessApp:1:19
╭────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
│ ❱ 1 │ │
│ 2 │ Vertical LabelH1 { │
│ 3 │ │ background: red; │
╰────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
• Expected one of 'combinator child', 'comment start', 'declaration set start', 'new selector', 'pseudo class', 'selector', 'selector class', 'selector id',
'selector universal', or 'whitespace'.
• Did you forget a semicolon at the end of a line?
```
The same thing happens with `Vertical > LabelH1`. On the other hand, if I remove the number from the inherited label widget:
```python
from textual.app import App, ComposeResult
from textual.containers import Vertical
from textual.widgets import Header, Footer, Label
class LabelHOne( Label ):
...
class CSSOddnessApp( App[ None ] ):
CSS = """
Vertical LabelHOne {
background: red;
}
"""
def compose( self ) -> ComposeResult:
yield Header()
yield Vertical(
Label( "Label" ),
LabelHOne( "LabelHOne" ),
)
yield Footer()
if __name__ == "__main__":
CSSOddnessApp().run()
```
this works fine. Likewise, if I retain the name but *don't* use combination:
```python
from textual.app import App, ComposeResult
from textual.containers import Vertical
from textual.widgets import Header, Footer, Label
class LabelH1( Label ):
...
class CSSOddnessApp( App[ None ] ):
CSS = """
LabelH1 {
background: red;
}
"""
def compose( self ) -> ComposeResult:
yield Header()
yield Vertical(
Label( "Label" ),
LabelH1( "LabelH1" ),
)
yield Footer()
if __name__ == "__main__":
CSSOddnessApp().run()
```
that also works fine.
I would suspect a variation on #1253.
</issue>
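The behaviour can be reproduced outside Textual with two regular expressions from `tokenize.py` below: the pattern used for the first selector in a rule allows digits, while the pattern used for descendant selectors does not, so `LabelH1` only half-matches in the second position. A quick standalone check (the pattern strings are copied from the file; the variable names here are just labels):

```python
import re

IDENTIFIER = r"[a-zA-Z_\-][a-zA-Z0-9_\-]*"  # used by selector_start at root scope
CONTINUATION = r"[a-zA-Z_\-]+"              # used by 'selector' after another selector

print(re.fullmatch(IDENTIFIER, "LabelH1") is not None)    # True
print(re.fullmatch(CONTINUATION, "LabelH1") is not None)  # False -- the trailing "1" is rejected
```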
<code>
[start of src/textual/css/tokenize.py]
1 from __future__ import annotations
2
3 import re
4 from pathlib import PurePath
5 from typing import Iterable
6
7 from textual.css.tokenizer import Expect, Token, Tokenizer
8
9 PERCENT = r"-?\d+\.?\d*%"
10 DECIMAL = r"-?\d+\.?\d*"
11 COMMA = r"\s*,\s*"
12 OPEN_BRACE = r"\(\s*"
13 CLOSE_BRACE = r"\s*\)"
14
15 HEX_COLOR = r"\#[0-9a-fA-F]{8}|\#[0-9a-fA-F]{6}|\#[0-9a-fA-F]{4}|\#[0-9a-fA-F]{3}"
16 RGB_COLOR = rf"rgb{OPEN_BRACE}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{CLOSE_BRACE}|rgba{OPEN_BRACE}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{CLOSE_BRACE}"
17 HSL_COLOR = rf"hsl{OPEN_BRACE}{DECIMAL}{COMMA}{PERCENT}{COMMA}{PERCENT}{CLOSE_BRACE}|hsla{OPEN_BRACE}{DECIMAL}{COMMA}{PERCENT}{COMMA}{PERCENT}{COMMA}{DECIMAL}{CLOSE_BRACE}"
18
19 COMMENT_START = r"\/\*"
20 SCALAR = rf"{DECIMAL}(?:fr|%|w|h|vw|vh)"
21 DURATION = r"\d+\.?\d*(?:ms|s)"
22 NUMBER = r"\-?\d+\.?\d*"
23 COLOR = rf"{HEX_COLOR}|{RGB_COLOR}|{HSL_COLOR}"
24 KEY_VALUE = r"[a-zA-Z_-][a-zA-Z0-9_-]*=[0-9a-zA-Z_\-\/]+"
25 TOKEN = "[a-zA-Z][a-zA-Z0-9_-]*"
26 STRING = r"\".*?\""
27 VARIABLE_REF = r"\$[a-zA-Z0-9_\-]+"
28
29 IDENTIFIER = r"[a-zA-Z_\-][a-zA-Z0-9_\-]*"
30
31 # Values permitted in variable and rule declarations.
32 DECLARATION_VALUES = {
33 "scalar": SCALAR,
34 "duration": DURATION,
35 "number": NUMBER,
36 "color": COLOR,
37 "key_value": KEY_VALUE,
38 "token": TOKEN,
39 "string": STRING,
40 "variable_ref": VARIABLE_REF,
41 }
42
43 # The tokenizers "expectation" while at the root/highest level of scope
44 # in the CSS file. At this level we might expect to see selectors, comments,
45 # variable definitions etc.
46 expect_root_scope = Expect(
47 whitespace=r"\s+",
48 comment_start=COMMENT_START,
49 selector_start_id=r"\#" + IDENTIFIER,
50 selector_start_class=r"\." + IDENTIFIER,
51 selector_start_universal=r"\*",
52 selector_start=IDENTIFIER,
53 variable_name=rf"{VARIABLE_REF}:",
54 ).expect_eof(True)
55
56 # After a variable declaration e.g. "$warning-text: TOKENS;"
57 # for tokenizing variable value ------^~~~~~~^
58 expect_variable_name_continue = Expect(
59 variable_value_end=r"\n|;",
60 whitespace=r"\s+",
61 comment_start=COMMENT_START,
62 **DECLARATION_VALUES,
63 ).expect_eof(True)
64
65 expect_comment_end = Expect(
66 comment_end=re.escape("*/"),
67 )
68
69 # After we come across a selector in CSS e.g. ".my-class", we may
70 # find other selectors, pseudo-classes... e.g. ".my-class :hover"
71 expect_selector_continue = Expect(
72 whitespace=r"\s+",
73 comment_start=COMMENT_START,
74 pseudo_class=r"\:[a-zA-Z_-]+",
75 selector_id=r"\#[a-zA-Z_\-][a-zA-Z0-9_\-]*",
76 selector_class=r"\.[a-zA-Z_\-][a-zA-Z0-9_\-]*",
77 selector_universal=r"\*",
78 selector=r"[a-zA-Z_\-]+",
79 combinator_child=">",
80 new_selector=r",",
81 declaration_set_start=r"\{",
82 )
83
84 # A rule declaration e.g. "text: red;"
85 # ^---^
86 expect_declaration = Expect(
87 whitespace=r"\s+",
88 comment_start=COMMENT_START,
89 declaration_name=r"[a-zA-Z_\-]+\:",
90 declaration_set_end=r"\}",
91 )
92
93 expect_declaration_solo = Expect(
94 whitespace=r"\s+",
95 comment_start=COMMENT_START,
96 declaration_name=r"[a-zA-Z_\-]+\:",
97 declaration_set_end=r"\}",
98 ).expect_eof(True)
99
100 # The value(s)/content from a rule declaration e.g. "text: red;"
101 # ^---^
102 expect_declaration_content = Expect(
103 declaration_end=r";",
104 whitespace=r"\s+",
105 comment_start=COMMENT_START,
106 **DECLARATION_VALUES,
107 important=r"\!important",
108 comma=",",
109 declaration_set_end=r"\}",
110 )
111
112 expect_declaration_content_solo = Expect(
113 declaration_end=r";",
114 whitespace=r"\s+",
115 comment_start=COMMENT_START,
116 **DECLARATION_VALUES,
117 important=r"\!important",
118 comma=",",
119 declaration_set_end=r"\}",
120 ).expect_eof(True)
121
122
123 class TokenizerState:
124 """State machine for the tokenizer.
125
126 Attributes:
127 EXPECT: The initial expectation of the tokenizer. Since we start tokenizing
128 at the root scope, we might expect to see either a variable or selector, for example.
129 STATE_MAP: Maps token names to Expects, defines the sets of valid tokens
130 that we'd expect to see next, given the current token. For example, if
131 we've just processed a variable declaration name, we next expect to see
132 the value of that variable.
133 """
134
135 EXPECT = expect_root_scope
136 STATE_MAP = {
137 "variable_name": expect_variable_name_continue,
138 "variable_value_end": expect_root_scope,
139 "selector_start": expect_selector_continue,
140 "selector_start_id": expect_selector_continue,
141 "selector_start_class": expect_selector_continue,
142 "selector_start_universal": expect_selector_continue,
143 "selector_id": expect_selector_continue,
144 "selector_class": expect_selector_continue,
145 "selector_universal": expect_selector_continue,
146 "declaration_set_start": expect_declaration,
147 "declaration_name": expect_declaration_content,
148 "declaration_end": expect_declaration,
149 "declaration_set_end": expect_root_scope,
150 }
151
152 def __call__(self, code: str, path: str | PurePath) -> Iterable[Token]:
153 tokenizer = Tokenizer(code, path=path)
154 expect = self.EXPECT
155 get_token = tokenizer.get_token
156 get_state = self.STATE_MAP.get
157 while True:
158 token = get_token(expect)
159 name = token.name
160 if name == "comment_start":
161 tokenizer.skip_to(expect_comment_end)
162 continue
163 elif name == "eof":
164 break
165 expect = get_state(name, expect)
166 yield token
167
168
169 class DeclarationTokenizerState(TokenizerState):
170 EXPECT = expect_declaration_solo
171 STATE_MAP = {
172 "declaration_name": expect_declaration_content,
173 "declaration_end": expect_declaration_solo,
174 }
175
176
177 class ValueTokenizerState(TokenizerState):
178 EXPECT = expect_declaration_content_solo
179
180
181 tokenize = TokenizerState()
182 tokenize_declarations = DeclarationTokenizerState()
183 tokenize_value = ValueTokenizerState()
184
185
186 def tokenize_values(values: dict[str, str]) -> dict[str, list[Token]]:
187 """Tokens the values in a dict of strings.
188
189 Args:
190 values: A mapping of CSS variable name on to a value, to be
191 added to the CSS context.
192
193 Returns:
194 A mapping of name on to a list of tokens,
195 """
196 value_tokens = {
197 name: list(tokenize_value(value, "__name__")) for name, value in values.items()
198 }
199 return value_tokens
200
[end of src/textual/css/tokenize.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/textual/css/tokenize.py b/src/textual/css/tokenize.py
--- a/src/textual/css/tokenize.py
+++ b/src/textual/css/tokenize.py
@@ -75,7 +75,7 @@
selector_id=r"\#[a-zA-Z_\-][a-zA-Z0-9_\-]*",
selector_class=r"\.[a-zA-Z_\-][a-zA-Z0-9_\-]*",
selector_universal=r"\*",
- selector=r"[a-zA-Z_\-]+",
+ selector=IDENTIFIER,
combinator_child=">",
new_selector=r",",
declaration_set_start=r"\{",
| {"golden_diff": "diff --git a/src/textual/css/tokenize.py b/src/textual/css/tokenize.py\n--- a/src/textual/css/tokenize.py\n+++ b/src/textual/css/tokenize.py\n@@ -75,7 +75,7 @@\n selector_id=r\"\\#[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_class=r\"\\.[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_universal=r\"\\*\",\n- selector=r\"[a-zA-Z_\\-]+\",\n+ selector=IDENTIFIER,\n combinator_child=\">\",\n new_selector=r\",\",\n declaration_set_start=r\"\\{\",\n", "issue": "[CSS] Descendant type selectors can't have a numeric in their name\nConsider the following code:\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.containers import Vertical\r\nfrom textual.widgets import Header, Footer, Label\r\n\r\nclass LabelH1( Label ):\r\n ...\r\n\r\nclass CSSOddnessApp( App[ None ] ):\r\n\r\n CSS = \"\"\"\r\n Vertical LabelH1 {\r\n background: red;\r\n }\r\n \"\"\"\r\n\r\n def compose( self ) -> ComposeResult:\r\n yield Header()\r\n yield Vertical(\r\n Label( \"Label\" ),\r\n LabelH1( \"LabelH1\" ),\r\n )\r\n yield Footer()\r\n\r\nif __name__ == \"__main__\":\r\n CSSOddnessApp().run()\r\n```\r\n\r\nWhen run we get the following error:\r\n\r\n```\r\n Error in stylesheet:\r\n /Users/davep/develop/python/textual-sandbox/css_oddness.py:CSSOddnessApp:1:19\r\n\u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e\r\n\u2502 \u2771 1 \u2502 \u2502\r\n\u2502 2 \u2502 Vertical LabelH1 { \u2502\r\n\u2502 3 \u2502 \u2502 background: red; \u2502\r\n\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f\r\n \u2022 Expected one of 'combinator child', 'comment start', 'declaration set start', 'new selector', 'pseudo class', 'selector', 'selector class', 'selector id',\r\n 'selector universal', or 'whitespace'.\r\n \u2022 Did you forget a 
semicolon at the end of a line?\r\n```\r\n\r\nThe same thing happens with `Vertical LabelH1`. On the other hand, if I remove the number from the inherited label widget:\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.containers import Vertical\r\nfrom textual.widgets import Header, Footer, Label\r\n\r\nclass LabelHOne( Label ):\r\n ...\r\n\r\nclass CSSOddnessApp( App[ None ] ):\r\n\r\n CSS = \"\"\"\r\n Vertical LabelHOne {\r\n background: red;\r\n }\r\n \"\"\"\r\n\r\n def compose( self ) -> ComposeResult:\r\n yield Header()\r\n yield Vertical(\r\n Label( \"Label\" ),\r\n LabelHOne( \"LabelHOne\" ),\r\n )\r\n yield Footer()\r\n\r\nif __name__ == \"__main__\":\r\n CSSOddnessApp().run()\r\n```\r\n\r\nthis works fine. Likewise, if I retain the name but *don't* use combination:\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.containers import Vertical\r\nfrom textual.widgets import Header, Footer, Label\r\n\r\nclass LabelH1( Label ):\r\n ...\r\n\r\nclass CSSOddnessApp( App[ None ] ):\r\n\r\n CSS = \"\"\"\r\n LabelH1 {\r\n background: red;\r\n }\r\n \"\"\"\r\n\r\n def compose( self ) -> ComposeResult:\r\n yield Header()\r\n yield Vertical(\r\n Label( \"Label\" ),\r\n LabelH1( \"LabelH1\" ),\r\n )\r\n yield Footer()\r\n\r\nif __name__ == \"__main__\":\r\n CSSOddnessApp().run()\r\n```\r\n\r\nthat also works fine.\r\n\r\nI would suspect a variation on #1253.\n[CSS] Descendant type selectors can't have a numeric in their name\nConsider the following code:\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.containers import Vertical\r\nfrom textual.widgets import Header, Footer, Label\r\n\r\nclass LabelH1( Label ):\r\n ...\r\n\r\nclass CSSOddnessApp( App[ None ] ):\r\n\r\n CSS = \"\"\"\r\n Vertical LabelH1 {\r\n background: red;\r\n }\r\n \"\"\"\r\n\r\n def compose( self ) -> ComposeResult:\r\n yield Header()\r\n yield Vertical(\r\n Label( \"Label\" ),\r\n LabelH1( \"LabelH1\" ),\r\n )\r\n yield Footer()\r\n\r\nif __name__ == \"__main__\":\r\n CSSOddnessApp().run()\r\n```\r\n\r\nWhen run we get the following error:\r\n\r\n```\r\n Error in stylesheet:\r\n /Users/davep/develop/python/textual-sandbox/css_oddness.py:CSSOddnessApp:1:19\r\n\u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e\r\n\u2502 \u2771 1 \u2502 \u2502\r\n\u2502 2 \u2502 Vertical LabelH1 { \u2502\r\n\u2502 3 \u2502 \u2502 background: red; 
\u2502\r\n\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f\r\n \u2022 Expected one of 'combinator child', 'comment start', 'declaration set start', 'new selector', 'pseudo class', 'selector', 'selector class', 'selector id',\r\n 'selector universal', or 'whitespace'.\r\n \u2022 Did you forget a semicolon at the end of a line?\r\n```\r\n\r\nThe same thing happens with `Vertical LabelH1`. On the other hand, if I remove the number from the inherited label widget:\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.containers import Vertical\r\nfrom textual.widgets import Header, Footer, Label\r\n\r\nclass LabelHOne( Label ):\r\n ...\r\n\r\nclass CSSOddnessApp( App[ None ] ):\r\n\r\n CSS = \"\"\"\r\n Vertical LabelHOne {\r\n background: red;\r\n }\r\n \"\"\"\r\n\r\n def compose( self ) -> ComposeResult:\r\n yield Header()\r\n yield Vertical(\r\n Label( \"Label\" ),\r\n LabelHOne( \"LabelHOne\" ),\r\n )\r\n yield Footer()\r\n\r\nif __name__ == \"__main__\":\r\n CSSOddnessApp().run()\r\n```\r\n\r\nthis works fine. 
Likewise, if I retain the name but *don't* use combination:\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.containers import Vertical\r\nfrom textual.widgets import Header, Footer, Label\r\n\r\nclass LabelH1( Label ):\r\n ...\r\n\r\nclass CSSOddnessApp( App[ None ] ):\r\n\r\n CSS = \"\"\"\r\n LabelH1 {\r\n background: red;\r\n }\r\n \"\"\"\r\n\r\n def compose( self ) -> ComposeResult:\r\n yield Header()\r\n yield Vertical(\r\n Label( \"Label\" ),\r\n LabelH1( \"LabelH1\" ),\r\n )\r\n yield Footer()\r\n\r\nif __name__ == \"__main__\":\r\n CSSOddnessApp().run()\r\n```\r\n\r\nthat also works fine.\r\n\r\nI would suspect a variation on #1253.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport re\nfrom pathlib import PurePath\nfrom typing import Iterable\n\nfrom textual.css.tokenizer import Expect, Token, Tokenizer\n\nPERCENT = r\"-?\\d+\\.?\\d*%\"\nDECIMAL = r\"-?\\d+\\.?\\d*\"\nCOMMA = r\"\\s*,\\s*\"\nOPEN_BRACE = r\"\\(\\s*\"\nCLOSE_BRACE = r\"\\s*\\)\"\n\nHEX_COLOR = r\"\\#[0-9a-fA-F]{8}|\\#[0-9a-fA-F]{6}|\\#[0-9a-fA-F]{4}|\\#[0-9a-fA-F]{3}\"\nRGB_COLOR = rf\"rgb{OPEN_BRACE}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{CLOSE_BRACE}|rgba{OPEN_BRACE}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{CLOSE_BRACE}\"\nHSL_COLOR = rf\"hsl{OPEN_BRACE}{DECIMAL}{COMMA}{PERCENT}{COMMA}{PERCENT}{CLOSE_BRACE}|hsla{OPEN_BRACE}{DECIMAL}{COMMA}{PERCENT}{COMMA}{PERCENT}{COMMA}{DECIMAL}{CLOSE_BRACE}\"\n\nCOMMENT_START = r\"\\/\\*\"\nSCALAR = rf\"{DECIMAL}(?:fr|%|w|h|vw|vh)\"\nDURATION = r\"\\d+\\.?\\d*(?:ms|s)\"\nNUMBER = r\"\\-?\\d+\\.?\\d*\"\nCOLOR = rf\"{HEX_COLOR}|{RGB_COLOR}|{HSL_COLOR}\"\nKEY_VALUE = r\"[a-zA-Z_-][a-zA-Z0-9_-]*=[0-9a-zA-Z_\\-\\/]+\"\nTOKEN = \"[a-zA-Z][a-zA-Z0-9_-]*\"\nSTRING = r\"\\\".*?\\\"\"\nVARIABLE_REF = r\"\\$[a-zA-Z0-9_\\-]+\"\n\nIDENTIFIER = r\"[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\"\n\n# Values permitted in variable and rule declarations.\nDECLARATION_VALUES = {\n \"scalar\": SCALAR,\n \"duration\": DURATION,\n \"number\": NUMBER,\n \"color\": COLOR,\n \"key_value\": KEY_VALUE,\n \"token\": TOKEN,\n \"string\": STRING,\n \"variable_ref\": VARIABLE_REF,\n}\n\n# The tokenizers \"expectation\" while at the root/highest level of scope\n# in the CSS file. At this level we might expect to see selectors, comments,\n# variable definitions etc.\nexpect_root_scope = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n selector_start_id=r\"\\#\" + IDENTIFIER,\n selector_start_class=r\"\\.\" + IDENTIFIER,\n selector_start_universal=r\"\\*\",\n selector_start=IDENTIFIER,\n variable_name=rf\"{VARIABLE_REF}:\",\n).expect_eof(True)\n\n# After a variable declaration e.g. \"$warning-text: TOKENS;\"\n# for tokenizing variable value ------^~~~~~~^\nexpect_variable_name_continue = Expect(\n variable_value_end=r\"\\n|;\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n).expect_eof(True)\n\nexpect_comment_end = Expect(\n comment_end=re.escape(\"*/\"),\n)\n\n# After we come across a selector in CSS e.g. \".my-class\", we may\n# find other selectors, pseudo-classes... e.g. \".my-class :hover\"\nexpect_selector_continue = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n pseudo_class=r\"\\:[a-zA-Z_-]+\",\n selector_id=r\"\\#[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_class=r\"\\.[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_universal=r\"\\*\",\n selector=r\"[a-zA-Z_\\-]+\",\n combinator_child=\">\",\n new_selector=r\",\",\n declaration_set_start=r\"\\{\",\n)\n\n# A rule declaration e.g. 
\"text: red;\"\n# ^---^\nexpect_declaration = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n declaration_name=r\"[a-zA-Z_\\-]+\\:\",\n declaration_set_end=r\"\\}\",\n)\n\nexpect_declaration_solo = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n declaration_name=r\"[a-zA-Z_\\-]+\\:\",\n declaration_set_end=r\"\\}\",\n).expect_eof(True)\n\n# The value(s)/content from a rule declaration e.g. \"text: red;\"\n# ^---^\nexpect_declaration_content = Expect(\n declaration_end=r\";\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n important=r\"\\!important\",\n comma=\",\",\n declaration_set_end=r\"\\}\",\n)\n\nexpect_declaration_content_solo = Expect(\n declaration_end=r\";\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n important=r\"\\!important\",\n comma=\",\",\n declaration_set_end=r\"\\}\",\n).expect_eof(True)\n\n\nclass TokenizerState:\n \"\"\"State machine for the tokenizer.\n\n Attributes:\n EXPECT: The initial expectation of the tokenizer. Since we start tokenizing\n at the root scope, we might expect to see either a variable or selector, for example.\n STATE_MAP: Maps token names to Expects, defines the sets of valid tokens\n that we'd expect to see next, given the current token. For example, if\n we've just processed a variable declaration name, we next expect to see\n the value of that variable.\n \"\"\"\n\n EXPECT = expect_root_scope\n STATE_MAP = {\n \"variable_name\": expect_variable_name_continue,\n \"variable_value_end\": expect_root_scope,\n \"selector_start\": expect_selector_continue,\n \"selector_start_id\": expect_selector_continue,\n \"selector_start_class\": expect_selector_continue,\n \"selector_start_universal\": expect_selector_continue,\n \"selector_id\": expect_selector_continue,\n \"selector_class\": expect_selector_continue,\n \"selector_universal\": expect_selector_continue,\n \"declaration_set_start\": expect_declaration,\n \"declaration_name\": expect_declaration_content,\n \"declaration_end\": expect_declaration,\n \"declaration_set_end\": expect_root_scope,\n }\n\n def __call__(self, code: str, path: str | PurePath) -> Iterable[Token]:\n tokenizer = Tokenizer(code, path=path)\n expect = self.EXPECT\n get_token = tokenizer.get_token\n get_state = self.STATE_MAP.get\n while True:\n token = get_token(expect)\n name = token.name\n if name == \"comment_start\":\n tokenizer.skip_to(expect_comment_end)\n continue\n elif name == \"eof\":\n break\n expect = get_state(name, expect)\n yield token\n\n\nclass DeclarationTokenizerState(TokenizerState):\n EXPECT = expect_declaration_solo\n STATE_MAP = {\n \"declaration_name\": expect_declaration_content,\n \"declaration_end\": expect_declaration_solo,\n }\n\n\nclass ValueTokenizerState(TokenizerState):\n EXPECT = expect_declaration_content_solo\n\n\ntokenize = TokenizerState()\ntokenize_declarations = DeclarationTokenizerState()\ntokenize_value = ValueTokenizerState()\n\n\ndef tokenize_values(values: dict[str, str]) -> dict[str, list[Token]]:\n \"\"\"Tokens the values in a dict of strings.\n\n Args:\n values: A mapping of CSS variable name on to a value, to be\n added to the CSS context.\n\n Returns:\n A mapping of name on to a list of tokens,\n \"\"\"\n value_tokens = {\n name: list(tokenize_value(value, \"__name__\")) for name, value in values.items()\n }\n return value_tokens\n", "path": "src/textual/css/tokenize.py"}]} | 4,067 | 143 |
gh_patches_debug_15963 | rasdani/github-patches | git_diff | marshmallow-code__webargs-557 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for headers to DjangoParser
```
NotImplementedError: Header parsing not supported by DjangoParser
```
</issue>
<code>
[start of src/webargs/djangoparser.py]
1 """Django request argument parsing.
2
3 Example usage: ::
4
5 from django.views.generic import View
6 from django.http import HttpResponse
7 from marshmallow import fields
8 from webargs.djangoparser import use_args
9
10 hello_args = {
11 'name': fields.Str(missing='World')
12 }
13
14 class MyView(View):
15
16 @use_args(hello_args)
17 def get(self, args, request):
18 return HttpResponse('Hello ' + args['name'])
19 """
20 from webargs import core
21 from webargs.multidictproxy import MultiDictProxy
22
23
24 def is_json_request(req):
25 return core.is_json(req.content_type)
26
27
28 class DjangoParser(core.Parser):
29 """Django request argument parser.
30
31 .. warning::
32
33 :class:`DjangoParser` does not override
34 :meth:`handle_error <webargs.core.Parser.handle_error>`, so your Django
35 views are responsible for catching any :exc:`ValidationErrors` raised by
36 the parser and returning the appropriate `HTTPResponse`.
37 """
38
39 def _raw_load_json(self, req):
40 """Read a json payload from the request for the core parser's load_json
41
42 Checks the input mimetype and may return 'missing' if the mimetype is
43 non-json, even if the request body is parseable as json."""
44 if not is_json_request(req):
45 return core.missing
46
47 return core.parse_json(req.body)
48
49 def load_querystring(self, req, schema):
50 """Return query params from the request as a MultiDictProxy."""
51 return MultiDictProxy(req.GET, schema)
52
53 def load_form(self, req, schema):
54 """Return form values from the request as a MultiDictProxy."""
55 return MultiDictProxy(req.POST, schema)
56
57 def load_cookies(self, req, schema):
58 """Return cookies from the request."""
59 return req.COOKIES
60
61 def load_headers(self, req, schema):
62 raise NotImplementedError(
63 f"Header parsing not supported by {self.__class__.__name__}"
64 )
65
66 def load_files(self, req, schema):
67 """Return files from the request as a MultiDictProxy."""
68 return MultiDictProxy(req.FILES, schema)
69
70 def get_request_from_view_args(self, view, args, kwargs):
71 # The first argument is either `self` or `request`
72 try: # self.request
73 return args[0].request
74 except AttributeError: # first arg is request
75 return args[0]
76
77
78 parser = DjangoParser()
79 use_args = parser.use_args
80 use_kwargs = parser.use_kwargs
81
[end of src/webargs/djangoparser.py]
[start of setup.py]
1 import re
2 from setuptools import setup, find_packages
3
4 FRAMEWORKS = [
5 "Flask>=0.12.2",
6 "Django>=1.11.16",
7 "bottle>=0.12.13",
8 "tornado>=4.5.2",
9 "pyramid>=1.9.1",
10 "webapp2>=3.0.0b1",
11 "falcon>=2.0.0",
12 "aiohttp>=3.0.0",
13 ]
14 EXTRAS_REQUIRE = {
15 "frameworks": FRAMEWORKS,
16 "tests": [
17 "pytest",
18 "webtest==2.0.35",
19 "webtest-aiohttp==2.0.0",
20 "pytest-aiohttp>=0.3.0",
21 ]
22 + FRAMEWORKS,
23 "lint": [
24 "mypy==0.790",
25 "flake8==3.8.4",
26 "flake8-bugbear==20.1.4",
27 "pre-commit~=2.4",
28 ],
29 "docs": ["Sphinx==3.3.0", "sphinx-issues==1.2.0", "sphinx-typlog-theme==0.8.0"]
30 + FRAMEWORKS,
31 }
32 EXTRAS_REQUIRE["dev"] = EXTRAS_REQUIRE["tests"] + EXTRAS_REQUIRE["lint"] + ["tox"]
33
34
35 def find_version(fname):
36 """Attempts to find the version number in the file names fname.
37 Raises RuntimeError if not found.
38 """
39 version = ""
40 with open(fname) as fp:
41 reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
42 for line in fp:
43 m = reg.match(line)
44 if m:
45 version = m.group(1)
46 break
47 if not version:
48 raise RuntimeError("Cannot find version information")
49 return version
50
51
52 def read(fname):
53 with open(fname) as fp:
54 content = fp.read()
55 return content
56
57
58 setup(
59 name="webargs",
60 version=find_version("src/webargs/__init__.py"),
61 description=(
62 "Declarative parsing and validation of HTTP request objects, "
63 "with built-in support for popular web frameworks, including "
64 "Flask, Django, Bottle, Tornado, Pyramid, webapp2, Falcon, and aiohttp."
65 ),
66 long_description=read("README.rst"),
67 author="Steven Loria",
68 author_email="[email protected]",
69 url="https://github.com/marshmallow-code/webargs",
70 packages=find_packages("src"),
71 package_dir={"": "src"},
72 install_requires=["marshmallow>=3.0.0"],
73 extras_require=EXTRAS_REQUIRE,
74 license="MIT",
75 zip_safe=False,
76 keywords=(
77 "webargs",
78 "http",
79 "flask",
80 "django",
81 "bottle",
82 "tornado",
83 "aiohttp",
84 "webapp2",
85 "request",
86 "arguments",
87 "validation",
88 "parameters",
89 "rest",
90 "api",
91 "marshmallow",
92 ),
93 python_requires=">=3.6",
94 classifiers=[
95 "Development Status :: 5 - Production/Stable",
96 "Intended Audience :: Developers",
97 "License :: OSI Approved :: MIT License",
98 "Natural Language :: English",
99 "Programming Language :: Python :: 3",
100 "Programming Language :: Python :: 3.6",
101 "Programming Language :: Python :: 3.7",
102 "Programming Language :: Python :: 3.8",
103 "Programming Language :: Python :: 3.9",
104 "Programming Language :: Python :: 3 :: Only",
105 "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
106 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
107 ],
108 test_suite="tests",
109 project_urls={
110 "Changelog": "https://webargs.readthedocs.io/en/latest/changelog.html",
111 "Issues": "https://github.com/marshmallow-code/webargs/issues",
112 "Funding": "https://opencollective.com/marshmallow",
113 "Tidelift": "https://tidelift.com/subscription/pkg/pypi-webargs?utm_source=pypi-marshmallow&utm_medium=pypi", # noqa
114 },
115 )
116
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@
FRAMEWORKS = [
"Flask>=0.12.2",
- "Django>=1.11.16",
+ "Django>=2.2.0",
"bottle>=0.12.13",
"tornado>=4.5.2",
"pyramid>=1.9.1",
diff --git a/src/webargs/djangoparser.py b/src/webargs/djangoparser.py
--- a/src/webargs/djangoparser.py
+++ b/src/webargs/djangoparser.py
@@ -59,9 +59,10 @@
return req.COOKIES
def load_headers(self, req, schema):
- raise NotImplementedError(
- f"Header parsing not supported by {self.__class__.__name__}"
- )
+ """Return headers from the request."""
+ # Django's HttpRequest.headers is a case-insensitive dict type, but it
+ # isn't a multidict, so this is not proxied
+ return req.headers
def load_files(self, req, schema):
"""Return files from the request as a MultiDictProxy."""
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -3,7 +3,7 @@\n \n FRAMEWORKS = [\n \"Flask>=0.12.2\",\n- \"Django>=1.11.16\",\n+ \"Django>=2.2.0\",\n \"bottle>=0.12.13\",\n \"tornado>=4.5.2\",\n \"pyramid>=1.9.1\",\ndiff --git a/src/webargs/djangoparser.py b/src/webargs/djangoparser.py\n--- a/src/webargs/djangoparser.py\n+++ b/src/webargs/djangoparser.py\n@@ -59,9 +59,10 @@\n return req.COOKIES\n \n def load_headers(self, req, schema):\n- raise NotImplementedError(\n- f\"Header parsing not supported by {self.__class__.__name__}\"\n- )\n+ \"\"\"Return headers from the request.\"\"\"\n+ # Django's HttpRequest.headers is a case-insensitive dict type, but it\n+ # isn't a multidict, so this is not proxied\n+ return req.headers\n \n def load_files(self, req, schema):\n \"\"\"Return files from the request as a MultiDictProxy.\"\"\"\n", "issue": "Add support for headers to DjangoParser\n```\r\nNotImplementedError: Header parsing not supported by DjangoParser\r\n```\n", "before_files": [{"content": "\"\"\"Django request argument parsing.\n\nExample usage: ::\n\n from django.views.generic import View\n from django.http import HttpResponse\n from marshmallow import fields\n from webargs.djangoparser import use_args\n\n hello_args = {\n 'name': fields.Str(missing='World')\n }\n\n class MyView(View):\n\n @use_args(hello_args)\n def get(self, args, request):\n return HttpResponse('Hello ' + args['name'])\n\"\"\"\nfrom webargs import core\nfrom webargs.multidictproxy import MultiDictProxy\n\n\ndef is_json_request(req):\n return core.is_json(req.content_type)\n\n\nclass DjangoParser(core.Parser):\n \"\"\"Django request argument parser.\n\n .. warning::\n\n :class:`DjangoParser` does not override\n :meth:`handle_error <webargs.core.Parser.handle_error>`, so your Django\n views are responsible for catching any :exc:`ValidationErrors` raised by\n the parser and returning the appropriate `HTTPResponse`.\n \"\"\"\n\n def _raw_load_json(self, req):\n \"\"\"Read a json payload from the request for the core parser's load_json\n\n Checks the input mimetype and may return 'missing' if the mimetype is\n non-json, even if the request body is parseable as json.\"\"\"\n if not is_json_request(req):\n return core.missing\n\n return core.parse_json(req.body)\n\n def load_querystring(self, req, schema):\n \"\"\"Return query params from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.GET, schema)\n\n def load_form(self, req, schema):\n \"\"\"Return form values from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.POST, schema)\n\n def load_cookies(self, req, schema):\n \"\"\"Return cookies from the request.\"\"\"\n return req.COOKIES\n\n def load_headers(self, req, schema):\n raise NotImplementedError(\n f\"Header parsing not supported by {self.__class__.__name__}\"\n )\n\n def load_files(self, req, schema):\n \"\"\"Return files from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.FILES, schema)\n\n def get_request_from_view_args(self, view, args, kwargs):\n # The first argument is either `self` or `request`\n try: # self.request\n return args[0].request\n except AttributeError: # first arg is request\n return args[0]\n\n\nparser = DjangoParser()\nuse_args = parser.use_args\nuse_kwargs = parser.use_kwargs\n", "path": "src/webargs/djangoparser.py"}, {"content": "import re\nfrom setuptools import setup, find_packages\n\nFRAMEWORKS = [\n \"Flask>=0.12.2\",\n \"Django>=1.11.16\",\n \"bottle>=0.12.13\",\n \"tornado>=4.5.2\",\n 
\"pyramid>=1.9.1\",\n \"webapp2>=3.0.0b1\",\n \"falcon>=2.0.0\",\n \"aiohttp>=3.0.0\",\n]\nEXTRAS_REQUIRE = {\n \"frameworks\": FRAMEWORKS,\n \"tests\": [\n \"pytest\",\n \"webtest==2.0.35\",\n \"webtest-aiohttp==2.0.0\",\n \"pytest-aiohttp>=0.3.0\",\n ]\n + FRAMEWORKS,\n \"lint\": [\n \"mypy==0.790\",\n \"flake8==3.8.4\",\n \"flake8-bugbear==20.1.4\",\n \"pre-commit~=2.4\",\n ],\n \"docs\": [\"Sphinx==3.3.0\", \"sphinx-issues==1.2.0\", \"sphinx-typlog-theme==0.8.0\"]\n + FRAMEWORKS,\n}\nEXTRAS_REQUIRE[\"dev\"] = EXTRAS_REQUIRE[\"tests\"] + EXTRAS_REQUIRE[\"lint\"] + [\"tox\"]\n\n\ndef find_version(fname):\n \"\"\"Attempts to find the version number in the file names fname.\n Raises RuntimeError if not found.\n \"\"\"\n version = \"\"\n with open(fname) as fp:\n reg = re.compile(r'__version__ = [\\'\"]([^\\'\"]*)[\\'\"]')\n for line in fp:\n m = reg.match(line)\n if m:\n version = m.group(1)\n break\n if not version:\n raise RuntimeError(\"Cannot find version information\")\n return version\n\n\ndef read(fname):\n with open(fname) as fp:\n content = fp.read()\n return content\n\n\nsetup(\n name=\"webargs\",\n version=find_version(\"src/webargs/__init__.py\"),\n description=(\n \"Declarative parsing and validation of HTTP request objects, \"\n \"with built-in support for popular web frameworks, including \"\n \"Flask, Django, Bottle, Tornado, Pyramid, webapp2, Falcon, and aiohttp.\"\n ),\n long_description=read(\"README.rst\"),\n author=\"Steven Loria\",\n author_email=\"[email protected]\",\n url=\"https://github.com/marshmallow-code/webargs\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n install_requires=[\"marshmallow>=3.0.0\"],\n extras_require=EXTRAS_REQUIRE,\n license=\"MIT\",\n zip_safe=False,\n keywords=(\n \"webargs\",\n \"http\",\n \"flask\",\n \"django\",\n \"bottle\",\n \"tornado\",\n \"aiohttp\",\n \"webapp2\",\n \"request\",\n \"arguments\",\n \"validation\",\n \"parameters\",\n \"rest\",\n \"api\",\n \"marshmallow\",\n ),\n python_requires=\">=3.6\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Internet :: WWW/HTTP :: Dynamic Content\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n ],\n test_suite=\"tests\",\n project_urls={\n \"Changelog\": \"https://webargs.readthedocs.io/en/latest/changelog.html\",\n \"Issues\": \"https://github.com/marshmallow-code/webargs/issues\",\n \"Funding\": \"https://opencollective.com/marshmallow\",\n \"Tidelift\": \"https://tidelift.com/subscription/pkg/pypi-webargs?utm_source=pypi-marshmallow&utm_medium=pypi\", # noqa\n },\n)\n", "path": "setup.py"}]} | 2,464 | 280 |
gh_patches_debug_39427 | rasdani/github-patches | git_diff | sublimelsp__LSP-520 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Intelephense v1.0 released with a few issues
I'm happy to announce Intelephense v1.0 is released. More info can be found [here](https://github.com/bmewburn/intelephense-docs)
I'm not entirely sure if this is an LSP issue or a server issue.
When autocompleting method names I have to manually type the parenthesis and tooltip information seems messed up.

* OSX 10.14 - Intelephense 1.0
* Package Control
</issue>
<code>
[start of plugin/signature_help.py]
1 import mdpopups
2 import sublime
3 import sublime_plugin
4 import webbrowser
5 import re
6 import html
7
8 try:
9 from typing import Any, List, Dict, Optional
10 assert Any and List and Dict and Optional
11 except ImportError:
12 pass
13
14 from .core.configurations import is_supported_syntax
15 from .core.registry import config_for_scope, session_for_view, client_for_view
16 from .core.documents import get_document_position
17 from .core.events import global_events
18 from .core.protocol import Request
19 from .core.logging import debug
20 from .core.popups import popup_css, popup_class
21 from .core.settings import settings
22
23
24 class SignatureHelpListener(sublime_plugin.ViewEventListener):
25
26 def __init__(self, view):
27 self.view = view
28 self._initialized = False
29 self._signature_help_triggers = [] # type: List[str]
30 self._visible = False
31 self._language_id = ""
32 self._signatures = [] # type: List[Any]
33 self._active_signature = -1
34 self._active_parameter = -1
35
36 @classmethod
37 def is_applicable(cls, settings):
38 syntax = settings.get('syntax')
39 return syntax and is_supported_syntax(syntax)
40
41 def initialize(self):
42 session = session_for_view(self.view)
43 if session:
44 signatureHelpProvider = session.get_capability(
45 'signatureHelpProvider')
46 if signatureHelpProvider:
47 self._signature_help_triggers = signatureHelpProvider.get(
48 'triggerCharacters')
49
50 config = config_for_scope(self.view)
51 if config:
52 self._language_id = self._view_language(self.view, config.name)
53
54 self._initialized = True
55
56 def on_modified_async(self):
57 pos = self.view.sel()[0].begin()
58 # TODO: this will fire too often, narrow down using scopes or regex
59 if not self._initialized:
60 self.initialize()
61
62 if self._signature_help_triggers:
63 last_char = self.view.substr(pos - 1)
64 if last_char in self._signature_help_triggers:
65 self.request_signature_help(pos)
66 elif self._visible:
67 if last_char.isspace():
68 # Peek behind to find the last non-whitespace character.
69 last_char = self.view.substr(self.view.find_by_class(pos, False, ~0) - 1)
70 if last_char not in self._signature_help_triggers:
71 self.view.hide_popup()
72
73 def request_signature_help(self, point) -> None:
74 client = client_for_view(self.view)
75 if client:
76 global_events.publish("view.on_purge_changes", self.view)
77 document_position = get_document_position(self.view, point)
78 if document_position:
79 client.send_request(
80 Request.signatureHelp(document_position),
81 lambda response: self.handle_response(response, point))
82
83 def handle_response(self, response: 'Optional[Dict]', point) -> None:
84 if response is not None:
85 self._signatures = response.get("signatures", [])
86 self._active_signature = response.get("activeSignature", -1)
87 self._active_parameter = response.get("activeParameter", -1)
88
89 if self._signatures:
90 if not 0 <= self._active_signature < len(self._signatures):
91 debug("activeSignature {} not a valid index for signatures length {}".format(
92 self._active_signature, len(self._signatures)))
93 self._active_signature = 0
94 else:
95 if self._active_signature != -1:
96 debug("activeSignature should be -1 or null when no signatures are returned")
97 self._active_signature = -1
98
99 if len(self._signatures) > 0:
100 if self._visible:
101 self._update_popup()
102 else:
103 self._show_popup(point)
104
105 def on_query_context(self, key, _, operand, __):
106 if key != "lsp.signature_help":
107 return False # Let someone else handle this keybinding.
108 elif not self._visible:
109 if operand == 0:
110 self.request_signature_help(self.view.sel()[0].begin())
111 return True
112 else:
113 return False # Let someone else handle this keybinding.
114 elif len(self._signatures) < 2:
115 return False # Let someone else handle this keybinding.
116 else:
117 # We use the "operand" for the number -1 or +1. See the keybindings.
118 new_index = self._active_signature + operand
119
120 # clamp signature index
121 new_index = max(0, min(new_index, len(self._signatures) - 1))
122
123 # only update when changed
124 if new_index != self._active_signature:
125 self._active_signature = new_index
126 self._update_popup()
127
128 return True # We handled this keybinding.
129
130 def _show_popup(self, point: int) -> None:
131 mdpopups.show_popup(self.view,
132 self._build_popup_content(),
133 css=popup_css,
134 md=True,
135 flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,
136 location=point,
137 wrapper_class=popup_class,
138 max_width=800,
139 on_hide=self._on_hide,
140 on_navigate=self._on_hover_navigate)
141 self._visible = True
142
143 def _update_popup(self) -> None:
144 mdpopups.update_popup(self.view,
145 self._build_popup_content(),
146 css=popup_css,
147 md=True,
148 wrapper_class=popup_class)
149
150 def _build_popup_content(self) -> str:
151 if settings.highlight_active_signature_parameter:
152 return self._build_popup_content_style_vscode()
153 else:
154 # Default to "sublime".
155 return self._build_popup_content_style_sublime()
156
157 def _view_language(self, view: sublime.View, config_name: str) -> 'Optional[str]':
158 languages = view.settings().get('lsp_language')
159 return languages.get(config_name) if languages else None
160
161 def _on_hide(self):
162 self._visible = False
163
164 def _on_hover_navigate(self, href):
165 webbrowser.open_new_tab(href)
166
167 def _build_overload_selector(self) -> str:
168 return "**{}** of **{}** overloads (use the ↑ ↓ keys to navigate):\n".format(
169 str(self._active_signature + 1), str(len(self._signatures)))
170
171 def _build_popup_content_style_sublime(self) -> str:
172 signature = self._signatures[self._active_signature]
173 formatted = []
174
175 if len(self._signatures) > 1:
176 formatted.append(self._build_overload_selector())
177
178 signature_label = signature.get('label')
179 if len(signature_label) > 400:
180 label = "```{} ...```".format(signature_label[0:400]) # long code blocks = hangs
181 else:
182 label = "```{}\n{}\n```\n".format(self._language_id, signature_label)
183 formatted.append(label)
184
185 params = signature.get('parameters')
186 if params:
187 for parameter in params:
188 paramDocs = parameter.get('documentation', None)
189 if paramDocs:
190 formatted.append("**{}**\n".format(parameter.get('label')))
191 formatted.append("* *{}*\n".format(paramDocs))
192 sigDocs = signature.get('documentation', None)
193 if sigDocs:
194 formatted.append(sigDocs)
195 return "\n".join(formatted)
196
197 def _build_popup_content_style_vscode(self) -> str:
198 # Fetch all the relevant data.
199 signature_label = ""
200 signature_documentation = ""
201 parameter_label = ""
202 parameter_documentation = ""
203 if self._active_signature in range(0, len(self._signatures)):
204 signature = self._signatures[self._active_signature]
205 signature_label = html.escape(signature["label"], quote=False)
206 signature_documentation = signature.get("documentation", "") # Optional.
207 parameters = signature.get("parameters", None)
208 if parameters and self._active_parameter in range(0, len(parameters)):
209 parameter = parameters[self._active_parameter]
210 parameter_label = html.escape(parameter["label"], quote=False)
211 parameter_documentation = parameter.get("documentation", "") # Optional.
212
213 formatted = []
214
215 if len(self._signatures) > 1:
216 formatted.append(self._build_overload_selector())
217
218 # Write the active signature and give special treatment to the active parameter (if found).
219 # Note that this <div> class and the extra <pre> are copied from mdpopups' HTML output. When mdpopups changes
220 # its output style, we must update this literal string accordingly.
221 formatted.append('<div class="highlight"><pre>')
222 if parameter_label:
223 signature_label = self._replace_active_parameter(signature_label, parameter_label)
224 formatted.append(signature_label)
225 formatted.append("</pre></div>")
226
227 if parameter_documentation:
228 formatted.append(parameter_documentation)
229
230 if signature_documentation:
231 formatted.append(signature_documentation)
232
233 return "\n".join(formatted)
234
235 def _replace_active_parameter(self, signature: str, parameter: str) -> str:
236 if parameter[0].isalnum() and parameter[-1].isalnum():
237 pattern = r'\b{}\b'.format(re.escape(parameter))
238 else:
239 # If the left or right boundary of the parameter string is not an alphanumeric character, the \b check will
240 # never match. In this case, it's probably safe to assume the parameter string itself will be a good pattern
241 # to search for.
242 pattern = re.escape(parameter)
243 replacement = '<span style="font-weight: bold; text-decoration: underline">{}</span>'.format(parameter)
244 # FIXME: This is somewhat language-specific to look for an opening parenthesis. Most languages use parentheses
245 # for their parameter lists though.
246 start_of_param_list_pos = signature.find('(')
247 # Note that this works even when we don't find an opening parenthesis, because .find returns -1 in that case.
248 start_of_param_list = signature[start_of_param_list_pos + 1:]
249 return signature[:start_of_param_list_pos + 1] + re.sub(pattern, replacement, start_of_param_list, 1)
250
[end of plugin/signature_help.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugin/signature_help.py b/plugin/signature_help.py
--- a/plugin/signature_help.py
+++ b/plugin/signature_help.py
@@ -21,6 +21,22 @@
from .core.settings import settings
+def get_documentation(d: 'Dict[str, Any]') -> 'Optional[str]':
+ docs = d.get('documentation', None)
+ if docs is None:
+ return None
+ elif isinstance(docs, str):
+ # In older version of the protocol, documentation was just a string.
+ return docs
+ elif isinstance(docs, dict):
+ # This can be either "plaintext" or "markdown" format. For now, we can dump it into the popup box. It would
+ # be nice to handle the markdown in a special way.
+ return docs.get('value', None)
+ else:
+ debug('unknown documentation type:', str(d))
+ return None
+
+
class SignatureHelpListener(sublime_plugin.ViewEventListener):
def __init__(self, view):
@@ -185,10 +201,10 @@
params = signature.get('parameters')
if params:
for parameter in params:
- paramDocs = parameter.get('documentation', None)
- if paramDocs:
+ param_docs = get_documentation(parameter)
+ if param_docs:
formatted.append("**{}**\n".format(parameter.get('label')))
- formatted.append("* *{}*\n".format(paramDocs))
+ formatted.append("* *{}*\n".format(param_docs))
sigDocs = signature.get('documentation', None)
if sigDocs:
formatted.append(sigDocs)
@@ -197,18 +213,18 @@
def _build_popup_content_style_vscode(self) -> str:
# Fetch all the relevant data.
signature_label = ""
- signature_documentation = ""
+ signature_documentation = "" # type: Optional[str]
parameter_label = ""
- parameter_documentation = ""
+ parameter_documentation = "" # type: Optional[str]
if self._active_signature in range(0, len(self._signatures)):
signature = self._signatures[self._active_signature]
signature_label = html.escape(signature["label"], quote=False)
- signature_documentation = signature.get("documentation", "") # Optional.
+ signature_documentation = get_documentation(signature)
parameters = signature.get("parameters", None)
if parameters and self._active_parameter in range(0, len(parameters)):
parameter = parameters[self._active_parameter]
parameter_label = html.escape(parameter["label"], quote=False)
- parameter_documentation = parameter.get("documentation", "") # Optional.
+ parameter_documentation = get_documentation(parameter)
formatted = []
| {"golden_diff": "diff --git a/plugin/signature_help.py b/plugin/signature_help.py\n--- a/plugin/signature_help.py\n+++ b/plugin/signature_help.py\n@@ -21,6 +21,22 @@\n from .core.settings import settings\n \n \n+def get_documentation(d: 'Dict[str, Any]') -> 'Optional[str]':\n+ docs = d.get('documentation', None)\n+ if docs is None:\n+ return None\n+ elif isinstance(docs, str):\n+ # In older version of the protocol, documentation was just a string.\n+ return docs\n+ elif isinstance(docs, dict):\n+ # This can be either \"plaintext\" or \"markdown\" format. For now, we can dump it into the popup box. It would\n+ # be nice to handle the markdown in a special way.\n+ return docs.get('value', None)\n+ else:\n+ debug('unknown documentation type:', str(d))\n+ return None\n+\n+\n class SignatureHelpListener(sublime_plugin.ViewEventListener):\n \n def __init__(self, view):\n@@ -185,10 +201,10 @@\n params = signature.get('parameters')\n if params:\n for parameter in params:\n- paramDocs = parameter.get('documentation', None)\n- if paramDocs:\n+ param_docs = get_documentation(parameter)\n+ if param_docs:\n formatted.append(\"**{}**\\n\".format(parameter.get('label')))\n- formatted.append(\"* *{}*\\n\".format(paramDocs))\n+ formatted.append(\"* *{}*\\n\".format(param_docs))\n sigDocs = signature.get('documentation', None)\n if sigDocs:\n formatted.append(sigDocs)\n@@ -197,18 +213,18 @@\n def _build_popup_content_style_vscode(self) -> str:\n # Fetch all the relevant data.\n signature_label = \"\"\n- signature_documentation = \"\"\n+ signature_documentation = \"\" # type: Optional[str]\n parameter_label = \"\"\n- parameter_documentation = \"\"\n+ parameter_documentation = \"\" # type: Optional[str]\n if self._active_signature in range(0, len(self._signatures)):\n signature = self._signatures[self._active_signature]\n signature_label = html.escape(signature[\"label\"], quote=False)\n- signature_documentation = signature.get(\"documentation\", \"\") # Optional.\n+ signature_documentation = get_documentation(signature)\n parameters = signature.get(\"parameters\", None)\n if parameters and self._active_parameter in range(0, len(parameters)):\n parameter = parameters[self._active_parameter]\n parameter_label = html.escape(parameter[\"label\"], quote=False)\n- parameter_documentation = parameter.get(\"documentation\", \"\") # Optional.\n+ parameter_documentation = get_documentation(parameter)\n \n formatted = []\n", "issue": "Intelephense v1.0 released with a few issues\nI'm happy to announce Intelephense v1.0 is released. 
More info can be found [here](https://github.com/bmewburn/intelephense-docs)\r\n\r\nI'm not entirely sure if this is an LSP issue or a server issue.\r\n\r\nWhen autocompleting method names I have to manually type the parenthesis and tooltip information seems messed up.\r\n\r\n\r\n\r\n\r\n* OSX 10.14 - Intelephense 1.0\r\n* Package Control\n", "before_files": [{"content": "import mdpopups\nimport sublime\nimport sublime_plugin\nimport webbrowser\nimport re\nimport html\n\ntry:\n from typing import Any, List, Dict, Optional\n assert Any and List and Dict and Optional\nexcept ImportError:\n pass\n\nfrom .core.configurations import is_supported_syntax\nfrom .core.registry import config_for_scope, session_for_view, client_for_view\nfrom .core.documents import get_document_position\nfrom .core.events import global_events\nfrom .core.protocol import Request\nfrom .core.logging import debug\nfrom .core.popups import popup_css, popup_class\nfrom .core.settings import settings\n\n\nclass SignatureHelpListener(sublime_plugin.ViewEventListener):\n\n def __init__(self, view):\n self.view = view\n self._initialized = False\n self._signature_help_triggers = [] # type: List[str]\n self._visible = False\n self._language_id = \"\"\n self._signatures = [] # type: List[Any]\n self._active_signature = -1\n self._active_parameter = -1\n\n @classmethod\n def is_applicable(cls, settings):\n syntax = settings.get('syntax')\n return syntax and is_supported_syntax(syntax)\n\n def initialize(self):\n session = session_for_view(self.view)\n if session:\n signatureHelpProvider = session.get_capability(\n 'signatureHelpProvider')\n if signatureHelpProvider:\n self._signature_help_triggers = signatureHelpProvider.get(\n 'triggerCharacters')\n\n config = config_for_scope(self.view)\n if config:\n self._language_id = self._view_language(self.view, config.name)\n\n self._initialized = True\n\n def on_modified_async(self):\n pos = self.view.sel()[0].begin()\n # TODO: this will fire too often, narrow down using scopes or regex\n if not self._initialized:\n self.initialize()\n\n if self._signature_help_triggers:\n last_char = self.view.substr(pos - 1)\n if last_char in self._signature_help_triggers:\n self.request_signature_help(pos)\n elif self._visible:\n if last_char.isspace():\n # Peek behind to find the last non-whitespace character.\n last_char = self.view.substr(self.view.find_by_class(pos, False, ~0) - 1)\n if last_char not in self._signature_help_triggers:\n self.view.hide_popup()\n\n def request_signature_help(self, point) -> None:\n client = client_for_view(self.view)\n if client:\n global_events.publish(\"view.on_purge_changes\", self.view)\n document_position = get_document_position(self.view, point)\n if document_position:\n client.send_request(\n Request.signatureHelp(document_position),\n lambda response: self.handle_response(response, point))\n\n def handle_response(self, response: 'Optional[Dict]', point) -> None:\n if response is not None:\n self._signatures = response.get(\"signatures\", [])\n self._active_signature = response.get(\"activeSignature\", -1)\n self._active_parameter = response.get(\"activeParameter\", -1)\n\n if self._signatures:\n if not 0 <= self._active_signature < len(self._signatures):\n debug(\"activeSignature {} not a valid index for signatures length {}\".format(\n self._active_signature, len(self._signatures)))\n self._active_signature = 0\n else:\n if self._active_signature != -1:\n debug(\"activeSignature should be -1 or null when no signatures are returned\")\n 
self._active_signature = -1\n\n if len(self._signatures) > 0:\n if self._visible:\n self._update_popup()\n else:\n self._show_popup(point)\n\n def on_query_context(self, key, _, operand, __):\n if key != \"lsp.signature_help\":\n return False # Let someone else handle this keybinding.\n elif not self._visible:\n if operand == 0:\n self.request_signature_help(self.view.sel()[0].begin())\n return True\n else:\n return False # Let someone else handle this keybinding.\n elif len(self._signatures) < 2:\n return False # Let someone else handle this keybinding.\n else:\n # We use the \"operand\" for the number -1 or +1. See the keybindings.\n new_index = self._active_signature + operand\n\n # clamp signature index\n new_index = max(0, min(new_index, len(self._signatures) - 1))\n\n # only update when changed\n if new_index != self._active_signature:\n self._active_signature = new_index\n self._update_popup()\n\n return True # We handled this keybinding.\n\n def _show_popup(self, point: int) -> None:\n mdpopups.show_popup(self.view,\n self._build_popup_content(),\n css=popup_css,\n md=True,\n flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n location=point,\n wrapper_class=popup_class,\n max_width=800,\n on_hide=self._on_hide,\n on_navigate=self._on_hover_navigate)\n self._visible = True\n\n def _update_popup(self) -> None:\n mdpopups.update_popup(self.view,\n self._build_popup_content(),\n css=popup_css,\n md=True,\n wrapper_class=popup_class)\n\n def _build_popup_content(self) -> str:\n if settings.highlight_active_signature_parameter:\n return self._build_popup_content_style_vscode()\n else:\n # Default to \"sublime\".\n return self._build_popup_content_style_sublime()\n\n def _view_language(self, view: sublime.View, config_name: str) -> 'Optional[str]':\n languages = view.settings().get('lsp_language')\n return languages.get(config_name) if languages else None\n\n def _on_hide(self):\n self._visible = False\n\n def _on_hover_navigate(self, href):\n webbrowser.open_new_tab(href)\n\n def _build_overload_selector(self) -> str:\n return \"**{}** of **{}** overloads (use the \u2191 \u2193 keys to navigate):\\n\".format(\n str(self._active_signature + 1), str(len(self._signatures)))\n\n def _build_popup_content_style_sublime(self) -> str:\n signature = self._signatures[self._active_signature]\n formatted = []\n\n if len(self._signatures) > 1:\n formatted.append(self._build_overload_selector())\n\n signature_label = signature.get('label')\n if len(signature_label) > 400:\n label = \"```{} ...```\".format(signature_label[0:400]) # long code blocks = hangs\n else:\n label = \"```{}\\n{}\\n```\\n\".format(self._language_id, signature_label)\n formatted.append(label)\n\n params = signature.get('parameters')\n if params:\n for parameter in params:\n paramDocs = parameter.get('documentation', None)\n if paramDocs:\n formatted.append(\"**{}**\\n\".format(parameter.get('label')))\n formatted.append(\"* *{}*\\n\".format(paramDocs))\n sigDocs = signature.get('documentation', None)\n if sigDocs:\n formatted.append(sigDocs)\n return \"\\n\".join(formatted)\n\n def _build_popup_content_style_vscode(self) -> str:\n # Fetch all the relevant data.\n signature_label = \"\"\n signature_documentation = \"\"\n parameter_label = \"\"\n parameter_documentation = \"\"\n if self._active_signature in range(0, len(self._signatures)):\n signature = self._signatures[self._active_signature]\n signature_label = html.escape(signature[\"label\"], quote=False)\n signature_documentation = signature.get(\"documentation\", \"\") # Optional.\n 
parameters = signature.get(\"parameters\", None)\n if parameters and self._active_parameter in range(0, len(parameters)):\n parameter = parameters[self._active_parameter]\n parameter_label = html.escape(parameter[\"label\"], quote=False)\n parameter_documentation = parameter.get(\"documentation\", \"\") # Optional.\n\n formatted = []\n\n if len(self._signatures) > 1:\n formatted.append(self._build_overload_selector())\n\n # Write the active signature and give special treatment to the active parameter (if found).\n # Note that this <div> class and the extra <pre> are copied from mdpopups' HTML output. When mdpopups changes\n # its output style, we must update this literal string accordingly.\n formatted.append('<div class=\"highlight\"><pre>')\n if parameter_label:\n signature_label = self._replace_active_parameter(signature_label, parameter_label)\n formatted.append(signature_label)\n formatted.append(\"</pre></div>\")\n\n if parameter_documentation:\n formatted.append(parameter_documentation)\n\n if signature_documentation:\n formatted.append(signature_documentation)\n\n return \"\\n\".join(formatted)\n\n def _replace_active_parameter(self, signature: str, parameter: str) -> str:\n if parameter[0].isalnum() and parameter[-1].isalnum():\n pattern = r'\\b{}\\b'.format(re.escape(parameter))\n else:\n # If the left or right boundary of the parameter string is not an alphanumeric character, the \\b check will\n # never match. In this case, it's probably safe to assume the parameter string itself will be a good pattern\n # to search for.\n pattern = re.escape(parameter)\n replacement = '<span style=\"font-weight: bold; text-decoration: underline\">{}</span>'.format(parameter)\n # FIXME: This is somewhat language-specific to look for an opening parenthesis. Most languages use parentheses\n # for their parameter lists though.\n start_of_param_list_pos = signature.find('(')\n # Note that this works even when we don't find an opening parenthesis, because .find returns -1 in that case.\n start_of_param_list = signature[start_of_param_list_pos + 1:]\n return signature[:start_of_param_list_pos + 1] + re.sub(pattern, replacement, start_of_param_list, 1)\n", "path": "plugin/signature_help.py"}]} | 3,472 | 602 |
gh_patches_debug_11252 | rasdani/github-patches | git_diff | iterative__dvc-4462 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
experiments: show table includes all staged/stashed experiments instead of only the currently applicable ones
```
example-get-started git:executor-tree py:dvc ❯ dvc exp show --no-pager --include-params=featurize
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━┓
┃ Experiment ┃ auc ┃ featurize.max_features ┃ featurize.ngrams ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━┩
│ workspace │ 0.54175 │ 500 │ 5 │
│ bbdfa81 (2020-08-21 11:27:38) │ 0.54175 │ 500 │ 5 │
│ ├── ebbf40d (2020-08-21 11:28:42) │ 0.50822 │ 1500 │ 4 │
│ └── *32c3875 (2020-08-21 12:05:16) │ - │ 1500 │ 7 │
│ ├── *8cb834d (2020-08-21 12:04:59) │ - │ 1500 │ 2 │
│ ├── *32d107b (2020-08-21 12:05:01) │ - │ 1500 │ 5 │
│ └── *4f2c53c (2020-08-21 12:05:04) │ - │ 1500 │ 6 │
└────────────────────────────────────┴─────────┴────────────────────────┴──────────────────┘
```
the last 3 stashed experiments are derived from a different baseline commit and should be excluded by default (unless `--all-commit`/etc are used)
</issue>
<code>
[start of dvc/repo/experiments/show.py]
1 import logging
2 import re
3 from collections import OrderedDict, defaultdict
4 from datetime import datetime
5
6 from dvc.repo import locked
7 from dvc.repo.metrics.show import _collect_metrics, _read_metrics
8 from dvc.repo.params.show import _collect_configs, _read_params
9
10 logger = logging.getLogger(__name__)
11
12
13 EXP_RE = re.compile(r"(?P<rev_sha>[a-f0-9]{7})-(?P<exp_sha>[a-f0-9]+)")
14
15
16 def _collect_experiment(repo, branch, stash=False):
17 res = defaultdict(dict)
18 for rev in repo.brancher(revs=[branch]):
19 if rev == "workspace":
20 res["timestamp"] = None
21 else:
22 commit = repo.scm.repo.rev_parse(rev)
23 res["timestamp"] = datetime.fromtimestamp(commit.committed_date)
24
25 configs = _collect_configs(repo)
26 params = _read_params(repo, configs, rev)
27 if params:
28 res["params"] = params
29
30 res["queued"] = stash
31 if not stash:
32 metrics = _collect_metrics(repo, None, False)
33 vals = _read_metrics(repo, metrics, rev)
34 res["metrics"] = vals
35
36 return res
37
38
39 @locked
40 def show(
41 repo, all_branches=False, all_tags=False, revs=None, all_commits=False
42 ):
43 res = defaultdict(OrderedDict)
44
45 if revs is None:
46 revs = [repo.scm.get_rev()]
47
48 revs = OrderedDict(
49 (rev, None)
50 for rev in repo.brancher(
51 revs=revs,
52 all_branches=all_branches,
53 all_tags=all_tags,
54 all_commits=all_commits,
55 )
56 )
57
58 for rev in revs:
59 res[rev]["baseline"] = _collect_experiment(repo, rev)
60
61 # collect reproduced experiments
62 for exp_branch in repo.experiments.scm.list_branches():
63 m = re.match(EXP_RE, exp_branch)
64 if m:
65 rev = repo.scm.resolve_rev(m.group("rev_sha"))
66 if rev in revs:
67 exp_rev = repo.experiments.scm.resolve_rev(exp_branch)
68 with repo.experiments.chdir():
69 experiment = _collect_experiment(
70 repo.experiments.exp_dvc, exp_branch
71 )
72 res[rev][exp_rev] = experiment
73
74 # collect queued (not yet reproduced) experiments
75 for stash_rev, (_, baseline_rev) in repo.experiments.stash_revs.items():
76 with repo.experiments.chdir():
77 experiment = _collect_experiment(
78 repo.experiments.exp_dvc, stash_rev, stash=True
79 )
80 res[baseline_rev][stash_rev] = experiment
81
82 return res
83
[end of dvc/repo/experiments/show.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/repo/experiments/show.py b/dvc/repo/experiments/show.py
--- a/dvc/repo/experiments/show.py
+++ b/dvc/repo/experiments/show.py
@@ -73,10 +73,11 @@
# collect queued (not yet reproduced) experiments
for stash_rev, (_, baseline_rev) in repo.experiments.stash_revs.items():
- with repo.experiments.chdir():
- experiment = _collect_experiment(
- repo.experiments.exp_dvc, stash_rev, stash=True
- )
- res[baseline_rev][stash_rev] = experiment
+ if baseline_rev in revs:
+ with repo.experiments.chdir():
+ experiment = _collect_experiment(
+ repo.experiments.exp_dvc, stash_rev, stash=True
+ )
+ res[baseline_rev][stash_rev] = experiment
return res
| {"golden_diff": "diff --git a/dvc/repo/experiments/show.py b/dvc/repo/experiments/show.py\n--- a/dvc/repo/experiments/show.py\n+++ b/dvc/repo/experiments/show.py\n@@ -73,10 +73,11 @@\n \n # collect queued (not yet reproduced) experiments\n for stash_rev, (_, baseline_rev) in repo.experiments.stash_revs.items():\n- with repo.experiments.chdir():\n- experiment = _collect_experiment(\n- repo.experiments.exp_dvc, stash_rev, stash=True\n- )\n- res[baseline_rev][stash_rev] = experiment\n+ if baseline_rev in revs:\n+ with repo.experiments.chdir():\n+ experiment = _collect_experiment(\n+ repo.experiments.exp_dvc, stash_rev, stash=True\n+ )\n+ res[baseline_rev][stash_rev] = experiment\n \n return res\n", "issue": "experiments: show table includes all staged/stashed experiments instead of only the currently applicable ones\n```\r\nexample-get-started git:executor-tree py:dvc \u276f dvc exp show --no-pager --include-params=featurize\r\n\u250f\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2533\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2533\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2533\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2513\r\n\u2503 Experiment \u2503 auc \u2503 featurize.max_features \u2503 featurize.ngrams \u2503\r\n\u2521\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2547\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2547\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2547\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2529\r\n\u2502 workspace \u2502 0.54175 \u2502 500 \u2502 5 \u2502\r\n\u2502 bbdfa81 (2020-08-21 11:27:38) \u2502 0.54175 \u2502 500 \u2502 5 \u2502\r\n\u2502 \u251c\u2500\u2500 ebbf40d (2020-08-21 11:28:42) \u2502 0.50822 \u2502 1500 \u2502 4 \u2502\r\n\u2502 \u2514\u2500\u2500 *32c3875 (2020-08-21 12:05:16) \u2502 - \u2502 1500 \u2502 7 \u2502\r\n\u2502 \u251c\u2500\u2500 *8cb834d (2020-08-21 12:04:59) \u2502 - \u2502 1500 \u2502 2 \u2502\r\n\u2502 \u251c\u2500\u2500 *32d107b (2020-08-21 12:05:01) \u2502 - \u2502 1500 \u2502 5 \u2502\r\n\u2502 \u2514\u2500\u2500 *4f2c53c (2020-08-21 12:05:04) \u2502 - \u2502 1500 \u2502 6 \u2502\r\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\r\n```\r\n\r\nthe last 3 stashed experiments are derived from a different baseline commit and should be excluded by default (unless `--all-commit`/etc are used)\n", "before_files": [{"content": "import logging\nimport 
re\nfrom collections import OrderedDict, defaultdict\nfrom datetime import datetime\n\nfrom dvc.repo import locked\nfrom dvc.repo.metrics.show import _collect_metrics, _read_metrics\nfrom dvc.repo.params.show import _collect_configs, _read_params\n\nlogger = logging.getLogger(__name__)\n\n\nEXP_RE = re.compile(r\"(?P<rev_sha>[a-f0-9]{7})-(?P<exp_sha>[a-f0-9]+)\")\n\n\ndef _collect_experiment(repo, branch, stash=False):\n res = defaultdict(dict)\n for rev in repo.brancher(revs=[branch]):\n if rev == \"workspace\":\n res[\"timestamp\"] = None\n else:\n commit = repo.scm.repo.rev_parse(rev)\n res[\"timestamp\"] = datetime.fromtimestamp(commit.committed_date)\n\n configs = _collect_configs(repo)\n params = _read_params(repo, configs, rev)\n if params:\n res[\"params\"] = params\n\n res[\"queued\"] = stash\n if not stash:\n metrics = _collect_metrics(repo, None, False)\n vals = _read_metrics(repo, metrics, rev)\n res[\"metrics\"] = vals\n\n return res\n\n\n@locked\ndef show(\n repo, all_branches=False, all_tags=False, revs=None, all_commits=False\n):\n res = defaultdict(OrderedDict)\n\n if revs is None:\n revs = [repo.scm.get_rev()]\n\n revs = OrderedDict(\n (rev, None)\n for rev in repo.brancher(\n revs=revs,\n all_branches=all_branches,\n all_tags=all_tags,\n all_commits=all_commits,\n )\n )\n\n for rev in revs:\n res[rev][\"baseline\"] = _collect_experiment(repo, rev)\n\n # collect reproduced experiments\n for exp_branch in repo.experiments.scm.list_branches():\n m = re.match(EXP_RE, exp_branch)\n if m:\n rev = repo.scm.resolve_rev(m.group(\"rev_sha\"))\n if rev in revs:\n exp_rev = repo.experiments.scm.resolve_rev(exp_branch)\n with repo.experiments.chdir():\n experiment = _collect_experiment(\n repo.experiments.exp_dvc, exp_branch\n )\n res[rev][exp_rev] = experiment\n\n # collect queued (not yet reproduced) experiments\n for stash_rev, (_, baseline_rev) in repo.experiments.stash_revs.items():\n with repo.experiments.chdir():\n experiment = _collect_experiment(\n repo.experiments.exp_dvc, stash_rev, stash=True\n )\n res[baseline_rev][stash_rev] = experiment\n\n return res\n", "path": "dvc/repo/experiments/show.py"}]} | 1,825 | 196 |
gh_patches_debug_27036 | rasdani/github-patches | git_diff | DDMAL__CantusDB-1308 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
We should make sure our `ALLOWED_HOSTS` is complete
As noted by @dchiller in https://github.com/DDMAL/CantusDB/pull/1286#discussion_r1476453258
> If I understand correctly, our staging/production sites are configured to receive traffic from `www.cantusdatabase.org`, `mass.cantusdatabase.org`, `staging.cantusdatabase.org`, etc., and we open ourself up to security issues if these hosts are not all included in `ALLOWED_HOSTS`.
</issue>
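For context, a minimal sketch of how several hostnames can be accepted, assuming they are kept in a single space-separated `CANTUSDB_HOSTS` environment variable (the splitting approach and the example host list are illustrative assumptions, not the project's confirmed configuration):

```python
import os

# Assumed value, e.g.:
# CANTUSDB_HOSTS="www.cantusdatabase.org mass.cantusdatabase.org staging.cantusdatabase.org"
# str.split() with no arguments drops empty strings, so an unset variable yields [].
ALLOWED_HOSTS = os.getenv("CANTUSDB_HOSTS", "").split()
```

Every host named in the variable is then allowed, instead of only the single value read in the current settings file.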
<code>
[start of django/cantusdb_project/cantusdb/settings.py]
1 """
2 Django settings for cantusdb project.
3
4 Generated by 'django-admin startproject' using Django 3.0.6.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/3.0/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/3.0/ref/settings/
11 """
12
13 import os
14 from distutils.util import strtobool
15 from django.contrib.messages import constants as messages
16
17 # https://ordinarycoders.com/blog/article/django-messages-framework
18 MESSAGE_TAGS = {
19 messages.DEBUG: "alert-secondary",
20 messages.INFO: "alert-info",
21 messages.SUCCESS: "alert-success",
22 messages.WARNING: "alert-warning",
23 messages.ERROR: "alert-danger",
24 }
25
26 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
27 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
28
29 STATIC_ROOT = os.getenv("CANTUSDB_STATIC_ROOT")
30 MEDIA_ROOT = os.getenv("CANTUSDB_MEDIA_ROOT")
31
32 # Quick-start development settings - unsuitable for production
33 # See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
34
35 # SECURITY WARNING: keep the secret key used in production secret!
36 SECRET_KEY = os.getenv("CANTUSDB_SECRET_KEY")
37
38 # SECURITY WARNING: don't run with debug turned on in production!
39 DEBUG = bool(strtobool(os.getenv("CANTUSDB_DEBUG", "False")))
40 # need to set this to false so that we can display the custom 404 page
41
42 ALLOWED_HOSTS = [os.getenv("CANTUSDB_HOSTS")]
43
44
45 # Application definition
46
47 INSTALLED_APPS = [
48 "dal",
49 "dal_select2",
50 "django.contrib.admin",
51 "django.contrib.auth",
52 "django.contrib.contenttypes",
53 "django.contrib.sessions",
54 "django.contrib.messages",
55 "django.contrib.staticfiles",
56 "django.contrib.sites",
57 "django.contrib.flatpages",
58 "django.contrib.humanize",
59 "django.contrib.postgres",
60 "extra_views",
61 "main_app",
62 "articles",
63 "django_quill", # to provide rich-text field for articles
64 "reversion", # django-reversion, for version history of objects in database
65 "users",
66 ]
67
68 MIDDLEWARE = [
69 "django.middleware.security.SecurityMiddleware",
70 "django.contrib.sessions.middleware.SessionMiddleware",
71 "django.middleware.common.CommonMiddleware",
72 "django.middleware.csrf.CsrfViewMiddleware",
73 "django.contrib.auth.middleware.AuthenticationMiddleware",
74 "django.contrib.messages.middleware.MessageMiddleware",
75 "django.middleware.clickjacking.XFrameOptionsMiddleware",
76 "django.contrib.flatpages.middleware.FlatpageFallbackMiddleware",
77 "reversion.middleware.RevisionMiddleware",
78 ]
79
80 ROOT_URLCONF = "cantusdb.urls"
81
82 TEMPLATES = [
83 {
84 "BACKEND": "django.template.backends.django.DjangoTemplates",
85 "DIRS": [os.path.join(BASE_DIR, "templates")],
86 "APP_DIRS": True,
87 "OPTIONS": {
88 "context_processors": [
89 "django.template.context_processors.debug",
90 "django.template.context_processors.request",
91 "django.contrib.auth.context_processors.auth",
92 "django.contrib.messages.context_processors.messages",
93 "main_app.context_processors.determine_project_environment",
94 ],
95 },
96 },
97 ]
98
99 TEMPLATE_LOADERS = "django.template.loaders.app_directories.load_template_source"
100
101 WSGI_APPLICATION = "cantusdb.wsgi.application"
102
103
104 # Database
105 # https://docs.djangoproject.com/en/3.0/ref/settings/#databases
106
107 DATABASES = {
108 "default": {
109 "ENGINE": "django.db.backends.postgresql",
110 "NAME": os.getenv("POSTGRES_DB"),
111 "USER": os.getenv("POSTGRES_USER"),
112 "HOST": os.getenv("POSTGRES_HOST"),
113 "PORT": os.getenv("POSTGRES_PORT"),
114 "PASSWORD": os.getenv("POSTGRES_PASSWORD"),
115 }
116 }
117
118
119 # Password validation
120 # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
121
122 AUTH_PASSWORD_VALIDATORS = [
123 {
124 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
125 },
126 {
127 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
128 },
129 {
130 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
131 },
132 {
133 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
134 },
135 ]
136
137
138 # Internationalization
139 # https://docs.djangoproject.com/en/3.0/topics/i18n/
140
141 LANGUAGE_CODE = "en-us"
142
143 TIME_ZONE = "UTC"
144
145 USE_I18N = True
146
147 USE_L10N = True
148
149 USE_TZ = True
150
151
152 # Static files (CSS, JavaScript, Images)
153 # https://docs.djangoproject.com/en/3.0/howto/static-files/
154
155 STATIC_URL = "/static/"
156
157 STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
158
159 AUTH_USER_MODEL = "users.User"
160 LOGIN_REDIRECT_URL = "/"
161 LOGIN_URL = "/login/"
162 LOGOUT_REDIRECT_URL = "/login/"
163
164 SITE_ID = 4
165
166 # New in django 3.2: specify the default type of auto-created primary keys
167 # https://docs.djangoproject.com/en/3.2/releases/3.2/#customizing-type-of-auto-created-primary-keys
168 DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
169
170 EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
171 EMAIL_HOST = "email-smtp.us-west-2.amazonaws.com"
172 EMAIL_PORT = 587
173 EMAIL_HOST_USER = os.getenv("AWS_EMAIL_HOST_USER")
174 EMAIL_HOST_PASSWORD = os.getenv("AWS_EMAIL_HOST_PASSWORD")
175 EMAIL_USE_TLS = True
176
177 DEFAULT_FROM_EMAIL = "[email protected]"
178
179 # automatically disable all panels which user can then manually enable
180 DEBUG_TOOLBAR_CONFIG = {
181 "DISABLE_PANELS": {
182 "debug_toolbar.panels.history.HistoryPanel",
183 "debug_toolbar.panels.versions.VersionsPanel",
184 "debug_toolbar.panels.timer.TimerPanel",
185 "debug_toolbar.panels.settings.SettingsPanel",
186 "debug_toolbar.panels.headers.HeadersPanel",
187 "debug_toolbar.panels.request.RequestPanel",
188 "debug_toolbar.panels.sql.SQLPanel",
189 "debug_toolbar.panels.staticfiles.StaticFilesPanel",
190 "debug_toolbar.panels.templates.TemplatesPanel",
191 "debug_toolbar.panels.cache.CachePanel",
192 "debug_toolbar.panels.signals.SignalsPanel",
193 "debug_toolbar.panels.logging.LoggingPanel",
194 "debug_toolbar.panels.redirects.RedirectsPanel",
195 "debug_toolbar.panels.profiling.ProfilingPanel",
196 },
197 }
198
199 INTERNAL_IPS = [
200 "127.0.0.1",
201 ]
202
203 CSRF_TRUSTED_ORIGINS = ["https://cantusdatabase.org", "https://www.cantusdatabase.org"]
204
205 if DEBUG:
206 INSTALLED_APPS.append("debug_toolbar")
207 # debug toolbar must be inserted as early in the middleware as possible
208 MIDDLEWARE.insert(0, "debug_toolbar.middleware.DebugToolbarMiddleware")
209
[end of django/cantusdb_project/cantusdb/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django/cantusdb_project/cantusdb/settings.py b/django/cantusdb_project/cantusdb/settings.py
--- a/django/cantusdb_project/cantusdb/settings.py
+++ b/django/cantusdb_project/cantusdb/settings.py
@@ -35,11 +35,21 @@
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv("CANTUSDB_SECRET_KEY")
+PROJECT_ENVIRONMENT = os.getenv("PROJECT_ENVIRONMENT")
+
# SECURITY WARNING: don't run with debug turned on in production!
-DEBUG = bool(strtobool(os.getenv("CANTUSDB_DEBUG", "False")))
-# need to set this to false so that we can display the custom 404 page
+DEBUG = False # this is switched to True below when PROJECT_ENVIRONMENT=="DEVELOPMENT"
-ALLOWED_HOSTS = [os.getenv("CANTUSDB_HOSTS")]
+if PROJECT_ENVIRONMENT == "DEVELOPMENT":
+ ALLOWED_HOSTS = os.getenv("CANTUSDB_HOSTS_DEVELOPMENT").split(" ")
+ CSRF_TRUSTED_ORIGINS = os.getenv("CANTUSDB_ORIGINS_DEVELOPMENT").split(" ")
+ DEBUG = True
+if PROJECT_ENVIRONMENT == "STAGING":
+ ALLOWED_HOSTS = os.getenv("CANTUSDB_HOSTS_STAGING").split(" ")
+ CSRF_TRUSTED_ORIGINS = os.getenv("CANTUSDB_ORIGINS_STAGING").split(" ")
+if PROJECT_ENVIRONMENT == "PRODUCTION":
+ ALLOWED_HOSTS = os.getenv("CANTUSDB_HOSTS_PRODUCTION").split(" ")
+ CSRF_TRUSTED_ORIGINS = os.getenv("CANTUSDB_ORIGINS_PRODUCTION").split(" ")
# Application definition
@@ -200,8 +210,6 @@
"127.0.0.1",
]
-CSRF_TRUSTED_ORIGINS = ["https://cantusdatabase.org", "https://www.cantusdatabase.org"]
-
if DEBUG:
INSTALLED_APPS.append("debug_toolbar")
# debug toolbar must be inserted as early in the middleware as possible
| {"golden_diff": "diff --git a/django/cantusdb_project/cantusdb/settings.py b/django/cantusdb_project/cantusdb/settings.py\n--- a/django/cantusdb_project/cantusdb/settings.py\n+++ b/django/cantusdb_project/cantusdb/settings.py\n@@ -35,11 +35,21 @@\n # SECURITY WARNING: keep the secret key used in production secret!\n SECRET_KEY = os.getenv(\"CANTUSDB_SECRET_KEY\")\n \n+PROJECT_ENVIRONMENT = os.getenv(\"PROJECT_ENVIRONMENT\")\n+\n # SECURITY WARNING: don't run with debug turned on in production!\n-DEBUG = bool(strtobool(os.getenv(\"CANTUSDB_DEBUG\", \"False\")))\n-# need to set this to false so that we can display the custom 404 page\n+DEBUG = False # this is switched to True below when PROJECT_ENVIRONMENT==\"DEVELOPMENT\"\n \n-ALLOWED_HOSTS = [os.getenv(\"CANTUSDB_HOSTS\")]\n+if PROJECT_ENVIRONMENT == \"DEVELOPMENT\":\n+ ALLOWED_HOSTS = os.getenv(\"CANTUSDB_HOSTS_DEVELOPMENT\").split(\" \")\n+ CSRF_TRUSTED_ORIGINS = os.getenv(\"CANTUSDB_ORIGINS_DEVELOPMENT\").split(\" \")\n+ DEBUG = True\n+if PROJECT_ENVIRONMENT == \"STAGING\":\n+ ALLOWED_HOSTS = os.getenv(\"CANTUSDB_HOSTS_STAGING\").split(\" \")\n+ CSRF_TRUSTED_ORIGINS = os.getenv(\"CANTUSDB_ORIGINS_STAGING\").split(\" \")\n+if PROJECT_ENVIRONMENT == \"PRODUCTION\":\n+ ALLOWED_HOSTS = os.getenv(\"CANTUSDB_HOSTS_PRODUCTION\").split(\" \")\n+ CSRF_TRUSTED_ORIGINS = os.getenv(\"CANTUSDB_ORIGINS_PRODUCTION\").split(\" \")\n \n \n # Application definition\n@@ -200,8 +210,6 @@\n \"127.0.0.1\",\n ]\n \n-CSRF_TRUSTED_ORIGINS = [\"https://cantusdatabase.org\", \"https://www.cantusdatabase.org\"]\n-\n if DEBUG:\n INSTALLED_APPS.append(\"debug_toolbar\")\n # debug toolbar must be inserted as early in the middleware as possible\n", "issue": "We should make sure our `ALLOWED_HOSTS` is complete\nAs noted by @dchiller in https://github.com/DDMAL/CantusDB/pull/1286#discussion_r1476453258\r\n\r\n> If I understand correctly, our staging/production sites are configured to receive traffic from `www.cantusdatabase.org`, `mass.cantusdatabase.org`, `staging.cantusdatabase.org`, etc., and we open ourself up to security issues if these hosts are not all included in `ALLOWED_HOSTS`.\n", "before_files": [{"content": "\"\"\"\nDjango settings for cantusdb project.\n\nGenerated by 'django-admin startproject' using Django 3.0.6.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.0/ref/settings/\n\"\"\"\n\nimport os\nfrom distutils.util import strtobool\nfrom django.contrib.messages import constants as messages\n\n# https://ordinarycoders.com/blog/article/django-messages-framework\nMESSAGE_TAGS = {\n messages.DEBUG: \"alert-secondary\",\n messages.INFO: \"alert-info\",\n messages.SUCCESS: \"alert-success\",\n messages.WARNING: \"alert-warning\",\n messages.ERROR: \"alert-danger\",\n}\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSTATIC_ROOT = os.getenv(\"CANTUSDB_STATIC_ROOT\")\nMEDIA_ROOT = os.getenv(\"CANTUSDB_MEDIA_ROOT\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.getenv(\"CANTUSDB_SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = bool(strtobool(os.getenv(\"CANTUSDB_DEBUG\", \"False\")))\n# need to set 
this to false so that we can display the custom 404 page\n\nALLOWED_HOSTS = [os.getenv(\"CANTUSDB_HOSTS\")]\n\n\n# Application definition\n\nINSTALLED_APPS = [\n \"dal\",\n \"dal_select2\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.sites\",\n \"django.contrib.flatpages\",\n \"django.contrib.humanize\",\n \"django.contrib.postgres\",\n \"extra_views\",\n \"main_app\",\n \"articles\",\n \"django_quill\", # to provide rich-text field for articles\n \"reversion\", # django-reversion, for version history of objects in database\n \"users\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.contrib.flatpages.middleware.FlatpageFallbackMiddleware\",\n \"reversion.middleware.RevisionMiddleware\",\n]\n\nROOT_URLCONF = \"cantusdb.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"main_app.context_processors.determine_project_environment\",\n ],\n },\n },\n]\n\nTEMPLATE_LOADERS = \"django.template.loaders.app_directories.load_template_source\"\n\nWSGI_APPLICATION = \"cantusdb.wsgi.application\"\n\n\n# Database\n# https://docs.djangoproject.com/en/3.0/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": os.getenv(\"POSTGRES_DB\"),\n \"USER\": os.getenv(\"POSTGRES_USER\"),\n \"HOST\": os.getenv(\"POSTGRES_HOST\"),\n \"PORT\": os.getenv(\"POSTGRES_PORT\"),\n \"PASSWORD\": os.getenv(\"POSTGRES_PASSWORD\"),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.0/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.0/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"static\")]\n\nAUTH_USER_MODEL = \"users.User\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGIN_URL = \"/login/\"\nLOGOUT_REDIRECT_URL = \"/login/\"\n\nSITE_ID = 4\n\n# New in django 3.2: specify the default type of auto-created primary keys\n# https://docs.djangoproject.com/en/3.2/releases/3.2/#customizing-type-of-auto-created-primary-keys\nDEFAULT_AUTO_FIELD = 
\"django.db.models.AutoField\"\n\nEMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\nEMAIL_HOST = \"email-smtp.us-west-2.amazonaws.com\"\nEMAIL_PORT = 587\nEMAIL_HOST_USER = os.getenv(\"AWS_EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = os.getenv(\"AWS_EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = True\n\nDEFAULT_FROM_EMAIL = \"[email protected]\"\n\n# automatically disable all panels which user can then manually enable\nDEBUG_TOOLBAR_CONFIG = {\n \"DISABLE_PANELS\": {\n \"debug_toolbar.panels.history.HistoryPanel\",\n \"debug_toolbar.panels.versions.VersionsPanel\",\n \"debug_toolbar.panels.timer.TimerPanel\",\n \"debug_toolbar.panels.settings.SettingsPanel\",\n \"debug_toolbar.panels.headers.HeadersPanel\",\n \"debug_toolbar.panels.request.RequestPanel\",\n \"debug_toolbar.panels.sql.SQLPanel\",\n \"debug_toolbar.panels.staticfiles.StaticFilesPanel\",\n \"debug_toolbar.panels.templates.TemplatesPanel\",\n \"debug_toolbar.panels.cache.CachePanel\",\n \"debug_toolbar.panels.signals.SignalsPanel\",\n \"debug_toolbar.panels.logging.LoggingPanel\",\n \"debug_toolbar.panels.redirects.RedirectsPanel\",\n \"debug_toolbar.panels.profiling.ProfilingPanel\",\n },\n}\n\nINTERNAL_IPS = [\n \"127.0.0.1\",\n]\n\nCSRF_TRUSTED_ORIGINS = [\"https://cantusdatabase.org\", \"https://www.cantusdatabase.org\"]\n\nif DEBUG:\n INSTALLED_APPS.append(\"debug_toolbar\")\n # debug toolbar must be inserted as early in the middleware as possible\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\")\n", "path": "django/cantusdb_project/cantusdb/settings.py"}]} | 2,689 | 483 |
gh_patches_debug_29924 | rasdani/github-patches | git_diff | cupy__cupy-3130 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Got ValueError when `ord` in `cupy.linalg.norm()` is 2 or -2
Reproducer:
```python
>>> import numpy as np
>>> a = [[2, 0, 1], [-1, 1, 0], [-3, 3, 0]]
>>> a = np.asarray(a, dtype=np.float64)
>>> np.linalg.norm(a, ord=2)
4.723421263784789
>>>
>>> import cupy as cp
>>> b = cp.asarray(a)
>>> cp.linalg.norm(b, ord=2)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/leofang/test/cupy2/cupy/linalg/norms.py", line 124, in norm
raise ValueError('Invalid norm order for matrices.')
ValueError: Invalid norm order for matrices.
```
`ord=2` returns the largest singular value and `-2` the smallest.
</issue>
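For reference, the expected behaviour can be confirmed with NumPy alone, reusing the matrix from the reproducer (a minimal sketch; the assertions are illustrative and not part of the original report):

```python
import numpy as np

a = np.asarray([[2, 0, 1], [-1, 1, 0], [-3, 3, 0]], dtype=np.float64)

# Singular values are returned in descending order.
s = np.linalg.svd(a, compute_uv=False)

assert np.isclose(np.linalg.norm(a, ord=2), s[0])    # largest singular value, ~4.7234
assert np.isclose(np.linalg.norm(a, ord=-2), s[-1])  # smallest singular value
```

A CuPy-side fix would presumably have to route `ord=2` and `ord=-2` through the SVD already available via `cupy.linalg`.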
<code>
[start of cupy/linalg/norms.py]
1 import numpy
2 from numpy import linalg
3
4 import cupy
5 from cupy.linalg import decomposition
6 from cupy.linalg import util
7
8
9 def norm(x, ord=None, axis=None, keepdims=False):
10 """Returns one of matrix norms specified by ``ord`` parameter.
11
12 See numpy.linalg.norm for more detail.
13
14 Args:
15 x (cupy.ndarray): Array to take norm. If ``axis`` is None,
16 ``x`` must be 1-D or 2-D.
17 ord (non-zero int, inf, -inf, 'fro'): Norm type.
18 axis (int, 2-tuple of ints, None): 1-D or 2-D norm is cumputed over
19 ``axis``.
20 keepdims (bool): If this is set ``True``, the axes which are normed
21 over are left.
22
23 Returns:
24 cupy.ndarray
25
26 """
27 if not issubclass(x.dtype.type, numpy.inexact):
28 x = x.astype(float)
29
30 # Immediately handle some default, simple, fast, and common cases.
31 if axis is None:
32 ndim = x.ndim
33 if (ord is None or (ndim == 1 and ord == 2) or
34 (ndim == 2 and ord in ('f', 'fro'))):
35 if x.dtype.kind == 'c':
36 s = abs(x.ravel())
37 s *= s
38 ret = cupy.sqrt(s.sum())
39 else:
40 ret = cupy.sqrt((x * x).sum())
41 if keepdims:
42 ret = ret.reshape((1,) * ndim)
43 return ret
44
45 # Normalize the `axis` argument to a tuple.
46 nd = x.ndim
47 if axis is None:
48 axis = tuple(range(nd))
49 elif not isinstance(axis, tuple):
50 try:
51 axis = int(axis)
52 except Exception:
53 raise TypeError(
54 '\'axis\' must be None, an integer or a tuple of integers')
55 axis = (axis,)
56
57 if len(axis) == 1:
58 if ord == numpy.Inf:
59 return abs(x).max(axis=axis, keepdims=keepdims)
60 elif ord == -numpy.Inf:
61 return abs(x).min(axis=axis, keepdims=keepdims)
62 elif ord == 0:
63 # Zero norm
64 # Convert to Python float in accordance with NumPy
65 return (x != 0).astype(x.real.dtype).sum(
66 axis=axis, keepdims=keepdims)
67 elif ord == 1:
68 # special case for speedup
69 return abs(x).sum(axis=axis, keepdims=keepdims)
70 elif ord is None or ord == 2:
71 # special case for speedup
72 if x.dtype.kind == 'c':
73 s = abs(x)
74 s *= s
75 else:
76 s = x * x
77 return cupy.sqrt(s.sum(axis=axis, keepdims=keepdims))
78 else:
79 try:
80 float(ord)
81 except TypeError:
82 raise ValueError('Invalid norm order for vectors.')
83
84 absx = abs(x)
85 absx **= ord
86 ret = absx.sum(axis=axis, keepdims=keepdims)
87 ret **= cupy.reciprocal(ord, dtype=ret.dtype)
88 return ret
89 elif len(axis) == 2:
90 row_axis, col_axis = axis
91 if row_axis < 0:
92 row_axis += nd
93 if col_axis < 0:
94 col_axis += nd
95 if not (0 <= row_axis < nd and 0 <= col_axis < nd):
96 raise ValueError('Invalid axis %r for an array with shape %r' %
97 (axis, x.shape))
98 if row_axis == col_axis:
99 raise ValueError('Duplicate axes given.')
100 if ord == 1:
101 if col_axis > row_axis:
102 col_axis -= 1
103 ret = abs(x).sum(axis=row_axis).max(axis=col_axis)
104 elif ord == numpy.Inf:
105 if row_axis > col_axis:
106 row_axis -= 1
107 ret = abs(x).sum(axis=col_axis).max(axis=row_axis)
108 elif ord == -1:
109 if col_axis > row_axis:
110 col_axis -= 1
111 ret = abs(x).sum(axis=row_axis).min(axis=col_axis)
112 elif ord == -numpy.Inf:
113 if row_axis > col_axis:
114 row_axis -= 1
115 ret = abs(x).sum(axis=col_axis).min(axis=row_axis)
116 elif ord in [None, 'fro', 'f']:
117 if x.dtype.kind == 'c':
118 s = abs(x)
119 s *= s
120 ret = cupy.sqrt(s.sum(axis=axis))
121 else:
122 ret = cupy.sqrt((x * x).sum(axis=axis))
123 else:
124 raise ValueError('Invalid norm order for matrices.')
125 if keepdims:
126 ret_shape = list(x.shape)
127 ret_shape[axis[0]] = 1
128 ret_shape[axis[1]] = 1
129 ret = ret.reshape(ret_shape)
130 return ret
131 else:
132 raise ValueError('Improper number of dimensions to norm.')
133
134
135 # TODO(okuta): Implement cond
136
137
138 def det(a):
139 """Retruns the deteminant of an array.
140
141 Args:
142 a (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.
143
144 Returns:
145 cupy.ndarray: Determinant of ``a``. Its shape is ``a.shape[:-2]``.
146
147 .. seealso:: :func:`numpy.linalg.det`
148 """
149 sign, logdet = slogdet(a)
150 return sign * cupy.exp(logdet)
151
152
153 def matrix_rank(M, tol=None):
154 """Return matrix rank of array using SVD method
155
156 Args:
157 M (cupy.ndarray): Input array. Its `ndim` must be less than or equal to
158 2.
159 tol (None or float): Threshold of singular value of `M`.
160 When `tol` is `None`, and `eps` is the epsilon value for datatype
161 of `M`, then `tol` is set to `S.max() * max(M.shape) * eps`,
162 where `S` is the singular value of `M`.
163 It obeys :func:`numpy.linalg.matrix_rank`.
164
165 Returns:
166 cupy.ndarray: Rank of `M`.
167
168 .. seealso:: :func:`numpy.linalg.matrix_rank`
169 """
170 if M.ndim < 2:
171 return (M != 0).any().astype(int)
172 S = decomposition.svd(M, compute_uv=False)
173 if tol is None:
174 tol = (S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) *
175 numpy.finfo(S.dtype).eps)
176 return (S > tol).sum(axis=-1, dtype=numpy.intp)
177
178
179 def slogdet(a):
180 """Returns sign and logarithm of the determinant of an array.
181
182 It calculates the natural logarithm of the determinant of a given value.
183
184 Args:
185 a (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.
186
187 Returns:
188 tuple of :class:`~cupy.ndarray`:
189 It returns a tuple ``(sign, logdet)``. ``sign`` represents each
190 sign of the determinant as a real number ``0``, ``1`` or ``-1``.
191 'logdet' represents the natural logarithm of the absolute of the
192 determinant.
193 If the determinant is zero, ``sign`` will be ``0`` and ``logdet``
194 will be ``-inf``.
195 The shapes of both ``sign`` and ``logdet`` are equal to
196 ``a.shape[:-2]``.
197
198 .. warning::
199 This function calls one or more cuSOLVER routine(s) which may yield
200 invalid results if input conditions are not met.
201 To detect these invalid results, you can set the `linalg`
202 configuration to a value that is not `ignore` in
203 :func:`cupyx.errstate` or :func:`cupyx.seterr`.
204
205 .. warning::
206 To produce the same results as :func:`numpy.linalg.slogdet` for
207 singular inputs, set the `linalg` configuration to `raise`.
208
209 .. seealso:: :func:`numpy.linalg.slogdet`
210 """
211 if a.ndim < 2:
212 msg = ('%d-dimensional array given. '
213 'Array must be at least two-dimensional' % a.ndim)
214 raise linalg.LinAlgError(msg)
215 util._assert_nd_squareness(a)
216
217 dtype = numpy.promote_types(a.dtype.char, 'f')
218 real_dtype = dtype
219
220 # TODO(kataoka): support complex types
221 if dtype not in (numpy.float32, numpy.float64):
222 msg = ('dtype must be float32 or float64'
223 ' (actual: {})'.format(a.dtype))
224 raise ValueError(msg)
225
226 a_shape = a.shape
227 shape = a_shape[:-2]
228 n = a_shape[-2]
229
230 if a.size == 0:
231 # empty batch (result is empty, too) or empty matrices det([[]]) == 1
232 sign = cupy.ones(shape, dtype)
233 logdet = cupy.zeros(shape, real_dtype)
234 return sign, logdet
235
236 lu, ipiv, dev_info = decomposition._lu_factor(a, dtype)
237
238 # dev_info < 0 means illegal value (in dimensions, strides, and etc.) that
239 # should never happen even if the matrix contains nan or inf.
240 # TODO(kataoka): assert dev_info >= 0 if synchronization is allowed for
241 # debugging purposes.
242
243 diag = cupy.diagonal(lu, axis1=-2, axis2=-1)
244
245 # ipiv is 1-origin
246 non_zero = (cupy.count_nonzero(ipiv != cupy.arange(1, n + 1), axis=-1) +
247 cupy.count_nonzero(diag < 0, axis=-1))
248
249 # Note: sign == -1 ** (non_zero % 2)
250 sign = (non_zero % 2) * -2 + 1
251 logdet = cupy.log(abs(diag)).sum(axis=-1)
252
253 singular = dev_info > 0
254 return (
255 cupy.where(singular, dtype.type(0), sign.astype(dtype)).reshape(shape),
256 cupy.where(singular, real_dtype.type('-inf'), logdet).reshape(shape),
257 )
258
259
260 def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
261 """Returns the sum along the diagonals of an array.
262
263 It computes the sum along the diagonals at ``axis1`` and ``axis2``.
264
265 Args:
266 a (cupy.ndarray): Array to take trace.
267 offset (int): Index of diagonals. Zero indicates the main diagonal, a
268 positive value an upper diagonal, and a negative value a lower
269 diagonal.
270 axis1 (int): The first axis along which the trace is taken.
271 axis2 (int): The second axis along which the trace is taken.
272 dtype: Data type specifier of the output.
273 out (cupy.ndarray): Output array.
274
275 Returns:
276 cupy.ndarray: The trace of ``a`` along axes ``(axis1, axis2)``.
277
278 .. seealso:: :func:`numpy.trace`
279
280 """
281 # TODO(okuta): check type
282 return a.trace(offset, axis1, axis2, dtype, out)
283
[end of cupy/linalg/norms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cupy/linalg/norms.py b/cupy/linalg/norms.py
--- a/cupy/linalg/norms.py
+++ b/cupy/linalg/norms.py
@@ -5,6 +5,14 @@
from cupy.linalg import decomposition
from cupy.linalg import util
+import functools
+
+
+def _multi_svd_norm(x, row_axis, col_axis, op):
+ y = cupy.moveaxis(x, (row_axis, col_axis), (-2, -1))
+ result = op(decomposition.svd(y, compute_uv=False), axis=-1)
+ return result
+
def norm(x, ord=None, axis=None, keepdims=False):
"""Returns one of matrix norms specified by ``ord`` parameter.
@@ -97,7 +105,13 @@
(axis, x.shape))
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
- if ord == 1:
+ if ord == 2:
+ op_max = functools.partial(cupy.take, indices=0)
+ ret = _multi_svd_norm(x, row_axis, col_axis, op_max)
+ elif ord == -2:
+ op_min = functools.partial(cupy.take, indices=-1)
+ ret = _multi_svd_norm(x, row_axis, col_axis, op_min)
+ elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = abs(x).sum(axis=row_axis).max(axis=col_axis)
@@ -120,6 +134,8 @@
ret = cupy.sqrt(s.sum(axis=axis))
else:
ret = cupy.sqrt((x * x).sum(axis=axis))
+ elif ord == 'nuc':
+ ret = _multi_svd_norm(x, row_axis, col_axis, cupy.sum)
else:
raise ValueError('Invalid norm order for matrices.')
if keepdims:
| {"golden_diff": "diff --git a/cupy/linalg/norms.py b/cupy/linalg/norms.py\n--- a/cupy/linalg/norms.py\n+++ b/cupy/linalg/norms.py\n@@ -5,6 +5,14 @@\n from cupy.linalg import decomposition\n from cupy.linalg import util\n \n+import functools\n+\n+\n+def _multi_svd_norm(x, row_axis, col_axis, op):\n+ y = cupy.moveaxis(x, (row_axis, col_axis), (-2, -1))\n+ result = op(decomposition.svd(y, compute_uv=False), axis=-1)\n+ return result\n+\n \n def norm(x, ord=None, axis=None, keepdims=False):\n \"\"\"Returns one of matrix norms specified by ``ord`` parameter.\n@@ -97,7 +105,13 @@\n (axis, x.shape))\n if row_axis == col_axis:\n raise ValueError('Duplicate axes given.')\n- if ord == 1:\n+ if ord == 2:\n+ op_max = functools.partial(cupy.take, indices=0)\n+ ret = _multi_svd_norm(x, row_axis, col_axis, op_max)\n+ elif ord == -2:\n+ op_min = functools.partial(cupy.take, indices=-1)\n+ ret = _multi_svd_norm(x, row_axis, col_axis, op_min)\n+ elif ord == 1:\n if col_axis > row_axis:\n col_axis -= 1\n ret = abs(x).sum(axis=row_axis).max(axis=col_axis)\n@@ -120,6 +134,8 @@\n ret = cupy.sqrt(s.sum(axis=axis))\n else:\n ret = cupy.sqrt((x * x).sum(axis=axis))\n+ elif ord == 'nuc':\n+ ret = _multi_svd_norm(x, row_axis, col_axis, cupy.sum)\n else:\n raise ValueError('Invalid norm order for matrices.')\n if keepdims:\n", "issue": "Got ValueError when `ord` in `cupy.linalg.norm()` is 2 or -2\nReproducer:\r\n```python\r\n>>> import numpy as np\r\n>>> a = [[2, 0, 1], [-1, 1, 0], [-3, 3, 0]]\r\n>>> a = np.asarray(a, dtype=np.float64)\r\n>>> np.linalg.norm(a, ord=2)\r\n4.723421263784789\r\n>>>\r\n>>> import cupy as cp\r\n>>> b = cp.asarray(a)\r\n>>> cp.linalg.norm(b, ord=2)\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/leofang/test/cupy2/cupy/linalg/norms.py\", line 124, in norm\r\n raise ValueError('Invalid norm order for matrices.')\r\nValueError: Invalid norm order for matrices.\r\n```\r\n`ord=2` returns the largest singular value and `-2` the smallest. \n", "before_files": [{"content": "import numpy\nfrom numpy import linalg\n\nimport cupy\nfrom cupy.linalg import decomposition\nfrom cupy.linalg import util\n\n\ndef norm(x, ord=None, axis=None, keepdims=False):\n \"\"\"Returns one of matrix norms specified by ``ord`` parameter.\n\n See numpy.linalg.norm for more detail.\n\n Args:\n x (cupy.ndarray): Array to take norm. 
If ``axis`` is None,\n ``x`` must be 1-D or 2-D.\n ord (non-zero int, inf, -inf, 'fro'): Norm type.\n axis (int, 2-tuple of ints, None): 1-D or 2-D norm is cumputed over\n ``axis``.\n keepdims (bool): If this is set ``True``, the axes which are normed\n over are left.\n\n Returns:\n cupy.ndarray\n\n \"\"\"\n if not issubclass(x.dtype.type, numpy.inexact):\n x = x.astype(float)\n\n # Immediately handle some default, simple, fast, and common cases.\n if axis is None:\n ndim = x.ndim\n if (ord is None or (ndim == 1 and ord == 2) or\n (ndim == 2 and ord in ('f', 'fro'))):\n if x.dtype.kind == 'c':\n s = abs(x.ravel())\n s *= s\n ret = cupy.sqrt(s.sum())\n else:\n ret = cupy.sqrt((x * x).sum())\n if keepdims:\n ret = ret.reshape((1,) * ndim)\n return ret\n\n # Normalize the `axis` argument to a tuple.\n nd = x.ndim\n if axis is None:\n axis = tuple(range(nd))\n elif not isinstance(axis, tuple):\n try:\n axis = int(axis)\n except Exception:\n raise TypeError(\n '\\'axis\\' must be None, an integer or a tuple of integers')\n axis = (axis,)\n\n if len(axis) == 1:\n if ord == numpy.Inf:\n return abs(x).max(axis=axis, keepdims=keepdims)\n elif ord == -numpy.Inf:\n return abs(x).min(axis=axis, keepdims=keepdims)\n elif ord == 0:\n # Zero norm\n # Convert to Python float in accordance with NumPy\n return (x != 0).astype(x.real.dtype).sum(\n axis=axis, keepdims=keepdims)\n elif ord == 1:\n # special case for speedup\n return abs(x).sum(axis=axis, keepdims=keepdims)\n elif ord is None or ord == 2:\n # special case for speedup\n if x.dtype.kind == 'c':\n s = abs(x)\n s *= s\n else:\n s = x * x\n return cupy.sqrt(s.sum(axis=axis, keepdims=keepdims))\n else:\n try:\n float(ord)\n except TypeError:\n raise ValueError('Invalid norm order for vectors.')\n\n absx = abs(x)\n absx **= ord\n ret = absx.sum(axis=axis, keepdims=keepdims)\n ret **= cupy.reciprocal(ord, dtype=ret.dtype)\n return ret\n elif len(axis) == 2:\n row_axis, col_axis = axis\n if row_axis < 0:\n row_axis += nd\n if col_axis < 0:\n col_axis += nd\n if not (0 <= row_axis < nd and 0 <= col_axis < nd):\n raise ValueError('Invalid axis %r for an array with shape %r' %\n (axis, x.shape))\n if row_axis == col_axis:\n raise ValueError('Duplicate axes given.')\n if ord == 1:\n if col_axis > row_axis:\n col_axis -= 1\n ret = abs(x).sum(axis=row_axis).max(axis=col_axis)\n elif ord == numpy.Inf:\n if row_axis > col_axis:\n row_axis -= 1\n ret = abs(x).sum(axis=col_axis).max(axis=row_axis)\n elif ord == -1:\n if col_axis > row_axis:\n col_axis -= 1\n ret = abs(x).sum(axis=row_axis).min(axis=col_axis)\n elif ord == -numpy.Inf:\n if row_axis > col_axis:\n row_axis -= 1\n ret = abs(x).sum(axis=col_axis).min(axis=row_axis)\n elif ord in [None, 'fro', 'f']:\n if x.dtype.kind == 'c':\n s = abs(x)\n s *= s\n ret = cupy.sqrt(s.sum(axis=axis))\n else:\n ret = cupy.sqrt((x * x).sum(axis=axis))\n else:\n raise ValueError('Invalid norm order for matrices.')\n if keepdims:\n ret_shape = list(x.shape)\n ret_shape[axis[0]] = 1\n ret_shape[axis[1]] = 1\n ret = ret.reshape(ret_shape)\n return ret\n else:\n raise ValueError('Improper number of dimensions to norm.')\n\n\n# TODO(okuta): Implement cond\n\n\ndef det(a):\n \"\"\"Retruns the deteminant of an array.\n\n Args:\n a (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.\n\n Returns:\n cupy.ndarray: Determinant of ``a``. Its shape is ``a.shape[:-2]``.\n\n .. 
seealso:: :func:`numpy.linalg.det`\n \"\"\"\n sign, logdet = slogdet(a)\n return sign * cupy.exp(logdet)\n\n\ndef matrix_rank(M, tol=None):\n \"\"\"Return matrix rank of array using SVD method\n\n Args:\n M (cupy.ndarray): Input array. Its `ndim` must be less than or equal to\n 2.\n tol (None or float): Threshold of singular value of `M`.\n When `tol` is `None`, and `eps` is the epsilon value for datatype\n of `M`, then `tol` is set to `S.max() * max(M.shape) * eps`,\n where `S` is the singular value of `M`.\n It obeys :func:`numpy.linalg.matrix_rank`.\n\n Returns:\n cupy.ndarray: Rank of `M`.\n\n .. seealso:: :func:`numpy.linalg.matrix_rank`\n \"\"\"\n if M.ndim < 2:\n return (M != 0).any().astype(int)\n S = decomposition.svd(M, compute_uv=False)\n if tol is None:\n tol = (S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) *\n numpy.finfo(S.dtype).eps)\n return (S > tol).sum(axis=-1, dtype=numpy.intp)\n\n\ndef slogdet(a):\n \"\"\"Returns sign and logarithm of the determinant of an array.\n\n It calculates the natural logarithm of the determinant of a given value.\n\n Args:\n a (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.\n\n Returns:\n tuple of :class:`~cupy.ndarray`:\n It returns a tuple ``(sign, logdet)``. ``sign`` represents each\n sign of the determinant as a real number ``0``, ``1`` or ``-1``.\n 'logdet' represents the natural logarithm of the absolute of the\n determinant.\n If the determinant is zero, ``sign`` will be ``0`` and ``logdet``\n will be ``-inf``.\n The shapes of both ``sign`` and ``logdet`` are equal to\n ``a.shape[:-2]``.\n\n .. warning::\n This function calls one or more cuSOLVER routine(s) which may yield\n invalid results if input conditions are not met.\n To detect these invalid results, you can set the `linalg`\n configuration to a value that is not `ignore` in\n :func:`cupyx.errstate` or :func:`cupyx.seterr`.\n\n .. warning::\n To produce the same results as :func:`numpy.linalg.slogdet` for\n singular inputs, set the `linalg` configuration to `raise`.\n\n .. seealso:: :func:`numpy.linalg.slogdet`\n \"\"\"\n if a.ndim < 2:\n msg = ('%d-dimensional array given. '\n 'Array must be at least two-dimensional' % a.ndim)\n raise linalg.LinAlgError(msg)\n util._assert_nd_squareness(a)\n\n dtype = numpy.promote_types(a.dtype.char, 'f')\n real_dtype = dtype\n\n # TODO(kataoka): support complex types\n if dtype not in (numpy.float32, numpy.float64):\n msg = ('dtype must be float32 or float64'\n ' (actual: {})'.format(a.dtype))\n raise ValueError(msg)\n\n a_shape = a.shape\n shape = a_shape[:-2]\n n = a_shape[-2]\n\n if a.size == 0:\n # empty batch (result is empty, too) or empty matrices det([[]]) == 1\n sign = cupy.ones(shape, dtype)\n logdet = cupy.zeros(shape, real_dtype)\n return sign, logdet\n\n lu, ipiv, dev_info = decomposition._lu_factor(a, dtype)\n\n # dev_info < 0 means illegal value (in dimensions, strides, and etc.) 
that\n # should never happen even if the matrix contains nan or inf.\n # TODO(kataoka): assert dev_info >= 0 if synchronization is allowed for\n # debugging purposes.\n\n diag = cupy.diagonal(lu, axis1=-2, axis2=-1)\n\n # ipiv is 1-origin\n non_zero = (cupy.count_nonzero(ipiv != cupy.arange(1, n + 1), axis=-1) +\n cupy.count_nonzero(diag < 0, axis=-1))\n\n # Note: sign == -1 ** (non_zero % 2)\n sign = (non_zero % 2) * -2 + 1\n logdet = cupy.log(abs(diag)).sum(axis=-1)\n\n singular = dev_info > 0\n return (\n cupy.where(singular, dtype.type(0), sign.astype(dtype)).reshape(shape),\n cupy.where(singular, real_dtype.type('-inf'), logdet).reshape(shape),\n )\n\n\ndef trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):\n \"\"\"Returns the sum along the diagonals of an array.\n\n It computes the sum along the diagonals at ``axis1`` and ``axis2``.\n\n Args:\n a (cupy.ndarray): Array to take trace.\n offset (int): Index of diagonals. Zero indicates the main diagonal, a\n positive value an upper diagonal, and a negative value a lower\n diagonal.\n axis1 (int): The first axis along which the trace is taken.\n axis2 (int): The second axis along which the trace is taken.\n dtype: Data type specifier of the output.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: The trace of ``a`` along axes ``(axis1, axis2)``.\n\n .. seealso:: :func:`numpy.trace`\n\n \"\"\"\n # TODO(okuta): check type\n return a.trace(offset, axis1, axis2, dtype, out)\n", "path": "cupy/linalg/norms.py"}]} | 4,007 | 433 |